diff --git a/main.py b/main.py
index 5ea631d..bab241a 100644
--- a/main.py
+++ b/main.py
@@ -1,9 +1,63 @@
+import argparse
+
 import numpy as np
 from keras.utils import np_utils
 
 import dataset
 import models
 
+parser = argparse.ArgumentParser()
+
+parser.add_argument("--modes", action="store", dest="modes", nargs="+")
+
+# parser.add_argument("--data", action="store", dest="data",
+#                     default="data/")
+#
+# parser.add_argument("--h5data", action="store", dest="h5data",
+#                     default="")
+#
+# parser.add_argument("--model", action="store", dest="model",
+#                     default="model_x")
+#
+# parser.add_argument("--pred", action="store", dest="pred",
+#                     default="")
+#
+# parser.add_argument("--type", action="store", dest="model_type",
+#                     default="simple_conv")
+#
+parser.add_argument("--batch", action="store", dest="batch_size",
+                    default=64, type=int)
+
+parser.add_argument("--epochs", action="store", dest="epochs",
+                    default=10, type=int)
+
+# parser.add_argument("--samples", action="store", dest="samples",
+#                     default=100000, type=int)
+#
+# parser.add_argument("--samples_val", action="store", dest="samples_val",
+#                     default=10000, type=int)
+#
+# parser.add_argument("--area", action="store", dest="area_size",
+#                     default=25, type=int)
+#
+# parser.add_argument("--queue", action="store", dest="queue_size",
+#                     default=50, type=int)
+#
+# parser.add_argument("--p", action="store", dest="p_train",
+#                     default=0.5, type=float)
+#
+# parser.add_argument("--p_val", action="store", dest="p_val",
+#                     default=0.01, type=float)
+#
+# parser.add_argument("--gpu", action="store", dest="gpu",
+#                     default=0, type=int)
+#
+# parser.add_argument("--tmp", action="store_true", dest="tmp")
+#
+# parser.add_argument("--test", action="store", dest="test_image",
+#                     default=6, choices=range(7), type=int)
+
+args = parser.parse_args()
 
 # config = tf.ConfigProto(log_device_placement=True)
 # config.gpu_options.per_process_gpu_memory_fraction = 0.5
@@ -31,7 +85,6 @@ def main():
     threshold = 3
     minFlowsPerUser = 10
     numEpochs = 100
-    timesNeg = -1
 
     char_dict = dataset.get_character_dict()
     user_flow_df = dataset.get_user_flow_data()
@@ -39,7 +92,7 @@ def main():
     print("create training dataset")
     (X_tr, hits_tr, names_tr, server_tr, trusted_hits_tr) = dataset.create_dataset_from_flows(
         user_flow_df, char_dict,
-        maxLen=maxLen, windowSize=windowSize)
+        max_len=maxLen, window_size=windowSize)
     # make client labels discrete with 4 different values
     # TODO: use trusted_hits_tr for client classification too
     client_labels = np.apply_along_axis(lambda x: dataset.discretize_label(x, 3), 0, np.atleast_2d(hits_tr))
@@ -65,12 +118,14 @@ def main():
                   loss='binary_crossentropy',
                   metrics=['accuracy'])
 
-    epochNumber = 0
     client_labels = np_utils.to_categorical(client_labels, 2)
     server_labels = np_utils.to_categorical(server_labels, 2)
-    model.fit(X_tr, [client_labels, server_labels], batch_size=128,
-              epochs=epochNumber + 1, shuffle=True, initial_epoch=epochNumber)  # ,
-    # validation_data=(testData,testLabel))
+    model.fit(X_tr,
+              [client_labels, server_labels],
+              batch_size=args.batch_size,
+              epochs=args.epochs,
+              shuffle=True)
+    # TODO: for validation we use future data -> validation_data=(testData,testLabel))
 
 
 if __name__ == "__main__":
diff --git a/models.py b/models.py
index af9ceb1..83989f7 100644
--- a/models.py
+++ b/models.py
@@ -45,8 +45,8 @@ def get_top_cnn(cnn, numFeatures, maxLen, windowSize, domainFeatures, filters, k
     maxPool = GlobalMaxPooling1D()(cnn)
     cnnDropout = Dropout(cnnDropout)(maxPool)
     cnnDense = Dense(cnnHiddenDims,
                      activation='relu')(cnnDropout)
-    cnnOutput1 = Dense(2, activation='softmax')(cnnDense)
-    cnnOutput2 = Dense(2, activation='softmax')(cnnDense)
+    cnnOutput1 = Dense(2, activation='softmax', name="client")(cnnDense)
+    cnnOutput2 = Dense(2, activation='softmax', name="server")(cnnDense)
     # We define a trainable model linking the
     # tweet inputs to the predictions
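
Usage sketch (assuming main.py is invoked directly with the flags this patch enables; the value passed to --modes is hypothetical, since the flag is parsed here but not yet consumed by main()):

    python main.py --modes train --batch 64 --epochs 10

--batch and --epochs fall back to their argparse defaults (64 and 10) when omitted, so the bare invocation `python main.py` trains with those values.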