diff --git a/Makefile b/Makefile
index fbe5254..3effa6a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,3 @@
 test:
-	python3 main.py --epochs 1 --batch 64
+	python3 main.py --epochs 1 --batch 64 --train data/rk_data.csv.gz --test data/rk_data.csv.gz
diff --git a/dataset.py b/dataset.py
index a52d9a6..ae06ecb 100644
--- a/dataset.py
+++ b/dataset.py
@@ -117,12 +117,27 @@ def create_dataset_from_flows(user_flow_df, char_dict, max_len, window_size=10,
             break

     print("create training dataset")
-    return create_dataset_from_lists(
+    domain_tr, flow_tr, hits_tr, names_tr, server_tr, trusted_hits_tr = create_dataset_from_lists(
         domains=domains, features=features, vocab=char_dict,
         max_len=max_len,
         use_cisco_features=use_cisco_features, urlSIPDIct=dict(),
         window_size=window_size)
+
+    # make client labels discrete with 4 different values
+    # TODO: use trusted_hits_tr for client classification too
+    client_labels = np.apply_along_axis(lambda x: discretize_label(x, 3), 0, np.atleast_2d(hits_tr))
+    # select only 1.0 and 0.0 from training data
+    pos_idx = np.where(client_labels == 1.0)[0]
+    neg_idx = np.where(client_labels == 0.0)[0]
+    idx = np.concatenate((pos_idx, neg_idx))
+    # choose selected sample to train on
+    domain_tr = domain_tr[idx]
+    flow_tr = flow_tr[idx]
+    client_labels = client_labels[idx]
+    server_labels = server_tr[idx]
+
+    return domain_tr, flow_tr, client_labels, server_labels


 def create_dataset_from_lists(domains, features, vocab, max_len,
                               use_cisco_features=False, urlSIPDIct=dict(),
@@ -185,9 +200,11 @@ def discretize_label(values, threshold):
     return 0.0


-def get_user_flow_data():
-    df = pd.read_csv("data/rk_data.csv.gz")
-    df.drop("Unnamed: 0", 1, inplace=True)
+def get_user_flow_data(csv_file):
+    df = pd.read_csv(csv_file)
+    keys = ["duration", "bytes_down", "bytes_up", "domain", "timeStamp", "server_ip", "user_hash", "virusTotalHits",
+            "serverLabel", "trustedHits"]
+    df = df[keys]
     df.set_index(keys=['user_hash'], drop=False, inplace=True)
     return df
diff --git a/main.py b/main.py
index 1fc2aad..bda8ff1 100644
--- a/main.py
+++ b/main.py
@@ -1,6 +1,5 @@
 import argparse

-import numpy as np
 from keras.utils import np_utils

 import dataset
@@ -8,17 +7,20 @@ import models

 parser = argparse.ArgumentParser()

-parser.add_argument("--modes", action="store", dest="modes", nargs="+")
+# parser.add_argument("--modes", action="store", dest="modes", nargs="+")
+
+parser.add_argument("--train", action="store", dest="train_data",
+                    default="data/full_dataset.csv.tar.bz2")
+
+parser.add_argument("--test", action="store", dest="test_data",
+                    default="data/full_future_dataset.csv.tar.bz2")

-# parser.add_argument("--data", action="store", dest="data",
-#                     default="data/")
-#
 # parser.add_argument("--h5data", action="store", dest="h5data",
 #                     default="")
 #
-# parser.add_argument("--model", action="store", dest="model",
-#                     default="model_x")
-#
+parser.add_argument("--model", action="store", dest="model",
+                    default="model_x")
+
 # parser.add_argument("--pred", action="store", dest="pred",
 #                     default="")
 #
@@ -66,8 +68,7 @@ parser.add_argument("--domain_embd", action="store", dest="domain_embedding",
 #
 # parser.add_argument("--tmp", action="store_true", dest="tmp")
 #
-# parser.add_argument("--test", action="store", dest="test_image",
-#                     default=6, choices=range(7), type=int)
+# parser.add_argument("--test", action="store_true", dest="test")

 args = parser.parse_args()

@@ -82,37 +83,24 @@ def main():
     # parameter
     cnnDropout = 0.5
     cnnHiddenDims = 1024
-    flowFeatures = 3
     numCiscoFeatures = 30
     kernel_size = 3
     drop_out = 0.5
     filters = 128
     char_dict = dataset.get_character_dict()
-    user_flow_df = dataset.get_user_flow_data()
+    user_flow_df = dataset.get_user_flow_data(args.train_data)

     print("create training dataset")
-    domain_tr, flow_tr, hits_tr, names_tr, server_tr, trusted_hits_tr = dataset.create_dataset_from_flows(
+    domain_tr, flow_tr, client_tr, server_tr = dataset.create_dataset_from_flows(
         user_flow_df, char_dict,
         max_len=args.domain_length, window_size=args.window)
-    # make client labels discrete with 4 different values
-    # TODO: use trusted_hits_tr for client classification too
-    client_labels = np.apply_along_axis(lambda x: dataset.discretize_label(x, 3), 0, np.atleast_2d(hits_tr))
-    # select only 1.0 and 0.0 from training data
-    pos_idx = np.where(client_labels == 1.0)[0]
-    neg_idx = np.where(client_labels == 0.0)[0]
-    idx = np.concatenate((pos_idx, neg_idx))
-    # choose selected sample to train on
-    domain_tr = domain_tr[idx]
-    flow_tr = flow_tr[idx]
-    client_labels = client_labels[idx]
-    server_labels = server_tr[idx]

     shared_cnn = models.renes_networks.get_embedding(len(char_dict) + 1, args.embedding, args.domain_length,
                                                      args.hidden_char_dims, kernel_size, args.domain_embedding, 0.5)
     shared_cnn.summary()

-    model = models.renes_networks.get_model(cnnDropout, flowFeatures, args.domain_embedding,
+    model = models.renes_networks.get_model(cnnDropout, flow_tr.shape[-1], args.domain_embedding,
                                             args.window, args.domain_length, filters, kernel_size,
                                             cnnHiddenDims, shared_cnn)
     model.summary()
@@ -121,14 +109,23 @@ def main():
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

-    client_labels = np_utils.to_categorical(client_labels, 2)
-    server_labels = np_utils.to_categorical(server_labels, 2)
+    client_tr = np_utils.to_categorical(client_tr, 2)
+    server_tr = np_utils.to_categorical(server_tr, 2)
     model.fit([domain_tr, flow_tr],
-              [client_labels, server_labels],
+              [client_tr, server_tr],
               batch_size=args.batch_size,
               epochs=args.epochs,
-              shuffle=True)
-    # TODO: for validation we use future data -> validation_data=(testData,testLabel))
+              shuffle=True,
+              validation_split=0.2)
+
+
+def test():
+    char_dict = dataset.get_character_dict()
+    user_flow_df = dataset.get_user_flow_data(args.test_data)
+    domain_val, flow_val, client_val, server_val = dataset.create_dataset_from_flows(
+        user_flow_df, char_dict,
+        max_len=args.domain_length, window_size=args.window)
+    # TODO: get model and exec model.evaluate(...)


 if __name__ == "__main__":
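
Note: the label-handling block moved into create_dataset_from_flows keeps only
samples whose discretized client label is exactly 1.0 or 0.0; the intermediate
values are dropped before training. A minimal standalone sketch of that
selection step, with made-up label values for illustration (the real labels
come from discretize_label over hits_tr):

    import numpy as np

    # hypothetical output of the discretization step (four possible values)
    client_labels = np.array([0.0, 0.5, 1.0, 0.0, 0.75, 1.0])

    # indices of clear positives (1.0) and clear negatives (0.0)
    pos_idx = np.where(client_labels == 1.0)[0]
    neg_idx = np.where(client_labels == 0.0)[0]
    idx = np.concatenate((pos_idx, neg_idx))

    print(idx)  # [2 5 0 3] -- the 0.5 and 0.75 samples are excluded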
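
The new test() function stops at "TODO: get model and exec model.evaluate(...)".
One possible completion, assuming main() is additionally made to save the
trained model under args.model (the diff does not save it yet, so the
keras.models.load_model call and the save path are assumptions, not part of
this change):

    from keras.models import load_model

    def test():
        char_dict = dataset.get_character_dict()
        user_flow_df = dataset.get_user_flow_data(args.test_data)
        domain_val, flow_val, client_val, server_val = dataset.create_dataset_from_flows(
            user_flow_df, char_dict,
            max_len=args.domain_length, window_size=args.window)
        # one-hot encode the labels exactly as main() does before fitting
        client_val = np_utils.to_categorical(client_val, 2)
        server_val = np_utils.to_categorical(server_val, 2)
        model = load_model(args.model)  # assumes main() ran model.save(args.model)
        scores = model.evaluate([domain_val, flow_val],
                                [client_val, server_val],
                                batch_size=args.batch_size)
        print(scores)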