diff --git a/dataset.py b/dataset.py
index fec9efe..e8abb7e 100644
--- a/dataset.py
+++ b/dataset.py
@@ -5,10 +5,21 @@ import numpy as np
 import pandas as pd
 from tqdm import tqdm
 
+# index 0 is reserved for characters outside the vocabulary
+chars = dict((char, idx + 1) for (idx, char) in
+             enumerate(string.ascii_lowercase + string.punctuation + string.digits))
+
 
 def get_character_dict():
-    return dict((char, idx) for (idx, char) in
-                enumerate(string.ascii_lowercase + string.punctuation))
+    return chars
+
+
+def encode_char(c):
+    # unknown characters map to the reserved index 0
+    return chars.get(c, 0)
+
+
+encode_char = np.vectorize(encode_char)
 
 
 def get_user_chunks(dataFrame, windowSize=10, overlapping=False,
@@ -49,16 +60,21 @@ def get_user_chunks(dataFrame, windowSize=10, overlapping=False,
             curDomains = useData['domain']
             outDomainLists.append(list(curDomains))
             outDFFrames.append(useData)
+    # drop a trailing window that is shorter than windowSize
+    if outDomainLists and len(outDomainLists[-1]) != windowSize:
+        outDomainLists.pop(-1)
+        outDFFrames.pop(-1)
     return (outDomainLists, outDFFrames)
 
 
 def get_domain_features(domain, vocab, max_length=40):
-    curFeature = np.zeros([max_length, ])
+    encoding = np.zeros((max_length,))
     for j in range(np.min([len(domain), max_length])):
-        curCharacter = domain[-j]
+        # encode right-to-left; domain[-j] would read domain[0] when j == 0
+        curCharacter = domain[-(j + 1)]
         if curCharacter in vocab:
-            curFeature[j] = vocab[curCharacter]
-    return curFeature
+            encoding[j] = vocab[curCharacter]
+    return encoding
 
 
 def get_flow_features(flow):
@@ -86,66 +102,75 @@ def get_cisco_features(curDataLine, urlSIPDict):
         return np.zeros([numCiscoFeatures, ]).ravel()
 
 
-def create_dataset_from_flows(user_flow_df, char_dict, maxLen, windowSize=10, use_cisco_features=False):
-    domainLists = []
-    dfLists = []
+def create_dataset_from_flows(user_flow_df, char_dict, max_len, window_size=10, use_cisco_features=False):
+    domains = []
+    features = []
     print("get chunks from user data frames")
     for i, user_flow in enumerate(get_flow_per_user(user_flow_df)):
-        (domainListsTmp, dfListsTmp) = get_user_chunks(user_flow, windowSize=windowSize,
-                                                       overlapping=True, maxLengthInSeconds=-1)
-        domainLists += domainListsTmp
-        dfLists += dfListsTmp
+        (domain_windows, feature_windows) = get_user_chunks(user_flow,
+                                                            windowSize=window_size,
+                                                            overlapping=True,
+                                                            maxLengthInSeconds=-1)
+        domains += domain_windows
+        features += feature_windows
         # TODO: remove later
         if i >= 10:
             break
 
     print("create training dataset")
     return create_dataset_from_lists(
-        domains=domainLists, dfs=dfLists, vocab=char_dict,
-        maxLen=maxLen,
+        domains=domains, features=features, vocab=char_dict,
+        max_len=max_len,
         use_cisco_features=use_cisco_features, urlSIPDIct=dict(),
-        window_size=windowSize)
+        window_size=window_size)
 
 
-def create_dataset_from_lists(domains, dfs, vocab, maxLen,
+def create_dataset_from_lists(domains, features, vocab, max_len,
                               use_cisco_features=False, urlSIPDIct=dict(),
                               window_size=10):
+    """
+    Combine domain and feature windows into sequential training data.
+
+    :param domains: list of domain windows
+    :param features: list of per-window DataFrames holding the flow features
+    :param vocab: character-to-index mapping used to encode the domains
+    :param max_len: maximum length of an encoded domain
+    :param use_cisco_features: whether to append the Cisco features to each flow
+    :param urlSIPDIct: lookup table passed through to get_cisco_features
+    :param window_size: number of flows per window
+    :return: tuple (X, hits, names, servers, trusted_hits)
+    """
     # TODO: check for hits vs vth consistency
-    if 'hits' in dfs[0].keys():
-        hitName = 'hits'
-    elif 'virusTotalHits' in dfs[0].keys():
-        hitName = 'virusTotalHits'
+    # (the old code used 'hits' when present and fell back to 'virusTotalHits')
+    hits_col = "virusTotalHits"
+
     numFlowFeatures = 3
     numCiscoFeatures = 30
     numFeatures = numFlowFeatures
     if use_cisco_features:
         numFeatures += numCiscoFeatures
-    Xs = []
+    sample_size = len(domains)
     hits = []
     names = []
     servers = []
     trusted_hits = []
-    for i in range(window_size):
-        Xs.append(np.zeros([len(domains), maxLen]))
-        Xs.append(np.zeros([len(domains), numFeatures]))
-    for i in tqdm(np.arange(len(domains)), miniters=10):
-        ctr = 0
-        for j in range(np.min([window_size, len(domains[i])])):
-            Xs[ctr][i, :] = get_domain_features(domains[i][j], vocab, maxLen)
-            ctr += 1
-            if use_cisco_features:
-                Xs[ctr][i, 0:numFlowFeatures] = get_flow_features(dfs[i].iloc[j])
-                Xs[ctr][i, numFlowFeatures:] = get_cisco_features(dfs[i].iloc[j], urlSIPDIct)
-            else:
-                Xs[ctr][i, :] = get_flow_features(dfs[i].iloc[j])
-            ctr += 1
+    domain_features = np.zeros((sample_size, window_size, max_len))
+    flow_features = np.zeros((sample_size, window_size, numFeatures))
 
-        hits.append(np.max(dfs[i][hitName]))
-        names.append(np.unique(dfs[i]['user_hash']))
-        servers.append(np.max(dfs[i]['serverLabel']))
-        trusted_hits.append(np.max(dfs[i]['trustedHits']))
-    return Xs, np.array(hits), np.array(names), np.array(servers), np.array(trusted_hits)
+    for i in tqdm(np.arange(sample_size), miniters=10):
+        for j in range(window_size):
+            # windows shorter than window_size were already dropped in get_user_chunks
+            domain_features[i, j] = get_domain_features(domains[i][j], vocab, max_len)
+            flow_features[i, j] = get_flow_features(features[i].iloc[j])
+            # TODO: cisco features?
+
+        hits.append(np.max(features[i][hits_col]))
+        names.append(np.unique(features[i]['user_hash']))
+        servers.append(np.max(features[i]['serverLabel']))
+        trusted_hits.append(np.max(features[i]['trustedHits']))
+    X = [domain_features, flow_features]
+    return X, np.array(hits), np.array(names), np.array(servers), np.array(trusted_hits)
 
 
 def discretize_label(values, threshold):
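Note (not part of the patch): a minimal sketch of how the new character encoding and the fixed-shape output fit together. The import path and the array sizes below are assumptions for illustration; the defaults in the patch are window_size=10 and max_len=40.

```python
# Review sketch, not part of the patch. Assumes dataset.py is importable.
import numpy as np
from dataset import encode_char, get_character_dict

# 1) character encoding: valid characters map to 1..len(vocab), 0 is reserved
vocab = get_character_dict()
assert vocab['a'] == 1
assert encode_char('a') == 1
assert encode_char('A') == 0                 # uppercase is not in the vocabulary
print(encode_char(np.array(list("a.1-Z"))))  # vectorized lookup; 'Z' -> 0

# 2) dataset shapes: X is now a list of two fixed-shape tensors, one per input
n_windows, window_size, max_len, n_flow_features = 128, 10, 40, 3  # hypothetical sizes
domain_features = np.zeros((n_windows, window_size, max_len))
flow_features = np.zeros((n_windows, window_size, n_flow_features))
X = [domain_features, flow_features]
assert X[0].shape == (128, 10, 40) and X[1].shape == (128, 10, 3)
```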
diff --git a/main.py b/main.py
index bab241a..5b0da5a 100644
--- a/main.py
+++ b/main.py
@@ -104,10 +104,6 @@ def main():
     client_labels = client_labels[idx]
     server_labels = server_tr[idx]
 
-    # TODO: remove when features are flattened
-    for i in range(len(X_tr)):
-        X_tr[i] = X_tr[i][idx]
-
     shared_cnn = models.get_shared_cnn(len(char_dict) + 1, embeddingSize, maxLen,
                                        domainFeatures, kernel_size, domainFeatures, 0.5)
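Note on the `len(char_dict) + 1` passed to `models.get_shared_cnn`: valid characters now map to 1..len(char_dict) with 0 reserved for unknowns and padding, so an embedding layer needs one extra row. A minimal sketch of that sizing, assuming `get_shared_cnn` wraps a Keras-style `Embedding` (the actual layer stack in `models.py` may differ):

```python
# Sketch under stated assumptions; embedding_size is hypothetical,
# main.py passes embeddingSize.
from keras.layers import Embedding

vocab_size = 68      # len(get_character_dict()): 26 lowercase + 32 punctuation + 10 digits
embedding_size = 128
max_len = 40

# input_dim must cover indices 0..vocab_size, hence the "+ 1" in main.py
embedding = Embedding(input_dim=vocab_size + 1,
                      output_dim=embedding_size,
                      input_length=max_len)
```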