# -*- coding: utf-8 -*-
import string

import h5py
import numpy as np
import pandas as pd
from keras.utils import np_utils
from tqdm import tqdm

# map every lowercase letter, punctuation character and digit to a positive
# integer; 0 is reserved for unknown characters and zero-padding
chars = dict((char, idx + 1) for (idx, char)
             in enumerate(string.ascii_lowercase + string.punctuation + string.digits))


def get_character_dict():
    return chars


def encode_char(c):
    if c in chars:
        return chars[c]
    else:
        return 0


encode_char = np.vectorize(encode_char)


def get_user_chunks(dataFrame, windowSize=10, overlapping=False,
                    maxLengthInSeconds=300):
    # split one user's flows into windows of windowSize consecutive flows,
    # optionally dropping flows that fall outside the time-out interval
    maxMilliSeconds = maxLengthInSeconds * 1000
    outDomainLists = []
    outDFFrames = []
    if not overlapping:
        numBlocks = int(np.ceil(float(len(dataFrame)) / float(windowSize)))
        userIDs = np.arange(len(dataFrame))
        for blockID in np.arange(numBlocks):
            curIDs = userIDs[(blockID * windowSize):((blockID + 1) * windowSize)]
            useData = dataFrame.iloc[curIDs]
            curDomains = useData['domain']
            if maxLengthInSeconds != -1:
                curMinMilliSeconds = np.min(useData['timeStamp']) + maxMilliSeconds
                underTimeOutIDs = np.where(np.array(useData['timeStamp']) <= curMinMilliSeconds)[0]
                if len(underTimeOutIDs) != len(curIDs):
                    curIDs = curIDs[underTimeOutIDs]
                    useData = dataFrame.iloc[curIDs]
                    curDomains = useData['domain']
            outDomainLists.append(list(curDomains))
            outDFFrames.append(useData)
    else:
        numBlocks = len(dataFrame) + 1 - windowSize
        userIDs = np.arange(len(dataFrame))
        for blockID in np.arange(numBlocks):
            curIDs = userIDs[blockID:blockID + windowSize]
            useData = dataFrame.iloc[curIDs]
            curDomains = useData['domain']
            if maxLengthInSeconds != -1:
                curMinMilliSeconds = np.min(useData['timeStamp']) + maxMilliSeconds
                underTimeOutIDs = np.where(np.array(useData['timeStamp']) <= curMinMilliSeconds)[0]
                if len(underTimeOutIDs) != len(curIDs):
                    curIDs = curIDs[underTimeOutIDs]
                    useData = dataFrame.iloc[curIDs]
                    curDomains = useData['domain']
            outDomainLists.append(list(curDomains))
            outDFFrames.append(useData)
    # drop a trailing window that is shorter than windowSize
    if outDomainLists and len(outDomainLists[-1]) != windowSize:
        outDomainLists.pop(-1)
        outDFFrames.pop(-1)
    return (outDomainLists, outDFFrames)


def get_domain_features(domain, vocab, max_length=40):
    # encode the domain back to front so the TLD always sits at index 0;
    # short domains are zero-padded, long ones truncated to max_length
    encoding = np.zeros((max_length,))
    for j in range(min(len(domain), max_length)):
        char = domain[-(j + 1)]
        if char in vocab:
            encoding[j] = vocab[char]
    return encoding


def get_flow_features(flow):
    keys = ['duration', 'bytes_down', 'bytes_up']
    features = np.zeros((len(keys),))
    for i, key in enumerate(keys):
        # log-scale the raw counters; a missing key or unconvertible value
        # leaves the feature at its default of zero
        # TODO: verify that silently defaulting to zero does not hide
        # inconsistent feature lengths further down the pipeline
        try:
            features[i] = np.log1p(flow[key]).astype(float)
        except (KeyError, TypeError, ValueError):
            pass
    return features

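# Illustrative example (a sketch, not part of the original pipeline): the
# hypothetical domain "example.com" is encoded last-character-first against
# the module vocabulary, and the result is zero-padded/truncated to
# max_length:
#
#   >>> enc = get_domain_features("example.com", get_character_dict(), max_length=10)
#   >>> enc.shape
#   (10,)
#   >>> int(enc[0]) == get_character_dict()['m']  # trailing 'm' sits at index 0
#   True
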
def create_dataset_from_flows(user_flow_df, char_dict, max_len, window_size=10,
                              use_cisco_features=False):
    domains = []
    features = []
    print("get chunks from user data frames")
    for user_flow in tqdm(list(get_flow_per_user(user_flow_df))):
        (domain_windows, feature_windows) = get_user_chunks(user_flow,
                                                            windowSize=window_size,
                                                            overlapping=False,
                                                            maxLengthInSeconds=-1)
        domains += domain_windows
        features += feature_windows
    print("create training dataset")
    (domain_tr, flow_tr, hits_tr, names_tr,
     server_tr, trusted_hits_tr) = create_dataset_from_lists(domains=domains,
                                                             flows=features,
                                                             vocab=char_dict,
                                                             max_len=max_len,
                                                             window_size=window_size)

    # make the client labels discrete with four different values
    hits_tr = np.apply_along_axis(lambda x: discretize_label(x, 3), 0, np.atleast_2d(hits_tr))
    # select only the samples labelled 1.0 (or trusted) and 0.0 for training
    pos_idx = np.where(np.logical_or(hits_tr == 1.0, trusted_hits_tr >= 1.0))[0]
    neg_idx = np.where(hits_tr == 0.0)[0]
    idx = np.concatenate((pos_idx, neg_idx))
    # restrict the dataset to the selected samples; positives come first
    domain_tr = domain_tr[idx]
    flow_tr = flow_tr[idx]
    client_tr = np.zeros_like(idx, dtype=float)
    client_tr[:pos_idx.shape[-1]] = 1.0
    server_tr = server_tr[idx]
    client_tr = np_utils.to_categorical(client_tr, 2)
    server_tr = np_utils.to_categorical(server_tr, 2)

    return domain_tr, flow_tr, client_tr, server_tr


def store_h5dataset(domain_tr, flow_tr, client_tr, server_tr):
    f = h5py.File("data/full_dataset.h5", "w")
    # domain indices fit into int8 and labels into booleans, so store compactly
    f.create_dataset("domain", data=domain_tr.astype(np.int8))
    f.create_dataset("flow", data=flow_tr)
    f.create_dataset("client", data=client_tr.astype(bool))
    f.create_dataset("server", data=server_tr.astype(bool))
    f.close()

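# Counterpart sketch to store_h5dataset (load_h5dataset is a hypothetical
# helper, not part of the original module); it reads back the four arrays
# written above:
def load_h5dataset(path="data/full_dataset.h5"):
    with h5py.File(path, "r") as f:
        return (f["domain"][()], f["flow"][()],
                f["client"][()], f["server"][()])
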
def create_dataset_from_lists(domains, flows, vocab, max_len, window_size=10):
    """
    Combines domain and feature windows into sequential training data.

    :param domains: list of domain windows
    :param flows: list of flow feature windows, one DataFrame per window
    :param vocab: character-to-integer vocabulary used to encode domains
    :param max_len: maximum length of an encoded domain
    :param window_size: size of the flow window
    :return: (domain features, flow features, hits, user names,
              server labels, trusted hits)
    """
    numFeatures = 3
    sample_size = len(domains)
    hits = []
    names = []
    servers = []
    trusted_hits = []
    domain_features = np.zeros((sample_size, window_size, max_len))
    flow_features = np.zeros((sample_size, window_size, numFeatures))
    for i in tqdm(np.arange(sample_size), miniters=10):
        for j in range(window_size):
            domain_features[i, j, :] = get_domain_features(domains[i][j], vocab, max_len)
            flow_features[i, j, :] = get_flow_features(flows[i].iloc[j])
        hits.append(np.max(flows[i]['virusTotalHits']))
        names.append(np.unique(flows[i]['user_hash']))
        servers.append(np.max(flows[i]['serverLabel']))
        trusted_hits.append(np.max(flows[i]['trustedHits']))
    return (domain_features, flow_features, np.array(hits), np.array(names),
            np.array(servers), np.array(trusted_hits))


def discretize_label(values, threshold):
    # collapse raw hit counts into four discrete classes:
    # 1.0 (>= threshold), -1.0 (== -1), -2.0 (between 0 and threshold), 0.0 (otherwise)
    maxVal = np.max(values)
    if maxVal >= threshold:
        return 1.0
    elif maxVal == -1:
        return -1.0
    elif 0 < maxVal < threshold:
        return -2.0
    else:
        return 0.0


def get_user_flow_data(csv_file):
    # expected columns and their nominal types; only the keys are used here,
    # to restrict the frame to the relevant columns
    types = {
        "duration": int,
        "bytes_down": int,
        "bytes_up": int,
        "domain": object,
        "timeStamp": float,
        "server_ip": object,
        "user_hash": float,
        "virusTotalHits": int,
        "serverLabel": int,
        "trustedHits": int
    }
    df = pd.read_csv(csv_file)
    df = df[list(types.keys())]
    df.set_index(keys=['user_hash'], drop=False, inplace=True)
    return df


def get_flow_per_user(df):
    # yield one sub-frame per distinct user
    users = df['user_hash'].unique().tolist()
    for user in users:
        yield df.loc[df.user_hash == user]
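
# Minimal usage sketch, assuming a flow CSV with the columns listed in
# get_user_flow_data; "data/flows.csv" is a placeholder path, not part of
# the original module:
if __name__ == "__main__":
    user_flow_df = get_user_flow_data("data/flows.csv")
    domain_tr, flow_tr, client_tr, server_tr = create_dataset_from_flows(
        user_flow_df, get_character_dict(), max_len=40, window_size=10)
    store_h5dataset(domain_tr, flow_tr, client_tr, server_tr)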