# -*- coding: utf-8 -*-
import string

import numpy as np
import pandas as pd
from tqdm import tqdm

# map every lowercase letter, punctuation character and digit to a positive
# integer; 0 is reserved for characters outside this vocabulary
chars = dict((char, idx + 1) for (idx, char) in
             enumerate(string.ascii_lowercase + string.punctuation + string.digits))


def get_character_dict():
    return chars


def encode_char(c):
    if c in chars:
        return chars[c]
    else:
        return 0


encode_char = np.vectorize(encode_char)


def get_user_chunks(dataFrame, windowSize=10, overlapping=False,
                    maxLengthInSeconds=300):
    maxMilliSeconds = maxLengthInSeconds * 1000
    outDomainLists = []
    outDFFrames = []
    if not overlapping:
        numBlocks = int(np.ceil(len(dataFrame) / windowSize))
        userIDs = np.arange(len(dataFrame))
        for blockID in np.arange(numBlocks):
            curIDs = userIDs[(blockID * windowSize):((blockID + 1) * windowSize)]
            useData = dataFrame.iloc[curIDs]
            curDomains = useData['domain']
            if maxLengthInSeconds != -1:
                curMinMilliSeconds = np.min(useData['timeStamp']) + maxMilliSeconds
                # np.where returns a tuple; take the index array itself
                underTimeOutIDs = np.where(np.array(useData['timeStamp']) <= curMinMilliSeconds)[0]
                if len(underTimeOutIDs) != len(curIDs):
                    curIDs = curIDs[underTimeOutIDs]
                    useData = dataFrame.iloc[curIDs]
                    curDomains = useData['domain']
            outDomainLists.append(list(curDomains))
            outDFFrames.append(useData)
    else:
        numBlocks = len(dataFrame) + 1 - windowSize
        userIDs = np.arange(len(dataFrame))
        for blockID in np.arange(numBlocks):
            curIDs = userIDs[blockID:blockID + windowSize]
            useData = dataFrame.iloc[curIDs]
            curDomains = useData['domain']
            if maxLengthInSeconds != -1:
                curMinMilliSeconds = np.min(useData['timeStamp']) + maxMilliSeconds
                # np.where returns a tuple; take the index array itself
                underTimeOutIDs = np.where(np.array(useData['timeStamp']) <= curMinMilliSeconds)[0]
                if len(underTimeOutIDs) != len(curIDs):
                    curIDs = curIDs[underTimeOutIDs]
                    useData = dataFrame.iloc[curIDs]
                    curDomains = useData['domain']
            outDomainLists.append(list(curDomains))
            outDFFrames.append(useData)
    # drop a trailing window that is shorter than windowSize
    if outDomainLists and len(outDomainLists[-1]) != windowSize:
        outDomainLists.pop(-1)
        outDFFrames.pop(-1)
    return (outDomainLists, outDFFrames)


def get_domain_features(domain, vocab, max_length=40):
    # right-aligned encoding: position 0 holds the last character of the
    # domain, position 1 the second to last, and so on; unused positions
    # stay zero (padding)
    encoding = np.zeros((max_length,))
    for j in range(min(len(domain), max_length)):
        curCharacter = domain[-(j + 1)]
        if curCharacter in vocab:
            encoding[j] = vocab[curCharacter]
    return encoding
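# A minimal sketch of how get_domain_features right-aligns a domain
# ("google.com" is just an illustrative input, not project data):
#
#   vocab = get_character_dict()
#   enc = get_domain_features("google.com", vocab, max_length=40)
#   # enc[0] == vocab['m'], enc[1] == vocab['o'], enc[2] == vocab['c'], ...
#   # enc[10:] stays zero (padding)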
def get_flow_features(flow):
    keys = ['duration', 'bytes_down', 'bytes_up']
    features = np.zeros([len(keys), ])
    for i, key in enumerate(keys):
        # TODO: check whether defaulting to zero is safe when a key is
        # missing or a value cannot be log-transformed; something may break
        # if the resulting feature lengths are inconsistent
        try:
            features[i] = np.log1p(flow[key]).astype(float)
        except (KeyError, TypeError, ValueError):
            pass
    return features


def get_cisco_features(curDataLine, urlSIPDict):
    numCiscoFeatures = 30
    try:
        ciscoFeatures = urlSIPDict[str(curDataLine['domain']) + str(curDataLine['server_ip'])]
        # log transform
        ciscoFeatures = np.log1p(ciscoFeatures).astype(float)
        return ciscoFeatures.ravel()
    except KeyError:
        # unknown domain/IP pair: fall back to an all-zero feature vector
        return np.zeros([numCiscoFeatures, ]).ravel()


def create_dataset_from_flows(user_flow_df, char_dict, max_len, window_size=10, use_cisco_features=False):
    domains = []
    features = []
    print("get chunks from user data frames")
    for i, user_flow in enumerate(get_flow_per_user(user_flow_df)):
        (domain_windows, feature_windows) = get_user_chunks(user_flow,
                                                            windowSize=window_size,
                                                            overlapping=True,
                                                            maxLengthInSeconds=-1)
        domains += domain_windows
        features += feature_windows
        # TODO: remove this limit later; only the first users are processed for now
        if i >= 10:
            break

    print("create training dataset")
    return create_dataset_from_lists(
        domains=domains, features=features, vocab=char_dict,
        max_len=max_len,
        use_cisco_features=use_cisco_features, urlSIPDict=dict(),
        window_size=window_size)


def create_dataset_from_lists(domains, features, vocab, max_len,
                              use_cisco_features=False, urlSIPDict=dict(),
                              window_size=10):
    """
    combines domain and feature windows to sequential training data

    :param domains: list of domain windows
    :param features: list of feature windows
    :param vocab: dict mapping characters to integer codes
    :param max_len: maximum domain length; shorter domains are zero-padded
    :param use_cisco_features: whether to append the Cisco URL/IP features
    :param urlSIPDict: mapping from domain + server IP to Cisco feature vectors
    :param window_size: size of the flow window
    :return: feature tensors X and the hits, names, servers and trusted_hits labels
    """
    # TODO: check for hits vs vth consistency
    # if 'hits' in dfs[0].keys():
    #     hits_col = 'hits'
    # elif 'virusTotalHits' in dfs[0].keys():
    #     hits_col = 'virusTotalHits'
    hits_col = "virusTotalHits"

    numFlowFeatures = 3
    numCiscoFeatures = 30
    numFeatures = numFlowFeatures
    if use_cisco_features:
        numFeatures += numCiscoFeatures
    sample_size = len(domains)
    hits = []
    names = []
    servers = []
    trusted_hits = []
    domain_features = np.zeros((sample_size, window_size, max_len))
    flow_features = np.zeros((sample_size, window_size, numFeatures))

    for i in tqdm(np.arange(sample_size), miniters=10):
        for j in range(window_size):
            domain_features[i, j] = get_domain_features(domains[i][j], vocab, max_len)
            flow_features[i, j] = get_flow_features(features[i].iloc[j])
            # TODO: cisco features?
        hits.append(np.max(features[i][hits_col]))
        names.append(np.unique(features[i]['user_hash']))
        servers.append(np.max(features[i]['serverLabel']))
        trusted_hits.append(np.max(features[i]['trustedHits']))

    X = [domain_features, flow_features]
    return X, np.array(hits), np.array(names), np.array(servers), np.array(trusted_hits)


def discretize_label(values, threshold):
    maxVal = np.max(values)
    if maxVal >= threshold:
        return 1.0
    elif maxVal == -1:
        return -1.0
    elif 0 < maxVal < threshold:
        return -2.0
    else:
        return 0.0


def get_user_flow_data():
    df = pd.read_csv("data/rk_data.csv.gz")
    # drop the CSV's unnamed index column and index the frame by user
    df.drop(columns=["Unnamed: 0"], inplace=True)
    df.set_index(keys=['user_hash'], drop=False, inplace=True)
    return df


def get_flow_per_user(df):
    users = df['user_hash'].unique().tolist()
    for user in users:
        yield df.loc[df.user_hash == user]
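# A minimal usage sketch, assuming data/rk_data.csv.gz exists and contains the
# columns referenced above (user_hash, domain, timeStamp, duration, bytes_down,
# bytes_up, virusTotalHits, serverLabel, trustedHits); max_len=40 mirrors the
# default of get_domain_features.
if __name__ == "__main__":
    user_flow_df = get_user_flow_data()
    X, hits, names, servers, trusted_hits = create_dataset_from_flows(
        user_flow_df, get_character_dict(), max_len=40, window_size=10)
    print("domain tensor: %s, flow tensor: %s" % (X[0].shape, X[1].shape))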