# -*- coding: utf-8 -*-
import string

import numpy as np
import pandas as pd
from tqdm import tqdm


def get_character_dict():
    # Maps every lowercase letter and punctuation character to an integer ID.
    # Caveat: 'a' maps to 0, which is indistinguishable from the zero padding
    # used in get_domain_features.
    return dict((char, idx) for (idx, char) in
                enumerate(string.ascii_lowercase + string.punctuation))


def get_user_chunks(dataFrame, windowSize=10, overlapping=False,
                    maxLengthInSeconds=300):
    maxMilliSeconds = maxLengthInSeconds * 1000
    outDomainLists = []
    outDFFrames = []
    userIDs = np.arange(len(dataFrame))
    if not overlapping:
        # Disjoint windows; the last one may be shorter than windowSize.
        numBlocks = int(np.ceil(len(dataFrame) / float(windowSize)))
        blocks = [userIDs[blockID * windowSize:(blockID + 1) * windowSize]
                  for blockID in range(numBlocks)]
    else:
        # Sliding windows of exactly windowSize rows.
        numBlocks = len(dataFrame) + 1 - windowSize
        blocks = [userIDs[blockID:blockID + windowSize]
                  for blockID in range(numBlocks)]
    for curIDs in blocks:
        useData = dataFrame.iloc[curIDs]
        curDomains = useData['domain']
        if maxLengthInSeconds != -1:
            # Drop rows that lie more than maxLengthInSeconds after the
            # earliest timestamp in the window.
            curMinMilliSeconds = np.min(useData['timeStamp']) + maxMilliSeconds
            underTimeOutIDs = np.where(
                np.array(useData['timeStamp']) <= curMinMilliSeconds)[0]
            if len(underTimeOutIDs) != len(curIDs):
                curIDs = curIDs[underTimeOutIDs]
                useData = dataFrame.iloc[curIDs]
                curDomains = useData['domain']
        outDomainLists.append(list(curDomains))
        outDFFrames.append(useData)
    return (outDomainLists, outDFFrames)


def get_domain_features(domain, vocab, max_length=40):
    # Encodes a domain right-to-left into integer character IDs; positions
    # beyond the domain length stay 0 (padding), as do characters missing
    # from the vocabulary.
    curFeature = np.zeros([max_length, ])
    for j in range(min(len(domain), max_length)):
        curCharacter = domain[-(j + 1)]
        if curCharacter in vocab:
            curFeature[j] = vocab[curCharacter]
    return curFeature


def get_flow_features(flow):
    # Log-scaled numeric features of a single flow row; missing or
    # non-numeric values are left at 0.
    useKeys = ['duration', 'bytes_down', 'bytes_up']
    curFeature = np.zeros([len(useKeys), ])
    for i, curKey in enumerate(useKeys):
        try:
            curFeature[i] = np.log1p(flow[curKey]).astype(float)
        except (KeyError, TypeError, ValueError):
            pass
    return curFeature


def get_cisco_features(curDataLine, urlSIPDict):
    # Looks up precomputed features by domain + server IP; falls back to a
    # zero vector when the pair is unknown.
    numCiscoFeatures = 30
    try:
        ciscoFeatures = urlSIPDict[str(curDataLine['domain']) +
                                   str(curDataLine['server_ip'])]
        # log transform
        return np.log1p(ciscoFeatures).astype(float).ravel()
    except KeyError:
        return np.zeros([numCiscoFeatures, ]).ravel()


def create_dataset_from_flows(user_flow_df, char_dict, maxLen, threshold=3,
                              windowSize=10, use_cisco_features=False):
    domainLists = []
    dfLists = []
    print("get chunks from user data frames")
    for i, user_flow in enumerate(get_flow_per_user(user_flow_df)):
        (domainListsTmp, dfListsTmp) = get_user_chunks(
            user_flow, windowSize=windowSize,
            overlapping=True, maxLengthInSeconds=-1)
        domainLists += domainListsTmp
        dfLists += dfListsTmp
        if i >= 10:
            # Debug cap: only the first few users are processed.
            break
    print("create training dataset")
    return create_dataset_from_lists(
        domains=domainLists, dfs=dfLists, characterDict=char_dict,
        maxLen=maxLen, threshold=threshold,
        use_cisco_features=use_cisco_features, urlSIPDict=dict(),
        windowSize=windowSize)
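
# A minimal illustration, not part of the pipeline above: how get_user_chunks
# slices one user's flows into windows. The helper name _demo_chunking and the
# toy DataFrame are assumptions added for demonstration only.
def _demo_chunking():
    demo = pd.DataFrame({
        'domain': ['a.com', 'b.com', 'c.com', 'd.com', 'e.com'],
        'timeStamp': [0, 1000, 2000, 3000, 4000],
    })
    # Disjoint windows of two rows; -1 disables the timeout filter.
    domains, frames = get_user_chunks(demo, windowSize=2, overlapping=False,
                                      maxLengthInSeconds=-1)
    # domains == [['a.com', 'b.com'], ['c.com', 'd.com'], ['e.com']]
    return domains, frames
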
def create_dataset_from_lists(domains, dfs, characterDict, maxLen, threshold=3,
                              use_cisco_features=False, urlSIPDict=None,
                              windowSize=10):
    if urlSIPDict is None:
        urlSIPDict = {}
    if 'hits' in dfs[0].keys():
        hitName = 'hits'
    elif 'virusTotalHits' in dfs[0].keys():
        hitName = 'virusTotalHits'
    else:
        raise ValueError("expected a 'hits' or 'virusTotalHits' column")
    numFlowFeatures = 3
    numCiscoFeatures = 30
    numFeatures = numFlowFeatures
    if use_cisco_features:
        numFeatures += numCiscoFeatures
    # Two feature matrices per window position: one for the encoded domain,
    # one for the flow (and optional Cisco) features.
    outputFeatures = []
    label = []
    hits = []
    trainNames = []
    for i in range(windowSize):
        outputFeatures.append(np.zeros([len(domains), maxLen]))
        outputFeatures.append(np.zeros([len(domains), numFeatures]))
    for i in tqdm(np.arange(len(domains)), miniters=10):
        curCounter = 0
        for j in range(min(windowSize, len(domains[i]))):
            outputFeatures[curCounter][i, :] = get_domain_features(
                domains[i][j], characterDict, maxLen)
            curCounter += 1
            if use_cisco_features:
                outputFeatures[curCounter][i, 0:numFlowFeatures] = \
                    get_flow_features(dfs[i].iloc[j])
                outputFeatures[curCounter][i, numFlowFeatures:] = \
                    get_cisco_features(dfs[i].iloc[j], urlSIPDict)
            else:
                outputFeatures[curCounter][i, :] = get_flow_features(dfs[i].iloc[j])
            curCounter += 1
        # Label from the maximum hit count in the window:
        #  1.0 malicious (>= threshold), -1.0 unknown (== -1),
        # -2.0 ambiguous (between 0 and threshold), 0.0 benign.
        maxHits = np.max(dfs[i][hitName])
        curLabel = 0.0
        if maxHits >= threshold:
            curLabel = 1.0
        elif maxHits == -1:
            curLabel = -1.0
        elif 0 < maxHits < threshold:
            curLabel = -2.0
        label.append(curLabel)
        hits.append(maxHits)
        trainNames.append(np.unique(dfs[i]['user_hash']))
    return (outputFeatures, np.array(label), np.array(hits), np.array(trainNames))


def get_user_flow_data():
    df = pd.read_csv("data/rk_data.csv.gz")
    df.drop(columns=["Unnamed: 0"], inplace=True)
    df.set_index(keys=['user_hash'], drop=False, inplace=True)
    return df


def get_flow_per_user(df):
    # Yields one sub-DataFrame per user, in order of first appearance.
    users = df['user_hash'].unique().tolist()
    for user in users:
        yield df.loc[df.user_hash == user]
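
# Minimal end-to-end sketch, an illustration rather than part of the original
# module: it assumes data/rk_data.csv.gz exists (the path read by
# get_user_flow_data) and uses maxLen=40 to match the default of
# get_domain_features; threshold and windowSize are the defaults above.
if __name__ == "__main__":
    char_dict = get_character_dict()
    user_flow_df = get_user_flow_data()
    features, labels, hits, names = create_dataset_from_flows(
        user_flow_df, char_dict, maxLen=40, threshold=3, windowSize=10)
    print("feature blocks: %d, samples: %d" % (len(features), len(labels)))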