add deeper domain cnn; refactor hyperband using load_data function

This commit is contained in:
René Knaebel 2017-11-10 12:52:18 +01:00
parent 3ce385eca6
commit 461d4cab8f
4 changed files with 43 additions and 20 deletions

View File

@ -32,10 +32,13 @@ parser.add_argument("--models", action="store", dest="model_paths", nargs="+",
default=[]) default=[])
parser.add_argument("--type", action="store", dest="model_type", parser.add_argument("--type", action="store", dest="model_type",
default="final") # inter, final, staggered default="final")
parser.add_argument("--depth", action="store", dest="model_depth", parser.add_argument("--embd_type", action="store", dest="embedding_type",
default="flat1") # small, medium default="small")
# parser.add_argument("--depth", action="store", dest="model_depth",
# default="flat1")
parser.add_argument("--model_output", action="store", dest="model_output", parser.add_argument("--model_output", action="store", dest="model_output",
default="both") default="both")

21
main.py
View File

@ -58,6 +58,7 @@ if args.gpu:
# default parameter # default parameter
PARAMS = { PARAMS = {
"type": args.model_type, "type": args.model_type,
"embedding_type": args.embedding_type,
# "depth": args.model_depth, # "depth": args.model_depth,
"batch_size": args.batch_size, "batch_size": args.batch_size,
"window_size": args.window, "window_size": args.window,
@ -83,6 +84,7 @@ def get_param_dist(dist_size="small"):
return { return {
# static params # static params
"type": [args.model_type], "type": [args.model_type],
"embedding_type": [args.embedding_type],
# "depth": [args.model_depth], # "depth": [args.model_depth],
"model_output": [args.model_output], "model_output": [args.model_output],
"batch_size": [args.batch_size], "batch_size": [args.batch_size],
@ -103,6 +105,7 @@ def get_param_dist(dist_size="small"):
return { return {
# static params # static params
"type": [args.model_type], "type": [args.model_type],
"embedding_type": [args.embedding_type],
# "depth": [args.model_depth], # "depth": [args.model_depth],
"model_output": [args.model_output], "model_output": [args.model_output],
"batch_size": [args.batch_size], "batch_size": [args.batch_size],
@ -131,7 +134,7 @@ def shuffle_training_data(domain, flow, client, server):
def main_paul_best(): def main_paul_best():
pauls_best_params = best_config = { pauls_best_params = {
"type": "paul", "type": "paul",
"batch_size": 64, "batch_size": 64,
"window_size": 10, "window_size": 10,
@ -155,18 +158,8 @@ def main_paul_best():
def main_hyperband(data, domain_length, window_size, model_type, result_file, max_iter, dist_size="small"):
    """Run a hyperband hyper-parameter search on the training data.

    Args:
        data: path/identifier of the dataset handed to ``load_data``.
        domain_length: maximum domain-name length (character dimension).
        window_size: number of flows per user window.
        model_type: network head type (e.g. "final", "inter", "staggered");
            controls the label shape produced by ``load_data``.
        result_file: where ``run_hyperband`` persists its results.
        max_iter: hyperband resource budget (max iterations per config).
        dist_size: which parameter distribution to sample ("small"/"medium").

    Returns:
        Whatever ``run_hyperband`` returns (its result summary).

    Note: the previous version also computed ``param_dist =
    get_param_dist(dist_size)`` but never used it — ``run_hyperband`` is
    given ``dist_size`` and builds the distribution itself, and
    ``get_param_dist`` is side-effect free, so the dead call was removed.
    """
    logger.info("create training dataset")
    # shuffled=True so hyperband's successive-halving subsets are i.i.d.
    domain_tr, flow_tr, client_tr, server_tr = load_data(data, domain_length, window_size, model_type,
                                                         shuffled=True)
    return run_hyperband(dist_size, domain_tr, flow_tr, client_tr, server_tr, max_iter, result_file)
@ -186,13 +179,15 @@ def train(parameters, features, labels):
pass pass
def load_data(data, domain_length, window_size, model_type, shuffled=False):
    """Load (or generate) the h5 training set and derive the server label.

    Args:
        data: path/identifier passed to ``dataset.load_or_generate_h5data``.
        domain_length: maximum domain-name length (character dimension).
        window_size: number of flows per user window.
        model_type: for "inter"/"staggered" heads the per-window server
            labels are kept (with a trailing feature axis); for every other
            type they are reduced to one label per user.
        shuffled: if True, shuffle the four training arrays in unison
            (required e.g. by the hyperband search).

    Returns:
        Tuple ``(domain_tr, flow_tr, client_tr, server_tr)``. The names
        (``name_tr``) loaded alongside are not needed for training and are
        deliberately dropped.
    """
    # data preparation
    domain_tr, flow_tr, name_tr, client_tr, server_windows_tr = dataset.load_or_generate_h5data(data, domain_length,
                                                                                                window_size)
    if model_type in ("inter", "staggered"):
        # per-window labels: keep the window axis, add a feature axis of 1
        server_tr = np.expand_dims(server_windows_tr, 2)
    else:
        # one label per user: positive if ANY window is positive.
        # (Previously np.max was computed unconditionally and then thrown
        # away for inter/staggered; the if/else skips that wasted pass.)
        server_tr = np.max(server_windows_tr, axis=1)
    if shuffled:
        domain_tr, flow_tr, client_tr, server_tr = shuffle_training_data(domain_tr, flow_tr, client_tr, server_tr)
    return domain_tr, flow_tr, client_tr, server_tr

View File

@ -26,6 +26,7 @@ def get_models_by_params(params: dict):
K.clear_session() K.clear_session()
# decomposing param section # decomposing param section
# mainly embedding model # mainly embedding model
embedding_type = params.get("embedding_type", "small")
network_type = params.get("type") network_type = params.get("type")
# network_depth = params.get("depth") # network_depth = params.get("depth")
embedding_size = params.get("embedding") embedding_size = params.get("embedding")
@ -42,8 +43,14 @@ def get_models_by_params(params: dict):
dense_dim = params.get("dense_main") dense_dim = params.get("dense_main")
model_output = params.get("model_output", "both") model_output = params.get("model_output", "both")
domain_cnn = networks.get_domain_embedding_model(embedding_size, domain_length, filter_embedding, kernel_embedding, if embedding_type == "small":
hidden_embedding, 0.5) domain_cnn = networks.get_domain_embedding_model(embedding_size, domain_length, filter_embedding,
kernel_embedding, hidden_embedding, 0.5)
elif embedding_type == "deep":
domain_cnn = networks.get_domain_embedding_model2(embedding_size, domain_length, filter_embedding,
kernel_embedding, hidden_embedding, 0.5)
else:
raise ValueError("embedding type not found")
if network_type == "final": if network_type == "final":
model = networks.get_final_model(0.25, flow_features, window_size, domain_length, model = networks.get_final_model(0.25, flow_features, window_size, domain_length,

View File

@ -2,7 +2,7 @@ from collections import namedtuple
import keras import keras
from keras.engine import Input, Model as KerasModel from keras.engine import Input, Model as KerasModel
from keras.layers import Conv1D, Dense, Dropout, Embedding, GlobalMaxPooling1D, TimeDistributed from keras.layers import Conv1D, Dense, Dropout, Embedding, GlobalAveragePooling1D, GlobalMaxPooling1D, TimeDistributed
import dataset import dataset
@ -22,6 +22,24 @@ def get_domain_embedding_model(embedding_size, input_length, filter_size, kernel
return KerasModel(x, y) return KerasModel(x, y)
def get_domain_embedding_model2(embedding_size, input_length, filter_size, kernel_size, hidden_dims,
                                drop_out=0.5) -> KerasModel:
    """Deeper domain-CNN embedding: three stacked Conv1D layers.

    Variant of ``get_domain_embedding_model`` with three convolution layers
    instead of one, followed by global average pooling and a dense
    projection to ``hidden_dims``.

    Args:
        embedding_size: character embedding dimension.
        input_length: maximum domain-name length in characters.
        filter_size: number of filters in each Conv1D layer.
        kernel_size: convolution kernel width.
        hidden_dims: output dimension of the final dense layer.
        drop_out: dropout rate applied before the final dense layer.

    Returns:
        A KerasModel mapping a (input_length,) character sequence to a
        (hidden_dims,) embedding.
    """
    x = y = Input(shape=(input_length,))
    y = Embedding(input_dim=dataset.get_vocab_size(), output_dim=embedding_size)(y)
    y = Conv1D(filter_size,
               kernel_size,
               activation='relu')(y)
    y = Conv1D(filter_size,
               kernel_size,
               activation='relu')(y)
    y = Conv1D(filter_size,
               kernel_size,
               activation='relu')(y)
    y = GlobalAveragePooling1D()(y)
    # Bug fix: `drop_out` was accepted but never used. Apply it before the
    # dense projection, matching how the shallow variant is invoked with a
    # 0.5 dropout argument. NOTE(review): placement (post-pooling, pre-dense)
    # assumed from common practice — confirm against the shallow model.
    y = Dropout(drop_out)(y)
    y = Dense(hidden_dims, activation="relu")(y)
    return KerasModel(x, y)
def get_final_model(cnnDropout, flow_features, window_size, domain_length, cnn_dims, kernel_size, def get_final_model(cnnDropout, flow_features, window_size, domain_length, cnn_dims, kernel_size,
dense_dim, cnn) -> Model: dense_dim, cnn) -> Model:
ipt_domains = Input(shape=(window_size, domain_length), name="ipt_domains") ipt_domains = Input(shape=(window_size, domain_length), name="ipt_domains")