move model creation back into models package

René Knaebel 2017-11-07 20:09:20 +01:00
parent b1f48c1895
commit e12bbda8c5
6 changed files with 67 additions and 119 deletions
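In practice, callers no longer unpack five models, branch on the network type, and assemble the Keras model via main.create_model; models.get_models_by_params() now returns the finished model. A rough sketch of the effect at a call site (the parameter values are illustrative only, and the real params dict also carries the embedding and CNN hyper-parameters):

import models

params = {"type": "soft", "model_output": "both", "depth": "flat1"}  # illustrative subset

# before this commit:
# embedding, model, new_model, long_model, soft_model = models.get_models_by_params(params)
# ... pick a variant based on params["type"], call create_model(model, params["model_output"]),
# ... and, for "soft", attach the weight-tying losses by hand

# after this commit: variant selection, wrapping and the soft losses all happen inside the models package
model = models.get_models_by_params(params)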

View File

@@ -1,39 +1,28 @@
 run:
-python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_both_1 --epochs 2 --depth flat1 \
---filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
---dense_embd 16 --domain_embd 8 --batch 64 --balanced_weights --type final --model_output both
-python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_both_2 --epochs 2 --depth flat1 \
---filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
---dense_embd 16 --domain_embd 8 --batch 64 --balanced_weights --type inter --model_output both
-python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_both_3 --epochs 2 --depth deep1 \
---filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
---dense_embd 16 --domain_embd 8 --batch 64 --balanced_weights --type final --model_output both
-python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_both_4 --epochs 2 --depth deep1 \
---filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
---dense_embd 16 --domain_embd 8 --batch 64 --balanced_weights --type inter --model_output both
-python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_both_5 --epochs 2 --depth flat2 \
---filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
---dense_embd 16 --domain_embd 8 --batch 64 --balanced_weights --type staggered --model_output both
-python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_client_1 --epochs 2 --depth flat2 \
---filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
---dense_embd 16 --domain_embd 8 --batch 64 --balanced_weights --type final --model_output client
-python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_client_2 --epochs 2 --depth flat2 \
---filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
---dense_embd 16 --domain_embd 8 --batch 64 --type inter --model_output client
-python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_client_3 --epochs 2 --depth deep1 \
---filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
---dense_embd 16 --domain_embd 8 --batch 64 --type final --model_output client
-python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_client_4 --epochs 2 --depth deep1 \
---filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
---dense_embd 16 --domain_embd 8 --batch 64 --balanced_weights --type inter --model_output client
+python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_client --epochs 2 --depth flat1 \
+--filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
+--dense_embd 16 --domain_embd 8 --batch 64 --type final --model_output client
+python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_final --epochs 2 --depth flat1 \
+--filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
+--dense_embd 16 --domain_embd 8 --batch 64 --type final --model_output both
+python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_inter --epochs 2 --depth flat1 \
+--filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
+--dense_embd 16 --domain_embd 8 --batch 64 --type inter --model_output both
+python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_soft --epochs 2 --depth flat1 \
+--filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
+--dense_embd 16 --domain_embd 8 --batch 64 --type soft --model_output both
+python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_long --epochs 2 --depth flat1 \
+--filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
+--dense_embd 16 --domain_embd 8 --batch 64 --type long --model_output both
+python3 main.py --mode train --data data/rk_mini.csv.gz --model results/test/test_staggered --epochs 2 --depth flat1 \
+--filter_embd 32 --kernel_embd 3 --filter_main 16 --kernel_main 3 --dense_main 32 \
+--dense_embd 16 --domain_embd 8 --batch 64 --type staggered --model_output both
 test:
 python3 main.py --mode test --batch 128 --models results/test/test_both_* --data data/rk_mini.csv.gz --model_output both

View File

@@ -8,12 +8,10 @@ from random import random as rng
 from time import ctime, time
 import joblib
-import keras.backend as K
 import numpy as np
 from keras.callbacks import EarlyStopping
 import models
-from main import create_model
 logger = logging.getLogger('cisco_logger')
@@ -48,27 +46,7 @@ class Hyperband:
     def try_params(self, n_iterations, params):
         n_iterations = int(round(n_iterations))
-        embedding, model, new_model, long_model, soft_model = models.get_models_by_params(params)
-        if params["type"] in ("inter", "staggered"):
-            model = new_model
-        if params["type"] == "long":
-            model = long_model
-        if params["type"] == "soft":
-            model = soft_model
-        model = create_model(model, params["model_output"])
-        if params["type"] == "soft":
-            conv_server = model.get_layer("conv_server").trainable_weights
-            conv_client = model.get_layer("conv_client").trainable_weights
-            l1 = [0.001 * K.sum(K.abs(x - y)) for (x, y) in zip(conv_server, conv_client)]
-            model.add_loss(l1)
-            dense_server = model.get_layer("dense_server").trainable_weights
-            dense_client = model.get_layer("dense_client").trainable_weights
-            l2 = [0.001 * K.sum(K.abs(x - y)) for (x, y) in zip(dense_server, dense_client)]
-            model.add_loss(l2)
+        model = models.get_models_by_params(params)
         callbacks = [EarlyStopping(monitor='val_loss',
                                    patience=5,

main.py
View File

@@ -3,12 +3,10 @@ import operator
 import os
 import joblib
-import keras.backend as K
 import numpy as np
 import pandas as pd
 import tensorflow as tf
 from keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint
-from keras.models import Model
 from sklearn.metrics import confusion_matrix
 import arguments
@@ -124,15 +122,6 @@ def get_param_dist(dist_size="small"):
     }
-
-def create_model(model, output_type):
-    if output_type == "both":
-        return Model(inputs=[model.in_domains, model.in_flows], outputs=(model.out_client, model.out_server))
-    elif output_type == "client":
-        return Model(inputs=[model.in_domains, model.in_flows], outputs=(model.out_client,))
-    else:
-        raise Exception("unknown model output")
-
 def shuffle_training_data(domain, flow, client, server):
     idx = np.random.permutation(len(domain))
     domain = domain[idx]
@@ -247,27 +236,7 @@ def main_train(param=None):
         custom_sample_weights = None
     logger.info(f"Generator model with params: {param}")
-    embedding, model, new_model, long_model, soft_model = models.get_models_by_params(param)
-    if args.model_type in ("inter", "staggered"):
-        model = new_model
-    if args.model_type == "long":
-        model = long_model
-    if args.model_type == "soft":
-        model = soft_model
-    model = create_model(model, args.model_output)
-    if args.model_type == "soft":
-        conv_server = model.get_layer("conv_server").trainable_weights
-        conv_client = model.get_layer("conv_client").trainable_weights
-        l1 = [0.001 * K.sum(K.abs(x - y)) for (x, y) in zip(conv_server, conv_client)]
-        model.add_loss(l1)
-        dense_server = model.get_layer("dense_server").trainable_weights
-        dense_client = model.get_layer("dense_client").trainable_weights
-        l2 = [0.001 * K.sum(K.abs(x - y)) for (x, y) in zip(dense_server, dense_client)]
-        model.add_loss(l2)
+    model = models.get_models_by_params(param)
     features = {"ipt_domains": domain_tr.value, "ipt_flows": flow_tr.value}
     if args.model_output == "both":
@@ -307,7 +276,6 @@ def main_train(param=None):
         loss_weights = {"client": 1.0, "server": 0.0}
     logger.info("compile and train model")
-    embedding.summary()
     logger.info(model.get_config())
     model.compile(optimizer='adam',
                   loss='binary_crossentropy',

View File

@@ -1,14 +1,24 @@
 import keras.backend as K
+from keras.models import Model
 from models import deep1
 from models.renes_networks import selu
 from . import flat_2, pauls_networks, renes_networks
+
+def create_model(model, output_type):
+    if output_type == "both":
+        return Model(inputs=[model.in_domains, model.in_flows], outputs=(model.out_client, model.out_server))
+    elif output_type == "client":
+        return Model(inputs=[model.in_domains, model.in_flows], outputs=(model.out_client,))
+    else:
+        raise Exception("unknown model output")
+
 def get_models_by_params(params: dict):
     # decomposing param section
     # mainly embedding model
-    # network_type = params.get("type")
+    network_type = params.get("type")
     network_depth = params.get("depth")
     embedding_size = params.get("embedding")
     filter_embedding = params.get("filter_embedding")
@@ -33,23 +43,40 @@ def get_models_by_params(params: dict):
     elif network_depth == "deep2":
         networks = renes_networks
     else:
-        raise Exception("network not found")
-    embedding_model = networks.get_embedding(embedding_size, domain_length, filter_embedding, kernel_embedding,
-                                             hidden_embedding, 0.5)
-    final = networks.get_model(0.25, flow_features, hidden_embedding, window_size, domain_length,
-                               filter_main, kernel_main, dense_dim, embedding_model, model_output)
-    inter = networks.get_new_model(0.25, flow_features, hidden_embedding, window_size, domain_length,
-                                   filter_main, kernel_main, dense_dim, embedding_model, model_output)
-    long = networks.get_new_model2(0.25, flow_features, hidden_embedding, window_size, domain_length,
-                                   filter_main, kernel_main, dense_dim, embedding_model, model_output)
-    soft = networks.get_new_soft(0.25, flow_features, hidden_embedding, window_size, domain_length,
-                                 filter_main, kernel_main, dense_dim, embedding_model, model_output)
-    return embedding_model, final, inter, long, soft
+        raise ValueError("network not found")
+    domain_cnn = networks.get_embedding(embedding_size, domain_length, filter_embedding, kernel_embedding,
+                                        hidden_embedding, 0.5)
+    if network_type == "final":
+        model = networks.get_model(0.25, flow_features, window_size, domain_length,
+                                   filter_main, kernel_main, dense_dim, domain_cnn)
+        model = create_model(model, model_output)
+    elif network_type in ("inter", "staggered"):
+        model = networks.get_new_model(0.25, flow_features, window_size, domain_length,
+                                       filter_main, kernel_main, dense_dim, domain_cnn)
+        model = create_model(model, model_output)
+    elif network_type == "long":
+        model = networks.get_new_model2(0.25, flow_features, window_size, domain_length,
+                                        filter_main, kernel_main, dense_dim, domain_cnn)
+        model = create_model(model, model_output)
+    elif network_type == "soft":
+        model = networks.get_new_soft(0.25, flow_features, window_size, domain_length,
+                                      filter_main, kernel_main, dense_dim, domain_cnn)
+        model = create_model(model, model_output)
+        conv_server = model.get_layer("conv_server").trainable_weights
+        conv_client = model.get_layer("conv_client").trainable_weights
+        l1 = [0.001 * K.sum(K.abs(x - y)) for (x, y) in zip(conv_server, conv_client)]
+        model.add_loss(l1)
+        dense_server = model.get_layer("dense_server").trainable_weights
+        dense_client = model.get_layer("dense_client").trainable_weights
+        l2 = [0.001 * K.sum(K.abs(x - y)) for (x, y) in zip(dense_server, dense_client)]
+        model.add_loss(l2)
+    else:
+        raise ValueError("network type not found")
+    return model
 def get_server_model_by_params(params: dict):
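For reference, a minimal standalone sketch of the weight-tying penalty that get_models_by_params now attaches for the "soft" type (assumes Keras 2.x as used here; the layer names and the 0.001 factor follow the diff above, while the toy model around them is purely illustrative):

import keras.backend as K
from keras.layers import Dense, Input
from keras.models import Model

# Toy two-branch model standing in for the real client/server network.
ipt = Input(shape=(16,), name="ipt_flows")
client = Dense(8, name="dense_client")(ipt)
server = Dense(8, name="dense_server")(ipt)
out_client = Dense(1, activation="sigmoid", name="client")(client)
out_server = Dense(1, activation="sigmoid", name="server")(server)
model = Model(inputs=ipt, outputs=[out_client, out_server])

# L1 penalty on the difference between the two branches' weights, attached as an
# auxiliary loss; the same pattern is applied to the conv_* and dense_* layer pairs above.
dense_server = model.get_layer("dense_server").trainable_weights
dense_client = model.get_layer("dense_client").trainable_weights
model.add_loss([0.001 * K.sum(K.abs(x - y)) for x, y in zip(dense_server, dense_client)])

model.compile(optimizer="adam", loss="binary_crossentropy")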

View File

@@ -42,8 +42,8 @@ def get_embedding(embedding_size, input_length, filter_size, kernel_size, hidden
     return KerasModel(x, y)
-def get_model(cnnDropout, flow_features, domain_features, window_size, domain_length, cnn_dims, kernel_size,
-              dense_dim, cnn, model_output="both") -> Model:
+def get_model(cnnDropout, flow_features, window_size, domain_length, cnn_dims, kernel_size,
+              dense_dim, cnn) -> Model:
     ipt_domains = Input(shape=(window_size, domain_length), name="ipt_domains")
     encoded = TimeDistributed(cnn, name="domain_cnn")(ipt_domains)
     ipt_flows = Input(shape=(window_size, flow_features), name="ipt_flows")
@@ -51,8 +51,7 @@ def get_model(cnnDropout, flow_features, domain_features, window_size, domain_le
     # CNN processing a small slides of flow windows
     y = Conv1D(cnn_dims,
                kernel_size,
-               activation='relu',
-               input_shape=(window_size, domain_features + flow_features))(merged)
+               activation='relu')(merged)
     # remove temporal dimension by global max pooling
     y = GlobalMaxPooling1D()(y)
     y = Dropout(cnnDropout)(y)
@@ -63,8 +62,8 @@ def get_model(cnnDropout, flow_features, domain_features, window_size, domain_le
     return Model(ipt_domains, ipt_flows, out_client, out_server)
-def get_new_model(dropout, flow_features, domain_features, window_size, domain_length, cnn_dims, kernel_size,
-                  dense_dim, cnn, model_output="both") -> Model:
+def get_new_model(dropout, flow_features, window_size, domain_length, cnn_dims, kernel_size,
+                  dense_dim, cnn) -> Model:
     ipt_domains = Input(shape=(window_size, domain_length), name="ipt_domains")
     ipt_flows = Input(shape=(window_size, flow_features), name="ipt_flows")
     encoded = TimeDistributed(cnn, name="domain_cnn")(ipt_domains)
@@ -105,8 +104,8 @@ def get_server_model(flow_features, domain_length, dense_dim, cnn):
     return KerasModel(inputs=[ipt_domains, ipt_flows], outputs=out_server)
-def get_new_model2(dropout, flow_features, domain_features, window_size, domain_length, cnn_dims, kernel_size,
-                   dense_dim, cnn, model_output="both") -> Model:
+def get_new_model2(dropout, flow_features, window_size, domain_length, cnn_dims, kernel_size,
+                   dense_dim, cnn) -> Model:
     ipt_domains = Input(shape=(window_size, domain_length), name="ipt_domains")
     ipt_flows = Input(shape=(window_size, flow_features), name="ipt_flows")
     encoded = TimeDistributed(cnn, name="domain_cnn")(ipt_domains)
@@ -137,19 +136,8 @@ def get_new_model2(dropout, flow_features, domain_features, window_size, domain_
     return Model(ipt_domains, ipt_flows, out_client, out_server)
-import keras.backend as K
-
-def get_new_soft(dropout, flow_features, domain_features, window_size, domain_length, cnn_dims, kernel_size,
-                 dense_dim, cnn, model_output="both") -> Model:
-    def dist_reg(distant_layer):
-        def dist_reg_h(weights):
-            print("REG FUNCTION")
-            print(weights)
-            print(distant_layer)
-            return 0.01 * K.sum(K.abs(weights - distant_layer))
-        return dist_reg_h
+def get_new_soft(dropout, flow_features, window_size, domain_length, cnn_dims, kernel_size,
+                 dense_dim, cnn) -> Model:
     ipt_domains = Input(shape=(window_size, domain_length), name="ipt_domains")
     ipt_flows = Input(shape=(window_size, flow_features), name="ipt_flows")
@@ -177,7 +165,5 @@ def get_new_soft(dropout, flow_features, domain_features, window_size, domain_le
                name="dense_client")(y)
     out_client = Dense(1, activation='sigmoid', name="client")(y)
-    # model = KerasModel(inputs=(ipt_domains, ipt_flows), outputs=(out_client, out_server))
-
     return Model(ipt_domains, ipt_flows, out_client, out_server)
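Side note: in these network modules, Model is not keras.models.Model (that one is imported as KerasModel) but a small container holding the graph endpoints, which create_model in models/__init__.py then wires into an actual Keras model via .in_domains, .in_flows, .out_client and .out_server. Its exact definition is not part of this diff; a hypothetical minimal version consistent with how it is used would be:

from collections import namedtuple

# Hypothetical sketch only: a plain holder for the input/output tensors that
# create_model() consumes; field names and order follow the usage in the diff.
Model = namedtuple("Model", ["in_domains", "in_flows", "out_client", "out_server"])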

View File

@@ -15,7 +15,7 @@ EPOCHS=10
 for ((i = ${N1}; i <= ${N2}; i++))
 do
   python main.py --mode train \
-    --train ${DATADIR} \
+    --data ${DATADIR} \
     --model ${RESDIR}/${OUTPUT}_${TYPE}_${i} \
     --epochs ${EPOCHS} \
     --embd 128 \