add bulk embedding visualization and deep1 network
This commit is contained in:
@@ -1,5 +1,6 @@
|
||||
import keras.backend as K
|
||||
|
||||
from models import deep1
|
||||
from models.renes_networks import selu
|
||||
from . import flat_2, pauls_networks, renes_networks
|
||||
|
||||
@@ -10,7 +11,6 @@ def get_models_by_params(params: dict):
|
||||
# network_type = params.get("type")
|
||||
network_depth = params.get("depth")
|
||||
embedding_size = params.get("embedding")
|
||||
input_length = params.get("input_length")
|
||||
filter_embedding = params.get("filter_embedding")
|
||||
kernel_embedding = params.get("kernel_embedding")
|
||||
hidden_embedding = params.get("dense_embedding")
|
||||
@@ -32,7 +32,7 @@ def get_models_by_params(params: dict):
|
||||
networks = renes_networks
|
||||
else:
|
||||
raise Exception("network not found")
|
||||
embedding_model = networks.get_embedding(embedding_size, input_length, filter_embedding, kernel_embedding,
|
||||
embedding_model = networks.get_embedding(embedding_size, domain_length, filter_embedding, kernel_embedding,
|
||||
hidden_embedding, 0.5)
|
||||
|
||||
old_model = networks.get_model(0.25, flow_features, hidden_embedding, window_size, domain_length,
|
||||
@@ -63,6 +63,8 @@ def get_server_model_by_params(params: dict):
|
||||
elif network_depth == "flat2":
|
||||
networks = flat_2
|
||||
elif network_depth == "deep1":
|
||||
networks = deep1
|
||||
elif network_depth == "deep2":
|
||||
networks = renes_networks
|
||||
else:
|
||||
raise Exception("network not found")
|
||||
|
70
models/deep1.py
Normal file
70
models/deep1.py
Normal file
@@ -0,0 +1,70 @@
|
||||
from collections import namedtuple
|
||||
|
||||
import keras
|
||||
from keras.engine import Input, Model as KerasModel
|
||||
from keras.layers import Conv1D, Dense, Dropout, Embedding, GlobalAveragePooling1D, GlobalMaxPooling1D, TimeDistributed
|
||||
|
||||
import dataset
|
||||
|
||||
# Bundle of the two input tensors and the two output tensors that make up
# a full client/server network; returned by the model builders in this module.
Model = namedtuple("Model", "in_domains in_flows out_client out_server")
|
||||
|
||||
|
||||
def get_embedding(embedding_size, input_length, filter_size, kernel_size, hidden_dims, drop_out=0.5):
    """Build the character-level domain embedding sub-network.

    Embeds a character-index sequence, runs it through a stack of three
    Conv1D layers, collapses the temporal axis with global average pooling,
    and projects through a single dense layer.

    NOTE(review): ``drop_out`` is accepted but never used in this body —
    confirm whether a Dropout layer was intended here.
    """
    inp = Input(shape=(input_length,))
    # Vocabulary size is supplied by the project-level dataset module.
    net = Embedding(input_dim=dataset.get_vocab_size(), output_dim=embedding_size)(inp)
    net = Conv1D(filter_size, kernel_size=kernel_size, activation="relu")(net)
    # Two further fixed-width convolutions deepen the receptive field.
    for _ in range(2):
        net = Conv1D(filter_size, kernel_size=3, activation="relu")(net)
    net = GlobalAveragePooling1D()(net)
    net = Dense(hidden_dims, activation="relu")(net)
    return KerasModel(inp, net)
|
||||
|
||||
|
||||
def get_model(cnnDropout, flow_features, domain_features, window_size, domain_length, cnn_dims, kernel_size,
              dense_dim, cnn, model_output="both"):
    """Build the windowed client/server classifier around a shared domain CNN.

    The ``cnn`` embedding sub-model is applied to every domain in the window
    via TimeDistributed, its output is concatenated with the per-flow features,
    and a Conv1D + global-max-pooling stack feeds two dense layers with one
    sigmoid head per task ("client" and "server").

    Returns a ``Model`` namedtuple of (in_domains, in_flows, out_client, out_server).

    NOTE(review): ``model_output`` is accepted but never used in this body —
    confirm whether it was meant to select which heads are built.
    """
    ipt_domains = Input(shape=(window_size, domain_length), name="ipt_domains")
    ipt_flows = Input(shape=(window_size, flow_features), name="ipt_flows")
    # Share one domain embedding network across all window positions.
    encoded = TimeDistributed(cnn, name="domain_cnn")(ipt_domains)
    merged = keras.layers.concatenate([encoded, ipt_flows], -1)
    # CNN over short slices of the flow window.
    hidden = Conv1D(filters=cnn_dims, kernel_size=kernel_size, activation="relu", padding="same",
                    input_shape=(window_size, domain_features + flow_features))(merged)
    # Collapse the temporal dimension with global max pooling.
    hidden = GlobalMaxPooling1D()(hidden)
    hidden = Dropout(cnnDropout)(hidden)
    hidden = Dense(dense_dim, activation="relu")(hidden)
    hidden = Dense(dense_dim, activation="relu")(hidden)
    out_client = Dense(1, activation='sigmoid', name="client")(hidden)
    out_server = Dense(1, activation='sigmoid', name="server")(hidden)

    return Model(ipt_domains, ipt_flows, out_client, out_server)
|
||||
|
||||
|
||||
def get_new_model(dropout, flow_features, domain_features, window_size, domain_length, cnn_dims, kernel_size,
                  dense_dim, cnn, model_output="both"):
    """Build the two-stage variant: server head first, then the client head.

    A small dense branch predicts the "server" output directly from the merged
    domain/flow features; that branch's activations are concatenated back onto
    the merged features before the Conv1D stack that feeds the "client" head,
    so the client prediction can condition on the server-side representation.

    Returns a ``Model`` namedtuple of (in_domains, in_flows, out_client, out_server).

    NOTE(review): ``model_output`` is accepted but never used in this body —
    confirm whether it was meant to select which heads are built.
    """
    ipt_domains = Input(shape=(window_size, domain_length), name="ipt_domains")
    ipt_flows = Input(shape=(window_size, flow_features), name="ipt_flows")
    # Share one domain embedding network across all window positions.
    encoded = TimeDistributed(cnn, name="domain_cnn")(ipt_domains)
    merged = keras.layers.concatenate([encoded, ipt_flows], -1)

    # Server branch: two dense layers straight off the merged features.
    server_branch = Dense(dense_dim, activation="relu")(merged)
    server_branch = Dense(dense_dim,
                          activation="relu",
                          name="dense_server")(server_branch)
    out_server = Dense(1, activation="sigmoid", name="server")(server_branch)

    # Feed the server branch's activations back into the client pathway.
    enriched = keras.layers.concatenate([merged, server_branch], -1)
    # CNN over short slices of the flow window.
    client_branch = Conv1D(filters=cnn_dims,
                           kernel_size=kernel_size,
                           activation="relu",
                           padding="same",
                           input_shape=(window_size, domain_features + flow_features))(enriched)
    # Collapse the temporal dimension with global max pooling.
    client_branch = GlobalMaxPooling1D()(client_branch)
    client_branch = Dropout(dropout)(client_branch)
    client_branch = Dense(dense_dim, activation="relu")(client_branch)
    client_branch = Dense(dense_dim,
                          activation="relu",
                          name="dense_client")(client_branch)
    out_client = Dense(1, activation='sigmoid', name="client")(client_branch)

    return Model(ipt_domains, ipt_flows, out_client, out_server)
|
@@ -95,6 +95,8 @@ def get_server_model(flow_features, domain_length, dense_dim, cnn):
|
||||
ipt_domains = Input(shape=(domain_length,), name="ipt_domains")
|
||||
ipt_flows = Input(shape=(flow_features,), name="ipt_flows")
|
||||
encoded = cnn(ipt_domains)
|
||||
cnn.name = "domain_cnn"
|
||||
|
||||
merged = keras.layers.concatenate([encoded, ipt_flows], -1)
|
||||
y = Dense(dense_dim,
|
||||
activation="relu",
|
||||
|
Reference in New Issue
Block a user