add deeper domain cnn; refactor hyperband using load_data function
@@ -26,6 +26,7 @@ def get_models_by_params(params: dict):
     K.clear_session()
     # decomposing param section
     # mainly embedding model
+    embedding_type = params.get("embedding_type", "small")
     network_type = params.get("type")
     # network_depth = params.get("depth")
     embedding_size = params.get("embedding")
@@ -42,8 +43,14 @@ def get_models_by_params(params: dict):
     dense_dim = params.get("dense_main")
     model_output = params.get("model_output", "both")
 
-    domain_cnn = networks.get_domain_embedding_model(embedding_size, domain_length, filter_embedding, kernel_embedding,
-                                                     hidden_embedding, 0.5)
+    if embedding_type == "small":
+        domain_cnn = networks.get_domain_embedding_model(embedding_size, domain_length, filter_embedding,
+                                                         kernel_embedding, hidden_embedding, 0.5)
+    elif embedding_type == "deep":
+        domain_cnn = networks.get_domain_embedding_model2(embedding_size, domain_length, filter_embedding,
+                                                          kernel_embedding, hidden_embedding, 0.5)
+    else:
+        raise ValueError("embedding type not found")
 
     if network_type == "final":
         model = networks.get_final_model(0.25, flow_features, window_size, domain_length,
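Reviewer note: a minimal sketch of how the new "embedding_type" switch above would be driven from the hyperparameter dict. Only the keys visible in this hunk are taken from the diff; the import path and all literal values below are assumptions for illustration.

# hypothetical module path; the file that defines get_models_by_params is not named in this diff
from models import get_models_by_params

params = {
    "embedding_type": "deep",   # new key: "small" (default) keeps the original domain CNN, "deep" selects get_domain_embedding_model2
    "type": "final",            # network_type, read further up in the function
    "embedding": 128,           # embedding_size (illustrative value)
    "dense_main": 512,          # dense_dim (illustrative value)
    "model_output": "both",
    # remaining keys (domain length, filter/kernel sizes, ...) omitted; they are not visible in this hunk
}

model = get_models_by_params(params)  # return structure is not shown in this diff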
@@ -65,7 +72,7 @@ def get_models_by_params(params: dict):
         conv_client = model.get_layer("conv_client").trainable_weights
         l1 = [0.001 * K.sum(K.abs(x - y)) for (x, y) in zip(conv_server, conv_client)]
         model.add_loss(l1)
 
 
         dense_server = model.get_layer("dense_server").trainable_weights
         dense_client = model.get_layer("dense_client").trainable_weights
         l2 = [0.001 * K.sum(K.abs(x - y)) for (x, y) in zip(dense_server, dense_client)]
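Reviewer note: the two list comprehensions in the hunk above add an L1 penalty that pulls the server and client towers towards shared weights via model.add_loss. A self-contained sketch of the same mechanism on a toy two-tower model; the toy architecture and layer sizes are made up, only the penalty pattern and the 0.001 coefficient come from the code above.

from keras import backend as K
from keras.layers import Dense, Input
from keras.models import Model

ipt = Input(shape=(8,))
server_out = Dense(4, name="dense_server")(ipt)
client_out = Dense(4, name="dense_client")(ipt)
model = Model(ipt, [server_out, client_out])

# one term per weight tensor (kernel and bias); the absolute difference penalises divergence between the towers
server_w = model.get_layer("dense_server").trainable_weights
client_w = model.get_layer("dense_client").trainable_weights
tie_penalty = [0.001 * K.sum(K.abs(x - y)) for (x, y) in zip(server_w, client_w)]
model.add_loss(tie_penalty)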
@@ -2,7 +2,7 @@ from collections import namedtuple
 
 import keras
 from keras.engine import Input, Model as KerasModel
-from keras.layers import Conv1D, Dense, Dropout, Embedding, GlobalMaxPooling1D, TimeDistributed
+from keras.layers import Conv1D, Dense, Dropout, Embedding, GlobalAveragePooling1D, GlobalMaxPooling1D, TimeDistributed
 
 import dataset
 
@@ -22,6 +22,24 @@ def get_domain_embedding_model(embedding_size, input_length, filter_size, kernel
     return KerasModel(x, y)
 
 
+def get_domain_embedding_model2(embedding_size, input_length, filter_size, kernel_size, hidden_dims,
+                                drop_out=0.5) -> KerasModel:
+    x = y = Input(shape=(input_length,))
+    y = Embedding(input_dim=dataset.get_vocab_size(), output_dim=embedding_size)(y)
+    y = Conv1D(filter_size,
+               kernel_size,
+               activation='relu')(y)
+    y = Conv1D(filter_size,
+               kernel_size,
+               activation='relu')(y)
+    y = Conv1D(filter_size,
+               kernel_size,
+               activation='relu')(y)
+    y = GlobalAveragePooling1D()(y)
+    y = Dense(hidden_dims, activation="relu")(y)
+    return KerasModel(x, y)
+
+
 def get_final_model(cnnDropout, flow_features, window_size, domain_length, cnn_dims, kernel_size,
                     dense_dim, cnn) -> Model:
     ipt_domains = Input(shape=(window_size, domain_length), name="ipt_domains")
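Reviewer note: a quick way to exercise the new deeper embedding model in isolation. The module name networks is inferred from the calls in the first file; all hyperparameter values below are illustrative only.

import networks

# illustrative hyperparameters; real values come from the hyperband search space
deep_cnn = networks.get_domain_embedding_model2(embedding_size=64,
                                                input_length=40,
                                                filter_size=128,
                                                kernel_size=3,
                                                hidden_dims=256)
# expected stack: Embedding -> Conv1D x3 -> GlobalAveragePooling1D -> Dense
deep_cnn.summary()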