import keras
from keras.models import Model
from keras.layers import (Input, Embedding, Conv1D, GlobalMaxPooling1D,
                          Dense, Dropout, Activation, TimeDistributed)


def get_embedding(vocab_size, embedding_size, input_length,
                  filters, kernel_size, hidden_dims, drop_out=0.5):
    # Character-level CNN that turns one padded integer sequence of length
    # `input_length` (e.g. a domain name) into a fixed-size feature vector.
    x = y = Input(shape=(input_length,))
    y = Embedding(input_dim=vocab_size, output_dim=embedding_size)(y)
    y = Conv1D(filters, kernel_size, activation='relu')(y)
    # collapse the sequence dimension into a single feature vector
    y = GlobalMaxPooling1D()(y)
    y = Dense(hidden_dims)(y)
    y = Dropout(drop_out)(y)
    y = Activation('relu')(y)
    return Model(x, y)


def get_model(cnnDropout, flow_features, domain_features, window_size, domain_length, cnn_dims, kernel_size,
              dense_dim, cnn):
    # Embed every domain in the window with the shared character CNN, then
    # concatenate the embeddings with the per-flow features.
    ipt_domains = Input(shape=(window_size, domain_length), name="ipt_domains")
    encoded = TimeDistributed(cnn)(ipt_domains)
    ipt_flows = Input(shape=(window_size, flow_features), name="ipt_flows")
    merged = keras.layers.concatenate([encoded, ipt_flows], -1)
    # CNN processing small slices of the flow window
    # TODO: add more layers?
    y = Conv1D(cnn_dims,
               kernel_size,
               activation='relu',
               input_shape=(window_size, domain_features + flow_features))(merged)
    # remove the temporal dimension by global max pooling
    y = GlobalMaxPooling1D()(y)
    y = Dropout(cnnDropout)(y)
    y = Dense(dense_dim, activation='relu')(y)
    # two softmax heads: one for the client label, one for the server label
    y1 = Dense(2, activation='softmax', name="client")(y)
    y2 = Dense(2, activation='softmax', name="server")(y)

    return Model(inputs=[ipt_domains, ipt_flows], outputs=(y1, y2))
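

# ---------------------------------------------------------------------------
# Minimal usage sketch. All hyperparameter values and the random data below
# are illustrative assumptions, chosen only so the shapes line up
# (hidden_dims == domain_features, input_length == domain_length).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    embedding_cnn = get_embedding(vocab_size=40, embedding_size=64,
                                  input_length=40, filters=128, kernel_size=3,
                                  hidden_dims=128)
    model = get_model(cnnDropout=0.5, flow_features=3, domain_features=128,
                      window_size=10, domain_length=40, cnn_dims=128,
                      kernel_size=2, dense_dim=64, cnn=embedding_cnn)
    model.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    # one batch of random data just to exercise the two-output training path
    domains = np.random.randint(0, 40, size=(32, 10, 40))
    flows = np.random.rand(32, 10, 3)
    client = keras.utils.to_categorical(np.random.randint(0, 2, 32), 2)
    server = keras.utils.to_categorical(np.random.randint(0, 2, 32), 2)
    model.fit([domains, flows], [client, server], epochs=1, batch_size=32)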