import keras.backend as K

from models.renes_networks import selu
from . import flat_2, pauls_networks, renes_networks


def get_models_by_params(params: dict):
    # decompose the parameter section
    # parameters mainly for the embedding model
    network_type = params.get("type")
    network_depth = params.get("depth")
    embedding_size = params.get("embedding_size")
    input_length = params.get("input_length")
    filter_embedding = params.get("filter_embedding")
    kernel_embedding = params.get("kernel_embedding")
    hidden_embedding = params.get("dense_embedding")
    dropout = params.get("dropout")
    # parameters mainly for the prediction model
    flow_features = params.get("flow_features")
    domain_features = params.get("domain_features")
    window_size = params.get("window_size")
    domain_length = params.get("domain_length")
    filter_main = params.get("filter_main")
    kernel_main = params.get("kernel_main")
    dense_dim = params.get("dense_main")
    model_output = params.get("model_output", "both")
    # create the models: select the network family by the requested depth
    if network_depth == "flat1":
        networks = pauls_networks
    elif network_depth == "flat2":
        networks = flat_2
    elif network_depth == "deep1":
        networks = renes_networks
    else:
        raise ValueError("unknown network depth: {}".format(network_depth))
    # shared embedding model for the domain names
    # NOTE: the dropout rates are fixed here (0.5 / 0.25); the "dropout" key read above is currently unused
    embedding_model = networks.get_embedding(embedding_size, input_length, filter_embedding, kernel_embedding,
                                             hidden_embedding, 0.5)
    # original and revised prediction models built on top of the shared embedding
    old_model = networks.get_model(0.25, flow_features, domain_features, window_size, domain_length,
                                   filter_main, kernel_main, dense_dim, embedding_model, model_output)
    new_model = networks.get_new_model(0.25, flow_features, domain_features, window_size, domain_length,
                                       filter_main, kernel_main, dense_dim, embedding_model, model_output)
    return embedding_model, old_model, new_model
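
# Illustrative call (sketch only): the keys mirror the params.get() lookups
# above; all values here are hypothetical placeholders.
#
#   embedding, old_model, new_model = get_models_by_params({
#       "depth": "flat1",
#       "embedding_size": 128, "input_length": 40,
#       "filter_embedding": 128, "kernel_embedding": 3, "dense_embedding": 128,
#       "flow_features": 3, "domain_features": 1,
#       "window_size": 10, "domain_length": 40,
#       "filter_main": 128, "kernel_main": 3, "dense_main": 512,
#   })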


def get_metrics():
    # name -> function mapping of the custom metrics/activations used by the models
    return dict([
        ("precision", precision),
        ("recall", recall),
        ("f1_score", f1_score),
        ("selu", selu)
    ])
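
# Usage sketch (assumption, not part of the original module): this mapping is
# the form keras expects as `custom_objects` when reloading a saved model, e.g.
#   keras.models.load_model("model.h5", custom_objects=get_metrics())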


def get_metric_functions():
    return [precision, recall, f1_score]
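
# Usage sketch (assumption): the list form is convenient at training time, e.g.
#   model.compile(optimizer="adam", loss="binary_crossentropy",
#                 metrics=get_metric_functions())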


def precision(y_true, y_pred):
    # precision = TP / (TP + FP), computed on rounded predictions
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())


def recall(y_true, y_pred):
    # recall = TP / (TP + FN), computed on rounded predictions
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())


def f1_score(y_true, y_pred):
    return f_score(1)(y_true, y_pred)


def f05_score(y_true, y_pred):
    return f_score(0.5)(y_true, y_pred)


def f_score(beta):
    # factory for an F-beta metric: (1 + beta^2) * P * R / (beta^2 * P + R)
    def _f(y_true, y_pred):
        p = precision(y_true, y_pred)
        r = recall(y_true, y_pred)

        bb = beta ** 2

        fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())

        return fbeta_score

    return _f
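
# Example (sketch): f_score is a factory, so further F-beta variants can be
# built the same way as f1_score/f05_score above, e.g.
#   f2_score = f_score(2)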