ma_cisco_malware/models/metrics.py

import keras.backend as K
from keras.activations import elu


def get_custom_objects():
    """Mapping of this module's custom objects, keyed by name."""
    return {
        "precision": precision,
        "recall": recall,
        "f1_score": f1_score,
        "selu": selu,
    }
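
# Intended use (a sketch: assumes a model was saved with these custom
# objects compiled in, and "model.h5" is a hypothetical path):
#
#   from keras.models import load_model
#   model = load_model("model.h5", custom_objects=get_custom_objects())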


def selu(x):
    """Scaled Exponential Linear Unit. (Klambauer et al., 2017)

    # Arguments
        x: A tensor or variable to compute the activation function for.

    # References
        - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)

    # copied from keras.io
    """
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    return scale * elu(x, alpha)
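
# For reference, the closed form this computes (via elu's alpha argument):
#
#   selu(x) = scale * x                      for x > 0
#   selu(x) = scale * alpha * (exp(x) - 1)   for x <= 0
#
# The two constants give the self-normalizing property (activations driven
# towards zero mean and unit variance) described in the paper above.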


def get_metric_functions():
    """Metric callables to pass to model.compile(metrics=...)."""
    return [precision, recall, f1_score]
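
# Example wiring (a sketch: `model`, the optimizer, and the loss are
# illustrative placeholders, not prescribed by this module):
#
#   model.compile(optimizer="adam",
#                 loss="binary_crossentropy",
#                 metrics=get_metric_functions())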


def precision(y_true, y_pred):
    """Batch-wise precision: true positives / predicted positives."""
    # Round predictions to {0, 1} and count positive samples.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # K.epsilon() guards against division by zero.
    return true_positives / (predicted_positives + K.epsilon())


def recall(y_true, y_pred):
    """Batch-wise recall: true positives / actual positives."""
    # Count positive samples.
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())
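
# Worked toy example (assuming a 0/1 label encoding): for
# y_true = [1, 1, 0, 0] and y_pred = [1, 0, 1, 0], true_positives = 1,
# predicted_positives = 2, possible_positives = 2, so precision and
# recall both come out to 0.5 (up to K.epsilon()).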


def f1_score(y_true, y_pred):
    return f_score(1)(y_true, y_pred)


def f05_score(y_true, y_pred):
    return f_score(0.5)(y_true, y_pred)


def f_score(beta):
    """Return an F-beta metric function closed over `beta`."""
    def _f(y_true, y_pred):
        p = precision(y_true, y_pred)
        r = recall(y_true, y_pred)
        bb = beta ** 2
        fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
        return fbeta_score
    return _f
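
# F-beta weighting (for reference): with p = precision and r = recall,
#
#   F_beta = (1 + beta^2) * p * r / (beta^2 * p + r)
#
# beta < 1 favours precision (see f05_score above); beta > 1 favours recall.
# A hypothetical f2_score emphasising recall would follow the same pattern:
#
#   def f2_score(y_true, y_pred):
#       return f_score(2)(y_true, y_pred)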