Source: https://gist.github.com/yano/3a072e5e2b7a55703028751820bfacbf
import keras.backend as K

# Precision: true positives / predicted positives, thresholded at 0.20.
def P(y_true, y_pred):
    true_positives = K.sum(K.cast(K.greater(K.clip(y_true * y_pred, 0, 1), 0.20), 'float32'))
    pred_positives = K.sum(K.cast(K.greater(K.clip(y_pred, 0, 1), 0.20), 'float32'))
    precision = true_positives / (pred_positives + K.epsilon())
    return precision

# Recall: true positives / possible (actual) positives, thresholded at 0.20.
def R(y_true, y_pred):
    true_positives = K.sum(K.cast(K.greater(K.clip(y_true * y_pred, 0, 1), 0.20), 'float32'))
    poss_positives = K.sum(K.cast(K.greater(K.clip(y_true, 0, 1), 0.20), 'float32'))
    recall = true_positives / (poss_positives + K.epsilon())
    return recall
# F-measure: harmonic mean of precision and recall.
def F(y_true, y_pred):
    p_val = P(y_true, y_pred)
    r_val = R(y_true, y_pred)
    # K.epsilon() guards against division by zero when p_val + r_val == 0.
    f_val = 2 * p_val * r_val / (p_val + r_val + K.epsilon())
    return f_val
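
# Quick sanity check of the 0.20 threshold (a hedged sketch, not in the
# original gist; assumes the Keras backend functions K.constant and K.eval):
# for y_true = [1, 0, 0] and y_pred = [0.9, 0.1, 0.6] there is 1 true
# positive, 2 predicted positives (0.9 and 0.6), and 1 actual positive,
# so P = 0.5, R = 1.0, and F = 2 * 0.5 * 1.0 / 1.5 ≈ 0.667:
#
#   y_t = K.constant([1.0, 0.0, 0.0])
#   y_p = K.constant([0.9, 0.1, 0.6])
#   print(K.eval(P(y_t, y_p)), K.eval(R(y_t, y_p)), K.eval(F(y_t, y_p)))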
# The code below is untested.
def fbeta_score(y_true, y_pred, beta=1):
    # Calculates the F score, the weighted harmonic mean of precision and recall.
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')
    # If there are no true positives, fix the F score at 0, as sklearn does.
    # Caveat: `== 0` compares a symbolic tensor with a Python int, so with a
    # graph backend this branch may never fire; it is kept from the original.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0
    p = P(y_true, y_pred)
    r = R(y_true, y_pred)
    bb = beta ** 2
    fbeta = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta
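
# Note: beta weights recall relative to precision; beta = 1 recovers F1,
# beta = 2 (F2) favours recall, and beta = 0 reduces to precision alone.
# Worked example with p = 0.5, r = 1.0, beta = 2:
# (1 + 4) * 0.5 * 1.0 / (4 * 0.5 + 1.0) = 2.5 / 3.0 ≈ 0.833.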
def fmeasure(y_true, y_pred):
    # Calculates the f-measure, the harmonic mean of precision and recall.
    return fbeta_score(y_true, y_pred, beta=1)
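
# Self-contained usage sketch (a hedged example, not from the original gist;
# the toy model, random data, and RMSprop settings are illustrative
# assumptions — the point is only that P, R, F are passed as metrics):
if __name__ == '__main__':
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.optimizers import RMSprop

    # Tiny binary classifier on random data, just to exercise the metrics.
    model = Sequential([Dense(1, input_dim=10, activation='sigmoid')])
    rms_prop = RMSprop(lr=0.001)
    model.compile(optimizer=rms_prop, loss='binary_crossentropy',
                  metrics=[P, R, F])

    x_train = np.random.rand(64, 10)
    y_train = np.random.randint(0, 2, size=(64, 1))
    model.fit(x_train, y_train, epochs=1, batch_size=16)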