• Implementation code for several model evaluation metrics


    import tensorflow as tf
    
    # Precision metric: TP / (TP + FP); predictions are binarized at 0.5 via tf.round
    def metric_precision(y_true,y_pred):    
        TP=tf.reduce_sum(y_true*tf.round(y_pred))
        TN=tf.reduce_sum((1-y_true)*(1-tf.round(y_pred)))
        FP=tf.reduce_sum((1-y_true)*tf.round(y_pred))
        FN=tf.reduce_sum(y_true*(1-tf.round(y_pred)))
        precision=TP/(TP+FP)
        return precision
    
    # Recall metric: TP / (TP + FN)
    def metric_recall(y_true,y_pred):  
        TP=tf.reduce_sum(y_true*tf.round(y_pred))
        TN=tf.reduce_sum((1-y_true)*(1-tf.round(y_pred)))
        FP=tf.reduce_sum((1-y_true)*tf.round(y_pred))
        FN=tf.reduce_sum(y_true*(1-tf.round(y_pred)))
        recall=TP/(TP+FN)
        return recall
    
    # F1-score metric: harmonic mean of precision and recall
    def metric_F1score(y_true,y_pred):    
        TP=tf.reduce_sum(y_true*tf.round(y_pred))
        TN=tf.reduce_sum((1-y_true)*(1-tf.round(y_pred)))
        FP=tf.reduce_sum((1-y_true)*tf.round(y_pred))
        FN=tf.reduce_sum(y_true*(1-tf.round(y_pred)))
        precision=TP/(TP+FP)
        recall=TP/(TP+FN)
        F1score=2*precision*recall/(precision+recall)
        return F1score

    # Example of referencing the custom metrics at compile time
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy', metric_precision, metric_recall, metric_F1score])
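
These metrics are computed per batch from the four confusion-matrix counts, with predictions binarized at 0.5; note that a batch with no predicted positives makes TP + FP zero and the result NaN. A minimal sketch of calling one of them directly (the input values are made up for illustration, assuming TensorFlow 2 eager execution):

    y_true = tf.constant([1., 0., 1., 1., 0.])
    y_pred = tf.constant([0.8, 0.6, 0.3, 0.9, 0.1])   # rounds to [1, 1, 0, 1, 0]

    # TP = 2 (indices 0 and 3), FP = 1 (index 1), so precision = 2/3
    print(float(metric_precision(y_true, y_pred)))  # ≈ 0.667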

      

    import numpy as np
    from tensorflow.keras import backend as K

    # AUC for a binary classifier: sweep 1000 thresholds and sum the
    # rectangle areas under the resulting (PFA, PTA) curve
    def auc(y_true, y_pred):
        ptas = tf.stack([binary_PTA(y_true, y_pred, k) for k in np.linspace(0, 1, 1000)], axis=0)
        pfas = tf.stack([binary_PFA(y_true, y_pred, k) for k in np.linspace(0, 1, 1000)], axis=0)
        pfas = tf.concat([tf.ones((1,)), pfas], axis=0)
        binSizes = -(pfas[1:] - pfas[:-1])
        s = ptas * binSizes
        return K.sum(s, axis=0)
    #-----------------------------------------------------------------------------------------------------------------------------------------------------
    # PFA, prob false alert for binary classifier
    def binary_PFA(y_true, y_pred, threshold=K.variable(value=0.5)):
        y_pred = K.cast(y_pred >= threshold, 'float32')
        # N = total number of negative labels
        N = K.sum(1 - y_true)
        # FP = total number of false alerts, alerts from the negative class labels
        FP = K.sum(y_pred - y_pred * y_true)
        return FP/N
    #-----------------------------------------------------------------------------------------------------------------------------------------------------
    # P_TA prob true alerts for binary classifier
    def binary_PTA(y_true, y_pred, threshold=K.variable(value=0.5)):
        y_pred = K.cast(y_pred >= threshold, 'float32')
        # P = total number of positive labels
        P = K.sum(y_true)
        # TP = total number of correct alerts, alerts from the positive class labels
        TP = K.sum(y_pred * y_true)
        return TP/P
     
    # Then register auc in the metrics list of the model's compile() call
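
A minimal sketch of that compile call (assuming a Keras model object named model, which is not defined in this post):

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy', auc])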
    

      

    # False Discovery Rate (FDR) = FP / (FP + TP)
    from sklearn.metrics import confusion_matrix
    y_true = [0,0,0,0,0,0,1,1,1,1,1]
    y_pred = [0,0,0,0,0,0,1,1,1,1,1]
    
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    fdr = fp / (fp + tp)
    
    print(fdr)  # 0.0 here, since the predictions match the labels exactly
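
Since FDR is simply 1 minus precision, it can be cross-checked against sklearn's precision_score; the prediction vector below is made up so that one false positive appears:

    from sklearn.metrics import precision_score

    y_true = [0,0,0,0,0,0,1,1,1,1,1]
    y_pred = [0,0,0,0,0,1,1,1,1,1,1]   # one false positive at index 5

    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    print(fp / (fp + tp))                        # FDR = 1/6 ≈ 0.167
    print(1 - precision_score(y_true, y_pred))   # same value via precision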
    

      

  • Original post: https://www.cnblogs.com/ylHe/p/12149873.html