-1

I am trying to calculate an ROC curve for a multi-class image model. Since I could not find a good method for the multi-class case directly, I converted the problem to binary (one-vs-rest). I have 31 image classes, and using the binary approach I am trying to find the ROC for each of the 31 classes individually.

    # Read the 31 class names (one per row, no header) from sheet 1.
    df <- read.xlsx("data.xlsx", sheetName = 1, header = FALSE)
    dn <- as.vector(df$X1)  # the 31 class names
    # Model output details: actual labels, model labels, probability values.
    # (FIX: the original comment wrapped onto a second, un-commented line,
    # which is a syntax error when pasted into R.)
    model_info <- read.csv("all_new.csv", stringsAsFactors = FALSE)

head(model_info)

           Actual_labels             App_labels                      X1st
1 tinea cruris and corporis tinea cruris and corporis tinea cruris and corporis
2 tinea cruris and corporis tinea cruris and corporis tinea cruris and corporis
3 tinea cruris and corporis              no diagnosis             acne vulgaris
4                    eczema                    eczema                    eczema
5                    eczema              no diagnosis                 psoriasis
6              folliculitis    impetigo and pyodermas    impetigo and pyodermas
                       X2nd                      X3rd X.st.. X2nd.. X3rd..
1                 psoriasis             herpes zoster   0.89   0.05   0.03
2                 psoriasis                    eczema   0.89   0.03   0.02
3                 psoriasis     molluscum contagiosum   0.29   0.16   0.14
4 tinea cruris and corporis                 psoriasis   0.62   0.09   0.08
5                   melasma tinea cruris and corporis   0.27   0.27   0.25
6             acne vulgaris                 psoriasis   0.73   0.07   0.03

head(dn)

[1] "acne vulgaris"      "alopecia areata"    "anogenital warts"  
[4] "bullous pemphigoid" "candidiasis"        "chicken pox"   

The `app_call` function converts the probability values to 0 or 1: an instance is marked 1 when its top-1 probability is at or above the cut-off and the model's call matches the given category, and 0 otherwise.

# Binarise the model's top-1 call for one class: an instance gets 1 iff its
# top-1 probability is >= cut_off AND the model's diagnosis equals `category`;
# every other instance gets 0.
#
# cut_off   : probability threshold in [0, 1].
# category  : class name to score (one of the 31 classes).
# probs     : top-1 probability per instance (defaults to the original global).
# diagnosis : model's top-1 diagnosis per instance (defaults to the original global).
# Returns a 0/1 vector with one entry per instance.
#
# NOTE(review): the original body mixed two globals, `app_res` and
# `model_info`, which are presumably the same data frame -- confirm. They are
# kept as lazily-evaluated default parameters so existing calls still work,
# and the wrapped comment that left "above threshold" outside the comment
# (a syntax error) is fixed.
app_call <- function(cut_off, category,
                     probs = model_info$X.st..,
                     diagnosis = app_res$App.Diagnosis) {
  labels_thr <- rep(0, length(probs))
  ind <- which(probs >= cut_off)                 # instances at/above threshold
  true_val <- which(diagnosis[ind] == category)  # of those, called as `category`
  labels_thr[ind[true_val]] <- 1
  labels_thr
}

    # One-vs-rest ground truth and ROC for class dn[i].
    # NOTE(review): `i` must be supplied by an enclosing loop over seq_along(dn).
    index0 <- grep(pattern = paste0("^", dn[i], "$"), x = model_info$Actual_labels)

    # 1 where the actual label is dn[i], 0 everywhere else.
    actual_labels <- rep(0, nrow(model_info))

    # BUG FIX: the original tested `length(index)`, an undefined object; the
    # computed index vector is `index0`. The explicit reset of the other
    # positions to 0 was redundant (the vector is initialised to 0) and is
    # dropped.
    if (length(index0) >= 1) {
      actual_labels[index0] <- 1
    }

    app_labels <- app_call(cut_off = 0.5, category = dn[i])
    res <- roc(actual_labels, app_labels)           # ROC of thresholded 0/1 calls
    res1 <- roc(actual_labels, model_info$X.st..)   # ROC of raw probabilities
dput(actual_labels)
 c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 
0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 
1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 
0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 
0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 
0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 
1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0)
    dput(app_labels)
c(0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 
0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 
1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 
0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 
0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 
1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0)
dput(model_info$X.st..)
c(0.89, 0.89, 0.29, 0.62, 0.27, 0.73, 0.44, 0.7, 0.42, 0.56, 
0.87, 0.19, 0.72, 0.54, 0.37, 0.46, 0.89, 0.89, 0.88, 0.2, 0.46, 
0.75, 0.78, 0.66, 0.5, 0.67, 0.17, 0.85, 0.75, 0.58, 0.97, 0.71, 
0.8, 0.29, 0.56, 0.44, 0.6, 0.36, 0.38, 0.31, 0.17, 0.35, 0.99, 
0.19, 0.48, 0.51, 0.48, 0.92, 0.39, 0.14, 0.44, 0.55, 0.5, 0.43, 
0.38, 0.27, 0.37, 0.47, 0.63, 0.4, 0.78, 0.96, 0.67, 0.7, 0.61, 
0.44, 0.44, 0.48, 0.47, 0.35, 0.44, 0.34, 0.34, 0.6, 0.24, 0.49, 
0.47, 0.35, 0.62, 0.76, 0.67, 0.51, 0.48, 0.72, 0.81, 0.9, 0.89, 
0.92, 0.94, 0.2, 0.87, 0.98, 0.99, 0.98, 0.85, 0.89, 0.87, 0.62, 
0.19, 0.86, 0.49, 0.5, 0.2, 0.48, 0.51, 0.49, 0.3, 0.11, 0.88, 
0.35, 0.81, 0.59, 0.67, 0.31, 0.94, 0.22, 0.4, 0.52, 0.21, 0.31, 
0.19, 0.79, 0.64, 0.33, 0.27, 0.19, 0.74, 0.78, 0.41, 0.35, 0.8, 
0.39, 0.28, 0.92, 0.37, 0.66, 0.66, 0.44, 0.77, 0.49, 0.27, 0.32, 
0.65, 0.21, 0.95, 0.51, 0.16, 0.33, 0.85, 0.26, 0.61, 0.25, 0.57, 
0.26, 0.21, 0.45, 0.44, 0.83, 0.31, 0.35, 0.91, 0.34, 0.89, 0.77, 
0.56, 0.31, 0.26, 0.84, 0.15, 0.27, 0.47, 0.73, 0.28, 0.44, 0.52, 
0.65, 0.72, 0.38, 0.99, 0.46, 0.35, 0.25, 0.42, 0.82, 0.24, 0.56, 
0.31, 0.52, 0.91, 0.94, 0.68, 0.21, 0.28, 0.68, 0.67, 0.61, 0.6, 
0.43, 0.37, 0.28, 0.28, 0.24, 0.4, 0.92, 0.9, 0.45, 0.96, 0.53, 
0.72, 0.26, 0.47, 0.47, 0.97, 0.49, 0.25, 0.51, 0.3, 0.92, 0.43, 
0.33, 0.95, 0.25, 0.51, 0.98, 0.23, 0.51, 0.75, 0.84, 0.54, 0.5, 
0.54, 0.33, 0.64, 0.29, 0.93, 0.13, 0.27, 0.93, 0.59, 0.27, 0.81, 
0.57, 0.59, 0.47, 0.24, 0.53, 0.53, 0.43, 0.24, 0.94, 0.6, 0.7, 
0.23, 0.69, 0.95, 0.95, 0.49, 0.73, 0.31, 0.94, 0.15, 0.85, 0.92, 
0.34, 0.95, 0.91, 0.36, 0.55, 0.55, 0.29, 0.86, 0.31, 0.48, 0.48, 
0.45, 0.5, 0.49, 0.3, 0.33, 0.39, 0.8, 0.42, 0.51, 0.52, 0.66, 
0.19, 0.58, 0.94, 0.51, 0.39, 0.84, 0.95, 0.85, 0.72, 0.35, 0.83, 
0.5, 0.91, 0.83, 0.61, 0.79, 0.5, 0.87, 0.3, 0.5, 0.53, 0.22, 
0.82, 0.74, 0.73, 0.65, 0.88, 0.31, 0.75, 0.74, 0.92, 0.38, 0.47, 
0.26, 0.77, 0.78, 0.82, 0.59, 0.59, 0.33, 0.67, 0.31, 0.67, 0.44, 
0.77, 0.61, 0.44, 0.77, 0.83, 0.58, 0.6, 0.78, 0.76, 0.47, 0.72, 
0.47, 0.29, 0.14, 0.32, 0.17, 0.56, 0.68, 0.3, 0.46, 0.56, 0.68, 
0.61, 0.7, 0.23, 0.39, 0.79, 0.38, 0.32, 0.58, 0.46, 0.5, 0.57, 
0.93, 0.4, 0.37, 0.75, 0.76, 0.36, 0.84, 0.19, 0.18, 0.94, 0.53, 
0.53, 0.24, 0.23, 0.51, 0.53, 0.84, 0.23, 0.44, 0.85, 0.53, 0.23, 
0.56, 0.26, 0.38, 0.78, 0.93, 0.65, 0.22, 0.52, 0.35, 0.47, 0.33, 
0.31, 0.65, 0.72, 0.46, 0.44, 0.74, 0.92, 0.99, 0.72, 0.41, 0.18, 
0.85, 0.89, 0.31, 0.4, 0.98, 0.46, 0.16, 0.58, 0.25, 0.21, 0.32, 
0.43, 0.56, 0.34, 0.35, 0.7, 0.43, 0.17, 0.25, 0.33, 0.44, 0.44, 
0.58, 0.74, 0.37, 0.68, 0.52, 0.8, 0.96, 0.52, 0.25, 0.81, 0.94, 
1, 0.58, 0.42, 0.46, 0.41, 0.18, 0.37, 0.9, 0.54, 0.29, 0.38, 
0.38, 0.53, 0.99, 0.57, 0.44, 0.33, 0.45, 0.95, 0.85, 0.75, 0.19, 
0.97, 0.27, 0.94, 0.77, 0.79, 0.57, 0.33, 0.98, 0.47, 0.55, 0.27, 
0.43, 0.66, 1, 0.62, 0.34, 0.81, 0.4, 0.56, 0.33, 0.25, 0.4, 
0.25, 0.91, 0.28, 0.4, 0.73, 0.32, 0.49, 0.37, 0.19, 0.35, 0.29, 
0.77, 0.36, 0.31, 0.85, 0.33, 0.61, 0.63, 0.41, 0.98, 0.28, 0.31, 
0.91, 0.34, 0.24, 0.82, 0.46, 0.5, 0.39, 0.72, 0.67, 0.51, 0.41, 
0.81, 0.74, 0.5, 0.97, 0.65, 0.44, 0.71, 0.35, 0.84, 0.97, 0.42, 
0.75, 0.91, 0.61, 0.94, 0.48, 0.42, 0.63, 0.81, 0.83, 0.66, 0.55, 
0.61, 0.41, 0.63, 1, 0.63, 0.41, 0.75, 0.27, 0.28, 0.24, 0.55, 
0.35, 0.85, 0.97, 0.64, 0.79, 0.92, 0.47, 0.81, 0.23, 0.16, 0.75, 
0.12, 0.43, 0.18, 0.69, 0.21, 0.39, 0.19, 0.85, 0.57, 0.97, 0.56, 
0.81, 0.13, 0.4, 0.47, 0.95, 0.43, 0.9, 0.67, 0.36, 0.38, 0.83, 
0.97, 0.48, 0.93, 0.67, 0.44, 0.34, 0.83, 0.77, 0.39, 0.56, 0.85, 
0.55, 0.22, 0.48, 0.46, 0.59, 0.89, 0.99, 0.57, 0.96, 0.97, 0.95, 
0.98, 0.24, 0.89, 0.5, 0.94, 0.6, 0.41, 0.71, 0.5, 0.2, 0.96, 
0.18, 0.93, 0.92, 0.85, 0.92, 0.82, 0.48, 0.62, 0.53, 0.59, 0.38, 
0.8, 0.49, 0.91, 0.58, 0.94, 0.68, 0.15, 0.96, 0.98, 0.89, 0.84, 
0.5, 0.88, 0.29, 0.24, 0.31, 0.29, 0.33, 0.49, 0.33, 0.76, 0.54, 
0.88, 0.78, 0.26, 0.52, 0.75, 0.97, 0.93, 0.27, 0.69, 0.19, 0.69, 
0.2, 0.21, 0.84, 0.31, 0.19, 0.8, 0.6, 0.19, 0.51, 0.98, 0.27, 
0.39, 0.77, 0.95, 0.73, 0.28, 0.79, 0.19, 0.98, 0.77, 0.31, 0.84, 
0.35, 0.19, 0.26, 0.82, 0.63, 0.38, 0.38, 0.26, 0.63, 0.65, 0.55, 
0.88, 0.6, 0.71, 0.85, 0.99, 0.28, 0.42, 0.65, 0.58, 0.97, 0.35, 
0.36, 0.32, 0.79, 0.68, 0.39, 0.45, 0.71, 0.98, 0.34, 0.62, 0.24, 
0.55, 0.43, 0.95, 0.32, 0.6, 0.63, 0.98, 0.2, 0.31, 0.9, 0.3, 
0.32, 0.37, 0.52, 0.64, 0.9, 0.22, 0.31, 0.39, 0.21, 0.93, 0.64, 
0.4, 0.96, 0.31, 0.46, 0.86, 0.56, 0.99, 0.83, 0.87, 0.36, 0.59, 
0.98, 0.72, 0.21, 0.52, 0.17, 0.21, 0.42, 0.97, 0.34, 0.96, 0.18, 
0.63, 0.45, 0.36, 0.31, 0.48, 0.94, 0.86, 0.16, 0.32, 0.97, 0.29, 
0.9, 0.38, 0.88, 0.6, 0.17, 0.19, 0.44, 0.98, 0.35, 0.36, 0.2, 
0.39, 0.53, 0.35, 0.57, 0.18, 0.26, 0.17, 0.77, 0.51, 1, 0.17, 
0.57, 0.48, 0.58, 0.25, 0.32, 0.33, 0.76, 0.16, 0.13, 0.46, 0.44, 
0.31, 0.56, 0.46, 0.6, 0.17, 0.36, 0.34, 0.44, 0.43, 0.86, 0.86, 
0.44, 0.34, 0.92, 0.32, 0.78, 0.21, 0.46, 0.92, 0.27, 0.98, 0.52, 
0.34, 0.27, 0.59, 0.45, 0.58, 0.27, 0.48, 0.21, 0.24, 0.29, 0.89, 
0.25, 0.33, 0.96, 0.56, 0.29, 0.97, 0.98, 0.59, 0.28, 0.22, 0.76, 
0.91, 0.92, 0.91, 0.94, 0.83, 0.48, 0.53, 0.56, 0.5, 0.75, 0.4, 
0.98, 0.6, 0.74, 0.66, 0.97, 0.62, 0.99, 0.39, 0.89, 0.86, 0.66, 
0.92, 0.34, 0.99, 0.69, 0.71, 0.8, 0.47, 0.5, 0.83, 0.83, 0.41, 
0.72, 0.98, 0.76, 0.65, 0.71, 0.9, 0.9, 1, 0.4, 0.46, 0.35, 0.72, 
0.92, 0.74, 0.44, 0.67, 0.97, 0.88, 0.84, 0.71, 0.45, 0.78, 0.9, 
0.72, 0.57, 0.68, 0.85, 0.84, 0.46, 0.91, 0.53, 0.96, 0.49, 0.93, 
0.49, 0.37, 0.95, 0.47, 0.87, 0.49, 0.58, 0.64, 0.84, 0.8, 0.49, 
0.67, 0.75, 0.44, 0.87, 0.71, 0.47, 0.46, 0.83, 0.74, 0.99, 0.86, 
0.64, 0.74, 0.43, 0.44, 0.57, 0.89, 0.67, 0.59, 0.89, 0.45, 0.62, 
0.81, 0.93, 0.81, 0.98, 0.95, 0.63, 0.64, 0.96, 0.55, 0.49, 0.59, 
0.47, 0.42, 0.6, 0.51, 0.4, 0.3, 0.29, 0.45, 0.94, 0.29, 0.33, 
0.14, 0.71, 0.41, 0.6, 0.31, 0.95, 0.94, 0.87, 0.8, 0.53, 0.66, 
0.71, 0.19, 0.49, 0.97, 0.48, 0.43, 0.38, 0.4, 0.22, 0.38, 0.27, 
0.25, 0.45, 0.75, 0.38, 0.23, 0.92, 0.7, 0.68, 0.17, 0.39, 0.65, 
0.38, 0.39, 0.21, 0.28, 0.55, 0.89, 0.24, 0.34, 0.92, 0.31, 0.64, 
0.86, 0.94, 0.28, 0.43, 0.44, 0.82, 0.23, 0.81, 0.71, 0.53, 0.96, 
0.9, 0.55, 0.83, 0.64, 0.51, 0.32, 0.66, 0.45, 0.72, 0.28, 0.34, 
0.98, 0.76, 0.52, 0.95, 0.83, 0.47, 0.9, 0.31, 0.23, 0.61, 0.94, 
0.61, 0.42, 0.34, 0.55, 0.33, 0.93, 0.24, 0.51, 0.65, 0.17, 0.81, 
0.68, 0.51, 0.78, 0.37, 0.37, 0.99, 0.94, 0.64, 0.59, 0.61, 0.9, 
0.88, 0.64, 0.49, 0.09, 0.51, NA, 0.86, 0.45, 0.61, 0.24, 0.85, 
0.26, 0.29, 0.21, 0.66, 0.26, 0.47, 0.19, 0.99, 0.51, 0.91, 0.37, 
0.56, 0.71, 0.47, 0.44, 0.48, 0.52, 0.22, 0.52, 0.29, 0.46, 0.54, 
0.94, 0.24, 0.24, 0.47, 0.37, 0.9, 0.79, 0.81, 0.41, 0.38, 0.71, 
0.34, 0.46, 0.23, 0.54, 0.43, 0.85, 0.56, 0.26, 0.9, 0.25, 0.3, 
0.39, 0.89, 0.38, 0.18, 0.78, 0.37, 0.45, 0.51, 0.8, 0.61, 0.52, 
0.84, 0.4, 0.31, 0.28, 0.24, 0.23, 0.43, 0.77, 0.78, 0.95, 0.9, 
0.81, 0.15, 0.77, 0.77, 0.87, 0.75, 0.16, 0.49, 0.23, 0.93, 0.45, 
0.33, 0.75, 0.32, 0.75, 0.41, 0.24, 0.46, 0.17, 0.41, 0.45, 0.48, 
0.15, 0.66, 0.53, 0.75, 0.57, 0.46, 0.78, 0.24, 0.29, 0.95, 0.77, 
0.66, 0.94, 0.27, 0.29, 0.58, 0.6, 0.46, 0.58, 0.84, 0.69, 0.47, 
0.45, 0.48, 0.35, 0.89, 0.98, 0.93, 0.2, 0.94, 0.91, 0.75, 0.5, 
0.44, 0.69, 0.8, 0.76, 0.85, 0.84, 0.72, 0.25, 0.73, 0.26, 0.93, 
0.15, 0.33, 0.3, 0.6, 0.24, 0.21, 0.28, 0.51, 0.79, 0.77, 0.85, 
0.52, 0.39, 0.68, 0.83, 0.36, 0.15, 0.87, 0.55)
# ROC from the binarised 0/1 calls. NOTE(review): app_labels already folds in
# whether the call matched the class, so it is not a plain "prediction" input
# and roc() on it is not comparable to res2 (see the answer below).
res1 = roc(actual_labels,app_labels)
# ROC from the raw top-1 probabilities -- the conventional input for roc().
res2= roc(actual_labels,model_info$X.st..)

An instance is labeled "1" in app_labels when its actual label is "1" and its probability value (model_info$X.st..) is above the 0.5 threshold; all other instances are zero.

Both res1 and res2 give different values for sensitivity and specificity.

  • 1
    Please, share the output of `dput(actual_labels)`, `dput(app_labels)`, and `dput(probability_values)`. – Marco Sandri Aug 16 '19 at 09:59
  • Clearly app_labels does not seem to correspond to a thresholding of probability_values by 0.5. See how both 0.89 and 0.29 in the latter are mapped to 0 in app_labels. So of course with different inputs you will get different results... – Calimo Aug 16 '19 at 10:56
  • @MarcoSandri: The file is too big to paste here. Can you suggest some alternative method – Dhwani Dholakia Aug 16 '19 at 11:09
  • @Calimo: In this model, an image is classified into 40 different classes. I have converted the multi-class information to binary. If the image doesn't belong to the actual input class above a threshold the labels are converted to value "0" and if belongs to the same class above a threshold it is kept as "1". In this specific, I tried to find the roc information of the first category by keeping on the information related to first class and the true value as "1" rest all as "0" – Dhwani Dholakia Aug 16 '19 at 11:16
  • @DhwaniDholakia then `app_labels` is a boolean indicating whether the classification was correct. This is not the data you need as input to the roc function. In addition this seems to have nothing to do with what you describe. – Calimo Aug 16 '19 at 11:25
  • Take a representative subset of your data and post that. See https://stackoverflow.com/questions/5963269/how-to-make-a-great-r-reproducible-example too. – Calimo Aug 16 '19 at 11:26
  • @Calimo: i will post the total code and subset of data in few minutes using the above link – Dhwani Dholakia Aug 16 '19 at 11:29
  • Let us [continue this discussion in chat](https://chat.stackoverflow.com/rooms/198049/discussion-between-dhwani-dholakia-and-calimo). – Dhwani Dholakia Aug 16 '19 at 13:06
  • Without access to the data in data.xlsx and all_new.csv that is not useful at all. Please add only the minimal, relevant data directly in the post, as explained in the link I posted earlier. – Calimo Aug 16 '19 at 13:10
  • @Calimo : i have added information related to data also – Dhwani Dholakia Aug 16 '19 at 13:27
  • That doesn't help me to run you code. Please read the linked question carefully. – Calimo Aug 16 '19 at 14:33
  • @Calimo: Can you please tell explain again what is missing? i have added a screenshot of all_data.xlsx and all_new.csv – Dhwani Dholakia Aug 16 '19 at 15:31
  • From the linked answer: "The minimal runnable code necessary to reproduce the error, ...". I cannot run the code you provide. – Calimo Aug 16 '19 at 15:33
  • @Calimo: i have added the information to run code – Dhwani Dholakia Aug 16 '19 at 15:48
  • @MarcoSandri: i have added values of dput in the edited code. Can you please check it – Dhwani Dholakia Aug 16 '19 at 15:49

1 Answers1

0

A ROC curve shows the sensitivity and specificity tradeoff as the decision threshold of a classifier is varied. Typically ROC curve functions expect to get the prediction and the truth value as input.

This is exactly what you do when you run:

res2= roc(actual_labels,model_info$X.st..)

However your app_labels is of a very different nature: you have already merged in the "correct classification" aspect, which makes it more like a flattened contingency table than the "predictions" the ROC function expects. So you can no longer use a regular ROC function and need to calculate the sensitivity and specificity manually.

# Confusion-matrix counts from 0/1 prediction (app_labels) and truth
# (actual_labels) vectors.
# BUG FIX: the original swapped the TN and FP formulas --
# sum(app_labels & !actual_labels) counts predicted-1/truly-0 cases, which is
# FP, not TN. This is why the computed specificity did not match.
TP <- sum(app_labels & actual_labels)        # predicted 1, truly 1
TN <- sum(!(app_labels) & !(actual_labels))  # predicted 0, truly 0
FP <- sum(app_labels & !(actual_labels))     # predicted 1, truly 0
FN <- sum(!(app_labels) & actual_labels)     # predicted 0, truly 1

# Sensitivity (true positive rate):
TP / (TP+FN)

# Specificity (true negative rate):
TN / (TN + FP)
Calimo
  • 7,510
  • 4
  • 39
  • 61
  • Somehow I don't quite get to the correct value for SP, but I assume this is due to some inconsistencies in the data you provide, for instance you have an NA in model_info$X.st.., but not in app_labels... – Calimo Aug 17 '19 at 07:54
  • Thanks for the information. For the second case where i manually did the calculation, can I plot values of sensitivity and specificity at different thresholds to get ROC curve? – Dhwani Dholakia Aug 17 '19 at 08:41
  • @DhwaniDholakia you coded you app_call function to accept a cut_off parameter. So yeah basically you can use any threshold I guess? – Calimo Aug 17 '19 at 09:27
  • Just to confirm, using app_call, I can vary threshold and then get a list of sensitivity and specificity corresponding to thresholds (seq(0,1,0,1)) . and then plot(sensitivity, specificity) for ROC curve – Dhwani Dholakia Aug 17 '19 at 09:31
  • If you want a ROC curve you need to go over all thresholds, not just 11 ones. – Calimo Aug 17 '19 at 09:37
  • i meant seq(0,1,.01) which will have 100 different thresholds. – Dhwani Dholakia Aug 17 '19 at 09:55
  • can you share a small code to get all thresholds like seq(0,1,.01) for 100 thresholds – Dhwani Dholakia Aug 17 '19 at 18:59
  • The easiest is to use the data points directly, like `c(-Inf, unique(model_info$X.st..), Inf)`. – Calimo Aug 18 '19 at 12:32
  • but model_info$X.st.. is the probability values for each image that was feed to model. Can those values be used as threshold? Also most values would be towards 0.5-1. When I used seq command I assumed that I have values for uniform intervals – Dhwani Dholakia Aug 18 '19 at 13:29
  • @DhwaniDholakia This is what is usually done. I suggest you read more about ROC curve, see https://stats.stackexchange.com/questions/105501/understanding-roc-curve for a starting point. – Calimo Aug 18 '19 at 14:49