# This set will get us started, but you will need to add
# others.

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
from sklearn.preprocessing import label_binarize
from sklearn.metrics import plot_roc_curve
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import roc_auc_score
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix

# We can now access the dataset

wine = load_wine()
X = wine.data
y = wine.target
nclasses = len(wine.target_names)
colors = "bry"
h = .02  # step size in the mesh

# Split the data into a training set and a test set - this is
# where your implementation will need to start. Maybe you  
# will need to work with the train_test_split( ... ) function

X_train, X_test, y_train, y_test =  train_test_split(X, y)

# Standardize
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
X_train = (X_train - mean) / std

# Next we can look at the cross validation. Remember we are
# selecting the two best features  through this process.
# Take a look at tutorial 4 for an example implementation

best_performance = 0
best_mean_f1 = 0
best_f1 = 0
best_f2 = 0

# We will be testing every combination of pairs of features 

for f1 in range(0,13):
    for f2 in range(0,13):
        
        # We want 2 features, not 1
        if f1 == f2:
            continue
        
        features_idx_to_use = [f1,f2]
        
        clf = SGDClassifier(alpha=0.001, max_iter=100, random_state=42)
        clf.fit(X_train[:,features_idx_to_use], y_train)
        
        # Return the predictions for the 3-fold cross-validation
        y_predicted = cross_val_predict(clf, X_train[:,features_idx_to_use],y_train, cv=3)
        
        # Construct the confusion matrix
        conf_mat_train = confusion_matrix(y_train, y_predicted)
        
        # Print out the recall, precision and F1 scores
        # There will be a value for each class
        # CV Train
        print("CV Train:",f1,":",f2," - ", recall_score(y_train,y_predicted,average=None))
        print("CV Train:",f1,":",f2," - ",precision_score(y_train,y_predicted,average=None))
        print("CV Train:",f1,":",f2," - ",f1_score(y_train,y_predicted,average=None))

        current_f1 = np.mean(f1_score(y_train,y_predicted,average=None))
        if current_f1 > best_mean_f1:
            best_f1 = f1
            best_f2 = f2
            best_mean_f1 = current_f1
            best_clf = clf



# Once you have selected the best performing set of features
# in the cross-validation, we can test the best performing
# classifier
        
# Now we need to test our classifier using the test set.
# Recall that we standardised the training data - the test set must be
# transformed with the stored mean and standard deviation from the training set.

X_test = (X_test - mean) / std

y_test_predicted = best_clf.predict(X_test[:,[best_f1, best_f2]])
conf_mat_test = confusion_matrix(y_test, y_test_predicted)

# Plot the confusion matrix for the test set (now that X_test has been standardised)
fig, ax = plt.subplots()
disp = plot_confusion_matrix(best_clf, X_test[:,[best_f1, best_f2]], y_test,
                                 display_labels=wine.target_names,
                                 cmap=plt.cm.Blues, ax=ax)
ax.set_title('Testing')

# Test
print("Test:",recall_score(y_test,y_test_predicted,average=None))
print("Test:",precision_score(y_test,y_test_predicted,average=None))
print("Test:",f1_score(y_test,y_test_predicted,average=None)) 
        
y_score = best_clf.decision_function(X_test[:,[best_f1, best_f2]])
# Now we can plot a ROC curve and calculate the AUC     
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(nclasses):
    fpr[i], tpr[i], _ = roc_curve(y_test, y_score)
    roc_auc[i] = auc(fpr, tpr)
    
# Once we have finished with the performance metrics, we can plot the
# classifier boundaries and the test points. This one is
# left to you ;)
h = .02  # step size in the mesh
# create a mesh to plot in
x_min, x_max = X_test[:, best_f1].min() - 1, X_test[:, best_f1].max() + 1
y_min, y_max = X_test[:, best_f2].min() - 1, X_test[:, best_f2].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
fig, ax = plt.subplots(figsize=(10,10))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = best_clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = ax.contourf(xx, yy, Z,cmap=plt.get_cmap('plasma'))

colors = ['cyan', 'orange', 'magenta']  # one colour per class
# Plot also the training points
for i, color in zip(best_clf.classes_, colors):
    idx = np.where(y_test == i)
    ax.scatter(X_test[idx, best_f1], X_test[idx, best_f2], c=color, label=wine.target_names[i],
                cmap=plt.get_cmap('plasma'), edgecolor='black', s=20)
ax.set_title("Decision surface of Binary SGD")
ax.axis('tight')

xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = best_clf.coef_
intercept = best_clf.intercept_

# Let's make a function to plot the hyperplanes used by the linear
# classifier.

def plot_hyperplane(c, color):
    def line(x0):
        return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]

    plt.plot([xmin, xmax], [line(xmin), line(xmax)],
              ls="--", color=color)

plot_hyperplane(0, "red")

My error is:

Traceback (most recent call last):

  File "C:\Users\Doug\.spyder-py3\temp.py", line 120, in <module>
    fpr[i], tpr[i], _ = roc_curve(y_test, y_score)

  File "C:\Users\Doug\anaconda3\lib\site-packages\sklearn\utils\validation.py", line 63, in inner_f
    return f(*args, **kwargs)

  File "C:\Users\Doug\anaconda3\lib\site-packages\sklearn\metrics\_ranking.py", line 913, in roc_curve
    fps, tps, thresholds = _binary_clf_curve(

  File "C:\Users\Doug\anaconda3\lib\site-packages\sklearn\metrics\_ranking.py", line 691, in _binary_clf_curve
    raise ValueError("{0} format is not supported".format(y_type))

ValueError: multiclass format is not supported

And the problem line is fpr[i], tpr[i], _ = roc_curve(y_test, y_score)

Can someone explain what it is I am missing, please? I know it has something to do with the kind of data I am inputting, but I am not sure what.

  • Does this answer your question? [How to fix ValueError: multiclass format is not supported](https://stackoverflow.com/questions/61114520/how-to-fix-valueerror-multiclass-format-is-not-supported) – Maxi Schvindt Aug 30 '21 at 02:42

1 Answer


First, since you do not use fpr and tpr later, you can merge roc_curve and auc into roc_auc_score, which you have already imported; see the docs.

If you are performing multiclass classification (more than two classes), you also need to pass the multi_class parameter ('ovo' or 'ovr'); the two strategies give different results, so check the roc_auc_score documentation for how they differ.
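
One thing to watch out for: in the multiclass case roc_auc_score expects per-class probabilities (each row summing to 1), and the hinge-loss SGDClassifier only offers decision_function scores, which it will reject. Here is a minimal sketch, assuming you are happy to refit with the probabilistic loss='log' (renamed 'log_loss' in newer scikit-learn versions) so that predict_proba is available, and reusing your training/test splits and best_f1/best_f2; prob_clf is just a placeholder name:

# Hedged sketch: multiclass AUC via roc_auc_score instead of roc_curve + auc.
# Assumes loss='log' so SGDClassifier exposes predict_proba; with the default
# hinge loss there is only decision_function, which roc_auc_score rejects for
# more than two classes.
prob_clf = SGDClassifier(loss='log', alpha=0.001, max_iter=100, random_state=42)
prob_clf.fit(X_train[:, [best_f1, best_f2]], y_train)
y_proba = prob_clf.predict_proba(X_test[:, [best_f1, best_f2]])

# 'ovr' (one-vs-rest) and 'ovo' (one-vs-one) are different averages of the AUC
print("OvR AUC:", roc_auc_score(y_test, y_proba, multi_class='ovr'))
print("OvO AUC:", roc_auc_score(y_test, y_proba, multi_class='ovo'))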

Maybe relevant:

  • ROC for multiclass classification (a short sketch of that one-vs-rest approach is below)

  • [How to fix ValueError: multiclass format is not supported](https://stackoverflow.com/questions/61114520/how-to-fix-valueerror-multiclass-format-is-not-supported)
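
If what you actually wanted from that loop is one ROC curve per class, the usual one-vs-rest recipe is to binarize the labels and pair each indicator column with the matching column of the decision scores. A rough sketch along those lines, reusing the label_binarize you already import and your fitted best_clf:

# Sketch: per-class (one-vs-rest) ROC curves with the existing hinge-loss model.
# label_binarize turns y_test into an (n_samples, n_classes) indicator matrix, and
# decision_function returns one score column per class for a multiclass SGDClassifier.
y_test_bin = label_binarize(y_test, classes=best_clf.classes_)
y_score = best_clf.decision_function(X_test[:, [best_f1, best_f2]])

fpr, tpr, roc_auc = dict(), dict(), dict()
for i in range(nclasses):
    fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
    print(wine.target_names[i], "AUC:", roc_auc[i])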