
I am trying to perform classification with 9 classes in total. My loss function has 3 potential terms, but during training I use only one of them: I have set the weights of the other two loss terms to zero, so only one term contributes to the loss. Training runs fine; however, when I test, I get NaN for all class values. Can someone guide me on this? I am using Caffe; my prototxt file is below. "angle_head" is the potential I am interested in.

name: "Zeiler_conv5"

input: "data"
input_dim: 1
input_dim: 3
input_dim: 224
input_dim: 224

input: "rois"
input_dim: 1 # to be changed on-the-fly to num ROIs
input_dim: 5 # [batch ind, x1, y1, x2, y2] zero-based indexing
input_dim: 1
input_dim: 1

input: "labels"
input_dim: 1 # to be changed on-the-fly to match num ROIs
input_dim: 1
input_dim: 1
input_dim: 1

input: "bbox_targets"
input_dim: 1  # to be changed on-the-fly to match num ROIs
input_dim: 84 # 4 * (K+1) values, with K+1 = 21 classes
input_dim: 1
input_dim: 1

input: "bbox_loss_weights"
input_dim: 1  # to be changed on-the-fly to match num ROIs
input_dim: 84 # 4 * (K+1) values, with K+1 = 21 classes
input_dim: 1
input_dim: 1

input: "angle_head"
input_dim: 1  # to be changed on-the-fly to match num ROIs
input_dim: 1 # one angle label per ROI, from 9 (-180:45:180) classes
input_dim: 1
input_dim: 1

input: "angle_head_weight"
input_dim: 1  # to be changed on-the-fly to match num ROIs
input_dim: 1 # one binary weight per ROI (1 = count this ROI's angle label)
input_dim: 1
input_dim: 1


layer {
    name: "conv1"
    type: "Convolution"
    bottom: "data"
    top: "conv1"
    param {
        lr_mult: 0.0
    }
    param {
        lr_mult: 0.0
    }
    convolution_param {
        num_output: 96
        kernel_size: 7
        pad: 3
        stride: 2
        weight_filler {
            type: "gaussian"
            std: 0.01
        }
        bias_filler {
            type: "constant"
            value: 0
        }
    }
}

layer {
    name: "relu1"
    type: "ReLU"
    bottom: "conv1"
    top: "conv1"
}

layer {
    name: "norm1"
    type: "LRN"
    bottom: "conv1"
    top: "norm1"
    lrn_param {
        local_size: 3
        alpha: 0.00005
        beta: 0.75
        norm_region: WITHIN_CHANNEL
    }
}

layer {
    name: "pool1"
    type: "Pooling"
    bottom: "norm1"
    top: "pool1"
    pooling_param {
        kernel_size: 3
        stride: 2
        pad: 1
        pool: MAX
    }
}

layer {
    name: "conv2"
    type: "Convolution"
    bottom: "pool1"
    top: "conv2"
    param {
        lr_mult: 0.0
    }
    param {
        lr_mult: 0.0
    }
    convolution_param {
        num_output: 256
        kernel_size: 5
        pad: 2
        stride: 2
        weight_filler {
            type: "gaussian"
            std: 0.01
        }
        bias_filler {
            type: "constant"
            value: 1
        }
    }
}

layer {
    name: "relu2"
    type: "ReLU"
    bottom: "conv2"
    top: "conv2"
}

layer {
    name: "norm2"
    type: "LRN"
    bottom: "conv2"
    top: "norm2"
    lrn_param {
        local_size: 3
        alpha: 0.00005
        beta: 0.75
        norm_region: WITHIN_CHANNEL
    }
}

layer {
    name: "pool2"
    type: "Pooling"
    bottom: "norm2"
    top: "pool2"
    pooling_param {
        kernel_size: 3
        stride: 2
        pad: 1
        pool: MAX
    }
}

layer {
    name: "conv3"
    type: "Convolution"
    bottom: "pool2"
    top: "conv3"
    param {
        lr_mult: 0.0
    }
    param {
        lr_mult: 0.0
    }
    convolution_param {
        num_output: 384
        kernel_size: 3
        pad: 1
        stride: 1
        weight_filler {
            type: "gaussian"
            std: 0.01
        }
        bias_filler {
            type: "constant"
            value: 0
        }
    }
}

layer {
    name: "relu3"
    type: "ReLU"
    bottom: "conv3"
    top: "conv3"
}

layer {
    name: "conv4"
    type: "Convolution"
    bottom: "conv3"
    top: "conv4"
    param {
        lr_mult: 0.0
    }
    param {
        lr_mult: 0.0
    }
    convolution_param {
        num_output: 384
        kernel_size: 3
        pad: 1
        stride: 1
        weight_filler {
            type: "gaussian"
            std: 0.01
        }
        bias_filler {
            type: "constant"
            value: 1
        }
    }
}

layer {
    name: "relu4"
    type: "ReLU"
    bottom: "conv4"
    top: "conv4"
}

layer {
    name: "conv5"
    type: "Convolution"
    bottom: "conv4"
    top: "conv5"
    param {
        lr_mult: 0.0
    }
    param {
        lr_mult: 0.0
    }
    convolution_param {
        num_output: 256
        kernel_size: 3
        pad: 1
        stride: 1
        weight_filler {
            type: "gaussian"
            std: 0.01
        }
        bias_filler {
            type: "constant"
            value: 1
        }
    }
}

layer {
    name: "relu5"
    type: "ReLU"
    bottom: "conv5"
    top: "conv5"
}

layer {
    bottom: "conv5"
    bottom: "rois"
    top: "pool5"
    name: "roi_pool5"
    type: "ROIPooling"
    roi_pooling_param {
        pooled_w: 6
        pooled_h: 6
        spatial_scale: 0.0625  # (1/16)
    }
}

layer {
    bottom: "pool5"
    top: "fc6"
    name: "fc6"
    param {
        lr_mult: 1.0
    }
    param {
        lr_mult: 2.0
    }
    type: "InnerProduct"
    inner_product_param {
        num_output: 4096
    }
}

layer {
    bottom: "fc6"
    top: "fc6"
    name: "relu6"
    type: "ReLU"
}

layer {
    bottom: "fc6"
    top: "fc6"
    name: "drop6"
    type: "Dropout"
    dropout_param {
        dropout_ratio: 0.5
        scale_train: false
    }
}

layer {
    bottom: "fc6"
    top: "fc7"
    name: "fc7"
    param {
        lr_mult: 1.0
    }
    param {
        lr_mult: 2.0
    }
    type: "InnerProduct"
    inner_product_param {
        num_output: 4096
    }
}

layer {
    bottom: "fc7"
    top: "fc7"
    name: "relu7"
    type: "ReLU"
}

layer {
    bottom: "fc7"
    top: "fc7"
    name: "drop7"
    type: "Dropout"
    dropout_param {
        dropout_ratio: 0.5
        scale_train: false
    }
}

layer {
    bottom: "fc7"
    top: "cls_score"
    name: "cls_score"
    param {
        lr_mult: 1.0
    }
    param {
        lr_mult: 2.0
    }
    type: "InnerProduct"
    inner_product_param {
        num_output: 21
        weight_filler {
            type: "gaussian"
            std: 0.01
        }
        bias_filler {
            type: "constant"
            value: 0
        }
    }
}

layer {
    bottom: "fc7"
    top: "angle_pred"
    name: "angle_pred"
    type: "InnerProduct"
    param {
        lr_mult: 1.0
    }
    param {
        lr_mult: 2.0
    }
    inner_product_param {
        num_output: 9
        weight_filler {
            type: "gaussian"
            std: 0.001
        }
        bias_filler {
            type: "constant"
            value: 0
        }
    }
}

layer {
    bottom: "fc7"
    top: "bbox_pred"
    name: "bbox_pred"
    type: "InnerProduct"
    param {
        lr_mult: 1.0
    }
    param {
        lr_mult: 2.0
    }
    inner_product_param {
        num_output: 84
        weight_filler {
            type: "gaussian"
            std: 0.001
        }
        bias_filler {
            type: "constant"
            value: 0
        }
    }
}

layer {
    name: "loss"
    type: "SoftmaxWithLoss"
    bottom: "cls_score"
    bottom: "labels"
    top: "loss_cls"
    loss_weight: 0
}

layer {
    name: "accuracy"
    type: "Accuracy"
    bottom: "cls_score"
    bottom: "labels"
    top: "accuracy"
}

layer {
    name: "loss_angle"
    type: "SoftmaxWithLoss"
    bottom: "angle_pred"
    bottom: "angle_head"
    bottom: "angle_head_weight"
    top: "loss_angle"
    loss_weight: 1
}

layer {
    name: "loss_bbox"
    type: "SmoothL1Loss"
    bottom: "bbox_pred"
    bottom: "bbox_targets"
    bottom: "bbox_loss_weights"
    top: "loss_bbox"
    loss_weight: 0
}
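
As the comments below point out, stock BVLC Caffe defines "SoftmaxWithLoss" with exactly two bottoms (predictions and labels); only forked layers such as Fast R-CNN's "SmoothL1Loss" accept extra weight bottoms. A minimal sketch of a two-bottom "loss_angle" layer, assuming background ROIs are marked with label -1 in "angle_head" (an assumption; the original encodes them via "angle_head_weight") and skipped with ignore_label:

layer {
    name: "loss_angle"
    type: "SoftmaxWithLoss"
    bottom: "angle_pred"
    bottom: "angle_head"
    top: "loss_angle"
    loss_weight: 1
    loss_param {
        ignore_label: -1  # assumption: background ROIs carry label -1 in angle_head
    }
}

At TEST time there is no loss at all; the layer would be replaced by a plain "Softmax" that only outputs class probabilities, e.g.:

layer {
    name: "angle_prob"
    type: "Softmax"
    bottom: "angle_pred"
    top: "angle_prob"
}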
  • how do you define `"SoftmaxWithLoss"` for three "bottom"s? – Shai Oct 25 '16 at 06:57
  • I will confess straight away that I am a beginner; I was following a tutorial, and my understanding was that, just like `"SmoothL1Loss"`, you can use three bottoms for `"SoftmaxWithLoss"` as well. Can you direct me? Should I just use labels and weights? – lora loper Oct 25 '16 at 07:07
  • you will not be a beginner for much longer... What are the values of `"angle_head"` and `"angle_head_weight"` when you test your net? – Shai Oct 25 '16 at 07:17
  • When I test my net, I get a 9x1 vector as output, 9 being my number of classes, and I get NaN for each of the corresponding class labels. "angle_head" contains the label (the class to which the current blob belongs); my 9 classes are (-180:45:180). "angle_head_weight" is a binary scalar: 1 if you want this label taken into account. I know it's a bit tricky to explain in words, but the gist of it was to give no weight to the labels of blobs that belong to the background (determined by computing IoU). – lora loper Oct 25 '16 at 07:22
  • what you said about `"angle_head"` and `"angle_head_weight"` makes sense only during training. My question is what do you do for **TEST**? Can you post the prototxt you are using for TEST? – Shai Oct 25 '16 at 07:26
  • please find it here: http://www.filedropper.com/test_140 – lora loper Oct 25 '16 at 07:42
  • please post it in a more accessible way. I am not going to download files to view. – Shai Oct 25 '16 at 07:43
  • It's at pastebin: http://pastebin.com/yt5JDsnj – lora loper Oct 25 '16 at 07:51
  • try removing `loss_weight` from the last layer in TEST. – Shai Oct 25 '16 at 07:52
  • I can remove it, but by default it's one for the first top blob of any loss layer unless mentioned otherwise. http://caffe.berkeleyvision.org/tutorial/loss.html – lora loper Oct 25 '16 at 07:59
  • but in TEST you do not have ANY loss; the `"Softmax"` layer is not a loss layer. Please REMOVE the line entirely. – Shai Oct 25 '16 at 08:00
  • I have tried everything, I mean literally everything, but the output is still NaN. – lora loper Oct 27 '16 at 08:15
  • Is it possible you have NaNs in your weights? You can check it in Python: load the model, go param blob by param blob, and see if all weights are finite. – Shai Oct 30 '16 at 07:50
  • Have you looked at [this thread](http://stackoverflow.com/q/33962226/1714410)? It seems related: you might have NaNs during training that were saved as weights in the model. – Shai Oct 30 '16 at 08:06
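
Following up on the last two comments, a minimal pycaffe sketch of that check, with placeholder file names (substitute your actual TEST prototxt and trained .caffemodel):

import numpy as np
import caffe

# Placeholder paths: point these at your actual TEST prototxt and trained weights.
net = caffe.Net('test.prototxt', 'trained.caffemodel', caffe.TEST)

# Walk every learned parameter blob (params[0] = weights, params[1] = biases)
# and report any layer that stores non-finite values.
for layer_name, blobs in net.params.items():
    for i, blob in enumerate(blobs):
        if not np.all(np.isfinite(blob.data)):
            print('non-finite values in param %d of layer "%s"' % (i, layer_name))

If this reports non-finite values, the NaNs were produced during training and saved into the model, and no TEST-time prototxt change will remove them.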

0 Answers