Accuracy always 1 in Caffe regression

My dataset contains 400 images of size 32x32x3, and the labels are floating-point numbers in (-1, 1). Example (a rough sketch of the HDF5 packing step follows the listing):

faceCroppedImages/img1.jpg 0
faceCroppedImages/img2.jpg 0.0128
faceCroppedImages/img3.jpg 0.0128
faceCroppedImages/img4.jpg 0.0128
faceCroppedImages/img22.jpg 0.0128
faceCroppedImages/img23.jpg 0.0085
faceCroppedImages/img24.jpg 0.0077
faceCroppedImages/img25.jpg 0.0077
faceCroppedImages/img293.jpg -0.023
faceCroppedImages/img294.jpg -0.023
faceCroppedImages/img295.jpg -0.0204
faceCroppedImages/img296.jpg -0.0179
faceCroppedImages/img297.jpg -0.017
faceCroppedImages/img298.jpg -0.0128
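
For reference, here is a minimal sketch of how such a list can be packed into HDF5 for the HDF5Data layer (h5py and OpenCV assumed; the file name labels.txt and the [0, 1] scaling are placeholders, and my actual conversion script may differ):

import h5py
import numpy as np
import cv2

# Read "path label" pairs from the listing above (placeholder file name).
paths, labels = [], []
with open('labels.txt') as f:
    for line in f:
        path, label = line.split()
        paths.append(path)
        labels.append(float(label))

# N x 3 x 32 x 32 float32 is the NCHW layout the HDF5Data layer expects.
data = np.zeros((len(paths), 3, 32, 32), dtype=np.float32)
for i, p in enumerate(paths):
    img = cv2.imread(p)                        # 32 x 32 x 3, BGR, uint8
    data[i] = img.transpose(2, 0, 1) / 255.0   # to C x H x W, scaled to [0, 1]

labels = np.array(labels, dtype=np.float32).reshape(-1, 1)

with h5py.File('train_data.h5', 'w') as f:
    f.create_dataset('data', data=data)
    f.create_dataset('label', data=labels)

# train_hdf5file.txt (the "source" in the prototxt) just lists the .h5 file(s).
with open('train_hdf5file.txt', 'w') as f:
    f.write('train_data.h5\n')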

My 'solver.prototxt' is:

net: "train_test_hdf5.prototxt"
test_iter: 100
test_interval: 500
base_lr: 0.003
momentum: 0.9
weight_decay: 0.0005
lr_policy: "inv"
gamma: 0.0001
power: 0.75
display: 100
max_iter: 10000
snapshot: 5000
snapshot_prefix: "lenet_hdf5"
solver_mode: CPU
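
As a note on the schedule above, Caffe's "inv" policy sets the learning rate at iteration t to base_lr * (1 + gamma * t)^(-power), so with these values it decays only slowly:

# Effective learning rate under lr_policy "inv" with the settings above.
base_lr, gamma, power = 0.003, 0.0001, 0.75

def inv_lr(t):
    return base_lr * (1.0 + gamma * t) ** (-power)

print(inv_lr(0), inv_lr(5000), inv_lr(10000))  # ~0.003, ~0.00221, ~0.00178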

and 'train_test_hdf5.prototxt' is:

name: "MSE regression"
layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  hdf5_data_param {
    source: "train_hdf5file.txt"
    batch_size: 64
    shuffle: true
  }
  include: { phase: TRAIN }
}

layer {
  name: "data"
  type: "HDF5Data"
  top: "data"
  top: "label"
  hdf5_data_param {
    source: "test_hdf5file.txt"
    batch_size: 128
  }
  include: { phase: TEST }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param { lr_mult: 1 }
  param { lr_mult: 2 }
  convolution_param {
    num_output: 20
    kernel_size: 5
    stride: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "conv1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "dropout1"
  type: "Dropout"
  bottom: "pool1"
  top: "pool1"
  dropout_param {
    dropout_ratio: 0.1
  }
}

layer {
  name: "fc1"
  type: "InnerProduct"
  bottom: "pool1"
  top: "fc1"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  inner_product_param {
    num_output: 500
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "dropout2"
  type: "Dropout"
  bottom: "fc1"
  top: "fc1"
  dropout_param {
    dropout_ratio: 0.5
  }
}
layer {
  name: "fc2"
  type: "InnerProduct"
  bottom: "fc1"
  top: "fc2"
  param { lr_mult: 1 decay_mult: 1 }
  param { lr_mult: 2 decay_mult: 0 }
  inner_product_param {
    num_output: 1
    weight_filler {
      type: "xavier"
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "accuracy1"
  type: "Accuracy"
  bottom: "fc2"
  bottom: "label"
  top: "accuracy1"
  include {
    phase: TEST
  }
}
layer {
  name: "loss"
  type: "EuclideanLoss"
  bottom: "fc2"
  bottom: "label"
  top: "loss"
}

However, when I test the data, the reported accuracy is always 1.
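
For reference, this is a minimal pycaffe sketch of how the raw fc2 outputs can be compared with the labels at test time (the .caffemodel name is assumed from snapshot_prefix and max_iter, and may differ):

import caffe
import numpy as np

caffe.set_mode_cpu()

# Load the TEST phase with a saved snapshot (file name assumed from
# snapshot_prefix and max_iter; adjust to the actual snapshot).
net = caffe.Net('train_test_hdf5.prototxt',
                'lenet_hdf5_iter_10000.caffemodel',
                caffe.TEST)

net.forward()  # pulls one batch from test_hdf5file.txt

pred = net.blobs['fc2'].data.flatten()
true = net.blobs['label'].data.flatten()
print(np.c_[pred[:10], true[:10]])  # predictions next to ground-truth labels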

I tried using integer labels by multiplying my current labels by 1000, but then I get a nan error.
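
The attempt was roughly along these lines (a sketch; my exact conversion may have differed):

import numpy as np

# Original float labels in (-1, 1), as in the listing above.
labels = np.array([0.0, 0.0128, 0.0077, -0.023, -0.0179], dtype=np.float32)

# Attempted integer labels: multiply by 1000 and round.
labels_int = np.round(labels * 1000).astype(np.float32).reshape(-1, 1)
print(labels_int.ravel())  # 0, 13, 8, -23, -18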

Can you tell me where I am going wrong? I am a beginner with Caffe and neural networks, so any suggestions would be valuable. TIA
