Error trying to run custom caffenet with new data

Posted 2019-09-09 12:41

Question:

I just tried to train the provided CaffeNet network on my own LMDB file. I changed the fully connected layers to convolutional layers of depth 4096, with a custom frame size.

Here is the code:

import caffe
from caffe import layers as L, params as P

weight_param = dict(lr_mult=1, decay_mult=1)
bias_param   = dict(lr_mult=2, decay_mult=0)
learned_param = [weight_param, bias_param]
batch_size = 256

# lr_mult=0 freezes these parameters (no updates during training)
frozen_param = [dict(lr_mult=0)] * 2

def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1,
              param=learned_param,
              weight_filler=dict(type='gaussian', std=0.01),
              bias_filler=dict(type='constant', value=0.1), 
              kernel_h=None, kernel_w=None):
    if kernel_h is not None and kernel_w is not None:
        conv = L.Convolution(bottom, kernel_h=kernel_h, kernel_w=kernel_w,
                             num_output=nout, pad=pad, group=group,
                             param=param, weight_filler=weight_filler,
                             bias_filler=bias_filler)
    else:
        conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                             num_output=nout, pad=pad, group=group,
                             param=param, weight_filler=weight_filler,
                             bias_filler=bias_filler)
    return conv, L.ReLU(conv, in_place=True)

def fc_relu(bottom, nout, param=learned_param,
            weight_filler=dict(type='gaussian', std=0.005),
            bias_filler=dict(type='constant', value=0.1)):
    fc = L.InnerProduct(bottom, num_output=nout, param=param,
                        weight_filler=weight_filler,
                        bias_filler=bias_filler)
    return fc, L.ReLU(fc, in_place=True)

def max_pool(bottom, ks, stride=1):
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)

def caffenet(lmdb, train=True, num_classes=1000,
             classifier_name='fc8', learn_all=False):
    """Returns a NetSpec specifying CaffeNet, following the original proto text
       specification (./models/bvlc_reference_caffenet/train_val.prototxt).
       This implementation force to train the 3 last fc layers.
       """
    n = caffe.NetSpec()
    n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB,
                             source=lmdb, ntop=2)
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=2, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)

    # Convert FC layer to CONV layer to handle different sizes
    #n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=learned_param)
    n.fc6, n.relu6 = conv_relu(n.pool5, 7, 4096, pad=0, param=learned_param,
                               kernel_h=7, kernel_w=10)
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    # n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=learned_param)
    n.fc7, n.relu7 = conv_relu(fc7input, 1, 4096, pad=0, param=learned_param)

    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    n.loss = L.SoftmaxWithLoss(fc8, n.label)
    n.acc = L.Accuracy(fc8, n.label)
    # write the net to a prototxt file and return its filename
    with open('phiNet.prototxt', 'w') as f:
        f.write(str(n.to_proto()))
    return f.name

When I run the training I get this error:

data_transformer.cpp:168] Check failed: height <= datum_height (254 vs. 221) 

My images are 336×254.

What does this error mean?

Thanks

Answer 1:

You forgot to set the resize flag to true in the shell script you used to create the LMDB, create_imagenet.sh. Set RESIZE=true, RESIZE_HEIGHT=221 and RESIZE_WIDTH=221, and make sure that the crop_size in the data layer of your train_val.prototxt is also set to 221.
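Since you build the net with NetSpec rather than editing train_val.prototxt by hand, the crop size can be passed through transform_param on the data layer. Below is a minimal sketch of that data layer, assuming the LMDB has been regenerated with 221×221 images; the mirror=True flag is only illustrative.

```python
n = caffe.NetSpec()
# transform_param configures the data transformer at load time;
# crop_size must be <= the height/width of the images stored in the LMDB,
# otherwise the "height <= datum_height" check in data_transformer.cpp fails.
n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB,
                         source=lmdb,
                         transform_param=dict(crop_size=221, mirror=True),
                         ntop=2)
```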