In the following code, the author runs prediction over a downloaded image with the VGG-16 model:
# Imports used by the snippet; vgg and the preprocessing helpers come from
# the TF-Slim models code (assumed layout).
import os
import urllib2

import tensorflow as tf
from nets import vgg
from preprocessing.vgg_preprocessing import (_mean_image_subtraction,
                                             _R_MEAN, _G_MEAN, _B_MEAN)

slim = tf.contrib.slim

with tf.Graph().as_default():
    url = ("https://upload.wikimedia.org/wikipedia/commons/d/d9/First_Student_IC_school_bus_202076.jpg")
    image_string = urllib2.urlopen(url).read()
    image = tf.image.decode_jpeg(image_string, channels=3)
    image_float = tf.to_float(image, name='ToFloat')

    # Subtract the mean pixel value from each pixel
    processed_image = _mean_image_subtraction(image_float,
                                              [_R_MEAN, _G_MEAN, _B_MEAN])
    input_image = tf.expand_dims(processed_image, 0)

    with slim.arg_scope(vgg.vgg_arg_scope()):
        logits, _ = vgg.vgg_16(input_image,
                               num_classes=1000,
                               is_training=False,
                               spatial_squeeze=False)

    pred = tf.argmax(logits, dimension=3)

    # checkpoints_dir is the directory holding the downloaded vgg_16.ckpt
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'vgg_16.ckpt'),
        slim.get_model_variables('vgg_16'))

    with tf.Session() as sess:
        init_fn(sess)
        segmentation, np_image, np_logits = sess.run([pred, image, logits])
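For context, _mean_image_subtraction and the _R_MEAN / _G_MEAN / _B_MEAN constants come from TF-Slim's vgg_preprocessing module. Roughly, the helper splits the image into channels and subtracts the per-channel ImageNet mean; a minimal sketch of the idea (not the exact implementation):

def mean_image_subtraction_sketch(image, means):
    # Split the H x W x 3 float image into one tensor per channel,
    # subtract that channel's mean, then stitch the channels back together.
    channels = tf.split(axis=2, num_or_size_splits=len(means), value=image)
    for i in range(len(means)):
        channels[i] -= means[i]
    return tf.concat(axis=2, values=channels)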
I have been trying to run the same prediction over an existing image read via OpenCV. The only modifications I made are to read the image via cv2, add an input_placeholder, and change sess.run accordingly. However, I get the following error message:
    segmentation, np_image, np_logits = sess.run([pred, logits], feed_dict={input_placeholder: image})

ValueError: need more than 2 values to unpack
Could you let me know which of my modifications is wrong? The modified code follows:
with tf.Graph().as_default():
    image = cv2.imread('/data/cat.jpg', cv2.IMREAD_UNCHANGED)
    input_placeholder = tf.placeholder(tf.float32,
                                       shape=[image.shape[0], image.shape[1], image.shape[2]])
    image_float = np.float32(image)

    # Subtract the mean pixel value from each pixel
    processed_image = _mean_image_subtraction(image_float, [_R_MEAN, _G_MEAN, _B_MEAN])
    input_image = tf.expand_dims(processed_image, 0)

    with slim.arg_scope(vgg.vgg_arg_scope()):
        logits, _ = vgg.vgg_16(input_image,
                               num_classes=1000,
                               is_training=False,
                               spatial_squeeze=False)

    pred = tf.argmax(logits, dimension=3)

    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'vgg_16.ckpt'),
        slim.get_model_variables('vgg_16'))

    with tf.Session() as sess:
        init_fn(sess)
        segmentation, np_image, np_logits = sess.run([pred, logits],
                                                     feed_dict={input_placeholder: image})
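For reference, the placeholder/feed_dict pattern I thought I was following looks roughly like this (a minimal, self-contained sketch with a made-up shape, not the VGG graph):

import numpy as np
import tensorflow as tf

# Stand-in for the array returned by cv2.imread (made-up 224 x 224 x 3 shape).
image_np = np.random.rand(224, 224, 3).astype(np.float32)

input_ph = tf.placeholder(tf.float32, shape=image_np.shape)
doubled = input_ph * 2.0  # any op built on top of the placeholder

with tf.Session() as sess:
    # The NumPy array is fed into the graph through the placeholder at run time.
    result = sess.run(doubled, feed_dict={input_ph: image_np})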