TensorFlow Serving prediction as b64 output, top result only

Posted 2019-06-10 22:43

I have a Keras model that I am converting to a TensorFlow Serving model. I can successfully convert my pretrained Keras model to take b64 input, preprocess that input, and feed it to the model. My problem is that I don't know how to take the prediction data I am getting (which is enormous) and export only the top result. I am doing image segmentation, so my output prediction has shape (?, 473, 473, 3), and I'd like to take the top result and return it in b64-encoded format. Here is what I currently have, which just returns the entire prediction:

import os
import shutil

import tensorflow as tf
from keras import backend as K
from tensorflow.python.framework import graph_util
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants, tag_constants

# `model`, `preprocess_image`, HEIGHT, WIDTH and `export_path` are defined elsewhere.

# Freeze the Keras session so the model weights become constants in the graph.
sess = K.get_session()
g = sess.graph
g_def = graph_util.convert_variables_to_constants(
    sess,
    g.as_graph_def(),
    [model.output.name.replace(':0', '')])

with tf.Graph().as_default() as g_input:
    input_b64 = tf.placeholder(shape=(1,),
                               dtype=tf.string,
                               name='b64')
    tf.logging.info('input b64 {}'.format(input_b64))

    # Decode the raw image bytes, resize to the model's input size and apply
    # the same preprocessing used at training time.
    image = tf.image.decode_image(input_b64[0])
    image_f = tf.image.convert_image_dtype(image, dtype=tf.float16)
    input_image = tf.expand_dims(image_f, 0)
    image_r = tf.image.resize_bilinear(input_image, [HEIGHT, WIDTH],
                                       align_corners=False)
    input_data = preprocess_image(image_r)
    output = tf.identity(input_data, name='input_image')




# Convert to GraphDef
g_input_def = g_input.as_graph_def()


with tf.Graph().as_default() as g_combined:
    x = tf.placeholder(tf.string, name="b64")

    # Route the serving placeholder through the preprocessing graph...
    im, = tf.import_graph_def(g_input_def,
                              input_map={'b64:0': x},
                              return_elements=["input_image:0"])

    # ...and feed the preprocessed image into the frozen model graph.
    pred, = tf.import_graph_def(g_def,
                                input_map={model.input.name: im},
                                return_elements=[model.output.name])

    with tf.Session() as session:
        inputs = {"image_bytes": tf.saved_model.utils.build_tensor_info(x)}
        outputs = {"output_bytes": tf.saved_model.utils.build_tensor_info(pred)}
        signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs=inputs,
            outputs=outputs,
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
        )

        # Convert the Keras HDF5 model into a TensorFlow SavedModel.
        if os.path.exists(export_path):
            shutil.rmtree(export_path)
        legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
        builder = saved_model_builder.SavedModelBuilder(export_path)
        builder.add_meta_graph_and_variables(
            sess=session,
            tags=[tag_constants.SERVING],
            signature_def_map={
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
            },
            legacy_init_op=legacy_init_op)
        builder.save()

Most of what I have working so far is adapted from https://medium.com/google-cloud/serverless-transfer-learning-with-cloud-ml-engine-and-keras-335435f31e15. Thanks!
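
For reference, one way to sanity-check an export like the one above is to load the SavedModel back into a session and run the serving signature locally; that also makes it easy to see how large the raw prediction tensor is. A minimal sketch, assuming the same export_path as above and a placeholder test image file:

# Minimal local smoke test of the exported SavedModel (sketch; 'test.png' is a
# placeholder file name, export_path is the directory used by the builder above).
import tensorflow as tf
from tensorflow.python.saved_model import signature_constants, tag_constants

with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(sess, [tag_constants.SERVING],
                                            export_path)
    sig = meta_graph.signature_def[
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]

    with open('test.png', 'rb') as f:
        image_bytes = f.read()

    # Feed raw image bytes to the "image_bytes" input and fetch "output_bytes".
    prediction = sess.run(
        sig.outputs['output_bytes'].name,
        feed_dict={sig.inputs['image_bytes'].name: [image_bytes]})
    print(prediction.shape)  # the full per-pixel prediction, e.g. (1, 473, 473, 3)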

1 Answer

Luminary・发光体
#2 · 2019-06-10 23:15

Posting the solution I came up with in case someone else runs into this issue. Basically, you do the inverse of the input function: take the argmax over the class dimension, turn the class map back into an image, resize it to the original shape, and encode the result as a PNG.

def postprocess_image(img, in_shape):
    # Take the top class per pixel, then map class ids to colors.
    class_image = tf.argmax(img, axis=2)
    colored_class_image = utils.class_image_to_image_tensor(class_image,
                                                            [HEIGHT, WIDTH])

    # Resize the colored mask back to the original image shape and encode it
    # as a PNG byte string.
    image_expand = tf.expand_dims(colored_class_image, 0)
    image_r = tf.image.resize_bilinear(image_expand, in_shape,
                                       align_corners=False)
    casted_data = tf.bitcast(tf.cast(image_r[0], tf.int8), tf.uint8)

    out_image = tf.image.encode_png(casted_data)

    return out_image

sess = K.get_session()
g = sess.graph
g_def = graph_util.convert_variables_to_constants(
    sess,
    g.as_graph_def(),
    [model.output.name.replace(':0', '')])


with tf.Graph().as_default() as g_input:
    input_b64 = tf.placeholder(shape=(1,),
                               dtype=tf.string,
                               name='b64')
    tf.logging.info('input b64 {}'.format(input_b64))

    # Decode the image bytes, resize to the model input size and preprocess.
    image = tf.image.decode_image(input_b64[0])
    image_f = tf.image.convert_image_dtype(image, dtype=tf.uint8)
    input_image = tf.expand_dims(image_f, 0)
    image_r = tf.image.resize_bilinear(input_image, [HEIGHT, WIDTH],
                                       align_corners=False)
    input_data = preprocess_image(image_r[0])
    output = tf.identity(input_data, name='input_image')


with tf.Graph().as_default() as g_output:
    # The placeholder name must match the model's output tensor name so the
    # model output can be mapped onto it below ('activation_58/div' here).
    first = tf.placeholder(shape=[1, 473, 473, 150],
                           dtype=tf.float32,
                           name='activation_58/div')
    i_shape = tf.placeholder(dtype=tf.int32, shape=[2], name='in_shape')

    post_image = postprocess_image(first[0], i_shape)

    output_data = tf.identity(post_image, name='out')


# Serialize the pre- and post-processing graphs so they can be stitched
# around the frozen model graph.
g_input_def = g_input.as_graph_def()
g_output_def = g_output.as_graph_def()

with tf.Graph().as_default() as g_combined:
    x = tf.placeholder(tf.string, name="b64")
    in_shape = tf.placeholder(tf.int32, shape=[1, 2], name="original_shape")

    # Preprocessing graph: image bytes -> resized, preprocessed tensor.
    im, = tf.import_graph_def(g_input_def,
                              input_map={'b64:0': x},
                              return_elements=["input_image:0"])

    # Frozen model graph: preprocessed tensor -> per-pixel class scores.
    pred, = tf.import_graph_def(g_def,
                                input_map={model.input.name: im},
                                return_elements=[model.output.name])

    # Postprocessing graph: class scores -> PNG-encoded top-class mask.
    y, = tf.import_graph_def(g_output_def,
                             input_map={model.output.name: pred,
                                        'in_shape:0': in_shape[0]},
                             return_elements=["out:0"])

    with tf.Session() as session:
        inputs = {"image_bytes": tf.saved_model.utils.build_tensor_info(x),
                  "original_shape": tf.saved_model.utils.build_tensor_info(in_shape)}
        outputs = {"output_bytes": tf.saved_model.utils.build_tensor_info(y)}
        signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs=inputs,
            outputs=outputs,
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
        )

        # Convert the Keras HDF5 model into a TensorFlow SavedModel.
        if os.path.exists(export_path):
            shutil.rmtree(export_path)
        legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
        builder = saved_model_builder.SavedModelBuilder(export_path)
        builder.add_meta_graph_and_variables(
            sess=session,
            tags=[tag_constants.SERVING],
            signature_def_map={
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
            },
            legacy_init_op=legacy_init_op)
        builder.save()
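
For completeness, here is a rough sketch of how a client could call this two-input signature once the model is deployed on Cloud ML Engine, and then decode the PNG that comes back under output_bytes. The project and model names are hypothetical, and the exact nesting of original_shape may differ depending on how the serving layer batches instances:

# Hypothetical client call via the Cloud ML Engine Python API; project and
# model names are placeholders and error handling is omitted.
import base64

from googleapiclient import discovery

with open('street.png', 'rb') as f:
    img = f.read()

service = discovery.build('ml', 'v1')
name = 'projects/my-project/models/my_segmenter'

body = {
    'instances': [{
        # The {"b64": ...} wrapper tells the serving layer to base64-decode
        # the value before it reaches the string placeholder.
        'image_bytes': {'b64': base64.b64encode(img).decode('utf-8')},
        'original_shape': [1024, 768],  # original image height, width
    }]
}
response = service.projects().predict(name=name, body=body).execute()

# Binary outputs come back base64-encoded; depending on the serving stack the
# value may be a plain string or wrapped in a {"b64": ...} object.
pred = response['predictions'][0]['output_bytes']
png_b64 = pred['b64'] if isinstance(pred, dict) else pred
with open('mask.png', 'wb') as f:
    f.write(base64.b64decode(png_b64))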