mirror of https://github.com/opencv/opencv.git
Move instruction

This commit is contained in:
parent 5790810c3a
commit 4a19ac5aca
@@ -47,9 +47,9 @@
 #include "opencv2/core/async.hpp"

 #if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_EXPERIMENTAL_NS
-#define CV__DNN_EXPERIMENTAL_NS_BEGIN namespace experimental_dnn_34_v15 {
+#define CV__DNN_EXPERIMENTAL_NS_BEGIN namespace experimental_dnn_34_v16 {
 #define CV__DNN_EXPERIMENTAL_NS_END }
-namespace cv { namespace dnn { namespace experimental_dnn_34_v15 { } using namespace experimental_dnn_34_v15; }}
+namespace cv { namespace dnn { namespace experimental_dnn_34_v16 { } using namespace experimental_dnn_34_v16; }}
 #else
 #define CV__DNN_EXPERIMENTAL_NS_BEGIN
 #define CV__DNN_EXPERIMENTAL_NS_END
@@ -1,6 +1,47 @@
 #!/usr/bin/env python
+'''
+You can download the converted pb model from https://www.dropbox.com/s/qag9vzambhhkvxr/lip_jppnet_384.pb?dl=0
+or convert the model yourself.
+
+Follow these steps if you want to convert the original model yourself:
+To get the original .meta pre-trained model, download https://drive.google.com/file/d/1BFVXgeln-bek8TCbRjN6utPAgRE0LJZg/view
+To correctly convert the .meta model to .pb, download the original repository https://github.com/Engineering-Course/LIP_JPPNet
+and change the script evaluate_parsing_JPPNet-s2.py for human parsing as follows:
+1. Remove the preprocessing that creates image_batch_origin:
+    with tf.name_scope("create_inputs"):
+    ...
+   and add:
+    image_batch_origin = tf.placeholder(tf.float32, shape=(2, None, None, 3), name='input')
+
+2. Create the input:
+    image = cv2.imread('path/to/image')
+    image_rev = np.flip(image, axis=1)
+    input = np.stack([image, image_rev], axis=0)
+
+3. Hardcode the image_h and image_w shapes to determine the output shapes.
+   We use the default INPUT_SIZE = (384, 384) from evaluate_parsing_JPPNet-s2.py:
+    parsing_out1 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out1_100, INPUT_SIZE),
+                                            tf.image.resize_images(parsing_out1_075, INPUT_SIZE),
+                                            tf.image.resize_images(parsing_out1_125, INPUT_SIZE)]), axis=0)
+   Do the same with parsing_out2 and parsing_out3.
+4. Remove the postprocessing. The last net operation is:
+    raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2, parsing_out3]), axis=0)
+   Change the evaluation call to:
+    parsing_ = sess.run(raw_output, feed_dict={'input:0': input})
+
+5. To save the model, add after sess.run(...):
+    input_graph_def = tf.get_default_graph().as_graph_def()
+    output_node = "Mean_3"
+    output_graph_def = tf.graph_util.convert_variables_to_constants(sess, input_graph_def, output_node)
+
+    output_graph = "LIP_JPPNet.pb"
+    with tf.gfile.GFile(output_graph, "wb") as f:
+        f.write(output_graph_def.SerializeToString())
+'''
+
 import argparse
-import cv2 as cv
 import numpy as np
+import cv2 as cv
+

 backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV)
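Taken together, steps 1-5 of the docstring above amount to a short freeze script. Below is a minimal runnable sketch of that procedure, with stated assumptions: TensorFlow 1.x APIs, trivial stand-in tensors in place of the real LIP_JPPNet outputs (which come from the repository's evaluate_parsing_JPPNet-s2.py), and illustrative file paths.

    # Sketch of steps 1-5, not the committed code. The parsing_out tensors
    # below are stand-ins for the real LIP_JPPNet graph outputs.
    import cv2
    import numpy as np
    import tensorflow as tf  # TensorFlow 1.x

    INPUT_SIZE = (384, 384)

    # Step 1: a placeholder replaces the repository's input pipeline.
    image_batch_origin = tf.placeholder(tf.float32, shape=(2, None, None, 3), name='input')

    # Stand-ins for parsing_out1/2/3; step 3 resizes each real output to INPUT_SIZE.
    parsing_out1 = tf.image.resize_images(image_batch_origin, INPUT_SIZE)
    parsing_out2 = tf.image.resize_images(image_batch_origin, INPUT_SIZE)
    parsing_out3 = tf.image.resize_images(image_batch_origin, INPUT_SIZE)

    # Step 4: averaging the three outputs is the last net operation;
    # in the real graph this node is named "Mean_3".
    raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2, parsing_out3]), axis=0)

    # Step 2: batch the image together with its horizontal flip.
    image = cv2.imread('path/to/image')  # illustrative path
    image_rev = np.flip(image, axis=1)
    inp = np.stack([image, image_rev], axis=0)

    with tf.Session() as sess:
        # In the real conversion, restore the .meta checkpoint weights here,
        # e.g. with tf.train.Saver(); this stand-in graph has no variables.
        parsing_ = sess.run(raw_output, feed_dict={'input:0': inp})

        # Step 5: fold variables into constants and serialize the frozen graph.
        input_graph_def = tf.get_default_graph().as_graph_def()
        output_node = raw_output.op.name  # "Mean_3" in the real graph
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess, input_graph_def, [output_node])  # the API expects a list of node names
        with tf.gfile.GFile('LIP_JPPNet.pb', 'wb') as f:
            f.write(output_graph_def.SerializeToString())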
@@ -116,7 +157,7 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Use this script to run human parsing using JPPNet',
                                      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     parser.add_argument('--input', '-i', required=True, help='Path to input image.')
-    parser.add_argument('--model', '-m', required=True, help='Path to pb model(https://www.dropbox.com/s/qag9vzambhhkvxr/lip_jppnet_384.pb?dl=0).')
+    parser.add_argument('--model', '-m', required=True, help='Path to pb model.')
     parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int,
                         help="Choose one of computation backends: "
                              "%d: automatically (by default), "
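For context, the sample consumes the converted .pb through OpenCV's dnn module. Below is a minimal sketch of loading the model and selecting a backend, mirroring the --model and --backend options above; the two-image batch follows the shape=(2, None, None, 3) placeholder from the conversion notes, and the paths and blob parameters are assumptions rather than the sample's exact code.

    import cv2 as cv

    # Load the frozen TensorFlow graph and pick a computation backend,
    # as the sample's --model/--backend flags do.
    net = cv.dnn.readNet('lip_jppnet_384.pb')
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)

    image = cv.imread('path/to/image')  # illustrative path
    image_rev = cv.flip(image, 1)       # the graph expects the image plus its mirror
    # 384x384 matches INPUT_SIZE from the conversion notes above.
    blob = cv.dnn.blobFromImages([image, image_rev], size=(384, 384))
    net.setInput(blob)
    out = net.forward()                 # batched output for the image and its mirror
    print(out.shape)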
@@ -135,38 +176,3 @@ if __name__ == '__main__':
     cv.namedWindow(winName, cv.WINDOW_AUTOSIZE)
     cv.imshow(winName, output)
     cv.waitKey()
-
-
-# To get original .meta pre-trained model download https://drive.google.com/file/d/1BFVXgeln-bek8TCbRjN6utPAgRE0LJZg/view
-# For correct convert .meta to .pb model download original repository https://github.com/Engineering-Course/LIP_JPPNet
-# Change script evaluate_parsing_JPPNet-s2.py for human parsing
-# 1. Remove preprocessing to create image_batch_origin:
-# - with tf.name_scope("create_inputs"):
-#   ...
-# Add
-# - image_batch_origin = tf.placeholder(tf.float32, shape=(2, None, None, 3), name='input')
-#
-# 2. Create input
-#    image = cv2.imread(path/to/image)
-#    image_rev = np.flip(image, axis=1)
-#    input = np.stack([image, image_rev], axis=0)
-#
-# 3. Hardcode image_h and image_w shapes to determine output shapes.
-#    We use default INPUT_SIZE = (384, 384) from evaluate_parsing_JPPNet-s2.py.
-# - parsing_out1 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out1_100, INPUT_SIZE),
-#                                           tf.image.resize_images(parsing_out1_075, INPUT_SIZE),
-#                                           tf.image.resize_images(parsing_out1_125, INPUT_SIZE)]), axis=0)
-# Do similarly with parsing_out2, parsing_out3
-# 4. Remove postprocessing. Last net operation:
-#    raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2, parsing_out3]), axis=0)
-# Change:
-# parsing_ = sess.run(raw_output, feed_dict={'input:0': input})
-#
-# 5. To save model after sess.run(...) add:
-# input_graph_def = tf.get_default_graph().as_graph_def()
-# output_node = "Mean_3"
-# output_graph_def = tf.graph_util.convert_variables_to_constants(sess, input_graph_def, output_node)
-#
-# output_graph = "LIP_JPPNet.pb"
-# with tf.gfile.GFile(output_graph, "wb") as f:
-#     f.write(output_graph_def.SerializeToString())
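With a converted lip_jppnet_384.pb in hand, the sample would be run along these lines; the --model/-m and --input/-i flags are confirmed by the hunk above, while the script's file name (human_parsing.py in the dnn samples tree) is not shown in this diff and the paths are illustrative:

    python human_parsing.py --model lip_jppnet_384.pb --input image.jpg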