Commit ca801ba4 authored by 武继龙

new

parent b3ed567c
......@@ -3,6 +3,7 @@
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$">
<sourceFolder url="file://$MODULE_DIR$/onnx_infer/yolo3" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/onnx_infer" isTestSource="false" />
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
......
Metadata-Version: 1.0
Name: onnx_infer
Version: 0.1.0
Summary: model inference
Home-page: http://git.wanmeizhensuo.com/wujilong/onnx_model.git
Author: wjl
Author-email: wujilong@igengmei.com
License: MIT
Description: UNKNOWN
Platform: UNKNOWN
Metadata-Version: 1.0
Name: onnx-infer
Version: 0.1.0
Summary: model inference
Home-page: http://git.wanmeizhensuo.com/wujilong/onnx_model.git
Author: wjl
Author-email: wujilong@igengmei.com
License: MIT
Description: UNKNOWN
Platform: UNKNOWN
setup.py
onnx_infer/__init__.py
onnx_infer/color.py
onnx_infer/croppic.py
onnx_infer/drawpic.py
onnx_infer/main.py
onnx_infer/onnx2kera.py
onnx_infer/supression.py
onnx_infer/yolodata.py
onnx_infer.egg-info/PKG-INFO
onnx_infer.egg-info/SOURCES.txt
onnx_infer.egg-info/dependency_links.txt
onnx_infer.egg-info/top_level.txt
onnx_infer/yolo3/__init__.py
onnx_infer/yolo3/model.py
onnx_infer/yolo3/utils.py
\ No newline at end of file
from .onnx2kera import onnxinfere
import numpy as np
import cv2
# This module predicts an image's dominant color from a BGR array.
# Global color labels, indexed by the classifier's argmax output.
COLORS = ['blue','green','red','yellow','gray','white','black','pink']
def featureTransform(resized):
    # Build a 6-channel feature map from three color-space views:
    # all of HSV (3), the Y plane of YUV (1), and the a/b planes of LAB (2).
    # Note: the variable names are historical -- YCC holds LAB and Luv holds
    # YUV. cv2.cvtColor returns a new array, so no copies are needed.
    YCC = cv2.cvtColor(resized, cv2.COLOR_BGR2LAB)[:, :, 1:3]
    HSV = cv2.cvtColor(resized, cv2.COLOR_BGR2HSV)[:, :, 0:3]
    Luv = cv2.cvtColor(resized, cv2.COLOR_BGR2YUV)[:, :, 0:1]
    return np.concatenate((HSV, Luv, YCC), axis=2)
def resize(image):
    # The color classifier expects 150x150 input.
    resized = cv2.resize(image, (150, 150))
    return resized
def get_color(onnx_path, image, colors):
    # Add a batch dimension, run the ONNX classifier, and take the argmax.
    image = np.expand_dims(image, axis=0)
    image = np.array(image, dtype=np.float32)
    predicts = onnxinfere(onnx_path, image)
    prediction = np.argmax(predicts[0], 1)
    color = colors[prediction[0]]
    score = predicts[0][0][prediction[0]]
    return color, score
if __name__ == '__main__':
model_path = '/Users/apple/Desktop/color.onnx'
path = '/Users/apple/Desktop/8.jpg'
image = cv2.imread(path, cv2.IMREAD_COLOR)
resized = cv2.resize(image, (150, 150))
image = featureTransform(resized)
color, score = get_color(model_path, image, COLORS)
print(color)
print(score)
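For reference, a minimal sketch of the feature stack on a synthetic image, assuming the package is importable as onnx_infer: HSV contributes 3 channels, the Y plane of YUV 1, and the a/b planes of LAB 2, so featureTransform yields a 150x150x6 array.

import numpy as np
from onnx_infer.color import featureTransform

fake = np.random.randint(0, 255, (150, 150, 3), dtype=np.uint8)  # synthetic BGR image
features = featureTransform(fake)
print(features.shape)  # (150, 150, 6): HSV (3) + YUV-Y (1) + LAB-ab (2)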
# Crop an image to a bounding box.
import cv2
def cropImage(image, threshold_index):
    # threshold_index is a box in [y_min, x_min, y_max, x_max] order,
    # matching what yolo_eval returns.
    return image[int(threshold_index[0]): int(threshold_index[2]),
                 int(threshold_index[1]): int(threshold_index[3])]
def saveCrop(image, savePath):
cv2.imwrite(savePath, image)
if __name__ == '__main__':
from .yolodata import path2arr
path = '/Users/apple/Desktop/8.jpg'
threshold_index = [95.55165, 184.0512, 250.18225, 333.9927]
image = path2arr(path)
image = cropImage(image, threshold_index)
print(image)
save = '/Users/apple/Desktop/p.jpg'
cv2.imwrite(save, image)
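A minimal sketch of the crop convention on a synthetic array; the box is [y_min, x_min, y_max, x_max], the same order yolo_eval returns.

import numpy as np
from onnx_infer.croppic import cropImage

arr = np.zeros((400, 400, 3), dtype=np.uint8)
crop = cropImage(arr, [95.5, 184.0, 250.1, 334.0])
print(crop.shape)  # (155, 150, 3): rows 95:250, cols 184:334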
# Draw a bounding box on the original picture.
import cv2
def drawrec(image, save_path, bbox):
    # bbox is [y_min, x_min, y_max, x_max]; cv2.rectangle wants (x, y)
    # corner points, hence the index swap below.
    image = cv2.rectangle(image, (int(bbox[1]), int(bbox[0])),
                          (int(bbox[3]), int(bbox[2])), (255, 0, 0), 3)
    cv2.imwrite(save_path, image)
if __name__ == '__main__':
bbox = [95.551674,184.05116, 250.18227, 333.99274]
image_path = '/Users/apple/Desktop/8.jpg'
save_path = '/Users/apple/Desktop/rec.jpg'
image = cv2.imread(image_path, cv2.IMREAD_COLOR)
drawrec(image, save_path, bbox)
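A minimal sketch (with a hypothetical output path) of the coordinate swap in action, drawing on a blank canvas:

import numpy as np
from onnx_infer.drawpic import drawrec

canvas = np.zeros((400, 400, 3), dtype=np.uint8)  # blank test image
drawrec(canvas, '/tmp/rec.jpg', [95.0, 184.0, 250.0, 334.0])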
#*******************
"""
Main entry point combining the color and YOLO models.

The color step needs the absolute path of color.onnx and an image array
cropped by croppic.py; it returns the color of the picture.

The YOLO step needs the yolo3.onnx path and an image array; it returns the
highest-scoring category and its bounding box. (Anchors and class labels
are hardcoded in supression.py.)
"""
#*******************
from .yolodata import arr2Img, letterbox_image
from .onnx2kera import onnxinfere
from .supression import Supress
import tensorflow as tf
from .color import COLORS, featureTransform, resize, get_color
from .croppic import cropImage
import cv2
# Global category labels, in the order the YOLO model was trained on.
categories = ['long sleeve dress', 'vest dress', 'vest', 'long sleeve outwear', 'long sleeve top',
              'trousers', 'short sleeve top', 'sling dress', 'skirt', 'short sleeve dress', 'shorts']
class Main:
    def __init__(self, colorOnnx_path, yoloOnnx_path, image_arr):
        self.colorOnnx_path = colorOnnx_path
        self.yoloOnnx_path = yoloOnnx_path
        self.image_arr = image_arr
        self.score = 0.05   # detection score threshold
        self.iou = 0.05     # NMS IoU threshold
        self.picSize = (416, 416)
    def bboxAndcategory(self):
        image = arr2Img(self.image_arr)
        image_data = letterbox_image(image, self.picSize)
        predict = onnxinfere(self.yoloOnnx_path, image_data)
        # Wrap the three ONNX output arrays as tensors for yolo_eval.
        feature = [tf.convert_to_tensor(f) for f in predict]
        sup = Supress(self.score, self.iou, feature)
        box, score, classes = sup.detect(image)
        # Evaluate the graph and keep the top detection.
        with tf.Session() as sess:
            bbox = box.eval()[0]
            index = classes.eval()[0]
        return bbox, categories[index]
    def colorAndbboxAndcategory(self):
        bbox, category = self.bboxAndcategory()
        # Crop to the detected box, then classify the crop's color.
        image_crop = cropImage(self.image_arr, bbox)
        resized = resize(image_crop)
        tmp = featureTransform(resized)
        color, ratio = get_color(self.colorOnnx_path, tmp, COLORS)
        return color, bbox, category
# main test
def get_result(image_arr):
    # The ONNX files are resolved relative to the working directory.
    colorOnnx_path = './color.onnx'
    yoloOnnx_path = './yolo3.onnx'
m = Main(colorOnnx_path, yoloOnnx_path, image_arr)
color, bbox, category = m.colorAndbboxAndcategory()
return color, bbox, category
if __name__ == '__main__':
colorOnnx_path = '/Users/apple/Desktop/color.onnx'
image_path = '/Users/apple/Desktop/8.jpg'
yoloOnnx_path = '/Users/apple/Desktop/yolo3.onnx'
image_arr = cv2.imread(image_path)
color, bbox, category = get_result(image_arr)
print(color)
print(bbox)
print(category)
"""
m = Main(colorOnnx_path, yoloOnnx_path, image_path)
color, bbox, category = m.colorAndbboxAndcategory()
print('its color is : {}, category is : {} '.format(color, category))
print('the bounding box is : {}'.format(bbox))
# print(color)
# print(bbox)
# print(category)
"""
import onnxmltools
import onnxruntime
import tensorflow as tf
from keras.models import load_model
from .yolodata import path2Img, letterbox_image
def hf2onnx(h5_path, save_path):
    # Convert a Keras .h5 model to ONNX (opset 7) and save it.
    model = load_model(h5_path)
    onnx_model = onnxmltools.convert_keras(model, target_opset=7)
    onnxmltools.utils.save_model(onnx_model, save_path)
def onnxinfere(onnx_path, input_arr):
    # Run one forward pass; input_arr must match the model's input shape.
    session = onnxruntime.InferenceSession(onnx_path)
    inname = session.get_inputs()[0].name
    outnames = [output.name for output in session.get_outputs()]
    predict = session.run(outnames, {inname: input_arr})
    return predict
if __name__ == '__main__':
    from .supression import Supress
    # path = '/Users/apple/Public/keras-yolo3/model_data/our1_yolo.h5'
    save_path = '/Users/apple/Desktop/yolo3.onnx'
    score = 0.01
    iou = 0.01
    picSize = (416, 416)
    # hf2onnx(path, save_path)
    img_path = '/Users/apple/Desktop/8.jpg'
    image = path2Img(img_path)
    image_data = letterbox_image(image, picSize)
    predict = onnxinfere(save_path, image_data)
    print(predict)
    feature = [tf.convert_to_tensor(f) for f in predict]
    print(feature)
    # Anchors and class labels are hardcoded in Supress, so only the
    # thresholds and feature maps are passed.
    sup = Supress(score, iou, feature)
    box, score, classes = sup.detect(image)
    with tf.Session() as sess:
        print(box.eval())
        print(score.eval())
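A minimal conversion-and-inference sketch with hypothetical paths; the .h5 must be a model that load_model can restore:

import numpy as np
from onnx_infer.onnx2kera import hf2onnx, onnxinfere

hf2onnx('yolo3.h5', 'yolo3.onnx')  # Keras -> ONNX, opset 7
dummy = np.zeros((1, 416, 416, 3), dtype=np.float32)
outputs = onnxinfere('yolo3.onnx', dummy)  # list of three YOLO feature maps
print([o.shape for o in outputs])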
import numpy as np
from keras import backend as k
from .yolo3.model import yolo_eval
class Supress:
    def __init__(self, score, iou, featureMap):
        self.score = score
        self.iou = iou
        self.featureMap = featureMap
        self.sess = k.get_session()  # currently unused
def _get_class(self):
classNames = ['long sleeve dress', 'vest dress', 'vest', 'long sleeve outwear', 'long sleeve top', 'trousers',
'short sleeve top', 'sling dress', 'skirt', 'short sleeve dress', 'shorts']
return classNames
def _get_anchors(self):
anchors = [[10., 13.], [16., 30.], [33., 23.], [30., 61.], [62., 45.],
[59., 119.], [116., 90.], [156., 198.], [373., 326.]]
return np.array(anchors)
    def detect(self, image):
        anchors = self._get_anchors()
        classes = self._get_class()
        # yolo_eval expects the original image shape as (height, width).
        self.inputShape = [image.size[1], image.size[0]]
        boxes, scores, classes = yolo_eval(self.featureMap, anchors, len(classes), self.inputShape,
                                           score_threshold=self.score, iou_threshold=self.iou)
        return boxes, scores, classes
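A minimal end-to-end sketch of Supress, assuming TF 1.x graph mode as elsewhere in this repo; zero arrays stand in for the three ONNX feature maps (3 anchors x (11 classes + 5) = 48 channels per scale):

import numpy as np
import tensorflow as tf
from PIL import Image
from onnx_infer.supression import Supress

features = [tf.convert_to_tensor(np.zeros((1, s, s, 48), dtype=np.float32))
            for s in (13, 26, 52)]
image = Image.new('RGB', (500, 400))  # dummy original-resolution image
sup = Supress(0.05, 0.05, features)
boxes, scores, classes = sup.detect(image)
with tf.Session() as sess:
    print(sess.run([boxes, scores, classes]))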
from functools import wraps
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from .utils import compose
@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
"""Wrapper to set Darknet parameters for Convolution2D."""
darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
darknet_conv_kwargs.update(kwargs)
return Conv2D(*args, **darknet_conv_kwargs)
def DarknetConv2D_BN_Leaky(*args, **kwargs):
"""Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetConv2D(*args, **no_bias_kwargs),
BatchNormalization(),
LeakyReLU(alpha=0.1))
def resblock_body(x, num_filters, num_blocks):
'''A series of resblocks starting with a downsampling Convolution2D'''
# Darknet uses left and top padding instead of 'same' mode
x = ZeroPadding2D(((1,0),(1,0)))(x)
x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x)
for i in range(num_blocks):
y = compose(
DarknetConv2D_BN_Leaky(num_filters//2, (1,1)),
DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x)
x = Add()([x,y])
return x
def darknet_body(x):
    '''Darknet body having 52 Convolution2D layers'''
x = DarknetConv2D_BN_Leaky(32, (3,3))(x)
x = resblock_body(x, 64, 1)
x = resblock_body(x, 128, 2)
x = resblock_body(x, 256, 8)
x = resblock_body(x, 512, 8)
x = resblock_body(x, 1024, 4)
return x
def make_last_layers(x, num_filters, out_filters):
'''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''
x = compose(
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
y = compose(
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D(out_filters, (1,1)))(x)
return x, y
def yolo_body(inputs, num_anchors, num_classes):
"""Create YOLO_V3 model CNN body in Keras."""
darknet = Model(inputs, darknet_body(inputs))
x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))
x = compose(
DarknetConv2D_BN_Leaky(256, (1,1)),
UpSampling2D(2))(x)
x = Concatenate()([x,darknet.layers[152].output])
x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))
x = compose(
DarknetConv2D_BN_Leaky(128, (1,1)),
UpSampling2D(2))(x)
x = Concatenate()([x,darknet.layers[92].output])
x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))
return Model(inputs, [y1,y2,y3])
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
"""Convert final layer features to bounding box parameters."""
num_anchors = len(anchors)
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
grid_shape = K.shape(feats)[1:3] # height, width
grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])
grid = K.cast(grid, K.dtype(feats))
feats = K.reshape(
feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
    # Adjust predictions to each spatial grid point and anchor size.
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:])
    if calc_loss:
return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
'''Get corrected boxes'''
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
input_shape = K.cast(input_shape, K.dtype(box_yx))
image_shape = K.cast(image_shape, K.dtype(box_yx))
new_shape = K.round(image_shape * K.min(input_shape/image_shape))
offset = (input_shape-new_shape)/2./input_shape
scale = input_shape/new_shape
box_yx = (box_yx - offset) * scale
box_hw *= scale
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = K.concatenate([
box_mins[..., 0:1], # y_min
box_mins[..., 1:2], # x_min
box_maxes[..., 0:1], # y_max
box_maxes[..., 1:2] # x_max
])
# Scale boxes back to original image shape.
boxes *= K.concatenate([image_shape, image_shape])
return boxes
def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
'''Process Conv layer output'''
box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,
anchors, num_classes, input_shape)
boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
boxes = K.reshape(boxes, [-1, 4])
box_scores = box_confidence * box_class_probs
box_scores = K.reshape(box_scores, [-1, num_classes])
return boxes, box_scores
def yolo_eval(yolo_outputs,
anchors,
num_classes,
image_shape,
max_boxes=20,
score_threshold=.6,
iou_threshold=.5):
"""Evaluate YOLO model on given input and return filtered boxes."""
num_layers = len(yolo_outputs)
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting
input_shape = K.shape(yolo_outputs[0])[1:3] * 32
boxes = []
box_scores = []
for l in range(num_layers):
_boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
boxes.append(_boxes)
box_scores.append(_box_scores)
boxes = K.concatenate(boxes, axis=0)
box_scores = K.concatenate(box_scores, axis=0)
mask = box_scores >= score_threshold
max_boxes_tensor = K.constant(max_boxes, dtype='int32')
boxes_ = []
scores_ = []
classes_ = []
for c in range(num_classes):
# TODO: use keras backend instead of tf.
class_boxes = tf.boolean_mask(boxes, mask[:, c])
class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
nms_index = tf.image.non_max_suppression(
class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
class_boxes = K.gather(class_boxes, nms_index)
class_box_scores = K.gather(class_box_scores, nms_index)
classes = K.ones_like(class_box_scores, 'int32') * c
boxes_.append(class_boxes)
scores_.append(class_box_scores)
classes_.append(classes)
boxes_ = K.concatenate(boxes_, axis=0)
scores_ = K.concatenate(scores_, axis=0)
classes_ = K.concatenate(classes_, axis=0)
return boxes_, scores_, classes_
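A minimal shape check for the head: each scale emits num_anchors*(num_classes+5) channels, so with 3 anchors per scale and the 11 classes used in this repo that is 3*(11+5)=48.

from keras.layers import Input
from onnx_infer.yolo3.model import yolo_body

inputs = Input(shape=(416, 416, 3))
model = yolo_body(inputs, num_anchors=3, num_classes=11)
for t in model.outputs:  # 13x13x48, 26x26x48, 52x52x48
    print(t.shape)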
"""Miscellaneous utility functions."""
from functools import reduce
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
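# A quick illustration: compose applies left to right, so
# compose(lambda x: x + 1, lambda x: x * 2)(3) == (3 + 1) * 2 == 8.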
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
iw, ih = image.size
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
from PIL import Image
import numpy as np
import cv2
# Unlike utils.letterbox_image, the version below takes a PIL Image and
# returns a normalized batch array; it unpacks size as (h, w), which matches
# utils' (w, h) only because picSize is square.
def path2Img(path):
    # cv2 loads BGR and Image.fromarray keeps that channel order,
    # so the resulting PIL image is BGR-ordered.
    image = cv2.imread(path, cv2.IMREAD_COLOR)
    image = Image.fromarray(image)
    return image
def path2arr(path):
image = cv2.imread(path, cv2.IMREAD_COLOR)
return image
def arr2Img(image_arr):
image = Image.fromarray(image_arr)
return image
# image is a PIL Image; returns a float32 batch of shape (1, h, w, 3) in [0, 1].
def letterbox_image(image, size):
iw, ih = image.size
h, w = size
scale = min(w / iw, h / ih)
nw = int(iw * scale)
nh = int(ih * scale)
image = image.resize((nw, nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128, 128, 128))
new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))
image_data = np.array(new_image, dtype='float32')
image_data = image_data/255.
image_data = np.expand_dims(image_data, 0)
return image_data
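A minimal sketch (hypothetical image path) confirming the normalized (1, 416, 416, 3) batch the ONNX models consume:

from onnx_infer.yolodata import path2Img, letterbox_image

img = path2Img('example.jpg')
batch = letterbox_image(img, (416, 416))
print(batch.shape, batch.min(), batch.max())  # (1, 416, 416, 3), values in [0, 1]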
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name = 'onnx_infer',
version = '0.1.0',
description = 'model inference',
author = 'wjl',
url = 'http://git.wanmeizhensuo.com/wujilong/onnx_model.git',
author_email = 'wujilong@igengmei.com',
license = 'MIT',
packages = find_packages(),
)
......@@ -56,7 +56,9 @@ class Main:
# main test
-def get_result(colorOnnx_path, yoloOnnx_path, image_arr):
+def get_result(image_arr):
+    colorOnnx_path = './color.onnx'
+    yoloOnnx_path = './yolo3.onnx'
m = Main(colorOnnx_path, yoloOnnx_path, image_arr)
color, bbox, category = m.colorAndbboxAndcategory()
return color, bbox, category
......@@ -67,7 +69,7 @@ if __name__ == '__main__':
image_path = '/Users/apple/Desktop/8.jpg'
yoloOnnx_path = '/Users/apple/Desktop/yolo3.onnx'
image_arr = cv2.imread(image_path)
-    color, bbox, category = get_result(colorOnnx_path, yoloOnnx_path, image_arr)
+    color, bbox, category = get_result(image_arr)
print(color)
print(bbox)
print(category)
......