武继龙 / onnx_model
Commit ca801ba4, authored Jul 18, 2019 by 武继龙
new
parent b3ed567c
Showing 28 changed files with 635 additions and 54 deletions (+635 -54)
.DS_Store                                                        +0   -0
.idea/onnx.iml                                                   +1   -0
.idea/workspace.xml                                             +60  -52
dist/.DS_Store                                                   +0   -0
dist/onnx_infer-0.1.0.tar.gz                                     +0   -0
dist/onnx_infer-0.1.0/.DS_Store                                  +0   -0
dist/onnx_infer-0.1.0/PKG-INFO                                  +10   -0
dist/onnx_infer-0.1.0/onnx_infer.egg-info/PKG-INFO              +10   -0
dist/onnx_infer-0.1.0/onnx_infer.egg-info/SOURCES.txt           +17   -0
dist/onnx_infer-0.1.0/onnx_infer.egg-info/dependency_links.txt   +1   -0
dist/onnx_infer-0.1.0/onnx_infer.egg-info/top_level.txt          +1   -0
dist/onnx_infer-0.1.0/onnx_infer/__init__.py                     +0   -0
dist/onnx_infer-0.1.0/onnx_infer/color.py                       +45   -0
dist/onnx_infer-0.1.0/onnx_infer/croppic.py                     +19   -0
dist/onnx_infer-0.1.0/onnx_infer/drawpic.py                     +16   -0
dist/onnx_infer-0.1.0/onnx_infer/main.py                        +85   -0
dist/onnx_infer-0.1.0/onnx_infer/onnx2kera.py                   +49   -0
dist/onnx_infer-0.1.0/onnx_infer/supression.py                  +39   -0
dist/onnx_infer-0.1.0/onnx_infer/yolo3/__init__.py               +0   -0
dist/onnx_infer-0.1.0/onnx_infer/yolo3/model.py                +192   -0
dist/onnx_infer-0.1.0/onnx_infer/yolo3/utils.py                 +32   -0
dist/onnx_infer-0.1.0/onnx_infer/yolodata.py                    +34   -0
dist/onnx_infer-0.1.0/setup.cfg                                  +5   -0
dist/onnx_infer-0.1.0/setup.py                                  +15   -0
onnx_infer/.DS_Store                                             +0   -0
onnx_infer/color.onnx                                            +0   -0
onnx_infer/main.py                                               +4   -2
onnx_infer/yolo3.onnx                                            +0   -0
.DS_Store
No preview for this file type
.idea/onnx.iml
...
@@ -3,6 +3,7 @@
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$">
      <sourceFolder url="file://$MODULE_DIR$/onnx_infer/yolo3" isTestSource="false" />
      <sourceFolder url="file://$MODULE_DIR$/onnx_infer" isTestSource="false" />
    </content>
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
...
.idea/workspace.xml
...
@@ -2,14 +2,16 @@
<project version="4">
  <component name="ChangeListManager">
    <list default="true" id="c99523d9-010f-4b7a-8f3f-0e626959dd1a" name="Default Changelist" comment="">
      <change beforePath="$PROJECT_DIR$/.idea/onnx.iml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/onnx.iml" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/onnx_infer/color.py" beforeDir="false" afterPath="$PROJECT_DIR$/onnx_infer/color.py" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/onnx_infer/croppic.py" beforeDir="false" afterPath="$PROJECT_DIR$/onnx_infer/croppic.py" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/color.onnx" beforeDir="false" />
      <change beforePath="$PROJECT_DIR$/dist/onnx_infer-0.1.0.tar.gz" beforeDir="false" />
      <change beforePath="$PROJECT_DIR$/onnx_infer.egg-info/PKG-INFO" beforeDir="false" />
      <change beforePath="$PROJECT_DIR$/onnx_infer.egg-info/SOURCES.txt" beforeDir="false" />
      <change beforePath="$PROJECT_DIR$/onnx_infer.egg-info/dependency_links.txt" beforeDir="false" />
      <change beforePath="$PROJECT_DIR$/onnx_infer.egg-info/top_level.txt" beforeDir="false" />
      <change beforePath="$PROJECT_DIR$/onnx_infer/main.py" beforeDir="false" afterPath="$PROJECT_DIR$/onnx_infer/main.py" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/onnx_infer/onnx2kera.py" beforeDir="false" afterPath="$PROJECT_DIR$/onnx_infer/onnx2kera.py" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/onnx_infer/supression.py" beforeDir="false" afterPath="$PROJECT_DIR$/onnx_infer/supression.py" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/onnx_infer/yolo3/model.py" beforeDir="false" afterPath="$PROJECT_DIR$/onnx_infer/yolo3/model.py" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/setup.py" beforeDir="false" afterPath="$PROJECT_DIR$/setup.py" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/yolo3.onnx" beforeDir="false" />
    </list>
    <option name="EXCLUDED_CONVERTED_TO_IGNORED" value="true" />
    <option name="SHOW_DIALOG" value="false" />
...
@@ -22,7 +24,7 @@
    <file pinned="false" current-in-tab="false">
      <entry file="file://$PROJECT_DIR$/onnx_infer/supression.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="169">
          <state relative-caret-position="114">
            <caret line="9" column="36" selection-start-line="9" selection-start-column="36" selection-end-line="9" selection-end-column="36" />
            <folding>
              <element signature="e#0#18#0" expanded="true" />
...
@@ -34,8 +36,8 @@
    <file pinned="false" current-in-tab="false">
      <entry file="file://$PROJECT_DIR$/onnx_infer/yolodata.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="323">
            <caret line="17" lean-forward="true" selection-start-line="17" selection-end-line="17" />
          <state relative-caret-position="285">
            <caret line="17" selection-start-line="17" selection-end-line="17" />
            <folding>
              <element signature="e#0#21#0" expanded="true" />
            </folding>
...
@@ -43,10 +45,10 @@
        </provider>
      </entry>
    </file>
    <file pinned="false" current-in-tab="true">
    <file pinned="false" current-in-tab="false">
      <entry file="file://$PROJECT_DIR$/onnx_infer/yolo3/model.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="-417">
          <state relative-caret-position="-1422">
            <caret line="12" selection-start-line="12" selection-end-line="12" />
            <folding>
              <element signature="e#0#27#0" expanded="true" />
...
@@ -69,7 +71,7 @@
      <entry file="file://$PROJECT_DIR$/onnx_infer/color.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="152">
            <caret line="8" lean-forward="true" selection-start-line="8" selection-end-line="8" />
            <caret line="8" selection-start-line="8" selection-end-line="8" />
            <folding>
              <element signature="e#0#33#0" expanded="true" />
            </folding>
...
@@ -94,8 +96,8 @@
    <file pinned="false" current-in-tab="false">
      <entry file="file://$PROJECT_DIR$/onnx_infer/onnx2kera.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="627">
            <caret line="33" column="50" lean-forward="true" selection-start-line="33" selection-start-column="50" selection-end-line="33" selection-end-column="50" />
          <state relative-caret-position="475">
            <caret line="33" column="50" selection-start-line="33" selection-start-column="50" selection-end-line="33" selection-end-column="50" />
            <folding>
              <element signature="e#0#18#0" expanded="true" />
            </folding>
...
@@ -103,11 +105,14 @@
        </provider>
      </entry>
    </file>
    <file pinned="false" current-in-tab="false">
    <file pinned="false" current-in-tab="true">
      <entry file="file://$PROJECT_DIR$/onnx_infer/main.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="266">
            <caret line="14" column="52" selection-start-line="14" selection-start-column="52" selection-end-line="14" selection-end-column="52" />
          <state relative-caret-position="722">
            <caret line="38" column="24" lean-forward="true" selection-start-line="38" selection-start-column="24" selection-end-line="38" selection-end-column="24" />
            <folding>
              <element signature="e#419#485#0" expanded="true" />
            </folding>
          </state>
        </provider>
      </entry>
...
@@ -121,12 +126,12 @@
    <option name="CHANGED_PATHS">
      <list>
        <option value="$PROJECT_DIR$/onnx_infer/yolodata.py" />
        <option value="$PROJECT_DIR$/onnx_infer/main.py" />
        <option value="$PROJECT_DIR$/onnx_infer/color.py" />
        <option value="$PROJECT_DIR$/onnx_infer/yolo3/model.py" />
        <option value="$PROJECT_DIR$/onnx_infer/supression.py" />
        <option value="$PROJECT_DIR$/onnx_infer/croppic.py" />
        <option value="$PROJECT_DIR$/onnx_infer/onnx2kera.py" />
        <option value="$PROJECT_DIR$/onnx_infer/main.py" />
      </list>
    </option>
  </component>
...
@@ -257,22 +262,22 @@
      <workItem from="1563440209053" duration="966000" />
      <workItem from="1563444603403" duration="142000" />
      <workItem from="1563445246729" duration="125000" />
      <workItem from="1563453823510" duration="243000" />
    </task>
    <servers />
  </component>
  <component name="TimeTrackingManager">
    <option name="totallyTimeSpent" value="3235000" />
    <option name="totallyTimeSpent" value="3478000" />
  </component>
  <component name="ToolWindowManager">
    <frame x="40" y="29" width="1400" height="815" extended-state="0" />
    <editor active="true" />
    <layout>
      <window_info active="true" content_ui="combo" id="Project" order="0" visible="true" weight="0.26435935" />
      <window_info content_ui="combo" id="Project" order="0" visible="true" weight="0.26804122" />
      <window_info id="Structure" order="1" side_tool="true" weight="0.25" />
      <window_info id="Favorites" order="2" side_tool="true" />
      <window_info anchor="bottom" id="Message" order="0" />
      <window_info anchor="bottom" id="Find" order="1" />
      <window_info anchor="bottom" id="Run" order="2" weight="0.34024897" />
      <window_info anchor="bottom" id="Run" order="2" weight="0.29460582" />
      <window_info anchor="bottom" id="Debug" order="3" weight="0.4" />
      <window_info anchor="bottom" id="Cvs" order="4" weight="0.25" />
      <window_info anchor="bottom" id="Inspection" order="5" weight="0.4" />
...
@@ -295,7 +300,7 @@
  </component>
  <component name="com.intellij.coverage.CoverageDataManagerImpl">
    <SUITE FILE_PATH="coverage/onnx$supression.coverage" NAME="supression Coverage Results" MODIFIED="1563430968413" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/onnx_infer" />
    <SUITE FILE_PATH="coverage/onnx$main.coverage" NAME="main Coverage Results" MODIFIED="1563441173080" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/onnx_infer" />
    <SUITE FILE_PATH="coverage/onnx$main.coverage" NAME="main Coverage Results" MODIFIED="1563454044320" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/onnx_infer" />
  </component>
  <component name="editorHistoryManager">
    <entry file="file://$PROJECT_DIR$/onnx_infer/model_data/yolo_anchors.txt" />
...
@@ -308,15 +313,9 @@
          </state>
        </provider>
      </entry>
      <entry file="file://$PROJECT_DIR$/onnx_infer/__init__.py">
        <provider selected="true" editor-type-id="text-editor" />
      </entry>
      <entry file="file://$PROJECT_DIR$/onnx_infer/yolo3/__init__.py">
        <provider selected="true" editor-type-id="text-editor" />
      </entry>
      <entry file="file://$PROJECT_DIR$/onnx_infer/supression.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="169">
          <state relative-caret-position="114">
            <caret line="9" column="36" selection-start-line="9" selection-start-column="36" selection-end-line="9" selection-end-column="36" />
            <folding>
              <element signature="e#0#18#0" expanded="true" />
...
@@ -326,57 +325,66 @@
      </entry>
      <entry file="file://$PROJECT_DIR$/onnx_infer/yolodata.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="323">
            <caret line="17" lean-forward="true" selection-start-line="17" selection-end-line="17" />
          <state relative-caret-position="285">
            <caret line="17" selection-start-line="17" selection-end-line="17" />
            <folding>
              <element signature="e#0#21#0" expanded="true" />
            </folding>
          </state>
        </provider>
      </entry>
      <entry file="file://$PROJECT_DIR$/onnx_infer/onnx2kera.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="627">
            <caret line="33" column="50" lean-forward="true" selection-start-line="33" selection-start-column="50" selection-end-line="33" selection-end-column="50" />
            <folding>
              <element signature="e#0#18#0" expanded="true" />
            </folding>
          </state>
        </provider>
      <entry file="file://$PROJECT_DIR$/onnx_infer/yolo3/__init__.py">
        <provider selected="true" editor-type-id="text-editor" />
      </entry>
      <entry file="file://$PROJECT_DIR$/onnx_infer/main.py">
      <entry file="file://$PROJECT_DIR$/onnx_infer/__init__.py">
        <provider selected="true" editor-type-id="text-editor" />
      </entry>
      <entry file="file://$PROJECT_DIR$/onnx_infer/croppic.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="266">
            <caret line="14" column="52" selection-start-line="14" selection-start-column="52" selection-end-line="14" selection-end-column="52" />
          <state relative-caret-position="247">
            <caret line="13" column="19" selection-start-line="13" selection-start-column="19" selection-end-line="13" selection-end-column="19" />
          </state>
        </provider>
      </entry>
      <entry file="file://$PROJECT_DIR$/onnx_infer/drawpic.py">
        <provider selected="true" editor-type-id="text-editor" />
      </entry>
      <entry file="file://$PROJECT_DIR$/onnx_infer/croppic.py">
      <entry file="file://$PROJECT_DIR$/onnx_infer/onnx2kera.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="247">
            <caret line="13" column="19" selection-start-line="13" selection-start-column="19" selection-end-line="13" selection-end-column="19" />
          <state relative-caret-position="475">
            <caret line="33" column="50" selection-start-line="33" selection-start-column="50" selection-end-line="33" selection-end-column="50" />
            <folding>
              <element signature="e#0#18#0" expanded="true" />
            </folding>
          </state>
        </provider>
      </entry>
      <entry file="file://$PROJECT_DIR$/onnx_infer/yolo3/model.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="-1422">
            <caret line="12" selection-start-line="12" selection-end-line="12" />
            <folding>
              <element signature="e#0#27#0" expanded="true" />
            </folding>
          </state>
        </provider>
      </entry>
      <entry file="file://$PROJECT_DIR$/onnx_infer/color.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="152">
            <caret line="8" lean-forward="true" selection-start-line="8" selection-end-line="8" />
            <caret line="8" selection-start-line="8" selection-end-line="8" />
            <folding>
              <element signature="e#0#33#0" expanded="true" />
            </folding>
          </state>
        </provider>
      </entry>
      <entry file="file://$PROJECT_DIR$/onnx_infer/yolo3/model.py">
      <entry file="file://$PROJECT_DIR$/onnx_infer/main.py">
        <provider selected="true" editor-type-id="text-editor">
          <state relative-caret-position="-417">
            <caret line="12" selection-start-line="12" selection-end-line="12" />
          <state relative-caret-position="722">
            <caret line="38" column="24" lean-forward="true" selection-start-line="38" selection-start-column="24" selection-end-line="38" selection-end-column="24" />
            <folding>
              <element signature="e#0#27#0" expanded="true" />
              <element signature="e#419#485#0" expanded="true" />
            </folding>
          </state>
        </provider>
...
dist/.DS_Store
0 → 100644
File added
dist/onnx_infer-0.1.0.tar.gz
No preview for this file type
dist/onnx_infer-0.1.0/.DS_Store
0 → 100644
File added
dist/onnx_infer-0.1.0/PKG-INFO
0 → 100644
Metadata-Version: 1.0
Name: onnx_infer
Version: 0.1.0
Summary: model inference
Home-page: http://git.wanmeizhensuo.com/wujilong/onnx_model.git
Author: wjl
Author-email: wujilong@igengmei.com
License: MIT
Description: UNKNOWN
Platform: UNKNOWN
dist/onnx_infer-0.1.0/onnx_infer.egg-info/PKG-INFO
0 → 100644
Metadata-Version: 1.0
Name: onnx-infer
Version: 0.1.0
Summary: model inference
Home-page: http://git.wanmeizhensuo.com/wujilong/onnx_model.git
Author: wjl
Author-email: wujilong@igengmei.com
License: MIT
Description: UNKNOWN
Platform: UNKNOWN
dist/onnx_infer-0.1.0/onnx_infer.egg-info/SOURCES.txt
0 → 100644
setup.py
onnx_infer/__init__.py
onnx_infer/color.py
onnx_infer/croppic.py
onnx_infer/drawpic.py
onnx_infer/main.py
onnx_infer/onnx2kera.py
onnx_infer/supression.py
onnx_infer/yolodata.py
onnx_infer.egg-info/PKG-INFO
onnx_infer.egg-info/SOURCES.txt
onnx_infer.egg-info/dependency_links.txt
onnx_infer.egg-info/top_level.txt
onnx_infer/yolo3/__init__.py
onnx_infer/yolo3/model.py
onnx_infer/yolo3/utils.py
\ No newline at end of file
dist/onnx_infer-0.1.0/onnx_infer.egg-info/dependency_links.txt
0 → 100644
dist/onnx_infer-0.1.0/onnx_infer.egg-info/top_level.txt
0 → 100644
onnx_infer
dist/onnx_infer-0.1.0/onnx_infer/__init__.py
0 → 100644
dist/onnx_infer-0.1.0/onnx_infer/color.py
0 → 100644

from .onnx2kera import onnxinfere
import numpy as np
import cv2
from copy import deepcopy

# this model use array to get color
# global
COLORS = ['blue', 'green', 'red', 'yellow', 'gray', 'white', 'black', 'pink']


def featureTransform(resized):
    YCC = deepcopy(resized)
    YCC = cv2.cvtColor(YCC, cv2.COLOR_BGR2LAB)[:, :, 1:3]
    HSV = deepcopy(resized)
    HSV = cv2.cvtColor(HSV, cv2.COLOR_BGR2HSV)[:, :, 0:3]
    Luv = deepcopy(resized)
    Luv = cv2.cvtColor(Luv, cv2.COLOR_BGR2YUV)[:, :, 0:1]
    tmp = np.concatenate((HSV, Luv, YCC), axis=2)
    return tmp


def resize(image):
    # image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    resized = cv2.resize(image, (150, 150))
    return resized


def get_color(onnx_path, image, colors):
    image = np.expand_dims(image, axis=0)
    image = np.array(image, dtype=np.float32)
    predicts = onnxinfere(onnx_path, image)
    prediction = np.argmax(predicts[0], 1)
    color = colors[prediction[0]]
    score = predicts[0][0][prediction[0]]
    return color, score


if __name__ == '__main__':
    model_path = '/Users/apple/Desktop/color.onnx'
    path = '/Users/apple/Desktop/8.jpg'
    image = cv2.imread(path, cv2.IMREAD_COLOR)
    resized = cv2.resize(image, (150, 150))
    image = featureTransform(resized)
    color, score = get_color(model_path, image, COLORS)
    print(color)
    print(score)
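For orientation, a minimal sketch of what featureTransform produces: it stacks the 3 HSV channels, 1 YUV luma channel and 2 LAB chroma channels into a 6-channel array, which is the feature layout the color ONNX model is fed here. This is not part of the commit; the dummy image is a placeholder, and it assumes the packaged module and its dependencies (cv2, numpy, plus the keras/onnxruntime stack pulled in via onnx2kera) are importable.

import cv2
import numpy as np
from onnx_infer.color import featureTransform, resize

# Hypothetical BGR image standing in for a real detection crop.
dummy = np.zeros((300, 200, 3), dtype=np.uint8)
resized = resize(dummy)               # resized to (150, 150, 3)
features = featureTransform(resized)  # HSV (3) + YUV luma (1) + LAB a/b (2)
print(features.shape)                 # (150, 150, 6)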
dist/onnx_infer-0.1.0/onnx_infer/croppic.py
0 → 100644

# crop Image with threshold
import cv2


def cropImage(image, threshold_index):
    return image[int(threshold_index[0]): int(threshold_index[2]), int(threshold_index[1]): int(threshold_index[3])]


def saveCrop(image, savePath):
    cv2.imwrite(savePath, image)


if __name__ == '__main__':
    from .yolodata import path2arr
    path = '/Users/apple/Desktop/8.jpg'
    threshold_index = [95.55165, 184.0512, 250.18225, 333.9927]
    image = path2arr(path)
    image = cropImage(image, threshold_index)
    print(image)
    save = '/Users/apple/Desktop/p.jpg'
    cv2.imwrite(save, image)
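A small sketch (an assumption, not in the commit) of how cropImage interprets the box: threshold_index is ordered [y_min, x_min, y_max, x_max], matching the boxes returned by yolo_eval further down, and the values are cast to int before slicing.

import numpy as np
from onnx_infer.croppic import cropImage

# Hypothetical 416x416 image and a box in [y_min, x_min, y_max, x_max] order.
img = np.zeros((416, 416, 3), dtype=np.uint8)
crop = cropImage(img, [95.5, 184.0, 250.1, 333.9])
print(crop.shape)  # (155, 149, 3): rows 95..249, columns 184..332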
dist/onnx_infer-0.1.0/onnx_infer/drawpic.py
0 → 100644

# draw bounding box on the original picture
import cv2


def drawrec(image, save_path, bbox):
    image = cv2.rectangle(image, (int(bbox[1]), int(bbox[0])), (int(bbox[3]), int(bbox[2])), (255, 0, 0), 3)
    cv2.imwrite(save_path, image)


if __name__ == '__main__':
    bbox = [95.551674, 184.05116, 250.18227, 333.99274]
    image_path = '/Users/apple/Desktop/8.jpg'
    save_path = '/Users/apple/Desktop/rec.jpg'
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    drawrec(image, save_path, bbox)
dist/onnx_infer-0.1.0/onnx_infer/main.py
0 → 100644

# *******************
"""
This is the main module, which combines the color and yolo models.
The color model needs the absolute path of color.onnx and an image array generated by croppic.py;
its result is the color of the picture.
The yolo model is more complex: it needs the paths of anchor.txt and label.txt, the image path and the yolo.onnx path;
its result is the category and the bounding box with the max score.
"""
# *******************
from .yolodata import path2Img, letterbox_image, path2arr, arr2Img
from .onnx2kera import onnxinfere
from .supression import Supress
import tensorflow as tf
from .color import COLORS, featureTransform, resize, get_color
from .croppic import cropImage
import cv2

# global
categorys = ['long sleeve dress', 'vest dress', 'vest', 'long sleeve outwear', 'long sleeve top', 'trousers',
             'short sleeve top', 'sling dress', 'skirt', 'short sleeve dress', 'shorts']


class Main:
    def __init__(self, colorOnnx_path, yoloOnnx_path, image_arr):
        self.colorOnnx_path = colorOnnx_path
        self.yoloOnnx_path = yoloOnnx_path
        self.image_arr = image_arr
        self.score = 0.05
        self.iou = 0.05
        self.picSize = (416, 416)

    def bboxAndcategory(self):
        bbox = []
        category = []
        image = arr2Img(self.image_arr)
        image_data = letterbox_image(image, self.picSize)
        precit = onnxinfere(self.yoloOnnx_path, image_data)
        feature = []
        for f in precit:
            feature.append(tf.convert_to_tensor(f))
        sup = Supress(self.score, self.iou, feature)
        box, score, classes = sup.detect(image)
        with tf.Session() as sess:
            bbox = box.eval()[0]
            index = classes.eval()[0]
        return bbox, categorys[index]

    def colorAndbboxAndcategory(self):
        bbox, category = self.bboxAndcategory()
        image_arr = self.image_arr
        image_crop = cropImage(image_arr, bbox)
        resized = resize(image_crop)
        tmp = featureTransform(resized)
        color, ratio = get_color(self.colorOnnx_path, tmp, COLORS)
        return color, bbox, category


# main test
def get_result(image_arr):
    colorOnnx_path = './color.onnx'
    yoloOnnx_path = './yolo3.onnx'
    m = Main(colorOnnx_path, yoloOnnx_path, image_arr)
    color, bbox, category = m.colorAndbboxAndcategory()
    return color, bbox, category


if __name__ == '__main__':
    colorOnnx_path = '/Users/apple/Desktop/color.onnx'
    image_path = '/Users/apple/Desktop/8.jpg'
    yoloOnnx_path = '/Users/apple/Desktop/yolo3.onnx'
    image_arr = cv2.imread(image_path)
    color, bbox, category = get_result(image_arr)
    print(color)
    print(bbox)
    print(category)

"""
m = Main(colorOnnx_path, yoloOnnx_path, image_path)
color, bbox, category = m.colorAndbboxAndcategory()
print('its color is : {}, category is : {} '.format(color, category))
print('the bounding box is : {}'.format(bbox))
# print(color)
# print(bbox)
# print(category)
"""
dist/onnx_infer-0.1.0/onnx_infer/onnx2kera.py
0 → 100644

import onnxmltools
import onnxruntime
from keras.models import load_model
from copy import deepcopy
import cv2
import numpy as np
import keras.backend as k
import tensorflow as tf
from .yolodata import path2Img, letterbox_image


def hf2onnx(h5_path, save_path):
    model = load_model(h5_path)
    onnx_model = onnxmltools.convert_keras(model, target_opset=7)
    onnxmltools.utils.save_model(onnx_model, save_path)


def onnxinfere(onnx_path, input):
    session = onnxruntime.InferenceSession(onnx_path)
    inname = [input.name for input in session.get_inputs()][0]
    outname = [output.name for output in session.get_outputs()]
    predict = session.run(outname, {inname: input})
    return predict


if __name__ == '__main__':
    from .supression import Supress
    # path = '/Users/apple/Public/keras-yolo3/model_data/our1_yolo.h5'
    save_path = '/Users/apple/Desktop/yolo3.onnx'
    anncorPath = 'model_data/yolo_anchors.txt'
    classPath = 'model_data/deepfashion.txt'
    score = 0.01
    iou = 0.01
    picSize = (416, 416)
    # hf2onnx(path, save_path)K.learning_phase(): 0
    img_path = '/Users/apple/Desktop/8.jpg'
    image = path2Img(img_path)
    image_data = letterbox_image(image, picSize)
    precit = onnxinfere(save_path, image_data)
    print(precit)
    feature = []
    for f in precit:
        feature.append(tf.convert_to_tensor(f))
    print(feature)
    sup = Supress(anncorPath, classPath, score, iou, picSize, feature)
    box, score, classes = sup.detect(image)
    with tf.Session() as sess:
        print(box.eval())
        print(score.eval())
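As a sketch of what onnxinfere does (an assumption, not part of the commit): it opens a new onnxruntime.InferenceSession per call, feeds the array under the model's first input name, and returns the list of all outputs. Something like the following would exercise it against a single-input ONNX model; the model path and input shape are placeholders, and the module's keras/tensorflow/onnxmltools imports are assumed to be installed.

import numpy as np
from onnx_infer.onnx2kera import onnxinfere

# Placeholder model path and input shape; adjust to the actual ONNX model.
dummy = np.zeros((1, 416, 416, 3), dtype=np.float32)
outputs = onnxinfere('/path/to/yolo3.onnx', dummy)
for out in outputs:
    print(out.shape)  # one array per model output (three feature maps for YOLOv3)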
dist/onnx_infer-0.1.0/onnx_infer/supression.py
0 → 100644

import numpy as np
from keras import backend as k
from .yolo3.model import yolo_eval
import os


class Supress:
    def __init__(self, score, iou, featureMap):
        self.score = score
        self.iou = iou
        self.featureMap = featureMap
        self.sess = k.get_session

    def _get_class(self):
        classNames = ['long sleeve dress', 'vest dress', 'vest', 'long sleeve outwear', 'long sleeve top',
                      'trousers', 'short sleeve top', 'sling dress', 'skirt', 'short sleeve dress', 'shorts']
        return classNames

    def _get_anchors(self):
        anchors = [[10., 13.], [16., 30.], [33., 23.], [30., 61.], [62., 45.], [59., 119.],
                   [116., 90.], [156., 198.], [373., 326.]]
        return np.array(anchors)

    def detect(self, image):
        anchors = self._get_anchors()
        classes = self._get_class()
        # self.inputShape = k.placeholder(shape=(2, ))
        self.inputShape = [image.size[1], image.size[0]]
        boxes, scores, classes = yolo_eval(self.featureMap, anchors, len(classes), self.inputShape,
                                           score_threshold=self.score, iou_threshold=self.iou)
        # out_boxes, out_scores, out_classes = self.sess.run([boxes, scores, classes],
        #                                                    feed_dict={self.inputShape:
        #                                                               [self.picSize[1],
        #                                                                self.picSize[0]],
        #                                                               k.learning_phase(): 0
        #                                                               })
        return boxes, scores, classes
dist/onnx_infer-0.1.0/onnx_infer/yolo3/__init__.py
0 → 100644
dist/onnx_infer-0.1.0/onnx_infer/yolo3/model.py
0 → 100644

from functools import wraps

import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2

from .utils import compose


@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides') == (2, 2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs)


def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    return compose(
        DarknetConv2D(*args, **no_bias_kwargs),
        BatchNormalization(),
        LeakyReLU(alpha=0.1))


def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        y = compose(
            DarknetConv2D_BN_Leaky(num_filters // 2, (1, 1)),
            DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        x = Add()([x, y])
    return x


def darknet_body(x):
    '''Darknet body having 52 Convolution2D layers'''
    x = DarknetConv2D_BN_Leaky(32, (3, 3))(x)
    x = resblock_body(x, 64, 1)
    x = resblock_body(x, 128, 2)
    x = resblock_body(x, 256, 8)
    x = resblock_body(x, 512, 8)
    x = resblock_body(x, 1024, 4)
    return x


def make_last_layers(x, num_filters, out_filters):
    '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''
    x = compose(
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x)
    y = compose(
        DarknetConv2D_BN_Leaky(num_filters * 2, (3, 3)),
        DarknetConv2D(out_filters, (1, 1)))(x)
    return x, y


def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V3 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body(inputs))
    x, y1 = make_last_layers(darknet.output, 512, num_anchors * (num_classes + 5))

    x = compose(
        DarknetConv2D_BN_Leaky(256, (1, 1)),
        UpSampling2D(2))(x)
    x = Concatenate()([x, darknet.layers[152].output])
    x, y2 = make_last_layers(x, 256, num_anchors * (num_classes + 5))

    x = compose(
        DarknetConv2D_BN_Leaky(128, (1, 1)),
        UpSampling2D(2))(x)
    x = Concatenate()([x, darknet.layers[92].output])
    x, y3 = make_last_layers(x, 128, num_anchors * (num_classes + 5))

    return Model(inputs, [y1, y2, y3])


def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
                    [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss == True:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs


def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],   # y_min
        box_mins[..., 1:2],   # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]   # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes


def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
    '''Process Conv layer output'''
    box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats, anchors, num_classes, input_shape)
    boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
    boxes = K.reshape(boxes, [-1, 4])
    box_scores = box_confidence * box_class_probs
    box_scores = K.reshape(box_scores, [-1, num_classes])
    return boxes, box_scores


def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input and return filtered boxes."""
    num_layers = len(yolo_outputs)
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]  # default setting
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l], anchors[anchor_mask[l]],
                                                    num_classes, input_shape, image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    boxes_ = []
    scores_ = []
    classes_ = []
    for c in range(num_classes):
        # TODO: use keras backend instead of tf.
        class_boxes = tf.boolean_mask(boxes, mask[:, c])
        class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        class_boxes = K.gather(class_boxes, nms_index)
        class_box_scores = K.gather(class_box_scores, nms_index)
        classes = K.ones_like(class_box_scores, 'int32') * c
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_
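For orientation, a small numpy sketch that mirrors the decoding formulas in yolo_head rather than calling it (all values are hypothetical): box centres come from sigmoid(t_xy) plus the cell offset, divided by the grid size, and widths/heights come from exp(t_wh) scaled by the anchor and the input size.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# Hypothetical raw outputs t_x, t_y, t_w, t_h for the cell at (col=4, row=7)
# of a 13x13 grid, with anchor (116, 90) and a 416x416 input.
t_xy, t_wh = np.array([0.2, -0.1]), np.array([0.3, 0.1])
grid_xy, grid_shape = np.array([4, 7]), np.array([13, 13])
anchor, input_shape = np.array([116.0, 90.0]), np.array([416.0, 416.0])

box_xy = (sigmoid(t_xy) + grid_xy) / grid_shape  # centre, relative to the image
box_wh = np.exp(t_wh) * anchor / input_shape     # size, relative to the image
print(box_xy, box_wh)                            # ~[0.35 0.58] ~[0.376 0.239]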
dist/onnx_infer-0.1.0/onnx_infer/yolo3/utils.py
0 → 100644

"""Miscellaneous utility functions."""

from functools import reduce

from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb


def compose(*funcs):
    """Compose arbitrarily many functions, evaluated left to right.

    Reference: https://mathieularose.com/function-composition-in-python/
    """
    # return lambda x: reduce(lambda v, f: f(v), funcs, x)
    if funcs:
        return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
    else:
        raise ValueError('Composition of empty sequence not supported.')


def letterbox_image(image, size):
    '''resize image with unchanged aspect ratio using padding'''
    iw, ih = image.size
    w, h = size
    scale = min(w / iw, h / ih)
    nw = int(iw * scale)
    nh = int(ih * scale)

    image = image.resize((nw, nh), Image.BICUBIC)
    new_image = Image.new('RGB', size, (128, 128, 128))
    new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))
    return new_image
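A tiny sketch (not in the commit) of what compose guarantees: the functions are applied left to right, so compose(f, g)(x) equals g(f(x)), which is how DarknetConv2D_BN_Leaky chains Conv, BatchNorm and LeakyReLU in model.py. It assumes the packaged module and its PIL/numpy/matplotlib imports are installed.

from onnx_infer.yolo3.utils import compose

double = lambda x: x * 2
add_one = lambda x: x + 1

pipeline = compose(double, add_one)  # applied left to right
print(pipeline(10))                  # double first, then add_one -> 21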
dist/onnx_infer-0.1.0/onnx_infer/yolodata.py
0 → 100644

from PIL import Image
import numpy as np
import cv2


# the letterbox_image input is the type of IMAGE, and the size need not reverse.
def path2Img(path):
    image = cv2.imread(path, cv2.IMREAD_COLOR)
    image = Image.fromarray(image)
    return image


def path2arr(path):
    image = cv2.imread(path, cv2.IMREAD_COLOR)
    return image


def arr2Img(image_arr):
    image = Image.fromarray(image_arr)
    return image


# this image is one Image type
def letterbox_image(image, size):
    iw, ih = image.size
    h, w = size
    scale = min(w / iw, h / ih)
    nw = int(iw * scale)
    nh = int(ih * scale)

    image = image.resize((nw, nh), Image.BICUBIC)
    new_image = Image.new('RGB', size, (128, 128, 128))
    new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))
    image_data = np.array(new_image, dtype='float32')
    image_data = image_data / 255.
    image_data = np.expand_dims(image_data, 0)
    return image_data
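A minimal sketch (an assumption, not part of the commit) of the preprocessing contract: this letterbox_image takes a PIL Image and a (416, 416) size, pads with gray to preserve aspect ratio, scales to [0, 1] and adds a batch dimension, which is the tensor layout onnxinfere feeds to the YOLO model. The dummy array is a placeholder for cv2.imread output.

import numpy as np
from onnx_infer.yolodata import arr2Img, letterbox_image

# Hypothetical 300x500 BGR array standing in for cv2.imread output.
arr = np.zeros((300, 500, 3), dtype=np.uint8)
image = arr2Img(arr)
image_data = letterbox_image(image, (416, 416))
print(image_data.shape, image_data.min(), image_data.max())  # (1, 416, 416, 3) 0.0 ~0.5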
dist/onnx_infer-0.1.0/setup.cfg
0 → 100644
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0
dist/onnx_infer-0.1.0/setup.py
0 → 100644

#!/usr/bin/python
from setuptools import setup, find_packages

setup(
    name='onnx_infer',
    version='0.1.0',
    description='model inference',
    author='wjl',
    url='http://git.wanmeizhensuo.com/wujilong/onnx_model.git',
    author_email='wujilong@igengmei.com',
    license='MIT',
    packages=find_packages(),
)
onnx_infer/.DS_Store
No preview for this file type
color.onnx → onnx_infer/color.onnx
File moved
onnx_infer/main.py
...
@@ -56,7 +56,9 @@ class Main:
 
 # main test
-def get_result(colorOnnx_path, yoloOnnx_path, image_arr):
+def get_result(image_arr):
+    colorOnnx_path = './color.onnx'
+    yoloOnnx_path = './yolo3.onnx'
     m = Main(colorOnnx_path, yoloOnnx_path, image_arr)
     color, bbox, category = m.colorAndbboxAndcategory()
     return color, bbox, category
...
@@ -67,7 +69,7 @@ if __name__ == '__main__':
     image_path = '/Users/apple/Desktop/8.jpg'
     yoloOnnx_path = '/Users/apple/Desktop/yolo3.onnx'
     image_arr = cv2.imread(image_path)
-    color, bbox, category = get_result(colorOnnx_path, yoloOnnx_path, image_arr)
+    color, bbox, category = get_result(image_arr)
     print(color)
     print(bbox)
     print(category)
...
yolo3.onnx → onnx_infer/yolo3.onnx
File moved