Commit 3c86aeb3 authored by zhongshangwu

Adjust the code structure and improve the documentation

parent d9307cb1
@@ -6,7 +6,6 @@ from scipy import misc
 import sys
 import os
 import argparse
-#import tensorflow as tf
 import numpy as np
 import mxnet as mx
 import random
@@ -17,21 +16,21 @@ from time import sleep
 from easydict import EasyDict as edict
 from AgeGenderDist.mtcnn_detector import MtcnnDetector
 sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src', 'common'))
-import AgeGenderDist.face_image
-import AgeGenderDist.face_preprocess
+from AgeGenderDist import face_image
+from AgeGenderDist import face_preprocess
 from pkg_resources import resource_filename
-MODEL_STR = resource_filename(__name__, "model/")
+MODEL_STR = resource_filename(__name__, "model/model")

 def do_flip(data):
   for idx in range(data.shape[0]):
     data[idx,:,:] = np.fliplr(data[idx,:,:])

-def get_model(ctx, image_size, model_str, layer):
+def get_model(ctx, image_size, layer):
   epoch = 0
   prefix = MODEL_STR
-  print('loading',prefix, epoch)
   sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
   all_layers = sym.get_internals()
   sym = all_layers[layer+'_output']
@@ -40,6 +39,16 @@ def get_model(ctx, image_size, model_str, layer):
   model.set_params(arg_params, aux_params)
   return model

+class FaceModelArgs:
+  def __init__(self, image_size='112,112', gpu=0, det=0, flip=0, threshold=1.24):
+    self.image_size = image_size
+    self.gpu = gpu
+    self.det = det
+    self.flip = flip
+    self.threshold = threshold
+
 class FaceModel:
   def __init__(self, args):
     self.args = args
@@ -51,8 +60,7 @@ class FaceModel:
     assert len(_vec)==2
     image_size = (int(_vec[0]), int(_vec[1]))
     self.model = None
-    if len(args.model)>0:
-      self.model = get_model(ctx, image_size, args.model, 'fc1')
+    self.model = get_model(ctx, image_size, 'fc1')
     self.det_minsize = 50
     self.det_threshold = [0.6,0.7,0.8]
@@ -68,7 +76,6 @@ class FaceModel:
   def get_input(self, face_img):
     ret = self.detector.detect_face(face_img, det_type = self.args.det)
-    #print('ret',ret)
     if ret is None:
       return None
     bbox, points = ret
@@ -76,16 +83,13 @@ class FaceModel:
       return None
     bbox = bbox[0,0:4]
     points = points[0,:].reshape((2,5)).T
-    #print(bbox)
-    #print(points)
     nimg = face_preprocess.preprocess(face_img, bbox, points, image_size='112,112')
     nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
     aligned = np.transpose(nimg, (2,0,1))
     input_blob = np.expand_dims(aligned, axis=0)
     data = mx.nd.array(input_blob)
     db = mx.io.DataBatch(data=(data,))
-    return db
+    return db, ret

   def get_ga(self, data):
     self.model.forward(data, is_train=False)
@@ -98,3 +102,18 @@ class FaceModel:
     return gender, age
+
+  def get_age_gender_dist(self, img_src):
+    result = self.get_input(img_src)
+    if result is None:
+      print('ret is none')
+      return None
+    img, ret = result
+    bbox, points = ret
+    points = points[0,:].reshape((2,5)).T
+    lf_eye = points[0]
+    rt_eye = points[1]
+    tmp = rt_eye - lf_eye
+    dist = math.hypot(tmp[0], tmp[1])
+    gender, age = self.get_ga(img)
+    return age, gender, dist
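For readers skimming the diff: the new `get_age_gender_dist` method takes the five MTCNN landmarks that `get_input` now returns alongside the `DataBatch` and measures the distance between the two eye points with `math.hypot`. The standalone sketch below is only an illustration of that computation: the landmark coordinates are made up, `import math` is written out explicitly here, and the usual MTCNN landmark layout (five x coordinates followed by five y coordinates, ordered left eye, right eye, nose, left mouth corner, right mouth corner) is assumed.

```python
import math
import numpy as np

# One face's landmarks as MTCNN typically reports them: a flat array of
# ten values, the five x coordinates followed by the five y coordinates.
raw = np.array([38.0, 74.0, 56.0, 41.0, 72.0,    # x coordinates (made-up values)
                52.0, 52.0, 70.0, 90.0, 90.0])   # y coordinates (made-up values)

# reshape((2, 5)).T, as in get_age_gender_dist, gives a 5x2 array of (x, y) rows.
points = raw.reshape((2, 5)).T

lf_eye, rt_eye = points[0], points[1]    # (x, y) of the left and right eye
delta = rt_eye - lf_eye
dist = math.hypot(delta[0], delta[1])    # inter-pupillary distance in pixels

print(dist)  # 36.0 for the made-up coordinates above
```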
-AgeGenderDist
-===============================
-
-version number: 0.1.0
-author: rentingting
-
-Overview
---------
-
-A short description of the project
-
-Installation / Usage
---------------------
-
-To install use pip:
-
-    $ pip install AgeGenderDist
-
-Or clone the repo:
-
-    $ git clone https://github.com/rentingting/AgeGenderDist.git
-    $ python setup.py install
-
-Contributing
-------------
-
-TBD
-
-Example
--------
-
-TBD
+# AgeGenderDist
+
+Face size measurement library
+
+## Installation
+
+1. Install with pip
+
+```shell
+pip install git@git.wanmeizhensuo.com:ai/AgeGenderDist.git#egg=AgeGenderDist
+```
+
+## Usage
+
+```python
+from AgeGenderDist.face_model import FaceModel, FaceModelArgs
+import cv2
+
+args = FaceModelArgs()
+model = FaceModel(args)
+
+img = cv2.imread('./lihaonan.jpg')
+
+age, gender, dist = model.get_age_gender_dist(img)
+print(age)     # age
+print(gender)  # gender, 0: female, 1: male
+print(dist)    # inter-pupillary distance, in px
+```
\ No newline at end of file
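One note on the usage example in the README above: `get_age_gender_dist` returns `None` when the detector finds no face in the image, so callers that may receive arbitrary photos probably want a small guard. A sketch, reusing only the names and the image path shown in the README example, follows.

```python
import cv2
from AgeGenderDist.face_model import FaceModel, FaceModelArgs

model = FaceModel(FaceModelArgs())

result = model.get_age_gender_dist(cv2.imread('./lihaonan.jpg'))
if result is None:
    print('no face detected')    # detection failed, nothing to report
else:
    age, gender, dist = result   # same age / gender / pupil-distance triple as in the README
    print(age, gender, dist)
```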