Commit e6dc7d3c authored by 任婷婷

delete imgpath url

parent 4571f03e
File added (×6)
@@ -15,6 +15,14 @@ from PIL import Image
import numpy as np
import random
'''
from partsim.eye96 import Eye
from partsim.nose96 import Nose
from partsim.eyebrow96 import EyeBrow
from partsim.chin96 import Chin
from partsim.contour96 import Contour
from partsim.lip96 import Lip
'''
from eye96 import Eye
from nose96 import Nose
from eyebrow96 import EyeBrow
@@ -28,9 +36,8 @@ from align import AlignDlib
import caffe
import cv2
BASE_DIR='/home/gmface'
PREDICTOR_PATH = os.path.join(BASE_DIR, 'models','shape_predictor_68_face_landmarks.dat')
ROLL_MODEL_PATH = os.path.join(BASE_DIR, 'models','cnn_cccdd_30k.tf')
PREDICTOR_PATH = os.path.join(os.path.dirname(__file__),'models','shape_predictor_68_face_landmarks.dat')
ROLL_MODEL_PATH = os.path.join(os.path.dirname(__file__),'models','cnn_cccdd_30k.tf')
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(PREDICTOR_PATH)
@@ -46,42 +53,22 @@ class NoFaces(Exception):
class Face_folder(object):
    PARTS = {'eye': Eye, 'nose': Nose, 'eyebrow': EyeBrow, 'chin': Chin, 'contour': Contour, 'lip': Lip}
    def __init__(self, image_url,img_path):
        self.face_raw = None
        if img_path:
            self.img_path=img_path
            self.img=cv2.imread(img_path)
            #print('self.img',self.img)
        elif image_url:
            #print('url',image_url)
            image_url = image_url.strip()
            result = requests.get(image_url)
            if result.ok:
                img = Image.open(io.BytesIO(result.content))
                img = img.convert('RGB')
                self.img = np.array(img)
                h,w,c=self.img.shape
                #print(h,w)
                if h<70 or w<70:
                    raise NoFaces
                #print('img {}'.format(self.img))
            else:
                raise NoFaces
        else:
            raise NoFaces
    def __init__(self,img):
        self.img=np.array(img)
        h,w,c=self.img.shape
        if h<70 or w<70:
            raise NoFaces
    @classmethod
    def get_vector(cls,image_url,img_path,part,net):
    def get_vector(cls,img,part,net):
        """
        get part vector.
        :return:
        """
        face = Face_folder(image_url,img_path)
        face = Face_folder(img)
        face.build_landmark(net)
        face.build_part()
        return getattr(face, part).get_vector()
    def get_aligned_face_landmarks(self,net):
        img= self.img
@@ -89,48 +76,17 @@ class Face_folder(object):
        bb_aligned=None
        alignedFace=None
        if img is not None:
            #print('img{}'.format(img))
            cv2.imwrite('source.jpg',img)
            h,w,c = img.shape
            #img = cv2.resize(img,(int(w*0.2),int(h*0.2)))
            #h,w = img.shape[:2]
            #cv2.imwrite('res.jpg',img)
            #detector = dlib.get_frontal_face_detector()
            #detpath = 'models/mmod_human_face_detector.dat'
            #detector = dlib.face_detection_model_v1(detpath)
            #detector = dlib.cnn_face_detection_model_v1(detpath)
            #bb = detector(img)
            bb = align.getLargestFaceBoundingBox(img)
            if bb is None:
                center = (w/2,h/2)
                angle90 = 270
                scale = 1.0
                M = cv2.getRotationMatrix2D(center, angle90, scale)
                rotated90 = cv2.warpAffine(img, M, (h, w))
                #img = cv2.flip(img,1)
                #img = cv2.flip(img,1)
                cv2.imwrite('flip.jpg',rotated90)
                faces = detector(img,1)
                #print('faces',faces)
                bb = faces[0]
                #print('face len',len(faces))
                bb = align.getLargestFaceBoundingBox(rotated90)
            #print('bb',bb)
            alignedFace = align.align(
                112,
                img,
                bb,
                landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
            #print('aligned face{}'.format(alignedFace))
            #print('ali size',alignedFace.shape)
            alignedFace = cv2.resize(alignedFace,(640,640))
            #cv2.imwrite('/srv/apps/gmface/align.jpg',alignedFace)
            #cv2.imwrite('/srv/apps/gmface/org.jpg',img)
            rects=detector(alignedFace,1)
            #print('rects {}'.format(rects))
        else:
            print("input img is empty")
            raise NoFaces
        if len(rects)>1:
            raise TooManyFaces
        if len(rects)==0:
@@ -138,7 +94,6 @@ class Face_folder(object):
        landmarks = net.loadimageAndlandmark98(alignedFace)
        lk_dict={}
        im = alignedFace.copy()
        for i, p in enumerate(landmarks):
            d={}
            d['x']=p[0]
@@ -148,40 +103,9 @@ class Face_folder(object):
            name='p_'+str(i)
            points[name]=d
            lk_dict.update(points)
            pos = (int(p[0]), int(p[1]))
            cv2.putText(im, str(i), pos,
                        fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                        fontScale=0.4,
                        color=(0, 0, 255))
            cv2.circle(im, pos, 3, color=(0, 255, 255))
            #cv2.imwrite('3.jpg',im)
        #print('lk_dict {}'.format(lk_dict))
        return alignedFace,lk_dict
    def get_face_landmarks(self):
        img= self.img
        if img is not None:
            rgbImg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            rects=detector(img,1)
        else:
            print("input img is empty")
        if len(rects)>1:
            raise TooManyFaces
        if len(rects)==0:
            raise NoFaces
        lk_dict={}
        for i, p in enumerate(predictor(img,rects[0]).parts()):
            d={}
            d['x']=p.x
            d['y']=p.y
            points={}
            name='p_'+str(i)
            points[name]=d
            lk_dict.update(points)
        face=rects[0]
        return face,lk_dict
    def build_landmark(self,net):
        """build landmark using dlib"""
        h,w,c=self.img.shape
......
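
Taken together, the face.py hunks above narrow the Face_folder entry point: instead of an image_url/img_path pair it now takes an already-decoded image array, and get_vector follows suit. Below is a minimal usage sketch of the new interface, assuming face.py from this repository is importable and that `net` is the landmark network the code passes around (apparently a 98-point model, judging by loadimageAndlandmark98; its construction is not shown in this diff, and the image path and part name are placeholders):

    import cv2
    from face import Face_folder, NoFaces, TooManyFaces

    net = ...  # assumed: the landmark network built elsewhere in the project (not shown in this diff)
    img = cv2.imread('/path/to/some_face.jpg')  # placeholder path; cv2.imread returns a BGR numpy array
    try:
        # 'eye' is one of the keys of Face_folder.PARTS ('eye', 'nose', 'eyebrow', 'chin', 'contour', 'lip')
        vec = Face_folder.get_vector(img, 'eye', net)
    except (NoFaces, TooManyFaces):
        vec = None  # no usable face: too-small image, no detection, or more than one face

Callers that previously passed a URL or file path are now expected to download or read and decode the image themselves before calling get_vector.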
File added (×2)
#import partsim
import math
import cv2
from face import Face_folder,NoFaces,TooManyFaces
@@ -5,10 +6,8 @@ from numpy import array
import argparse
import caffe
import numpy as np
from align import AlignDlib
import os
parser = argparse.ArgumentParser(description='face part classify')
parser.add_argument('-p', '--parts', help="input part")
@@ -25,8 +24,8 @@ if __name__ == '__main__':
    img1 = cv2.imread(imgpath1)
    img2 = cv2.imread(imgpath2)
    v1 = array(Face_folder.get_vector(None,imgpath1,part,net))
    v2 = array(Face_folder.get_vector(None,imgpath2,part,net))
    v1 = array(Face_folder.get_vector(img1,part,net))
    v2 = array(Face_folder.get_vector(img2,part,net))
    print('v1',v1)
    print('v2',v2)
......
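
The compare.py hunk mirrors the API change: the script decodes both images with cv2.imread and passes the arrays straight to Face_folder.get_vector. The diff stops after the two print calls, so how v1 and v2 are actually compared is not visible here; the sketch below uses plain cosine similarity as one common choice (an assumption, not taken from this commit):

    import numpy as np

    def cosine_similarity(v1, v2):
        # Cosine similarity between two 1-D part vectors; 1.0 means the vectors point the same way.
        v1 = np.asarray(v1, dtype=np.float64).ravel()
        v2 = np.asarray(v2, dtype=np.float64).ravel()
        denom = np.linalg.norm(v1) * np.linalg.norm(v2)
        return float(np.dot(v1, v2) / denom) if denom else 0.0

    # e.g. print('similarity', cosine_similarity(v1, v2))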
File added