Commit 57dc7eca authored by 宋柯

add

parent 03952cf7
MIT License
Copyright (c) 2019
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# BeautyGAN
Official implementation of the ACM MM 2018 paper "BeautyGAN: Instance-level Facial Makeup Transfer with Deep Generative Adversarial Network".
The dataset is available on the project page: http://colalab.org/projects/BeautyGAN
## Still under construction
from easydict import EasyDict as edict
default = edict()
default.snapshot_path = './snapshot/'
default.vis_path = './visualization/'
default.log_path = './log/'
default.data_path = './data/'
config = edict()
# setting for cycleGAN
# Hyper-parameters
config.multi_gpu = False
config.gpu_ids = [0,1,2]
# Setting path
config.snapshot_path = default.snapshot_path
config.pretrained_path = default.snapshot_path
config.vis_path = default.vis_path
config.log_path = default.log_path
config.data_path = default.data_path
# Setting training parameters
config.task_name = ""
config.G_LR = 2e-5
config.D_LR = 2e-5
config.beta1 = 0.5
config.beta2 = 0.999
config.c_dim = 2
config.num_epochs = 200
config.num_epochs_decay = 100
config.ndis = 1
config.snapshot_step = 260
config.log_step = 10
config.vis_step = config.snapshot_step
config.batch_size = 1
config.lambda_A = 10.0
config.lambda_B = 10.0
config.lambda_idt = 0.5
config.img_size = 256
config.g_conv_dim = 64
config.d_conv_dim = 64
config.g_repeat_num = 6
config.d_repeat_num = 3
config.checkpoint = ""
config.test_model = "51_2000"
# Setting datasets
dataset_config = edict()
dataset_config.name = 'MAKEUP'
dataset_config.dataset_path = default.data_path
dataset_config.img_size = 256
def generate_config(_network, _dataset):
for k, v in dataset_config[_dataset].items():
if k in config:
config[k] = v
elif k in default:
default[k] = v
def merge_cfg_arg(config, args):
config.gpu_ids = [int(i) for i in args.gpus.split(',')]
config.batch_size = args.batch_size
config.vis_step = args.vis_step
config.snapshot_step = args.vis_step
config.ndis = args.ndis
config.lambda_cls = args.lambda_cls
config.lambda_A = args.lambda_rec
config.lambda_B = args.lambda_rec
config.G_LR = args.LR
config.D_LR = args.LR
config.num_epochs_decay = args.decay
config.num_epochs = args.epochs
config.whichG = args.whichG
config.task_name = args.task_name
config.norm = args.norm
config.lambda_his = args.lambda_his
config.lambda_vgg = args.lambda_vgg
    config.cls_list = args.cls_list.split(',')
    config.content_layer = args.content_layer.split(',')
config.direct = args.direct
config.lips = args.lips
config.skin = args.skin
config.eye = args.eye
config.g_repeat = args.g_repeat
config.lambda_his_lip = args.lambda_his
config.lambda_his_skin_1 = args.lambda_his * args.lambda_skin_1
config.lambda_his_skin_2 = args.lambda_his * args.lambda_skin_2
config.lambda_his_eye = args.lambda_his * args.lambda_eye
print(config)
if "checkpoint" in config.items():
config.checkpoint = args.checkpoint
if "test_model" in config.items():
config.test_model = args.test_model
return config
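
# ---------------------------------------------------------------------------
# Usage sketch (this is how train.py / test.py below consume this module):
#
#   from config import config, dataset_config, merge_cfg_arg
#   args = parse_args()                   # argparse.Namespace from the CLI
#   config = merge_cfg_arg(config, args)  # CLI flags override these defaults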
from .makeup import MAKEUP
import os
import torch
import random
import linecache
from torch.utils.data import Dataset
from PIL import Image
class MAKEUP(Dataset):
def __init__(self, image_path, transform, mode, transform_mask, cls_list):
self.image_path = image_path
self.transform = transform
self.mode = mode
self.transform_mask = transform_mask
self.cls_list = cls_list
self.cls_A = cls_list[0]
self.cls_B = cls_list[1]
for cls in self.cls_list:
setattr(self, "train_" + cls + "_list_path", os.path.join(self.image_path, "train_" + cls + ".txt"))
setattr(self, "train_" + cls + "_lines", open(getattr(self, "train_" + cls + "_list_path"), 'r').readlines())
setattr(self, "num_of_train_" + cls + "_data", len(getattr(self, "train_" + cls + "_lines")))
for cls in self.cls_list:
if self.mode == "test_all":
setattr(self, "test_" + cls + "_list_path", os.path.join(self.image_path, "test_" + cls + "_all.txt"))
setattr(self, "test_" + cls + "_lines", open(getattr(self, "test_" + cls + "_list_path"), 'r').readlines())
setattr(self, "num_of_test_" + cls + "_data", len(getattr(self, "test_" + cls + "_lines")))
else:
setattr(self, "test_" + cls + "_list_path", os.path.join(self.image_path, "test_" + cls + ".txt"))
setattr(self, "test_" + cls + "_lines", open(getattr(self, "test_" + cls + "_list_path"), 'r').readlines())
setattr(self, "num_of_test_" + cls + "_data", len(getattr(self, "test_" + cls + "_lines")))
        print('Started preprocessing the dataset...')
        self.preprocess()
        print('Finished preprocessing the dataset.')
def preprocess(self):
for cls in self.cls_list:
setattr(self, "train_" + cls + "_filenames", [])
setattr(self, "train_" + cls + "_mask_filenames", [])
lines = getattr(self, "train_" + cls + "_lines")
random.shuffle(lines)
for i, line in enumerate(lines):
splits = line.split()
getattr(self, "train_" + cls + "_filenames").append(splits[0])
getattr(self, "train_" + cls + "_mask_filenames").append(splits[1])
for cls in self.cls_list:
setattr(self, "test_" + cls + "_filenames", [])
setattr(self, "test_" + cls + "_mask_filenames", [])
lines = getattr(self, "test_" + cls + "_lines")
for i, line in enumerate(lines):
splits = line.split()
getattr(self, "test_" + cls + "_filenames").append(splits[0])
getattr(self, "test_" + cls + "_mask_filenames").append(splits[1])
if self.mode == "test_baseline":
setattr(self, "test_" + self.cls_A + "_filenames", os.listdir(os.path.join(self.image_path, "baseline", "org_aligned")))
setattr(self, "num_of_test_" + self.cls_A + "_data", len(os.listdir(os.path.join(self.image_path, "baseline", "org_aligned"))))
setattr(self, "test_" + self.cls_B + "_filenames", os.listdir(os.path.join(self.image_path, "baseline", "ref_aligned")))
setattr(self, "num_of_test_" + self.cls_B + "_data", len(os.listdir(os.path.join(self.image_path, "baseline", "ref_aligned"))))
def __getitem__(self, index):
if self.mode == 'train' or self.mode == 'train_finetune':
index_A = random.randint(0, getattr(self, "num_of_train_" + self.cls_A + "_data") - 1)
index_B = random.randint(0, getattr(self, "num_of_train_" + self.cls_B + "_data") - 1)
image_A = Image.open(os.path.join(self.image_path, getattr(self, "train_" + self.cls_A + "_filenames")[index_A])).convert("RGB")
image_B = Image.open(os.path.join(self.image_path, getattr(self, "train_" + self.cls_B + "_filenames")[index_B])).convert("RGB")
mask_A = Image.open(os.path.join(self.image_path, getattr(self, "train_" + self.cls_A + "_mask_filenames")[index_A]))
mask_B = Image.open(os.path.join(self.image_path, getattr(self, "train_" + self.cls_B + "_mask_filenames")[index_B]))
return self.transform(image_A), self.transform(image_B), self.transform_mask(mask_A), self.transform_mask(mask_B)
if self.mode in ['test', 'test_all']:
#"""
image_A = Image.open(os.path.join(self.image_path, getattr(self, "test_" + self.cls_A + "_filenames")[index // getattr(self, 'num_of_test_' + self.cls_list[1] + '_data')])).convert("RGB")
image_B = Image.open(os.path.join(self.image_path, getattr(self, "test_" + self.cls_B + "_filenames")[index % getattr(self, 'num_of_test_' + self.cls_list[1] + '_data')])).convert("RGB")
return self.transform(image_A), self.transform(image_B)
if self.mode == "test_baseline":
image_A = Image.open(os.path.join(self.image_path, "baseline", "org_aligned", getattr(self, "test_" + self.cls_A + "_filenames")[index // getattr(self, 'num_of_test_' + self.cls_list[1] + '_data')])).convert("RGB")
image_B = Image.open(os.path.join(self.image_path, "baseline", "ref_aligned", getattr(self, "test_" + self.cls_B + "_filenames")[index % getattr(self, 'num_of_test_' + self.cls_list[1] + '_data')])).convert("RGB")
return self.transform(image_A), self.transform(image_B)
def __len__(self):
if self.mode == 'train' or self.mode == 'train_finetune':
num_A = getattr(self, 'num_of_train_' + self.cls_list[0] + '_data')
num_B = getattr(self, 'num_of_train_' + self.cls_list[1] + '_data')
return max(num_A, num_B)
elif self.mode in ['test', "test_baseline", 'test_all']:
num_A = getattr(self, 'num_of_test_' + self.cls_list[0] + '_data')
num_B = getattr(self, 'num_of_test_' + self.cls_list[1] + '_data')
return num_A * num_B
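
# ---------------------------------------------------------------------------
# Usage sketch (class names and paths are hypothetical): each list file
# "train_<cls>.txt" must hold "<image_path> <mask_path>" pairs, as parsed in
# preprocess() above.
#
#   from torchvision import transforms
#   t = transforms.Compose([transforms.Resize(256), transforms.ToTensor()])
#   ds = MAKEUP("./data/makeup", transform=t, mode="train",
#               transform_mask=t, cls_list=["nonmakeup", "makeup"])
#   img_A, img_B, mask_A, mask_B = ds[0]   # a random pair of images plus masks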
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from data_loaders.makeup import MAKEUP
import torch
import numpy as np
import PIL
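
# Custom ToTensor for segmentation masks: unlike torchvision's ToTensor, this
# converts an HWC PIL image to a CHW tensor WITHOUT dividing by 255, so mask
# pixels keep their raw integer label values.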
def ToTensor(pic):
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float()
else:
return img
def get_loader(data_config, config, mode="train"):
# return the DataLoader
dataset_name = data_config.name
transform = transforms.Compose([
transforms.Resize(config.img_size),
transforms.ToTensor(),
transforms.Normalize([0.5,0.5,0.5],[0.5,0.5,0.5])])
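    # Normalize with mean=std=0.5 maps ToTensor's [0, 1] output to [-1, 1],
    # the usual range for tanh-output GAN generators.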
transform_mask = transforms.Compose([
transforms.Resize(config.img_size, interpolation=PIL.Image.NEAREST),
ToTensor])
print(config.data_path)
#"""
if mode=="train":
dataset_train = eval(dataset_name)(data_config.dataset_path, transform=transform, mode= "train",\
transform_mask=transform_mask, cls_list = config.cls_list)
dataset_test = eval(dataset_name)(data_config.dataset_path, transform=transform, mode= "test",\
transform_mask=transform_mask, cls_list = config.cls_list)
#"""
data_loader_train = DataLoader(dataset=dataset_train,
batch_size=config.batch_size,
shuffle=True)
if mode=="test":
data_loader_train = None
dataset_test = eval(dataset_name)(data_config.dataset_path, transform=transform, mode= "test",\
transform_mask =transform_mask, cls_list = config.cls_list)
data_loader_test = DataLoader(dataset=dataset_test,
batch_size=1,
shuffle=False)
return [data_loader_train, data_loader_test]
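
# ---------------------------------------------------------------------------
# Usage sketch: build both loaders from the module-level config objects.
#
#   from config import config, dataset_config
#   train_loader, test_loader = get_loader(dataset_config, config, mode="train")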
import os
import numpy as np
images_data_path = '/Users/edz/Downloads/makeupdataset/all/images'
segs_data_path = '/Users/edz/Downloads/makeupdataset/all/segs'
images_makeup = os.path.join(images_data_path,"makeup")
segs_makeup = os.path.join(segs_data_path,"makeup")
images_nonmakeup = os.path.join(images_data_path,"non-makeup")
segs_nonmakeup = os.path.join(segs_data_path,"non-makeup")
images_makeup_files = os.listdir(images_makeup)
images_nonmakeup_files = os.listdir(images_nonmakeup)
segs_makeup_files = os.listdir(segs_makeup)
segs_nonmakeup_files = os.listdir(segs_nonmakeup)
print("images_makeup_files:{}个".format(len(images_makeup_files)))
print("images_nonmakeup_files:{}个".format(len(images_nonmakeup_files)))
print("segs_makeup_files:{}个".format(len(segs_makeup_files)))
print("segs_nonmakeup_files:{}个".format(len(segs_nonmakeup_files)))
images_makeup_files = list(set(images_makeup_files)&set(segs_makeup_files))
images_nonmakeup_files = list(set(images_nonmakeup_files)&set(segs_nonmakeup_files))
print("images_makeup_files:{}个".format(len(images_makeup_files)))
print("images_nonmakeup_files:{}个".format(len(images_nonmakeup_files)))
np.random.shuffle(images_makeup_files)
np.random.shuffle(images_nonmakeup_files)
with open("./train_makeup.txt","w") as f:
for images_makeup_file in images_makeup_files[:2448]:
f.write("{} {}\n".format(os.path.join(images_makeup,images_makeup_file),os.path.join(segs_makeup,images_makeup_file)))
with open("./test_makeup.txt","w") as f:
for images_makeup_file in images_makeup_files[2448:]:
f.write("{} {}\n".format(os.path.join(images_makeup,images_makeup_file),os.path.join(segs_makeup,images_makeup_file)))
with open("./train_nonmakeup.txt","w") as f:
for images_nonmakeup_file in images_nonmakeup_files[:1004]:
f.write("{} {}\n".format(os.path.join(images_nonmakeup,images_nonmakeup_file),os.path.join(segs_nonmakeup,images_nonmakeup_file)))
with open("./test_nonmakeup.txt","w") as f:
for images_nonmakeup_file in images_nonmakeup_files[1004:]:
f.write("{} {}\n".format(os.path.join(images_nonmakeup,images_nonmakeup_file),os.path.join(segs_nonmakeup,images_nonmakeup_file)))
import numpy as np
import torch
import copy
def cal_hist(image):
"""
cal cumulative hist for channel list
"""
hists = []
for i in range(0, 3):
channel = image[i]
# channel = image[i, :, :]
channel = torch.from_numpy(channel)
# hist, _ = np.histogram(channel, bins=256, range=(0,255))
hist = torch.histc(channel, bins=256, min=0, max=256)
hist = hist.numpy()
# refHist=hist.view(256,1)
        total = hist.sum()
        pdf = [v / total for v in hist]
        for j in range(1, 256):
            pdf[j] = pdf[j - 1] + pdf[j]
hists.append(pdf)
return hists
def cal_trans(ref, adj):
"""
calculate transfer function
algorithm refering to wiki item: Histogram matching
"""
table = list(range(0, 256))
for i in list(range(1, 256)):
for j in list(range(1, 256)):
if ref[i] >= adj[j - 1] and ref[i] <= adj[j]:
table[i] = j
break
table[255] = 255
return table
def histogram_matching(dstImg, refImg, index):
"""
perform histogram matching
dstImg is transformed to have the same the histogram with refImg's
index[0], index[1]: the index of pixels that need to be transformed in dstImg
index[2], index[3]: the index of pixels that to compute histogram in refImg
"""
index = [x.cpu().numpy() for x in index]
dstImg = dstImg.detach().cpu().numpy()
refImg = refImg.detach().cpu().numpy()
dst_align = [dstImg[i, index[0], index[1]] for i in range(0, 3)]
ref_align = [refImg[i, index[2], index[3]] for i in range(0, 3)]
hist_ref = cal_hist(ref_align)
hist_dst = cal_hist(dst_align)
tables = [cal_trans(hist_dst[i], hist_ref[i]) for i in range(0, 3)]
mid = copy.deepcopy(dst_align)
for i in range(0, 3):
for k in range(0, len(index[0])):
dst_align[i][k] = tables[i][int(mid[i][k])]
for i in range(0, 3):
dstImg[i, index[0], index[1]] = dst_align[i]
# dstImg = torch.FloatTensor(dstImg).cuda()
dstImg = torch.FloatTensor(dstImg)
return dstImg
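
# ---------------------------------------------------------------------------
# Minimal self-contained sketch (synthetic data, not from the paper): match a
# random "destination" patch to a "reference" patch over the full pixel grid.
# `index` follows the convention documented in histogram_matching().
if __name__ == '__main__':
    h = w = 8
    ys = torch.arange(h).view(-1, 1).repeat(1, w).view(-1)  # row index of every pixel
    xs = torch.arange(w).repeat(h)                           # column index of every pixel
    index = [ys, xs, ys, xs]                                 # transform all of dst, reference all of ref
    dst = torch.randint(0, 256, (3, h, w)).float()
    ref = torch.randint(0, 256, (3, h, w)).float()
    out = histogram_matching(dst, ref, index)
    print(out.shape)                                         # torch.Size([3, 8, 8])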
import torch
import torch.nn as nn
from torch.autograd import Variable
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
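
# ---------------------------------------------------------------------------
# Usage sketch (shapes illustrative): with use_lsgan=True this is an MSE loss
# against an all-ones (real) or all-zeros (fake) target shaped like the
# discriminator output. (torch.autograd.Variable above is legacy PyTorch;
# plain tensors work in >= 0.4.)
#
#   criterion = GANLoss(use_lsgan=True)
#   d_out = torch.rand(1, 1, 30, 30)     # hypothetical patch-discriminator output
#   loss_real = criterion(d_out, True)   # pulls predictions toward 1.0
#   loss_fake = criterion(d_out, False)  # pulls predictions toward 0.0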
import torch
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
class SpectralNorm(object):
def __init__(self):
self.name = "weight"
#print(self.name)
self.power_iterations = 1
def compute_weight(self, module):
u = getattr(module, self.name + "_u")
v = getattr(module, self.name + "_v")
w = getattr(module, self.name + "_bar")
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
# sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
return w / sigma.expand_as(w)
@staticmethod
def apply(module):
name = "weight"
fn = SpectralNorm()
try:
u = getattr(module, name + "_u")
v = getattr(module, name + "_v")
w = getattr(module, name + "_bar")
except AttributeError:
w = getattr(module, name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
w_bar = Parameter(w.data)
#del module._parameters[name]
module.register_parameter(name + "_u", u)
module.register_parameter(name + "_v", v)
module.register_parameter(name + "_bar", w_bar)
# remove w from parameter list
del module._parameters[name]
setattr(module, name, fn.compute_weight(module))
# recompute weight before every forward()
module.register_forward_pre_hook(fn)
return fn
def remove(self, module):
weight = self.compute_weight(module)
delattr(module, self.name)
del module._parameters[self.name + '_u']
del module._parameters[self.name + '_v']
del module._parameters[self.name + '_bar']
module.register_parameter(self.name, Parameter(weight.data))
def __call__(self, module, inputs):
setattr(module, self.name, self.compute_weight(module))
def spectral_norm(module):
SpectralNorm.apply(module)
return module
def remove_spectral_norm(module):
name = 'weight'
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, SpectralNorm) and hook.name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError("spectral_norm of '{}' not found in {}"
.format(name, module))
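
# ---------------------------------------------------------------------------
# Usage sketch: wrap a layer so its weight is divided by an estimate of its
# largest singular value (one power iteration) before every forward pass.
#
#   import torch.nn as nn
#   conv = spectral_norm(nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1))
#   y = conv(torch.randn(1, 3, 64, 64))  # weight re-normalized by the pre-hook
#   conv = remove_spectral_norm(conv)    # restore a plain weight Parameter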
import os
import argparse
from torch.backends import cudnn
from config import config, dataset_config, merge_cfg_arg
from dataloder import get_loader
from solver_cycle import Solver_cycleGAN
from solver_makeup import Solver_makeupGAN
def parse_args():
parser = argparse.ArgumentParser(description='Train GAN')
# general
parser.add_argument('--data_path', default='makeup/makeup_final/', type=str, help='training and test data path')
    parser.add_argument('--dataset', default='MAKEUP', type=str, help='dataset name; MAKEUP is the two-domain setting, MMAKEUP the multi-domain setting')
parser.add_argument('--gpus', default='0', type=str, help='GPU device to train with')
parser.add_argument('--batch_size', default='1', type=int, help='batch_size')
parser.add_argument('--vis_step', default='1260', type=int, help='steps between visualization')
parser.add_argument('--task_name', default='', type=str, help='task name')
    parser.add_argument('--ndis', default='1', type=int, help='discriminator updates per generator update')
parser.add_argument('--LR', default="2e-4", type=float, help='Learning rate')
    parser.add_argument('--decay', default='0', type=int, help='epoch from which to start decaying the learning rate')
parser.add_argument('--model', default='makeupGAN', type=str, help='which model to use: cycleGAN/ makeupGAN')
    parser.add_argument('--epochs', default='300', type=int, help='number of epochs')
parser.add_argument('--whichG', default='branch', type=str, help='which Generator to choose, normal/branch, branch means two input branches')
    parser.add_argument('--norm', default='SN', type=str, help='normalization of discriminator, SN means spectral normalization, none means no normalization')
    parser.add_argument('--d_repeat', default='3', type=int, help='number of repeated Res-blocks in the discriminator')
    parser.add_argument('--g_repeat', default='6', type=int, help='number of repeated Res-blocks in the generator')
parser.add_argument('--lambda_cls', default='1', type=float, help='the lambda_cls weight')
parser.add_argument('--lambda_rec', default='10', type=int, help='lambda_A and lambda_B')
parser.add_argument('--lambda_his', default='1', type=float, help='histogram loss on lips')
    parser.add_argument('--lambda_skin_1', default='0.1', type=float, help='histogram loss weight on skin equals lambda_his * lambda_skin_1')
    parser.add_argument('--lambda_skin_2', default='0.1', type=float, help='histogram loss weight on skin equals lambda_his * lambda_skin_2')
    parser.add_argument('--lambda_eye', default='1', type=float, help='histogram loss weight on eyes equals lambda_his * lambda_eye')
    parser.add_argument('--content_layer', default='r41', type=str, help='VGG layer used to extract content features')
parser.add_argument('--lambda_vgg', default='5e-3', type=float, help='the param of vgg loss')
parser.add_argument('--cls_list', default='A_OM,B_OM', type=str, help='the classes we choose')
    parser.add_argument('--direct', action="store_true", default=False, help='add the local cosmetic loss from the start (unified training)')
parser.add_argument('--finetune', action="store_true", default=False, help='finetune the network or not')
parser.add_argument('--lips', action="store_true", default=False, help='whether to finetune lips color')
parser.add_argument('--skin', action="store_true", default=False, help='whether to finetune foundation color')
parser.add_argument('--eye', action="store_true", default=False, help='whether to finetune eye shadow color')
parser.add_argument('--test_model', default='20_2520', type=str, help='which one to test')
args = parser.parse_args()
return args
def test_net():
# enable cudnn
cudnn.benchmark = True
# get the DataLoader
data_loaders = get_loader(dataset_config, config, mode="test")
#get the solver
if args.model == 'cycleGAN':
solver = Solver_cycleGAN(data_loaders, config, dataset_config)
elif args.model =='makeupGAN':
solver = Solver_makeupGAN(data_loaders, config, dataset_config)
    else:
        print("unsupported model: {}".format(args.model))
        exit()
solver.test()
if __name__ == '__main__':
args = parse_args()
print("Call with args:")
print(args)
config = merge_cfg_arg(config, args)
config.test_model = args.test_model
print("The config is:")
print(config)
# Create the directories if not exist
if not os.path.exists(config.data_path):
print("No datapath!!")
dataset_config.dataset_path = os.path.join(config.data_path, args.data_path)
    test_net()
#!/usr/bin/env bash
# video! Works very poorly on VBT, not sure why
--img_size 361 --cls_list wild_before,RE_ORG --batch_size 16 --test_model 66_2520
--img_size 361 --cls_list A_before,RE_ORG --batch_size 1 --test_model 66_2520
--img_size 256 --cls_list wild_256,RE_REF --batch_size 1 --test_model 66_2520
# Test how it performs on images that already have makeup
--img_size 256 --cls_list RE_REF,RE_ORI --batch_size 1 --test_model 66_2520
--img_size 256 --cls_list RE_ORG,wild_256 --batch_size 1 --test_model 66_2520
# new
--task_name default --cls_list wild_256,RE_REF --batch_size 1 --test_model 26_2520
# From https://github.com/openai/improved-gan/blob/master/inception_score/model.py
# Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import glob
import scipy.misc
import math
import os
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None
#os.environ["CUDA_VISIBLE_DEVICES"] = '0'
config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.4
config.gpu_options.allow_growth = True
# Call this function with a list of images. Each element should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
assert(type(images) == list)
assert(type(images[0]) == np.ndarray)
assert(len(images[0].shape) == 3)
assert(np.max(images[0]) > 10)
assert(np.min(images[0]) >= 0.0)
inps = []
for img in images:
img = img.astype(np.float32)
inps.append(np.expand_dims(img, 0))
bs = 100
with tf.Session(config = config) as sess:
preds = []
n_batches = int(math.ceil(float(len(inps)) / float(bs)))
for i in range(n_batches):
# sys.stdout.write(".")
# sys.stdout.flush()
inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
inp = np.concatenate(inp, 0)
pred = sess.run(softmax, {'ExpandDims:0': inp})
preds.append(pred)
preds = np.concatenate(preds, 0)
scores = []
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
# This function is called automatically.
def _init_inception():
global softmax
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(MODEL_DIR, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
with tf.gfile.FastGFile(os.path.join(
MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# Works with an arbitrary minibatch size.
with tf.Session(config=config) as sess:
pool3 = sess.graph.get_tensor_by_name('pool_3:0')
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o._shape = tf.TensorShape(new_shape)
w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
logits = tf.matmul(tf.squeeze(pool3), w)
softmax = tf.nn.softmax(logits)
if softmax is None:
_init_inception()
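
# ---------------------------------------------------------------------------
# Usage sketch (synthetic inputs; real use passes generated RGB images as
# float arrays in the 0-255 range):
#
#   imgs = [np.random.randint(0, 256, (256, 256, 3)).astype(np.float32)
#           for _ in range(100)]
#   mean, std = get_inception_score(imgs, splits=10)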
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import collections
import time
#import cPickle as pickle
_since_beginning = collections.defaultdict(lambda: {})
_since_last_flush = collections.defaultdict(lambda: {})
_iter = [0]
def tick():
_iter[0] += 1
def plot(name, value):
_since_last_flush[name][_iter[0]] = value
#print(_since_last_flush)
def flush(task_name):
prints = []
for name, vals in _since_last_flush.items():
#prints.append("{}\t{}".format(name, np.mean(vals.values())))
_since_beginning[name].update(vals)
"""
print(name)
print("#######################")
print(_since_beginning[name])
print("#######################")
print(_since_beginning[name].keys())
print("#######################")
print(list(_since_beginning[name].keys()))
print("#######################")
"""
x_vals = np.sort(list(_since_beginning[name].keys()))
y_vals = [_since_beginning[name][x] for x in x_vals]
plt.clf()
plt.plot(x_vals, y_vals)
plt.xlabel('iteration')
plt.ylabel(name)
plt.savefig(name.replace(' ', '_')+ "_" + task_name +"_" + '.png')
"""
print "iter {}\t{}".format(_iter[0], "\t".join(prints))
_since_last_flush.clear()
with open('log.pkl', 'wb') as f:
pickle.dump(dict(_since_beginning), f, pickle.HIGHEST_PROTOCOL)
"""
import os
import argparse
from torch.backends import cudnn
from config import config, dataset_config, merge_cfg_arg
from dataloder import get_loader
from solver_cycle import Solver_cycleGAN
from solver_makeup import Solver_makeupGAN
def parse_args():
parser = argparse.ArgumentParser(description='Train GAN')
# general
parser.add_argument('--data_path', default='makeup/makeup_final/', type=str, help='training and test data path')
    parser.add_argument('--dataset', default='MAKEUP', type=str, help='dataset name; MAKEUP is the two-domain setting, MMAKEUP the multi-domain setting')
parser.add_argument('--gpus', default='0', type=str, help='GPU device to train with')
parser.add_argument('--batch_size', default='1', type=int, help='batch_size')
parser.add_argument('--vis_step', default='1260', type=int, help='steps between visualization')
parser.add_argument('--task_name', default='', type=str, help='task name')
parser.add_argument('--checkpoint', default='', type=str, help='checkpoint to load')
    parser.add_argument('--ndis', default='1', type=int, help='discriminator updates per generator update')
parser.add_argument('--LR', default="2e-4", type=float, help='Learning rate')
    parser.add_argument('--decay', default='0', type=int, help='epoch from which to start decaying the learning rate')
parser.add_argument('--model', default='makeupGAN', type=str, help='which model to use: cycleGAN/ makeupGAN')
    parser.add_argument('--epochs', default='300', type=int, help='number of epochs')
parser.add_argument('--whichG', default='branch', type=str, help='which Generator to choose, normal/branch, branch means two input branches')
    parser.add_argument('--norm', default='SN', type=str, help='normalization of discriminator, SN means spectral normalization, none means no normalization')
    parser.add_argument('--d_repeat', default='3', type=int, help='number of repeated Res-blocks in the discriminator')
    parser.add_argument('--g_repeat', default='6', type=int, help='number of repeated Res-blocks in the generator')
parser.add_argument('--lambda_cls', default='1', type=float, help='the lambda_cls weight')
parser.add_argument('--lambda_rec', default='10', type=int, help='lambda_A and lambda_B')
parser.add_argument('--lambda_his', default='1', type=float, help='histogram loss on lips')
    parser.add_argument('--lambda_skin_1', default='0.1', type=float, help='histogram loss weight on skin equals lambda_his * lambda_skin_1')
    parser.add_argument('--lambda_skin_2', default='0.1', type=float, help='histogram loss weight on skin equals lambda_his * lambda_skin_2')
    parser.add_argument('--lambda_eye', default='1', type=float, help='histogram loss weight on eyes equals lambda_his * lambda_eye')
    parser.add_argument('--content_layer', default='r41', type=str, help='VGG layer used to extract content features')
parser.add_argument('--lambda_vgg', default='5e-3', type=float, help='the param of vgg loss')
parser.add_argument('--cls_list', default='SYMIX,MAKEMIX', type=str, help='the classes of makeup to train')
    parser.add_argument('--direct', action="store_true", default=True, help='add the local cosmetic loss from the start (unified training)')
parser.add_argument('--lips', action="store_true", default=True, help='whether to finetune lips color')
parser.add_argument('--skin', action="store_true", default=True, help='whether to finetune foundation color')
parser.add_argument('--eye', action="store_true", default=True, help='whether to finetune eye shadow color')
args = parser.parse_args()
return args
def train_net():
# enable cudnn
cudnn.benchmark = True
data_loaders = get_loader(dataset_config, config, mode="train") # return train&test
#get the solver
if args.model == 'cycleGAN':
solver = Solver_cycleGAN(data_loaders, config, dataset_config)
elif args.model =='makeupGAN':
solver = Solver_makeupGAN(data_loaders, config, dataset_config)
    else:
        print("unsupported model: {}".format(args.model))
        exit()
solver.train()
if __name__ == '__main__':
args = parse_args()
print("Call with args:")
print(args)
config = merge_cfg_arg(config, args)
dataset_config.name = args.dataset
print("The config is:")
print(config)
# Create the directories if not exist
if not os.path.exists(config.data_path):
print("No datapath!!")
exit()
if args.data_path != '':
dataset_config.dataset_path = os.path.join(config.data_path, args.data_path)
train_net()
import os
from PIL import Image
from easydict import EasyDict as edict
from torch.backends import cudnn
from config import config, default, dataset_config
# from solvers import *
from data_loaders import *
default.network = 'MULTICYCLEGAN'
#default.network = 'STARGAN'
default.dataset_choice = ['MAKEUP']
#default.dataset_choice = ['CELEBA']
default.model_base = 'RES'
default.loss_chosen = 'normal'
default.gpu_ids = [0,1,2]
config_default = config
def visualize_net():
# enable cudnn
cudnn.benchmark = True
# get the DataLoader
data_loaders = eval("get_loader_" + config.network)(default.dataset_choice, dataset_config, config, mode="test")
#get the solver
solver = eval("Solver_" + config.network +"_VIS")(default.dataset_choice, data_loaders, config, dataset_config)
solver.visualize()
if __name__ == '__main__':
print("Call with args:")
print(default)
config = config_default[default.network]
config.network = default.network
config.model_base = default.model_base
config.gpu_ids = default.gpu_ids
# Create the directories if not exist
if not os.path.exists(config.log_path):
os.makedirs(config.log_path)
if not os.path.exists(config.vis_path):
os.makedirs(config.vis_path)
if not os.path.exists(config.snapshot_path):
os.makedirs(config.snapshot_path)
if not os.path.exists(config.data_path):
print("No datapath!!")
    visualize_net()