Mirror of https://github.com/TMElyralab/MuseTalk.git (synced 2026-02-04 17:39:20 +08:00)

Commit: initial_commit

musetalk/utils/__init__.py | 5 (new file)
@@ -0,0 +1,5 @@
import sys
from os.path import abspath, dirname
current_dir = dirname(abspath(__file__))
parent_dir = dirname(current_dir)
sys.path.append(parent_dir+'/utils')

musetalk/utils/blending.py | 60 (new file)
@@ -0,0 +1,60 @@
from PIL import Image
import numpy as np
import cv2
from face_parsing import FaceParsing

fp = FaceParsing()

def get_crop_box(box, expand):
    x, y, x1, y1 = box
    x_c, y_c = (x+x1)//2, (y+y1)//2
    w, h = x1-x, y1-y
    s = int(max(w, h)//2*expand)
    crop_box = [x_c-s, y_c-s, x_c+s, y_c+s]
    return crop_box, s

def face_seg(image):
    seg_image = fp(image)
    if seg_image is None:
        print("error, no person_segment")
        return None

    seg_image = seg_image.resize(image.size)
    return seg_image

def get_image(image, face, face_box, upper_boundary_ratio=0.5, expand=1.2):
    #print(image.shape)
    #print(face.shape)

    body = Image.fromarray(image[:,:,::-1])
    face = Image.fromarray(face[:,:,::-1])

    x, y, x1, y1 = face_box
    #print(x1-x, y1-y)
    crop_box, s = get_crop_box(face_box, expand)
    x_s, y_s, x_e, y_e = crop_box
    face_position = (x, y)

    face_large = body.crop(crop_box)
    ori_shape = face_large.size

    mask_image = face_seg(face_large)
    mask_small = mask_image.crop((x-x_s, y-y_s, x1-x_s, y1-y_s))
    mask_image = Image.new('L', ori_shape, 0)
    mask_image.paste(mask_small, (x-x_s, y-y_s, x1-x_s, y1-y_s))

    # keep upper_boundary_ratio of talking area
    width, height = mask_image.size
    top_boundary = int(height * upper_boundary_ratio)
    modified_mask_image = Image.new('L', ori_shape, 0)
    modified_mask_image.paste(mask_image.crop((0, top_boundary, width, height)), (0, top_boundary))

    blur_kernel_size = int(0.1 * ori_shape[0] // 2 * 2) + 1
    mask_array = cv2.GaussianBlur(np.array(modified_mask_image), (blur_kernel_size, blur_kernel_size), 0)
    mask_image = Image.fromarray(mask_array)
    mask_image.save("./debug_mask.png")

    face_large.paste(face, (x-x_s, y-y_s, x1-x_s, y1-y_s))
    body.paste(face_large, crop_box[:2], mask_image)
    body = np.array(body)
    return body[:,:,::-1]
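
A minimal usage sketch for the blending helper above. The file names and the face box are placeholders, the import path assumes the repository root is on sys.path, and face_seg additionally needs the face_parsing module and its weights from the same package:

import cv2
from musetalk.utils.blending import get_image

frame = cv2.imread("frame_0000.png")          # full BGR frame from the source video
new_face = cv2.imread("generated_face.png")   # generated BGR face, resized to the face-box size
face_box = (110, 95, 366, 351)                # (x, y, x1, y1) from the face detector
result = get_image(frame, new_face, face_box) # BGR frame with the generated face blended back in
cv2.imwrite("blended_0000.png", result)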

musetalk/utils/dwpose/default_runtime.py | 54 (new file)
@@ -0,0 +1,54 @@
default_scope = 'mmpose'

# hooks
default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=50),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', interval=10),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(type='PoseVisualizationHook', enable=False),
    badcase=dict(
        type='BadCaseAnalysisHook',
        enable=False,
        out_dir='badcase',
        metric_type='loss',
        badcase_thr=5))

# custom hooks
custom_hooks = [
    # Synchronize model buffers such as running_mean and running_var in BN
    # at the end of each epoch
    dict(type='SyncBuffersHook')
]

# multi-processing backend
env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)

# visualizer
vis_backends = [
    dict(type='LocalVisBackend'),
    # dict(type='TensorboardVisBackend'),
    # dict(type='WandbVisBackend'),
]
visualizer = dict(
    type='PoseLocalVisualizer', vis_backends=vis_backends, name='visualizer')

# logger
log_processor = dict(
    type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)
log_level = 'INFO'
load_from = None
resume = False

# file I/O backend
backend_args = dict(backend='local')

# training/validation/testing progress
train_cfg = dict(by_epoch=True)
val_cfg = dict()
test_cfg = dict()

@@ -0,0 +1,257 @@
#_base_ = ['../../../_base_/default_runtime.py']
_base_ = ['default_runtime.py']

# runtime
max_epochs = 270
stage2_num_epochs = 30
base_lr = 4e-3
train_batch_size = 32
val_batch_size = 32

train_cfg = dict(max_epochs=max_epochs, val_interval=10)
randomness = dict(seed=21)

# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
    paramwise_cfg=dict(
        norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))

# learning rate
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=1.0e-5,
        by_epoch=False,
        begin=0,
        end=1000),
    dict(
        # use cosine lr from 150 to 300 epoch
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=max_epochs // 2,
        end=max_epochs,
        T_max=max_epochs // 2,
        by_epoch=True,
        convert_to_iter_based=True),
]

# automatically scaling LR based on the actual training batch size
auto_scale_lr = dict(base_batch_size=512)

# codec settings
codec = dict(
    type='SimCCLabel',
    input_size=(288, 384),
    sigma=(6., 6.93),
    simcc_split_ratio=2.0,
    normalize=False,
    use_dark=False)

# model settings
model = dict(
    type='TopdownPoseEstimator',
    data_preprocessor=dict(
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(
        _scope_='mmdet',
        type='CSPNeXt',
        arch='P5',
        expand_ratio=0.5,
        deepen_factor=1.,
        widen_factor=1.,
        out_indices=(4, ),
        channel_attention=True,
        norm_cfg=dict(type='SyncBN'),
        act_cfg=dict(type='SiLU'),
        init_cfg=dict(
            type='Pretrained',
            prefix='backbone.',
            checkpoint='https://download.openmmlab.com/mmpose/v1/projects/'
            'rtmpose/cspnext-l_udp-aic-coco_210e-256x192-273b7631_20230130.pth'  # noqa: E501
        )),
    head=dict(
        type='RTMCCHead',
        in_channels=1024,
        out_channels=133,
        input_size=codec['input_size'],
        in_featuremap_size=(9, 12),
        simcc_split_ratio=codec['simcc_split_ratio'],
        final_layer_kernel_size=7,
        gau_cfg=dict(
            hidden_dims=256,
            s=128,
            expansion_factor=2,
            dropout_rate=0.,
            drop_path=0.,
            act_fn='SiLU',
            use_rel_bias=False,
            pos_enc=False),
        loss=dict(
            type='KLDiscretLoss',
            use_target_weight=True,
            beta=10.,
            label_softmax=True),
        decoder=codec),
    test_cfg=dict(flip_test=True, ))

# base dataset settings
dataset_type = 'UBody2dDataset'
data_mode = 'topdown'
data_root = 'data/UBody/'

backend_args = dict(backend='local')

scenes = [
    'Magic_show', 'Entertainment', 'ConductMusic', 'Online_class', 'TalkShow',
    'Speech', 'Fitness', 'Interview', 'Olympic', 'TVShow', 'Singing',
    'SignLanguage', 'Movie', 'LiveVlog', 'VideoConference'
]

train_datasets = [
    dict(
        type='CocoWholeBodyDataset',
        data_root='data/coco/',
        data_mode=data_mode,
        ann_file='annotations/coco_wholebody_train_v1.0.json',
        data_prefix=dict(img='train2017/'),
        pipeline=[])
]

for scene in scenes:
    train_dataset = dict(
        type=dataset_type,
        data_root=data_root,
        data_mode=data_mode,
        ann_file=f'annotations/{scene}/train_annotations.json',
        data_prefix=dict(img='images/'),
        pipeline=[],
        sample_interval=10)
    train_datasets.append(train_dataset)

# pipelines
train_pipeline = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(type='RandomHalfBody'),
    dict(
        type='RandomBBoxTransform', scale_factor=[0.5, 1.5], rotate_factor=90),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(
        type='Albumentation',
        transforms=[
            dict(type='Blur', p=0.1),
            dict(type='MedianBlur', p=0.1),
            dict(
                type='CoarseDropout',
                max_holes=1,
                max_height=0.4,
                max_width=0.4,
                min_holes=1,
                min_height=0.2,
                min_width=0.2,
                p=1.0),
        ]),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
val_pipeline = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='PackPoseInputs')
]

train_pipeline_stage2 = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(type='RandomHalfBody'),
    dict(
        type='RandomBBoxTransform',
        shift_factor=0.,
        scale_factor=[0.5, 1.5],
        rotate_factor=90),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(
        type='Albumentation',
        transforms=[
            dict(type='Blur', p=0.1),
            dict(type='MedianBlur', p=0.1),
            dict(
                type='CoarseDropout',
                max_holes=1,
                max_height=0.4,
                max_width=0.4,
                min_holes=1,
                min_height=0.2,
                min_width=0.2,
                p=0.5),
        ]),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]

# data loaders
train_dataloader = dict(
    batch_size=train_batch_size,
    num_workers=10,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='CombinedDataset',
        metainfo=dict(from_file='configs/_base_/datasets/coco_wholebody.py'),
        datasets=train_datasets,
        pipeline=train_pipeline,
        test_mode=False,
    ))

val_dataloader = dict(
    batch_size=val_batch_size,
    num_workers=10,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
    dataset=dict(
        type='CocoWholeBodyDataset',
        data_root=data_root,
        data_mode=data_mode,
        ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json',
        bbox_file='data/coco/person_detection_results/'
        'COCO_val2017_detections_AP_H_56_person.json',
        data_prefix=dict(img='coco/val2017/'),
        test_mode=True,
        pipeline=val_pipeline,
    ))
test_dataloader = val_dataloader

# hooks
default_hooks = dict(
    checkpoint=dict(
        save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1))

custom_hooks = [
    dict(
        type='EMAHook',
        ema_type='ExpMomentumEMA',
        momentum=0.0002,
        update_buffers=True,
        priority=49),
    dict(
        type='mmdet.PipelineSwitchHook',
        switch_epoch=max_epochs - stage2_num_epochs,
        switch_pipeline=train_pipeline_stage2)
]

# evaluators
val_evaluator = dict(
    type='CocoWholeBodyMetric',
    ann_file='data/coco/annotations/coco_wholebody_val_v1.0.json')
test_evaluator = val_evaluator
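
Both dwpose files above are standard MMPose/MMEngine configs. A minimal sketch of how such a config is typically loaded and inspected; the path here is an assumption (the second file's name is not shown in this view), and running training/inference from it additionally needs the MMPose runner and datasets:

from mmengine.config import Config

cfg = Config.fromfile('musetalk/utils/dwpose/default_runtime.py')  # hypothetical path
print(cfg.default_scope)                       # 'mmpose'
print(cfg.default_hooks['checkpoint']['interval'])  # 10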

musetalk/utils/face_detection/README.md | 1 (new file)
@@ -0,0 +1 @@
The code for Face Detection in this folder has been taken from the wonderful [face_alignment](https://github.com/1adrianb/face-alignment) repository. It has been modified to take batches of faces at a time.

musetalk/utils/face_detection/__init__.py | 7 (new file)
@@ -0,0 +1,7 @@
# -*- coding: utf-8 -*-

__author__ = """Adrian Bulat"""
__email__ = 'adrian.bulat@nottingham.ac.uk'
__version__ = '1.0.1'

from .api import FaceAlignment, LandmarksType, NetworkSize, YOLOv8_face

musetalk/utils/face_detection/api.py | 240 (new file)
@@ -0,0 +1,240 @@
from __future__ import print_function
import os
import torch
from torch.utils.model_zoo import load_url
from enum import Enum
import numpy as np
import cv2
try:
    import urllib.request as request_file
except BaseException:
    import urllib as request_file

from .models import FAN, ResNetDepth
from .utils import *


class LandmarksType(Enum):
    """Enum class defining the type of landmarks to detect.

    ``_2D`` - the detected points ``(x,y)`` are detected in a 2D space and follow the visible contour of the face
    ``_2halfD`` - these points are the 2D projection of the 3D points
    ``_3D`` - detect the points ``(x,y,z)`` in a 3D space

    """
    _2D = 1
    _2halfD = 2
    _3D = 3


class NetworkSize(Enum):
    # TINY = 1
    # SMALL = 2
    # MEDIUM = 3
    LARGE = 4

    def __new__(cls, value):
        member = object.__new__(cls)
        member._value_ = value
        return member

    def __int__(self):
        return self.value


class FaceAlignment:
    def __init__(self, landmarks_type, network_size=NetworkSize.LARGE,
                 device='cuda', flip_input=False, face_detector='sfd', verbose=False):
        self.device = device
        self.flip_input = flip_input
        self.landmarks_type = landmarks_type
        self.verbose = verbose

        network_size = int(network_size)

        if 'cuda' in device:
            torch.backends.cudnn.benchmark = True
            # torch.backends.cuda.matmul.allow_tf32 = False
            # torch.backends.cudnn.benchmark = True
            # torch.backends.cudnn.deterministic = False
            # torch.backends.cudnn.allow_tf32 = True
            print('cuda start')

        # Get the face detector
        face_detector_module = __import__('face_detection.detection.' + face_detector,
                                          globals(), locals(), [face_detector], 0)

        self.face_detector = face_detector_module.FaceDetector(device=device, verbose=verbose)

    def get_detections_for_batch(self, images):
        images = images[..., ::-1]
        detected_faces = self.face_detector.detect_from_batch(images.copy())
        results = []

        for i, d in enumerate(detected_faces):
            if len(d) == 0:
                results.append(None)
                continue
            d = d[0]
            d = np.clip(d, 0, None)

            x1, y1, x2, y2 = map(int, d[:-1])
            results.append((x1, y1, x2, y2))

        return results


class YOLOv8_face:
    def __init__(self, path='face_detection/weights/yolov8n-face.onnx', conf_thres=0.2, iou_thres=0.5):
        self.conf_threshold = conf_thres
        self.iou_threshold = iou_thres
        self.class_names = ['face']
        self.num_classes = len(self.class_names)
        # Initialize model
        self.net = cv2.dnn.readNet(path)
        self.input_height = 640
        self.input_width = 640
        self.reg_max = 16

        self.project = np.arange(self.reg_max)
        self.strides = (8, 16, 32)
        self.feats_hw = [(math.ceil(self.input_height / self.strides[i]), math.ceil(self.input_width / self.strides[i])) for i in range(len(self.strides))]
        self.anchors = self.make_anchors(self.feats_hw)

    def make_anchors(self, feats_hw, grid_cell_offset=0.5):
        """Generate anchors from features."""
        anchor_points = {}
        for i, stride in enumerate(self.strides):
            h, w = feats_hw[i]
            x = np.arange(0, w) + grid_cell_offset  # shift x
            y = np.arange(0, h) + grid_cell_offset  # shift y
            sx, sy = np.meshgrid(x, y)
            # sy, sx = np.meshgrid(y, x)
            anchor_points[stride] = np.stack((sx, sy), axis=-1).reshape(-1, 2)
        return anchor_points

    def softmax(self, x, axis=1):
        x_exp = np.exp(x)
        # use axis=0 for a column vector
        x_sum = np.sum(x_exp, axis=axis, keepdims=True)
        s = x_exp / x_sum
        return s

    def resize_image(self, srcimg, keep_ratio=True):
        top, left, newh, neww = 0, 0, self.input_width, self.input_height
        if keep_ratio and srcimg.shape[0] != srcimg.shape[1]:
            hw_scale = srcimg.shape[0] / srcimg.shape[1]
            if hw_scale > 1:
                newh, neww = self.input_height, int(self.input_width / hw_scale)
                img = cv2.resize(srcimg, (neww, newh), interpolation=cv2.INTER_AREA)
                left = int((self.input_width - neww) * 0.5)
                img = cv2.copyMakeBorder(img, 0, 0, left, self.input_width - neww - left, cv2.BORDER_CONSTANT,
                                         value=(0, 0, 0))  # add border
            else:
                newh, neww = int(self.input_height * hw_scale), self.input_width
                img = cv2.resize(srcimg, (neww, newh), interpolation=cv2.INTER_AREA)
                top = int((self.input_height - newh) * 0.5)
                img = cv2.copyMakeBorder(img, top, self.input_height - newh - top, 0, 0, cv2.BORDER_CONSTANT,
                                         value=(0, 0, 0))
        else:
            img = cv2.resize(srcimg, (self.input_width, self.input_height), interpolation=cv2.INTER_AREA)
        return img, newh, neww, top, left

    def detect(self, srcimg):
        input_img, newh, neww, padh, padw = self.resize_image(cv2.cvtColor(srcimg, cv2.COLOR_BGR2RGB))
        scale_h, scale_w = srcimg.shape[0]/newh, srcimg.shape[1]/neww
        input_img = input_img.astype(np.float32) / 255.0

        blob = cv2.dnn.blobFromImage(input_img)
        self.net.setInput(blob)
        outputs = self.net.forward(self.net.getUnconnectedOutLayersNames())
        # if isinstance(outputs, tuple):
        #     outputs = list(outputs)
        # if float(cv2.__version__[:3]) >= 4.7:
        #     outputs = [outputs[2], outputs[0], outputs[1]]  # this reordering is needed for OpenCV 4.7 but not 4.5
        # Perform inference on the image
        det_bboxes, det_conf, det_classid, landmarks = self.post_process(outputs, scale_h, scale_w, padh, padw)
        return det_bboxes, det_conf, det_classid, landmarks

    def post_process(self, preds, scale_h, scale_w, padh, padw):
        bboxes, scores, landmarks = [], [], []
        for i, pred in enumerate(preds):
            stride = int(self.input_height / pred.shape[2])
            pred = pred.transpose((0, 2, 3, 1))

            box = pred[..., :self.reg_max * 4]
            cls = 1 / (1 + np.exp(-pred[..., self.reg_max * 4:-15])).reshape((-1, 1))
            kpts = pred[..., -15:].reshape((-1, 15))  # x1,y1,score1, ..., x5,y5,score5

            # tmp = box.reshape(self.feats_hw[i][0], self.feats_hw[i][1], 4, self.reg_max)
            tmp = box.reshape(-1, 4, self.reg_max)
            bbox_pred = self.softmax(tmp, axis=-1)
            bbox_pred = np.dot(bbox_pred, self.project).reshape((-1, 4))

            bbox = self.distance2bbox(self.anchors[stride], bbox_pred, max_shape=(self.input_height, self.input_width)) * stride
            kpts[:, 0::3] = (kpts[:, 0::3] * 2.0 + (self.anchors[stride][:, 0].reshape((-1, 1)) - 0.5)) * stride
            kpts[:, 1::3] = (kpts[:, 1::3] * 2.0 + (self.anchors[stride][:, 1].reshape((-1, 1)) - 0.5)) * stride
            kpts[:, 2::3] = 1 / (1 + np.exp(-kpts[:, 2::3]))

            bbox -= np.array([[padw, padh, padw, padh]])   # undo letterbox padding (broadcasting)
            bbox *= np.array([[scale_w, scale_h, scale_w, scale_h]])
            kpts -= np.tile(np.array([padw, padh, 0]), 5).reshape((1, 15))
            kpts *= np.tile(np.array([scale_w, scale_h, 1]), 5).reshape((1, 15))

            bboxes.append(bbox)
            scores.append(cls)
            landmarks.append(kpts)

        bboxes = np.concatenate(bboxes, axis=0)
        scores = np.concatenate(scores, axis=0)
        landmarks = np.concatenate(landmarks, axis=0)

        bboxes_wh = bboxes.copy()
        bboxes_wh[:, 2:4] = bboxes[:, 2:4] - bboxes[:, 0:2]  # xywh
        classIds = np.argmax(scores, axis=1)
        confidences = np.max(scores, axis=1)  # max_class_confidence

        mask = confidences > self.conf_threshold
        bboxes_wh = bboxes_wh[mask]  # keep only confident detections
        confidences = confidences[mask]
        classIds = classIds[mask]
        landmarks = landmarks[mask]

        indices = cv2.dnn.NMSBoxes(bboxes_wh.tolist(), confidences.tolist(), self.conf_threshold,
                                   self.iou_threshold).flatten()
        if len(indices) > 0:
            mlvl_bboxes = bboxes_wh[indices]
            confidences = confidences[indices]
            classIds = classIds[indices]
            landmarks = landmarks[indices]
            return mlvl_bboxes, confidences, classIds, landmarks
        else:
            print('nothing detected')
            return np.array([]), np.array([]), np.array([]), np.array([])

    def distance2bbox(self, points, distance, max_shape=None):
        x1 = points[:, 0] - distance[:, 0]
        y1 = points[:, 1] - distance[:, 1]
        x2 = points[:, 0] + distance[:, 2]
        y2 = points[:, 1] + distance[:, 3]
        if max_shape is not None:
            x1 = np.clip(x1, 0, max_shape[1])
            y1 = np.clip(y1, 0, max_shape[0])
            x2 = np.clip(x2, 0, max_shape[1])
            y2 = np.clip(y2, 0, max_shape[0])
        return np.stack([x1, y1, x2, y2], axis=-1)

    def draw_detections(self, image, boxes, scores, kpts):
        for box, score, kp in zip(boxes, scores, kpts):
            x, y, w, h = box.astype(int)
            # Draw rectangle
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), thickness=3)
            cv2.putText(image, "face:" + str(round(score, 2)), (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), thickness=2)
            for i in range(5):
                cv2.circle(image, (int(kp[i * 3]), int(kp[i * 3 + 1])), 4, (0, 255, 0), thickness=-1)
                # cv2.putText(image, str(i), (int(kp[i * 3]), int(kp[i * 3 + 1]) - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), thickness=1)
        return image


ROOT = os.path.dirname(os.path.abspath(__file__))
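
A minimal sketch of the batched interface exposed above (this is the modification the README refers to). The image paths are placeholders; it assumes the musetalk/utils directory is already on sys.path, which the package __init__ above arranges, and that the S3FD weights can be downloaded or found locally:

import cv2
import numpy as np
import face_detection

fa = face_detection.FaceAlignment(face_detection.LandmarksType._2D, device='cuda')
frames = np.stack([cv2.imread('frame_000.png'), cv2.imread('frame_001.png')])  # (B, H, W, 3), BGR
boxes = fa.get_detections_for_batch(frames)  # one (x1, y1, x2, y2) tuple or None per frame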

musetalk/utils/face_detection/detection/__init__.py | 1 (new file)
@@ -0,0 +1 @@
from .core import FaceDetector

musetalk/utils/face_detection/detection/core.py | 130 (new file)
@@ -0,0 +1,130 @@
import logging
import glob
from tqdm import tqdm
import numpy as np
import torch
import cv2


class FaceDetector(object):
    """An abstract class representing a face detector.

    Any other face detection implementation must subclass it. All subclasses
    must implement ``detect_from_image``, which returns a list of detected
    bounding boxes. Optionally, for speed considerations, a detect-from-path
    implementation is recommended.
    """

    def __init__(self, device, verbose):
        self.device = device
        self.verbose = verbose
        logger = logging.getLogger(__name__)

        if verbose:
            if 'cpu' in device:
                logger.warning("Detection running on CPU, this may be potentially slow.")

        if 'cpu' not in device and 'cuda' not in device:
            if verbose:
                logger.error("Expected values for device are: {cpu, cuda} but got: %s", device)
            raise ValueError

    def detect_from_image(self, tensor_or_path):
        """Detects faces in a given image.

        This function detects the faces present in a provided (usually BGR)
        image. The input can be either the image itself or the path to it.

        Arguments:
            tensor_or_path {numpy.ndarray, torch.tensor or string} -- the path
            to an image or the image itself.

        Example::

            >>> path_to_image = 'data/image_01.jpg'
            ... detected_faces = detect_from_image(path_to_image)
            [A list of bounding boxes (x1, y1, x2, y2)]
            >>> image = cv2.imread(path_to_image)
            ... detected_faces = detect_from_image(image)
            [A list of bounding boxes (x1, y1, x2, y2)]

        """
        raise NotImplementedError

    def detect_from_directory(self, path, extensions=['.jpg', '.png'], recursive=False, show_progress_bar=True):
        """Detects faces from all the images present in a given directory.

        Arguments:
            path {string} -- a string containing a path that points to the folder containing the images

        Keyword Arguments:
            extensions {list} -- list of strings containing the extensions to be
            considered in the following format: ``.extension_name`` (default:
            {['.jpg', '.png']}) recursive {bool} -- whether to scan the
            folder recursively (default: {False}) show_progress_bar {bool} --
            display a progressbar (default: {True})

        Example:
            >>> directory = 'data'
            ... detected_faces = detect_from_directory(directory)
            {A dictionary of [lists containing bounding boxes(x1, y1, x2, y2)]}

        """
        if self.verbose:
            logger = logging.getLogger(__name__)

        if len(extensions) == 0:
            if self.verbose:
                logger.error("Expected at least one extension, but none was received.")
            raise ValueError

        if self.verbose:
            logger.info("Constructing the list of images.")
        additional_pattern = '/**/*' if recursive else '/*'
        files = []
        for extension in extensions:
            files.extend(glob.glob(path + additional_pattern + extension, recursive=recursive))

        if self.verbose:
            logger.info("Finished searching for images. %s images found", len(files))
            logger.info("Preparing to run the detection.")

        predictions = {}
        for image_path in tqdm(files, disable=not show_progress_bar):
            if self.verbose:
                logger.info("Running the face detector on image: %s", image_path)
            predictions[image_path] = self.detect_from_image(image_path)

        if self.verbose:
            logger.info("The detector was successfully run on all %s images", len(files))

        return predictions

    @property
    def reference_scale(self):
        raise NotImplementedError

    @property
    def reference_x_shift(self):
        raise NotImplementedError

    @property
    def reference_y_shift(self):
        raise NotImplementedError

    @staticmethod
    def tensor_or_path_to_ndarray(tensor_or_path, rgb=True):
        """Convert path (represented as a string) or torch.tensor to a numpy.ndarray

        Arguments:
            tensor_or_path {numpy.ndarray, torch.tensor or string} -- path to the image, or the image itself
        """
        if isinstance(tensor_or_path, str):
            return cv2.imread(tensor_or_path) if not rgb else cv2.imread(tensor_or_path)[..., ::-1]
        elif torch.is_tensor(tensor_or_path):
            # Call cpu in case its coming from cuda
            return tensor_or_path.cpu().numpy()[..., ::-1].copy() if not rgb else tensor_or_path.cpu().numpy()
        elif isinstance(tensor_or_path, np.ndarray):
            return tensor_or_path[..., ::-1].copy() if not rgb else tensor_or_path
        else:
            raise TypeError
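
FaceDetector is abstract, so a concrete backend only has to provide detect_from_image (and detect_from_batch if batching is needed). A minimal illustrative subclass sketch; the OpenCV Haar cascade used here is only a stand-in for demonstration and is not part of this repository:

import cv2

class HaarDetector(FaceDetector):
    def __init__(self, device='cpu', verbose=False):
        super().__init__(device, verbose)
        self.cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

    def detect_from_image(self, tensor_or_path):
        image = self.tensor_or_path_to_ndarray(tensor_or_path, rgb=False)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = self.cascade.detectMultiScale(gray, 1.1, 5)
        # convert (x, y, w, h) to the (x1, y1, x2, y2, score) layout used by the other detectors
        return [[x, y, x + w, y + h, 1.0] for (x, y, w, h) in faces]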

musetalk/utils/face_detection/detection/sfd/__init__.py | 1 (new file)
@@ -0,0 +1 @@
from .sfd_detector import SFDDetector as FaceDetector

musetalk/utils/face_detection/detection/sfd/bbox.py | 129 (new file)
@@ -0,0 +1,129 @@
from __future__ import print_function
import os
import sys
import cv2
import random
import datetime
import time
import math
import argparse
import numpy as np
import torch

try:
    from iou import IOU
except BaseException:
    # IOU cython speedup 10x
    def IOU(ax1, ay1, ax2, ay2, bx1, by1, bx2, by2):
        sa = abs((ax2 - ax1) * (ay2 - ay1))
        sb = abs((bx2 - bx1) * (by2 - by1))
        x1, y1 = max(ax1, bx1), max(ay1, by1)
        x2, y2 = min(ax2, bx2), min(ay2, by2)
        w = x2 - x1
        h = y2 - y1
        if w < 0 or h < 0:
            return 0.0
        else:
            return 1.0 * w * h / (sa + sb - w * h)


def bboxlog(x1, y1, x2, y2, axc, ayc, aww, ahh):
    xc, yc, ww, hh = (x2 + x1) / 2, (y2 + y1) / 2, x2 - x1, y2 - y1
    dx, dy = (xc - axc) / aww, (yc - ayc) / ahh
    dw, dh = math.log(ww / aww), math.log(hh / ahh)
    return dx, dy, dw, dh


def bboxloginv(dx, dy, dw, dh, axc, ayc, aww, ahh):
    xc, yc = dx * aww + axc, dy * ahh + ayc
    ww, hh = math.exp(dw) * aww, math.exp(dh) * ahh
    x1, x2, y1, y2 = xc - ww / 2, xc + ww / 2, yc - hh / 2, yc + hh / 2
    return x1, y1, x2, y2


def nms(dets, thresh):
    if 0 == len(dets):
        return []
    x1, y1, x2, y2, scores = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3], dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1, yy1 = np.maximum(x1[i], x1[order[1:]]), np.maximum(y1[i], y1[order[1:]])
        xx2, yy2 = np.minimum(x2[i], x2[order[1:]]), np.minimum(y2[i], y2[order[1:]])

        w, h = np.maximum(0.0, xx2 - xx1 + 1), np.maximum(0.0, yy2 - yy1 + 1)
        ovr = w * h / (areas[i] + areas[order[1:]] - w * h)

        inds = np.where(ovr <= thresh)[0]
        order = order[inds + 1]

    return keep


def encode(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth boxes
    we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """

    # dist b/t match center and prior's center
    g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
    # encode variance
    g_cxcy /= (variances[0] * priors[:, 2:])
    # match wh / prior wh
    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    g_wh = torch.log(g_wh) / variances[1]
    # return target for smooth_l1_loss
    return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]


def decode(loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors,4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """

    boxes = torch.cat((
        priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
        priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
    boxes[:, :2] -= boxes[:, 2:] / 2
    boxes[:, 2:] += boxes[:, :2]
    return boxes


def batch_decode(loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors,4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """

    boxes = torch.cat((
        priors[:, :, :2] + loc[:, :, :2] * variances[0] * priors[:, :, 2:],
        priors[:, :, 2:] * torch.exp(loc[:, :, 2:] * variances[1])), 2)
    boxes[:, :, :2] -= boxes[:, :, 2:] / 2
    boxes[:, :, 2:] += boxes[:, :, :2]
    return boxes
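
A small sketch of the nms helper above on hand-made detections (the numbers are arbitrary): two heavily overlapping boxes collapse to the higher-scoring one, while the well-separated box is kept. The import path assumes the repository root is on sys.path:

import numpy as np
from musetalk.utils.face_detection.detection.sfd.bbox import nms

dets = np.array([
    [10, 10, 110, 110, 0.9],   # x1, y1, x2, y2, score
    [12, 12, 108, 108, 0.6],   # near-duplicate of the first box
    [200, 50, 280, 140, 0.8],  # separate face
])
keep = nms(dets, thresh=0.3)   # indices of the kept boxes: the first and third detections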

musetalk/utils/face_detection/detection/sfd/detect.py | 114 (new file)
@@ -0,0 +1,114 @@
import torch
import torch.nn.functional as F

import os
import sys
import cv2
import random
import datetime
import math
import argparse
import numpy as np

import scipy.io as sio
import zipfile
from .net_s3fd import s3fd
from .bbox import *


def detect(net, img, device):
    img = img - np.array([104, 117, 123])
    img = img.transpose(2, 0, 1)
    img = img.reshape((1,) + img.shape)

    if 'cuda' in device:
        torch.backends.cudnn.benchmark = True

    img = torch.from_numpy(img).float().to(device)
    BB, CC, HH, WW = img.size()
    with torch.no_grad():
        olist = net(img)

    bboxlist = []
    for i in range(len(olist) // 2):
        olist[i * 2] = F.softmax(olist[i * 2], dim=1)
    olist = [oelem.data.cpu() for oelem in olist]
    for i in range(len(olist) // 2):
        ocls, oreg = olist[i * 2], olist[i * 2 + 1]
        FB, FC, FH, FW = ocls.size()  # feature map size
        stride = 2**(i + 2)  # 4,8,16,32,64,128
        anchor = stride * 4
        poss = zip(*np.where(ocls[:, 1, :, :] > 0.05))
        for Iindex, hindex, windex in poss:
            axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride
            score = ocls[0, 1, hindex, windex]
            loc = oreg[0, :, hindex, windex].contiguous().view(1, 4)
            priors = torch.Tensor([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])
            variances = [0.1, 0.2]
            box = decode(loc, priors, variances)
            x1, y1, x2, y2 = box[0] * 1.0
            # cv2.rectangle(imgshow,(int(x1),int(y1)),(int(x2),int(y2)),(0,0,255),1)
            bboxlist.append([x1, y1, x2, y2, score])
    bboxlist = np.array(bboxlist)
    if 0 == len(bboxlist):
        bboxlist = np.zeros((1, 5))

    return bboxlist


def batch_detect(net, imgs, device):
    imgs = imgs - np.array([104, 117, 123])
    imgs = imgs.transpose(0, 3, 1, 2)

    if 'cuda' in device:
        torch.backends.cudnn.benchmark = True

    imgs = torch.from_numpy(imgs).float().to(device)
    BB, CC, HH, WW = imgs.size()
    with torch.no_grad():
        olist = net(imgs)
        # print(olist)

    bboxlist = []
    for i in range(len(olist) // 2):
        olist[i * 2] = F.softmax(olist[i * 2], dim=1)

    olist = [oelem.cpu() for oelem in olist]
    for i in range(len(olist) // 2):
        ocls, oreg = olist[i * 2], olist[i * 2 + 1]
        FB, FC, FH, FW = ocls.size()  # feature map size
        stride = 2**(i + 2)  # 4,8,16,32,64,128
        anchor = stride * 4
        poss = zip(*np.where(ocls[:, 1, :, :] > 0.05))
        for Iindex, hindex, windex in poss:
            axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride
            score = ocls[:, 1, hindex, windex]
            loc = oreg[:, :, hindex, windex].contiguous().view(BB, 1, 4)
            priors = torch.Tensor([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]]).view(1, 1, 4)
            variances = [0.1, 0.2]
            box = batch_decode(loc, priors, variances)
            box = box[:, 0] * 1.0
            # cv2.rectangle(imgshow,(int(x1),int(y1)),(int(x2),int(y2)),(0,0,255),1)
            bboxlist.append(torch.cat([box, score.unsqueeze(1)], 1).cpu().numpy())
    bboxlist = np.array(bboxlist)
    if 0 == len(bboxlist):
        bboxlist = np.zeros((1, BB, 5))

    return bboxlist


def flip_detect(net, img, device):
    img = cv2.flip(img, 1)
    b = detect(net, img, device)

    bboxlist = np.zeros(b.shape)
    bboxlist[:, 0] = img.shape[1] - b[:, 2]
    bboxlist[:, 1] = b[:, 1]
    bboxlist[:, 2] = img.shape[1] - b[:, 0]
    bboxlist[:, 3] = b[:, 3]
    bboxlist[:, 4] = b[:, 4]
    return bboxlist


def pts_to_bb(pts):
    min_x, min_y = np.min(pts, axis=0)
    max_x, max_y = np.max(pts, axis=0)
    return np.array([min_x, min_y, max_x, max_y])

musetalk/utils/face_detection/detection/sfd/net_s3fd.py | 129 (new file)
@@ -0,0 +1,129 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


class L2Norm(nn.Module):
    def __init__(self, n_channels, scale=1.0):
        super(L2Norm, self).__init__()
        self.n_channels = n_channels
        self.scale = scale
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.weight.data *= 0.0
        self.weight.data += self.scale

    def forward(self, x):
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        x = x / norm * self.weight.view(1, -1, 1, 1)
        return x


class s3fd(nn.Module):
    def __init__(self):
        super(s3fd, self).__init__()
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)

        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)

        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)

        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)

        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)

        self.fc6 = nn.Conv2d(512, 1024, kernel_size=3, stride=1, padding=3)
        self.fc7 = nn.Conv2d(1024, 1024, kernel_size=1, stride=1, padding=0)

        self.conv6_1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
        self.conv6_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)

        self.conv7_1 = nn.Conv2d(512, 128, kernel_size=1, stride=1, padding=0)
        self.conv7_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)

        self.conv3_3_norm = L2Norm(256, scale=10)
        self.conv4_3_norm = L2Norm(512, scale=8)
        self.conv5_3_norm = L2Norm(512, scale=5)

        self.conv3_3_norm_mbox_conf = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1)
        self.conv3_3_norm_mbox_loc = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1)
        self.conv4_3_norm_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
        self.conv4_3_norm_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
        self.conv5_3_norm_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
        self.conv5_3_norm_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)

        self.fc7_mbox_conf = nn.Conv2d(1024, 2, kernel_size=3, stride=1, padding=1)
        self.fc7_mbox_loc = nn.Conv2d(1024, 4, kernel_size=3, stride=1, padding=1)
        self.conv6_2_mbox_conf = nn.Conv2d(512, 2, kernel_size=3, stride=1, padding=1)
        self.conv6_2_mbox_loc = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)
        self.conv7_2_mbox_conf = nn.Conv2d(256, 2, kernel_size=3, stride=1, padding=1)
        self.conv7_2_mbox_loc = nn.Conv2d(256, 4, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        f3_3 = h
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        f4_3 = h
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        f5_3 = h
        h = F.max_pool2d(h, 2, 2)

        h = F.relu(self.fc6(h))
        h = F.relu(self.fc7(h))
        ffc7 = h
        h = F.relu(self.conv6_1(h))
        h = F.relu(self.conv6_2(h))
        f6_2 = h
        h = F.relu(self.conv7_1(h))
        h = F.relu(self.conv7_2(h))
        f7_2 = h

        f3_3 = self.conv3_3_norm(f3_3)
        f4_3 = self.conv4_3_norm(f4_3)
        f5_3 = self.conv5_3_norm(f5_3)

        cls1 = self.conv3_3_norm_mbox_conf(f3_3)
        reg1 = self.conv3_3_norm_mbox_loc(f3_3)
        cls2 = self.conv4_3_norm_mbox_conf(f4_3)
        reg2 = self.conv4_3_norm_mbox_loc(f4_3)
        cls3 = self.conv5_3_norm_mbox_conf(f5_3)
        reg3 = self.conv5_3_norm_mbox_loc(f5_3)
        cls4 = self.fc7_mbox_conf(ffc7)
        reg4 = self.fc7_mbox_loc(ffc7)
        cls5 = self.conv6_2_mbox_conf(f6_2)
        reg5 = self.conv6_2_mbox_loc(f6_2)
        cls6 = self.conv7_2_mbox_conf(f7_2)
        reg6 = self.conv7_2_mbox_loc(f7_2)

        # max-out background label
        chunk = torch.chunk(cls1, 4, 1)
        bmax = torch.max(torch.max(chunk[0], chunk[1]), chunk[2])
        cls1 = torch.cat([bmax, chunk[3]], dim=1)

        return [cls1, reg1, cls2, reg2, cls3, reg3, cls4, reg4, cls5, reg5, cls6, reg6]

musetalk/utils/face_detection/detection/sfd/sfd_detector.py | 59 (new file)
@@ -0,0 +1,59 @@
import os
import cv2
from torch.utils.model_zoo import load_url

from ..core import FaceDetector

from .net_s3fd import s3fd
from .bbox import *
from .detect import *

models_urls = {
    's3fd': 'https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth',
}


class SFDDetector(FaceDetector):
    def __init__(self, device, path_to_detector=os.path.join(os.path.dirname(os.path.abspath(__file__)), 's3fd.pth'), verbose=False):
        super(SFDDetector, self).__init__(device, verbose)

        # Initialise the face detector
        if not os.path.isfile(path_to_detector):
            model_weights = load_url(models_urls['s3fd'])
        else:
            model_weights = torch.load(path_to_detector)

        self.face_detector = s3fd()
        self.face_detector.load_state_dict(model_weights)
        self.face_detector.to(device)
        self.face_detector.eval()

    def detect_from_image(self, tensor_or_path):
        image = self.tensor_or_path_to_ndarray(tensor_or_path)

        bboxlist = detect(self.face_detector, image, device=self.device)
        keep = nms(bboxlist, 0.3)
        bboxlist = bboxlist[keep, :]
        bboxlist = [x for x in bboxlist if x[-1] > 0.5]

        return bboxlist

    def detect_from_batch(self, images):
        bboxlists = batch_detect(self.face_detector, images, device=self.device)
        keeps = [nms(bboxlists[:, i, :], 0.3) for i in range(bboxlists.shape[1])]
        bboxlists = [bboxlists[keep, i, :] for i, keep in enumerate(keeps)]
        bboxlists = [[x for x in bboxlist if x[-1] > 0.5] for bboxlist in bboxlists]

        return bboxlists

    @property
    def reference_scale(self):
        return 195

    @property
    def reference_x_shift(self):
        return 0

    @property
    def reference_y_shift(self):
        return 0
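
A minimal sketch of using the SFD detector directly. The image path is a placeholder; if s3fd.pth is not present next to the module, the weights are fetched via load_url on first use:

detector = SFDDetector(device='cuda')
faces = detector.detect_from_image('portrait.jpg')  # list of [x1, y1, x2, y2, score], score > 0.5
for x1, y1, x2, y2, score in faces:
    print(int(x1), int(y1), int(x2), int(y2), float(score))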

musetalk/utils/face_detection/models.py | 261 (new file)
@@ -0,0 +1,261 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import math


def conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False):
    "3x3 convolution with padding"
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=strd, padding=padding, bias=bias)


class ConvBlock(nn.Module):
    def __init__(self, in_planes, out_planes):
        super(ConvBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = conv3x3(in_planes, int(out_planes / 2))
        self.bn2 = nn.BatchNorm2d(int(out_planes / 2))
        self.conv2 = conv3x3(int(out_planes / 2), int(out_planes / 4))
        self.bn3 = nn.BatchNorm2d(int(out_planes / 4))
        self.conv3 = conv3x3(int(out_planes / 4), int(out_planes / 4))

        if in_planes != out_planes:
            self.downsample = nn.Sequential(
                nn.BatchNorm2d(in_planes),
                nn.ReLU(True),
                nn.Conv2d(in_planes, out_planes,
                          kernel_size=1, stride=1, bias=False),
            )
        else:
            self.downsample = None

    def forward(self, x):
        residual = x

        out1 = self.bn1(x)
        out1 = F.relu(out1, True)
        out1 = self.conv1(out1)

        out2 = self.bn2(out1)
        out2 = F.relu(out2, True)
        out2 = self.conv2(out2)

        out3 = self.bn3(out2)
        out3 = F.relu(out3, True)
        out3 = self.conv3(out3)

        out3 = torch.cat((out1, out2, out3), 1)

        if self.downsample is not None:
            residual = self.downsample(residual)

        out3 += residual

        return out3


class Bottleneck(nn.Module):

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class HourGlass(nn.Module):
    def __init__(self, num_modules, depth, num_features):
        super(HourGlass, self).__init__()
        self.num_modules = num_modules
        self.depth = depth
        self.features = num_features

        self._generate_network(self.depth)

    def _generate_network(self, level):
        self.add_module('b1_' + str(level), ConvBlock(self.features, self.features))

        self.add_module('b2_' + str(level), ConvBlock(self.features, self.features))

        if level > 1:
            self._generate_network(level - 1)
        else:
            self.add_module('b2_plus_' + str(level), ConvBlock(self.features, self.features))

        self.add_module('b3_' + str(level), ConvBlock(self.features, self.features))

    def _forward(self, level, inp):
        # Upper branch
        up1 = inp
        up1 = self._modules['b1_' + str(level)](up1)

        # Lower branch
        low1 = F.avg_pool2d(inp, 2, stride=2)
        low1 = self._modules['b2_' + str(level)](low1)

        if level > 1:
            low2 = self._forward(level - 1, low1)
        else:
            low2 = low1
            low2 = self._modules['b2_plus_' + str(level)](low2)

        low3 = low2
        low3 = self._modules['b3_' + str(level)](low3)

        up2 = F.interpolate(low3, scale_factor=2, mode='nearest')

        return up1 + up2

    def forward(self, x):
        return self._forward(self.depth, x)


class FAN(nn.Module):

    def __init__(self, num_modules=1):
        super(FAN, self).__init__()
        self.num_modules = num_modules

        # Base part
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = ConvBlock(64, 128)
        self.conv3 = ConvBlock(128, 128)
        self.conv4 = ConvBlock(128, 256)

        # Stacking part
        for hg_module in range(self.num_modules):
            self.add_module('m' + str(hg_module), HourGlass(1, 4, 256))
            self.add_module('top_m_' + str(hg_module), ConvBlock(256, 256))
            self.add_module('conv_last' + str(hg_module),
                            nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
            self.add_module('bn_end' + str(hg_module), nn.BatchNorm2d(256))
            self.add_module('l' + str(hg_module), nn.Conv2d(256,
                                                            68, kernel_size=1, stride=1, padding=0))

            if hg_module < self.num_modules - 1:
                self.add_module(
                    'bl' + str(hg_module), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
                self.add_module('al' + str(hg_module), nn.Conv2d(68,
                                                                 256, kernel_size=1, stride=1, padding=0))

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)), True)
        x = F.avg_pool2d(self.conv2(x), 2, stride=2)
        x = self.conv3(x)
        x = self.conv4(x)

        previous = x

        outputs = []
        for i in range(self.num_modules):
            hg = self._modules['m' + str(i)](previous)

            ll = hg
            ll = self._modules['top_m_' + str(i)](ll)

            ll = F.relu(self._modules['bn_end' + str(i)]
                        (self._modules['conv_last' + str(i)](ll)), True)

            # Predict heatmaps
            tmp_out = self._modules['l' + str(i)](ll)
            outputs.append(tmp_out)

            if i < self.num_modules - 1:
                ll = self._modules['bl' + str(i)](ll)
                tmp_out_ = self._modules['al' + str(i)](tmp_out)
                previous = previous + ll + tmp_out_

        return outputs


class ResNetDepth(nn.Module):

    def __init__(self, block=Bottleneck, layers=[3, 8, 36, 3], num_classes=68):
        self.inplanes = 64
        super(ResNetDepth, self).__init__()
        self.conv1 = nn.Conv2d(3 + 68, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
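
A quick shape check for the FAN model above (random input, no pretrained weights): a 256x256 RGB crop yields one 68-channel 64x64 heatmap tensor per stacked module.

import torch

net = FAN(num_modules=1).eval()
with torch.no_grad():
    heatmaps = net(torch.randn(1, 3, 256, 256))
print(len(heatmaps), heatmaps[0].shape)  # 1 torch.Size([1, 68, 64, 64])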
313
musetalk/utils/face_detection/utils.py
Normal file
313
musetalk/utils/face_detection/utils.py
Normal file
@@ -0,0 +1,313 @@
|
||||
from __future__ import print_function
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import torch
|
||||
import math
|
||||
import numpy as np
|
||||
import cv2
|
||||
|
||||
|
||||
def _gaussian(
|
||||
size=3, sigma=0.25, amplitude=1, normalize=False, width=None,
|
||||
height=None, sigma_horz=None, sigma_vert=None, mean_horz=0.5,
|
||||
mean_vert=0.5):
|
||||
# handle some defaults
|
||||
if width is None:
|
||||
width = size
|
||||
if height is None:
|
||||
height = size
|
||||
if sigma_horz is None:
|
||||
sigma_horz = sigma
|
||||
if sigma_vert is None:
|
||||
sigma_vert = sigma
|
||||
center_x = mean_horz * width + 0.5
|
||||
center_y = mean_vert * height + 0.5
|
||||
gauss = np.empty((height, width), dtype=np.float32)
|
||||
# generate kernel
|
||||
for i in range(height):
|
||||
for j in range(width):
|
||||
gauss[i][j] = amplitude * math.exp(-(math.pow((j + 1 - center_x) / (
|
||||
sigma_horz * width), 2) / 2.0 + math.pow((i + 1 - center_y) / (sigma_vert * height), 2) / 2.0))
|
||||
if normalize:
|
||||
gauss = gauss / np.sum(gauss)
|
||||
return gauss
|
||||
|
||||
|
||||
def draw_gaussian(image, point, sigma):
|
||||
# Check if the gaussian is inside
|
||||
ul = [math.floor(point[0] - 3 * sigma), math.floor(point[1] - 3 * sigma)]
|
||||
br = [math.floor(point[0] + 3 * sigma), math.floor(point[1] + 3 * sigma)]
|
||||
if (ul[0] > image.shape[1] or ul[1] > image.shape[0] or br[0] < 1 or br[1] < 1):
|
||||
return image
|
||||
size = 6 * sigma + 1
|
||||
g = _gaussian(size)
|
||||
g_x = [int(max(1, -ul[0])), int(min(br[0], image.shape[1])) - int(max(1, ul[0])) + int(max(1, -ul[0]))]
|
||||
g_y = [int(max(1, -ul[1])), int(min(br[1], image.shape[0])) - int(max(1, ul[1])) + int(max(1, -ul[1]))]
|
||||
img_x = [int(max(1, ul[0])), int(min(br[0], image.shape[1]))]
|
||||
img_y = [int(max(1, ul[1])), int(min(br[1], image.shape[0]))]
|
||||
assert (g_x[0] > 0 and g_y[1] > 0)
|
||||
image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]
|
||||
] = image[img_y[0] - 1:img_y[1], img_x[0] - 1:img_x[1]] + g[g_y[0] - 1:g_y[1], g_x[0] - 1:g_x[1]]
|
||||
image[image > 1] = 1
|
||||
return image
|
||||
|
||||
|
||||
def transform(point, center, scale, resolution, invert=False):
    """Generate an affine transformation matrix.

    Given a point, a center, a scale and a target resolution, the
    function generates an affine transformation matrix. If invert is ``True``
    it will produce the inverse transformation.

    Arguments:
        point {torch.tensor} -- the input 2D point
        center {torch.tensor or numpy.array} -- the center around which to perform the transformations
        scale {float} -- the scale of the face/object
        resolution {float} -- the output resolution

    Keyword Arguments:
        invert {bool} -- whether to produce the direct or the
            inverse transformation matrix (default: {False})
    """
    _pt = torch.ones(3)
    _pt[0] = point[0]
    _pt[1] = point[1]

    h = 200.0 * scale
    t = torch.eye(3)
    t[0, 0] = resolution / h
    t[1, 1] = resolution / h
    t[0, 2] = resolution * (-center[0] / h + 0.5)
    t[1, 2] = resolution * (-center[1] / h + 0.5)

    if invert:
        t = torch.inverse(t)

    new_point = (torch.matmul(t, _pt))[0:2]

    return new_point.int()


def crop(image, center, scale, resolution=256.0):
    """Center crop an image or set of heatmaps around a point.

    Input is expected to be an np.ndarray.

    Arguments:
        image {numpy.array} -- an RGB image
        center {numpy.array} -- the center of the object, usually the same as the center of the bounding box
        scale {float} -- scale of the face

    Keyword Arguments:
        resolution {float} -- the size of the output cropped image (default: {256.0})

    Returns:
        numpy.array -- the cropped and resized image
    """
    ul = transform([1, 1], center, scale, resolution, True)
    br = transform([resolution, resolution], center, scale, resolution, True)
    # pad = math.ceil(torch.norm((ul - br).float()) / 2.0 - (br[0] - ul[0]) / 2.0)
    if image.ndim > 2:
        newDim = np.array([br[1] - ul[1], br[0] - ul[0],
                           image.shape[2]], dtype=np.int32)
        newImg = np.zeros(newDim, dtype=np.uint8)
    else:
        # np.int was removed from recent NumPy releases; np.int32 keeps the original behaviour
        newDim = np.array([br[1] - ul[1], br[0] - ul[0]], dtype=np.int32)
        newImg = np.zeros(newDim, dtype=np.uint8)
    ht = image.shape[0]
    wd = image.shape[1]
    newX = np.array(
        [max(1, -ul[0] + 1), min(br[0], wd) - ul[0]], dtype=np.int32)
    newY = np.array(
        [max(1, -ul[1] + 1), min(br[1], ht) - ul[1]], dtype=np.int32)
    oldX = np.array([max(1, ul[0] + 1), min(br[0], wd)], dtype=np.int32)
    oldY = np.array([max(1, ul[1] + 1), min(br[1], ht)], dtype=np.int32)
    newImg[newY[0] - 1:newY[1], newX[0] - 1:newX[1]
           ] = image[oldY[0] - 1:oldY[1], oldX[0] - 1:oldX[1], :]
    newImg = cv2.resize(newImg, dsize=(int(resolution), int(resolution)),
                        interpolation=cv2.INTER_LINEAR)
    return newImg


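As a usage sketch (not part of the original file), cropping a face region from a frame with transform/crop typically looks like the following; the frame size, box coordinates and the /195.0 scale heuristic are illustrative assumptions:

# Illustrative only: derive center/scale from an assumed face box and crop to 256x256.
frame = np.zeros((480, 640, 3), dtype=np.uint8)           # stand-in for a video frame
x1, y1, x2, y2 = 200, 120, 360, 300                        # assumed detector output
center = torch.FloatTensor([(x1 + x2) / 2.0, (y1 + y2) / 2.0])
scale = (x2 - x1 + y2 - y1) / 195.0                        # common face-alignment heuristic, assumed here
face_crop = crop(frame, center, scale, resolution=256.0)   # (256, 256, 3) uint8
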
def get_preds_fromhm(hm, center=None, scale=None):
    """Obtain (x,y) coordinates given a set of N heatmaps. If the center
    and the scale are provided the function will return the points also in
    the original coordinate frame.

    Arguments:
        hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]

    Keyword Arguments:
        center {torch.tensor} -- the center of the bounding box (default: {None})
        scale {float} -- face scale (default: {None})
    """
    max, idx = torch.max(
        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)
    idx += 1
    preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
    preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)
    preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)

    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            hm_ = hm[i, j, :]
            pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1
            if pX > 0 and pX < 63 and pY > 0 and pY < 63:
                diff = torch.FloatTensor(
                    [hm_[pY, pX + 1] - hm_[pY, pX - 1],
                     hm_[pY + 1, pX] - hm_[pY - 1, pX]])
                preds[i, j].add_(diff.sign_().mul_(.25))

    preds.add_(-.5)

    preds_orig = torch.zeros(preds.size())
    if center is not None and scale is not None:
        for i in range(hm.size(0)):
            for j in range(hm.size(1)):
                preds_orig[i, j] = transform(
                    preds[i, j], center, scale, hm.size(2), True)

    return preds, preds_orig

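A minimal sketch (not from the original file) of decoding heatmaps with this helper; the batch size, landmark count, heatmap resolution and face center below are assumptions for illustration:

# Illustrative only: decode a random batch of heatmaps into per-landmark coordinates.
hm = torch.rand(1, 68, 64, 64)                  # stand-in for network output, shape [B, N, 64, 64]
preds, _ = get_preds_fromhm(hm)                 # heatmap-space coordinates, shape [1, 68, 2]
center = torch.FloatTensor([128.0, 128.0])      # assumed face center in the source image
preds_hm, preds_img = get_preds_fromhm(hm, center, scale=1.0)  # also mapped back to image space
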
def get_preds_fromhm_batch(hm, centers=None, scales=None):
    """Obtain (x,y) coordinates given a set of N heatmaps. If the centers
    and the scales are provided the function will return the points also in
    the original coordinate frame.

    Arguments:
        hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]

    Keyword Arguments:
        centers {torch.tensor} -- the centers of the bounding boxes (default: {None})
        scales {float} -- face scales (default: {None})
    """
    max, idx = torch.max(
        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)
    idx += 1
    preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
    preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)
    preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)

    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            hm_ = hm[i, j, :]
            pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1
            if pX > 0 and pX < 63 and pY > 0 and pY < 63:
                diff = torch.FloatTensor(
                    [hm_[pY, pX + 1] - hm_[pY, pX - 1],
                     hm_[pY + 1, pX] - hm_[pY - 1, pX]])
                preds[i, j].add_(diff.sign_().mul_(.25))

    preds.add_(-.5)

    preds_orig = torch.zeros(preds.size())
    if centers is not None and scales is not None:
        for i in range(hm.size(0)):
            for j in range(hm.size(1)):
                preds_orig[i, j] = transform(
                    preds[i, j], centers[i], scales[i], hm.size(2), True)

    return preds, preds_orig

def shuffle_lr(parts, pairs=None):
    """Shuffle the points left-right according to the axis of symmetry
    of the object.

    Arguments:
        parts {torch.tensor} -- a 3D or 4D object containing the heatmaps.

    Keyword Arguments:
        pairs {list of integers} -- [order of the flipped points] (default: {None})
    """
    if pairs is None:
        pairs = [16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
                 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 27, 28, 29, 30, 35,
                 34, 33, 32, 31, 45, 44, 43, 42, 47, 46, 39, 38, 37, 36, 41,
                 40, 54, 53, 52, 51, 50, 49, 48, 59, 58, 57, 56, 55, 64, 63,
                 62, 61, 60, 67, 66, 65]
    if parts.ndimension() == 3:
        parts = parts[pairs, ...]
    else:
        parts = parts[:, pairs, ...]

    return parts


def flip(tensor, is_label=False):
    """Flip an image or a set of heatmaps left-right.

    Arguments:
        tensor {numpy.array or torch.tensor} -- [the input image or heatmaps]

    Keyword Arguments:
        is_label {bool} -- [denotes whether the input is an image or a set of heatmaps] (default: {False})
    """
    if not torch.is_tensor(tensor):
        tensor = torch.from_numpy(tensor)

    if is_label:
        tensor = shuffle_lr(tensor).flip(tensor.ndimension() - 1)
    else:
        tensor = tensor.flip(tensor.ndimension() - 1)

    return tensor

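A short sketch (not from the original file) of how flip and shuffle_lr support horizontal augmentation; the tensor shapes are illustrative assumptions:

# Illustrative only: flip heatmaps left-right and re-order the landmark channels to match.
heatmaps = torch.rand(68, 64, 64)               # [N, H, W] stand-in for network output
flipped = flip(heatmaps, is_label=True)         # mirrored heatmaps with swapped left/right channels
image_flipped = flip(torch.rand(3, 256, 256))   # plain left-right flip for an image tensor
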
# From pyzolib/paths.py (https://bitbucket.org/pyzo/pyzolib/src/tip/paths.py)


def appdata_dir(appname=None, roaming=False):
    """ appdata_dir(appname=None, roaming=False)

    Get the path to the application directory, where applications are allowed
    to write user specific files (e.g. configurations). For non-user specific
    data, consider using common_appdata_dir().
    If appname is given, a subdir is appended (and created if necessary).
    If roaming is True, will prefer a roaming directory (Windows Vista/7).
    """

    # Define default user directory
    userDir = os.getenv('FACEALIGNMENT_USERDIR', None)
    if userDir is None:
        userDir = os.path.expanduser('~')
        if not os.path.isdir(userDir):  # pragma: no cover
            userDir = '/var/tmp'  # issue #54

    # Get system app data dir
    path = None
    if sys.platform.startswith('win'):
        path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
        path = (path2 or path1) if roaming else (path1 or path2)
    elif sys.platform.startswith('darwin'):
        path = os.path.join(userDir, 'Library', 'Application Support')
    # On Linux and as fallback
    if not (path and os.path.isdir(path)):
        path = userDir

    # Maybe we should store things local to the executable (in case of a
    # portable distro or a frozen application that wants to be portable)
    prefix = sys.prefix
    if getattr(sys, 'frozen', None):
        prefix = os.path.abspath(os.path.dirname(sys.executable))
    for reldir in ('settings', '../settings'):
        localpath = os.path.abspath(os.path.join(prefix, reldir))
        if os.path.isdir(localpath):  # pragma: no cover
            try:
                open(os.path.join(localpath, 'test.write'), 'wb').close()
                os.remove(os.path.join(localpath, 'test.write'))
            except IOError:
                pass  # We cannot write in this directory
            else:
                path = localpath
                break

    # Get path specific for this app
    if appname:
        if path == userDir:
            appname = '.' + appname.lstrip('.')  # Make it a hidden directory
        path = os.path.join(path, appname)
        if not os.path.isdir(path):  # pragma: no cover
            os.mkdir(path)

    # Done
    return path
50
musetalk/utils/face_parsing/__init__.py
Executable file
@@ -0,0 +1,50 @@
import torch
import time
import os
import cv2
import numpy as np
from PIL import Image
from .model import BiSeNet
import torchvision.transforms as transforms


class FaceParsing():
    def __init__(self):
        self.net = self.model_init()
        self.preprocess = self.image_preprocess()

    def model_init(self,
                   resnet_path='./models/face-parse-bisent/resnet18-5c106cde.pth',
                   model_pth='./models/face-parse-bisent/79999_iter.pth'):
        net = BiSeNet(resnet_path)
        net.cuda()
        net.load_state_dict(torch.load(model_pth))
        net.eval()
        return net

    def image_preprocess(self):
        return transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])

    def __call__(self, image, size=(512, 512)):
        if isinstance(image, str):
            image = Image.open(image)

        width, height = image.size
        with torch.no_grad():
            image = image.resize(size, Image.BILINEAR)
            img = self.preprocess(image)
            img = torch.unsqueeze(img, 0).cuda()
            out = self.net(img)[0]
            parsing = out.squeeze(0).cpu().numpy().argmax(0)
        # keep only face-region classes (drop neck, clothing, hair, hat) and binarize to 255
        parsing[np.where(parsing > 13)] = 0
        parsing[np.where(parsing >= 1)] = 255
        parsing = Image.fromarray(parsing.astype(np.uint8))
        return parsing


if __name__ == "__main__":
    fp = FaceParsing()
    segmap = fp('154_small.png')
    segmap.save('res.png')

283
musetalk/utils/face_parsing/model.py
Executable file
@@ -0,0 +1,283 @@
#!/usr/bin/python
# -*- encoding: utf-8 -*-


import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision

from .resnet import Resnet18
# from modules.bn import InPlaceABNSync as BatchNorm2d


class ConvBNReLU(nn.Module):
    def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs):
        super(ConvBNReLU, self).__init__()
        self.conv = nn.Conv2d(in_chan,
                              out_chan,
                              kernel_size=ks,
                              stride=stride,
                              padding=padding,
                              bias=False)
        self.bn = nn.BatchNorm2d(out_chan)
        self.init_weight()

    def forward(self, x):
        x = self.conv(x)
        x = F.relu(self.bn(x))
        return x

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)

class BiSeNetOutput(nn.Module):
    def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs):
        super(BiSeNetOutput, self).__init__()
        self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
        self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False)
        self.init_weight()

    def forward(self, x):
        x = self.conv(x)
        x = self.conv_out(x)
        return x

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)

    def get_params(self):
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, nn.BatchNorm2d):
                nowd_params += list(module.parameters())
        return wd_params, nowd_params


class AttentionRefinementModule(nn.Module):
    def __init__(self, in_chan, out_chan, *args, **kwargs):
        super(AttentionRefinementModule, self).__init__()
        self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
        self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size=1, bias=False)
        self.bn_atten = nn.BatchNorm2d(out_chan)
        self.sigmoid_atten = nn.Sigmoid()
        self.init_weight()

    def forward(self, x):
        feat = self.conv(x)
        atten = F.avg_pool2d(feat, feat.size()[2:])
        atten = self.conv_atten(atten)
        atten = self.bn_atten(atten)
        atten = self.sigmoid_atten(atten)
        out = torch.mul(feat, atten)
        return out

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)


class ContextPath(nn.Module):
    def __init__(self, resnet_path, *args, **kwargs):
        super(ContextPath, self).__init__()
        self.resnet = Resnet18(resnet_path)
        self.arm16 = AttentionRefinementModule(256, 128)
        self.arm32 = AttentionRefinementModule(512, 128)
        self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0)

        self.init_weight()

    def forward(self, x):
        H0, W0 = x.size()[2:]
        feat8, feat16, feat32 = self.resnet(x)
        H8, W8 = feat8.size()[2:]
        H16, W16 = feat16.size()[2:]
        H32, W32 = feat32.size()[2:]

        avg = F.avg_pool2d(feat32, feat32.size()[2:])
        avg = self.conv_avg(avg)
        avg_up = F.interpolate(avg, (H32, W32), mode='nearest')

        feat32_arm = self.arm32(feat32)
        feat32_sum = feat32_arm + avg_up
        feat32_up = F.interpolate(feat32_sum, (H16, W16), mode='nearest')
        feat32_up = self.conv_head32(feat32_up)

        feat16_arm = self.arm16(feat16)
        feat16_sum = feat16_arm + feat32_up
        feat16_up = F.interpolate(feat16_sum, (H8, W8), mode='nearest')
        feat16_up = self.conv_head16(feat16_up)

        return feat8, feat16_up, feat32_up  # x8, x8, x16

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)

    def get_params(self):
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, (nn.Linear, nn.Conv2d)):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, nn.BatchNorm2d):
                nowd_params += list(module.parameters())
        return wd_params, nowd_params


### This is not used, since it is replaced by the resnet feature of the same size
class SpatialPath(nn.Module):
    def __init__(self, *args, **kwargs):
        super(SpatialPath, self).__init__()
        self.conv1 = ConvBNReLU(3, 64, ks=7, stride=2, padding=3)
        self.conv2 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1)
        self.conv3 = ConvBNReLU(64, 64, ks=3, stride=2, padding=1)
        self.conv_out = ConvBNReLU(64, 128, ks=1, stride=1, padding=0)
        self.init_weight()

    def forward(self, x):
        feat = self.conv1(x)
        feat = self.conv2(feat)
        feat = self.conv3(feat)
        feat = self.conv_out(feat)
        return feat

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)

    def get_params(self):
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, nn.BatchNorm2d):
                nowd_params += list(module.parameters())
        return wd_params, nowd_params


class FeatureFusionModule(nn.Module):
    def __init__(self, in_chan, out_chan, *args, **kwargs):
        super(FeatureFusionModule, self).__init__()
        self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)
        self.conv1 = nn.Conv2d(out_chan,
                               out_chan//4,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.conv2 = nn.Conv2d(out_chan//4,
                               out_chan,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()
        self.init_weight()

    def forward(self, fsp, fcp):
        fcat = torch.cat([fsp, fcp], dim=1)
        feat = self.convblk(fcat)
        atten = F.avg_pool2d(feat, feat.size()[2:])
        atten = self.conv1(atten)
        atten = self.relu(atten)
        atten = self.conv2(atten)
        atten = self.sigmoid(atten)
        feat_atten = torch.mul(feat, atten)
        feat_out = feat_atten + feat
        return feat_out

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)

    def get_params(self):
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, nn.BatchNorm2d):
                nowd_params += list(module.parameters())
        return wd_params, nowd_params


class BiSeNet(nn.Module):
    def __init__(self, resnet_path='models/resnet18-5c106cde.pth', n_classes=19, *args, **kwargs):
        super(BiSeNet, self).__init__()
        self.cp = ContextPath(resnet_path)
        ## here self.sp is deleted
        self.ffm = FeatureFusionModule(256, 256)
        self.conv_out = BiSeNetOutput(256, 256, n_classes)
        self.conv_out16 = BiSeNetOutput(128, 64, n_classes)
        self.conv_out32 = BiSeNetOutput(128, 64, n_classes)
        self.init_weight()

    def forward(self, x):
        H, W = x.size()[2:]
        feat_res8, feat_cp8, feat_cp16 = self.cp(x)  # here return res3b1 feature
        feat_sp = feat_res8  # use res3b1 feature to replace spatial path feature
        feat_fuse = self.ffm(feat_sp, feat_cp8)

        feat_out = self.conv_out(feat_fuse)
        feat_out16 = self.conv_out16(feat_cp8)
        feat_out32 = self.conv_out32(feat_cp16)

        feat_out = F.interpolate(feat_out, (H, W), mode='bilinear', align_corners=True)
        feat_out16 = F.interpolate(feat_out16, (H, W), mode='bilinear', align_corners=True)
        feat_out32 = F.interpolate(feat_out32, (H, W), mode='bilinear', align_corners=True)
        return feat_out, feat_out16, feat_out32

    def init_weight(self):
        for ly in self.children():
            if isinstance(ly, nn.Conv2d):
                nn.init.kaiming_normal_(ly.weight, a=1)
                if not ly.bias is None: nn.init.constant_(ly.bias, 0)

    def get_params(self):
        wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], []
        for name, child in self.named_children():
            child_wd_params, child_nowd_params = child.get_params()
            if isinstance(child, FeatureFusionModule) or isinstance(child, BiSeNetOutput):
                lr_mul_wd_params += child_wd_params
                lr_mul_nowd_params += child_nowd_params
            else:
                wd_params += child_wd_params
                nowd_params += child_nowd_params
        return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params


if __name__ == "__main__":
    # pass n_classes as a keyword so the positional resnet_path default is kept
    net = BiSeNet(n_classes=19)
    net.cuda()
    net.eval()
    in_ten = torch.randn(16, 3, 640, 480).cuda()
    out, out16, out32 = net(in_ten)
    print(out.shape)

    net.get_params()
109
musetalk/utils/face_parsing/resnet.py
Executable file
@@ -0,0 +1,109 @@
#!/usr/bin/python
# -*- encoding: utf-8 -*-

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as modelzoo

# from modules.bn import InPlaceABNSync as BatchNorm2d

resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class BasicBlock(nn.Module):
    def __init__(self, in_chan, out_chan, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_chan, out_chan, stride)
        self.bn1 = nn.BatchNorm2d(out_chan)
        self.conv2 = conv3x3(out_chan, out_chan)
        self.bn2 = nn.BatchNorm2d(out_chan)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        if in_chan != out_chan or stride != 1:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_chan, out_chan,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_chan),
            )

    def forward(self, x):
        residual = self.conv1(x)
        residual = F.relu(self.bn1(residual))
        residual = self.conv2(residual)
        residual = self.bn2(residual)

        shortcut = x
        if self.downsample is not None:
            shortcut = self.downsample(x)

        out = shortcut + residual
        out = self.relu(out)
        return out


def create_layer_basic(in_chan, out_chan, bnum, stride=1):
    layers = [BasicBlock(in_chan, out_chan, stride=stride)]
    for i in range(bnum-1):
        layers.append(BasicBlock(out_chan, out_chan, stride=1))
    return nn.Sequential(*layers)


class Resnet18(nn.Module):
    def __init__(self, model_path):
        super(Resnet18, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
        self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
        self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
        self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)
        self.init_weight(model_path)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(self.bn1(x))
        x = self.maxpool(x)

        x = self.layer1(x)
        feat8 = self.layer2(x)  # 1/8
        feat16 = self.layer3(feat8)  # 1/16
        feat32 = self.layer4(feat16)  # 1/32
        return feat8, feat16, feat32

    def init_weight(self, model_path):
        state_dict = torch.load(model_path)  # modelzoo.load_url(resnet18_url)
        self_state_dict = self.state_dict()
        for k, v in state_dict.items():
            if 'fc' in k: continue
            self_state_dict.update({k: v})
        self.load_state_dict(self_state_dict)

    def get_params(self):
        wd_params, nowd_params = [], []
        for name, module in self.named_modules():
            if isinstance(module, (nn.Linear, nn.Conv2d)):
                wd_params.append(module.weight)
                if not module.bias is None:
                    nowd_params.append(module.bias)
            elif isinstance(module, nn.BatchNorm2d):
                nowd_params += list(module.parameters())
        return wd_params, nowd_params


if __name__ == "__main__":
    # Resnet18 requires a path to pretrained ResNet-18 weights; the path below
    # follows the one used elsewhere in this repo and must exist locally.
    net = Resnet18('./models/face-parse-bisent/resnet18-5c106cde.pth')
    x = torch.randn(16, 3, 224, 224)
    out = net(x)
    print(out[0].size())
    print(out[1].size())
    print(out[2].size())
    net.get_params()
113
musetalk/utils/preprocessing.py
Normal file
@@ -0,0 +1,113 @@
import sys
from face_detection import FaceAlignment, LandmarksType
from os import listdir, path
import subprocess
import numpy as np
import cv2
import pickle
import os
import json
from mmpose.apis import inference_topdown, init_model
from mmpose.structures import merge_data_samples
import torch
from tqdm import tqdm

# initialize the mmpose model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
config_file = './musetalk/utils/dwpose/rtmpose-l_8xb32-270e_coco-ubody-wholebody-384x288.py'
checkpoint_file = './models/dwpose/dw-ll_ucoco_384.pth'
model = init_model(config_file, checkpoint_file, device=device)

# initialize the face detection model
device = "cuda" if torch.cuda.is_available() else "cpu"
fa = FaceAlignment(LandmarksType._2D, flip_input=False, device=device)

# placeholder used when no valid bbox can be found for a frame
coord_placeholder = (0.0, 0.0, 0.0, 0.0)


def resize_landmark(landmark, w, h, new_w, new_h):
    w_ratio = new_w / w
    h_ratio = new_h / h
    landmark_norm = landmark / [w, h]
    landmark_resized = landmark_norm * [new_w, new_h]
    return landmark_resized


def read_imgs(img_list):
    frames = []
    print('reading images...')
    for img_path in tqdm(img_list):
        frame = cv2.imread(img_path)
        frames.append(frame)
    return frames


def get_landmark_and_bbox(img_list, upperbondrange=0):
    frames = read_imgs(img_list)
    batch_size_fa = 1
    batches = [frames[i:i + batch_size_fa] for i in range(0, len(frames), batch_size_fa)]
    coords_list = []
    landmarks = []
    if upperbondrange != 0:
        print('get key_landmark and face bounding boxes with the bbox_shift:', upperbondrange)
    else:
        print('get key_landmark and face bounding boxes with the default value')
    average_range_minus = []
    average_range_plus = []
    for fb in tqdm(batches):
        results = inference_topdown(model, np.asarray(fb)[0])
        results = merge_data_samples(results)
        keypoints = results.pred_instances.keypoints
        face_land_mark = keypoints[0][23:91]
        face_land_mark = face_land_mark.astype(np.int32)

        # get bounding boxes by face detection
        bbox = fa.get_detections_for_batch(np.asarray(fb))

        # adjust the bounding box according to the landmarks,
        # then append it as a tuple to the coordinates list
        for j, f in enumerate(bbox):
            if f is None:  # no face in the image
                coords_list += [coord_placeholder]
                continue

            half_face_coord = face_land_mark[29]  # np.mean([face_land_mark[28], face_land_mark[29]], axis=0)
            range_minus = (face_land_mark[30] - face_land_mark[29])[1]
            range_plus = (face_land_mark[29] - face_land_mark[28])[1]
            average_range_minus.append(range_minus)
            average_range_plus.append(range_plus)
            if upperbondrange != 0:
                # manual adjustment: positive shifts down (toward landmark 29), negative shifts up (toward landmark 28)
                half_face_coord[1] = upperbondrange + half_face_coord[1]
            half_face_dist = np.max(face_land_mark[:, 1]) - half_face_coord[1]
            upper_bond = half_face_coord[1] - half_face_dist

            f_landmark = (np.min(face_land_mark[:, 0]), int(upper_bond), np.max(face_land_mark[:, 0]), np.max(face_land_mark[:, 1]))
            x1, y1, x2, y2 = f_landmark

            if y2 - y1 <= 0 or x2 - x1 <= 0 or x1 < 0:  # if the landmark bbox is not suitable, reuse the detector bbox
                coords_list += [f]
                w, h = f[2] - f[0], f[3] - f[1]
                print("error bbox:", f)
            else:
                coords_list += [f_landmark]

    print("********************************************bbox_shift parameter adjustment**********************************************************")
    print(f"Total frame:「{len(frames)}」 Manually adjust range : [ -{int(sum(average_range_minus) / len(average_range_minus))}~{int(sum(average_range_plus) / len(average_range_plus))} ] , the current value: {upperbondrange}")
    print("*************************************************************************************************************************************")
    return coords_list, frames


if __name__ == "__main__":
    img_list = ["./results/lyria/00000.png", "./results/lyria/00001.png", "./results/lyria/00002.png", "./results/lyria/00003.png"]
    crop_coord_path = "./coord_face.pkl"
    coords_list, full_frames = get_landmark_and_bbox(img_list)
    with open(crop_coord_path, 'wb') as f:
        pickle.dump(coords_list, f)

    for bbox, frame in zip(coords_list, full_frames):
        if bbox == coord_placeholder:
            continue
        x1, y1, x2, y2 = bbox
        crop_frame = frame[y1:y2, x1:x2]
        print('Cropped shape', crop_frame.shape)

        # cv2.imwrite(path.join(save_dir, '{}.png'.format(i)), full_frames[i][0][y1:y2, x1:x2])
    print(coords_list)
61
musetalk/utils/utils.py
Normal file
@@ -0,0 +1,61 @@
import os
import cv2
import numpy as np
import torch

ffmpeg_path = os.getenv('FFMPEG_PATH')
if ffmpeg_path is None:
    print("please download ffmpeg-static and export to FFMPEG_PATH. \nFor example: export FFMPEG_PATH=/musetalk/ffmpeg-4.4-amd64-static")
elif ffmpeg_path not in os.getenv('PATH'):
    print("add ffmpeg to path")
    os.environ["PATH"] = f"{ffmpeg_path}:{os.environ['PATH']}"


from musetalk.whisper.audio2feature import Audio2Feature
from musetalk.models.vae import VAE
from musetalk.models.unet import UNet, PositionalEncoding


def load_all_model():
    audio_processor = Audio2Feature(model_path="./models/whisper/tiny.pt")
    vae = VAE(model_path="./models/sd-vae-ft-mse/")
    unet = UNet(unet_config="./models/musetalk/musetalk.json",
                model_path="./models/musetalk/pytorch_model.bin")
    pe = PositionalEncoding(d_model=384)
    return audio_processor, vae, unet, pe


def get_file_type(video_path):
    _, ext = os.path.splitext(video_path)

    if ext.lower() in ['.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff']:
        return 'image'
    elif ext.lower() in ['.avi', '.mp4', '.mov', '.flv', '.mkv']:
        return 'video'
    else:
        return 'unsupported'


def get_video_fps(video_path):
    video = cv2.VideoCapture(video_path)
    fps = video.get(cv2.CAP_PROP_FPS)
    video.release()
    return fps


def datagen(whisper_chunks, vae_encode_latents, batch_size=8, delay_frame=0):
    whisper_batch, latent_batch = [], []
    for i, w in enumerate(whisper_chunks):
        idx = (i + delay_frame) % len(vae_encode_latents)
        latent = vae_encode_latents[idx]
        whisper_batch.append(w)
        latent_batch.append(latent)

        if len(latent_batch) >= batch_size:
            whisper_batch = np.asarray(whisper_batch)
            latent_batch = torch.cat(latent_batch, dim=0)
            yield whisper_batch, latent_batch
            whisper_batch, latent_batch = [], []

    # the last batch may be smaller than the batch size
    if len(latent_batch) > 0:
        whisper_batch = np.asarray(whisper_batch)
        latent_batch = torch.cat(latent_batch, dim=0)

        yield whisper_batch, latent_batch
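A minimal sketch (not part of the original file) of how these helpers fit together for inference. The input path is an assumption, the audio features and frame latents are stand-in arrays standing in for the real whisper chunks and VAE encodings, and load_all_model requires the model weights referenced above to be present:

# Illustrative only: batch per-frame audio features with pre-computed frame latents for the UNet.
audio_processor, vae, unet, pe = load_all_model()
fps = get_video_fps("./data/video/demo.mp4")                                   # assumed input path
whisper_chunks = [np.zeros((50, 384), dtype=np.float32) for _ in range(100)]   # stand-in audio features
vae_encode_latents = [torch.zeros(1, 8, 32, 32) for _ in range(100)]           # stand-in frame latents
for whisper_batch, latent_batch in datagen(whisper_chunks, vae_encode_latents, batch_size=8):
    print(whisper_batch.shape, latent_batch.shape)  # feed each batch to the UNet here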