Mirror of https://github.com/aigc3d/LAM_Audio2Expression.git
feat: release LAM-Avatar feature
@@ -21,6 +21,7 @@ To enable ARKit-driven animation of the LAM model, we adapted ARKit blendshapes
 ## 📢 News
+**[May 21, 2025]** We have released an [Avatar Export Feature](https://www.modelscope.cn/studios/Damo_XR_Lab/LAM_Large_Avatar_Model), enabling users to generate facial expressions from audio using any [LAM-generated](https://github.com/aigc3d/LAM) 3D digital human. <br>
 **[April 21, 2025]** We have released the [ModelScope](https://www.modelscope.cn/studios/Damo_XR_Lab/LAM-A2E) Space! <br>
 **[April 21, 2025]** We have released the WebGL Interactive Chatting Avatar SDK on [OpenAvatarChat](https://github.com/HumanAIGC-Engineering/OpenAvatarChat) (including LLM, ASR, TTS, Avatar), with which you can freely chat with our generated 3D Digital Human! 🔥 <br>
@@ -133,9 +133,19 @@ def create_zip_archive(output_zip='assets/arkitWithBSData.zip', base_dir=""):
 
 
 def demo_lam_audio2exp(infer, cfg):
-    def core_fn(image_path: str, audio_params, working_dir):
-        base_id = os.path.basename(image_path).split(".")[0]
+    def core_fn(image_path: str, audio_params, working_dir, input_zip_textbox):
+        if(os.path.exists(input_zip_textbox)):
+            base_id = os.path.basename(input_zip_textbox).split(".")[0]
+            output_dir = os.path.join('assets', 'sample_lam', base_id)
+            # unzip_dir
+            if (not os.path.exists(os.path.join(output_dir, 'arkitWithBSData'))):
+                run_command = 'unzip -d '+output_dir+' '+input_zip_textbox
+                os.system(run_command)
+                rename_command = 'mv '+os.path.join(output_dir,base_id)+' '+os.path.join(output_dir,'arkitWithBSData')
+                os.system(rename_command)
+        else:
+            base_id = os.path.basename(image_path).split(".")[0]
 
         # set input audio
         cfg.audio_input = audio_params
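The new branch above shells out to `unzip` and `mv` via `os.system`. For readers adapting this, here is a minimal standard-library sketch of the same extract-and-rename step — `extract_lam_zip` and its parameters are hypothetical names introduced for illustration, not part of the commit:

```python
import os
import shutil
import zipfile


def extract_lam_zip(input_zip_path: str, output_dir: str) -> str:
    """Sketch of the diff's unzip step: extract a LAM-generated ZIP into
    output_dir/arkitWithBSData without spawning shell subprocesses."""
    base_id = os.path.basename(input_zip_path).split(".")[0]
    target_dir = os.path.join(output_dir, 'arkitWithBSData')
    if not os.path.exists(target_dir):
        # Equivalent of: unzip -d <output_dir> <input_zip_path>
        with zipfile.ZipFile(input_zip_path) as zf:
            zf.extractall(output_dir)
        # Equivalent of: mv <output_dir>/<base_id> <output_dir>/arkitWithBSData
        shutil.move(os.path.join(output_dir, base_id), target_dir)
    return target_dir
```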
@@ -177,7 +187,8 @@ def demo_lam_audio2exp(infer, cfg):
                     width=270,
                     sources='upload',
                     type='filepath',  # 'numpy',
-                    elem_id='content_image')
+                    elem_id='content_image',
+                    interactive=False)
             # EXAMPLES
             with gr.Row():
                 examples = [
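In Gradio, `interactive=False` makes a component display-only: the user can no longer upload an image directly, and the component only receives values set programmatically, here presumably from the EXAMPLES row declared just below it. A sketch of the resulting component, reusing the diff's values:

```python
import gradio as gr

# Display-only image: interactive=False removes direct user uploads; the
# component is populated programmatically (e.g. by the examples gallery).
input_image = gr.Image(width=270,
                       sources='upload',
                       type='filepath',  # 'numpy'
                       elem_id='content_image',
                       interactive=False)
```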
@@ -222,6 +233,12 @@ def demo_lam_audio2exp(infer, cfg):
             # SETTING
             with gr.Row():
                 with gr.Column(variant='panel', scale=1):
+                    input_zip_textbox = gr.Textbox(
+                        label="Input Local Path to LAM-Generated ZIP File",
+                        interactive=True,
+                        placeholder="Input Local Path to LAM-Generated ZIP File",
+                        visible=True
+                    )
                     submit = gr.Button('Generate',
                                        elem_id='lam_generate',
                                        variant='primary')
@@ -246,7 +263,7 @@ def demo_lam_audio2exp(infer, cfg):
         ).success(
             fn=core_fn,
             inputs=[input_image, audio_input,
-                    working_dir],
+                    working_dir, input_zip_textbox],
             outputs=[selected_audio, selected_render_file],
             queue=False,
         ).success(
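Taken together, the last two hunks follow the standard Gradio pattern: a new `Textbox` is declared in the layout, then threaded into the callback through the event's `inputs` list, which maps positionally onto `core_fn`'s parameters. A minimal self-contained sketch of that pattern — the toy callback and component set are illustrative, not the repo's actual layout:

```python
import os

import gradio as gr


def core_fn(image_path, zip_path):
    # Mirrors the diff's branching: prefer the ZIP's basename when the
    # textbox holds a valid local path, else fall back to the image's.
    if zip_path and os.path.exists(zip_path):
        return os.path.basename(zip_path).split(".")[0]
    return os.path.basename(image_path or "unknown.png").split(".")[0]


with gr.Blocks() as demo:
    input_image = gr.Image(type='filepath', interactive=False)
    input_zip_textbox = gr.Textbox(
        label="Input Local Path to LAM-Generated ZIP File")
    base_id_out = gr.Textbox(label="base_id")
    submit = gr.Button('Generate', variant='primary')
    # inputs=[...] maps positionally onto core_fn(image_path, zip_path).
    submit.click(fn=core_fn,
                 inputs=[input_image, input_zip_textbox],
                 outputs=[base_id_out])

demo.launch()
```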