feat: release Modelscope and OpenAvatarChat

Author: fdyuandong
Date: 2025-04-21 18:33:18 +08:00
Parent: 7cc233a737
Commit: 105cfbfc89

5 changed files with 19 additions and 11 deletions


@@ -142,9 +142,13 @@ def demo_lam_audio2exp(infer, cfg):
         cfg.save_json_path = os.path.join("./assets/sample_lam", base_id, 'arkitWithBSData', 'bsData.json')
         infer.infer()
-        create_zip_archive(output_zip='./assets/arkitWithBSData.zip', base_dir=os.path.join("./assets/sample_lam", base_id))
+        output_file_name = base_id+'_'+os.path.basename(audio_params).split(".")[0]+'.zip'
+        assetPrefix = 'gradio_api/file=assets/'
+        output_file_path = os.path.join('./assets',output_file_name)
-        return 'gradio_api/file='+audio_params
+        create_zip_archive(output_zip=output_file_path, base_dir=os.path.join("./assets/sample_lam", base_id))
+        return 'gradio_api/file='+audio_params, assetPrefix+output_file_name

     with gr.Blocks(analytics_enabled=False) as demo:
         logo_url = './assets/images/logo.jpeg'
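
Note: the change above replaces the fixed ./assets/arkitWithBSData.zip with a per-request archive name derived from base_id and the audio file, so concurrent sessions no longer overwrite each other's output. create_zip_archive itself is not shown in this diff; a minimal sketch of a helper with that signature, assuming it simply zips the contents of base_dir using the standard zipfile module:

import os
import zipfile

def create_zip_archive(output_zip, base_dir):
    # Hypothetical sketch, not the repo's actual implementation: pack
    # everything under base_dir into output_zip, storing paths relative
    # to base_dir so the archive layout is independent of the cwd.
    with zipfile.ZipFile(output_zip, "w", zipfile.ZIP_DEFLATED) as zf:
        for root, _, files in os.walk(base_dir):
            for name in files:
                path = os.path.join(root, name)
                zf.write(path, arcname=os.path.relpath(path, base_dir))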
@@ -158,7 +162,7 @@ def demo_lam_audio2exp(infer, cfg):
             """)
         gr.HTML(
-            """<p><h4 style="color: blue;"> Notes: This project leverages audio input to generate ARKit blendshapes-driven facial expressions in ⚡real-time⚡, powering ultra-realistic 3D avatars generated by LAM.</h4></p>"""
+            """<p><h4 style="color: blue;"> Notes: This project leverages audio input to generate ARKit blendshapes-driven facial expressions in ⚡real-time⚡, powering ultra-realistic 3D avatars generated by <a href="https://github.com/aigc3d/LAM">LAM</a>.</h4></p>"""
         )
         # DISPLAY
@@ -224,12 +228,12 @@ def demo_lam_audio2exp(infer, cfg):
         if h5_rendering:
             gr.set_static_paths(Path.cwd().absolute() / "assets/")
-            assetPrefix = 'gradio_api/file=assets/'
             with gr.Row():
-                gs = gaussian_render(width=380, height=680, assets=assetPrefix + 'arkitWithBSData.zip')
+                gs = gaussian_render(width=380, height=680)
         working_dir = gr.State()
         selected_audio = gr.Textbox(visible=False)
+        selected_render_file = gr.Textbox(visible=False)
         submit.click(
             fn=assert_input_image,
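
Note: gr.set_static_paths marks ./assets as a directory the Gradio server may serve directly, which is what makes the 'gradio_api/file=assets/...' URLs returned by core_fn resolvable in the browser. Because the archive name is now per-request, the fixed assets= argument to gaussian_render is dropped and the URL is delivered later through the new hidden selected_render_file textbox. A minimal sketch of the static-path pattern (file name hypothetical):

from pathlib import Path
import gradio as gr

# Serve files under ./assets directly instead of copying them into
# Gradio's cache; they become reachable via the file route.
gr.set_static_paths(Path.cwd().absolute() / "assets/")

output_file_name = "sample_lam_demo.zip"  # hypothetical per-request name
render_url = "gradio_api/file=assets/" + output_file_name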
@@ -243,7 +247,7 @@ def demo_lam_audio2exp(infer, cfg):
             fn=core_fn,
             inputs=[input_image, audio_input,
                     working_dir],
-            outputs=[selected_audio],
+            outputs=[selected_audio, selected_render_file],
             queue=False,
         ).success(
             fn=audio_loading,
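
Note: a Gradio handler that returns a tuple maps its values positionally onto the outputs list, which is why core_fn's new two-value return requires selected_render_file as a second output here. A minimal sketch of the correspondence (component and handler names hypothetical):

import gradio as gr

def handler(audio_path):
    # First value fills the first output component, second the second.
    return "gradio_api/file=" + audio_path, "gradio_api/file=assets/out.zip"

with gr.Blocks() as demo:
    audio_in = gr.Textbox()
    selected_audio = gr.Textbox(visible=False)
    selected_render_file = gr.Textbox(visible=False)
    go = gr.Button("Go")
    go.click(fn=handler, inputs=[audio_in],
             outputs=[selected_audio, selected_render_file])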
@@ -251,7 +255,8 @@ def demo_lam_audio2exp(infer, cfg):
             js='''(output_component) => window.loadAudio(output_component)'''
         ).success(
             fn=do_render(),
-            js='''() => window.start()'''
+            outputs=[selected_render_file],
+            js='''(selected_render_file) => window.start(selected_render_file)'''
         )
     demo.queue()
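
Note: the final .success step hands the archive URL to the page's renderer. A js callback on a Gradio event receives the current values of the components it is wired to, so a frontend-only step can read a hidden textbox that an earlier step populated and pass it to window.start. A minimal sketch of that pattern, shown with fn=None and explicit inputs rather than the diff's do_render() wiring, and assuming window.start is defined by the page's own JS as in the diff:

import gradio as gr

with gr.Blocks() as demo:
    selected_render_file = gr.Textbox(visible=False)
    go = gr.Button("Render")
    chain = go.click(fn=lambda: "gradio_api/file=assets/out.zip",
                     outputs=[selected_render_file])
    # fn=None makes this a frontend-only step; the js function receives
    # the current value of each component listed in `inputs`.
    chain.success(fn=None,
                  inputs=[selected_render_file],
                  js="(render_file) => window.start(render_file)")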