diff --git a/README.md b/README.md
index cf1fefe..b1c904b 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,7 @@ To enable ARKit-driven animation of the LAM model, we adapted ARKit blendshapes
## 📢 News
+**[May 21, 2025]** We have released an [Avatar Export Feature](https://www.modelscope.cn/studios/Damo_XR_Lab/LAM_Large_Avatar_Model), enabling users to generate audio-driven facial expressions for any [LAM-generated](https://github.com/aigc3d/LAM) 3D digital human.
**[April 21, 2025]** We have released the [ModelScope](https://www.modelscope.cn/studios/Damo_XR_Lab/LAM-A2E) Space!
**[April 21, 2025]** We have released the WebGL Interactive Chatting Avatar SDK on [OpenAvatarChat](https://github.com/HumanAIGC-Engineering/OpenAvatarChat) (including LLM, ASR, TTS, and Avatar), with which you can freely chat with our generated 3D Digital Human! 🔥
diff --git a/app_lam_audio2exp.py b/app_lam_audio2exp.py
index 5acc4a0..6d30f08 100644
--- a/app_lam_audio2exp.py
+++ b/app_lam_audio2exp.py
@@ -133,9 +133,19 @@ def create_zip_archive(output_zip='assets/arkitWithBSData.zip', base_dir=""):
def demo_lam_audio2exp(infer, cfg):
-    def core_fn(image_path: str, audio_params, working_dir):
-        base_id = os.path.basename(image_path).split(".")[0]
+    def core_fn(image_path: str, audio_params, working_dir, input_zip_textbox):
+        if input_zip_textbox and os.path.exists(input_zip_textbox):
+            # Derive the asset id from the user-supplied LAM ZIP archive.
+            base_id = os.path.basename(input_zip_textbox).split(".")[0]
+            output_dir = os.path.join('assets', 'sample_lam', base_id)
+            # Extract once; skip if the assets are already unpacked.
+            if not os.path.exists(os.path.join(output_dir, 'arkitWithBSData')):
+                # Quote both paths so the shell command survives spaces.
+                os.system('unzip -d "' + output_dir + '" "' + input_zip_textbox + '"')
+                os.rename(os.path.join(output_dir, base_id),
+                          os.path.join(output_dir, 'arkitWithBSData'))
+        else:
+            base_id = os.path.basename(image_path).split(".")[0]
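+        # NOTE (assumption): the LAM export ZIP is expected to unpack into a
+        # single top-level folder named after the archive, which is why the
+        # extracted folder is renamed to 'arkitWithBSData' above.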
        # set input audio
        cfg.audio_input = audio_params
@@ -177,7 +187,8 @@ def demo_lam_audio2exp(infer, cfg):
                width=270,
                sources='upload',
                type='filepath',  # 'numpy',
-                elem_id='content_image')
+                elem_id='content_image',
+                interactive=False)
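+            # interactive=False makes the image preview read-only: the portrait
+            # is populated from the EXAMPLES gallery below rather than uploaded.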
        # EXAMPLES
        with gr.Row():
            examples = [
@@ -222,6 +233,12 @@ def demo_lam_audio2exp(infer, cfg):
        # SETTING
        with gr.Row():
            with gr.Column(variant='panel', scale=1):
+                input_zip_textbox = gr.Textbox(
+                    label="Local Path to LAM-Generated ZIP File",
+                    placeholder="Path to a ZIP exported from LAM (optional)",
+                    interactive=True,
+                    visible=True
+                )
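+                # Optional input: when empty or invalid, core_fn falls back to
+                # deriving base_id from the selected example image instead.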
                submit = gr.Button('Generate',
                                   elem_id='lam_generate',
                                   variant='primary')
@@ -246,7 +263,7 @@ def demo_lam_audio2exp(infer, cfg):
        ).success(
            fn=core_fn,
            inputs=[input_image, audio_input,
-                    working_dir],
+                    working_dir, input_zip_textbox],
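+            # input_zip_textbox is forwarded to core_fn, which chooses between
+            # the user-supplied ZIP assets and the image-derived defaults.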
            outputs=[selected_audio, selected_render_file],
            queue=False,
        ).success(