diff --git a/README.md b/README.md
index 61f6ccb..3645a9e 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,7 @@
[](https://aigc3d.github.io/projects/LAM/)
[](https://www.apache.org/licenses/LICENSE-2.0)
+[](https://www.modelscope.cn/studios/Damo_XR_Lab/LAM-A2E)
#### This project leverages audio input to generate ARKit blendshapes-driven facial expressions in ⚡real-time⚡, powering ultra-realistic 3D avatars generated by [LAM](https://github.com/aigc3d/LAM).
@@ -14,12 +15,14 @@
## 📢 News
+**[April 21, 2025]** We have released the [ModelScope](https://www.modelscope.cn/studios/Damo_XR_Lab/LAM-A2E) Space!
+**[April 21, 2025]** We have released the WebGL Interactive Chatting Avatar SDK on [OpenAvatarChat](https://github.com/HumanAIGC-Engineering/OpenAvatarChat) (including LLM, ASR, TTS, and Avatar), with which you can chat freely with our generated 3D Digital Human! 🔥
### To do list
- [ ] Release Huggingface space.
-- [ ] Release Modelscope space.
+- [x] Release ModelScope space.
- [ ] Release the LAM-A2E model based on the Flame expression.
-- [ ] Release Interactive Chatting Avatar SDK with [OpenAvatarChat](https://github.com/HumanAIGC-Engineering/OpenAvatarChat), including LLM, ASR, TTS, LAM-Avatars.
+- [x] Release Interactive Chatting Avatar SDK with [OpenAvatarChat](https://github.com/HumanAIGC-Engineering/OpenAvatarChat), including LLM, ASR, TTS, LAM-Avatars.
diff --git a/app_lam_audio2exp.py b/app_lam_audio2exp.py
index 1eb9ef4..5acc4a0 100644
--- a/app_lam_audio2exp.py
+++ b/app_lam_audio2exp.py
@@ -142,9 +142,13 @@ def demo_lam_audio2exp(infer, cfg):
cfg.save_json_path = os.path.join("./assets/sample_lam", base_id, 'arkitWithBSData', 'bsData.json')
infer.infer()
- create_zip_archive(output_zip='./assets/arkitWithBSData.zip', base_dir=os.path.join("./assets/sample_lam", base_id))
- return 'gradio_api/file='+audio_params
+ # Name the archive after the avatar id and the driving audio file instead of a fixed path.
+ output_file_name = base_id + '_' + os.path.basename(audio_params).split(".")[0] + '.zip'
+ assetPrefix = 'gradio_api/file=assets/'
+ output_file_path = os.path.join('./assets', output_file_name)
+ create_zip_archive(output_zip=output_file_path, base_dir=os.path.join("./assets/sample_lam", base_id))
+
+ # Return both the served audio URL and the downloadable blendshape archive URL.
+ return 'gradio_api/file=' + audio_params, assetPrefix + output_file_name
with gr.Blocks(analytics_enabled=False) as demo:
logo_url = './assets/images/logo.jpeg'
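For reference, the handler above now returns two `gradio_api/file=` URLs: the driving audio and the per-request blendshape archive written under `./assets`. The Gradio wiring that consumes them lies outside this hunk; the sketch below only illustrates the two-output plumbing, using a placeholder handler and hypothetical component names (`run_audio2exp`, `audio_url`, `zip_url`) that are not part of the repository.

```python
import os
import gradio as gr

def run_audio2exp(audio_path, base_id="sample"):
    # Placeholder for the real handler: run inference, zip the ARKit blendshape
    # data with create_zip_archive(), and return two URLs served by Gradio.
    output_file_name = base_id + '_' + os.path.basename(audio_path).split(".")[0] + '.zip'
    return 'gradio_api/file=' + audio_path, 'gradio_api/file=assets/' + output_file_name

with gr.Blocks() as sketch:
    audio_in = gr.Audio(type="filepath", label="Driving audio")
    audio_url = gr.Textbox(label="Served audio URL")        # first return value
    zip_url = gr.Textbox(label="Blendshape archive URL")    # second return value
    gr.Button("Generate").click(run_audio2exp, inputs=[audio_in], outputs=[audio_url, zip_url])

# './assets' must be reachable through Gradio's file route for the archive URL to
# resolve, e.g. sketch.launch(allowed_paths=["./assets"]).
```

In the demo itself, the archive URL presumably feeds a download link or the WebGL viewer; the actual components are defined in the `gr.Blocks` section of the app, not shown here.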
@@ -158,7 +162,7 @@ def demo_lam_audio2exp(infer, cfg):
""")
gr.HTML(
- """