Mirror of https://github.com/HumanAIGC-Engineering/gradio-webrtc.git (synced 2026-02-05 18:09:23 +08:00)
Video Bugfix + generator (#96)
* Code
* Fix demo
* move to init

Co-authored-by: Freddy Boulton <freddyboulton@hf-freddy.local>
@@ -1,7 +1,3 @@
-import subprocess
-
-subprocess.run(["pip", "install", "fastrtc==0.0.4.post1"])
-
 import asyncio
 import base64
 import os
@@ -80,12 +76,11 @@ class PhonicHandler(AsyncStreamHandler):
         return super().shutdown()


-def add_to_chatbot(state, chatbot, message):
-    state.append(message)
-    return state, gr.skip()
+def add_to_chatbot(chatbot, message):
+    chatbot.append(message)
+    return chatbot


-state = gr.State(value=[])
 chatbot = gr.Chatbot(type="messages", value=[])
 stream = Stream(
     handler=PhonicHandler(),
@@ -99,7 +94,7 @@ stream = Stream(
             info="Select a voice from the dropdown",
         )
     ],
-    additional_outputs=[state, chatbot],
+    additional_outputs=[chatbot],
     additional_outputs_handler=add_to_chatbot,
     ui_args={
         "title": "Phonic Chat (Powered by FastRTC ⚡️)",
@@ -109,8 +104,8 @@ stream = Stream(
     time_limit=90 if get_space() else None,
 )

-with stream.ui:
-    state.change(lambda s: s, inputs=state, outputs=chatbot)
+# with stream.ui:
+#     state.change(lambda s: s, inputs=state, outputs=chatbot)

 if __name__ == "__main__":
     if (mode := os.getenv("MODE")) == "UI":
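Taken together, the hunks above drop the gr.State round-trip in the Phonic demo: the Chatbot becomes the only additional output, the handler appends the incoming message and returns the updated message list directly, and the old stream.ui state.change sync is left commented out. Below is a minimal sketch of the resulting wiring, assuming the PhonicHandler defined earlier in this demo file; the modality and mode arguments are illustrative and do not appear in this diff.

import gradio as gr
from fastrtc import Stream

chatbot = gr.Chatbot(type="messages", value=[])


def add_to_chatbot(chatbot, message):
    # `message` is the chat message the handler emits as an additional output;
    # with the gr.State removed, the updated message list is returned directly
    # instead of the old (state, gr.skip()) pair.
    chatbot.append(message)
    return chatbot


stream = Stream(
    handler=PhonicHandler(),    # defined earlier in this demo file
    modality="audio",           # assumed; not shown in this diff
    mode="send-receive",        # assumed; not shown in this diff
    additional_outputs=[chatbot],
    additional_outputs_handler=add_to_chatbot,
)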
@@ -38,6 +38,7 @@ def response(
 ):
     gradio_chatbot = gradio_chatbot or []
     conversation_state = conversation_state or []
     print("chatbot", gradio_chatbot)
+
     text = stt_model.stt(audio)
     sample_rate, array = audio
@@ -47,7 +48,6 @@ def response(
     yield AdditionalOutputs(gradio_chatbot, conversation_state)

     conversation_state.append({"role": "user", "content": text})
-
     request = client.chat.completions.create(
         model="meta-llama/Llama-3.2-3B-Instruct",
         messages=conversation_state,  # type: ignore
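The last two hunks touch the second demo's response generator, which pushes UI updates by yielding AdditionalOutputs before calling the LLM. Below is a sketch of that pattern using only the pieces visible in this diff; stt_model, client, and the surrounding Stream wiring come from the rest of that demo and are assumed here.

from fastrtc import AdditionalOutputs


def response(audio, gradio_chatbot=None, conversation_state=None):
    # Guard against the first call, when no previous outputs exist yet.
    gradio_chatbot = gradio_chatbot or []
    conversation_state = conversation_state or []

    # Transcribe the incoming audio (stt_model is set up elsewhere in the demo).
    text = stt_model.stt(audio)
    sample_rate, array = audio

    # ... (lines between the two hunks, not shown in this diff) ...

    # Yield once so additional_outputs_handler can render the user turn right
    # away, then extend the LLM-facing conversation and request a completion.
    yield AdditionalOutputs(gradio_chatbot, conversation_state)

    conversation_state.append({"role": "user", "content": text})
    request = client.chat.completions.create(
        model="meta-llama/Llama-3.2-3B-Instruct",
        messages=conversation_state,  # type: ignore
    )
    # The rest of the generator (beyond this diff) consumes `request` and
    # yields further AdditionalOutputs as the reply is produced.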