Files
gradio-webrtc/frontend/shared/VideoChat/stream_utils.ts
neil.xh f476f9cf29 GS chat integration
This review adds and fleshes out the GS video-chat feature, including front-end and back-end interface definitions, state management, and UI components, and introduces new dependency libraries to support more interactive features.
Link: https://code.alibaba-inc.com/xr-paas/gradio_webrtc/codereview/21273476
* Update the Python part

* Merge the videochat front-end part

* Merge branch 'feature/update-fastrtc-0.0.19' of http://gitlab.alibaba-inc.com/xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Replace audiowave

* Fix import paths

* Merge the websocket-mode logic

* feat: gaussian avatar chat

* Add input parameters for other renderers

* feat: ws connection and usage

* Merge branch 'feature/update-fastrtc-0.0.19' of http://gitlab.alibaba-inc.com/xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Shift the view left when its right edge exceeds the container width

* Pass configuration through

* Merge branch 'feature/update-fastrtc-0.0.19' of gitlab.alibaba-inc.com:xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Fix Gaussian package exception

* Sync webrtc_utils

* Update webrtc_utils

* Support on_chat_datachannel

* Fix the device-name list not displaying correctly

* copy: pass webrtc_id through

* Merge branch 'feature/update-fastrtc-0.0.19' of gitlab.alibaba-inc.com:xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Ensure the websocket connection starts only after webrtc setup completes

* feat: integrate audio expression data

* Upload dist

* Hide the canvas

* feat: expose Gaussian file download progress

* Merge branch 'feature/update-fastrtc-0.0.19' of http://gitlab.alibaba-inc.com/xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Fix failure to acquire permissions

* Merge branch 'feature/update-fastrtc-0.0.19' of gitlab.alibaba-inc.com:xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Request permissions before enumerating devices

* fix: do not process ws data before GS assets finish downloading

* fix: merge

* Adjust copy text

* Merge branch 'feature/update-fastrtc-0.0.19' of gitlab.alibaba-inc.com:xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Fix reverting to the default device when restarting a conversation after switching devices

* Merge branch 'feature/update-fastrtc-0.0.19' of http://gitlab.alibaba-inc.com/xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Update localvideo dimensions

* Merge branch 'feature/update-fastrtc-0.0.19' of gitlab.alibaba-inc.com:xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Do not fall back to "default" by default

* Fix the audio permission issue

* Update the build output

* fix: tie chat-button state to GS assets; remove dead code

* fix: merge

* feat: import the GS rendering module from an npm package

* fix

* Add conversation history

* Merge branch 'feature/update-fastrtc-0.0.19' of http://gitlab.alibaba-inc.com/xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Style changes

* Update packages

* fix: GS digital-human initial position and muting

* Scroll the conversation history to the bottom

* At least 100% height

* Merge branch 'feature/update-fastrtc-0.0.19' of gitlab.alibaba-inc.com:xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Nudge the text box up slightly

* Clear the conversation history when a connection starts

* fix: update gs render npm

* Merge branch 'feature/update-fastrtc-0.0.19' of http://gitlab.alibaba-inc.com/xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Logic safeguards

* Merge branch 'feature/update-fastrtc-0.0.19' of gitlab.alibaba-inc.com:xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* feat: audio init option for starting muted

* Reposition the actions bar when subtitles are shown

* Merge branch 'feature/update-fastrtc-0.0.19' of http://gitlab.alibaba-inc.com/xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Style polish

* feat: add readme

* fix: asset images

* fix: docs

* fix: update gs render sdk

* fix: frame position calculation in GS mode

* fix: update readme

* Device detection; handle overly narrow viewports

* Merge branch 'feature/update-fastrtc-0.0.19' of gitlab.alibaba-inc.com:xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* Separate "has permission" from "has devices" checks

* feat: split GS download and load hook functions

* Merge branch 'feature/update-fastrtc-0.0.19' of http://gitlab.alibaba-inc.com/xr-paas/gradio_webrtc into feature/update-fastrtc-0.0.19

* fix: update gs render sdk

* Replace

* dist

* Upload files

* del
2025-04-16 19:09:04 +08:00

TypeScript

export function get_devices(): Promise<MediaDeviceInfo[]> {
  return navigator.mediaDevices.enumerateDevices();
}

export function handle_error(error: string): void {
  throw new Error(error);
}

export function set_local_stream(
  local_stream: MediaStream | null,
  video_source: HTMLVideoElement,
): void {
  // Attach (or, when null, detach) the local stream to the preview element.
  // The preview is muted so the microphone is not played back to the user.
  video_source.srcObject = local_stream;
  video_source.muted = true;
  video_source.play();
}
export async function get_stream(
  audio: boolean | { deviceId: { exact: string } },
  video: boolean | { deviceId: { exact: string } },
  video_source: HTMLVideoElement,
  track_constraints?:
    | MediaTrackConstraints
    | { video: MediaTrackConstraints; audio: MediaTrackConstraints },
): Promise<MediaStream> {
  // Prefer per-kind constraints when given, then a shared constraints
  // object, then built-in defaults.
  const video_fallback_constraints = (track_constraints as any)?.video ||
    track_constraints || {
      width: { ideal: 500 },
      height: { ideal: 500 },
    };
  const audio_fallback_constraints = (track_constraints as any)?.audio ||
    track_constraints || {
      echoCancellation: true,
      noiseSuppression: true,
      autoGainControl: true,
    };
  const constraints = {
    video:
      typeof video === "object"
        ? { ...video, ...video_fallback_constraints }
        : video,
    audio:
      typeof audio === "object"
        ? { ...audio, ...audio_fallback_constraints }
        : audio,
  };
  return navigator.mediaDevices.getUserMedia(constraints);
}
export function set_available_devices(
  devices: MediaDeviceInfo[],
  kind: "videoinput" | "audioinput" = "videoinput",
): MediaDeviceInfo[] {
  return devices.filter((device: MediaDeviceInfo) => device.kind === kind);
}
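
// Usage sketch (not part of the original file): per the commit "request
// permissions before enumerating devices", a stream is requested first so
// that enumerateDevices() returns populated labels and deviceIds. The name
// `pick_first_camera` is hypothetical, for illustration only.
export async function pick_first_camera(): Promise<
  MediaDeviceInfo | undefined
> {
  // Prompt for permission; without a granted stream, many browsers return
  // devices with empty labels and deviceIds.
  const probe = await navigator.mediaDevices.getUserMedia({ video: true });
  probe.getTracks().forEach((track) => track.stop());
  const devices = await get_devices();
  return set_available_devices(devices, "videoinput")[0];
}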
let video_track: MediaStreamTrack | null = null;
let audio_track: MediaStreamTrack | null = null;

export function createSimulatedVideoTrack(width = 1, height = 1) {
  // Create a hidden canvas; it stays attached to the DOM so captureStream()
  // keeps producing frames, but is shrunk and hidden so it never shows.
  const canvas = document.createElement("canvas");
  document.body.appendChild(canvas);
  canvas.width = width || 500;
  canvas.height = height || 500;
  canvas.style.width = "1px";
  canvas.style.height = "1px";
  canvas.style.position = "fixed";
  canvas.style.visibility = "hidden";
  const ctx = canvas.getContext("2d") as CanvasRenderingContext2D;
  ctx.fillStyle = "hsl(0, 0%, 0%)";
  ctx.fillRect(0, 0, canvas.width, canvas.height);
  let stopped = false;
  // Repaint every animation frame so the captured track keeps emitting frames.
  function drawFrame() {
    if (stopped) return;
    ctx.fillStyle = "rgb(255, 255, 255)";
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    requestAnimationFrame(drawFrame);
  }
  drawFrame();
  // Capture the canvas as a 30 FPS stream and keep its video track.
  const stream = canvas.captureStream(30);
  video_track = stream.getVideoTracks()[0];
  const native_stop = video_track.stop.bind(video_track);
  // Override stop() so stopping the track also halts the draw loop and
  // removes the canvas from the DOM.
  video_track.stop = () => {
    stopped = true;
    native_stop();
    canvas.remove();
  };
  video_track.onended = () => {
    video_track?.stop();
  };
  return video_track;
}
export function createSimulatedAudioTrack() {
  // Reuse the cached silent track if one was already created.
  if (audio_track) return audio_track;
  const AudioContextCtor =
    window.AudioContext || (window as any).webkitAudioContext;
  const audioContext: AudioContext = new AudioContextCtor();
  // A zero-frequency oscillator fed through a zero-gain node produces a
  // valid but silent audio track.
  const oscillator = audioContext.createOscillator();
  oscillator.frequency.setValueAtTime(0, audioContext.currentTime);
  const gainNode = audioContext.createGain();
  gainNode.gain.setValueAtTime(0, audioContext.currentTime);
  const destination = audioContext.createMediaStreamDestination();
  oscillator.connect(gainNode);
  gainNode.connect(destination);
  oscillator.start();
  audio_track = destination.stream.getAudioTracks()[0];
  const native_stop = audio_track.stop.bind(audio_track);
  // Override stop() so stopping the track also closes the audio context.
  audio_track.stop = () => {
    native_stop();
    audioContext.close();
  };
  audio_track.onended = () => {
    audio_track?.stop();
  };
  return audio_track;
}
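
// Usage sketch (assumption, not from the original file): the simulated
// tracks above can stand in for a real camera and microphone so a WebRTC
// offer still negotiates audio and video when capture is unavailable or
// denied. `createSimulatedStream` is a hypothetical helper name.
export function createSimulatedStream(): MediaStream {
  return new MediaStream([
    createSimulatedVideoTrack(500, 500),
    createSimulatedAudioTrack(),
  ]);
}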