diff --git a/demo/nextjs_voice_chat/README.md b/demo/nextjs_voice_chat/README.md
new file mode 100644
index 0000000..3e3dce5
--- /dev/null
+++ b/demo/nextjs_voice_chat/README.md
@@ -0,0 +1,74 @@
+# FastRTC POC
+A simple POC for a fast real-time voice chat application using FastAPI and FastRTC by [rohanprichard](https://github.com/rohanprichard). I wanted to make one as an example with a more production-ready stack, rather than just Gradio.
+
+## Setup
+1. Set your API keys in an `.env` file based on the `.env.example` file
+2. Create a virtual environment and install the dependencies
+ ```bash
+ python3 -m venv env
+ source env/bin/activate
+ pip install -r requirements.txt
+ ```
+
+3. Run the server
+ ```bash
+ ./run.sh
+ ```
+4. Navigate into the frontend directory in another terminal
+ ```bash
+ cd frontend/fastrtc-demo
+ ```
+5. Run the frontend
+ ```bash
+ npm install
+ npm run dev
+ ```
+6. Go to the URL and click the microphone icon to start chatting!
+
+7. Reset chats by clicking the trash button on the bottom right
+
+## Notes
+You can choose not to install the requirements for TTS and STT by removing the `[tts, stt]` from the specifier in the `requirements.txt` file.
+
+- The STT is currently using the ElevenLabs API.
+- The LLM is currently using the OpenAI API.
+- The TTS is currently using the ElevenLabs API.
+- The VAD is currently using the Silero VAD model.
+- You may need to install ffmpeg if you get errors in STT.
+
+The prompt can be changed in the `backend/server.py` file and modified as you like.
+
+### Audio Parameters
+
+#### AlgoOptions
+
+- **audio_chunk_duration**: Length of audio chunks in seconds. Smaller values allow for faster processing but may be less accurate.
+- **started_talking_threshold**: If a chunk has more than this many seconds of speech, the system considers that the user has started talking.
+- **speech_threshold**: After the user has started speaking, if a chunk has less than this many seconds of speech, the system considers that the user has paused.
+
+#### SileroVadOptions
+
+- **threshold**: Speech probability threshold (0.0-1.0). Values above this are considered speech. Higher values are more strict.
+- **min_speech_duration_ms**: Speech segments shorter than this (in milliseconds) are filtered out.
+- **min_silence_duration_ms**: The system waits for this duration of silence (in milliseconds) before considering speech to be finished.
+- **speech_pad_ms**: Padding added to both ends of detected speech segments to prevent cutting off words.
+- **max_speech_duration_s**: Maximum allowed duration for a speech segment in seconds. Prevents indefinite listening.
+
+### Tuning Recommendations
+
+- If the AI interrupts you too early:
+ - Increase `min_silence_duration_ms`
+  - Decrease `speech_threshold`
+ - Increase `speech_pad_ms`
+
+- If the AI is slow to respond after you finish speaking:
+ - Decrease `min_silence_duration_ms`
+  - Increase `speech_threshold`
+
+- If the system fails to detect some speech:
+ - Lower the `threshold` value
+ - Decrease `started_talking_threshold`
+
+
+## Credits:
+Credit for the UI components goes to Shadcn, Aceternity UI and Kokonut UI.
diff --git a/demo/nextjs_voice_chat/backend/env.py b/demo/nextjs_voice_chat/backend/env.py
new file mode 100644
index 0000000..f678f64
--- /dev/null
+++ b/demo/nextjs_voice_chat/backend/env.py
@@ -0,0 +1,7 @@
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+
+LLM_API_KEY = os.getenv("LLM_API_KEY")
+ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY")
diff --git a/demo/nextjs_voice_chat/backend/server.py b/demo/nextjs_voice_chat/backend/server.py
new file mode 100644
index 0000000..ef3b687
--- /dev/null
+++ b/demo/nextjs_voice_chat/backend/server.py
@@ -0,0 +1,133 @@
+import fastapi
+from fastapi.responses import FileResponse
+from fastrtc import ReplyOnPause, Stream, AlgoOptions, SileroVadOptions
+from fastrtc.utils import audio_to_bytes
+from openai import OpenAI
+import logging
+import time
+from fastapi.middleware.cors import CORSMiddleware
+from elevenlabs import VoiceSettings, stream
+from elevenlabs.client import ElevenLabs
+import numpy as np
+import io
+
+from .env import LLM_API_KEY, ELEVENLABS_API_KEY
+
+
+sys_prompt = """
+You are a helpful assistant. You are witty, engaging and fun. You love being interactive with the user.
+You also can add minimalistic utterances like 'uh-huh' or 'mm-hmm' to the conversation to make it more natural. However, only vocalization are allowed, no actions or other non-vocal sounds.
+Begin a conversation with a self-deprecating joke like 'I'm not sure if I'm ready for this...' or 'I bet you already regret clicking that button...'
+"""
+
+messages = [{"role": "system", "content": sys_prompt}]
+
+openai_client = OpenAI(
+ api_key=LLM_API_KEY
+)
+
+elevenlabs_client = ElevenLabs(api_key=ELEVENLABS_API_KEY)
+
+logging.basicConfig(level=logging.INFO)
+
+def echo(audio):
+
+ stt_time = time.time()
+
+ logging.info("Performing STT")
+
+ transcription = elevenlabs_client.speech_to_text.convert(
+ file=audio_to_bytes(audio),
+ model_id="scribe_v1",
+ tag_audio_events=False,
+ language_code="eng",
+ diarize=False,
+ )
+ prompt = transcription.text
+ if prompt == "":
+ logging.info("STT returned empty string")
+ return
+ logging.info(f"STT response: {prompt}")
+
+ messages.append({"role": "user", "content": prompt})
+
+ logging.info(f"STT took {time.time() - stt_time} seconds")
+
+ llm_time = time.time()
+
+ def text_stream():
+ global full_response
+ full_response = ""
+
+ response = openai_client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=messages,
+ max_tokens=200,
+ stream=True
+ )
+
+ for chunk in response:
+ if chunk.choices[0].finish_reason == "stop":
+ break
+ if chunk.choices[0].delta.content:
+ full_response += chunk.choices[0].delta.content
+ yield chunk.choices[0].delta.content
+
+ audio_stream = elevenlabs_client.generate(
+ text=text_stream(),
+ voice="Rachel", # Cassidy is also really good
+ voice_settings=VoiceSettings(
+ similarity_boost=0.9,
+ stability=0.6,
+ style=0.4,
+ speed=1
+ ),
+ model="eleven_multilingual_v2",
+ output_format="pcm_24000",
+ stream=True
+ )
+
+ for audio_chunk in audio_stream:
+ audio_array = np.frombuffer(audio_chunk, dtype=np.int16).astype(np.float32) / 32768.0
+ yield (24000, audio_array)
+
+ messages.append({"role": "assistant", "content": full_response + " "})
+ logging.info(f"LLM response: {full_response}")
+ logging.info(f"LLM took {time.time() - llm_time} seconds")
+
+
+stream = Stream(ReplyOnPause(echo,
+ algo_options=AlgoOptions(
+ audio_chunk_duration=0.5,
+ started_talking_threshold=0.1,
+ speech_threshold=0.03
+ ),
+ model_options=SileroVadOptions(
+ threshold=0.75,
+ min_speech_duration_ms=250,
+ min_silence_duration_ms=1500,
+ speech_pad_ms=400,
+ max_speech_duration_s=15
+ )),
+ modality="audio",
+ mode="send-receive"
+ )
+
+app = fastapi.FastAPI()
+
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"],
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+stream.mount(app)
+
+@app.get("/reset")
+async def reset():
+ global messages
+ logging.info("Resetting chat")
+ messages = [{"role": "system", "content": sys_prompt}]
+ return {"status": "success"}
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/.gitignore b/demo/nextjs_voice_chat/frontend/fastrtc-demo/.gitignore
new file mode 100644
index 0000000..5ef6a52
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/.gitignore
@@ -0,0 +1,41 @@
+# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+
+# dependencies
+/node_modules
+/.pnp
+.pnp.*
+.yarn/*
+!.yarn/patches
+!.yarn/plugins
+!.yarn/releases
+!.yarn/versions
+
+# testing
+/coverage
+
+# next.js
+/.next/
+/out/
+
+# production
+/build
+
+# misc
+.DS_Store
+*.pem
+
+# debug
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+.pnpm-debug.log*
+
+# env files (can opt-in for committing if needed)
+.env*
+
+# vercel
+.vercel
+
+# typescript
+*.tsbuildinfo
+next-env.d.ts
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/README.md b/demo/nextjs_voice_chat/frontend/fastrtc-demo/README.md
new file mode 100644
index 0000000..e215bc4
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/README.md
@@ -0,0 +1,36 @@
+This is a [Next.js](https://nextjs.org) project bootstrapped with [`create-next-app`](https://nextjs.org/docs/app/api-reference/cli/create-next-app).
+
+## Getting Started
+
+First, run the development server:
+
+```bash
+npm run dev
+# or
+yarn dev
+# or
+pnpm dev
+# or
+bun dev
+```
+
+Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
+
+You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file.
+
+This project uses [`next/font`](https://nextjs.org/docs/app/building-your-application/optimizing/fonts) to automatically optimize and load [Geist](https://vercel.com/font), a new font family for Vercel.
+
+## Learn More
+
+To learn more about Next.js, take a look at the following resources:
+
+- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
+- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
+
+You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js) - your feedback and contributions are welcome!
+
+## Deploy on Vercel
+
+The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.
+
+Check out our [Next.js deployment documentation](https://nextjs.org/docs/app/building-your-application/deploying) for more details.
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/app/favicon.ico b/demo/nextjs_voice_chat/frontend/fastrtc-demo/app/favicon.ico
new file mode 100644
index 0000000..718d6fe
Binary files /dev/null and b/demo/nextjs_voice_chat/frontend/fastrtc-demo/app/favicon.ico differ
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/app/globals.css b/demo/nextjs_voice_chat/frontend/fastrtc-demo/app/globals.css
new file mode 100644
index 0000000..7ae6ba4
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/app/globals.css
@@ -0,0 +1,130 @@
+@import "tailwindcss";
+
+@plugin "tailwindcss-animate";
+
+@custom-variant dark (&:is(.dark *));
+
+@theme inline {
+ --color-background: var(--background);
+ --color-foreground: var(--foreground);
+ --font-sans: var(--font-geist-sans);
+ --font-mono: var(--font-geist-mono);
+ --color-sidebar-ring: var(--sidebar-ring);
+ --color-sidebar-border: var(--sidebar-border);
+ --color-sidebar-accent-foreground: var(--sidebar-accent-foreground);
+ --color-sidebar-accent: var(--sidebar-accent);
+ --color-sidebar-primary-foreground: var(--sidebar-primary-foreground);
+ --color-sidebar-primary: var(--sidebar-primary);
+ --color-sidebar-foreground: var(--sidebar-foreground);
+ --color-sidebar: var(--sidebar);
+ --color-chart-5: var(--chart-5);
+ --color-chart-4: var(--chart-4);
+ --color-chart-3: var(--chart-3);
+ --color-chart-2: var(--chart-2);
+ --color-chart-1: var(--chart-1);
+ --color-ring: var(--ring);
+ --color-input: var(--input);
+ --color-border: var(--border);
+ --color-destructive-foreground: var(--destructive-foreground);
+ --color-destructive: var(--destructive);
+ --color-accent-foreground: var(--accent-foreground);
+ --color-accent: var(--accent);
+ --color-muted-foreground: var(--muted-foreground);
+ --color-muted: var(--muted);
+ --color-secondary-foreground: var(--secondary-foreground);
+ --color-secondary: var(--secondary);
+ --color-primary-foreground: var(--primary-foreground);
+ --color-primary: var(--primary);
+ --color-popover-foreground: var(--popover-foreground);
+ --color-popover: var(--popover);
+ --color-card-foreground: var(--card-foreground);
+ --color-card: var(--card);
+ --radius-sm: calc(var(--radius) - 4px);
+ --radius-md: calc(var(--radius) - 2px);
+ --radius-lg: var(--radius);
+ --radius-xl: calc(var(--radius) + 4px);
+}
+
+:root {
+ --background: oklch(1 0 0);
+ --foreground: oklch(0.129 0.042 264.695);
+ --card: oklch(1 0 0);
+ --card-foreground: oklch(0.129 0.042 264.695);
+ --popover: oklch(1 0 0);
+ --popover-foreground: oklch(0.129 0.042 264.695);
+ --primary: oklch(0.208 0.042 265.755);
+ --primary-foreground: oklch(0.984 0.003 247.858);
+ --secondary: oklch(0.968 0.007 247.896);
+ --secondary-foreground: oklch(0.208 0.042 265.755);
+ --muted: oklch(0.968 0.007 247.896);
+ --muted-foreground: oklch(0.554 0.046 257.417);
+ --accent: oklch(0.968 0.007 247.896);
+ --accent-foreground: oklch(0.208 0.042 265.755);
+ --destructive: oklch(0.577 0.245 27.325);
+ --destructive-foreground: oklch(0.577 0.245 27.325);
+ --border: oklch(0.929 0.013 255.508);
+ --input: oklch(0.929 0.013 255.508);
+ --ring: oklch(0.704 0.04 256.788);
+ --chart-1: oklch(0.646 0.222 41.116);
+ --chart-2: oklch(0.6 0.118 184.704);
+ --chart-3: oklch(0.398 0.07 227.392);
+ --chart-4: oklch(0.828 0.189 84.429);
+ --chart-5: oklch(0.769 0.188 70.08);
+ --radius: 0.625rem;
+ --sidebar: oklch(0.984 0.003 247.858);
+ --sidebar-foreground: oklch(0.129 0.042 264.695);
+ --sidebar-primary: oklch(0.208 0.042 265.755);
+ --sidebar-primary-foreground: oklch(0.984 0.003 247.858);
+ --sidebar-accent: oklch(0.968 0.007 247.896);
+ --sidebar-accent-foreground: oklch(0.208 0.042 265.755);
+ --sidebar-border: oklch(0.929 0.013 255.508);
+ --sidebar-ring: oklch(0.704 0.04 256.788);
+}
+
+.dark {
+ --background: oklch(0.129 0.042 264.695);
+ --foreground: oklch(0.984 0.003 247.858);
+ --card: oklch(0.129 0.042 264.695);
+ --card-foreground: oklch(0.984 0.003 247.858);
+ --popover: oklch(0.129 0.042 264.695);
+ --popover-foreground: oklch(0.984 0.003 247.858);
+ --primary: oklch(0.984 0.003 247.858);
+ --primary-foreground: oklch(0.208 0.042 265.755);
+ --secondary: oklch(0.279 0.041 260.031);
+ --secondary-foreground: oklch(0.984 0.003 247.858);
+ --muted: oklch(0.279 0.041 260.031);
+ --muted-foreground: oklch(0.704 0.04 256.788);
+ --accent: oklch(0.279 0.041 260.031);
+ --accent-foreground: oklch(0.984 0.003 247.858);
+ --destructive: oklch(0.396 0.141 25.723);
+ --destructive-foreground: oklch(0.637 0.237 25.331);
+ --border: oklch(0.279 0.041 260.031);
+ --input: oklch(0.279 0.041 260.031);
+ --ring: oklch(0.446 0.043 257.281);
+ --chart-1: oklch(0.488 0.243 264.376);
+ --chart-2: oklch(0.696 0.17 162.48);
+ --chart-3: oklch(0.769 0.188 70.08);
+ --chart-4: oklch(0.627 0.265 303.9);
+ --chart-5: oklch(0.645 0.246 16.439);
+ --sidebar: oklch(0.208 0.042 265.755);
+ --sidebar-foreground: oklch(0.984 0.003 247.858);
+ --sidebar-primary: oklch(0.488 0.243 264.376);
+ --sidebar-primary-foreground: oklch(0.984 0.003 247.858);
+ --sidebar-accent: oklch(0.279 0.041 260.031);
+ --sidebar-accent-foreground: oklch(0.984 0.003 247.858);
+ --sidebar-border: oklch(0.279 0.041 260.031);
+ --sidebar-ring: oklch(0.446 0.043 257.281);
+}
+
+@layer base {
+ * {
+ @apply border-border outline-ring/50;
+ }
+ body {
+ @apply bg-background text-foreground;
+ }
+}
+
+.no-transitions * {
+ transition: none !important;
+}
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/app/layout.tsx b/demo/nextjs_voice_chat/frontend/fastrtc-demo/app/layout.tsx
new file mode 100644
index 0000000..428c1d1
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/app/layout.tsx
@@ -0,0 +1,44 @@
+import type { Metadata } from "next";
+import { Geist, Geist_Mono } from "next/font/google";
+import "./globals.css";
+import { ThemeProvider } from "@/components/theme-provider";
+import { ThemeTransition } from "@/components/ui/theme-transition";
+
+const geistSans = Geist({
+ variable: "--font-geist-sans",
+ subsets: ["latin"],
+});
+
+const geistMono = Geist_Mono({
+ variable: "--font-geist-mono",
+ subsets: ["latin"],
+});
+
+export const metadata: Metadata = {
+ title: "FastRTC Demo",
+ description: "Interactive WebRTC demo with audio visualization",
+};
+
+export default function RootLayout({
+ children,
+}: Readonly<{
+ children: React.ReactNode;
+}>) {
+ return (
+
+
+
+ {children}
+
+
+
+
+ );
+}
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/app/page.tsx b/demo/nextjs_voice_chat/frontend/fastrtc-demo/app/page.tsx
new file mode 100644
index 0000000..fe41cea
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/app/page.tsx
@@ -0,0 +1,16 @@
+import { BackgroundCircleProvider } from "@/components/background-circle-provider";
+import { ThemeToggle } from "@/components/ui/theme-toggle";
+import { ResetChat } from "@/components/ui/reset-chat";
+export default function Home() {
+ return (
+
+ );
+}
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/components.json b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components.json
new file mode 100644
index 0000000..a08feaa
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components.json
@@ -0,0 +1,21 @@
+{
+ "$schema": "https://ui.shadcn.com/schema.json",
+ "style": "new-york",
+ "rsc": true,
+ "tsx": true,
+ "tailwind": {
+ "config": "",
+ "css": "app/globals.css",
+ "baseColor": "slate",
+ "cssVariables": true,
+ "prefix": ""
+ },
+ "aliases": {
+ "components": "@/components",
+ "utils": "@/lib/utils",
+ "ui": "@/components/ui",
+ "lib": "@/lib",
+ "hooks": "@/hooks"
+ },
+ "iconLibrary": "lucide"
+}
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/background-circle-provider.tsx b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/background-circle-provider.tsx
new file mode 100644
index 0000000..eb0925b
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/background-circle-provider.tsx
@@ -0,0 +1,123 @@
+"use client"
+
+import { useState, useEffect, useRef, useCallback } from "react";
+import { BackgroundCircles } from "@/components/ui/background-circles";
+import { AIVoiceInput } from "@/components/ui/ai-voice-input";
+import { WebRTCClient } from "@/lib/webrtc-client";
+
+export function BackgroundCircleProvider() {
+ const [currentVariant, setCurrentVariant] =
+ useState("octonary");
+ const [isConnected, setIsConnected] = useState(false);
+ const [webrtcClient, setWebrtcClient] = useState(null);
+ const [audioLevel, setAudioLevel] = useState(0);
+ const audioRef = useRef(null);
+
+ // Memoize callbacks to prevent recreation on each render
+ const handleConnected = useCallback(() => setIsConnected(true), []);
+ const handleDisconnected = useCallback(() => setIsConnected(false), []);
+
+ const handleAudioStream = useCallback((stream: MediaStream) => {
+ if (audioRef.current) {
+ audioRef.current.srcObject = stream;
+ }
+ }, []);
+
+ const handleAudioLevel = useCallback((level: number) => {
+ // Apply some smoothing to the audio level
+ setAudioLevel(prev => prev * 0.7 + level * 0.3);
+ }, []);
+
+ // Get all available variants
+ const variants = Object.keys(
+ COLOR_VARIANTS
+ ) as (keyof typeof COLOR_VARIANTS)[];
+
+ // Function to change to the next color variant
+ const changeVariant = () => {
+ const currentIndex = variants.indexOf(currentVariant);
+ const nextVariant = variants[(currentIndex + 1) % variants.length];
+ setCurrentVariant(nextVariant);
+ };
+
+ useEffect(() => {
+ // Initialize WebRTC client with memoized callbacks
+ const client = new WebRTCClient({
+ onConnected: handleConnected,
+ onDisconnected: handleDisconnected,
+ onAudioStream: handleAudioStream,
+ onAudioLevel: handleAudioLevel
+ });
+ setWebrtcClient(client);
+
+ return () => {
+ client.disconnect();
+ };
+ }, [handleConnected, handleDisconnected, handleAudioStream, handleAudioLevel]);
+
+ const handleStart = () => {
+ webrtcClient?.connect();
+ };
+
+ const handleStop = () => {
+ webrtcClient?.disconnect();
+ };
+
+ return (
+
+ );
+}
+
+export default { BackgroundCircleProvider }
+
+const COLOR_VARIANTS = {
+ primary: {
+ border: [
+ "border-emerald-500/60",
+ "border-cyan-400/50",
+ "border-slate-600/30",
+ ],
+ gradient: "from-emerald-500/30",
+ },
+ secondary: {
+ border: [
+ "border-violet-500/60",
+ "border-fuchsia-400/50",
+ "border-slate-600/30",
+ ],
+ gradient: "from-violet-500/30",
+ },
+ senary: {
+ border: [
+ "border-blue-500/60",
+ "border-sky-400/50",
+ "border-slate-600/30",
+ ],
+ gradient: "from-blue-500/30",
+ }, // blue
+ octonary: {
+ border: [
+ "border-red-500/60",
+ "border-rose-400/50",
+ "border-slate-600/30",
+ ],
+ gradient: "from-red-500/30",
+ },
+} as const;
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/theme-provider.tsx b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/theme-provider.tsx
new file mode 100644
index 0000000..896e023
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/theme-provider.tsx
@@ -0,0 +1,101 @@
+"use client";
+
+import { createContext, useContext, useEffect, useState } from "react";
+
+type Theme = "light" | "dark" | "system";
+
+type ThemeProviderProps = {
+ children: React.ReactNode;
+ defaultTheme?: Theme;
+ storageKey?: string;
+ attribute?: string;
+ enableSystem?: boolean;
+ disableTransitionOnChange?: boolean;
+};
+
+type ThemeProviderState = {
+ theme: Theme;
+ setTheme: (theme: Theme) => void;
+};
+
+const initialState: ThemeProviderState = {
+ theme: "system",
+ setTheme: () => null,
+};
+
+const ThemeProviderContext = createContext(initialState);
+
+export function ThemeProvider({
+ children,
+ defaultTheme = "system",
+ storageKey = "theme",
+ attribute = "class",
+ enableSystem = true,
+ disableTransitionOnChange = false,
+ ...props
+}: ThemeProviderProps) {
+ const [theme, setTheme] = useState(defaultTheme);
+
+ useEffect(() => {
+ const savedTheme = localStorage.getItem(storageKey) as Theme | null;
+
+ if (savedTheme) {
+ setTheme(savedTheme);
+ } else if (defaultTheme === "system" && enableSystem) {
+ const systemTheme = window.matchMedia("(prefers-color-scheme: dark)").matches
+ ? "dark"
+ : "light";
+ setTheme(systemTheme);
+ }
+ }, [defaultTheme, storageKey, enableSystem]);
+
+ useEffect(() => {
+ const root = window.document.documentElement;
+
+ if (disableTransitionOnChange) {
+ root.classList.add("no-transitions");
+
+ // Force a reflow
+ window.getComputedStyle(root).getPropertyValue("opacity");
+
+ setTimeout(() => {
+ root.classList.remove("no-transitions");
+ }, 0);
+ }
+
+ root.classList.remove("light", "dark");
+
+ if (theme === "system" && enableSystem) {
+ const systemTheme = window.matchMedia("(prefers-color-scheme: dark)").matches
+ ? "dark"
+ : "light";
+ root.classList.add(systemTheme);
+ } else {
+ root.classList.add(theme);
+ }
+
+ localStorage.setItem(storageKey, theme);
+ }, [theme, storageKey, enableSystem, disableTransitionOnChange]);
+
+ const value = {
+ theme,
+ setTheme: (theme: Theme) => {
+ setTheme(theme);
+ },
+ };
+
+ return (
+
+ {children}
+
+ );
+}
+
+export const useTheme = () => {
+ const context = useContext(ThemeProviderContext);
+
+ if (context === undefined)
+ throw new Error("useTheme must be used within a ThemeProvider");
+
+ return context;
+};
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/ai-voice-input.tsx b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/ai-voice-input.tsx
new file mode 100644
index 0000000..f3558b8
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/ai-voice-input.tsx
@@ -0,0 +1,114 @@
+"use client";
+
+import { Mic, Square } from "lucide-react";
+import { useState, useEffect } from "react";
+import { cn } from "@/lib/utils";
+
+interface AIVoiceInputProps {
+ onStart?: () => void;
+ onStop?: (duration: number) => void;
+ isConnected?: boolean;
+ className?: string;
+}
+
+export function AIVoiceInput({
+ onStart,
+ onStop,
+ isConnected = false,
+ className
+}: AIVoiceInputProps) {
+ const [active, setActive] = useState(false);
+ const [time, setTime] = useState(0);
+ const [isClient, setIsClient] = useState(false);
+ const [status, setStatus] = useState<'disconnected' | 'connecting' | 'connected'>('disconnected');
+
+ useEffect(() => {
+ setIsClient(true);
+ }, []);
+
+ useEffect(() => {
+ let intervalId: NodeJS.Timeout;
+
+ if (active) {
+ intervalId = setInterval(() => {
+ setTime((t) => t + 1);
+ }, 1000);
+ } else {
+ setTime(0);
+ }
+
+ return () => clearInterval(intervalId);
+ }, [active]);
+
+ useEffect(() => {
+ if (isConnected) {
+ setStatus('connected');
+ setActive(true);
+ } else {
+ setStatus('disconnected');
+ setActive(false);
+ }
+ }, [isConnected]);
+
+ const formatTime = (seconds: number) => {
+ const mins = Math.floor(seconds / 60);
+ const secs = seconds % 60;
+ return `${mins.toString().padStart(2, "0")}:${secs.toString().padStart(2, "0")}`;
+ };
+
+ const handleStart = () => {
+ setStatus('connecting');
+ onStart?.();
+ };
+
+ const handleStop = () => {
+ onStop?.(time);
+ setStatus('disconnected');
+ };
+
+ return (
+
+
+
+ {status === 'connected' ? 'Connected' : status === 'connecting' ? 'Connecting...' : 'Disconnected'}
+
+
+
+
+
+ {formatTime(time)}
+
+
+
+ );
+}
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/background-circles.tsx b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/background-circles.tsx
new file mode 100644
index 0000000..c899496
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/background-circles.tsx
@@ -0,0 +1,309 @@
+"use client";
+
+import { motion } from "framer-motion";
+import clsx from "clsx";
+import { useState, useEffect } from "react";
+
+interface BackgroundCirclesProps {
+ title?: string;
+ description?: string;
+ className?: string;
+ variant?: keyof typeof COLOR_VARIANTS;
+ audioLevel?: number;
+ isActive?: boolean;
+}
+
+const COLOR_VARIANTS = {
+ primary: {
+ border: [
+ "border-emerald-500/60",
+ "border-cyan-400/50",
+ "border-slate-600/30",
+ ],
+ gradient: "from-emerald-500/30",
+ },
+ secondary: {
+ border: [
+ "border-violet-500/60",
+ "border-fuchsia-400/50",
+ "border-slate-600/30",
+ ],
+ gradient: "from-violet-500/30",
+ },
+ tertiary: {
+ border: [
+ "border-orange-500/60",
+ "border-yellow-400/50",
+ "border-slate-600/30",
+ ],
+ gradient: "from-orange-500/30",
+ },
+ quaternary: {
+ border: [
+ "border-purple-500/60",
+ "border-pink-400/50",
+ "border-slate-600/30",
+ ],
+ gradient: "from-purple-500/30",
+ },
+ quinary: {
+ border: [
+ "border-red-500/60",
+ "border-rose-400/50",
+ "border-slate-600/30",
+ ],
+ gradient: "from-red-500/30",
+ }, // red
+ senary: {
+ border: [
+ "border-blue-500/60",
+ "border-sky-400/50",
+ "border-slate-600/30",
+ ],
+ gradient: "from-blue-500/30",
+ }, // blue
+ septenary: {
+ border: [
+ "border-gray-500/60",
+ "border-gray-400/50",
+ "border-slate-600/30",
+ ],
+ gradient: "from-gray-500/30",
+ },
+ octonary: {
+ border: [
+ "border-red-500/60",
+ "border-rose-400/50",
+ "border-slate-600/30",
+ ],
+ gradient: "from-red-500/30",
+ },
+} as const;
+
+const AnimatedGrid = () => (
+
+
+
+);
+
+export function BackgroundCircles({
+ title = "",
+ description = "",
+ className,
+ variant = "octonary",
+ audioLevel = 0,
+ isActive = false,
+}: BackgroundCirclesProps) {
+ const variantStyles = COLOR_VARIANTS[variant];
+ const [animationParams, setAnimationParams] = useState({
+ scale: 1,
+ duration: 5,
+ intensity: 0
+ });
+ const [isLoaded, setIsLoaded] = useState(false);
+
+ // Initial page load animation
+ useEffect(() => {
+ // Small delay to ensure the black screen is visible first
+ const timer = setTimeout(() => {
+ setIsLoaded(true);
+ }, 300);
+
+ return () => clearTimeout(timer);
+ }, []);
+
+ // Update animation based on audio level
+ useEffect(() => {
+ if (isActive && audioLevel > 0) {
+ // Simple enhancement of audio level for more dramatic effect
+ const enhancedLevel = Math.min(1, audioLevel * 1.5);
+
+ setAnimationParams({
+ scale: 1 + enhancedLevel * 0.3,
+ duration: Math.max(2, 5 - enhancedLevel * 3),
+ intensity: enhancedLevel
+ });
+ } else if (animationParams.intensity > 0) {
+ // Only reset if we need to (prevents unnecessary updates)
+ const timer = setTimeout(() => {
+ setAnimationParams({
+ scale: 1,
+ duration: 5,
+ intensity: 0
+ });
+ }, 300);
+
+ return () => clearTimeout(timer);
+ }
+ }, [audioLevel, isActive, animationParams.intensity]);
+
+ return (
+ <>
+ {/* Initial black overlay that fades out */}
+
+
+
+
+
+ {[0, 1, 2].map((i) => (
+
+
+
+ ))}
+
+
+
+
+
+
+ {/* Additional glow that appears only during high audio levels */}
+ {isActive && animationParams.intensity > 0.4 && (
+
+ )}
+
+
+ >
+ );
+}
+
+export function DemoCircles() {
+ const [currentVariant, setCurrentVariant] =
+ useState("octonary");
+
+ const variants = Object.keys(
+ COLOR_VARIANTS
+ ) as (keyof typeof COLOR_VARIANTS)[];
+
+ function getNextVariant() {
+ const currentIndex = variants.indexOf(currentVariant);
+ const nextVariant = variants[(currentIndex + 1) % variants.length];
+ return nextVariant;
+ }
+
+ return (
+ <>
+
+
+
+
+ >
+ );
+}
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/reset-chat.tsx b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/reset-chat.tsx
new file mode 100644
index 0000000..b53a21a
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/reset-chat.tsx
@@ -0,0 +1,18 @@
+"use client"
+
+import { Trash } from "lucide-react"
+
+export function ResetChat() {
+ return (
+
+ )
+}
+
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/theme-toggle.tsx b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/theme-toggle.tsx
new file mode 100644
index 0000000..a6ef0d8
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/theme-toggle.tsx
@@ -0,0 +1,61 @@
+"use client";
+
+import { useTheme } from "@/components/theme-provider";
+import { cn } from "@/lib/utils";
+import { Moon, Sun } from "lucide-react";
+import { useRef } from "react";
+
+interface ThemeToggleProps {
+ className?: string;
+}
+
+export function ThemeToggle({ className }: ThemeToggleProps) {
+ const { theme } = useTheme();
+ const buttonRef = useRef(null);
+
+ const toggleTheme = () => {
+ // Instead of directly changing the theme, dispatch a custom event
+ const newTheme = theme === "light" ? "dark" : "light";
+
+ // Dispatch custom event with the new theme
+ window.dispatchEvent(
+ new CustomEvent('themeToggleRequest', {
+ detail: { theme: newTheme }
+ })
+ );
+ };
+
+ return (
+
+ );
+}
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/theme-transition.tsx b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/theme-transition.tsx
new file mode 100644
index 0000000..caf9601
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/components/ui/theme-transition.tsx
@@ -0,0 +1,120 @@
+"use client";
+
+import { useTheme } from "@/components/theme-provider";
+import { useEffect, useState } from "react";
+import { motion, AnimatePresence } from "framer-motion";
+
+// Props for ThemeTransition; `className` allows caller styling overrides.
+interface ThemeTransitionProps {
+    className?: string;
+}
+
+// Animated theme-change overlay. Listens for the 'themeToggleRequest'
+// CustomEvent (dispatched by ThemeToggle), then:
+//   1. starts an animation originating at the last pointer position,
+//   2. applies the real theme via setTheme() ~400ms in (mid-animation),
+//   3. clears animation state at ~1000ms.
+// NOTE(review): the returned JSX appears stripped from this view; presumably
+// it renders a framer-motion circle keyed off `position`/`visualTheme` inside
+// <AnimatePresence> — confirm, since `visualTheme` is otherwise unused here.
+export function ThemeTransition({ className }: ThemeTransitionProps) {
+    const { theme, setTheme } = useTheme();
+    // Last known pointer position: the animation's origin point.
+    const [position, setPosition] = useState({ x: 0, y: 0 });
+    const [isAnimating, setIsAnimating] = useState(false);
+    // Theme requested but not yet applied; applied mid-animation below.
+    const [pendingTheme, setPendingTheme] = useState(null);
+    // Theme shown by the overlay, switched immediately for visual continuity.
+    const [visualTheme, setVisualTheme] = useState(theme);
+
+    // Track mouse/touch position for click events
+    useEffect(() => {
+        const handleMouseMove = (e: MouseEvent) => {
+            setPosition({ x: e.clientX, y: e.clientY });
+        };
+
+        const handleTouchMove = (e: TouchEvent) => {
+            if (e.touches[0]) {
+                setPosition({ x: e.touches[0].clientX, y: e.touches[0].clientY });
+            }
+        };
+
+        window.addEventListener("mousemove", handleMouseMove);
+        window.addEventListener("touchmove", handleTouchMove);
+
+        return () => {
+            window.removeEventListener("mousemove", handleMouseMove);
+            window.removeEventListener("touchmove", handleTouchMove);
+        };
+    }, []);
+
+    // Listen for theme toggle requests
+    useEffect(() => {
+        // Custom event for theme toggle requests
+        const handleThemeToggle = (e: CustomEvent) => {
+            if (isAnimating) return; // Prevent multiple animations
+
+            const newTheme = e.detail.theme;
+            if (newTheme === theme) return;
+
+            // Store the pending theme but don't apply it yet
+            setPendingTheme(newTheme);
+            setIsAnimating(true);
+
+            // The actual theme will be applied mid-animation
+        };
+
+        window.addEventListener('themeToggleRequest' as any, handleThemeToggle as EventListener);
+
+        return () => {
+            window.removeEventListener('themeToggleRequest' as any, handleThemeToggle as EventListener);
+        };
+    }, [theme, isAnimating]);
+
+    // Apply the theme change mid-animation
+    useEffect(() => {
+        if (isAnimating && pendingTheme) {
+            // Set visual theme immediately for the animation
+            setVisualTheme(pendingTheme);
+
+            // Apply the actual theme change after a delay (mid-animation)
+            const timer = setTimeout(() => {
+                setTheme(pendingTheme as any);
+            }, 400); // Half of the animation duration
+
+            // End the animation after it completes
+            const endTimer = setTimeout(() => {
+                setIsAnimating(false);
+                setPendingTheme(null);
+            }, 1000); // Match with animation duration
+
+            // Cancel both timers if deps change mid-flight to avoid applying
+            // a stale theme after unmount.
+            return () => {
+                clearTimeout(timer);
+                clearTimeout(endTimer);
+            };
+        }
+    }, [isAnimating, pendingTheme, setTheme]);
+
+    return (
+
+        {isAnimating && (
+
+
+
+        )}
+
+    );
+}
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/eslint.config.mjs b/demo/nextjs_voice_chat/frontend/fastrtc-demo/eslint.config.mjs
new file mode 100644
index 0000000..521f586
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/eslint.config.mjs
@@ -0,0 +1,28 @@
+import { dirname } from "path";
+import { fileURLToPath } from "url";
+import { FlatCompat } from "@eslint/eslintrc";
+
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+
+const compat = new FlatCompat({
+ baseDirectory: __dirname,
+});
+
+const eslintConfig = [
+ ...compat.extends("next/core-web-vitals", "next/typescript"),
+ {
+ rules: {
+ "no-unused-vars": "off",
+ "no-explicit-any": "off",
+ "no-console": "off",
+ "no-debugger": "off",
+ "eqeqeq": "off",
+ "curly": "off",
+ "quotes": "off",
+ "semi": "off",
+ },
+ },
+];
+
+export default eslintConfig;
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/lib/utils.ts b/demo/nextjs_voice_chat/frontend/fastrtc-demo/lib/utils.ts
new file mode 100644
index 0000000..bd0c391
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/lib/utils.ts
@@ -0,0 +1,6 @@
+import { clsx, type ClassValue } from "clsx"
+import { twMerge } from "tailwind-merge"
+
+/**
+ * Combine conditional class values into one class string, letting
+ * tailwind-merge resolve conflicting Tailwind utilities (last one wins).
+ */
+export function cn(...inputs: ClassValue[]) {
+  const combined = clsx(...inputs)
+  return twMerge(combined)
+}
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/lib/webrtc-client.ts b/demo/nextjs_voice_chat/frontend/fastrtc-demo/lib/webrtc-client.ts
new file mode 100644
index 0000000..72ea3ac
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/lib/webrtc-client.ts
@@ -0,0 +1,189 @@
+/** Callbacks and settings for {@link WebRTCClient}. */
+interface WebRTCClientOptions {
+  /** Fired once the SDP answer from the backend has been applied. */
+  onConnected?: () => void;
+  /** Fired after the connection has been fully torn down. */
+  onDisconnected?: () => void;
+  /** Fired with each JSON-parsed message received on the data channel. */
+  onMessage?: (message: any) => void;
+  /** Fired with the remote MediaStream when an audio track arrives. */
+  onAudioStream?: (stream: MediaStream) => void;
+  /** Fired (throttled to ~10 Hz) with the local mic level, normalised 0-1. */
+  onAudioLevel?: (level: number) => void;
+  /** Base URL of the signalling backend. Defaults to the local dev server. */
+  baseUrl?: string;
+}
+
+/**
+ * Minimal WebRTC voice client: captures the microphone, negotiates an SDP
+ * offer/answer with the backend over HTTP, receives remote audio plus text
+ * messages on a data channel, and reports local input levels for UI use.
+ */
+export class WebRTCClient {
+  private peerConnection: RTCPeerConnection | null = null;
+  private mediaStream: MediaStream | null = null;
+  private dataChannel: RTCDataChannel | null = null;
+  private options: WebRTCClientOptions;
+  private audioContext: AudioContext | null = null;
+  private analyser: AnalyserNode | null = null;
+  private dataArray: Uint8Array | null = null;
+  private animationFrameId: number | null = null;
+
+  constructor(options: WebRTCClientOptions = {}) {
+    this.options = options;
+  }
+
+  /**
+   * Open the microphone, create the peer connection and data channel, and
+   * exchange SDP with the backend. On any failure, cleans up via
+   * disconnect() and rethrows so callers can surface the error.
+   */
+  async connect() {
+    try {
+      this.peerConnection = new RTCPeerConnection();
+
+      // Get user media, translating the common failure modes into
+      // user-actionable error messages.
+      try {
+        this.mediaStream = await navigator.mediaDevices.getUserMedia({
+          audio: true
+        });
+      } catch (mediaError: any) {
+        console.error('Media error:', mediaError);
+        if (mediaError.name === 'NotAllowedError') {
+          throw new Error('Microphone access denied. Please allow microphone access and try again.');
+        } else if (mediaError.name === 'NotFoundError') {
+          throw new Error('No microphone detected. Please connect a microphone and try again.');
+        } else {
+          throw mediaError;
+        }
+      }
+
+      this.setupAudioAnalysis();
+
+      // Publish every local track on the connection.
+      this.mediaStream.getTracks().forEach(track => {
+        if (this.peerConnection) {
+          this.peerConnection.addTrack(track, this.mediaStream!);
+        }
+      });
+
+      // Hand incoming remote audio to the caller.
+      this.peerConnection.addEventListener('track', (event) => {
+        if (this.options.onAudioStream) {
+          this.options.onAudioStream(event.streams[0]);
+        }
+      });
+
+      this.dataChannel = this.peerConnection.createDataChannel('text');
+
+      this.dataChannel.addEventListener('message', (event) => {
+        try {
+          const message = JSON.parse(event.data);
+          console.log('Received message:', message);
+
+          if (this.options.onMessage) {
+            this.options.onMessage(message);
+          }
+        } catch (error) {
+          console.error('Error parsing message:', error);
+        }
+      });
+
+      // Create and send offer
+      const offer = await this.peerConnection.createOffer();
+      await this.peerConnection.setLocalDescription(offer);
+
+      // Cross-origin signalling request to the backend. The dev backend runs
+      // on a different port, so this request DOES go through CORS (the
+      // backend must allow the frontend origin).
+      const baseUrl = this.options.baseUrl ?? 'http://localhost:8000';
+      const response = await fetch(`${baseUrl}/webrtc/offer`, {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+          'Accept': 'application/json'
+        },
+        mode: 'cors', // Explicitly set CORS mode
+        credentials: 'same-origin',
+        body: JSON.stringify({
+          sdp: offer.sdp,
+          type: offer.type,
+          webrtc_id: Math.random().toString(36).substring(7)
+        })
+      });
+
+      // Surface HTTP failures explicitly rather than attempting to parse an
+      // error body as an SDP answer.
+      if (!response.ok) {
+        throw new Error(`Signalling request failed with status ${response.status}`);
+      }
+
+      const serverResponse = await response.json();
+      await this.peerConnection.setRemoteDescription(serverResponse);
+
+      if (this.options.onConnected) {
+        this.options.onConnected();
+      }
+    } catch (error) {
+      console.error('Error connecting:', error);
+      // Release the mic and any partially-created resources before rethrowing.
+      this.disconnect();
+      throw error;
+    }
+  }
+
+  /** Attach an AnalyserNode to the local stream to sample input levels. */
+  private setupAudioAnalysis() {
+    if (!this.mediaStream) return;
+
+    try {
+      this.audioContext = new AudioContext();
+      this.analyser = this.audioContext.createAnalyser();
+      this.analyser.fftSize = 256;
+
+      const source = this.audioContext.createMediaStreamSource(this.mediaStream);
+      source.connect(this.analyser);
+
+      const bufferLength = this.analyser.frequencyBinCount;
+      this.dataArray = new Uint8Array(bufferLength);
+
+      this.startAnalysis();
+    } catch (error) {
+      // Analysis is best-effort; the call itself still works without it.
+      console.error('Error setting up audio analysis:', error);
+    }
+  }
+
+  /** Poll the analyser each animation frame and emit throttled level updates. */
+  private startAnalysis() {
+    if (!this.analyser || !this.dataArray || !this.options.onAudioLevel) return;
+
+    // Add throttling to prevent too many updates
+    let lastUpdateTime = 0;
+    const throttleInterval = 100; // Only update every 100ms
+
+    const analyze = () => {
+      this.analyser!.getByteFrequencyData(this.dataArray!);
+
+      const currentTime = Date.now();
+      // Only update if enough time has passed since last update
+      if (currentTime - lastUpdateTime > throttleInterval) {
+        // Calculate average volume level (0-1)
+        let sum = 0;
+        for (let i = 0; i < this.dataArray!.length; i++) {
+          sum += this.dataArray![i];
+        }
+        const average = sum / this.dataArray!.length / 255;
+
+        this.options.onAudioLevel!(average);
+        lastUpdateTime = currentTime;
+      }
+
+      this.animationFrameId = requestAnimationFrame(analyze);
+    };
+
+    this.animationFrameId = requestAnimationFrame(analyze);
+  }
+
+  /** Stop the analysis loop and release the AudioContext. */
+  private stopAnalysis() {
+    if (this.animationFrameId !== null) {
+      cancelAnimationFrame(this.animationFrameId);
+      this.animationFrameId = null;
+    }
+
+    if (this.audioContext) {
+      this.audioContext.close();
+      this.audioContext = null;
+    }
+
+    this.analyser = null;
+    this.dataArray = null;
+  }
+
+  /**
+   * Tear everything down: stop analysis, stop and release local tracks,
+   * close the peer connection, and notify via onDisconnected. Safe to call
+   * multiple times or after a failed connect().
+   */
+  disconnect() {
+    this.stopAnalysis();
+
+    if (this.mediaStream) {
+      this.mediaStream.getTracks().forEach(track => track.stop());
+      this.mediaStream = null;
+    }
+
+    if (this.peerConnection) {
+      this.peerConnection.close();
+      this.peerConnection = null;
+    }
+
+    this.dataChannel = null;
+
+    if (this.options.onDisconnected) {
+      this.options.onDisconnected();
+    }
+  }
+}
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/next.config.ts b/demo/nextjs_voice_chat/frontend/fastrtc-demo/next.config.ts
new file mode 100644
index 0000000..e9ffa30
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/next.config.ts
@@ -0,0 +1,7 @@
+import type { NextConfig } from "next";
+
+// Next.js configuration; all defaults for this demo.
+const nextConfig: NextConfig = {
+  /* config options here */
+};
+
+export default nextConfig;
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/package.json b/demo/nextjs_voice_chat/frontend/fastrtc-demo/package.json
new file mode 100644
index 0000000..93c285e
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/package.json
@@ -0,0 +1,33 @@
+{
+ "name": "fastrtc-demo",
+ "version": "0.1.0",
+ "private": true,
+ "scripts": {
+ "dev": "next dev --turbopack",
+ "build": "next build --no-lint",
+ "start": "next start",
+ "lint": "next lint"
+ },
+ "dependencies": {
+ "class-variance-authority": "^0.7.1",
+ "clsx": "^2.1.1",
+ "framer-motion": "^12.4.10",
+ "lucide-react": "^0.477.0",
+ "next": "15.2.2-canary.1",
+ "react": "^19.0.0",
+ "react-dom": "^19.0.0",
+ "tailwind-merge": "^3.0.2",
+ "tailwindcss-animate": "^1.0.7"
+ },
+ "devDependencies": {
+ "@eslint/eslintrc": "^3",
+ "@tailwindcss/postcss": "^4",
+ "@types/node": "^20",
+ "@types/react": "^19",
+ "@types/react-dom": "^19",
+ "eslint": "^9",
+ "eslint-config-next": "15.2.2-canary.1",
+ "tailwindcss": "^4",
+ "typescript": "^5"
+ }
+}
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/postcss.config.mjs b/demo/nextjs_voice_chat/frontend/fastrtc-demo/postcss.config.mjs
new file mode 100644
index 0000000..c7bcb4b
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/postcss.config.mjs
@@ -0,0 +1,5 @@
+const config = {
+ plugins: ["@tailwindcss/postcss"],
+};
+
+export default config;
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/file.svg b/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/file.svg
new file mode 100644
index 0000000..004145c
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/file.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/globe.svg b/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/globe.svg
new file mode 100644
index 0000000..567f17b
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/globe.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/next.svg b/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/next.svg
new file mode 100644
index 0000000..5174b28
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/next.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/vercel.svg b/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/vercel.svg
new file mode 100644
index 0000000..7705396
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/vercel.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/window.svg b/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/window.svg
new file mode 100644
index 0000000..b2b2a44
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/public/window.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/frontend/fastrtc-demo/tsconfig.json b/demo/nextjs_voice_chat/frontend/fastrtc-demo/tsconfig.json
new file mode 100644
index 0000000..d8b9323
--- /dev/null
+++ b/demo/nextjs_voice_chat/frontend/fastrtc-demo/tsconfig.json
@@ -0,0 +1,27 @@
+{
+ "compilerOptions": {
+ "target": "ES2017",
+ "lib": ["dom", "dom.iterable", "esnext"],
+ "allowJs": true,
+ "skipLibCheck": true,
+ "strict": true,
+ "noEmit": true,
+ "esModuleInterop": true,
+ "module": "esnext",
+ "moduleResolution": "bundler",
+ "resolveJsonModule": true,
+ "isolatedModules": true,
+ "jsx": "preserve",
+ "incremental": true,
+ "plugins": [
+ {
+ "name": "next"
+ }
+ ],
+ "paths": {
+ "@/*": ["./*"]
+ }
+ },
+ "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
+ "exclude": ["node_modules"]
+}
diff --git a/demo/nextjs_voice_chat/requirements.txt b/demo/nextjs_voice_chat/requirements.txt
new file mode 100644
index 0000000..d9f0bb3
--- /dev/null
+++ b/demo/nextjs_voice_chat/requirements.txt
@@ -0,0 +1,5 @@
+openai
+fastapi
+uvicorn
+python-dotenv
+elevenlabs
+fastrtc[vad, stt, tts]
\ No newline at end of file
diff --git a/demo/nextjs_voice_chat/run.sh b/demo/nextjs_voice_chat/run.sh
new file mode 100755
index 0000000..814e8bd
--- /dev/null
+++ b/demo/nextjs_voice_chat/run.sh
@@ -0,0 +1 @@
+uvicorn backend.server:app --host 0.0.0.0 --port 8000
\ No newline at end of file