Commit 79fc77a

feat: update tanchat example
1 parent e07d0b0 commit 79fc77a

File tree

18 files changed: +1883 -173 lines

Lines changed: 85 additions & 0 deletions
@@ -0,0 +1,85 @@
import { useCallback, useRef, useState } from 'react'

/**
 * Hook for recording audio and transcribing it via the transcription API.
 */
export function useAudioRecorder() {
  const [isRecording, setIsRecording] = useState(false)
  const [isTranscribing, setIsTranscribing] = useState(false)
  const mediaRecorderRef = useRef<MediaRecorder | null>(null)
  const chunksRef = useRef<Blob[]>([])

  const startRecording = useCallback(async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      const mediaRecorder = new MediaRecorder(stream, {
        mimeType: 'audio/webm;codecs=opus',
      })
      mediaRecorderRef.current = mediaRecorder
      chunksRef.current = []

      mediaRecorder.ondataavailable = (e) => {
        if (e.data.size > 0) {
          chunksRef.current.push(e.data)
        }
      }

      mediaRecorder.start()
      setIsRecording(true)
    } catch (error) {
      console.error('Failed to start recording:', error)
      alert('Could not access microphone. Please check permissions.')
    }
  }, [])

  const stopRecording = useCallback(async (): Promise<string | null> => {
    return new Promise((resolve) => {
      const mediaRecorder = mediaRecorderRef.current
      if (!mediaRecorder) {
        resolve(null)
        return
      }

      mediaRecorder.onstop = async () => {
        setIsRecording(false)
        setIsTranscribing(true)

        const audioBlob = new Blob(chunksRef.current, { type: 'audio/webm' })

        // Stop all tracks
        mediaRecorder.stream.getTracks().forEach((track) => track.stop())

        try {
          const formData = new FormData()
          formData.append(
            'audio',
            new File([audioBlob], 'recording.webm', { type: 'audio/webm' }),
          )
          formData.append('model', 'whisper-1')

          const response = await fetch('/demo/api/transcription', {
            method: 'POST',
            body: formData,
          })

          if (!response.ok) {
            const errorData = await response.json()
            throw new Error(errorData.error || 'Transcription failed')
          }

          const result = await response.json()
          setIsTranscribing(false)
          resolve(result.text || null)
        } catch (error) {
          console.error('Transcription error:', error)
          setIsTranscribing(false)
          resolve(null)
        }
      }

      mediaRecorder.stop()
    })
  }, [])

  return { isRecording, isTranscribing, startRecording, stopRecording }
}
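
For context, a minimal sketch of how a component might consume this hook. The component, its import path, and the onTranscript callback are illustrative assumptions, not part of this commit:

import { useAudioRecorder } from '../hooks/use-audio-recorder' // hypothetical path

function VoiceInputButton({ onTranscript }: { onTranscript: (text: string) => void }) {
  const { isRecording, isTranscribing, startRecording, stopRecording } = useAudioRecorder()

  const handleClick = async () => {
    if (isRecording) {
      // Stops the MediaRecorder and resolves with the transcript (or null on failure)
      const text = await stopRecording()
      if (text) onTranscript(text)
    } else {
      await startRecording()
    }
  }

  return (
    <button onClick={handleClick} disabled={isTranscribing}>
      {isRecording ? 'Stop' : isTranscribing ? 'Transcribing…' : 'Record'}
    </button>
  )
}
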
Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
import { useCallback, useRef, useState } from 'react'

/**
 * Hook for text-to-speech playback via the TTS API.
 */
export function useTTS() {
  const [playingId, setPlayingId] = useState<string | null>(null)
  const audioRef = useRef<HTMLAudioElement | null>(null)

  const speak = useCallback(async (text: string, id: string) => {
    // Stop any currently playing audio
    if (audioRef.current) {
      audioRef.current.pause()
      audioRef.current = null
    }

    setPlayingId(id)

    try {
      const response = await fetch('/demo/api/tts', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          text,
          voice: 'nova',
          model: 'tts-1',
          format: 'mp3',
        }),
      })

      if (!response.ok) {
        const errorData = await response.json()
        throw new Error(errorData.error || 'TTS failed')
      }

      const result = await response.json()

      // Convert base64 to audio and play
      const audioData = atob(result.audio)
      const bytes = new Uint8Array(audioData.length)
      for (let i = 0; i < audioData.length; i++) {
        bytes[i] = audioData.charCodeAt(i)
      }
      const blob = new Blob([bytes], { type: result.contentType })
      const url = URL.createObjectURL(blob)

      const audio = new Audio(url)
      audioRef.current = audio

      audio.onended = () => {
        URL.revokeObjectURL(url)
        setPlayingId(null)
        audioRef.current = null
      }

      audio.onerror = () => {
        URL.revokeObjectURL(url)
        setPlayingId(null)
        audioRef.current = null
      }

      await audio.play()
    } catch (error) {
      console.error('TTS error:', error)
      setPlayingId(null)
    }
  }, [])

  const stop = useCallback(() => {
    if (audioRef.current) {
      audioRef.current.pause()
      audioRef.current = null
    }
    setPlayingId(null)
  }, [])

  return { playingId, speak, stop }
}
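
A possible consumer of this hook, again as a hedged sketch: the component name, props, and import path are assumptions for illustration only. playingId lets each message button know whether it is the one currently playing:

import { useTTS } from '../hooks/use-tts' // hypothetical path

function SpeakMessageButton({ messageId, text }: { messageId: string; text: string }) {
  const { playingId, speak, stop } = useTTS()
  const isPlaying = playingId === messageId

  return (
    <button onClick={() => (isPlaying ? stop() : speak(text, messageId))}>
      {isPlaying ? 'Stop' : 'Play'}
    </button>
  )
}
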
Lines changed: 78 additions & 0 deletions
@@ -0,0 +1,78 @@
export type Provider = 'openai' | 'anthropic' | 'gemini' | 'ollama'

export interface ModelOption {
  provider: Provider
  model: string
  label: string
}

export const MODEL_OPTIONS: Array<ModelOption> = [
  // OpenAI models
  { provider: 'openai', model: 'gpt-4o', label: 'OpenAI - GPT-4o' },
  { provider: 'openai', model: 'gpt-4o-mini', label: 'OpenAI - GPT-4o Mini' },

  // Anthropic models
  {
    provider: 'anthropic',
    model: 'claude-haiku-4-5',
    label: 'Anthropic - Claude Haiku 4.5',
  },
  {
    provider: 'anthropic',
    model: 'claude-sonnet-4-5-20250929',
    label: 'Anthropic - Claude Sonnet 4.5',
  },

  // Gemini models
  {
    provider: 'gemini',
    model: 'gemini-2.0-flash-exp',
    label: 'Gemini - 2.0 Flash',
  },

  // Ollama models
  { provider: 'ollama', model: 'mistral:7b', label: 'Ollama - Mistral 7B' },
]

const STORAGE_KEY = 'tanstack-ai-model-preference'

export function getStoredModelPreference(): ModelOption | null {
  if (typeof window === 'undefined') return null
  try {
    const stored = localStorage.getItem(STORAGE_KEY)
    if (stored) {
      const parsed = JSON.parse(stored)
      // Validate that the stored option still exists in MODEL_OPTIONS
      const found = MODEL_OPTIONS.find(
        (o) => o.provider === parsed.provider && o.model === parsed.model,
      )
      if (found) return found
    }
  } catch {
    // Ignore storage errors
  }
  return null
}

export function setStoredModelPreference(option: ModelOption): void {
  if (typeof window === 'undefined') return
  try {
    localStorage.setItem(STORAGE_KEY, JSON.stringify(option))
  } catch {
    // Ignore storage errors
  }
}

export function getDefaultModelOption(): ModelOption {
  return getStoredModelPreference() || MODEL_OPTIONS[0]
}

export function getModelOptionsForProvider(provider: Provider): ModelOption[] {
  return MODEL_OPTIONS.filter((o) => o.provider === provider)
}

export function getAvailableModelOptions(
  availableProviders: Provider[],
): ModelOption[] {
  return MODEL_OPTIONS.filter((o) => availableProviders.includes(o.provider))
}
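
A short sketch of how these helpers might be wired together in a model-picker UI. The hard-coded provider list stands in for whatever the available-providers endpoint returns; it is an assumption, not part of this diff:

import {
  getAvailableModelOptions,
  getDefaultModelOption,
  setStoredModelPreference,
} from './model-selection'

// Start from the persisted preference, falling back to the first option
let selected = getDefaultModelOption()

// Restrict the dropdown to providers that are actually configured
const options = getAvailableModelOptions(['openai', 'anthropic', 'ollama'])

// Persist a new choice so it survives page reloads
if (options.length > 0) {
  selected = options[0]
  setStoredModelPreference(selected)
}
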
Lines changed: 55 additions & 0 deletions
@@ -0,0 +1,55 @@
import type { Provider } from './model-selection'

export interface VendorCapabilities {
  chat: boolean
  structured: boolean
  image: boolean
  transcription: boolean
  tts: boolean
}

export const VENDOR_CAPABILITIES: Record<Provider, VendorCapabilities> = {
  openai: {
    chat: true,
    structured: true,
    image: true,
    transcription: true,
    tts: true,
  },
  anthropic: {
    chat: true,
    structured: true,
    image: false,
    transcription: false,
    tts: false,
  },
  gemini: {
    chat: true,
    structured: true,
    image: false,
    transcription: false,
    tts: false,
  },
  ollama: {
    chat: true,
    structured: true,
    image: false,
    transcription: false,
    tts: false,
  },
}

export function hasCapability(
  provider: Provider,
  capability: keyof VendorCapabilities,
): boolean {
  return VENDOR_CAPABILITIES[provider]?.[capability] ?? false
}

export function getProvidersWithCapability(
  capability: keyof VendorCapabilities,
): Provider[] {
  return (Object.keys(VENDOR_CAPABILITIES) as Provider[]).filter(
    (provider) => VENDOR_CAPABILITIES[provider][capability],
  )
}
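
The expected results below follow directly from the capability table above; the console.log wrapper is just for illustration:

import { getProvidersWithCapability, hasCapability } from './vendor-capabilities'

// Per the table, only OpenAI supports image, transcription, and TTS
console.log(hasCapability('anthropic', 'tts'))          // false
console.log(hasCapability('openai', 'transcription'))   // true
console.log(getProvidersWithCapability('structured'))   // ['openai', 'anthropic', 'gemini', 'ollama']
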
Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
import { createFileRoute } from '@tanstack/react-router'
import type { Provider } from '@/lib/model-selection'

export const Route = createFileRoute('/demo/api/available-providers')({
  server: {
    handlers: {
      GET: async () => {
        const available: Provider[] = []

        if (process.env.OPENAI_API_KEY) {
          available.push('openai')
        }
        if (process.env.ANTHROPIC_API_KEY) {
          available.push('anthropic')
        }
        if (process.env.GEMINI_API_KEY) {
          available.push('gemini')
        }
        // Ollama is always available (local, no key needed)
        available.push('ollama')

        return new Response(
          JSON.stringify({
            providers: available,
            hasOpenAI: available.includes('openai'),
          }),
          {
            status: 200,
            headers: { 'Content-Type': 'application/json' },
          },
        )
      },
    },
  },
})
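
On the client, the route could be queried with a small fetch helper like the one below; the helper name and return shape are assumptions based on the JSON body the handler produces:

// Hypothetical client-side check against the route added above
async function fetchAvailableProviders(): Promise<{ providers: string[]; hasOpenAI: boolean }> {
  const response = await fetch('/demo/api/available-providers')
  if (!response.ok) throw new Error('Failed to load available providers')
  return response.json()
}
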
