Skip to content

New models addition #1233

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Apr 16, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions backend/example.env
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,9 @@ DEFAULT_DIFFBOT_CHAT_MODEL="openai_gpt_4o" #whichever model specified here, ne
LLM_MODEL_CONFIG_openai_gpt_3.5="gpt-3.5-turbo-0125,openai_api_key"
LLM_MODEL_CONFIG_openai_gpt_4o_mini="gpt-4o-mini-2024-07-18,openai_api_key"
LLM_MODEL_CONFIG_openai_gpt_4o="gpt-4o-2024-11-20,openai_api_key"
LLM_MODEL_CONFIG_openai-gpt-o3-mini="o3-mini-2025-01-31,openai_api_key"
LLM_MODEL_CONFIG_openai_gpt_4.1_mini="gpt-4.1-mini,openai_api_key"
LLM_MODEL_CONFIG_openai_gpt_4.1="gpt-4.1,openai_api_key"
LLM_MODEL_CONFIG_openai_gpt_o3_mini="o3-mini-2025-01-31,openai_api_key"
LLM_MODEL_CONFIG_gemini_1.5_pro="gemini-1.5-pro-002"
LLM_MODEL_CONFIG_gemini_1.5_flash="gemini-1.5-flash-002"
LLM_MODEL_CONFIG_gemini_2.0_flash="gemini-2.0-flash-001"
Expand All @@ -41,7 +43,7 @@ LLM_MODEL_CONFIG_azure_ai_gpt_35="azure_deployment_name,azure_endpoint or base_u
LLM_MODEL_CONFIG_azure_ai_gpt_4o="gpt-4o,https://YOUR-ENDPOINT.openai.azure.com/,azure_api_key,api_version"
LLM_MODEL_CONFIG_groq_llama3_70b="model_name,base_url,groq_api_key"
LLM_MODEL_CONFIG_anthropic_claude_3_5_sonnet="model_name,anthropic_api_key"
LLM_MODEL_CONFIG_fireworks_llama_v3_70b="model_name,fireworks_api_key"
LLM_MODEL_CONFIG_fireworks_llama4_maverick="model_name,fireworks_api_key"
LLM_MODEL_CONFIG_bedrock_claude_3_5_sonnet="model_name,aws_access_key_id,aws_secret_access_key,region_name"
LLM_MODEL_CONFIG_ollama_llama3="model_name,model_local_url"
YOUTUBE_TRANSCRIPT_PROXY="https://user:pass@domain:port"
Expand Down
133 changes: 67 additions & 66 deletions frontend/src/utils/Constants.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,36 +12,38 @@ export const llms =
process.env?.VITE_LLM_MODELS?.trim() != ''
? (process.env.VITE_LLM_MODELS?.split(',') as string[])
: [
'openai-gpt-o3-mini',
'openai_gpt_4o',
'openai_gpt_4o_mini',
'openai_gpt_4.5',
'gemini_1.5_pro',
'gemini_1.5_flash',
'gemini_2.0_flash',
'gemini_2.5_pro',
'diffbot',
'azure_ai_gpt_35',
'azure_ai_gpt_4o',
'ollama_llama3',
'groq_llama3_70b',
'anthropic_claude_3_7_sonnet',
'fireworks_llama4_maverick',
'fireworks_llama_v3p2_90b',
'fireworks_qwen72b_instruct',
'bedrock_claude_3_5_sonnet',
'bedrock_nova_micro_v1',
'bedrock_nova_lite_v1',
'bedrock_nova_pro_v1',
'fireworks_deepseek_r1',
'fireworks_deepseek_v3',
];
'openai_gpt_4o',
'openai_gpt_4o_mini',
'openai_gpt_4.1',
'openai_gpt_4.1_mini',
'openai_gpt_o3_mini',
'gemini_1.5_pro',
'gemini_1.5_flash',
'gemini_2.0_flash',
'gemini_2.5_pro',
'diffbot',
'azure_ai_gpt_35',
'azure_ai_gpt_4o',
'ollama_llama3',
'groq_llama3_70b',
'anthropic_claude_3_7_sonnet',
'fireworks_llama4_maverick',
'fireworks_llama4_scout',
'fireworks_qwen72b_instruct',
'bedrock_claude_3_5_sonnet',
'bedrock_nova_micro_v1',
'bedrock_nova_lite_v1',
'bedrock_nova_pro_v1',
'fireworks_deepseek_r1',
'fireworks_deepseek_v3',
];

export const supportedLLmsForRagas = [
'openai_gpt_4.5',
'openai_gpt_4',
'openai_gpt_4o',
'openai_gpt_4o_mini',
'openai_gpt_4.1',
'openai_gpt_4.1_mini',
'gemini_1.5_pro',
'gemini_1.5_flash',
'gemini_2.0_flash',
Expand All @@ -51,30 +53,29 @@ export const supportedLLmsForRagas = [
'groq_llama3_70b',
'anthropic_claude_3_7_sonnet',
'fireworks_llama4_maverick',
'fireworks_llama_v3_70b',
'fireworks_llama4_instruct',
'fireworks_llama4_scout',
'bedrock_claude_3_5_sonnet',
'openai-gpt-o3-mini',
'openai_gpt_o3_mini',
];
export const supportedLLmsForGroundTruthMetrics = [
'openai_gpt_4.5',
'openai_gpt_4',
'openai_gpt_4o',
'openai_gpt_4o_mini',
'openai_gpt_4.1',
'openai_gpt_4.1_mini',
'azure_ai_gpt_35',
'azure_ai_gpt_4o',
'groq_llama3_70b',
'anthropic_claude_3_7_sonnet',
'fireworks_llama4_maverick',
'fireworks_llama_v3_70b',
'fireworks_llama4_instruct',
'fireworks_llama4_scout',
'bedrock_claude_3_5_sonnet',
'openai-gpt-o3-mini',
'openai_gpt_o3_mini',
];
export const prodllms =
process.env.VITE_LLM_MODELS_PROD?.trim() != ''
? (process.env.VITE_LLM_MODELS_PROD?.split(',') as string[])
: ['openai_gpt_4o', 'openai_gpt_4o_mini', 'diffbot', 'gemini_1.5_flash'];
: ['openai_gpt_4o', 'openai_gpt_4o_mini', 'diffbot', 'gemini_2.0_flash'];

export const chatModeLables = {
vector: 'vector',
Expand All @@ -101,40 +102,40 @@ export const chatModeReadableLables: Record<string, string> = {
export const chatModes =
process.env?.VITE_CHAT_MODES?.trim() != ''
? process.env.VITE_CHAT_MODES?.split(',').map((mode) => ({
mode: mode.trim(),
description: getDescriptionForChatMode(mode.trim()),
}))
mode: mode.trim(),
description: getDescriptionForChatMode(mode.trim()),
}))
: [
{
mode: chatModeLables.vector,
description: 'Performs semantic similarity search on text chunks using vector indexing.',
},
{
mode: chatModeLables.graph,
description: 'Translates text to Cypher queries for precise data retrieval from a graph database.',
},
{
mode: chatModeLables['graph+vector'],
description: 'Combines vector indexing and graph connections for contextually enhanced semantic search.',
},
{
mode: chatModeLables.fulltext,
description: 'Conducts fast, keyword-based search using full-text indexing on text chunks.',
},
{
mode: chatModeLables['graph+vector+fulltext'],
description: 'Integrates vector, graph, and full-text indexing for comprehensive search results.',
},
{
mode: chatModeLables['entity search+vector'],
description: 'Uses vector indexing on entity nodes for highly relevant entity-based search.',
},
{
mode: chatModeLables['global search+vector+fulltext'],
description:
'Use vector and full-text indexing on community nodes to provide accurate, context-aware answers globally.',
},
];
{
mode: chatModeLables.vector,
description: 'Performs semantic similarity search on text chunks using vector indexing.',
},
{
mode: chatModeLables.graph,
description: 'Translates text to Cypher queries for precise data retrieval from a graph database.',
},
{
mode: chatModeLables['graph+vector'],
description: 'Combines vector indexing and graph connections for contextually enhanced semantic search.',
},
{
mode: chatModeLables.fulltext,
description: 'Conducts fast, keyword-based search using full-text indexing on text chunks.',
},
{
mode: chatModeLables['graph+vector+fulltext'],
description: 'Integrates vector, graph, and full-text indexing for comprehensive search results.',
},
{
mode: chatModeLables['entity search+vector'],
description: 'Uses vector indexing on entity nodes for highly relevant entity-based search.',
},
{
mode: chatModeLables['global search+vector+fulltext'],
description:
'Use vector and full-text indexing on community nodes to provide accurate, context-aware answers globally.',
},
];

export const chunkSize = process.env.VITE_CHUNK_SIZE ? Number(process.env.VITE_CHUNK_SIZE) : 1 * 1024 * 1024;
export const tokenchunkSize = process.env.VITE_TOKENS_PER_CHUNK ? Number(process.env.VITE_TOKENS_PER_CHUNK) : 100;
Expand Down