@@ -35,12 +35,8 @@ os.environ['OPENAI_API_KEY'] = 'your_key_here'
3535
3636# add Azure Keys
3737os.environ['AZURE_KEYS'] = '{"realm_name": {"url": "https://baseurl.azure.com/", "key": "secret"}}'
38-
39- # Other env Variables;
40- # CLAUDE_API_KEY
41- # GEMINI_API_KEY
42- # OPENAI_ORG
43- # NEBIUS_KEY
38+ # or when creating the Lamoom object:
39+ Lamoom(azure_keys={"realm_name": {"url": "https://baseurl.azure.com/", "key": "your_secret"}})
4440```
4541
4642### Model Agnostic:
@@ -49,40 +45,25 @@ Mix models easily, and distribute the load across models. The system will automa
4945- Gemini
5046- OpenAI (w/ Azure OpenAI models)
5147- Nebius with (Llama, DeepSeek, Mistral, Mixtral, dolphin, Qwen and others)
52- ```
53- from lamoom import LamoomModelProviders
54-
55- def_behaviour = behaviour.AIModelsBehaviour(attempts=[
56- AttemptToCall(provider='openai', model='gpt-4o', weight=100),
57- AttemptToCall(provider='azure', realm='useast-1', deployment_id='gpt-4o' weight=100),
58- AttemptToCall(provider='azure', realm='useast-2', deployment_id='gpt-4o' weight=100),
59- AttemptToCall(provider=LamoomModelProviders.anthropic, model='claude-3-5-sonnet-20240620', weight=100
60- ),
61- AttemptToCall(provider=LamoomModelProviders.gemini, model='gemini-1.5-pro', weight=100
62- ),
63- AttemptToCall(provider=LamoomModelProviders.nebius, model='deepseek-ai/DeepSeek-R1', weight=100
64- )
65- ])
66-
67- response_llm = client.call(agent.id, context, def_behaviour)
68- ```
6948
70- ### Add Behavious :
71- - use OPENAI_BEHAVIOR
72- - or add your own Behaviour, you can set max count of attempts, if you have different AI Models, if the first attempt will fail because of retryable error, the second will be called, based on the weights.
73- `` `
74- from lamoom import OPENAI_GPT4_0125_PREVIEW_BEHAVIOUR
75- behaviour = OPENAI_GPT4_0125_PREVIEW_BEHAVIOUR
76- ```
77- or:
49+ The model string format for Claude, Gemini, OpenAI, and Nebius is:
50+ `"{model_provider}/{model_name}"`
51+ For Azure models the format is:
52+ `"azure/{realm}/{model_name}"`
53+
54+ ```python
55+ response_llm = client.call(agent.id, context, model="openai/gpt-4o")
56+ response_llm = client.call(agent.id, context, model="azure/useast/gpt-4o")
7857```
79- from lamoom import behaviour
80- behaviour = behaviour.AIModelsBehaviour(
81- attempts=[
82- AttemptToCall(provider='azure', realm='useast-1', deployment_id='gpt-4o' weight=100),
83- AttemptToCall(provider='azure', realm='useast-2', deployment_id='gpt-4o' weight=100),
84- ]
85- )
58+
59+ ### Lamoom Keys
60+ Obtain an API token from Lamoom and add it:
61+
62+ ```python
63+ # As an environment variable:
64+ os.environ['LAMOOM_API_TOKEN'] = 'your_token_here'
65+ # Via code:
66+ Lamoom(api_token='your_api_token')
8667```
8768
8869## Usage Examples:
@@ -100,7 +81,7 @@ prompt.add("You're {name}. Say Hello and ask what's their name.", role="system")
10081# Call AI model with Lamoom
10082context = {"name": "John Doe"}
10283# test_data - optional parameter used for generating tests
103- response = client.call(prompt.id, context, behavior , test_data = {
84+ response = client.call(prompt.id, context, "openai/gpt-4o", test_data={
10485 'ideal_answer': "Hello, I'm John Doe. What's your name?",
10586 'behavior_name': "gemini"
10687 }
0 commit comments