From 5c9473874c00c8d205788d12a3d35f0cf2136b84 Mon Sep 17 00:00:00 2001 From: Tanaro Laptop Date: Sat, 20 Jan 2024 01:50:27 +0100 Subject: [PATCH 001/378] change max_tokens type to int --- litellm/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/main.py b/litellm/main.py index 2fef048a6981..407e851f5b63 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -366,7 +366,7 @@ def completion( n: Optional[int] = None, stream: Optional[bool] = None, stop=None, - max_tokens: Optional[float] = None, + max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, From b69cb2c773ce0fb06835dd7594329cbe6ca88556 Mon Sep 17 00:00:00 2001 From: TanaroSch Date: Tue, 6 Feb 2024 11:19:28 +0100 Subject: [PATCH 002/378] change max_tokens float to int --- docs/my-website/docs/completion/input.md | 2 +- litellm/main.py | 4 ++-- litellm/types/completion.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/my-website/docs/completion/input.md b/docs/my-website/docs/completion/input.md index 676e4d23265e..445fd072bae2 100644 --- a/docs/my-website/docs/completion/input.md +++ b/docs/my-website/docs/completion/input.md @@ -73,7 +73,7 @@ def completion( n: Optional[int] = None, stream: Optional[bool] = None, stop=None, - max_tokens: Optional[float] = None, + max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, diff --git a/litellm/main.py b/litellm/main.py index 407e851f5b63..fb7574b06387 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -143,7 +143,7 @@ async def acompletion( n: Optional[int] = None, stream: Optional[bool] = None, stop=None, - max_tokens: Optional[float] = None, + max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, @@ -1884,7 +1884,7 @@ def batch_completion( n: Optional[int] = None, stream: Optional[bool] = None, stop=None, - max_tokens: Optional[float] = None, + max_tokens: Optional[int] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, logit_bias: Optional[dict] = None, diff --git a/litellm/types/completion.py b/litellm/types/completion.py index 5eac90575601..3ce441880c5b 100644 --- a/litellm/types/completion.py +++ b/litellm/types/completion.py @@ -12,7 +12,7 @@ class CompletionRequest(BaseModel): n: Optional[int] = None stream: Optional[bool] = None stop: Optional[dict] = None - max_tokens: Optional[float] = None + max_tokens: Optional[int] = None presence_penalty: Optional[float] = None frequency_penalty: Optional[float] = None logit_bias: Optional[dict] = None From 872ff6176d506dfb4f7e2ee8d22505c98b96c6c6 Mon Sep 17 00:00:00 2001 From: Lucca Zenobio Date: Wed, 20 Mar 2024 15:22:23 -0300 Subject: [PATCH 003/378] updates --- litellm/llms/prompt_templates/factory.py | 6 +++--- litellm/utils.py | 5 ++++- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index b23f10315102..abe340e7d109 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -604,13 +604,13 @@ def convert_to_anthropic_tool_result(message: dict) -> str: def convert_to_anthropic_tool_invoke(tool_calls: list) -> str: invokes = "" for tool in tool_calls: - if tool["type"] != "function": + if tool.type != 
"function": continue - tool_name = tool["function"]["name"] + tool_name = tool.function.name parameters = "".join( f"<{param}>{val}\n" - for param, val in json.loads(tool["function"]["arguments"]).items() + for param, val in json.loads(tool.function.arguments).items() ) invokes += ( "\n" diff --git a/litellm/utils.py b/litellm/utils.py index a8c0031812ef..57327473d88f 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -271,7 +271,10 @@ def __init__( if tool_calls is not None: self.tool_calls = [] for tool_call in tool_calls: - self.tool_calls.append(ChatCompletionMessageToolCall(**tool_call)) + if isinstance(tool_call, dict): + self.tool_calls.append(ChatCompletionMessageToolCall(**tool_call)) + else: + self.tool_calls.append(tool_call) if logprobs is not None: self._logprobs = logprobs From 0c0780be83c7a4559684d44c538fea6a5b07cad3 Mon Sep 17 00:00:00 2001 From: Lucca Zenobio Date: Thu, 21 Mar 2024 10:43:27 -0300 Subject: [PATCH 004/378] extra headers --- litellm/llms/bedrock.py | 15 ++++++++++++++- litellm/llms/prompt_templates/factory.py | 9 +++++---- litellm/main.py | 1 + litellm/utils.py | 2 +- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py index 0f52d3abc422..a32cee381ec0 100644 --- a/litellm/llms/bedrock.py +++ b/litellm/llms/bedrock.py @@ -495,6 +495,15 @@ def get_config(cls): } +def add_custom_header(headers): + """Closure to capture the headers and add them.""" + def callback(request, **kwargs): + """Actual callback function that Boto3 will call.""" + for header_name, header_value in headers.items(): + request.headers.add_header(header_name, header_value) + return callback + + def init_bedrock_client( region_name=None, aws_access_key_id: Optional[str] = None, @@ -504,12 +513,12 @@ def init_bedrock_client( aws_session_name: Optional[str] = None, aws_profile_name: Optional[str] = None, aws_role_name: Optional[str] = None, + extra_headers: Optional[dict] = None, timeout: Optional[int] = None, ): # check for custom AWS_REGION_NAME and use it if not passed to init_bedrock_client litellm_aws_region_name = get_secret("AWS_REGION_NAME", None) standard_aws_region_name = get_secret("AWS_REGION", None) - ## CHECK IS 'os.environ/' passed in # Define the list of parameters to check params_to_check = [ @@ -618,6 +627,8 @@ def init_bedrock_client( endpoint_url=endpoint_url, config=config, ) + if extra_headers: + client.meta.events.register('before-sign.bedrock-runtime.*', add_custom_header(extra_headers)) return client @@ -677,6 +688,7 @@ def completion( litellm_params=None, logger_fn=None, timeout=None, + extra_headers: Optional[dict] = None, ): exception_mapping_worked = False try: @@ -704,6 +716,7 @@ def completion( aws_role_name=aws_role_name, aws_session_name=aws_session_name, aws_profile_name=aws_profile_name, + extra_headers=extra_headers, timeout=timeout, ) diff --git a/litellm/llms/prompt_templates/factory.py b/litellm/llms/prompt_templates/factory.py index abe340e7d109..a09b988a575c 100644 --- a/litellm/llms/prompt_templates/factory.py +++ b/litellm/llms/prompt_templates/factory.py @@ -604,13 +604,14 @@ def convert_to_anthropic_tool_result(message: dict) -> str: def convert_to_anthropic_tool_invoke(tool_calls: list) -> str: invokes = "" for tool in tool_calls: - if tool.type != "function": + tool = dict(tool) + if tool["type"] != "function": continue - - tool_name = tool.function.name + tool_function = dict(tool["function"]) + tool_name = tool_function["name"] parameters = "".join( f"<{param}>{val}\n" - for param, 
val in json.loads(tool.function.arguments).items() + for param, val in json.loads(tool_function["arguments"]).items() ) invokes += ( "\n" diff --git a/litellm/main.py b/litellm/main.py index 724190391c59..dceaf9bf5c86 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -1749,6 +1749,7 @@ def completion( logger_fn=logger_fn, encoding=encoding, logging_obj=logging, + extra_headers=extra_headers, timeout=timeout, ) diff --git a/litellm/utils.py b/litellm/utils.py index 57327473d88f..4124ea4370a6 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -5146,7 +5146,7 @@ def get_supported_openai_params(model: str, custom_llm_provider: str): """ if custom_llm_provider == "bedrock": if model.startswith("anthropic.claude-3"): - return litellm.AmazonAnthropicClaude3Config().get_supported_openai_params() + return litellm.AmazonAnthropicClaude3Config().get_supported_openai_params() + ["extra_headers"] elif model.startswith("anthropic"): return litellm.AmazonAnthropicConfig().get_supported_openai_params() elif model.startswith("ai21"): From cda78a5da0b2d705fb5ac56193c7508dc1ba7c4f Mon Sep 17 00:00:00 2001 From: Lucca Zenobio Date: Mon, 25 Mar 2024 13:08:17 -0300 Subject: [PATCH 005/378] update --- litellm/llms/bedrock.py | 2 +- litellm/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py index a32cee381ec0..d13301910cfc 100644 --- a/litellm/llms/bedrock.py +++ b/litellm/llms/bedrock.py @@ -128,7 +128,7 @@ def get_config(cls): } def get_supported_openai_params(self): - return ["max_tokens", "tools", "tool_choice", "stream"] + return ["max_tokens", "tools", "tool_choice", "stream", "extra_headers"] def map_openai_params(self, non_default_params: dict, optional_params: dict): for param, value in non_default_params.items(): diff --git a/litellm/utils.py b/litellm/utils.py index 4124ea4370a6..57327473d88f 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -5146,7 +5146,7 @@ def get_supported_openai_params(model: str, custom_llm_provider: str): """ if custom_llm_provider == "bedrock": if model.startswith("anthropic.claude-3"): - return litellm.AmazonAnthropicClaude3Config().get_supported_openai_params() + ["extra_headers"] + return litellm.AmazonAnthropicClaude3Config().get_supported_openai_params() elif model.startswith("anthropic"): return litellm.AmazonAnthropicConfig().get_supported_openai_params() elif model.startswith("ai21"): From a9e2ef62125c462cc62d824f3d90bbc1d0366dfe Mon Sep 17 00:00:00 2001 From: Lucca Zenobio Date: Mon, 29 Apr 2024 10:05:30 -0300 Subject: [PATCH 006/378] test --- litellm/tests/test_bedrock_completion.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/litellm/tests/test_bedrock_completion.py b/litellm/tests/test_bedrock_completion.py index ca2ffea5f527..2aab8a3b440c 100644 --- a/litellm/tests/test_bedrock_completion.py +++ b/litellm/tests/test_bedrock_completion.py @@ -207,6 +207,25 @@ def test_completion_bedrock_claude_sts_client_auth(): # test_completion_bedrock_claude_sts_client_auth() +def test_bedrock_extra_headers(): + try: + litellm.set_verbose = True + response: ModelResponse = completion( + model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0", + messages=messages, + max_tokens=10, + temperature=0.78, + extra_headers={"x-key": "x_key_value"} + ) + # Add any assertions here to check the response + assert len(response.choices) > 0 + assert len(response.choices[0].message.content) > 0 + except RateLimitError: + pass + except Exception as e: + pytest.fail(f"Error 
occurred: {e}") + + def test_bedrock_claude_3(): try: litellm.set_verbose = True From dd166680d173379231f52ca0bb0b41478106f5aa Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Tue, 30 Apr 2024 15:07:10 -0700 Subject: [PATCH 007/378] Move chat_completions before completions so that the `chat_completions` route is defined before the `completions` route. This is necessary because the `chat_completions` route is more specific than the `completions` route, and the order of route definitions matters in FastAPI. Without this, doing a request to `/openai/deployments/{model_in_url}/chat/completions` might trigger `completions` being called (with `model` set to `{model_in_url}/chat` instead of `chat_completions` getting called, which is the correct function. Fixes: GH-3372 --- litellm/proxy/proxy_server.py | 332 +++++++++++++++++----------------- 1 file changed, 166 insertions(+), 166 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 29f3c41dba95..11c5d01e208b 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -3371,172 +3371,6 @@ def model_list( ) -@router.post( - "/v1/completions", dependencies=[Depends(user_api_key_auth)], tags=["completions"] -) -@router.post( - "/completions", dependencies=[Depends(user_api_key_auth)], tags=["completions"] -) -@router.post( - "/engines/{model:path}/completions", - dependencies=[Depends(user_api_key_auth)], - tags=["completions"], -) -@router.post( - "/openai/deployments/{model:path}/completions", - dependencies=[Depends(user_api_key_auth)], - tags=["completions"], -) -async def completion( - request: Request, - fastapi_response: Response, - model: Optional[str] = None, - user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth), -): - global user_temperature, user_request_timeout, user_max_tokens, user_api_base - try: - body = await request.body() - body_str = body.decode() - try: - data = ast.literal_eval(body_str) - except: - data = json.loads(body_str) - - data["user"] = data.get("user", user_api_key_dict.user_id) - data["model"] = ( - general_settings.get("completion_model", None) # server default - or user_model # model name passed via cli args - or model # for azure deployments - or data["model"] # default passed in http request - ) - if user_model: - data["model"] = user_model - if "metadata" not in data: - data["metadata"] = {} - data["metadata"]["user_api_key"] = user_api_key_dict.api_key - data["metadata"]["user_api_key_metadata"] = user_api_key_dict.metadata - data["metadata"]["user_api_key_alias"] = getattr( - user_api_key_dict, "key_alias", None - ) - data["metadata"]["user_api_key_user_id"] = user_api_key_dict.user_id - data["metadata"]["user_api_key_team_id"] = getattr( - user_api_key_dict, "team_id", None - ) - data["metadata"]["user_api_key_team_alias"] = getattr( - user_api_key_dict, "team_alias", None - ) - _headers = dict(request.headers) - _headers.pop( - "authorization", None - ) # do not store the original `sk-..` api key in the db - data["metadata"]["headers"] = _headers - data["metadata"]["endpoint"] = str(request.url) - - # override with user settings, these are params passed via cli - if user_temperature: - data["temperature"] = user_temperature - if user_request_timeout: - data["request_timeout"] = user_request_timeout - if user_max_tokens: - data["max_tokens"] = user_max_tokens - if user_api_base: - data["api_base"] = user_api_base - - ### MODEL ALIAS MAPPING ### - # check if model name in model alias map - # get the actual model name - if 
data["model"] in litellm.model_alias_map: - data["model"] = litellm.model_alias_map[data["model"]] - - ### CALL HOOKS ### - modify incoming data before calling the model - data = await proxy_logging_obj.pre_call_hook( - user_api_key_dict=user_api_key_dict, data=data, call_type="completion" - ) - - ### ROUTE THE REQUESTs ### - router_model_names = llm_router.model_names if llm_router is not None else [] - # skip router if user passed their key - if "api_key" in data: - response = await litellm.atext_completion(**data) - elif ( - llm_router is not None and data["model"] in router_model_names - ): # model in router model list - response = await llm_router.atext_completion(**data) - elif ( - llm_router is not None - and llm_router.model_group_alias is not None - and data["model"] in llm_router.model_group_alias - ): # model set in model_group_alias - response = await llm_router.atext_completion(**data) - elif ( - llm_router is not None and data["model"] in llm_router.deployment_names - ): # model in router deployments, calling a specific deployment on the router - response = await llm_router.atext_completion( - **data, specific_deployment=True - ) - elif ( - llm_router is not None - and data["model"] not in router_model_names - and llm_router.default_deployment is not None - ): # model in router deployments, calling a specific deployment on the router - response = await llm_router.atext_completion(**data) - elif user_model is not None: # `litellm --model ` - response = await litellm.atext_completion(**data) - else: - raise HTTPException( - status_code=status.HTTP_400_BAD_REQUEST, - detail={ - "error": "Invalid model name passed in model=" - + data.get("model", "") - }, - ) - - if hasattr(response, "_hidden_params"): - model_id = response._hidden_params.get("model_id", None) or "" - original_response = ( - response._hidden_params.get("original_response", None) or "" - ) - else: - model_id = "" - original_response = "" - - verbose_proxy_logger.debug("final response: %s", response) - if ( - "stream" in data and data["stream"] == True - ): # use generate_responses to stream responses - custom_headers = { - "x-litellm-model-id": model_id, - } - selected_data_generator = select_data_generator( - response=response, user_api_key_dict=user_api_key_dict - ) - - return StreamingResponse( - selected_data_generator, - media_type="text/event-stream", - headers=custom_headers, - ) - - fastapi_response.headers["x-litellm-model-id"] = model_id - return response - except Exception as e: - data["litellm_status"] = "fail" # used for alerting - verbose_proxy_logger.debug("EXCEPTION RAISED IN PROXY MAIN.PY") - verbose_proxy_logger.debug( - "\033[1;31mAn error occurred: %s\n\n Debug this by setting `--debug`, e.g. 
`litellm --model gpt-3.5-turbo --debug`", - e, - ) - traceback.print_exc() - error_traceback = traceback.format_exc() - error_msg = f"{str(e)}" - raise ProxyException( - message=getattr(e, "message", error_msg), - type=getattr(e, "type", "None"), - param=getattr(e, "param", "None"), - code=getattr(e, "status_code", 500), - ) - - @router.post( "/v1/chat/completions", dependencies=[Depends(user_api_key_auth)], @@ -3809,6 +3643,172 @@ async def chat_completion( ) +@router.post( + "/v1/completions", dependencies=[Depends(user_api_key_auth)], tags=["completions"] +) +@router.post( + "/completions", dependencies=[Depends(user_api_key_auth)], tags=["completions"] +) +@router.post( + "/engines/{model:path}/completions", + dependencies=[Depends(user_api_key_auth)], + tags=["completions"], +) +@router.post( + "/openai/deployments/{model:path}/completions", + dependencies=[Depends(user_api_key_auth)], + tags=["completions"], +) +async def completion( + request: Request, + fastapi_response: Response, + model: Optional[str] = None, + user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth), +): + global user_temperature, user_request_timeout, user_max_tokens, user_api_base + try: + body = await request.body() + body_str = body.decode() + try: + data = ast.literal_eval(body_str) + except: + data = json.loads(body_str) + + data["user"] = data.get("user", user_api_key_dict.user_id) + data["model"] = ( + general_settings.get("completion_model", None) # server default + or user_model # model name passed via cli args + or model # for azure deployments + or data["model"] # default passed in http request + ) + if user_model: + data["model"] = user_model + if "metadata" not in data: + data["metadata"] = {} + data["metadata"]["user_api_key"] = user_api_key_dict.api_key + data["metadata"]["user_api_key_metadata"] = user_api_key_dict.metadata + data["metadata"]["user_api_key_alias"] = getattr( + user_api_key_dict, "key_alias", None + ) + data["metadata"]["user_api_key_user_id"] = user_api_key_dict.user_id + data["metadata"]["user_api_key_team_id"] = getattr( + user_api_key_dict, "team_id", None + ) + data["metadata"]["user_api_key_team_alias"] = getattr( + user_api_key_dict, "team_alias", None + ) + _headers = dict(request.headers) + _headers.pop( + "authorization", None + ) # do not store the original `sk-..` api key in the db + data["metadata"]["headers"] = _headers + data["metadata"]["endpoint"] = str(request.url) + + # override with user settings, these are params passed via cli + if user_temperature: + data["temperature"] = user_temperature + if user_request_timeout: + data["request_timeout"] = user_request_timeout + if user_max_tokens: + data["max_tokens"] = user_max_tokens + if user_api_base: + data["api_base"] = user_api_base + + ### MODEL ALIAS MAPPING ### + # check if model name in model alias map + # get the actual model name + if data["model"] in litellm.model_alias_map: + data["model"] = litellm.model_alias_map[data["model"]] + + ### CALL HOOKS ### - modify incoming data before calling the model + data = await proxy_logging_obj.pre_call_hook( + user_api_key_dict=user_api_key_dict, data=data, call_type="completion" + ) + + ### ROUTE THE REQUESTs ### + router_model_names = llm_router.model_names if llm_router is not None else [] + # skip router if user passed their key + if "api_key" in data: + response = await litellm.atext_completion(**data) + elif ( + llm_router is not None and data["model"] in router_model_names + ): # model in router model list + response = await 
llm_router.atext_completion(**data) + elif ( + llm_router is not None + and llm_router.model_group_alias is not None + and data["model"] in llm_router.model_group_alias + ): # model set in model_group_alias + response = await llm_router.atext_completion(**data) + elif ( + llm_router is not None and data["model"] in llm_router.deployment_names + ): # model in router deployments, calling a specific deployment on the router + response = await llm_router.atext_completion( + **data, specific_deployment=True + ) + elif ( + llm_router is not None + and data["model"] not in router_model_names + and llm_router.default_deployment is not None + ): # model in router deployments, calling a specific deployment on the router + response = await llm_router.atext_completion(**data) + elif user_model is not None: # `litellm --model ` + response = await litellm.atext_completion(**data) + else: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail={ + "error": "Invalid model name passed in model=" + + data.get("model", "") + }, + ) + + if hasattr(response, "_hidden_params"): + model_id = response._hidden_params.get("model_id", None) or "" + original_response = ( + response._hidden_params.get("original_response", None) or "" + ) + else: + model_id = "" + original_response = "" + + verbose_proxy_logger.debug("final response: %s", response) + if ( + "stream" in data and data["stream"] == True + ): # use generate_responses to stream responses + custom_headers = { + "x-litellm-model-id": model_id, + } + selected_data_generator = select_data_generator( + response=response, user_api_key_dict=user_api_key_dict + ) + + return StreamingResponse( + selected_data_generator, + media_type="text/event-stream", + headers=custom_headers, + ) + + fastapi_response.headers["x-litellm-model-id"] = model_id + return response + except Exception as e: + data["litellm_status"] = "fail" # used for alerting + verbose_proxy_logger.debug("EXCEPTION RAISED IN PROXY MAIN.PY") + verbose_proxy_logger.debug( + "\033[1;31mAn error occurred: %s\n\n Debug this by setting `--debug`, e.g. 
`litellm --model gpt-3.5-turbo --debug`", + e, + ) + traceback.print_exc() + error_traceback = traceback.format_exc() + error_msg = f"{str(e)}" + raise ProxyException( + message=getattr(e, "message", error_msg), + type=getattr(e, "type", "None"), + param=getattr(e, "param", "None"), + code=getattr(e, "status_code", 500), + ) + + @router.post( "/v1/embeddings", dependencies=[Depends(user_api_key_auth)], From 2fd2e811fdf21287afe9c24f3e7b369f0b4206f2 Mon Sep 17 00:00:00 2001 From: Christian Privitelli <40876121+Priva28@users.noreply.github.com> Date: Thu, 2 May 2024 14:05:38 +1000 Subject: [PATCH 008/378] add llama3 tokenizer and support for custom tokenizer --- litellm/utils.py | 42 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 39 insertions(+), 3 deletions(-) diff --git a/litellm/utils.py b/litellm/utils.py index 6243195ef8c8..f32e6ed9b21b 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -3687,6 +3687,10 @@ def _select_tokenizer(model: str): elif "llama-2" in model.lower() or "replicate" in model.lower(): tokenizer = Tokenizer.from_pretrained("hf-internal-testing/llama-tokenizer") return {"type": "huggingface_tokenizer", "tokenizer": tokenizer} + # llama3 + elif "llama-3" in model.lower(): + tokenizer = Tokenizer.from_pretrained("Xenova/llama-3-tokenizer") + return {"type": "huggingface_tokenizer", "tokenizer": tokenizer} # default - tiktoken else: return {"type": "openai_tokenizer", "tokenizer": encoding} @@ -3881,10 +3885,41 @@ def calculage_img_tokens( tile_tokens = (base_tokens * 2) * tiles_needed_high_res total_tokens = base_tokens + tile_tokens return total_tokens + + +def create_pretrained_tokenizer( + identifier: str, + revision="main", + auth_token: Optional[str] = None +): + """ + Creates a tokenizer from an existing file on a HuggingFace repository to be used with `token_counter`. + + Args: + identifier (str): The identifier of a Model on the Hugging Face Hub, that contains a tokenizer.json file + revision (str, defaults to main): A branch or commit id + auth_token (str, optional, defaults to None): An optional auth token used to access private repositories on the Hugging Face Hub + """ + + tokenizer = Tokenizer.from_pretrained(identifier, revision=revision, auth_token=auth_token) + return {"type": "huggingface_tokenizer", "tokenizer": tokenizer} + + +def create_tokenizer(json: str): + """ + Creates a tokenizer from a valid JSON string for use with `token_counter`. + + Args: + json (str): A valid JSON string representing a previously serialized tokenizer + """ + + tokenizer = Tokenizer.from_str(json) + return {"type": "huggingface_tokenizer", "tokenizer": tokenizer} def token_counter( model="", + custom_tokenizer: Optional[dict] = None, text: Optional[Union[str, List[str]]] = None, messages: Optional[List] = None, count_response_tokens: Optional[bool] = False, @@ -3894,13 +3929,14 @@ def token_counter( Args: model (str): The name of the model to use for tokenization. Default is an empty string. + tokenizer (Optional[dict]): A custom tokenizer created with the `create_pretrained_tokenizer` or `create_tokenizer` method. Must be a dictionary with a string value for `type` and Tokenizer for `tokenizer`. Default is None. text (str): The raw text string to be passed to the model. Default is None. messages (Optional[List[Dict[str, str]]]): Alternative to passing in text. A list of dictionaries representing messages with "role" and "content" keys. Default is None. Returns: int: The number of tokens in the text. 
""" - # use tiktoken, anthropic, cohere or llama2's tokenizer depending on the model + # use tiktoken, anthropic, cohere, llama2, or llama3's tokenizer depending on the model is_tool_call = False num_tokens = 0 if text == None: @@ -3942,8 +3978,8 @@ def token_counter( elif isinstance(text, str): count_response_tokens = True # user just trying to count tokens for a text. don't add the chat_ml +3 tokens to this - if model is not None: - tokenizer_json = _select_tokenizer(model=model) + if model is not None or custom_tokenizer is not None: + tokenizer_json = custom_tokenizer or _select_tokenizer(model=model) if tokenizer_json["type"] == "huggingface_tokenizer": print_verbose( f"Token Counter - using hugging face token counter, for model={model}" From b4d4b42cfaa809be25f537422d7173385231ce01 Mon Sep 17 00:00:00 2001 From: Christian Privitelli <40876121+Priva28@users.noreply.github.com> Date: Thu, 2 May 2024 14:11:03 +1000 Subject: [PATCH 009/378] fix docs --- litellm/utils.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/litellm/utils.py b/litellm/utils.py index f32e6ed9b21b..b527972ba7c6 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -3899,6 +3899,9 @@ def create_pretrained_tokenizer( identifier (str): The identifier of a Model on the Hugging Face Hub, that contains a tokenizer.json file revision (str, defaults to main): A branch or commit id auth_token (str, optional, defaults to None): An optional auth token used to access private repositories on the Hugging Face Hub + + Returns: + dict: A dictionary with the tokenizer and its type. """ tokenizer = Tokenizer.from_pretrained(identifier, revision=revision, auth_token=auth_token) @@ -3911,6 +3914,9 @@ def create_tokenizer(json: str): Args: json (str): A valid JSON string representing a previously serialized tokenizer + + Returns: + dict: A dictionary with the tokenizer and its type. """ tokenizer = Tokenizer.from_str(json) @@ -3929,7 +3935,7 @@ def token_counter( Args: model (str): The name of the model to use for tokenization. Default is an empty string. - tokenizer (Optional[dict]): A custom tokenizer created with the `create_pretrained_tokenizer` or `create_tokenizer` method. Must be a dictionary with a string value for `type` and Tokenizer for `tokenizer`. Default is None. + custom_tokenizer (Optional[dict]): A custom tokenizer created with the `create_pretrained_tokenizer` or `create_tokenizer` method. Must be a dictionary with a string value for `type` and Tokenizer for `tokenizer`. Default is None. text (str): The raw text string to be passed to the model. Default is None. messages (Optional[List[Dict[str, str]]]): Alternative to passing in text. A list of dictionaries representing messages with "role" and "content" keys. Default is None. 
From 3449a5e446ea38fb0a1c545a46e1515e729f907c Mon Sep 17 00:00:00 2001 From: Christian Privitelli <40876121+Priva28@users.noreply.github.com> Date: Thu, 2 May 2024 14:56:38 +1000 Subject: [PATCH 010/378] update docs, allow use with encode/decode --- .../my-website/docs/completion/token_usage.md | 45 +++++++++++++------ litellm/utils.py | 9 ++-- 2 files changed, 36 insertions(+), 18 deletions(-) diff --git a/docs/my-website/docs/completion/token_usage.md b/docs/my-website/docs/completion/token_usage.md index 626973c57be0..807ccfd91ec8 100644 --- a/docs/my-website/docs/completion/token_usage.md +++ b/docs/my-website/docs/completion/token_usage.md @@ -1,7 +1,7 @@ # Completion Token Usage & Cost By default LiteLLM returns token usage in all completion requests ([See here](https://litellm.readthedocs.io/en/latest/output/)) -However, we also expose 5 helper functions + **[NEW]** an API to calculate token usage across providers: +However, we also expose some helper functions + **[NEW]** an API to calculate token usage across providers: - `encode`: This encodes the text passed in, using the model-specific tokenizer. [**Jump to code**](#1-encode) @@ -9,17 +9,19 @@ However, we also expose 5 helper functions + **[NEW]** an API to calculate token - `token_counter`: This returns the number of tokens for a given input - it uses the tokenizer based on the model, and defaults to tiktoken if no model-specific tokenizer is available. [**Jump to code**](#3-token_counter) -- `cost_per_token`: This returns the cost (in USD) for prompt (input) and completion (output) tokens. Uses the live list from `api.litellm.ai`. [**Jump to code**](#4-cost_per_token) +- `create_pretrained_tokenizer` and `create_tokenizer`: LiteLLM provides default tokenizer support for OpenAI, Cohere, Anthropic, Llama2, and Llama3 models. If you are using a different model, you can create a custom tokenizer and pass it as `custom_tokenizer` to the `encode`, `decode`, and `token_counter` methods. [**Jump to code**](#4-create_pretrained_tokenizer-and-create_tokenizer) -- `completion_cost`: This returns the overall cost (in USD) for a given LLM API Call. It combines `token_counter` and `cost_per_token` to return the cost for that query (counting both cost of input and output). [**Jump to code**](#5-completion_cost) +- `cost_per_token`: This returns the cost (in USD) for prompt (input) and completion (output) tokens. Uses the live list from `api.litellm.ai`. [**Jump to code**](#5-cost_per_token) -- `get_max_tokens`: This returns the maximum number of tokens allowed for the given model. [**Jump to code**](#6-get_max_tokens) +- `completion_cost`: This returns the overall cost (in USD) for a given LLM API Call. It combines `token_counter` and `cost_per_token` to return the cost for that query (counting both cost of input and output). [**Jump to code**](#6-completion_cost) -- `model_cost`: This returns a dictionary for all models, with their max_tokens, input_cost_per_token and output_cost_per_token. It uses the `api.litellm.ai` call shown below. [**Jump to code**](#7-model_cost) +- `get_max_tokens`: This returns the maximum number of tokens allowed for the given model. [**Jump to code**](#7-get_max_tokens) -- `register_model`: This registers new / overrides existing models (and their pricing details) in the model cost dictionary. [**Jump to code**](#8-register_model) +- `model_cost`: This returns a dictionary for all models, with their max_tokens, input_cost_per_token and output_cost_per_token. It uses the `api.litellm.ai` call shown below. 
[**Jump to code**](#8-model_cost) -- `api.litellm.ai`: Live token + price count across [all supported models](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json). [**Jump to code**](#9-apilitellmai) +- `register_model`: This registers new / overrides existing models (and their pricing details) in the model cost dictionary. [**Jump to code**](#9-register_model) + +- `api.litellm.ai`: Live token + price count across [all supported models](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json). [**Jump to code**](#10-apilitellmai) 📣 This is a community maintained list. Contributions are welcome! ❤️ @@ -60,7 +62,24 @@ messages = [{"user": "role", "content": "Hey, how's it going"}] print(token_counter(model="gpt-3.5-turbo", messages=messages)) ``` -### 4. `cost_per_token` +### 4. `create_pretrained_tokenizer` and `create_tokenizer` + +```python +from litellm import create_pretrained_tokenizer, create_tokenizer + +# get tokenizer from huggingface repo +custom_tokenizer_1 = create_pretrained_tokenizer("Xenova/llama-3-tokenizer") + +# use tokenizer from json file +with open("tokenizer.json") as f: + json_data = json.load(f) + +json_str = json.dumps(json_data) + +custom_tokenizer_2 = create_tokenizer(json_str) +``` + +### 5. `cost_per_token` ```python from litellm import cost_per_token @@ -72,7 +91,7 @@ prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar = cost_per_toke print(prompt_tokens_cost_usd_dollar, completion_tokens_cost_usd_dollar) ``` -### 5. `completion_cost` +### 6. `completion_cost` * Input: Accepts a `litellm.completion()` response **OR** prompt + completion strings * Output: Returns a `float` of cost for the `completion` call @@ -99,7 +118,7 @@ cost = completion_cost(model="bedrock/anthropic.claude-v2", prompt="Hey!", compl formatted_string = f"${float(cost):.10f}" print(formatted_string) ``` -### 6. `get_max_tokens` +### 7. `get_max_tokens` Input: Accepts a model name - e.g., gpt-3.5-turbo (to get a complete list, call litellm.model_list). Output: Returns the maximum number of tokens allowed for the given model @@ -112,7 +131,7 @@ model = "gpt-3.5-turbo" print(get_max_tokens(model)) # Output: 4097 ``` -### 7. `model_cost` +### 8. `model_cost` * Output: Returns a dict object containing the max_tokens, input_cost_per_token, output_cost_per_token for all models on [community-maintained list](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json) @@ -122,7 +141,7 @@ from litellm import model_cost print(model_cost) # {'gpt-3.5-turbo': {'max_tokens': 4000, 'input_cost_per_token': 1.5e-06, 'output_cost_per_token': 2e-06}, ...} ``` -### 8. `register_model` +### 9. `register_model` * Input: Provide EITHER a model cost dictionary or a url to a hosted json blob * Output: Returns updated model_cost dictionary + updates litellm.model_cost with model details. @@ -157,5 +176,3 @@ export LITELLM_LOCAL_MODEL_COST_MAP="True" ``` Note: this means you will need to upgrade to get updated pricing, and newer models. - - diff --git a/litellm/utils.py b/litellm/utils.py index b527972ba7c6..eec3a334cb06 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -3696,24 +3696,25 @@ def _select_tokenizer(model: str): return {"type": "openai_tokenizer", "tokenizer": encoding} -def encode(model: str, text: str): +def encode(model: str, custom_tokenizer: Optional[dict] = None, text: str): """ Encodes the given text using the specified model. Args: model (str): The name of the model to use for tokenization. 
+ custom_tokenizer (Optional[dict]): A custom tokenizer created with the `create_pretrained_tokenizer` or `create_tokenizer` method. Must be a dictionary with a string value for `type` and Tokenizer for `tokenizer`. Default is None. text (str): The text to be encoded. Returns: enc: The encoded text. """ - tokenizer_json = _select_tokenizer(model=model) + tokenizer_json = custom_tokenizer or _select_tokenizer(model=model) enc = tokenizer_json["tokenizer"].encode(text) return enc -def decode(model: str, tokens: List[int]): - tokenizer_json = _select_tokenizer(model=model) +def decode(model: str, custom_tokenizer: Optional[dict] = None, tokens: List[int]): + tokenizer_json = custom_tokenizer or _select_tokenizer(model=model) dec = tokenizer_json["tokenizer"].decode(tokens) return dec From 2d43153efa8e779abf9d3fc1ad7d03698d8e2d44 Mon Sep 17 00:00:00 2001 From: Christian Privitelli <40876121+Priva28@users.noreply.github.com> Date: Thu, 2 May 2024 15:49:22 +1000 Subject: [PATCH 011/378] include methods in init import, add test, fix encode/decode param ordering --- litellm/__init__.py | 2 ++ litellm/main.py | 2 ++ litellm/tests/test_token_counter.py | 14 +++++++++++--- litellm/tests/test_utils.py | 2 ++ litellm/utils.py | 4 ++-- 5 files changed, 19 insertions(+), 5 deletions(-) diff --git a/litellm/__init__.py b/litellm/__init__.py index a3d61bce16a6..0ee22da6d4dc 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -608,6 +608,8 @@ def identify(event_details): get_optional_params, modify_integration, token_counter, + create_pretrained_tokenizer, + create_tokenizer, cost_per_token, completion_cost, supports_function_calling, diff --git a/litellm/main.py b/litellm/main.py index cdea40d11996..b5a986289050 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -33,6 +33,8 @@ async_mock_completion_streaming_obj, convert_to_model_response_object, token_counter, + create_pretrained_tokenizer, + create_tokenizer, Usage, get_optional_params_embeddings, get_optional_params_image_gen, diff --git a/litellm/tests/test_token_counter.py b/litellm/tests/test_token_counter.py index af0db487ec00..4d759d4cff4f 100644 --- a/litellm/tests/test_token_counter.py +++ b/litellm/tests/test_token_counter.py @@ -9,7 +9,7 @@ 0, os.path.abspath("../..") ) # Adds the parent directory to the system path import time -from litellm import token_counter, encode, decode +from litellm import token_counter, create_pretrained_tokenizer, encode, decode def test_token_counter_normal_plus_function_calling(): @@ -69,15 +69,23 @@ def test_tokenizers(): model="meta-llama/Llama-2-7b-chat", text=sample_text ) + # llama3 tokenizer (also testing custom tokenizer) + llama3_tokens_1 = token_counter(model="meta-llama/llama-3-70b-instruct", text=sample_text) + + llama3_tokenizer = create_pretrained_tokenizer("Xenova/llama-3-tokenizer") + llama3_tokens_2 = token_counter(custom_tokenizer=llama3_tokenizer, text=sample_text) + print( - f"openai tokens: {openai_tokens}; claude tokens: {claude_tokens}; cohere tokens: {cohere_tokens}; llama2 tokens: {llama2_tokens}" + f"openai tokens: {openai_tokens}; claude tokens: {claude_tokens}; cohere tokens: {cohere_tokens}; llama2 tokens: {llama2_tokens}; llama3 tokens: {llama3_tokens_1}" ) # assert that all token values are different assert ( - openai_tokens != cohere_tokens != llama2_tokens + openai_tokens != cohere_tokens != llama2_tokens != llama3_tokens_1 ), "Token values are not different." + assert llama3_tokens_1 == llama3_tokens_2, "Custom tokenizer is not being used! 
It has been configured to use the same tokenizer as the built in llama3 tokenizer and the results should be the same." + print("test tokenizer: It worked!") except Exception as e: pytest.fail(f"An exception occured: {e}") diff --git a/litellm/tests/test_utils.py b/litellm/tests/test_utils.py index 44fb1607c36b..57b93df9c017 100644 --- a/litellm/tests/test_utils.py +++ b/litellm/tests/test_utils.py @@ -20,6 +20,8 @@ validate_environment, function_to_dict, token_counter, + create_pretrained_tokenizer, + create_tokenizer, ) # Assuming your trim_messages, shorten_message_to_fit_limit, and get_token_count functions are all in a module named 'message_utils' diff --git a/litellm/utils.py b/litellm/utils.py index eec3a334cb06..6b1279761b1b 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -3696,7 +3696,7 @@ def _select_tokenizer(model: str): return {"type": "openai_tokenizer", "tokenizer": encoding} -def encode(model: str, custom_tokenizer: Optional[dict] = None, text: str): +def encode(model="", text="", custom_tokenizer: Optional[dict] = None): """ Encodes the given text using the specified model. @@ -3713,7 +3713,7 @@ def encode(model: str, custom_tokenizer: Optional[dict] = None, text: str): return enc -def decode(model: str, custom_tokenizer: Optional[dict] = None, tokens: List[int]): +def decode(model="", tokens: List[int] = [], custom_tokenizer: Optional[dict] = None): tokenizer_json = custom_tokenizer or _select_tokenizer(model=model) dec = tokenizer_json["tokenizer"].decode(tokens) return dec From 64d229caaa6e57f7dd169d82c5dc6a06d3aae5b1 Mon Sep 17 00:00:00 2001 From: ffreemt Date: Thu, 2 May 2024 19:30:01 +0800 Subject: [PATCH 012/378] Add return_exceptions to litellm.batch_completion for optionally returing exceptions and partial resuslt instead of throwing exceptions --- litellm/main.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/litellm/main.py b/litellm/main.py index 51ec954018d8..11ab0a0b9d49 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -2303,6 +2303,7 @@ def batch_completion( user (str, optional): The user string for generating completions. Defaults to "". deployment_id (optional): The deployment ID for generating completions. Defaults to None. request_timeout (int, optional): The request timeout for generating completions. Defaults to None. + return_exceptions (bool): Whether to return exceptions and partial results when exceptions occur. Defaults to False. Returns: list: A list of completion results. 
@@ -2361,7 +2362,17 @@ def chunks(lst, n): completions.append(future) # Retrieve the results from the futures - results = [future.result() for future in completions] + # results = [future.result() for future in completions] + if return_exceptions: + results = [] + for future in completions: + try: + results.append(future.result()) + except Exception as exc: + results.append(exc) + else: + results = [future.result() for future in completions] + return results From 152b5c8cebdd52c1a05e7926c7c800a3bf0b5142 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Thu, 2 May 2024 10:27:32 -0700 Subject: [PATCH 013/378] Add test_openai_deployments_model_chat_completions_azure --- litellm/tests/test_proxy_server.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py index 052646db8143..3e301dbd0d8a 100644 --- a/litellm/tests/test_proxy_server.py +++ b/litellm/tests/test_proxy_server.py @@ -104,6 +104,34 @@ def test_chat_completion_azure(client_no_auth): # test_chat_completion_azure() +def test_openai_deployments_model_chat_completions_azure(client_no_auth): + global headers + try: + # Your test data + test_data = { + "model": "azure/chatgpt-v-2", + "messages": [ + {"role": "user", "content": "write 1 sentence poem"}, + ], + "max_tokens": 10, + } + + url = "/openai/deployments/azure/chatgpt-v-2/chat/completions" + print(f"testing proxy server with Azure Request {url}") + response = client_no_auth.post(url, json=test_data) + + assert response.status_code == 200 + result = response.json() + print(f"Received response: {result}") + assert len(result["choices"][0]["message"]["content"]) > 0 + except Exception as e: + pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}") + + +# Run the test +# test_openai_deployments_model_chat_completions_azure() + + ### EMBEDDING def test_embedding(client_no_auth): global headers From 4dfadb0cf4c3cf4c5de79bc8e8a81ac10c0ee245 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Thu, 2 May 2024 12:24:49 -0700 Subject: [PATCH 014/378] mock_patch_acompletion in test_proxy_server.py --- litellm/tests/test_proxy_server.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py index 3e301dbd0d8a..3c192ca228e3 100644 --- a/litellm/tests/test_proxy_server.py +++ b/litellm/tests/test_proxy_server.py @@ -1,5 +1,6 @@ import sys, os import traceback +from unittest import mock from dotenv import load_dotenv load_dotenv() @@ -36,6 +37,25 @@ headers = {"Authorization": f"Bearer {token}"} +def mock_patch_acompletion(): + async def side_effect(*args, **kwargs): + return { + "choices": [ + { + "message": { + "content": "Whispers of the wind carry dreams to me.", + "role": "assistant" + } + } + ], + } + + return mock.patch( + "litellm.proxy.proxy_server.llm_router.acompletion", + side_effect=side_effect, + ) + + @pytest.fixture(scope="function") def client_no_auth(): # Assuming litellm.proxy.proxy_server is an object @@ -104,7 +124,8 @@ def test_chat_completion_azure(client_no_auth): # test_chat_completion_azure() -def test_openai_deployments_model_chat_completions_azure(client_no_auth): +@mock_patch_acompletion() +def test_openai_deployments_model_chat_completions_azure(_mock_acompletion, client_no_auth): global headers try: # Your test data From 6ec058711ab8e77edb674cf1fd7414939c5014e5 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Thu, 2 May 2024 12:41:30 -0700 Subject: 
[PATCH 015/378] Make unnecessary to pass extra arg for mock object Modify `mock_patch_acompletion` to be a context manager instead of a function that returns a mock object. This way, the mock object is created and yielded by the context manager, and the test function doesn't need to pass the mock object as an argument. --- litellm/tests/test_proxy_server.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py index 3c192ca228e3..bdf1d893c78e 100644 --- a/litellm/tests/test_proxy_server.py +++ b/litellm/tests/test_proxy_server.py @@ -2,6 +2,7 @@ import traceback from unittest import mock from dotenv import load_dotenv +import contextlib load_dotenv() import os, io @@ -37,6 +38,7 @@ headers = {"Authorization": f"Bearer {token}"} +@contextlib.contextmanager def mock_patch_acompletion(): async def side_effect(*args, **kwargs): return { @@ -50,10 +52,11 @@ async def side_effect(*args, **kwargs): ], } - return mock.patch( + with mock.patch( "litellm.proxy.proxy_server.llm_router.acompletion", side_effect=side_effect, - ) + ): + yield @pytest.fixture(scope="function") @@ -125,7 +128,7 @@ def test_chat_completion_azure(client_no_auth): @mock_patch_acompletion() -def test_openai_deployments_model_chat_completions_azure(_mock_acompletion, client_no_auth): +def test_openai_deployments_model_chat_completions_azure(client_no_auth): global headers try: # Your test data From a79fd772f4bcdbc00e2ed1ab5437c6072480a6cb Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Thu, 2 May 2024 12:47:27 -0700 Subject: [PATCH 016/378] Simplify mock_patch_acompletion --- litellm/tests/test_proxy_server.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py index bdf1d893c78e..51574c7ab308 100644 --- a/litellm/tests/test_proxy_server.py +++ b/litellm/tests/test_proxy_server.py @@ -37,24 +37,23 @@ headers = {"Authorization": f"Bearer {token}"} +example_completion_result = { + "choices": [ + { + "message": { + "content": "Whispers of the wind carry dreams to me.", + "role": "assistant" + } + } + ], +} + @contextlib.contextmanager def mock_patch_acompletion(): - async def side_effect(*args, **kwargs): - return { - "choices": [ - { - "message": { - "content": "Whispers of the wind carry dreams to me.", - "role": "assistant" - } - } - ], - } - with mock.patch( "litellm.proxy.proxy_server.llm_router.acompletion", - side_effect=side_effect, + return_value=example_completion_result, ): yield From 39670cd84a7a8b1675544260264bdb2b8f29487f Mon Sep 17 00:00:00 2001 From: Justin Watts Date: Thu, 2 May 2024 13:09:17 -0700 Subject: [PATCH 017/378] Add Vision Support for Claude 3 Family modified the model info table to add "supports_vision": true, for the claude 3 family (haiku, sonnet, and opus) --- model_prices_and_context_window.json | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index ce6f9b800feb..279f96cfe5ff 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -813,6 +813,7 @@ "litellm_provider": "anthropic", "mode": "chat", "supports_function_calling": true, + "supports_vision": true, "tool_use_system_prompt_tokens": 264 }, "claude-3-opus-20240229": { @@ -824,6 +825,7 @@ "litellm_provider": "anthropic", "mode": "chat", "supports_function_calling": true, + 
"supports_vision": true, "tool_use_system_prompt_tokens": 395 }, "claude-3-sonnet-20240229": { @@ -835,6 +837,7 @@ "litellm_provider": "anthropic", "mode": "chat", "supports_function_calling": true, + "supports_vision": true, "tool_use_system_prompt_tokens": 159 }, "text-bison": { @@ -1142,7 +1145,8 @@ "output_cost_per_token": 0.000015, "litellm_provider": "vertex_ai-anthropic_models", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_vision": true }, "vertex_ai/claude-3-haiku@20240307": { "max_tokens": 4096, @@ -1152,7 +1156,8 @@ "output_cost_per_token": 0.00000125, "litellm_provider": "vertex_ai-anthropic_models", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_vision": true }, "vertex_ai/claude-3-opus@20240229": { "max_tokens": 4096, @@ -1162,7 +1167,8 @@ "output_cost_per_token": 0.0000075, "litellm_provider": "vertex_ai-anthropic_models", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_vision": true }, "textembedding-gecko": { "max_tokens": 3072, @@ -1581,6 +1587,7 @@ "litellm_provider": "openrouter", "mode": "chat", "supports_function_calling": true, + "supports_vision": true, "tool_use_system_prompt_tokens": 395 }, "openrouter/google/palm-2-chat-bison": { @@ -1929,7 +1936,8 @@ "output_cost_per_token": 0.000015, "litellm_provider": "bedrock", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_vision": true }, "anthropic.claude-3-haiku-20240307-v1:0": { "max_tokens": 4096, @@ -1939,7 +1947,8 @@ "output_cost_per_token": 0.00000125, "litellm_provider": "bedrock", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_vision": true }, "anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, @@ -1949,7 +1958,8 @@ "output_cost_per_token": 0.000075, "litellm_provider": "bedrock", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_vision": true }, "anthropic.claude-v1": { "max_tokens": 8191, From 77098e31e4f0cde2aa23bda9099c11e436b3e01a Mon Sep 17 00:00:00 2001 From: Justin Watts Date: Thu, 2 May 2024 13:21:36 -0700 Subject: [PATCH 018/378] Update model_prices_and_context_window.json --- model_prices_and_context_window.json | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 279f96cfe5ff..7fcd425bb513 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -338,6 +338,18 @@ "output_cost_per_second": 0.0001, "litellm_provider": "azure" }, + "azure/gpt-4-turbo-2024-04-09": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00001, + "output_cost_per_token": 0.00003, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, "azure/gpt-4-0125-preview": { "max_tokens": 4096, "max_input_tokens": 128000, From 14e7c9b01c9d08675935efd2dd4fd8f50ace1d19 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Thu, 2 May 2024 13:36:23 -0700 Subject: [PATCH 019/378] Improve mocking in test_proxy_server Mock the calls to the backend and assert that the correct parameters are passed to the backend. 
--- litellm/tests/test_proxy_server.py | 154 ++++++++++++++++++++++++++--- 1 file changed, 142 insertions(+), 12 deletions(-) diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py index 51574c7ab308..43a070556682 100644 --- a/litellm/tests/test_proxy_server.py +++ b/litellm/tests/test_proxy_server.py @@ -2,7 +2,6 @@ import traceback from unittest import mock from dotenv import load_dotenv -import contextlib load_dotenv() import os, io @@ -47,15 +46,66 @@ } ], } +example_embedding_result = { + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + -0.006929283495992422, + -0.005336422007530928, + -4.547132266452536e-05, + -0.024047505110502243, + -0.006929283495992422, + -0.005336422007530928, + -4.547132266452536e-05, + -0.024047505110502243, + -0.006929283495992422, + -0.005336422007530928, + -4.547132266452536e-05, + -0.024047505110502243, + ], + } + ], + "model": "text-embedding-3-small", + "usage": { + "prompt_tokens": 5, + "total_tokens": 5 + } +} +example_image_generation_result = { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] +} -@contextlib.contextmanager def mock_patch_acompletion(): - with mock.patch( + return mock.patch( "litellm.proxy.proxy_server.llm_router.acompletion", return_value=example_completion_result, - ): - yield + ) + + +def mock_patch_aembedding(): + return mock.patch( + "litellm.proxy.proxy_server.llm_router.aembedding", + return_value=example_embedding_result, + ) + + +def mock_patch_aimage_generation(): + return mock.patch( + "litellm.proxy.proxy_server.llm_router.aimage_generation", + return_value=example_image_generation_result, + ) @pytest.fixture(scope="function") @@ -74,7 +124,8 @@ def client_no_auth(): return TestClient(app) -def test_chat_completion(client_no_auth): +@mock_patch_acompletion() +def test_chat_completion(mock_acompletion, client_no_auth): global headers try: # Your test data @@ -88,6 +139,19 @@ def test_chat_completion(client_no_auth): print("testing proxy server with chat completions") response = client_no_auth.post("/v1/chat/completions", json=test_data) + mock_acompletion.assert_called_once_with( + model="gpt-3.5-turbo", + messages=[ + {"role": "user", "content": "hi"}, + ], + max_tokens=10, + litellm_call_id=mock.ANY, + litellm_logging_obj=mock.ANY, + request_timeout=mock.ANY, + specific_deployment=True, + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) print(f"response - {response.text}") assert response.status_code == 200 result = response.json() @@ -99,7 +163,8 @@ def test_chat_completion(client_no_auth): # Run the test -def test_chat_completion_azure(client_no_auth): +@mock_patch_acompletion() +def test_chat_completion_azure(mock_acompletion, client_no_auth): global headers try: # Your test data @@ -114,6 +179,19 @@ def test_chat_completion_azure(client_no_auth): print("testing proxy server with Azure Request /chat/completions") response = client_no_auth.post("/v1/chat/completions", json=test_data) + mock_acompletion.assert_called_once_with( + model="azure/chatgpt-v-2", + messages=[ + {"role": "user", "content": "write 1 sentence poem"}, + ], + max_tokens=10, + litellm_call_id=mock.ANY, + litellm_logging_obj=mock.ANY, + request_timeout=mock.ANY, + specific_deployment=True, + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) assert response.status_code == 200 result = response.json() print(f"Received response: {result}") @@ -127,7 +205,7 @@ def test_chat_completion_azure(client_no_auth): 
@mock_patch_acompletion() -def test_openai_deployments_model_chat_completions_azure(client_no_auth): +def test_openai_deployments_model_chat_completions_azure(mock_acompletion, client_no_auth): global headers try: # Your test data @@ -143,6 +221,19 @@ def test_openai_deployments_model_chat_completions_azure(client_no_auth): print(f"testing proxy server with Azure Request {url}") response = client_no_auth.post(url, json=test_data) + mock_acompletion.assert_called_once_with( + model="azure/chatgpt-v-2", + messages=[ + {"role": "user", "content": "write 1 sentence poem"}, + ], + max_tokens=10, + litellm_call_id=mock.ANY, + litellm_logging_obj=mock.ANY, + request_timeout=mock.ANY, + specific_deployment=True, + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) assert response.status_code == 200 result = response.json() print(f"Received response: {result}") @@ -156,7 +247,8 @@ def test_openai_deployments_model_chat_completions_azure(client_no_auth): ### EMBEDDING -def test_embedding(client_no_auth): +@mock_patch_aembedding() +def test_embedding(mock_aembedding, client_no_auth): global headers from litellm.proxy.proxy_server import user_custom_auth @@ -168,6 +260,13 @@ def test_embedding(client_no_auth): response = client_no_auth.post("/v1/embeddings", json=test_data) + mock_aembedding.assert_called_once_with( + model="azure/azure-embedding-model", + input=["good morning from litellm"], + specific_deployment=True, + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) assert response.status_code == 200 result = response.json() print(len(result["data"][0]["embedding"])) @@ -176,7 +275,8 @@ def test_embedding(client_no_auth): pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}") -def test_bedrock_embedding(client_no_auth): +@mock_patch_aembedding() +def test_bedrock_embedding(mock_aembedding, client_no_auth): global headers from litellm.proxy.proxy_server import user_custom_auth @@ -188,6 +288,12 @@ def test_bedrock_embedding(client_no_auth): response = client_no_auth.post("/v1/embeddings", json=test_data) + mock_aembedding.assert_called_once_with( + model="amazon-embeddings", + input=["good morning from litellm"], + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) assert response.status_code == 200 result = response.json() print(len(result["data"][0]["embedding"])) @@ -222,7 +328,8 @@ def test_sagemaker_embedding(client_no_auth): #### IMAGE GENERATION -def test_img_gen(client_no_auth): +@mock_patch_aimage_generation() +def test_img_gen(mock_aimage_generation, client_no_auth): global headers from litellm.proxy.proxy_server import user_custom_auth @@ -236,6 +343,14 @@ def test_img_gen(client_no_auth): response = client_no_auth.post("/v1/images/generations", json=test_data) + mock_aimage_generation.assert_called_once_with( + model='dall-e-3', + prompt='A cute baby sea otter', + n=1, + size='1024x1024', + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) assert response.status_code == 200 result = response.json() print(len(result["data"][0]["url"])) @@ -300,7 +415,8 @@ def log_success_event(self, kwargs, response_obj, start_time, end_time): customHandler = MyCustomHandler() -def test_chat_completion_optional_params(client_no_auth): +@mock_patch_acompletion() +def test_chat_completion_optional_params(mock_acompletion, client_no_auth): # [PROXY: PROD TEST] - DO NOT DELETE # This tests if all the /chat/completion params are passed to litellm try: @@ -318,6 +434,20 @@ def test_chat_completion_optional_params(client_no_auth): litellm.callbacks = [customHandler] 
print("testing proxy server: optional params") response = client_no_auth.post("/v1/chat/completions", json=test_data) + mock_acompletion.assert_called_once_with( + model="gpt-3.5-turbo", + messages=[ + {"role": "user", "content": "hi"}, + ], + max_tokens=10, + user="proxy-user", + litellm_call_id=mock.ANY, + litellm_logging_obj=mock.ANY, + request_timeout=mock.ANY, + specific_deployment=True, + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) assert response.status_code == 200 result = response.json() print(f"Received response: {result}") From acda064be6137989ebc624341079e19dd31adecd Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Thu, 2 May 2024 14:50:13 -0700 Subject: [PATCH 020/378] fix(proxy/utils.py): fix retry logic for generic data request --- litellm/proxy/utils.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py index b5db81b3191e..7b0829504e67 100644 --- a/litellm/proxy/utils.py +++ b/litellm/proxy/utils.py @@ -679,8 +679,8 @@ async def check_view_exists(self): @backoff.on_exception( backoff.expo, Exception, # base exception to catch for the backoff - max_tries=3, # maximum number of retries - max_time=10, # maximum total time to retry for + max_tries=1, # maximum number of retries + max_time=2, # maximum total time to retry for on_backoff=on_backoff, # specifying the function to call on backoff ) async def get_generic_data( @@ -718,7 +718,8 @@ async def get_generic_data( import traceback error_msg = f"LiteLLM Prisma Client Exception get_generic_data: {str(e)}" - print_verbose(error_msg) + verbose_proxy_logger.error(error_msg) + error_msg = error_msg + "\nException Type: {}".format(type(e)) error_traceback = error_msg + "\n" + traceback.format_exc() end_time = time.time() _duration = end_time - start_time From 988c37fda3f0a0a30e0106a21f2f7ce88659e1f7 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Tue, 30 Apr 2024 14:34:54 -0700 Subject: [PATCH 021/378] Disambiguate invalid model name errors because that error can be thrown in several different places, so knowing the function it's being thrown from can be very useul for debugging. 
--- litellm/proxy/proxy_server.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index a0f8e9aa57fb..26987f478ee0 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -3644,7 +3644,7 @@ async def chat_completion( raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ - "error": "Invalid model name passed in model=" + "error": "chat_completion: Invalid model name passed in model=" + data.get("model", "") }, ) @@ -3833,7 +3833,7 @@ async def completion( raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ - "error": "Invalid model name passed in model=" + "error": "completion: Invalid model name passed in model=" + data.get("model", "") }, ) @@ -4041,7 +4041,7 @@ async def embeddings( raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ - "error": "Invalid model name passed in model=" + "error": "embeddings: Invalid model name passed in model=" + data.get("model", "") }, ) @@ -4197,7 +4197,7 @@ async def image_generation( raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ - "error": "Invalid model name passed in model=" + "error": "image_generation: Invalid model name passed in model=" + data.get("model", "") }, ) @@ -4372,7 +4372,7 @@ async def audio_transcriptions( raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ - "error": "Invalid model name passed in model=" + "error": "audio_transcriptions: Invalid model name passed in model=" + data.get("model", "") }, ) @@ -4538,7 +4538,7 @@ async def moderations( raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail={ - "error": "Invalid model name passed in model=" + "error": "moderations: Invalid model name passed in model=" + data.get("model", "") }, ) From 6d154e2317a4f4984d51b2e87ac73bb4e8814062 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Thu, 2 May 2024 14:50:46 -0700 Subject: [PATCH 022/378] Update tests to reflect new error messages --- litellm/tests/test_proxy_exception_mapping.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/litellm/tests/test_proxy_exception_mapping.py b/litellm/tests/test_proxy_exception_mapping.py index 82957b658cc5..0cc7b0d30d01 100644 --- a/litellm/tests/test_proxy_exception_mapping.py +++ b/litellm/tests/test_proxy_exception_mapping.py @@ -169,7 +169,7 @@ def test_chat_completion_exception_any_model(client): ) assert isinstance(openai_exception, openai.BadRequestError) _error_message = openai_exception.message - assert "Invalid model name passed in model=Lite-GPT-12" in str(_error_message) + assert "chat_completion: Invalid model name passed in model=Lite-GPT-12" in str(_error_message) except Exception as e: pytest.fail(f"LiteLLM Proxy test failed. Exception {str(e)}") @@ -197,7 +197,7 @@ def test_embedding_exception_any_model(client): print("Exception raised=", openai_exception) assert isinstance(openai_exception, openai.BadRequestError) _error_message = openai_exception.message - assert "Invalid model name passed in model=Lite-GPT-12" in str(_error_message) + assert "embeddings: Invalid model name passed in model=Lite-GPT-12" in str(_error_message) except Exception as e: pytest.fail(f"LiteLLM Proxy test failed. 
Exception {str(e)}") From cdb39e90ce5c38ef471cf315a77b6bcc97b6edad Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Thu, 2 May 2024 14:42:20 -0700 Subject: [PATCH 023/378] Improve mocking in test_proxy_exception_mapping Mock the calls to the backend and assert that the correct parameters are passed to the backend. --- litellm/tests/test_proxy_exception_mapping.py | 71 +++++++++++++++---- 1 file changed, 59 insertions(+), 12 deletions(-) diff --git a/litellm/tests/test_proxy_exception_mapping.py b/litellm/tests/test_proxy_exception_mapping.py index 0cc7b0d30d01..ccd071d01ee0 100644 --- a/litellm/tests/test_proxy_exception_mapping.py +++ b/litellm/tests/test_proxy_exception_mapping.py @@ -1,6 +1,8 @@ # test that the proxy actually does exception mapping to the OpenAI format import sys, os +from unittest import mock +import json from dotenv import load_dotenv load_dotenv() @@ -12,13 +14,30 @@ import pytest import litellm, openai from fastapi.testclient import TestClient -from fastapi import FastAPI +from fastapi import Response from litellm.proxy.proxy_server import ( router, save_worker_config, initialize, ) # Replace with the actual module where your FastAPI router is defined +invalid_authentication_error_response = Response( + status_code=401, + content=json.dumps({"error": "Invalid Authentication"}), +) +context_length_exceeded_error_response_dict = { + "error": { + "message": "AzureException - Error code: 400 - {'error': {'message': \"This model's maximum context length is 4096 tokens. However, your messages resulted in 10007 tokens. Please reduce the length of the messages.\", 'type': 'invalid_request_error', 'param': 'messages', 'code': 'context_length_exceeded'}}", + "type": None, + "param": None, + "code": 400, + }, +} +context_length_exceeded_error_response = Response( + status_code=400, + content=json.dumps(context_length_exceeded_error_response_dict), +) + @pytest.fixture def client(): @@ -60,7 +79,11 @@ def test_chat_completion_exception(client): # raise openai.AuthenticationError -def test_chat_completion_exception_azure(client): +@mock.patch( + "litellm.proxy.proxy_server.llm_router.acompletion", + return_value=invalid_authentication_error_response, +) +def test_chat_completion_exception_azure(mock_acompletion, client): try: # Your test data test_data = { @@ -73,6 +96,15 @@ def test_chat_completion_exception_azure(client): response = client.post("/chat/completions", json=test_data) + mock_acompletion.assert_called_once_with( + **test_data, + litellm_call_id=mock.ANY, + litellm_logging_obj=mock.ANY, + request_timeout=mock.ANY, + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) + json_response = response.json() print("keys in json response", json_response.keys()) assert json_response.keys() == {"error"} @@ -90,12 +122,21 @@ def test_chat_completion_exception_azure(client): # raise openai.AuthenticationError -def test_embedding_auth_exception_azure(client): +@mock.patch( + "litellm.proxy.proxy_server.llm_router.aembedding", + return_value=invalid_authentication_error_response, +) +def test_embedding_auth_exception_azure(mock_aembedding, client): try: # Your test data test_data = {"model": "azure-embedding", "input": ["hi"]} response = client.post("/embeddings", json=test_data) + mock_aembedding.assert_called_once_with( + **test_data, + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) print("Response from proxy=", response) json_response = response.json() @@ -204,7 +245,11 @@ def test_embedding_exception_any_model(client): # raise openai.BadRequestError -def 
test_chat_completion_exception_azure_context_window(client): +@mock.patch( + "litellm.proxy.proxy_server.llm_router.acompletion", + return_value=context_length_exceeded_error_response, +) +def test_chat_completion_exception_azure_context_window(mock_acompletion, client): try: # Your test data test_data = { @@ -219,20 +264,22 @@ def test_chat_completion_exception_azure_context_window(client): response = client.post("/chat/completions", json=test_data) print("got response from server", response) + mock_acompletion.assert_called_once_with( + **test_data, + litellm_call_id=mock.ANY, + litellm_logging_obj=mock.ANY, + request_timeout=mock.ANY, + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) + json_response = response.json() print("keys in json response", json_response.keys()) assert json_response.keys() == {"error"} - assert json_response == { - "error": { - "message": "AzureException - Error code: 400 - {'error': {'message': \"This model's maximum context length is 4096 tokens. However, your messages resulted in 10007 tokens. Please reduce the length of the messages.\", 'type': 'invalid_request_error', 'param': 'messages', 'code': 'context_length_exceeded'}}", - "type": None, - "param": None, - "code": 400, - } - } + assert json_response == context_length_exceeded_error_response_dict # make an openai client to call _make_status_error_from_response openai_client = openai.OpenAI(api_key="anything") From 9edf463c3bc01495d2505a967aa5a9bbc45acd6c Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Thu, 2 May 2024 16:02:52 -0700 Subject: [PATCH 024/378] fix - revert init langfuse client on slack alerts --- litellm/integrations/slack_alerting.py | 75 +------------------ ...odel_prices_and_context_window_backup.json | 34 +++++++-- 2 files changed, 30 insertions(+), 79 deletions(-) diff --git a/litellm/integrations/slack_alerting.py b/litellm/integrations/slack_alerting.py index 8f8ce712e9b4..a9aba2f1c6ca 100644 --- a/litellm/integrations/slack_alerting.py +++ b/litellm/integrations/slack_alerting.py @@ -48,19 +48,6 @@ def __init__( self.internal_usage_cache = DualCache() self.async_http_handler = AsyncHTTPHandler() self.alert_to_webhook_url = alert_to_webhook_url - self.langfuse_logger = None - - try: - from litellm.integrations.langfuse import LangFuseLogger - - self.langfuse_logger = LangFuseLogger( - os.getenv("LANGFUSE_PUBLIC_KEY"), - os.getenv("LANGFUSE_SECRET_KEY"), - flush_interval=1, - ) - except: - pass - pass def update_values( @@ -110,62 +97,8 @@ def _add_langfuse_trace_id_to_alert( start_time: Optional[datetime.datetime] = None, end_time: Optional[datetime.datetime] = None, ): - import uuid - - # For now: do nothing as we're debugging why this is not working as expected - if request_data is not None: - trace_id = request_data.get("metadata", {}).get( - "trace_id", None - ) # get langfuse trace id - if trace_id is None: - trace_id = "litellm-alert-trace-" + str(uuid.uuid4()) - request_data["metadata"]["trace_id"] = trace_id - elif kwargs is not None: - _litellm_params = kwargs.get("litellm_params", {}) - trace_id = _litellm_params.get("metadata", {}).get( - "trace_id", None - ) # get langfuse trace id - if trace_id is None: - trace_id = "litellm-alert-trace-" + str(uuid.uuid4()) - _litellm_params["metadata"]["trace_id"] = trace_id - - # Log hanging request as an error on langfuse - if type == "hanging_request": - if self.langfuse_logger is not None: - _logging_kwargs = copy.deepcopy(request_data) - if _logging_kwargs is None: - _logging_kwargs = {} - _logging_kwargs["litellm_params"] 
= {} - request_data = request_data or {} - _logging_kwargs["litellm_params"]["metadata"] = request_data.get( - "metadata", {} - ) - # log to langfuse in a separate thread - import threading - - threading.Thread( - target=self.langfuse_logger.log_event, - args=( - _logging_kwargs, - None, - start_time, - end_time, - None, - print, - "ERROR", - "Requests is hanging", - ), - ).start() - - _langfuse_host = os.environ.get("LANGFUSE_HOST", "https://cloud.langfuse.com") - _langfuse_project_id = os.environ.get("LANGFUSE_PROJECT_ID") - - # langfuse urls look like: https://us.cloud.langfuse.com/project/************/traces/litellm-alert-trace-ididi9dk-09292-************ - - _langfuse_url = ( - f"{_langfuse_host}/project/{_langfuse_project_id}/traces/{trace_id}" - ) - request_info += f"\n🪢 Langfuse Trace: {_langfuse_url}" + # do nothing for now + pass return request_info def _response_taking_too_long_callback( @@ -242,10 +175,6 @@ async def response_taking_too_long_callback( request_info = f"\nRequest Model: `{model}`\nAPI Base: `{api_base}`\nMessages: `{messages}`" slow_message = f"`Responses are slow - {round(time_difference_float,2)}s response time > Alerting threshold: {self.alerting_threshold}s`" if time_difference_float > self.alerting_threshold: - if "langfuse" in litellm.success_callback: - request_info = self._add_langfuse_trace_id_to_alert( - request_info=request_info, kwargs=kwargs, type="slow_response" - ) # add deployment latencies to alert if ( kwargs is not None diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index ce6f9b800feb..7fcd425bb513 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -338,6 +338,18 @@ "output_cost_per_second": 0.0001, "litellm_provider": "azure" }, + "azure/gpt-4-turbo-2024-04-09": { + "max_tokens": 4096, + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "input_cost_per_token": 0.00001, + "output_cost_per_token": 0.00003, + "litellm_provider": "azure", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, "azure/gpt-4-0125-preview": { "max_tokens": 4096, "max_input_tokens": 128000, @@ -813,6 +825,7 @@ "litellm_provider": "anthropic", "mode": "chat", "supports_function_calling": true, + "supports_vision": true, "tool_use_system_prompt_tokens": 264 }, "claude-3-opus-20240229": { @@ -824,6 +837,7 @@ "litellm_provider": "anthropic", "mode": "chat", "supports_function_calling": true, + "supports_vision": true, "tool_use_system_prompt_tokens": 395 }, "claude-3-sonnet-20240229": { @@ -835,6 +849,7 @@ "litellm_provider": "anthropic", "mode": "chat", "supports_function_calling": true, + "supports_vision": true, "tool_use_system_prompt_tokens": 159 }, "text-bison": { @@ -1142,7 +1157,8 @@ "output_cost_per_token": 0.000015, "litellm_provider": "vertex_ai-anthropic_models", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_vision": true }, "vertex_ai/claude-3-haiku@20240307": { "max_tokens": 4096, @@ -1152,7 +1168,8 @@ "output_cost_per_token": 0.00000125, "litellm_provider": "vertex_ai-anthropic_models", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_vision": true }, "vertex_ai/claude-3-opus@20240229": { "max_tokens": 4096, @@ -1162,7 +1179,8 @@ "output_cost_per_token": 0.0000075, "litellm_provider": "vertex_ai-anthropic_models", "mode": 
"chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_vision": true }, "textembedding-gecko": { "max_tokens": 3072, @@ -1581,6 +1599,7 @@ "litellm_provider": "openrouter", "mode": "chat", "supports_function_calling": true, + "supports_vision": true, "tool_use_system_prompt_tokens": 395 }, "openrouter/google/palm-2-chat-bison": { @@ -1929,7 +1948,8 @@ "output_cost_per_token": 0.000015, "litellm_provider": "bedrock", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_vision": true }, "anthropic.claude-3-haiku-20240307-v1:0": { "max_tokens": 4096, @@ -1939,7 +1959,8 @@ "output_cost_per_token": 0.00000125, "litellm_provider": "bedrock", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_vision": true }, "anthropic.claude-3-opus-20240229-v1:0": { "max_tokens": 4096, @@ -1949,7 +1970,8 @@ "output_cost_per_token": 0.000075, "litellm_provider": "bedrock", "mode": "chat", - "supports_function_calling": true + "supports_function_calling": true, + "supports_vision": true }, "anthropic.claude-v1": { "max_tokens": 8191, From 5baeeec899fc5f6ce9869bc68d4a81289494a7e7 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Thu, 2 May 2024 16:34:08 -0700 Subject: [PATCH 025/378] fix(openmeter.py): fix get from env --- litellm/integrations/openmeter.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/litellm/integrations/openmeter.py b/litellm/integrations/openmeter.py index 2ed551c8d89b..248b83f4d34f 100644 --- a/litellm/integrations/openmeter.py +++ b/litellm/integrations/openmeter.py @@ -38,7 +38,7 @@ def validate_environment(self): in the environment """ missing_keys = [] - if litellm.get_secret("OPENMETER_API_KEY", None) is None: + if os.getenv("OPENMETER_API_KEY", None) is None: missing_keys.append("OPENMETER_API_KEY") if len(missing_keys) > 0: @@ -71,15 +71,13 @@ def _common_logic(self, kwargs: dict, response_obj): } def log_success_event(self, kwargs, response_obj, start_time, end_time): - _url = litellm.get_secret( - "OPENMETER_API_ENDPOINT", default_value="https://openmeter.cloud" - ) + _url = os.getenv("OPENMETER_API_ENDPOINT", "https://openmeter.cloud") if _url.endswith("/"): _url += "api/v1/events" else: _url += "/api/v1/events" - api_key = litellm.get_secret("OPENMETER_API_KEY") + api_key = os.getenv("OPENMETER_API_KEY") _data = self._common_logic(kwargs=kwargs, response_obj=response_obj) self.sync_http_handler.post( @@ -92,15 +90,13 @@ def log_success_event(self, kwargs, response_obj, start_time, end_time): ) async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): - _url = litellm.get_secret( - "OPENMETER_API_ENDPOINT", default_value="https://openmeter.cloud" - ) + _url = os.getenv("OPENMETER_API_ENDPOINT", "https://openmeter.cloud") if _url.endswith("/"): _url += "api/v1/events" else: _url += "/api/v1/events" - api_key = litellm.get_secret("OPENMETER_API_KEY") + api_key = os.getenv("OPENMETER_API_KEY") _data = self._common_logic(kwargs=kwargs, response_obj=response_obj) _headers = { @@ -117,7 +113,6 @@ async def async_log_success_event(self, kwargs, response_obj, start_time, end_ti response.raise_for_status() except Exception as e: - print(f"\nAn Exception Occurred - {str(e)}") if hasattr(response, "text"): - print(f"\nError Message: {response.text}") + litellm.print_verbose(f"\nError Message: {response.text}") raise e From fdc4fdb91a2500fef20fd6b79f0e2578de964e31 Mon Sep 17 00:00:00 2001 From: 
Krrish Dholakia Date: Thu, 2 May 2024 17:18:21 -0700 Subject: [PATCH 026/378] fix(proxy/utils.py): fix slack alerting to only raise alerts for llm api exceptions don't spam for bad user requests. Closes https://github.com/BerriAI/litellm/issues/3395 --- litellm/proxy/_super_secret_config.yaml | 9 +++- litellm/proxy/utils.py | 22 ++++++---- litellm/tests/test_alerting.py | 57 ++++++++++++++++++++++++- 3 files changed, 78 insertions(+), 10 deletions(-) diff --git a/litellm/proxy/_super_secret_config.yaml b/litellm/proxy/_super_secret_config.yaml index 9f2f6ec174de..d90fb13fd0f7 100644 --- a/litellm/proxy/_super_secret_config.yaml +++ b/litellm/proxy/_super_secret_config.yaml @@ -11,5 +11,12 @@ router_settings: redis_password: os.environ/REDIS_PASSWORD redis_port: os.environ/REDIS_PORT +router_settings: + routing_strategy: "latency-based-routing" + litellm_settings: - success_callback: ["openmeter"] \ No newline at end of file + success_callback: ["openmeter"] + +general_settings: + alerting: ["slack"] + alert_types: ["llm_exceptions"] \ No newline at end of file diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py index 7b0829504e67..1048c67271b4 100644 --- a/litellm/proxy/utils.py +++ b/litellm/proxy/utils.py @@ -387,15 +387,21 @@ async def post_call_failure_hook( """ ### ALERTING ### - if "llm_exceptions" not in self.alert_types: - return - asyncio.create_task( - self.alerting_handler( - message=f"LLM API call failed: {str(original_exception)}", - level="High", - alert_type="llm_exceptions", + if "llm_exceptions" in self.alert_types and not isinstance( + original_exception, HTTPException + ): + """ + Just alert on LLM API exceptions. Do not alert on user errors + + Related issue - https://github.com/BerriAI/litellm/issues/3395 + """ + asyncio.create_task( + self.alerting_handler( + message=f"LLM API call failed: {str(original_exception)}", + level="High", + alert_type="llm_exceptions", + ) ) - ) for callback in litellm.callbacks: try: diff --git a/litellm/tests/test_alerting.py b/litellm/tests/test_alerting.py index ff3e8f8c7068..a74e25910cb2 100644 --- a/litellm/tests/test_alerting.py +++ b/litellm/tests/test_alerting.py @@ -3,7 +3,7 @@ import sys import os -import io, asyncio +import io, asyncio, httpx from datetime import datetime, timedelta # import logging @@ -17,6 +17,61 @@ from unittest.mock import patch, MagicMock from litellm.caching import DualCache from litellm.integrations.slack_alerting import SlackAlerting +from litellm.proxy._types import UserAPIKeyAuth +from litellm.proxy.proxy_server import HTTPException + + +@pytest.mark.parametrize("exception_type", ["llm-exception", "non-llm-exception"]) +@pytest.mark.asyncio +async def test_slack_alerting_llm_exceptions(exception_type, monkeypatch): + """ + Test if non-llm exception -> No request + Test if llm exception -> Request triggered + """ + _pl = ProxyLogging(user_api_key_cache=DualCache()) + _pl.update_values( + alerting=["slack"], + alerting_threshold=100, + redis_cache=None, + alert_types=["llm_exceptions"], + ) + + async def mock_alerting_handler(message, level, alert_type): + global exception_type + + if exception_type == "llm-exception": + pass + elif exception_type == "non-llm-exception": + pytest.fail("Function should not have been called") + + monkeypatch.setattr(_pl, "alerting_handler", mock_alerting_handler) + + if exception_type == "llm-exception": + await _pl.post_call_failure_hook( + original_exception=litellm.APIError( + status_code=500, + message="This is a test exception", + llm_provider="openai", + 
model="gpt-3.5-turbo", + request=httpx.Request( + method="completion", url="https://github.com/BerriAI/litellm" + ), + ), + user_api_key_dict=UserAPIKeyAuth(), + ) + + await asyncio.sleep(2) + + elif exception_type == "non-llm-exception": + await _pl.post_call_failure_hook( + original_exception=HTTPException( + status_code=400, + detail={"error": "this is a test exception"}, + ), + user_api_key_dict=UserAPIKeyAuth(), + ) + + await asyncio.sleep(2) @pytest.mark.asyncio From 91971fa9e021460ed2ce624799332bbf03f368b5 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Thu, 2 May 2024 17:53:09 -0700 Subject: [PATCH 027/378] feat(router.py): add 'get_model_info' helper function to get the model info for a specific model, based on it's id --- litellm/router.py | 19 ++++++++++++------- litellm/router_strategy/lowest_tpm_rpm_v2.py | 4 +++- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/litellm/router.py b/litellm/router.py index 15fdbd4b87c2..7acf75e8e1de 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -2590,6 +2590,16 @@ def get_deployment(self, model_id: str): return model return None + def get_model_info(self, id: str) -> Optional[dict]: + """ + For a given model id, return the model info + """ + for model in self.model_list: + if "model_info" in model and "id" in model["model_info"]: + if id == model["model_info"]["id"]: + return model + return None + def get_model_ids(self): ids = [] for model in self.model_list: @@ -2904,15 +2914,10 @@ def _common_checks_available_deployment( m for m in self.model_list if m["litellm_params"]["model"] == model ] - verbose_router_logger.debug( - f"initial list of deployments: {healthy_deployments}" - ) + litellm.print_verbose(f"initial list of deployments: {healthy_deployments}") - verbose_router_logger.debug( - f"healthy deployments: length {len(healthy_deployments)} {healthy_deployments}" - ) if len(healthy_deployments) == 0: - raise ValueError(f"No healthy deployment available, passed model={model}") + raise ValueError(f"No healthy deployment available, passed model={model}. ") if litellm.model_alias_map and model in litellm.model_alias_map: model = litellm.model_alias_map[ model diff --git a/litellm/router_strategy/lowest_tpm_rpm_v2.py b/litellm/router_strategy/lowest_tpm_rpm_v2.py index 4bcf1eec129e..f7a55d970914 100644 --- a/litellm/router_strategy/lowest_tpm_rpm_v2.py +++ b/litellm/router_strategy/lowest_tpm_rpm_v2.py @@ -79,10 +79,12 @@ def pre_call_check(self, deployment: Dict) -> Optional[Dict]: model=deployment.get("litellm_params", {}).get("model"), response=httpx.Response( status_code=429, - content="{} rpm limit={}. current usage={}".format( + content="{} rpm limit={}. current usage={}. id={}, model_group={}. 
Get the model info by calling 'router.get_model_info(id)".format( RouterErrors.user_defined_ratelimit_error.value, deployment_rpm, local_result, + model_id, + deployment.get("model_name", ""), ), request=httpx.Request(method="tpm_rpm_limits", url="https://github.com/BerriAI/litellm"), # type: ignore ), From a7ec1772b1457594d3af48cdcb0a382279b841c7 Mon Sep 17 00:00:00 2001 From: ffreemt Date: Fri, 3 May 2024 11:28:38 +0800 Subject: [PATCH 028/378] Add litellm\tests\test_batch_completion_return_exceptions.py --- .gitignore | 2 ++ litellm/main.py | 3 +- ...test_batch_completion_return_exceptions.py | 29 +++++++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 litellm/tests/test_batch_completion_return_exceptions.py diff --git a/.gitignore b/.gitignore index abc4ecb0ced4..50085bd29d90 100644 --- a/.gitignore +++ b/.gitignore @@ -52,3 +52,5 @@ litellm/proxy/_new_secret_config.yaml litellm/proxy/_new_secret_config.yaml litellm/proxy/_super_secret_config.yaml litellm/proxy/_super_secret_config.yaml +.python-version +litellm/llms/tokenizers/9b5ad71b2ce5302211f9c61530b329a4922fc6a4 diff --git a/litellm/main.py b/litellm/main.py index 11ab0a0b9d49..8fc07b9bf6df 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -2143,7 +2143,7 @@ def completion( """ assume input to custom LLM api bases follow this format: resp = requests.post( - api_base, + api_base, json={ 'model': 'meta-llama/Llama-2-13b-hf', # model name 'params': { @@ -2280,6 +2280,7 @@ def batch_completion( deployment_id=None, request_timeout: Optional[int] = None, timeout: Optional[int] = 600, + return_exceptions: bool = False, # Optional liteLLM function params **kwargs, ): diff --git a/litellm/tests/test_batch_completion_return_exceptions.py b/litellm/tests/test_batch_completion_return_exceptions.py new file mode 100644 index 000000000000..b44146993f2e --- /dev/null +++ b/litellm/tests/test_batch_completion_return_exceptions.py @@ -0,0 +1,29 @@ +"""Test batch_completion's return_exceptions.""" +import pytest +import litellm + +msg1 = [{"role": "user", "content": "hi 1"}] +msg2 = [{"role": "user", "content": "hi 2"}] + + +def test_batch_completion_return_exceptions_default(): + """Test batch_completion's return_exceptions.""" + with pytest.raises(Exception): + _ = litellm.batch_completion( + model="gpt-3.5-turbo", + messages=[msg1, msg2], + api_key="sk_xxx", # deliberately set invalid key + # return_exceptions=False, + ) + + +def test_batch_completion_return_exceptions_true(): + """Test batch_completion's return_exceptions.""" + res = litellm.batch_completion( + model="gpt-3.5-turbo", + messages=[msg1, msg2], + api_key="sk_xxx", # deliberately set invalid key + return_exceptions=True, + ) + + assert isinstance(res[0], litellm.exceptions.AuthenticationError) From 540a35ed5e96ac6e0408277d68f36191a2603bf7 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 08:48:11 -0700 Subject: [PATCH 029/378] fix update router logic --- litellm/router.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/litellm/router.py b/litellm/router.py index 7acf75e8e1de..9638db548eeb 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -2669,13 +2669,18 @@ def update_settings(self, **kwargs): "cooldown_time", ] + _existing_router_settings = self.get_settings() for var in kwargs: if var in _allowed_settings: if var in _int_settings: _casted_value = int(kwargs[var]) setattr(self, var, _casted_value) else: - if var == "routing_strategy": + # only run routing strategy init if it has changed + if ( + 
var == "routing_strategy" + and _existing_router_settings["routing_strategy"] != kwargs[var] + ): self.routing_strategy_init( routing_strategy=kwargs[var], routing_strategy_args=kwargs.get( From 0b729046087a3646ecf7c5573fb6d4cf861e520a Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 09:00:32 -0700 Subject: [PATCH 030/378] fix(lowest_latency.py): fix the size of the latency list to 10 by default (can be modified) --- litellm/router_strategy/lowest_latency.py | 37 +++++++- litellm/tests/test_lowest_latency_routing.py | 92 +++++++++++++++++++- 2 files changed, 124 insertions(+), 5 deletions(-) diff --git a/litellm/router_strategy/lowest_latency.py b/litellm/router_strategy/lowest_latency.py index 80dee5e678d9..5f0f15aac00a 100644 --- a/litellm/router_strategy/lowest_latency.py +++ b/litellm/router_strategy/lowest_latency.py @@ -31,6 +31,7 @@ def json(self, **kwargs): class RoutingArgs(LiteLLMBase): ttl: int = 1 * 60 * 60 # 1 hour lowest_latency_buffer: float = 0 + max_latency_list_size: int = 10 class LowestLatencyLoggingHandler(CustomLogger): @@ -103,7 +104,18 @@ def log_success_event(self, kwargs, response_obj, start_time, end_time): request_count_dict[id] = {} ## Latency - request_count_dict[id].setdefault("latency", []).append(final_value) + if ( + len(request_count_dict[id].get("latency", [])) + < self.routing_args.max_latency_list_size + ): + request_count_dict[id].setdefault("latency", []).append(final_value) + else: + request_count_dict[id]["latency"] = request_count_dict[id][ + "latency" + ][: self.routing_args.max_latency_list_size - 1] + [final_value] + + if precise_minute not in request_count_dict[id]: + request_count_dict[id][precise_minute] = {} if precise_minute not in request_count_dict[id]: request_count_dict[id][precise_minute] = {} @@ -170,8 +182,17 @@ async def async_log_failure_event(self, kwargs, response_obj, start_time, end_ti if id not in request_count_dict: request_count_dict[id] = {} - ## Latency - request_count_dict[id].setdefault("latency", []).append(1000.0) + ## Latency - give 1000s penalty for failing + if ( + len(request_count_dict[id].get("latency", [])) + < self.routing_args.max_latency_list_size + ): + request_count_dict[id].setdefault("latency", []).append(1000.0) + else: + request_count_dict[id]["latency"] = request_count_dict[id][ + "latency" + ][: self.routing_args.max_latency_list_size - 1] + [1000.0] + self.router_cache.set_cache( key=latency_key, value=request_count_dict, @@ -242,7 +263,15 @@ async def async_log_success_event(self, kwargs, response_obj, start_time, end_ti request_count_dict[id] = {} ## Latency - request_count_dict[id].setdefault("latency", []).append(final_value) + if ( + len(request_count_dict[id].get("latency", [])) + < self.routing_args.max_latency_list_size + ): + request_count_dict[id].setdefault("latency", []).append(final_value) + else: + request_count_dict[id]["latency"] = request_count_dict[id][ + "latency" + ][: self.routing_args.max_latency_list_size - 1] + [final_value] if precise_minute not in request_count_dict[id]: request_count_dict[id][precise_minute] = {} diff --git a/litellm/tests/test_lowest_latency_routing.py b/litellm/tests/test_lowest_latency_routing.py index 2f0aaee91deb..4da879208754 100644 --- a/litellm/tests/test_lowest_latency_routing.py +++ b/litellm/tests/test_lowest_latency_routing.py @@ -7,7 +7,7 @@ from dotenv import load_dotenv load_dotenv() -import os +import os, copy sys.path.insert( 0, os.path.abspath("../..") @@ -20,6 +20,96 @@ ### UNIT TESTS FOR LATENCY ROUTING ### 
+@pytest.mark.parametrize("sync_mode", [True, False]) +@pytest.mark.asyncio +async def test_latency_memory_leak(sync_mode): + """ + Test to make sure there's no memory leak caused by lowest latency routing + + - make 10 calls -> check memory + - make 11th call -> no change in memory + """ + test_cache = DualCache() + model_list = [] + lowest_latency_logger = LowestLatencyLoggingHandler( + router_cache=test_cache, model_list=model_list + ) + model_group = "gpt-3.5-turbo" + deployment_id = "1234" + kwargs = { + "litellm_params": { + "metadata": { + "model_group": "gpt-3.5-turbo", + "deployment": "azure/chatgpt-v-2", + }, + "model_info": {"id": deployment_id}, + } + } + start_time = time.time() + response_obj = {"usage": {"total_tokens": 50}} + time.sleep(5) + end_time = time.time() + for _ in range(10): + if sync_mode: + lowest_latency_logger.log_success_event( + response_obj=response_obj, + kwargs=kwargs, + start_time=start_time, + end_time=end_time, + ) + else: + await lowest_latency_logger.async_log_success_event( + response_obj=response_obj, + kwargs=kwargs, + start_time=start_time, + end_time=end_time, + ) + latency_key = f"{model_group}_map" + cache_value = copy.deepcopy( + test_cache.get_cache(key=latency_key) + ) # MAKE SURE NO MEMORY LEAK IN CACHING OBJECT + + if sync_mode: + lowest_latency_logger.log_success_event( + response_obj=response_obj, + kwargs=kwargs, + start_time=start_time, + end_time=end_time, + ) + else: + await lowest_latency_logger.async_log_success_event( + response_obj=response_obj, + kwargs=kwargs, + start_time=start_time, + end_time=end_time, + ) + new_cache_value = test_cache.get_cache(key=latency_key) + # Assert that the size of the cache doesn't grow unreasonably + assert get_size(new_cache_value) <= get_size( + cache_value + ), f"Memory leak detected in function call! new_cache size={get_size(new_cache_value)}, old cache size={get_size(cache_value)}" + + +def get_size(obj, seen=None): + # From https://goshippo.com/blog/measure-real-size-any-python-object/ + # Recursively finds size of objects + size = sys.getsizeof(obj) + if seen is None: + seen = set() + obj_id = id(obj) + if obj_id in seen: + return 0 + seen.add(obj_id) + if isinstance(obj, dict): + size += sum([get_size(v, seen) for v in obj.values()]) + size += sum([get_size(k, seen) for k in obj.keys()]) + elif hasattr(obj, "__dict__"): + size += get_size(obj.__dict__, seen) + elif hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, bytearray)): + size += sum([get_size(i, seen) for i in obj]) + return size + + def test_latency_updated(): test_cache = DualCache() model_list = [] From a1814a3e4ccf8bcb90aebbb8d9c4cc27435e0e17 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 09:13:37 -0700 Subject: [PATCH 031/378] test - num callbacks on proxy --- tests/test_callbacks_on_proxy.py | 79 ++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 tests/test_callbacks_on_proxy.py diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py new file mode 100644 index 000000000000..6effc74649fd --- /dev/null +++ b/tests/test_callbacks_on_proxy.py @@ -0,0 +1,79 @@ +# What this tests ? 
+## Makes sure the number of callbacks on the proxy don't increase over time +## Num callbacks should be a fixed number at t=0 and t=10, t=20 +""" +PROD TEST - DO NOT Delete this Test +""" + +import pytest +import asyncio +import aiohttp +import os +import dotenv +from dotenv import load_dotenv +import pytest + +load_dotenv() + + +async def config_update(session): + url = "http://0.0.0.0:4000/config/update" + headers = {"Authorization": "Bearer sk-1234", "Content-Type": "application/json"} + data = { + "router_settings": { + "routing_strategy": ["latency-based-routing"], + }, + } + + async with session.post(url, headers=headers, json=data) as response: + status = response.status + response_text = await response.text() + + print(response_text) + print() + + if status != 200: + raise Exception(f"Request did not return a 200 status code: {status}") + return await response.json() + + +async def get_active_callbacks(session): + url = "http://0.0.0.0:4000/health/readiness" + headers = { + "Content-Type": "application/json", + } + + async with session.get(url, headers=headers) as response: + status = response.status + response_text = await response.text() + print("response from /health/readiness") + print(response_text) + print() + + if status != 200: + raise Exception(f"Request did not return a 200 status code: {status}") + + _json_response = await response.json() + + _num_callbacks = _json_response["num_callbacks"] + print("current number of callbacks: ", _num_callbacks) + return _num_callbacks + + +@pytest.mark.asyncio +async def test_add_model_run_health(): + """ """ + import uuid + + async with aiohttp.ClientSession() as session: + num_callbacks_1 = await get_active_callbacks(session=session) + + await asyncio.sleep(30) + + num_callbacks_2 = await get_active_callbacks(session=session) + + await asyncio.sleep(30) + + num_callbacks_3 = await get_active_callbacks(session=session) + + assert num_callbacks_1 == num_callbacks_2 == num_callbacks_3 From 23d334fe60d7731555365bd9f3dc68f2038388e8 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 09:14:32 -0700 Subject: [PATCH 032/378] proxy - return num callbacks on /health/readiness --- litellm/proxy/proxy_server.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 26987f478ee0..984417757018 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -9208,7 +9208,19 @@ async def health_readiness(): """ try: # get success callback + _num_callbacks = 0 + try: + _num_callbacks = ( + len(litellm.callbacks) + + len(litellm.input_callback) + + len(litellm.failure_callback) + + len(litellm.success_callback) + ) + except: + _num_callbacks = 0 + success_callback_names = [] + try: # this was returning a JSON of the values in some of the callbacks # all we need is the callback name, hence we do str(callback) @@ -9236,13 +9248,13 @@ async def health_readiness(): # check DB if prisma_client is not None: # if db passed in, check if it's connected db_health_status = _db_health_readiness_check() - return { "status": "healthy", "db": "connected", "cache": cache_type, "litellm_version": version, "success_callbacks": success_callback_names, + "num_callbacks": _num_callbacks, **db_health_status, } else: @@ -9252,6 +9264,7 @@ async def health_readiness(): "cache": cache_type, "litellm_version": version, "success_callbacks": success_callback_names, + "num_callbacks": _num_callbacks, } except Exception as e: raise 
HTTPException(status_code=503, detail=f"Service Unhealthy ({str(e)})") From 3677d56e9e1ed5e0b28c1efcde919ab5f79cae1a Mon Sep 17 00:00:00 2001 From: Vince Loewe Date: Fri, 3 May 2024 17:42:50 +0100 Subject: [PATCH 033/378] Lunary: Fix tool calling --- litellm/integrations/lunary.py | 36 ++++++++++++++++++++++++------ litellm/tests/test_lunary.py | 40 ++++++++++++++++++++++++++++++++-- 2 files changed, 67 insertions(+), 9 deletions(-) diff --git a/litellm/integrations/lunary.py b/litellm/integrations/lunary.py index 6ddf2ca59923..6b23f098755d 100644 --- a/litellm/integrations/lunary.py +++ b/litellm/integrations/lunary.py @@ -4,7 +4,6 @@ import traceback import dotenv import importlib -import sys import packaging @@ -18,13 +17,33 @@ def parse_usage(usage): "prompt": usage["prompt_tokens"] if "prompt_tokens" in usage else 0, } +def parse_tool_calls(tool_calls): + if tool_calls is None: + return None + + def clean_tool_call(tool_call): + + serialized = { + "type": tool_call.type, + "id": tool_call.id, + "function": { + "name": tool_call.function.name, + "arguments": tool_call.function.arguments, + } + } + + return serialized + + return [clean_tool_call(tool_call) for tool_call in tool_calls] + def parse_messages(input): + if input is None: return None def clean_message(message): - # if is strin, return as is + # if is string, return as is if isinstance(message, str): return message @@ -38,9 +57,7 @@ def clean_message(message): # Only add tool_calls and function_call to res if they are set if message.get("tool_calls"): - serialized["tool_calls"] = message.get("tool_calls") - if message.get("function_call"): - serialized["function_call"] = message.get("function_call") + serialized["tool_calls"] = parse_tool_calls(message.get("tool_calls")) return serialized @@ -93,8 +110,13 @@ def log_event( print_verbose(f"Lunary Logging - Logging request for model {model}") litellm_params = kwargs.get("litellm_params", {}) + optional_params = kwargs.get("optional_params", {}) metadata = litellm_params.get("metadata", {}) or {} + if optional_params: + # merge into extra + extra = {**extra, **optional_params} + tags = litellm_params.pop("tags", None) or [] if extra: @@ -104,7 +126,7 @@ def log_event( # keep only serializable types for param, value in extra.items(): - if not isinstance(value, (str, int, bool, float)): + if not isinstance(value, (str, int, bool, float)) and param != "tools": try: extra[param] = str(value) except: @@ -140,7 +162,7 @@ def log_event( metadata=metadata, runtime="litellm", tags=tags, - extra=extra, + params=extra, ) self.lunary_client.track_event( diff --git a/litellm/tests/test_lunary.py b/litellm/tests/test_lunary.py index cbf9364aff8a..c9a8afd57f1b 100644 --- a/litellm/tests/test_lunary.py +++ b/litellm/tests/test_lunary.py @@ -11,7 +11,6 @@ litellm.success_callback = ["lunary"] litellm.set_verbose = True - def test_lunary_logging(): try: response = completion( @@ -59,9 +58,46 @@ def test_lunary_logging_with_metadata(): except Exception as e: print(e) +#test_lunary_logging_with_metadata() + +def test_lunary_with_tools(): + + import litellm + + messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}] + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + }, + } + ] + + response = litellm.completion( + model="gpt-3.5-turbo-1106", + messages=messages, + tools=tools, + tool_choice="auto", # auto is default, but we'll be explicit + ) + + response_message = response.choices[0].message + print("\nLLM Response:\n", response.choices[0].message) -# test_lunary_logging_with_metadata() +#test_lunary_with_tools() def test_lunary_logging_with_streaming_and_metadata(): try: From fe6e46546649f7368510ae0ba984e83cb16c75ae Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 09:59:59 -0700 Subject: [PATCH 034/378] test - num callbacks on proxy should not increase --- tests/test_callbacks_on_proxy.py | 79 ++++++++++++++++++++++++++++++-- 1 file changed, 75 insertions(+), 4 deletions(-) diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py index 6effc74649fd..01183cf6e000 100644 --- a/tests/test_callbacks_on_proxy.py +++ b/tests/test_callbacks_on_proxy.py @@ -16,12 +16,13 @@ load_dotenv() -async def config_update(session): +async def config_update(session, routing_strategy=None): url = "http://0.0.0.0:4000/config/update" headers = {"Authorization": "Bearer sk-1234", "Content-Type": "application/json"} + print("routing_strategy: ", routing_strategy) data = { "router_settings": { - "routing_strategy": ["latency-based-routing"], + "routing_strategy": routing_strategy, }, } @@ -60,9 +61,41 @@ async def get_active_callbacks(session): return _num_callbacks +async def get_current_routing_strategy(session): + url = "http://0.0.0.0:4000/get/config/callbacks" + headers = { + "Content-Type": "application/json", + "Authorization": "Bearer sk-1234", + } + + async with session.get(url, headers=headers) as response: + status = response.status + response_text = await response.text() + print(response_text) + print() + + if status != 200: + raise Exception(f"Request did not return a 200 status code: {status}") + + _json_response = await response.json() + print("JSON response: ", _json_response) + + router_settings = _json_response["router_settings"] + print("Router settings: ", router_settings) + routing_strategy = router_settings["routing_strategy"] + return routing_strategy + + @pytest.mark.asyncio -async def test_add_model_run_health(): - """ """ +async def test_check_num_callbacks(): + """ + Test 1: num callbacks should NOT increase over time + -> check current callbacks + -> sleep for 30s + -> check current callbacks + -> sleep for 30s + -> check current callbacks + """ import uuid async with aiohttp.ClientSession() as session: @@ -72,8 +105,46 @@ async def test_add_model_run_health(): num_callbacks_2 = await get_active_callbacks(session=session) + assert num_callbacks_1 == num_callbacks_2 + await asyncio.sleep(30) num_callbacks_3 = await get_active_callbacks(session=session) assert num_callbacks_1 == num_callbacks_2 == num_callbacks_3 + + +@pytest.mark.asyncio +async def test_check_num_callbacks_on_lowest_latency(): + """ + Test 1: num callbacks should NOT increase over time + -> Update to lowest latency + -> check current callbacks + -> sleep for 30s + -> check current callbacks + -> sleep for 30s + -> check current callbacks + -> update back to original routing-strategy + """ + import uuid + + async with aiohttp.ClientSession() as session: + + original_routing_strategy = await get_current_routing_strategy(session=session) + await config_update(session=session, routing_strategy="latency-based-routing") + 
+ num_callbacks_1 = await get_active_callbacks(session=session) + + await asyncio.sleep(30) + + num_callbacks_2 = await get_active_callbacks(session=session) + + assert num_callbacks_1 == num_callbacks_2 + + await asyncio.sleep(30) + + num_callbacks_3 = await get_active_callbacks(session=session) + + assert num_callbacks_1 == num_callbacks_2 == num_callbacks_3 + + await config_update(session=session, routing_strategy=original_routing_strategy) From 9ba5685722c1858437268f5d01b400727a974d80 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 10:05:06 -0700 Subject: [PATCH 035/378] test active callbacks on proxy --- tests/test_callbacks_on_proxy.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py index 01183cf6e000..a4d31587de61 100644 --- a/tests/test_callbacks_on_proxy.py +++ b/tests/test_callbacks_on_proxy.py @@ -100,6 +100,9 @@ async def test_check_num_callbacks(): async with aiohttp.ClientSession() as session: num_callbacks_1 = await get_active_callbacks(session=session) + assert ( + num_callbacks_1 > 0 + ) # /health/readiness returns 0 when some calculation goes wrong await asyncio.sleep(30) @@ -134,6 +137,9 @@ async def test_check_num_callbacks_on_lowest_latency(): await config_update(session=session, routing_strategy="latency-based-routing") num_callbacks_1 = await get_active_callbacks(session=session) + assert ( + num_callbacks_1 > 0 + ) # /health/readiness returns 0 when some calculation goes wrong await asyncio.sleep(30) From 2dd9d2f704028be562f7fd1cbd4709300e3f5c47 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 10:09:57 -0700 Subject: [PATCH 036/378] test(test_amazing_vertex_completion.py): try-except api errors --- .../tests/test_amazing_vertex_completion.py | 36 ------------------- litellm/utils.py | 21 +++++++---- 2 files changed, 15 insertions(+), 42 deletions(-) diff --git a/litellm/tests/test_amazing_vertex_completion.py b/litellm/tests/test_amazing_vertex_completion.py index 05eece8344e1..1d79653ea628 100644 --- a/litellm/tests/test_amazing_vertex_completion.py +++ b/litellm/tests/test_amazing_vertex_completion.py @@ -548,42 +548,6 @@ def test_gemini_pro_vision_base64(): def test_gemini_pro_function_calling(): - load_vertex_ai_credentials() - tools = [ - { - "type": "function", - "function": { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. 
San Francisco, CA", - }, - "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, - }, - "required": ["location"], - }, - }, - } - ] - - messages = [ - { - "role": "user", - "content": "What's the weather like in Boston today in fahrenheit?", - } - ] - completion = litellm.completion( - model="gemini-pro", messages=messages, tools=tools, tool_choice="auto" - ) - print(f"completion: {completion}") - if hasattr(completion.choices[0].message, "tool_calls") and isinstance( - completion.choices[0].message.tool_calls, list - ): - assert len(completion.choices[0].message.tool_calls) == 1 try: load_vertex_ai_credentials() tools = [ diff --git a/litellm/utils.py b/litellm/utils.py index ec296e9dc3da..80d26f58b924 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -3974,12 +3974,10 @@ def calculage_img_tokens( tile_tokens = (base_tokens * 2) * tiles_needed_high_res total_tokens = base_tokens + tile_tokens return total_tokens - + def create_pretrained_tokenizer( - identifier: str, - revision="main", - auth_token: Optional[str] = None + identifier: str, revision="main", auth_token: Optional[str] = None ): """ Creates a tokenizer from an existing file on a HuggingFace repository to be used with `token_counter`. @@ -3993,7 +3991,9 @@ def create_pretrained_tokenizer( dict: A dictionary with the tokenizer and its type. """ - tokenizer = Tokenizer.from_pretrained(identifier, revision=revision, auth_token=auth_token) + tokenizer = Tokenizer.from_pretrained( + identifier, revision=revision, auth_token=auth_token + ) return {"type": "huggingface_tokenizer", "tokenizer": tokenizer} @@ -9001,7 +9001,16 @@ def exception_type( request=original_exception.request, ) elif custom_llm_provider == "azure": - if "This model's maximum context length is" in error_str: + if "Internal server error" in error_str: + exception_mapping_worked = True + raise APIError( + status_code=500, + message=f"AzureException - {original_exception.message}", + llm_provider="azure", + model=model, + request=httpx.Request(method="POST", url="https://openai.com/"), + ) + elif "This model's maximum context length is" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( message=f"AzureException - {original_exception.message}", From defc205348713c77861d487357ba849e4c700c47 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 12:39:21 -0700 Subject: [PATCH 037/378] test(test_alangfuse.py): fix test --- litellm/tests/test_alangfuse.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/litellm/tests/test_alangfuse.py b/litellm/tests/test_alangfuse.py index 29718d4746a4..fd968c6a7fcb 100644 --- a/litellm/tests/test_alangfuse.py +++ b/litellm/tests/test_alangfuse.py @@ -559,7 +559,15 @@ def test_langfuse_existing_trace_id(): new_langfuse_trace = langfuse_client.get_trace(id=trace_id) - assert dict(initial_langfuse_trace) == dict(new_langfuse_trace) + initial_langfuse_trace_dict = dict(initial_langfuse_trace) + initial_langfuse_trace_dict.pop("updatedAt") + initial_langfuse_trace_dict.pop("timestamp") + + new_langfuse_trace_dict = dict(new_langfuse_trace) + new_langfuse_trace_dict.pop("updatedAt") + new_langfuse_trace_dict.pop("timestamp") + + assert initial_langfuse_trace_dict == new_langfuse_trace_dict def test_langfuse_logging_tool_calling(): From b2a0502383fbcda2ef00afbe9aae319ef3a2f1f5 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 12:42:20 -0700 Subject: [PATCH 038/378] =?UTF-8?q?bump:=20version=201.35.36=20=E2=86=92?= =?UTF-8?q?=201.35.37?= 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 72651f148fbb..b0caa908990a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.35.36" +version = "1.35.37" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -80,7 +80,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.35.36" +version = "1.35.37" version_files = [ "pyproject.toml:^version" ] From fdc9856652364d138deeaade517ae36eecd787e5 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 13:33:43 -0700 Subject: [PATCH 039/378] UI - set DB Exceptions webhook_url --- ui/litellm-dashboard/src/components/settings.tsx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ui/litellm-dashboard/src/components/settings.tsx b/ui/litellm-dashboard/src/components/settings.tsx index 53de36286319..092f7bb140f7 100644 --- a/ui/litellm-dashboard/src/components/settings.tsx +++ b/ui/litellm-dashboard/src/components/settings.tsx @@ -106,7 +106,8 @@ const Settings: React.FC = ({ "llm_exceptions": "LLM Exceptions", "llm_too_slow": "LLM Responses Too Slow", "llm_requests_hanging": "LLM Requests Hanging", - "budget_alerts": "Budget Alerts (API Keys, Users)" + "budget_alerts": "Budget Alerts (API Keys, Users)", + "db_exceptions": "Database Exceptions (Read/Write)", } useEffect(() => { From 776f541f6ceca750383ecb399083e175c192c1cb Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 14:04:38 -0700 Subject: [PATCH 040/378] fix bug where slack would get inserting several times --- litellm/proxy/proxy_server.py | 40 +++++++++++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 26987f478ee0..763094fdeb9d 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -8708,11 +8708,11 @@ async def update_config(config_info: ConfigYAML): # overwrite existing settings with updated values if k == "alert_to_webhook_url": # check if slack is already enabled. 
if not, enable it - if "slack" not in _existing_settings: - if "alerting" not in _existing_settings: + if "alerting" not in _existing_settings: + _existing_settings["alerting"] = ["slack"] + elif isinstance(_existing_settings["alerting"], list): + if "slack" not in _existing_settings["alerting"]: _existing_settings["alerting"] = ["slack"] - elif isinstance(_existing_settings["alerting"], list): - _existing_settings["alerting"].append("slack") _existing_settings[k] = v config["general_settings"] = _existing_settings @@ -9197,6 +9197,37 @@ def _db_health_readiness_check(): return db_health_cache +@router.get( + "/active/callbacks", + tags=["health"], + dependencies=[Depends(user_api_key_auth)], +) +async def active_callbacks(): + _alerting = str(general_settings.get("alerting")) + # get success callback + success_callback_names = [] + try: + # this was returning a JSON of the values in some of the callbacks + # all we need is the callback name, hence we do str(callback) + success_callback_names = [str(x) for x in litellm.success_callback] + except: + # don't let this block the /health/readiness response, if we can't convert to str -> return litellm.success_callback + success_callback_names = litellm.success_callback + + _num_callbacks = ( + len(litellm.callbacks) + + len(litellm.input_callback) + + len(litellm.failure_callback) + + len(litellm.success_callback) + ) + + return { + "alerting": _alerting, + "success_callbacks": success_callback_names, + "num_callbacks": _num_callbacks, + } + + @router.get( "/health/readiness", tags=["health"], @@ -9206,6 +9237,7 @@ async def health_readiness(): """ Unprotected endpoint for checking if worker can receive requests """ + global general_settings try: # get success callback success_callback_names = [] From e9eead2f257fd739ec8b7c371ca4994d9780d9a7 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 14:22:15 -0700 Subject: [PATCH 041/378] test - size of callbacks, alerts --- tests/test_callbacks_on_proxy.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py index a4d31587de61..29ca62b80412 100644 --- a/tests/test_callbacks_on_proxy.py +++ b/tests/test_callbacks_on_proxy.py @@ -39,7 +39,7 @@ async def config_update(session, routing_strategy=None): async def get_active_callbacks(session): - url = "http://0.0.0.0:4000/health/readiness" + url = "http://0.0.0.0:4000/active/callbacks" headers = { "Content-Type": "application/json", } @@ -47,7 +47,7 @@ async def get_active_callbacks(session): async with session.get(url, headers=headers) as response: status = response.status response_text = await response.text() - print("response from /health/readiness") + print("response from /active/callbacks") print(response_text) print() From 3997ea64427c0f1396d6f12782629745242f4be3 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 14:24:01 -0700 Subject: [PATCH 042/378] fix - return num callbacks in /active/callbacks --- litellm/proxy/proxy_server.py | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index f5c234963537..c9d4d3247af7 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -9203,6 +9203,10 @@ def _db_health_readiness_check(): dependencies=[Depends(user_api_key_auth)], ) async def active_callbacks(): + """ + Returns a list of active callbacks on litellm.callbacks, litellm.input_callback, 
litellm.failure_callback, litellm.success_callback + """ + global proxy_logging_obj _alerting = str(general_settings.get("alerting")) # get success callback success_callback_names = [] @@ -9219,12 +9223,27 @@ async def active_callbacks(): + len(litellm.input_callback) + len(litellm.failure_callback) + len(litellm.success_callback) + + len(litellm._async_failure_callback) + + len(litellm._async_success_callback) + + len(litellm._async_input_callback) ) + alerting = proxy_logging_obj.alerting + _num_alerting = 0 + if alerting and isinstance(alerting, list): + _num_alerting = len(alerting) + return { "alerting": _alerting, - "success_callbacks": success_callback_names, + "litellm.callbacks": litellm.callbacks, + "litellm.input_callback": litellm.input_callback, + "litellm.failure_callback": litellm.failure_callback, + "litellm.success_callback": success_callback_names, + "litellm._async_success_callback": litellm._async_success_callback, + "litellm._async_failure_callback": litellm._async_failure_callback, + "litellm._async_input_callback": litellm._async_input_callback, "num_callbacks": _num_callbacks, + "num_alerting": _num_alerting, } @@ -9240,17 +9259,6 @@ async def health_readiness(): global general_settings try: # get success callback - _num_callbacks = 0 - try: - _num_callbacks = ( - len(litellm.callbacks) - + len(litellm.input_callback) - + len(litellm.failure_callback) - + len(litellm.success_callback) - ) - except: - _num_callbacks = 0 - success_callback_names = [] try: @@ -9286,7 +9294,6 @@ async def health_readiness(): "cache": cache_type, "litellm_version": version, "success_callbacks": success_callback_names, - "num_callbacks": _num_callbacks, **db_health_status, } else: @@ -9296,7 +9303,6 @@ async def health_readiness(): "cache": cache_type, "litellm_version": version, "success_callbacks": success_callback_names, - "num_callbacks": _num_callbacks, } except Exception as e: raise HTTPException(status_code=503, detail=f"Service Unhealthy ({str(e)})") From e5311d35f24a172f59a8f1cea438d5b5109cacfd Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 14:32:13 -0700 Subject: [PATCH 043/378] fix test len active callbacks --- tests/test_callbacks_on_proxy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py index 29ca62b80412..b5de5df259c0 100644 --- a/tests/test_callbacks_on_proxy.py +++ b/tests/test_callbacks_on_proxy.py @@ -42,6 +42,7 @@ async def get_active_callbacks(session): url = "http://0.0.0.0:4000/active/callbacks" headers = { "Content-Type": "application/json", + "Authorization": "Bearer sk-1234", } async with session.get(url, headers=headers) as response: From a369867e0208dbd356a2acdffaa79a110e02a28b Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 14:46:44 -0700 Subject: [PATCH 044/378] test - num alerts on callbacks --- tests/test_callbacks_on_proxy.py | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py index b5de5df259c0..c10b18ed1d74 100644 --- a/tests/test_callbacks_on_proxy.py +++ b/tests/test_callbacks_on_proxy.py @@ -24,6 +24,12 @@ async def config_update(session, routing_strategy=None): "router_settings": { "routing_strategy": routing_strategy, }, + "general_settings": { + "alert_to_webhook_url": { + "llm_exceptions": "https://hooks.slack.com/services/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" + }, + "alert_types": ["llm_exceptions", "db_exceptions"], + }, } 
async with session.post(url, headers=headers, json=data) as response: @@ -58,8 +64,10 @@ async def get_active_callbacks(session): _json_response = await response.json() _num_callbacks = _json_response["num_callbacks"] + _num_alerts = _json_response["num_alerting"] print("current number of callbacks: ", _num_callbacks) - return _num_callbacks + print("current number of alerts: ", _num_alerts) + return _num_callbacks, _num_alerts async def get_current_routing_strategy(session): @@ -100,20 +108,20 @@ async def test_check_num_callbacks(): import uuid async with aiohttp.ClientSession() as session: - num_callbacks_1 = await get_active_callbacks(session=session) + num_callbacks_1, _ = await get_active_callbacks(session=session) assert ( num_callbacks_1 > 0 ) # /health/readiness returns 0 when some calculation goes wrong await asyncio.sleep(30) - num_callbacks_2 = await get_active_callbacks(session=session) + num_callbacks_2, _ = await get_active_callbacks(session=session) assert num_callbacks_1 == num_callbacks_2 await asyncio.sleep(30) - num_callbacks_3 = await get_active_callbacks(session=session) + num_callbacks_3, _ = await get_active_callbacks(session=session) assert num_callbacks_1 == num_callbacks_2 == num_callbacks_3 @@ -137,21 +145,23 @@ async def test_check_num_callbacks_on_lowest_latency(): original_routing_strategy = await get_current_routing_strategy(session=session) await config_update(session=session, routing_strategy="latency-based-routing") - num_callbacks_1 = await get_active_callbacks(session=session) + num_callbacks_1, num_alerts_1 = await get_active_callbacks(session=session) assert ( num_callbacks_1 > 0 ) # /health/readiness returns 0 when some calculation goes wrong await asyncio.sleep(30) - num_callbacks_2 = await get_active_callbacks(session=session) + num_callbacks_2, num_alerts_2 = await get_active_callbacks(session=session) assert num_callbacks_1 == num_callbacks_2 await asyncio.sleep(30) - num_callbacks_3 = await get_active_callbacks(session=session) + num_callbacks_3, num_alerts_3 = await get_active_callbacks(session=session) assert num_callbacks_1 == num_callbacks_2 == num_callbacks_3 + assert num_alerts_1 == num_alerts_2 == num_alerts_3 + await config_update(session=session, routing_strategy=original_routing_strategy) From ab27866b6a025680188efab51d17aa9274d3f86e Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 14:58:11 -0700 Subject: [PATCH 045/378] fix test slack alerting len --- litellm/proxy/proxy_server.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index c9d4d3247af7..dfc6db6bac29 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -9235,13 +9235,19 @@ async def active_callbacks(): return { "alerting": _alerting, - "litellm.callbacks": litellm.callbacks, - "litellm.input_callback": litellm.input_callback, - "litellm.failure_callback": litellm.failure_callback, - "litellm.success_callback": success_callback_names, - "litellm._async_success_callback": litellm._async_success_callback, - "litellm._async_failure_callback": litellm._async_failure_callback, - "litellm._async_input_callback": litellm._async_input_callback, + "litellm.callbacks": [str(x) for x in litellm.callbacks], + "litellm.input_callback": [str(x) for x in litellm.input_callback], + "litellm.failure_callback": [str(x) for x in litellm.failure_callback], + "litellm.success_callback": [str(x) for x in litellm.success_callback], + 
"litellm._async_success_callback": [ + str(x) for x in litellm._async_success_callback + ], + "litellm._async_failure_callback": [ + str(x) for x in litellm._async_failure_callback + ], + "litellm._async_input_callback": [ + str(x) for x in litellm._async_input_callback + ], "num_callbacks": _num_callbacks, "num_alerting": _num_alerting, } From bae5d3601db52be88a97e6e90b97951387b12094 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 15:08:55 -0700 Subject: [PATCH 046/378] feat - add unit tests for slack alerting --- litellm/tests/test_alerting.py | 134 +++++++++++++++++++-------------- 1 file changed, 78 insertions(+), 56 deletions(-) diff --git a/litellm/tests/test_alerting.py b/litellm/tests/test_alerting.py index a74e25910cb2..5d6e068b7cf4 100644 --- a/litellm/tests/test_alerting.py +++ b/litellm/tests/test_alerting.py @@ -3,7 +3,7 @@ import sys import os -import io, asyncio, httpx +import io, asyncio from datetime import datetime, timedelta # import logging @@ -17,61 +17,6 @@ from unittest.mock import patch, MagicMock from litellm.caching import DualCache from litellm.integrations.slack_alerting import SlackAlerting -from litellm.proxy._types import UserAPIKeyAuth -from litellm.proxy.proxy_server import HTTPException - - -@pytest.mark.parametrize("exception_type", ["llm-exception", "non-llm-exception"]) -@pytest.mark.asyncio -async def test_slack_alerting_llm_exceptions(exception_type, monkeypatch): - """ - Test if non-llm exception -> No request - Test if llm exception -> Request triggered - """ - _pl = ProxyLogging(user_api_key_cache=DualCache()) - _pl.update_values( - alerting=["slack"], - alerting_threshold=100, - redis_cache=None, - alert_types=["llm_exceptions"], - ) - - async def mock_alerting_handler(message, level, alert_type): - global exception_type - - if exception_type == "llm-exception": - pass - elif exception_type == "non-llm-exception": - pytest.fail("Function should not have been called") - - monkeypatch.setattr(_pl, "alerting_handler", mock_alerting_handler) - - if exception_type == "llm-exception": - await _pl.post_call_failure_hook( - original_exception=litellm.APIError( - status_code=500, - message="This is a test exception", - llm_provider="openai", - model="gpt-3.5-turbo", - request=httpx.Request( - method="completion", url="https://github.com/BerriAI/litellm" - ), - ), - user_api_key_dict=UserAPIKeyAuth(), - ) - - await asyncio.sleep(2) - - elif exception_type == "non-llm-exception": - await _pl.post_call_failure_hook( - original_exception=HTTPException( - status_code=400, - detail={"error": "this is a test exception"}, - ), - user_api_key_dict=UserAPIKeyAuth(), - ) - - await asyncio.sleep(2) @pytest.mark.asyncio @@ -149,3 +94,80 @@ def test_init(): assert slack_no_alerting.alerting == [] print("passed testing slack alerting init") + + +from unittest.mock import patch, AsyncMock +from datetime import datetime, timedelta + + +@pytest.fixture +def slack_alerting(): + return SlackAlerting(alerting_threshold=1) + + +# Test for hanging LLM responses +@pytest.mark.asyncio +async def test_response_taking_too_long_hanging(slack_alerting): + request_data = { + "model": "test_model", + "messages": "test_messages", + "litellm_status": "running", + } + with patch.object(slack_alerting, "send_alert", new=AsyncMock()) as mock_send_alert: + await slack_alerting.response_taking_too_long( + type="hanging_request", request_data=request_data + ) + mock_send_alert.assert_awaited_once() + + +# Test for slow LLM responses +@pytest.mark.asyncio +async def 
test_response_taking_too_long_callback(slack_alerting): + start_time = datetime.now() + end_time = start_time + timedelta(seconds=301) + kwargs = {"model": "test_model", "messages": "test_messages", "litellm_params": {}} + with patch.object(slack_alerting, "send_alert", new=AsyncMock()) as mock_send_alert: + await slack_alerting.response_taking_too_long_callback( + kwargs, None, start_time, end_time + ) + mock_send_alert.assert_awaited_once() + + +# Test for budget crossed +@pytest.mark.asyncio +async def test_budget_alerts_crossed(slack_alerting): + user_max_budget = 100 + user_current_spend = 101 + with patch.object(slack_alerting, "send_alert", new=AsyncMock()) as mock_send_alert: + await slack_alerting.budget_alerts( + "user_budget", user_max_budget, user_current_spend + ) + mock_send_alert.assert_awaited_once() + + +# Test for budget crossed again (should not fire alert 2nd time) +@pytest.mark.asyncio +async def test_budget_alerts_crossed_again(slack_alerting): + user_max_budget = 100 + user_current_spend = 101 + with patch.object(slack_alerting, "send_alert", new=AsyncMock()) as mock_send_alert: + await slack_alerting.budget_alerts( + "user_budget", user_max_budget, user_current_spend + ) + mock_send_alert.assert_awaited_once() + mock_send_alert.reset_mock() + await slack_alerting.budget_alerts( + "user_budget", user_max_budget, user_current_spend + ) + mock_send_alert.assert_not_awaited() + + +# Test for send_alert - should be called once +@pytest.mark.asyncio +async def test_send_alert(slack_alerting): + with patch.object( + slack_alerting.async_http_handler, "post", new=AsyncMock() + ) as mock_post: + mock_post.return_value.status_code = 200 + await slack_alerting.send_alert("Test message", "Low", "budget_alerts") + mock_post.assert_awaited_once() From 5b39f8e282e399726849ae56169610cacee31213 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 15:27:32 -0700 Subject: [PATCH 047/378] feat(proxy_server.py): return api base in response headers Closes https://github.com/BerriAI/litellm/issues/2631 --- litellm/proxy/proxy_server.py | 3 ++ litellm/tests/test_alerting.py | 14 +++++++++ litellm/types/router.py | 2 ++ litellm/utils.py | 57 ++++++++++++++++++++++++++++------ 4 files changed, 66 insertions(+), 10 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 26987f478ee0..eb349b2d5f6e 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -3658,6 +3658,7 @@ async def chat_completion( hidden_params = getattr(response, "_hidden_params", {}) or {} model_id = hidden_params.get("model_id", None) or "" cache_key = hidden_params.get("cache_key", None) or "" + api_base = hidden_params.get("api_base", None) or "" # Post Call Processing if llm_router is not None: @@ -3670,6 +3671,7 @@ async def chat_completion( custom_headers = { "x-litellm-model-id": model_id, "x-litellm-cache-key": cache_key, + "x-litellm-model-api-base": api_base, } selected_data_generator = select_data_generator( response=response, user_api_key_dict=user_api_key_dict @@ -3682,6 +3684,7 @@ async def chat_completion( fastapi_response.headers["x-litellm-model-id"] = model_id fastapi_response.headers["x-litellm-cache-key"] = cache_key + fastapi_response.headers["x-litellm-model-api-base"] = api_base ### CALL HOOKS ### - modify outgoing data response = await proxy_logging_obj.post_call_success_hook( diff --git a/litellm/tests/test_alerting.py b/litellm/tests/test_alerting.py index a74e25910cb2..40c75b86b7e5 100644 --- 
a/litellm/tests/test_alerting.py +++ b/litellm/tests/test_alerting.py @@ -15,6 +15,7 @@ import pytest import asyncio from unittest.mock import patch, MagicMock +from litellm.utils import get_api_base from litellm.caching import DualCache from litellm.integrations.slack_alerting import SlackAlerting from litellm.proxy._types import UserAPIKeyAuth @@ -74,6 +75,19 @@ async def mock_alerting_handler(message, level, alert_type): await asyncio.sleep(2) +@pytest.mark.parametrize( + "model, optional_params, expected_api_base", + [ + ("openai/my-fake-model", {"api_base": "my-fake-api-base"}, "my-fake-api-base"), + ("gpt-3.5-turbo", {}, "https://api.openai.com"), + ], +) +def test_get_api_base_unit_test(model, optional_params, expected_api_base): + api_base = get_api_base(model=model, optional_params=optional_params) + + assert api_base == expected_api_base + + @pytest.mark.asyncio async def test_get_api_base(): _pl = ProxyLogging(user_api_key_cache=DualCache()) diff --git a/litellm/types/router.py b/litellm/types/router.py index 64b71b999e05..068a99b0059a 100644 --- a/litellm/types/router.py +++ b/litellm/types/router.py @@ -99,6 +99,7 @@ def __setitem__(self, key, value): class LiteLLM_Params(BaseModel): model: str + custom_llm_provider: Optional[str] = None tpm: Optional[int] = None rpm: Optional[int] = None api_key: Optional[str] = None @@ -123,6 +124,7 @@ class LiteLLM_Params(BaseModel): def __init__( self, model: str, + custom_llm_provider: Optional[str] = None, max_retries: Optional[Union[int, str]] = None, tpm: Optional[int] = None, rpm: Optional[int] = None, diff --git a/litellm/utils.py b/litellm/utils.py index 80d26f58b924..589ea4d078f6 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -315,6 +315,7 @@ class ChatCompletionDeltaToolCall(OpenAIObject): class HiddenParams(OpenAIObject): original_response: Optional[str] = None model_id: Optional[str] = None # used in Router for individual deployments + api_base: Optional[str] = None # returns api base used for making completion call class Config: extra = "allow" @@ -3157,6 +3158,10 @@ def wrapper(*args, **kwargs): result._hidden_params["model_id"] = kwargs.get("model_info", {}).get( "id", None ) + result._hidden_params["api_base"] = get_api_base( + model=model, + optional_params=getattr(logging_obj, "optional_params", {}), + ) result._response_ms = ( end_time - start_time ).total_seconds() * 1000 # return response latency in ms like openai @@ -3226,6 +3231,8 @@ async def wrapper_async(*args, **kwargs): call_type = original_function.__name__ if "litellm_call_id" not in kwargs: kwargs["litellm_call_id"] = str(uuid.uuid4()) + + model = "" try: model = args[0] if len(args) > 0 else kwargs["model"] except: @@ -3547,6 +3554,10 @@ async def wrapper_async(*args, **kwargs): result._hidden_params["model_id"] = kwargs.get("model_info", {}).get( "id", None ) + result._hidden_params["api_base"] = get_api_base( + model=model, + optional_params=kwargs, + ) if ( isinstance(result, ModelResponse) or isinstance(result, EmbeddingResponse) @@ -5810,18 +5821,39 @@ def get_api_base(model: str, optional_params: dict) -> Optional[str]: get_api_base(model="gemini/gemini-pro") ``` """ - _optional_params = LiteLLM_Params( - model=model, **optional_params - ) # convert to pydantic object + + try: + if "model" in optional_params: + _optional_params = LiteLLM_Params(**optional_params) + else: # prevent needing to copy and pop the dict + _optional_params = LiteLLM_Params( + model=model, **optional_params + ) # convert to pydantic object + except Exception as e: + 
verbose_logger.error("Error occurred in getting api base - {}".format(str(e))) + return None # get llm provider + + if _optional_params.api_base is not None: + return _optional_params.api_base + try: - model, custom_llm_provider, dynamic_api_key, api_base = get_llm_provider( - model=model + model, custom_llm_provider, dynamic_api_key, dynamic_api_base = ( + get_llm_provider( + model=model, + custom_llm_provider=_optional_params.custom_llm_provider, + api_base=_optional_params.api_base, + api_key=_optional_params.api_key, + ) ) - except: + except Exception as e: + verbose_logger.error("Error occurred in getting api base - {}".format(str(e))) custom_llm_provider = None - if _optional_params.api_base is not None: - return _optional_params.api_base + dynamic_api_key = None + dynamic_api_base = None + + if dynamic_api_base is not None: + return dynamic_api_base if ( _optional_params.vertex_location is not None @@ -5835,11 +5867,17 @@ def get_api_base(model: str, optional_params: dict) -> Optional[str]: ) return _api_base - if custom_llm_provider is not None and custom_llm_provider == "gemini": + if custom_llm_provider is None: + return None + + if custom_llm_provider == "gemini": _api_base = "https://generativelanguage.googleapis.com/v1beta/models/{}:generateContent".format( model ) return _api_base + elif custom_llm_provider == "openai": + _api_base = "https://api.openai.com" + return _api_base return None @@ -6147,7 +6185,6 @@ def get_llm_provider( try: dynamic_api_key = None # check if llm provider provided - # AZURE AI-Studio Logic - Azure AI Studio supports AZURE/Cohere # If User passes azure/command-r-plus -> we should send it to cohere_chat/command-r-plus if model.split("/", 1)[0] == "azure": From 209baaca0266cf441e00ccae7a382535cfafec5e Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 15:40:05 -0700 Subject: [PATCH 048/378] docs - simplify prod docs --- docs/my-website/docs/proxy/prod.md | 165 ++++++++--------------------- 1 file changed, 44 insertions(+), 121 deletions(-) diff --git a/docs/my-website/docs/proxy/prod.md b/docs/my-website/docs/proxy/prod.md index 980bba542672..32cd916c9863 100644 --- a/docs/my-website/docs/proxy/prod.md +++ b/docs/my-website/docs/proxy/prod.md @@ -3,34 +3,38 @@ import TabItem from '@theme/TabItem'; # ⚡ Best Practices for Production -Expected Performance in Production - -1 LiteLLM Uvicorn Worker on Kubernetes - -| Description | Value | -|--------------|-------| -| Avg latency | `50ms` | -| Median latency | `51ms` | -| `/chat/completions` Requests/second | `35` | -| `/chat/completions` Requests/minute | `2100` | -| `/chat/completions` Requests/hour | `126K` | +## 1. Use this config.yaml +Use this config.yaml in production (with your own LLMs) +```yaml +model_list: + - model_name: fake-openai-endpoint + litellm_params: + model: openai/fake + api_key: fake-key + api_base: https://exampleopenaiendpoint-production.up.railway.app/ -## 1. 
Switch off Debug Logging +general_settings: + master_key: sk-1234 # enter your own master key, ensure it starts with 'sk-' + alerting: ["slack"] # Setup slack alerting - get alerts on LLM exceptions, Budget Alerts, Slow LLM Responses + proxy_batch_write_at: 60 # Batch write spend updates every 60s -Remove `set_verbose: True` from your config.yaml -```yaml litellm_settings: - set_verbose: True + set_verbose: False # Switch off Debug Logging, ensure your logs do not have any debugging on ``` -You should only see the following level of details in logs on the proxy server +Set slack webhook url in your env ```shell -# INFO: 192.168.2.205:11774 - "POST /chat/completions HTTP/1.1" 200 OK -# INFO: 192.168.2.205:34717 - "POST /chat/completions HTTP/1.1" 200 OK -# INFO: 192.168.2.205:29734 - "POST /chat/completions HTTP/1.1" 200 OK +export SLACK_WEBHOOK_URL="https://hooks.slack.com/services/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" ``` +:::info + +Need Help or want dedicated support? Talk to a founder [here](https://calendly.com/d/4mp-gd3-k5k/litellm-1-1-onboarding-chat) + +::: + + ## 2. On Kubernetes - Use 1 Uvicorn worker [Suggested CMD] Use this Docker `CMD`. This will start the proxy with 1 Uvicorn Async Worker @@ -40,21 +44,12 @@ Use this Docker `CMD`. This will start the proxy with 1 Uvicorn Async Worker CMD ["--port", "4000", "--config", "./proxy_server_config.yaml"] ``` -## 3. Batch write spend updates every 60s -The default proxy batch write is 10s. This is to make it easy to see spend when debugging locally. +## 3. Use Redis 'port','host', 'password'. NOT 'redis_url' -In production, we recommend using a longer interval period of 60s. This reduces the number of connections used to make DB writes. +If you decide to use Redis, DO NOT use 'redis_url'. We recommend using redis port, host, and password params. -```yaml -general_settings: - master_key: sk-1234 - proxy_batch_write_at: 60 # 👈 Frequency of batch writing logs to server (in seconds) -``` - -## 4. use Redis 'port','host', 'password'. NOT 'redis_url' +`redis_url` is 80 RPS slower -When connecting to Redis use redis port, host, and password params. Not 'redis_url'. We've seen a 80 RPS difference between these 2 approaches when using the async redis client. +This is still something we're investigating. Keep track of it [here](https://github.com/BerriAI/litellm/issues/3188) @@ -69,103 +64,31 @@ router_settings: redis_password: os.environ/REDIS_PASSWORD ``` -## 5. Switch off resetting budgets - -Add this to your config.yaml. (Only spend per Key, User and Team will be tracked - spend per API Call will not be written to the LiteLLM Database) -```yaml -general_settings: - disable_reset_budget: true -``` - -## 6. Move spend logs to separate server (BETA) - -Writing each spend log to the db can slow down your proxy. In testing we saw a 70% improvement in median response time, by moving writing spend logs to a separate server. - -👉 [LiteLLM Spend Logs Server](https://github.com/BerriAI/litellm/tree/main/litellm-js/spend-logs) - - -**Spend Logs** -This is a log of the key, tokens, model, and latency for each call on the proxy. - -[**Full Payload**](https://github.com/BerriAI/litellm/blob/8c9623a6bc4ad9da0a2dac64249a60ed8da719e8/litellm/proxy/utils.py#L1769) - - -**1. Start the spend logs server** - -```bash -docker run -p 3000:3000 \ - -e DATABASE_URL="postgres://.." \ - ghcr.io/berriai/litellm-spend_logs:main-latest - -# RUNNING on http://0.0.0.0:3000 -``` - -**2.
Connect to proxy** - - -Example litellm_config.yaml - -```yaml -model_list: -- model_name: fake-openai-endpoint - litellm_params: - model: openai/my-fake-model - api_key: my-fake-key - api_base: https://exampleopenaiendpoint-production.up.railway.app/ - -general_settings: - master_key: sk-1234 - proxy_batch_write_at: 5 # 👈 Frequency of batch writing logs to server (in seconds) -``` - -Add `SPEND_LOGS_URL` as an environment variable when starting the proxy - -```bash -docker run \ - -v $(pwd)/litellm_config.yaml:/app/config.yaml \ - -e DATABASE_URL="postgresql://.." \ - -e SPEND_LOGS_URL="http://host.docker.internal:3000" \ # 👈 KEY CHANGE - -p 4000:4000 \ - ghcr.io/berriai/litellm:main-latest \ - --config /app/config.yaml --detailed_debug - -# Running on http://0.0.0.0:4000 -``` - -**3. Test Proxy!** +## Extras +### Expected Performance in Production +1 LiteLLM Uvicorn Worker on Kubernetes -```bash -curl --location 'http://0.0.0.0:4000/v1/chat/completions' \ ---header 'Content-Type: application/json' \ ---header 'Authorization: Bearer sk-1234' \ ---data '{ - "model": "fake-openai-endpoint", - "messages": [ - {"role": "system", "content": "Be helpful"}, - {"role": "user", "content": "What do you know?"} - ] -}' -``` +| Description | Value | +|--------------|-------| +| Avg latency | `50ms` | +| Median latency | `51ms` | +| `/chat/completions` Requests/second | `35` | +| `/chat/completions` Requests/minute | `2100` | +| `/chat/completions` Requests/hour | `126K` | -In your LiteLLM Spend Logs Server, you should see -**Expected Response** +### Verifying Debugging logs are off +You should only see the following level of details in logs on the proxy server +```shell +# INFO: 192.168.2.205:11774 - "POST /chat/completions HTTP/1.1" 200 OK +# INFO: 192.168.2.205:34717 - "POST /chat/completions HTTP/1.1" 200 OK +# INFO: 192.168.2.205:29734 - "POST /chat/completions HTTP/1.1" 200 OK ``` -Received and stored 1 logs. Total logs in memory: 1 -... -Flushed 1 log to the DB. -``` - - -### Machine Specification - -A t2.micro should be sufficient to handle 1k logs / minute on this server. -This consumes at max 120MB, and <0.1 vCPU. -## Machine Specifications to Deploy LiteLLM +### Machine Specifications to Deploy LiteLLM | Service | Spec | CPUs | Memory | Architecture | Version| | --- | --- | --- | --- | --- | --- | @@ -173,7 +96,7 @@ This consumes at max 120MB, and <0.1 vCPU. 
| Redis Cache | - | - | - | - | 7.0+ Redis Engine| -## Reference Kubernetes Deployment YAML +### Reference Kubernetes Deployment YAML Reference Kubernetes `deployment.yaml` that was load tested by us From fbe412a3a6a61640793a24d4c919210175e2179c Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 15:58:05 -0700 Subject: [PATCH 049/378] feat - add amazon.titan-embed-text-v2 --- litellm/model_prices_and_context_window_backup.json | 9 +++++++++ model_prices_and_context_window.json | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/litellm/model_prices_and_context_window_backup.json b/litellm/model_prices_and_context_window_backup.json index 7fcd425bb513..c7d5aae2d03b 100644 --- a/litellm/model_prices_and_context_window_backup.json +++ b/litellm/model_prices_and_context_window_backup.json @@ -1832,6 +1832,15 @@ "litellm_provider": "bedrock", "mode": "embedding" }, + "amazon.titan-embed-text-v2:0": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "output_vector_size": 1024, + "input_cost_per_token": 0.0000002, + "output_cost_per_token": 0.0, + "litellm_provider": "bedrock", + "mode": "embedding" + }, "mistral.mistral-7b-instruct-v0:2": { "max_tokens": 8191, "max_input_tokens": 32000, diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 7fcd425bb513..c7d5aae2d03b 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -1832,6 +1832,15 @@ "litellm_provider": "bedrock", "mode": "embedding" }, + "amazon.titan-embed-text-v2:0": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "output_vector_size": 1024, + "input_cost_per_token": 0.0000002, + "output_cost_per_token": 0.0, + "litellm_provider": "bedrock", + "mode": "embedding" + }, "mistral.mistral-7b-instruct-v0:2": { "max_tokens": 8191, "max_input_tokens": 32000, From bf048ecda4976f2676a25de4aec69fe98c6c0b54 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 15:59:02 -0700 Subject: [PATCH 050/378] docs - titan embeddings v2 --- docs/my-website/docs/providers/bedrock.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/my-website/docs/providers/bedrock.md b/docs/my-website/docs/providers/bedrock.md index 590ffc423b29..147c12e65a9d 100644 --- a/docs/my-website/docs/providers/bedrock.md +++ b/docs/my-website/docs/providers/bedrock.md @@ -535,7 +535,8 @@ print(response) | Model Name | Function Call | |----------------------|---------------------------------------------| -| Titan Embeddings - G1 | `embedding(model="bedrock/amazon.titan-embed-text-v1", input=input)` | +| Titan Embeddings V2 | `embedding(model="bedrock/amazon.titan-embed-text-v2:0", input=input)` | +| Titan Embeddings - V1 | `embedding(model="bedrock/amazon.titan-embed-text-v1", input=input)` | | Cohere Embeddings - English | `embedding(model="bedrock/cohere.embed-english-v3", input=input)` | | Cohere Embeddings - Multilingual | `embedding(model="bedrock/cohere.embed-multilingual-v3", input=input)` | From 4a39b95accf25380a037ff241cb1822f412e3e22 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 16:13:27 -0700 Subject: [PATCH 051/378] fix - support dimension for titan embed v2 --- litellm/utils.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/litellm/utils.py b/litellm/utils.py index 80d26f58b924..e124358b6d46 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -4743,6 +4743,21 @@ def get_optional_params_embeddings( status_code=500, message=f"Setting user/encoding format is not supported by 
{custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.", ) + if custom_llm_provider == "bedrock": + if "amazon.titan-embed-text-v2" in model: + # embed-text-v2 supports the dimension param + non_default_params.pop("dimensions", None) + if len(non_default_params.keys()) > 0: + if litellm.drop_params is True: # drop the unsupported non-default values + keys = list(non_default_params.keys()) + for k in keys: + non_default_params.pop(k, None) + final_params = {**non_default_params, **kwargs} + return final_params + raise UnsupportedParamsError( + status_code=500, + message=f"Setting user/encoding format is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.", + ) if ( custom_llm_provider != "openai" From 401bf8d67ee416a177b0fe6afe1c3939d5a94ed1 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 16:23:37 -0700 Subject: [PATCH 052/378] test - bedrock v2 supports dimension --- .../test_get_optional_params_embeddings.py | 29 +++++++++++++++++++ litellm/utils.py | 10 +++++-- 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/litellm/tests/test_get_optional_params_embeddings.py b/litellm/tests/test_get_optional_params_embeddings.py index 41396b5312d2..81b177030974 100644 --- a/litellm/tests/test_get_optional_params_embeddings.py +++ b/litellm/tests/test_get_optional_params_embeddings.py @@ -40,3 +40,32 @@ def test_vertex_projects(): # test_vertex_projects() + + +def test_bedrock_embed_v2_regular(): + model, custom_llm_provider, _, _ = get_llm_provider( + model="bedrock/amazon.titan-embed-text-v2:0" + ) + optional_params = get_optional_params_embeddings( + model=model, + dimensions=512, + custom_llm_provider=custom_llm_provider, + ) + print(f"received optional_params: {optional_params}") + assert optional_params == {"dimensions": 512} + + +def test_bedrock_embed_v2_with_drop_params(): + litellm.drop_params = True + model, custom_llm_provider, _, _ = get_llm_provider( + model="bedrock/amazon.titan-embed-text-v2:0" + ) + optional_params = get_optional_params_embeddings( + model=model, + dimensions=512, + user="test-litellm-user-5", + encoding_format="base64", + custom_llm_provider=custom_llm_provider, + ) + print(f"received optional_params: {optional_params}") + assert optional_params == {"dimensions": 512} diff --git a/litellm/utils.py b/litellm/utils.py index e124358b6d46..5070e6498b1a 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -4744,9 +4744,14 @@ def get_optional_params_embeddings( message=f"Setting user/encoding format is not supported by {custom_llm_provider}. To drop it from the call, set `litellm.drop_params = True`.", ) if custom_llm_provider == "bedrock": - if "amazon.titan-embed-text-v2" in model: - # embed-text-v2 supports the dimension param + # if dimensions is in non_default_params -> pass it for model=bedrock/amazon.titan-embed-text-v2 + if ( + "dimensions" in non_default_params.keys() + and "amazon.titan-embed-text-v2" in model + ): + kwargs["dimensions"] = non_default_params["dimensions"] non_default_params.pop("dimensions", None) + if len(non_default_params.keys()) > 0: if litellm.drop_params is True: # drop the unsupported non-default values keys = list(non_default_params.keys()) @@ -4758,6 +4763,7 @@ def get_optional_params_embeddings( status_code=500, message=f"Setting user/encoding format is not supported by {custom_llm_provider}. 
To drop it from the call, set `litellm.drop_params = True`.", ) + return {**non_default_params, **kwargs} if ( custom_llm_provider != "openai" From a732d8772af2f8d2cb9b9a04d3f17fda85086436 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 16:24:21 -0700 Subject: [PATCH 053/378] fix(bedrock.py): convert httpx.timeout to boto3 valid timeout Closes https://github.com/BerriAI/litellm/issues/3398 --- litellm/llms/azure.py | 2 +- litellm/llms/bedrock.py | 9 +++++++-- litellm/llms/openai.py | 15 +++++++++------ litellm/main.py | 35 +++++++++++++++++++++++------------ litellm/router.py | 12 +++++++++--- litellm/tests/test_timeout.py | 32 +++++++++++++++++++++++++++++++- litellm/utils.py | 14 +++++++++++++- 7 files changed, 93 insertions(+), 26 deletions(-) diff --git a/litellm/llms/azure.py b/litellm/llms/azure.py index 0fe5c4e7e5d2..e7af9d43b644 100644 --- a/litellm/llms/azure.py +++ b/litellm/llms/azure.py @@ -151,7 +151,7 @@ def completion( api_type: str, azure_ad_token: str, print_verbose: Callable, - timeout, + timeout: Union[float, httpx.Timeout], logging_obj, optional_params, litellm_params, diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py index 235c13c59c35..7ce544c96f13 100644 --- a/litellm/llms/bedrock.py +++ b/litellm/llms/bedrock.py @@ -533,7 +533,7 @@ def init_bedrock_client( aws_session_name: Optional[str] = None, aws_profile_name: Optional[str] = None, aws_role_name: Optional[str] = None, - timeout: Optional[int] = None, + timeout: Optional[Union[float, httpx.Timeout]] = None, ): # check for custom AWS_REGION_NAME and use it if not passed to init_bedrock_client litellm_aws_region_name = get_secret("AWS_REGION_NAME", None) @@ -592,7 +592,12 @@ def init_bedrock_client( import boto3 - config = boto3.session.Config(connect_timeout=timeout, read_timeout=timeout) + if isinstance(timeout, float): + config = boto3.session.Config(connect_timeout=timeout, read_timeout=timeout) + elif isinstance(timeout, httpx.Timeout): + config = boto3.session.Config( + connect_timeout=timeout.connect, read_timeout=timeout.read + ) ### CHECK STS ### if aws_role_name is not None and aws_session_name is not None: diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py index f68ab235e6af..5a76605b3ab9 100644 --- a/litellm/llms/openai.py +++ b/litellm/llms/openai.py @@ -246,7 +246,7 @@ def __init__(self) -> None: def completion( self, model_response: ModelResponse, - timeout: float, + timeout: Union[float, httpx.Timeout], model: Optional[str] = None, messages: Optional[list] = None, print_verbose: Optional[Callable] = None, @@ -271,9 +271,12 @@ def completion( if model is None or messages is None: raise OpenAIError(status_code=422, message=f"Missing model or messages") - if not isinstance(timeout, float): + if not isinstance(timeout, float) and not isinstance( + timeout, httpx.Timeout + ): raise OpenAIError( - status_code=422, message=f"Timeout needs to be a float" + status_code=422, + message=f"Timeout needs to be a float or httpx.Timeout", ) if custom_llm_provider != "openai": @@ -425,7 +428,7 @@ async def acompletion( self, data: dict, model_response: ModelResponse, - timeout: float, + timeout: Union[float, httpx.Timeout], api_key: Optional[str] = None, api_base: Optional[str] = None, organization: Optional[str] = None, @@ -480,7 +483,7 @@ async def acompletion( def streaming( self, logging_obj, - timeout: float, + timeout: Union[float, httpx.Timeout], data: dict, model: str, api_key: Optional[str] = None, @@ -524,7 +527,7 @@ def streaming( async def async_streaming( self, 
logging_obj, - timeout: float, + timeout: Union[float, httpx.Timeout], data: dict, model: str, api_key: Optional[str] = None, diff --git a/litellm/main.py b/litellm/main.py index 9765669fe1c9..bbcdef0de065 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -39,6 +39,7 @@ Usage, get_optional_params_embeddings, get_optional_params_image_gen, + supports_httpx_timeout, ) from .llms import ( anthropic_text, @@ -450,7 +451,7 @@ def completion( model: str, # Optional OpenAI params: see https://platform.openai.com/docs/api-reference/chat/create messages: List = [], - timeout: Optional[Union[float, int]] = None, + timeout: Optional[Union[float, str, httpx.Timeout]] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, n: Optional[int] = None, @@ -648,11 +649,21 @@ def completion( non_default_params = { k: v for k, v in kwargs.items() if k not in default_params } # model-specific params - pass them straight to the model/provider - if timeout is None: - timeout = ( - kwargs.get("request_timeout", None) or 600 - ) # set timeout for 10 minutes by default - timeout = float(timeout) + + ### TIMEOUT LOGIC ### + timeout = timeout or kwargs.get("request_timeout", 600) or 600 + # set timeout for 10 minutes by default + + if ( + timeout is not None + and isinstance(timeout, httpx.Timeout) + and supports_httpx_timeout(custom_llm_provider) == False + ): + read_timeout = timeout.read or 600 + timeout = read_timeout # default 10 min timeout + elif timeout is not None and not isinstance(timeout, httpx.Timeout): + timeout = float(timeout) # type: ignore + try: if base_url is not None: api_base = base_url @@ -873,7 +884,7 @@ def completion( logger_fn=logger_fn, logging_obj=logging, acompletion=acompletion, - timeout=timeout, + timeout=timeout, # type: ignore client=client, # pass AsyncAzureOpenAI, AzureOpenAI client ) @@ -1014,7 +1025,7 @@ def completion( optional_params=optional_params, litellm_params=litellm_params, logger_fn=logger_fn, - timeout=timeout, + timeout=timeout, # type: ignore custom_prompt_dict=custom_prompt_dict, client=client, # pass AsyncOpenAI, OpenAI client organization=organization, @@ -1099,7 +1110,7 @@ def completion( optional_params=optional_params, litellm_params=litellm_params, logger_fn=logger_fn, - timeout=timeout, + timeout=timeout, # type: ignore ) if ( @@ -1473,7 +1484,7 @@ def completion( acompletion=acompletion, logging_obj=logging, custom_prompt_dict=custom_prompt_dict, - timeout=timeout, + timeout=timeout, # type: ignore ) if ( "stream" in optional_params @@ -1566,7 +1577,7 @@ def completion( logger_fn=logger_fn, logging_obj=logging, acompletion=acompletion, - timeout=timeout, + timeout=timeout, # type: ignore ) ## LOGGING logging.post_call( @@ -1893,7 +1904,7 @@ def completion( logger_fn=logger_fn, encoding=encoding, logging_obj=logging, - timeout=timeout, + timeout=timeout, # type: ignore ) if ( "stream" in optional_params diff --git a/litellm/router.py b/litellm/router.py index 9638db548eeb..d64deecec102 100644 --- a/litellm/router.py +++ b/litellm/router.py @@ -375,7 +375,9 @@ def completion( except Exception as e: raise e - def _completion(self, model: str, messages: List[Dict[str, str]], **kwargs): + def _completion( + self, model: str, messages: List[Dict[str, str]], **kwargs + ) -> Union[ModelResponse, CustomStreamWrapper]: model_name = None try: # pick the one that is available (lowest TPM/RPM) @@ -438,7 +440,9 @@ def _completion(self, model: str, messages: List[Dict[str, str]], **kwargs): ) raise e - async def acompletion(self, model: str, 
messages: List[Dict[str, str]], **kwargs): + async def acompletion( + self, model: str, messages: List[Dict[str, str]], **kwargs + ) -> Union[ModelResponse, CustomStreamWrapper]: try: kwargs["model"] = model kwargs["messages"] = messages @@ -454,7 +458,9 @@ async def acompletion(self, model: str, messages: List[Dict[str, str]], **kwargs except Exception as e: raise e - async def _acompletion(self, model: str, messages: List[Dict[str, str]], **kwargs): + async def _acompletion( + self, model: str, messages: List[Dict[str, str]], **kwargs + ) -> Union[ModelResponse, CustomStreamWrapper]: """ - Get an available deployment - call it with a semaphore over the call diff --git a/litellm/tests/test_timeout.py b/litellm/tests/test_timeout.py index d38da52e51fe..f24b26a0cf23 100644 --- a/litellm/tests/test_timeout.py +++ b/litellm/tests/test_timeout.py @@ -10,7 +10,37 @@ import time import litellm import openai -import pytest, uuid +import pytest, uuid, httpx + + +@pytest.mark.parametrize( + "model, provider", + [ + ("gpt-3.5-turbo", "openai"), + ("anthropic.claude-instant-v1", "bedrock"), + ("azure/chatgpt-v-2", "azure"), + ], +) +@pytest.mark.parametrize("sync_mode", [True, False]) +@pytest.mark.asyncio +async def test_httpx_timeout(model, provider, sync_mode): + """ + Test if setting httpx.timeout works for completion calls + """ + timeout_val = httpx.Timeout(10.0, connect=60.0) + + messages = [{"role": "user", "content": "Hey, how's it going?"}] + + if sync_mode: + response = litellm.completion( + model=model, messages=messages, timeout=timeout_val + ) + else: + response = await litellm.acompletion( + model=model, messages=messages, timeout=timeout_val + ) + + print(f"response: {response}") def test_timeout(): diff --git a/litellm/utils.py b/litellm/utils.py index 80d26f58b924..89d814e3240d 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -4442,7 +4442,19 @@ def completion_cost( raise e -def supports_function_calling(model: str): +def supports_httpx_timeout(custom_llm_provider: str) -> bool: + """ + Helper function to know if a provider implementation supports httpx timeout + """ + supported_providers = ["openai", "azure", "bedrock"] + + if custom_llm_provider in supported_providers: + return True + + return False + + +def supports_function_calling(model: str) -> bool: """ Check if the given model supports function calling and return a boolean value. 
From 2084cfd959da45bd1b90432e13da4d51180f9ac7 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 16:41:08 -0700 Subject: [PATCH 054/378] fix - test_check_num_callbacks --- tests/test_callbacks_on_proxy.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tests/test_callbacks_on_proxy.py b/tests/test_callbacks_on_proxy.py index c10b18ed1d74..70cd3ca25d9c 100644 --- a/tests/test_callbacks_on_proxy.py +++ b/tests/test_callbacks_on_proxy.py @@ -108,11 +108,9 @@ async def test_check_num_callbacks(): import uuid async with aiohttp.ClientSession() as session: + await asyncio.sleep(30) num_callbacks_1, _ = await get_active_callbacks(session=session) - assert ( - num_callbacks_1 > 0 - ) # /health/readiness returns 0 when some calculation goes wrong + assert num_callbacks_1 > 0 await asyncio.sleep(30) num_callbacks_2, _ = await get_active_callbacks(session=session) @@ -141,14 +139,12 @@ async def test_check_num_callbacks_on_lowest_latency(): import uuid async with aiohttp.ClientSession() as session: + await asyncio.sleep(30) original_routing_strategy = await get_current_routing_strategy(session=session) await config_update(session=session, routing_strategy="latency-based-routing") num_callbacks_1, num_alerts_1 = await get_active_callbacks(session=session) - assert ( - num_callbacks_1 > 0 - ) # /health/readiness returns 0 when some calculation goes wrong await asyncio.sleep(30) From f7eee609431f3003dfb1885ffd0663a1b666af7d Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 16:51:12 -0700 Subject: [PATCH 055/378] docs(exception_mapping.md): update exception mapping docs with 'should_retry' --- docs/my-website/docs/exception_mapping.md | 67 +++++++++++++++++------ litellm/utils.py | 15 +++-- 2 files changed, 60 insertions(+), 22 deletions(-) diff --git a/docs/my-website/docs/exception_mapping.md b/docs/my-website/docs/exception_mapping.md index db17fb093e5d..2345e9f838aa 100644 --- a/docs/my-website/docs/exception_mapping.md +++ b/docs/my-website/docs/exception_mapping.md @@ -13,7 +13,7 @@ LiteLLM maps exceptions across all providers to their OpenAI counterparts. | >=500 | InternalServerError | | N/A | ContextWindowExceededError| | 400 | ContentPolicyViolationError| -| N/A | APIConnectionError | +| 500 | APIConnectionError | Base case we return APIConnectionError @@ -74,6 +74,28 @@ except Exception as e: ``` +## Usage - Should you retry exception? + +``` +import litellm +import openai + +try: + response = litellm.completion( + model="gpt-4", + messages=[ + { + "role": "user", + "content": "hello, write a 20 page essay" + } + ], + timeout=0.01, # this will raise a timeout exception + ) +except openai.APITimeoutError as e: + should_retry = litellm._should_retry(e.status_code) + print(f"should_retry: {should_retry}") +``` + ## Details To see how it's implemented - [check out the code](https://github.com/BerriAI/litellm/blob/a42c197e5a6de56ea576c73715e6c7c6b19fa249/litellm/utils.py#L1217) @@ -86,21 +108,34 @@ To see how it's implemented - [check out the code](https://github.com/BerriAI/li Base case - we return the original exception.
-| | ContextWindowExceededError | AuthenticationError | InvalidRequestError | RateLimitError | ServiceUnavailableError | -|---------------|----------------------------|---------------------|---------------------|---------------|-------------------------| -| Anthropic | ✅ | ✅ | ✅ | ✅ | | -| OpenAI | ✅ | ✅ |✅ |✅ |✅| -| Azure OpenAI | ✅ | ✅ |✅ |✅ |✅| -| Replicate | ✅ | ✅ | ✅ | ✅ | ✅ | -| Cohere | ✅ | ✅ | ✅ | ✅ | ✅ | -| Huggingface | ✅ | ✅ | ✅ | ✅ | | -| Openrouter | ✅ | ✅ | ✅ | ✅ | | -| AI21 | ✅ | ✅ | ✅ | ✅ | | -| VertexAI | | |✅ | | | -| Bedrock | | |✅ | | | -| Sagemaker | | |✅ | | | -| TogetherAI | ✅ | ✅ | ✅ | ✅ | | -| AlephAlpha | ✅ | ✅ | ✅ | ✅ | ✅ | +| custom_llm_provider | Timeout | ContextWindowExceededError | BadRequestError | NotFoundError | ContentPolicyViolationError | AuthenticationError | APIError | RateLimitError | ServiceUnavailableError | PermissionDeniedError | UnprocessableEntityError | +|----------------------------|---------|----------------------------|------------------|---------------|-----------------------------|---------------------|----------|----------------|-------------------------|-----------------------|-------------------------| +| openai | ✓ | ✓ | ✓ | | ✓ | ✓ | | | | | | +| text-completion-openai | ✓ | ✓ | ✓ | | ✓ | ✓ | | | | | | +| custom_openai | ✓ | ✓ | ✓ | | ✓ | ✓ | | | | | | +| openai_compatible_providers| ✓ | ✓ | ✓ | | ✓ | ✓ | | | | | | +| anthropic | ✓ | ✓ | ✓ | ✓ | | ✓ | | | ✓ | ✓ | | +| replicate | ✓ | ✓ | ✓ | ✓ | | ✓ | | ✓ | ✓ | | | +| bedrock | ✓ | ✓ | ✓ | ✓ | | ✓ | | ✓ | ✓ | ✓ | | +| sagemaker | | ✓ | ✓ | | | | | | | | | +| vertex_ai | ✓ | | ✓ | | | | ✓ | | | | ✓ | +| palm | ✓ | ✓ | | | | | ✓ | | | | | +| gemini | ✓ | ✓ | | | | | ✓ | | | | | +| cloudflare | | | ✓ | | | ✓ | | | | | | +| cohere | | ✓ | ✓ | | | ✓ | | | ✓ | | | +| cohere_chat | | ✓ | ✓ | | | ✓ | | | ✓ | | | +| huggingface | ✓ | ✓ | ✓ | | | ✓ | | ✓ | ✓ | | | +| ai21 | ✓ | ✓ | ✓ | ✓ | | ✓ | | ✓ | | | | +| nlp_cloud | ✓ | ✓ | ✓ | | | ✓ | ✓ | ✓ | ✓ | | | +| together_ai | ✓ | ✓ | ✓ | | | ✓ | | | | | | +| aleph_alpha | | | ✓ | | | ✓ | | | | | | +| ollama | ✓ | | ✓ | | | | | | ✓ | | | +| ollama_chat | ✓ | | ✓ | | | | | | ✓ | | | +| vllm | | | | | | ✓ | ✓ | | | | | +| azure | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | | | ✓ | | | + +- "✓" indicates that the specified `custom_llm_provider` can raise the corresponding exception. +- Empty cells indicate the lack of association or that the provider does not raise that particular exception type as indicated by the function. > For a deeper understanding of these exceptions, you can check out [this](https://github.com/BerriAI/litellm/blob/d7e58d13bf9ba9edbab2ab2f096f3de7547f35fa/litellm/utils.py#L1544) implementation for additional insights. diff --git a/litellm/utils.py b/litellm/utils.py index acadb47e71cd..63684766badf 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -8480,7 +8480,7 @@ def exception_type( # 503 Getting metadata from plugin failed with error: Reauthentication is needed. Please run `gcloud auth application-default login` to reauthenticate. 
exception_mapping_worked = True raise BadRequestError( - message=f"PalmException - Invalid api key", + message=f"GeminiException - Invalid api key", model=model, llm_provider="palm", response=original_exception.response, @@ -8491,23 +8491,26 @@ def exception_type( ): exception_mapping_worked = True raise Timeout( - message=f"PalmException - {original_exception.message}", + message=f"GeminiException - {original_exception.message}", model=model, llm_provider="palm", ) if "400 Request payload size exceeds" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( - message=f"PalmException - {error_str}", + message=f"GeminiException - {error_str}", model=model, llm_provider="palm", response=original_exception.response, ) - if "500 An internal error has occurred." in error_str: + if ( + "500 An internal error has occurred." in error_str + or "list index out of range" in error_str + ): exception_mapping_worked = True raise APIError( status_code=getattr(original_exception, "status_code", 500), - message=f"PalmException - {original_exception.message}", + message=f"GeminiException - {original_exception.message}", llm_provider="palm", model=model, request=original_exception.request, @@ -8516,7 +8519,7 @@ def exception_type( if original_exception.status_code == 400: exception_mapping_worked = True raise BadRequestError( - message=f"PalmException - {error_str}", + message=f"GeminiException - {error_str}", model=model, llm_provider="palm", response=original_exception.response, From d3152e606ff222ecc0f165f096d7512b9955b572 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 16:53:24 -0700 Subject: [PATCH 056/378] fix - ui filter exceptions by modelGroup --- ui/litellm-dashboard/src/components/networking.tsx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ui/litellm-dashboard/src/components/networking.tsx b/ui/litellm-dashboard/src/components/networking.tsx index 35880949b2d1..53779c64bc86 100644 --- a/ui/litellm-dashboard/src/components/networking.tsx +++ b/ui/litellm-dashboard/src/components/networking.tsx @@ -526,7 +526,9 @@ export const modelExceptionsCall = async ( */ try { let url = proxyBaseUrl ? 
`${proxyBaseUrl}/model/metrics/exceptions` : `/model/metrics/exceptions`; - + if (modelGroup) { + url = `${url}?_selected_model_group=${modelGroup}` + } const response = await fetch(url, { method: "GET", headers: { From e7034ea53d65cf233c05e12e676a9d0348c68bb8 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 16:54:24 -0700 Subject: [PATCH 057/378] feat - filter exceptions by model group --- litellm/proxy/proxy_server.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 55202fd160df..27a05ca21217 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -7706,7 +7706,7 @@ async def model_metrics_exceptions( exception_type, COUNT(*) AS num_exceptions FROM "LiteLLM_ErrorLogs" - WHERE "startTime" >= $1::timestamp AND "endTime" <= $2::timestamp + WHERE "startTime" >= $1::timestamp AND "endTime" <= $2::timestamp AND model_group = $3 GROUP BY combined_model_api_base, exception_type ) SELECT @@ -7718,7 +7718,9 @@ async def model_metrics_exceptions( ORDER BY total_exceptions DESC LIMIT 200; """ - db_response = await prisma_client.db.query_raw(sql_query, startTime, endTime) + db_response = await prisma_client.db.query_raw( + sql_query, startTime, endTime, _selected_model_group + ) response: List[dict] = [] exception_types = set() From 0b9fa53e3e6da6fbe0c965fa77917f919210fa2c Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 16:59:49 -0700 Subject: [PATCH 058/378] fix(anthropic.py): drop unsupported non-whitespace character value when calling anthropic with stop sequences Fixes https://github.com/BerriAI/litellm/issues/3286 --- litellm/llms/anthropic.py | 42 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/litellm/llms/anthropic.py b/litellm/llms/anthropic.py index 24d889b0f46e..377235deebe8 100644 --- a/litellm/llms/anthropic.py +++ b/litellm/llms/anthropic.py @@ -84,6 +84,48 @@ def get_config(cls): and v is not None } + def get_supported_openai_params(self): + return [ + "stream", + "stop", + "temperature", + "top_p", + "max_tokens", + "tools", + "tool_choice", + ] + + def map_openai_params(self, non_default_params: dict, optional_params: dict): + for param, value in non_default_params.items(): + if param == "max_tokens": + optional_params["max_tokens"] = value + if param == "tools": + optional_params["tools"] = value + if param == "stream": + optional_params["stream"] = value + if param == "stop": + if isinstance(value, str): + if ( + value == "\n" + ): # anthropic doesn't allow whitespace characters as stop-sequences + continue + value = [value] + elif isinstance(value, list): + new_v = [] + for v in value: + if ( + v == "\n" + ): # anthropic doesn't allow whitespace characters as stop-sequences + continue + new_v.append(v) + value = new_v + optional_params["stop_sequences"] = value + if param == "temperature": + optional_params["temperature"] = value + if param == "top_p": + optional_params["top_p"] = value + return optional_params + # makes headers for API call def validate_environment(api_key, user_headers): From 0450abfdc12d92576595e8e4d6ba88308d72f704 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 17:22:06 -0700 Subject: [PATCH 059/378] fix(bedrock.py): fix boto3 config init --- litellm/llms/bedrock.py | 2 + litellm/tests/log.txt | 114 ++++++++++++++++++---------------------- 2 files changed, 53 insertions(+), 63 deletions(-) diff --git a/litellm/llms/bedrock.py b/litellm/llms/bedrock.py 
index 7ce544c96f13..517e441469fe 100644 --- a/litellm/llms/bedrock.py +++ b/litellm/llms/bedrock.py @@ -598,6 +598,8 @@ def init_bedrock_client( config = boto3.session.Config( connect_timeout=timeout.connect, read_timeout=timeout.read ) + else: + config = boto3.session.Config() ### CHECK STS ### if aws_role_name is not None and aws_session_name is not None: diff --git a/litellm/tests/log.txt b/litellm/tests/log.txt index 2d3718c7d443..a7d12334528c 100644 --- a/litellm/tests/log.txt +++ b/litellm/tests/log.txt @@ -5,74 +5,59 @@ plugins: timeout-2.2.0, asyncio-0.23.2, anyio-3.7.1, xdist-3.3.1 asyncio: mode=Mode.STRICT collected 1 item -test_custom_logger.py Chunks have a created at hidden param -Chunks sorted -token_counter messages received: [{'role': 'user', 'content': 'write a one sentence poem about: 73348'}] -Token Counter - using OpenAI token counter, for model=gpt-3.5-turbo -LiteLLM: Utils - Counting tokens for OpenAI model=gpt-3.5-turbo -Logging Details LiteLLM-Success Call: None -success callbacks: [] -Token Counter - using OpenAI token counter, for model=gpt-3.5-turbo -LiteLLM: Utils - Counting tokens for OpenAI model=gpt-3.5-turbo -Logging Details LiteLLM-Success Call streaming complete -Looking up model=gpt-3.5-turbo in model_cost_map -Success: model=gpt-3.5-turbo in model_cost_map -prompt_tokens=17; completion_tokens=0 -Returned custom cost for model=gpt-3.5-turbo - prompt_tokens_cost_usd_dollar: 2.55e-05, completion_tokens_cost_usd_dollar: 0.0 -final cost: 2.55e-05; prompt_tokens_cost_usd_dollar: 2.55e-05; completion_tokens_cost_usd_dollar: 0.0 -. [100%] +test_image_generation.py . [100%] =============================== warnings summary =============================== -../../../../../../opt/homebrew/lib/python3.11/site-packages/pydantic/_internal/_config.py:284: 18 warnings +../../../../../../opt/homebrew/lib/python3.11/site-packages/pydantic/_internal/_config.py:284: 23 warnings /opt/homebrew/lib/python3.11/site-packages/pydantic/_internal/_config.py:284: PydanticDeprecatedSince20: Support for class-based `config` is deprecated, use ConfigDict instead. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ warnings.warn(DEPRECATION_MESSAGE, DeprecationWarning) -../proxy/_types.py:218 - /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:218: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ +../proxy/_types.py:219 + /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:219: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ @root_validator(pre=True) -../proxy/_types.py:305 - /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:305: PydanticDeprecatedSince20: `pydantic.config.Extra` is deprecated, use literal values instead (e.g. `extra='allow'`). Deprecated in Pydantic V2.0 to be removed in V3.0. 
See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ +../proxy/_types.py:306 + /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:306: PydanticDeprecatedSince20: `pydantic.config.Extra` is deprecated, use literal values instead (e.g. `extra='allow'`). Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ extra = Extra.allow # Allow extra fields -../proxy/_types.py:308 - /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:308: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ +../proxy/_types.py:309 + /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:309: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ @root_validator(pre=True) -../proxy/_types.py:337 - /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:337: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ +../proxy/_types.py:338 + /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:338: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ @root_validator(pre=True) -../proxy/_types.py:384 - /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:384: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ +../proxy/_types.py:385 + /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:385: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ @root_validator(pre=True) -../proxy/_types.py:450 - /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:450: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. 
See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ +../proxy/_types.py:454 + /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:454: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ @root_validator(pre=True) -../proxy/_types.py:462 - /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:462: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ +../proxy/_types.py:466 + /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:466: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ @root_validator(pre=True) -../proxy/_types.py:502 - /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:502: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ +../proxy/_types.py:509 + /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:509: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ @root_validator(pre=True) -../proxy/_types.py:536 - /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:536: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ +../proxy/_types.py:546 + /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:546: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ @root_validator(pre=True) -../proxy/_types.py:823 - /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:823: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. 
See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ +../proxy/_types.py:840 + /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:840: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ @root_validator(pre=True) -../proxy/_types.py:850 - /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:850: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ +../proxy/_types.py:867 + /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:867: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ @root_validator(pre=True) -../proxy/_types.py:869 - /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:869: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ +../proxy/_types.py:886 + /Users/krrishdholakia/Documents/litellm/litellm/proxy/_types.py:886: PydanticDeprecatedSince20: Pydantic V1 style `@root_validator` validators are deprecated. You should migrate to Pydantic V2 style `@model_validator` validators, see the migration guide for more details. Deprecated in Pydantic V2.0 to be removed in V3.0. See Pydantic V2 Migration Guide at https://errors.pydantic.dev/2.7/migration/ @root_validator(pre=True) ../../../../../../opt/homebrew/lib/python3.11/site-packages/pkg_resources/__init__.py:121 @@ -126,30 +111,33 @@ final cost: 2.55e-05; prompt_tokens_cost_usd_dollar: 2.55e-05; completion_tokens Implementing implicit namespace packages (as specified in PEP 420) is preferred to `pkg_resources.declare_namespace`. 
See https://setuptools.pypa.io/en/latest/references/keywords.html#keyword-namespace-packages declare_namespace(pkg) -test_custom_logger.py::test_redis_cache_completion_stream - /opt/homebrew/lib/python3.11/site-packages/_pytest/unraisableexception.py:78: PytestUnraisableExceptionWarning: Exception ignored in: +test_image_generation.py::test_aimage_generation_bedrock_with_optional_params + /opt/homebrew/lib/python3.11/site-packages/_pytest/threadexception.py:73: PytestUnhandledThreadExceptionWarning: Exception in thread Thread-1 (success_handler) Traceback (most recent call last): - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/streams.py", line 395, in __del__ - self.close() - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/streams.py", line 343, in close - return self._transport.close() - ^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/sslproto.py", line 112, in close - self._ssl_protocol._start_shutdown() - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/sslproto.py", line 620, in _start_shutdown - self._shutdown_timeout_handle = self._loop.call_later( - ^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/base_events.py", line 727, in call_later - timer = self.call_at(self.time() + delay, callback, *args, - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/base_events.py", line 740, in call_at - self._check_closed() - File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/base_events.py", line 519, in _check_closed - raise RuntimeError('Event loop is closed') - RuntimeError: Event loop is closed + File "/Users/krrishdholakia/Documents/litellm/litellm/utils.py", line 1412, in _success_handler_helper_fn + litellm.completion_cost( + File "/Users/krrishdholakia/Documents/litellm/litellm/utils.py", line 4442, in completion_cost + raise e + File "/Users/krrishdholakia/Documents/litellm/litellm/utils.py", line 4405, in completion_cost + raise Exception( + Exception: Model=1024-x-1024/stability.stable-diffusion-xl-v1 not found in completion cost model map - warnings.warn(pytest.PytestUnraisableExceptionWarning(msg)) + During handling of the above exception, another exception occurred: + + Traceback (most recent call last): + File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/threading.py", line 1045, in _bootstrap_inner + self.run() + File "/opt/homebrew/Cellar/python@3.11/3.11.6_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/threading.py", line 982, in run + self._target(*self._args, **self._kwargs) + File "/Users/krrishdholakia/Documents/litellm/litellm/utils.py", line 1465, in success_handler + start_time, end_time, result = self._success_handler_helper_fn( + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Users/krrishdholakia/Documents/litellm/litellm/utils.py", line 1459, in _success_handler_helper_fn + raise Exception(f"[Non-Blocking] LiteLLM.Success_Call Error: {str(e)}") + Exception: [Non-Blocking] LiteLLM.Success_Call Error: Model=1024-x-1024/stability.stable-diffusion-xl-v1 not found in 
completion cost model map + + warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg)) -- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html -======================== 1 passed, 56 warnings in 2.43s ======================== +======================== 1 passed, 61 warnings in 3.00s ======================== From 097714e02f3b835eeb61810df52004b568b3d9d6 Mon Sep 17 00:00:00 2001 From: Krrish Dholakia Date: Fri, 3 May 2024 17:31:34 -0700 Subject: [PATCH 060/378] fix(anthropic.py): handle whitespace characters for anthropic calls --- litellm/llms/anthropic.py | 11 +++++---- litellm/tests/test_optional_params.py | 16 ++++++++++++- litellm/utils.py | 33 ++++----------------------- 3 files changed, 26 insertions(+), 34 deletions(-) diff --git a/litellm/llms/anthropic.py b/litellm/llms/anthropic.py index 377235deebe8..3c130aafcbdc 100644 --- a/litellm/llms/anthropic.py +++ b/litellm/llms/anthropic.py @@ -101,13 +101,13 @@ def map_openai_params(self, non_default_params: dict, optional_params: dict): optional_params["max_tokens"] = value if param == "tools": optional_params["tools"] = value - if param == "stream": + if param == "stream" and value == True: optional_params["stream"] = value if param == "stop": if isinstance(value, str): if ( value == "\n" - ): # anthropic doesn't allow whitespace characters as stop-sequences + ) and litellm.drop_params == True: # anthropic doesn't allow whitespace characters as stop-sequences continue value = [value] elif isinstance(value, list): @@ -115,10 +115,13 @@ def map_openai_params(self, non_default_params: dict, optional_params: dict): for v in value: if ( v == "\n" - ): # anthropic doesn't allow whitespace characters as stop-sequences + ) and litellm.drop_params == True: # anthropic doesn't allow whitespace characters as stop-sequences continue new_v.append(v) - value = new_v + if len(new_v) > 0: + value = new_v + else: + continue optional_params["stop_sequences"] = value if param == "temperature": optional_params["temperature"] = value diff --git a/litellm/tests/test_optional_params.py b/litellm/tests/test_optional_params.py index 4fa8df3b6db9..c6e0d7a5b7a5 100644 --- a/litellm/tests/test_optional_params.py +++ b/litellm/tests/test_optional_params.py @@ -5,13 +5,27 @@ sys.path.insert(0, os.path.abspath("../..")) import litellm -from litellm.utils import get_optional_params_embeddings +from litellm.utils import get_optional_params_embeddings, get_optional_params ## get_optional_params_embeddings ### Models: OpenAI, Azure, Bedrock ### Scenarios: w/ optional params + litellm.drop_params = True +@pytest.mark.parametrize( + "stop_sequence, expected_count", [("\n", 0), (["\n"], 0), (["finish_reason"], 1)] +) +def test_anthropic_optional_params(stop_sequence, expected_count): + """ + Test if whitespace character optional param is dropped by anthropic + """ + litellm.drop_params = True + optional_params = get_optional_params( + model="claude-3", custom_llm_provider="anthropic", stop=stop_sequence + ) + assert len(optional_params) == expected_count + + def test_bedrock_optional_params_embeddings(): litellm.drop_params = True optional_params = get_optional_params_embeddings( diff --git a/litellm/utils.py b/litellm/utils.py index 63684766badf..dceb280c9545 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -5006,26 +5006,9 @@ def _map_and_modify_arg(supported_params: dict, provider: str, model: str): model=model, custom_llm_provider=custom_llm_provider ) _check_valid_arg(supported_params=supported_params) - # handle anthropic params - if 
stream: - optional_params["stream"] = stream - if stop is not None: - if type(stop) == str: - stop = [stop] # openai can accept str/list for stop - optional_params["stop_sequences"] = stop - if temperature is not None: - optional_params["temperature"] = temperature - if top_p is not None: - optional_params["top_p"] = top_p - if max_tokens is not None: - if (model == "claude-2") or (model == "claude-instant-1"): - # these models use antropic_text.py which only accepts max_tokens_to_sample - optional_params["max_tokens_to_sample"] = max_tokens - else: - optional_params["max_tokens"] = max_tokens - optional_params["max_tokens"] = max_tokens - if tools is not None: - optional_params["tools"] = tools + optional_params = litellm.AnthropicConfig().map_openai_params( + non_default_params=non_default_params, optional_params=optional_params + ) elif custom_llm_provider == "cohere": ## check if unsupported param passed in supported_params = get_supported_openai_params( @@ -5929,15 +5912,7 @@ def get_supported_openai_params(model: str, custom_llm_provider: str): elif custom_llm_provider == "ollama_chat": return litellm.OllamaChatConfig().get_supported_openai_params() elif custom_llm_provider == "anthropic": - return [ - "stream", - "stop", - "temperature", - "top_p", - "max_tokens", - "tools", - "tool_choice", - ] + return litellm.AnthropicConfig().get_supported_openai_params() elif custom_llm_provider == "groq": return [ "temperature", From 4ce4927c0cf9af8d0c4b6319b155e244572db4e7 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Fri, 3 May 2024 17:56:39 -0700 Subject: [PATCH 061/378] Add test_engines_model_chat_completions --- litellm/tests/test_proxy_server.py | 35 +++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py index 43a070556682..c1965dc2aac7 100644 --- a/litellm/tests/test_proxy_server.py +++ b/litellm/tests/test_proxy_server.py @@ -160,7 +160,40 @@ def test_chat_completion(mock_acompletion, client_no_auth): pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}") -# Run the test +@mock_patch_acompletion() +def test_engines_model_chat_completions(mock_acompletion, client_no_auth): + global headers + try: + # Your test data + test_data = { + "model": "gpt-3.5-turbo", + "messages": [ + {"role": "user", "content": "hi"}, + ], + "max_tokens": 10, + } + + print("testing proxy server with chat completions") + response = client_no_auth.post("/engines/gpt-3.5-turbo/chat/completions", json=test_data) + mock_acompletion.assert_called_once_with( + model="gpt-3.5-turbo", + messages=[ + {"role": "user", "content": "hi"}, + ], + max_tokens=10, + litellm_call_id=mock.ANY, + litellm_logging_obj=mock.ANY, + request_timeout=mock.ANY, + specific_deployment=True, + metadata=mock.ANY, + proxy_server_request=mock.ANY, + ) + print(f"response - {response.text}") + assert response.status_code == 200 + result = response.json() + print(f"Received response: {result}") + except Exception as e: + pytest.fail(f"LiteLLM Proxy test failed. 
Exception - {str(e)}") @mock_patch_acompletion() From eb433bde863c750affcae4ac38517ba756ed9290 Mon Sep 17 00:00:00 2001 From: Marc Abramowitz Date: Fri, 3 May 2024 17:57:30 -0700 Subject: [PATCH 062/378] Add route: "/engines/{model:path}/chat/completions" Without this, it results in: ```pytb Traceback (most recent call last): File "/Users/abramowi/Code/OpenSource/litellm/litellm/proxy/proxy_server.py", line 3836, in completion raise HTTPException( fastapi.exceptions.HTTPException: 400: {'error': 'completion: Invalid model name passed in model=gpt-3.5-turbo/chat'} ``` --- litellm/proxy/proxy_server.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index 55202fd160df..bbeacded0cdf 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -3456,6 +3456,11 @@ def model_list( dependencies=[Depends(user_api_key_auth)], tags=["chat/completions"], ) +@router.post( + "/engines/{model:path}/chat/completions", + dependencies=[Depends(user_api_key_auth)], + tags=["chat/completions"], +) @router.post( "/openai/deployments/{model:path}/chat/completions", dependencies=[Depends(user_api_key_auth)], From fc0ced48c1a3c695186b1d433bb2d99234d6c67e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sebasti=C3=A1n=20Est=C3=A9vez?= Date: Fri, 3 May 2024 23:38:54 -0400 Subject: [PATCH 063/378] add_function_to_prompt bug fix This blows up when there's no "functions" in the dictionary even when tools is present because the inner function executes regardless (does not short circuit). --- litellm/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/litellm/utils.py b/litellm/utils.py index ac8ec35d4b62..75d6f8b7f0a9 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -4956,7 +4956,7 @@ def get_optional_params( litellm.add_function_to_prompt ): # if user opts to add it to prompt instead optional_params["functions_unsupported_model"] = non_default_params.pop( - "tools", non_default_params.pop("functions") + "tools", non_default_params.pop("functions", None) ) else: raise UnsupportedParamsError( From 01a11ccced84498136066678f3d9412882393022 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 3 May 2024 20:40:58 -0700 Subject: [PATCH 064/378] ui - new build --- litellm/proxy/_experimental/out/404.html | 6 +--- .../static/chunks/386-d811195b597a2122.js | 32 ------------------- .../static/chunks/761-05f8a8451296476c.js | 32 +++++++++++++++++++ .../chunks/app/layout-bdfb585eb82bdab5.js | 1 - .../chunks/app/layout-bf3537d6924e801d.js | 1 + .../chunks/app/page-e0ee34389254cdf2.js | 1 - .../chunks/app/page-f538305fa38a6c75.js | 2 +- ...915716.js => main-app-9b4fb13a7db53edf.js} | 2 +- ...8bd8abb.js => webpack-202e312607f242a1.js} | 2 +- .../out/_next/static/css/00c2ddbcd01819c0.css | 5 +++ .../out/_next/static/css/9f51f0573c6b0365.css | 3 -- .../_buildManifest.js | 0 .../_ssgManifest.js | 0 litellm/proxy/_experimental/out/index.html | 6 +--- litellm/proxy/_experimental/out/index.txt | 11 ++----- ui/litellm-dashboard/out/404.html | 6 +--- .../c5rha8cqAah-saaczjn02/_buildManifest.js | 1 - .../c5rha8cqAah-saaczjn02/_ssgManifest.js | 1 - .../static/chunks/761-05f8a8451296476c.js | 32 +++++++++++++++++++ .../chunks/app/layout-bf3537d6924e801d.js | 1 + .../chunks/app/page-f538305fa38a6c75.js | 2 +- .../chunks/main-app-9b4fb13a7db53edf.js | 1 + .../static/chunks/webpack-202e312607f242a1.js | 1 + .../out/_next/static/css/00c2ddbcd01819c0.css | 5 +++ .../hizRXvPxuztz_ZrAcFMhz}/_buildManifest.js | 0 
.../hizRXvPxuztz_ZrAcFMhz}/_ssgManifest.js | 0 ui/litellm-dashboard/out/index.html | 6 +--- ui/litellm-dashboard/out/index.txt | 11 ++----- 28 files changed, 90 insertions(+), 81 deletions(-) delete mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/386-d811195b597a2122.js create mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/761-05f8a8451296476c.js delete mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/app/layout-bdfb585eb82bdab5.js create mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/app/layout-bf3537d6924e801d.js delete mode 100644 litellm/proxy/_experimental/out/_next/static/chunks/app/page-e0ee34389254cdf2.js rename ui/litellm-dashboard/out/_next/static/chunks/app/page-5a4a198eefedc775.js => litellm/proxy/_experimental/out/_next/static/chunks/app/page-f538305fa38a6c75.js (73%) rename litellm/proxy/_experimental/out/_next/static/chunks/{main-app-096338c8e1915716.js => main-app-9b4fb13a7db53edf.js} (54%) rename litellm/proxy/_experimental/out/_next/static/chunks/{webpack-65a932b4e8bd8abb.js => webpack-202e312607f242a1.js} (98%) create mode 100644 litellm/proxy/_experimental/out/_next/static/css/00c2ddbcd01819c0.css delete mode 100644 litellm/proxy/_experimental/out/_next/static/css/9f51f0573c6b0365.css rename litellm/proxy/_experimental/out/_next/static/{c5rha8cqAah-saaczjn02 => hizRXvPxuztz_ZrAcFMhz}/_buildManifest.js (100%) rename litellm/proxy/_experimental/out/_next/static/{c5rha8cqAah-saaczjn02 => hizRXvPxuztz_ZrAcFMhz}/_ssgManifest.js (100%) delete mode 100644 ui/litellm-dashboard/out/_next/static/c5rha8cqAah-saaczjn02/_buildManifest.js delete mode 100644 ui/litellm-dashboard/out/_next/static/c5rha8cqAah-saaczjn02/_ssgManifest.js create mode 100644 ui/litellm-dashboard/out/_next/static/chunks/761-05f8a8451296476c.js create mode 100644 ui/litellm-dashboard/out/_next/static/chunks/app/layout-bf3537d6924e801d.js rename litellm/proxy/_experimental/out/_next/static/chunks/app/page-5a4a198eefedc775.js => ui/litellm-dashboard/out/_next/static/chunks/app/page-f538305fa38a6c75.js (73%) create mode 100644 ui/litellm-dashboard/out/_next/static/chunks/main-app-9b4fb13a7db53edf.js create mode 100644 ui/litellm-dashboard/out/_next/static/chunks/webpack-202e312607f242a1.js create mode 100644 ui/litellm-dashboard/out/_next/static/css/00c2ddbcd01819c0.css rename {litellm/proxy/_experimental/out/_next/static/dWGL92c5LzTMn7XX6utn2 => ui/litellm-dashboard/out/_next/static/hizRXvPxuztz_ZrAcFMhz}/_buildManifest.js (100%) rename {litellm/proxy/_experimental/out/_next/static/dWGL92c5LzTMn7XX6utn2 => ui/litellm-dashboard/out/_next/static/hizRXvPxuztz_ZrAcFMhz}/_ssgManifest.js (100%) diff --git a/litellm/proxy/_experimental/out/404.html b/litellm/proxy/_experimental/out/404.html index ae30e10a82dd..e1cfa170d85c 100644 --- a/litellm/proxy/_experimental/out/404.html +++ b/litellm/proxy/_experimental/out/404.html @@ -1,5 +1 @@ -<<<<<<< HEAD -404: This page could not be found.LiteLLM Dashboard

404

This page could not be found.

-======= -404: This page could not be found.LiteLLM Dashboard

404

This page could not be found.

->>>>>>> 73a7b4f4 (refactor(main.py): trigger new build) +404: This page could not be found.LiteLLM Dashboard

404

This page could not be found.

\ No newline at end of file diff --git a/litellm/proxy/_experimental/out/_next/static/chunks/386-d811195b597a2122.js b/litellm/proxy/_experimental/out/_next/static/chunks/386-d811195b597a2122.js deleted file mode 100644 index c589a3c7340d..000000000000 --- a/litellm/proxy/_experimental/out/_next/static/chunks/386-d811195b597a2122.js +++ /dev/null @@ -1,32 +0,0 @@ -"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[386],{12215:function(e,t,n){n.d(t,{iN:function(){return h},R_:function(){return d},EV:function(){return g},ez:function(){return p}});var r=n(41785),o=n(76991),a=[{index:7,opacity:.15},{index:6,opacity:.25},{index:5,opacity:.3},{index:5,opacity:.45},{index:5,opacity:.65},{index:5,opacity:.85},{index:4,opacity:.9},{index:3,opacity:.95},{index:2,opacity:.97},{index:1,opacity:.98}];function i(e){var t=e.r,n=e.g,o=e.b,a=(0,r.py)(t,n,o);return{h:360*a.h,s:a.s,v:a.v}}function l(e){var t=e.r,n=e.g,o=e.b;return"#".concat((0,r.vq)(t,n,o,!1))}function s(e,t,n){var r;return(r=Math.round(e.h)>=60&&240>=Math.round(e.h)?n?Math.round(e.h)-2*t:Math.round(e.h)+2*t:n?Math.round(e.h)+2*t:Math.round(e.h)-2*t)<0?r+=360:r>=360&&(r-=360),r}function c(e,t,n){var r;return 0===e.h&&0===e.s?e.s:((r=n?e.s-.16*t:4===t?e.s+.16:e.s+.05*t)>1&&(r=1),n&&5===t&&r>.1&&(r=.1),r<.06&&(r=.06),Number(r.toFixed(2)))}function u(e,t,n){var r;return(r=n?e.v+.05*t:e.v-.15*t)>1&&(r=1),Number(r.toFixed(2))}function d(e){for(var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=[],r=(0,o.uA)(e),d=5;d>0;d-=1){var p=i(r),f=l((0,o.uA)({h:s(p,d,!0),s:c(p,d,!0),v:u(p,d,!0)}));n.push(f)}n.push(l(r));for(var m=1;m<=4;m+=1){var g=i(r),h=l((0,o.uA)({h:s(g,m),s:c(g,m),v:u(g,m)}));n.push(h)}return"dark"===t.theme?a.map(function(e){var r,a,i,s=e.index,c=e.opacity;return l((r=(0,o.uA)(t.backgroundColor||"#141414"),a=(0,o.uA)(n[s]),i=100*c/100,{r:(a.r-r.r)*i+r.r,g:(a.g-r.g)*i+r.g,b:(a.b-r.b)*i+r.b}))}):n}var p={red:"#F5222D",volcano:"#FA541C",orange:"#FA8C16",gold:"#FAAD14",yellow:"#FADB14",lime:"#A0D911",green:"#52C41A",cyan:"#13C2C2",blue:"#1677FF",geekblue:"#2F54EB",purple:"#722ED1",magenta:"#EB2F96",grey:"#666666"},f={},m={};Object.keys(p).forEach(function(e){f[e]=d(p[e]),f[e].primary=f[e][5],m[e]=d(p[e],{theme:"dark",backgroundColor:"#141414"}),m[e].primary=m[e][5]}),f.red,f.volcano;var g=f.gold;f.orange,f.yellow,f.lime,f.green,f.cyan;var h=f.blue;f.geekblue,f.purple,f.magenta,f.grey,f.grey},8985:function(e,t,n){n.d(t,{E4:function(){return ej},jG:function(){return T},ks:function(){return Z},bf:function(){return F},CI:function(){return eD},fp:function(){return X},xy:function(){return eL}});var r,o,a=n(50833),i=n(80406),l=n(63787),s=n(5239),c=function(e){for(var t,n=0,r=0,o=e.length;o>=4;++r,o-=4)t=(65535&(t=255&e.charCodeAt(r)|(255&e.charCodeAt(++r))<<8|(255&e.charCodeAt(++r))<<16|(255&e.charCodeAt(++r))<<24))*1540483477+((t>>>16)*59797<<16),t^=t>>>24,n=(65535&t)*1540483477+((t>>>16)*59797<<16)^(65535&n)*1540483477+((n>>>16)*59797<<16);switch(o){case 3:n^=(255&e.charCodeAt(r+2))<<16;case 2:n^=(255&e.charCodeAt(r+1))<<8;case 1:n^=255&e.charCodeAt(r),n=(65535&n)*1540483477+((n>>>16)*59797<<16)}return n^=n>>>13,(((n=(65535&n)*1540483477+((n>>>16)*59797<<16))^n>>>15)>>>0).toString(36)},u=n(24050),d=n(64090),p=n.t(d,2);n(61475),n(92536);var f=n(47365),m=n(65127);function g(e){return e.join("%")}var h=function(){function e(t){(0,f.Z)(this,e),(0,a.Z)(this,"instanceId",void 0),(0,a.Z)(this,"cache",new Map),this.instanceId=t}return(0,m.Z)(e,[{key:"get",value:function(e){return 
this.opGet(g(e))}},{key:"opGet",value:function(e){return this.cache.get(e)||null}},{key:"update",value:function(e,t){return this.opUpdate(g(e),t)}},{key:"opUpdate",value:function(e,t){var n=t(this.cache.get(e));null===n?this.cache.delete(e):this.cache.set(e,n)}}]),e}(),b="data-token-hash",v="data-css-hash",y="__cssinjs_instance__",E=d.createContext({hashPriority:"low",cache:function(){var e=Math.random().toString(12).slice(2);if("undefined"!=typeof document&&document.head&&document.body){var t=document.body.querySelectorAll("style[".concat(v,"]"))||[],n=document.head.firstChild;Array.from(t).forEach(function(t){t[y]=t[y]||e,t[y]===e&&document.head.insertBefore(t,n)});var r={};Array.from(document.querySelectorAll("style[".concat(v,"]"))).forEach(function(t){var n,o=t.getAttribute(v);r[o]?t[y]===e&&(null===(n=t.parentNode)||void 0===n||n.removeChild(t)):r[o]=!0})}return new h(e)}(),defaultCache:!0}),S=n(6976),w=n(22127),x=function(){function e(){(0,f.Z)(this,e),(0,a.Z)(this,"cache",void 0),(0,a.Z)(this,"keys",void 0),(0,a.Z)(this,"cacheCallTimes",void 0),this.cache=new Map,this.keys=[],this.cacheCallTimes=0}return(0,m.Z)(e,[{key:"size",value:function(){return this.keys.length}},{key:"internalGet",value:function(e){var t,n,r=arguments.length>1&&void 0!==arguments[1]&&arguments[1],o={map:this.cache};return e.forEach(function(e){if(o){var t;o=null===(t=o)||void 0===t||null===(t=t.map)||void 0===t?void 0:t.get(e)}else o=void 0}),null!==(t=o)&&void 0!==t&&t.value&&r&&(o.value[1]=this.cacheCallTimes++),null===(n=o)||void 0===n?void 0:n.value}},{key:"get",value:function(e){var t;return null===(t=this.internalGet(e,!0))||void 0===t?void 0:t[0]}},{key:"has",value:function(e){return!!this.internalGet(e)}},{key:"set",value:function(t,n){var r=this;if(!this.has(t)){if(this.size()+1>e.MAX_CACHE_SIZE+e.MAX_CACHE_OFFSET){var o=this.keys.reduce(function(e,t){var n=(0,i.Z)(e,2)[1];return r.internalGet(t)[1]0,"[Ant Design CSS-in-JS] Theme should have at least one derivative function."),k+=1}return(0,m.Z)(e,[{key:"getDerivativeToken",value:function(e){return this.derivatives.reduce(function(t,n){return n(e,t)},void 0)}}]),e}(),A=new x;function T(e){var t=Array.isArray(e)?e:[e];return A.has(t)||A.set(t,new C(t)),A.get(t)}var I=new WeakMap,N={},R=new WeakMap;function _(e){var t=R.get(e)||"";return t||(Object.keys(e).forEach(function(n){var r=e[n];t+=n,r instanceof C?t+=r.id:r&&"object"===(0,S.Z)(r)?t+=_(r):t+=r}),R.set(e,t)),t}function P(e,t){return c("".concat(t,"_").concat(_(e)))}var L="random-".concat(Date.now(),"-").concat(Math.random()).replace(/\./g,""),M="_bAmBoO_",D=void 0,j=(0,w.Z)();function F(e){return"number"==typeof e?"".concat(e,"px"):e}function B(e,t,n){var r,o=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},i=arguments.length>4&&void 0!==arguments[4]&&arguments[4];if(i)return e;var l=(0,s.Z)((0,s.Z)({},o),{},(r={},(0,a.Z)(r,b,t),(0,a.Z)(r,v,n),r)),c=Object.keys(l).map(function(e){var t=l[e];return t?"".concat(e,'="').concat(t,'"'):null}).filter(function(e){return e}).join(" ");return"")}var Z=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";return"--".concat(t?"".concat(t,"-"):"").concat(e).replace(/([a-z0-9])([A-Z])/g,"$1-$2").replace(/([A-Z]+)([A-Z][a-z0-9]+)/g,"$1-$2").replace(/([a-z])([A-Z0-9])/g,"$1-$2").toLowerCase()},U=function(e,t,n){var r,o={},a={};return Object.entries(e).forEach(function(e){var t=(0,i.Z)(e,2),r=t[0],l=t[1];if(null!=n&&null!==(s=n.preserve)&&void 0!==s&&s[r])a[r]=l;else if(("string"==typeof l||"number"==typeof 
l)&&!(null!=n&&null!==(c=n.ignore)&&void 0!==c&&c[r])){var s,c,u,d=Z(r,null==n?void 0:n.prefix);o[d]="number"!=typeof l||null!=n&&null!==(u=n.unitless)&&void 0!==u&&u[r]?String(l):"".concat(l,"px"),a[r]="var(".concat(d,")")}}),[a,(r={scope:null==n?void 0:n.scope},Object.keys(o).length?".".concat(t).concat(null!=r&&r.scope?".".concat(r.scope):"","{").concat(Object.entries(o).map(function(e){var t=(0,i.Z)(e,2),n=t[0],r=t[1];return"".concat(n,":").concat(r,";")}).join(""),"}"):"")]},z=n(24800),H=(0,s.Z)({},p).useInsertionEffect,G=H?function(e,t,n){return H(function(){return e(),t()},n)}:function(e,t,n){d.useMemo(e,n),(0,z.Z)(function(){return t(!0)},n)},$=void 0!==(0,s.Z)({},p).useInsertionEffect?function(e){var t=[],n=!1;return d.useEffect(function(){return n=!1,function(){n=!0,t.length&&t.forEach(function(e){return e()})}},e),function(e){n||t.push(e)}}:function(){return function(e){e()}};function W(e,t,n,r,o){var a=d.useContext(E).cache,s=g([e].concat((0,l.Z)(t))),c=$([s]),u=function(e){a.opUpdate(s,function(t){var r=(0,i.Z)(t||[void 0,void 0],2),o=r[0],a=[void 0===o?0:o,r[1]||n()];return e?e(a):a})};d.useMemo(function(){u()},[s]);var p=a.opGet(s)[1];return G(function(){null==o||o(p)},function(e){return u(function(t){var n=(0,i.Z)(t,2),r=n[0],a=n[1];return e&&0===r&&(null==o||o(p)),[r+1,a]}),function(){a.opUpdate(s,function(t){var n=(0,i.Z)(t||[],2),o=n[0],l=void 0===o?0:o,u=n[1];return 0==l-1?(c(function(){(e||!a.opGet(s))&&(null==r||r(u,!1))}),null):[l-1,u]})}},[s]),p}var V={},q=new Map,Y=function(e,t,n,r){var o=n.getDerivativeToken(e),a=(0,s.Z)((0,s.Z)({},o),t);return r&&(a=r(a)),a},K="token";function X(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=(0,d.useContext)(E),o=r.cache.instanceId,a=r.container,p=n.salt,f=void 0===p?"":p,m=n.override,g=void 0===m?V:m,h=n.formatToken,S=n.getComputedToken,w=n.cssVar,x=function(e,t){for(var n=I,r=0;r=(q.get(e)||0)}),n.length-r.length>0&&r.forEach(function(e){"undefined"!=typeof document&&document.querySelectorAll("style[".concat(b,'="').concat(e,'"]')).forEach(function(e){if(e[y]===o){var t;null===(t=e.parentNode)||void 0===t||t.removeChild(e)}}),q.delete(e)})},function(e){var t=(0,i.Z)(e,4),n=t[0],r=t[3];if(w&&r){var l=(0,u.hq)(r,c("css-variables-".concat(n._themeKey)),{mark:v,prepend:"queue",attachTo:a,priority:-999});l[y]=o,l.setAttribute(b,n._themeKey)}})}var Q=n(14749),J={animationIterationCount:1,borderImageOutset:1,borderImageSlice:1,borderImageWidth:1,boxFlex:1,boxFlexGroup:1,boxOrdinalGroup:1,columnCount:1,columns:1,flex:1,flexGrow:1,flexPositive:1,flexShrink:1,flexNegative:1,flexOrder:1,gridRow:1,gridRowEnd:1,gridRowSpan:1,gridRowStart:1,gridColumn:1,gridColumnEnd:1,gridColumnSpan:1,gridColumnStart:1,msGridRow:1,msGridRowSpan:1,msGridColumn:1,msGridColumnSpan:1,fontWeight:1,lineHeight:1,opacity:1,order:1,orphans:1,tabSize:1,widows:1,zIndex:1,zoom:1,WebkitLineClamp:1,fillOpacity:1,floodOpacity:1,stopOpacity:1,strokeDasharray:1,strokeDashoffset:1,strokeMiterlimit:1,strokeOpacity:1,strokeWidth:1},ee="comm",et="rule",en="decl",er=Math.abs,eo=String.fromCharCode;function ea(e,t,n){return e.replace(t,n)}function ei(e,t){return 0|e.charCodeAt(t)}function el(e,t,n){return e.slice(t,n)}function es(e){return e.length}function ec(e,t){return t.push(e),e}function eu(e,t){for(var n="",r=0;r0?f[v]+" "+y:ea(y,/&\f/g,f[v])).trim())&&(s[b++]=E);return ev(e,t,n,0===o?et:l,s,c,u,d)}function eO(e,t,n,r,o){return ev(e,t,n,en,el(e,0,r),el(e,r+1,-1),r,o)}var 
ek="data-ant-cssinjs-cache-path",eC="_FILE_STYLE__",eA=!0,eT="_multi_value_";function eI(e){var t,n,r;return eu((r=function e(t,n,r,o,a,i,l,s,c){for(var u,d,p,f=0,m=0,g=l,h=0,b=0,v=0,y=1,E=1,S=1,w=0,x="",O=a,k=i,C=o,A=x;E;)switch(v=w,w=ey()){case 40:if(108!=v&&58==ei(A,g-1)){-1!=(d=A+=ea(ew(w),"&","&\f"),p=er(f?s[f-1]:0),d.indexOf("&\f",p))&&(S=-1);break}case 34:case 39:case 91:A+=ew(w);break;case 9:case 10:case 13:case 32:A+=function(e){for(;eh=eE();)if(eh<33)ey();else break;return eS(e)>2||eS(eh)>3?"":" "}(v);break;case 92:A+=function(e,t){for(var n;--t&&ey()&&!(eh<48)&&!(eh>102)&&(!(eh>57)||!(eh<65))&&(!(eh>70)||!(eh<97)););return n=eg+(t<6&&32==eE()&&32==ey()),el(eb,e,n)}(eg-1,7);continue;case 47:switch(eE()){case 42:case 47:ec(ev(u=function(e,t){for(;ey();)if(e+eh===57)break;else if(e+eh===84&&47===eE())break;return"/*"+el(eb,t,eg-1)+"*"+eo(47===e?e:ey())}(ey(),eg),n,r,ee,eo(eh),el(u,2,-2),0,c),c);break;default:A+="/"}break;case 123*y:s[f++]=es(A)*S;case 125*y:case 59:case 0:switch(w){case 0:case 125:E=0;case 59+m:-1==S&&(A=ea(A,/\f/g,"")),b>0&&es(A)-g&&ec(b>32?eO(A+";",o,r,g-1,c):eO(ea(A," ","")+";",o,r,g-2,c),c);break;case 59:A+=";";default:if(ec(C=ex(A,n,r,f,m,a,s,x,O=[],k=[],g,i),i),123===w){if(0===m)e(A,n,C,C,O,i,g,s,k);else switch(99===h&&110===ei(A,3)?100:h){case 100:case 108:case 109:case 115:e(t,C,C,o&&ec(ex(t,C,C,0,0,a,s,x,a,O=[],g,k),k),a,k,g,s,o?O:k);break;default:e(A,C,C,C,[""],k,0,s,k)}}}f=m=b=0,y=S=1,x=A="",g=l;break;case 58:g=1+es(A),b=v;default:if(y<1){if(123==w)--y;else if(125==w&&0==y++&&125==(eh=eg>0?ei(eb,--eg):0,ef--,10===eh&&(ef=1,ep--),eh))continue}switch(A+=eo(w),w*y){case 38:S=m>0?1:(A+="\f",-1);break;case 44:s[f++]=(es(A)-1)*S,S=1;break;case 64:45===eE()&&(A+=ew(ey())),h=eE(),m=g=es(x=A+=function(e){for(;!eS(eE());)ey();return el(eb,e,eg)}(eg)),w++;break;case 45:45===v&&2==es(A)&&(y=0)}}return i}("",null,null,null,[""],(n=t=e,ep=ef=1,em=es(eb=n),eg=0,t=[]),0,[0],t),eb="",r),ed).replace(/\{%%%\:[^;];}/g,";")}var eN=function e(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{root:!0,parentSelectors:[]},o=r.root,a=r.injectHash,c=r.parentSelectors,d=n.hashId,p=n.layer,f=(n.path,n.hashPriority),m=n.transformers,g=void 0===m?[]:m;n.linters;var h="",b={};function v(t){var r=t.getName(d);if(!b[r]){var o=e(t.style,n,{root:!1,parentSelectors:c}),a=(0,i.Z)(o,1)[0];b[r]="@keyframes ".concat(t.getName(d)).concat(a)}}if((function e(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];return t.forEach(function(t){Array.isArray(t)?e(t,n):t&&n.push(t)}),n})(Array.isArray(t)?t:[t]).forEach(function(t){var r="string"!=typeof t||o?t:{};if("string"==typeof r)h+="".concat(r,"\n");else if(r._keyframe)v(r);else{var u=g.reduce(function(e,t){var n;return(null==t||null===(n=t.visit)||void 0===n?void 0:n.call(t,e))||e},r);Object.keys(u).forEach(function(t){var r=u[t];if("object"!==(0,S.Z)(r)||!r||"animationName"===t&&r._keyframe||"object"===(0,S.Z)(r)&&r&&("_skip_check_"in r||eT in r)){function p(e,t){var n=e.replace(/[A-Z]/g,function(e){return"-".concat(e.toLowerCase())}),r=t;J[e]||"number"!=typeof r||0===r||(r="".concat(r,"px")),"animationName"===e&&null!=t&&t._keyframe&&(v(t),r=t.getName(d)),h+="".concat(n,":").concat(r,";")}var m,g=null!==(m=null==r?void 0:r.value)&&void 0!==m?m:r;"object"===(0,S.Z)(r)&&null!=r&&r[eT]&&Array.isArray(g)?g.forEach(function(e){p(t,e)}):p(t,g)}else{var y=!1,E=t.trim(),w=!1;(o||a)&&d?E.startsWith("@")?y=!0:E=function(e,t,n){if(!t)return e;var 
r=".".concat(t),o="low"===n?":where(".concat(r,")"):r;return e.split(",").map(function(e){var t,n=e.trim().split(/\s+/),r=n[0]||"",a=(null===(t=r.match(/^\w+/))||void 0===t?void 0:t[0])||"";return[r="".concat(a).concat(o).concat(r.slice(a.length))].concat((0,l.Z)(n.slice(1))).join(" ")}).join(",")}(t,d,f):o&&!d&&("&"===E||""===E)&&(E="",w=!0);var x=e(r,n,{root:w,injectHash:y,parentSelectors:[].concat((0,l.Z)(c),[E])}),O=(0,i.Z)(x,2),k=O[0],C=O[1];b=(0,s.Z)((0,s.Z)({},b),C),h+="".concat(E).concat(k)}})}}),o){if(p&&(void 0===D&&(D=function(e,t,n){if((0,w.Z)()){(0,u.hq)(e,L);var r,o,a=document.createElement("div");a.style.position="fixed",a.style.left="0",a.style.top="0",null==t||t(a),document.body.appendChild(a);var i=n?n(a):null===(r=getComputedStyle(a).content)||void 0===r?void 0:r.includes(M);return null===(o=a.parentNode)||void 0===o||o.removeChild(a),(0,u.jL)(L),i}return!1}("@layer ".concat(L," { .").concat(L,' { content: "').concat(M,'"!important; } }'),function(e){e.className=L})),D)){var y=p.split(","),E=y[y.length-1].trim();h="@layer ".concat(E," {").concat(h,"}"),y.length>1&&(h="@layer ".concat(p,"{%%%:%}").concat(h))}}else h="{".concat(h,"}");return[h,b]};function eR(e,t){return c("".concat(e.join("%")).concat(t))}function e_(){return null}var eP="style";function eL(e,t){var n=e.token,o=e.path,s=e.hashId,c=e.layer,p=e.nonce,f=e.clientOnly,m=e.order,g=void 0===m?0:m,h=d.useContext(E),S=h.autoClear,x=(h.mock,h.defaultCache),O=h.hashPriority,k=h.container,C=h.ssrInline,A=h.transformers,T=h.linters,I=h.cache,N=n._tokenKey,R=[N].concat((0,l.Z)(o)),_=W(eP,R,function(){var e=R.join("|");if(!function(){if(!r&&(r={},(0,w.Z)())){var e,t=document.createElement("div");t.className=ek,t.style.position="fixed",t.style.visibility="hidden",t.style.top="-9999px",document.body.appendChild(t);var n=getComputedStyle(t).content||"";(n=n.replace(/^"/,"").replace(/"$/,"")).split(";").forEach(function(e){var t=e.split(":"),n=(0,i.Z)(t,2),o=n[0],a=n[1];r[o]=a});var o=document.querySelector("style[".concat(ek,"]"));o&&(eA=!1,null===(e=o.parentNode)||void 0===e||e.removeChild(o)),document.body.removeChild(t)}}(),r[e]){var n=function(e){var t=r[e],n=null;if(t&&(0,w.Z)()){if(eA)n=eC;else{var o=document.querySelector("style[".concat(v,'="').concat(r[e],'"]'));o?n=o.innerHTML:delete r[e]}}return[n,t]}(e),a=(0,i.Z)(n,2),l=a[0],u=a[1];if(l)return[l,N,u,{},f,g]}var d=eN(t(),{hashId:s,hashPriority:O,layer:c,path:o.join("-"),transformers:A,linters:T}),p=(0,i.Z)(d,2),m=p[0],h=p[1],b=eI(m),y=eR(R,b);return[b,N,y,h,f,g]},function(e,t){var n=(0,i.Z)(e,3)[2];(t||S)&&j&&(0,u.jL)(n,{mark:v})},function(e){var t=(0,i.Z)(e,4),n=t[0],r=(t[1],t[2]),o=t[3];if(j&&n!==eC){var a={mark:v,prepend:"queue",attachTo:k,priority:g},l="function"==typeof p?p():p;l&&(a.csp={nonce:l});var s=(0,u.hq)(n,r,a);s[y]=I.instanceId,s.setAttribute(b,N),Object.keys(o).forEach(function(e){(0,u.hq)(eI(o[e]),"_effect-".concat(e),a)})}}),P=(0,i.Z)(_,3),L=P[0],M=P[1],D=P[2];return function(e){var t,n;return t=C&&!j&&x?d.createElement("style",(0,Q.Z)({},(n={},(0,a.Z)(n,b,M),(0,a.Z)(n,v,D),n),{dangerouslySetInnerHTML:{__html:L}})):d.createElement(e_,null),d.createElement(d.Fragment,null,t,e)}}var eM="cssVar",eD=function(e,t){var n=e.key,r=e.prefix,o=e.unitless,a=e.ignore,s=e.token,c=e.scope,p=void 0===c?"":c,f=(0,d.useContext)(E),m=f.cache.instanceId,g=f.container,h=s._tokenKey,S=[].concat((0,l.Z)(e.path),[n,p,h]);return W(eM,S,function(){var 
e=U(t(),n,{prefix:r,unitless:o,ignore:a,scope:p}),l=(0,i.Z)(e,2),s=l[0],c=l[1],u=eR(S,c);return[s,c,u,n]},function(e){var t=(0,i.Z)(e,3)[2];j&&(0,u.jL)(t,{mark:v})},function(e){var t=(0,i.Z)(e,3),r=t[1],o=t[2];if(r){var a=(0,u.hq)(r,o,{mark:v,prepend:"queue",attachTo:g,priority:-999});a[y]=m,a.setAttribute(b,n)}})};o={},(0,a.Z)(o,eP,function(e,t,n){var r=(0,i.Z)(e,6),o=r[0],a=r[1],l=r[2],s=r[3],c=r[4],u=r[5],d=(n||{}).plain;if(c)return null;var p=o,f={"data-rc-order":"prependQueue","data-rc-priority":"".concat(u)};return p=B(o,a,l,f,d),s&&Object.keys(s).forEach(function(e){if(!t[e]){t[e]=!0;var n=eI(s[e]);p+=B(n,a,"_effect-".concat(e),f,d)}}),[u,l,p]}),(0,a.Z)(o,K,function(e,t,n){var r=(0,i.Z)(e,5),o=r[2],a=r[3],l=r[4],s=(n||{}).plain;if(!a)return null;var c=o._tokenKey,u=B(a,l,c,{"data-rc-order":"prependQueue","data-rc-priority":"".concat(-999)},s);return[-999,c,u]}),(0,a.Z)(o,eM,function(e,t,n){var r=(0,i.Z)(e,4),o=r[1],a=r[2],l=r[3],s=(n||{}).plain;if(!o)return null;var c=B(o,l,a,{"data-rc-order":"prependQueue","data-rc-priority":"".concat(-999)},s);return[-999,a,c]});var ej=function(){function e(t,n){(0,f.Z)(this,e),(0,a.Z)(this,"name",void 0),(0,a.Z)(this,"style",void 0),(0,a.Z)(this,"_keyframe",!0),this.name=t,this.style=n}return(0,m.Z)(e,[{key:"getName",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"";return e?"".concat(e,"-").concat(this.name):this.name}}]),e}();function eF(e){return e.notSplit=!0,e}eF(["borderTop","borderBottom"]),eF(["borderTop"]),eF(["borderBottom"]),eF(["borderLeft","borderRight"]),eF(["borderLeft"]),eF(["borderRight"])},60688:function(e,t,n){n.d(t,{Z:function(){return T}});var r=n(14749),o=n(80406),a=n(50833),i=n(6787),l=n(64090),s=n(16480),c=n.n(s),u=n(12215),d=n(67689),p=n(5239),f=n(6976),m=n(24050),g=n(74687),h=n(53850);function b(e){return"object"===(0,f.Z)(e)&&"string"==typeof e.name&&"string"==typeof e.theme&&("object"===(0,f.Z)(e.icon)||"function"==typeof e.icon)}function v(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return Object.keys(e).reduce(function(t,n){var r=e[n];return"class"===n?(t.className=r,delete t.class):(delete t[n],t[n.replace(/-(.)/g,function(e,t){return t.toUpperCase()})]=r),t},{})}function y(e){return(0,u.R_)(e)[0]}function E(e){return e?Array.isArray(e)?e:[e]:[]}var S=function(e){var t=(0,l.useContext)(d.Z),n=t.csp,r=t.prefixCls,o="\n.anticon {\n display: inline-block;\n color: inherit;\n font-style: normal;\n line-height: 0;\n text-align: center;\n text-transform: none;\n vertical-align: -0.125em;\n text-rendering: optimizeLegibility;\n -webkit-font-smoothing: antialiased;\n -moz-osx-font-smoothing: grayscale;\n}\n\n.anticon > * {\n line-height: 1;\n}\n\n.anticon svg {\n display: inline-block;\n}\n\n.anticon::before {\n display: none;\n}\n\n.anticon .anticon-icon {\n display: block;\n}\n\n.anticon[tabindex] {\n cursor: pointer;\n}\n\n.anticon-spin::before,\n.anticon-spin {\n display: inline-block;\n -webkit-animation: loadingCircle 1s infinite linear;\n animation: loadingCircle 1s infinite linear;\n}\n\n@-webkit-keyframes loadingCircle {\n 100% {\n -webkit-transform: rotate(360deg);\n transform: rotate(360deg);\n }\n}\n\n@keyframes loadingCircle {\n 100% {\n -webkit-transform: rotate(360deg);\n transform: rotate(360deg);\n }\n}\n";r&&(o=o.replace(/anticon/g,r)),(0,l.useEffect)(function(){var 
t=e.current,r=(0,g.A)(t);(0,m.hq)(o,"@ant-design-icons",{prepend:!0,csp:n,attachTo:r})},[])},w=["icon","className","onClick","style","primaryColor","secondaryColor"],x={primaryColor:"#333",secondaryColor:"#E6E6E6",calculated:!1},O=function(e){var t,n,r=e.icon,o=e.className,a=e.onClick,s=e.style,c=e.primaryColor,u=e.secondaryColor,d=(0,i.Z)(e,w),f=l.useRef(),m=x;if(c&&(m={primaryColor:c,secondaryColor:u||y(c)}),S(f),t=b(r),n="icon should be icon definiton, but got ".concat(r),(0,h.ZP)(t,"[@ant-design/icons] ".concat(n)),!b(r))return null;var g=r;return g&&"function"==typeof g.icon&&(g=(0,p.Z)((0,p.Z)({},g),{},{icon:g.icon(m.primaryColor,m.secondaryColor)})),function e(t,n,r){return r?l.createElement(t.tag,(0,p.Z)((0,p.Z)({key:n},v(t.attrs)),r),(t.children||[]).map(function(r,o){return e(r,"".concat(n,"-").concat(t.tag,"-").concat(o))})):l.createElement(t.tag,(0,p.Z)({key:n},v(t.attrs)),(t.children||[]).map(function(r,o){return e(r,"".concat(n,"-").concat(t.tag,"-").concat(o))}))}(g.icon,"svg-".concat(g.name),(0,p.Z)((0,p.Z)({className:o,onClick:a,style:s,"data-icon":g.name,width:"1em",height:"1em",fill:"currentColor","aria-hidden":"true"},d),{},{ref:f}))};function k(e){var t=E(e),n=(0,o.Z)(t,2),r=n[0],a=n[1];return O.setTwoToneColors({primaryColor:r,secondaryColor:a})}O.displayName="IconReact",O.getTwoToneColors=function(){return(0,p.Z)({},x)},O.setTwoToneColors=function(e){var t=e.primaryColor,n=e.secondaryColor;x.primaryColor=t,x.secondaryColor=n||y(t),x.calculated=!!n};var C=["className","icon","spin","rotate","tabIndex","onClick","twoToneColor"];k(u.iN.primary);var A=l.forwardRef(function(e,t){var n,s=e.className,u=e.icon,p=e.spin,f=e.rotate,m=e.tabIndex,g=e.onClick,h=e.twoToneColor,b=(0,i.Z)(e,C),v=l.useContext(d.Z),y=v.prefixCls,S=void 0===y?"anticon":y,w=v.rootClassName,x=c()(w,S,(n={},(0,a.Z)(n,"".concat(S,"-").concat(u.name),!!u.name),(0,a.Z)(n,"".concat(S,"-spin"),!!p||"loading"===u.name),n),s),k=m;void 0===k&&g&&(k=-1);var A=E(h),T=(0,o.Z)(A,2),I=T[0],N=T[1];return l.createElement("span",(0,r.Z)({role:"img","aria-label":u.name},b,{ref:t,tabIndex:k,onClick:g,className:x}),l.createElement(O,{icon:u,primaryColor:I,secondaryColor:N,style:f?{msTransform:"rotate(".concat(f,"deg)"),transform:"rotate(".concat(f,"deg)")}:void 0}))});A.displayName="AntdIcon",A.getTwoToneColor=function(){var e=O.getTwoToneColors();return e.calculated?[e.primaryColor,e.secondaryColor]:e.primaryColor},A.setTwoToneColor=k;var T=A},67689:function(e,t,n){var r=(0,n(64090).createContext)({});t.Z=r},99537:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M512 64C264.6 64 64 264.6 64 512s200.6 448 448 448 448-200.6 448-448S759.4 64 512 64zm193.5 301.7l-210.6 292a31.8 31.8 0 01-51.7 0L318.5 484.9c-3.8-5.3 0-12.7 6.5-12.7h46.9c10.2 0 19.9 4.9 25.9 13.3l71.2 98.8 157.2-218c6-8.3 15.6-13.3 25.9-13.3H699c6.5 0 10.3 7.4 6.5 12.7z"}}]},name:"check-circle",theme:"filled"},i=n(60688),l=o.forwardRef(function(e,t){return o.createElement(i.Z,(0,r.Z)({},e,{ref:t,icon:a}))})},90507:function(e,t,n){n.d(t,{Z:function(){return l}});var r=n(14749),o=n(64090),a={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M912 190h-69.9c-9.8 0-19.1 4.5-25.1 12.2L404.7 724.5 207 474a32 32 0 00-25.1-12.2H112c-6.7 0-10.4 7.7-6.3 12.9l273.9 347c12.8 16.2 37.4 16.2 50.3 
[Minified Next.js webpack chunk (generated build output for the proxy admin UI, not hand-written source; partially garbled in extraction). Recoverable module contents: Ant Design icon wrappers (check, close-circle, close, down, ellipsis, exclamation-circle, eye, info-circle, loading, right, search, upload), a TinyColor-style color conversion and manipulation library with a CSS color-name table, rc-util portal and scroll-lock helpers, rc-trigger popup alignment/arrow/mask logic, Recharts Area and Line chart internals, and Tremor AreaChart, BarChart, Legend, chart tooltip, and no-data components.]
r=Object.keys(t),o=Object.keys(n);if(r.length!==o.length)return!1;for(let a of r)if(!o.includes(a)||!e(t[a],n[a]))return!1;return!0}}});let r=(e,t)=>{let n=new Map;return e.forEach((e,r)=>{n.set(e,t[r])}),n},o=(e,t,n)=>[e?"auto":null!=t?t:0,null!=n?n:"auto"];function a(e,t){let n=[];for(let r of e)if(Object.prototype.hasOwnProperty.call(r,t)&&(n.push(r[t]),n.length>1))return!1;return!0}},5:function(e,t,n){n.d(t,{Z:function(){return f}});var r=n(69703),o=n(64090),a=n(58437),i=n(54942),l=n(2898),s=n(99250),c=n(65492);let u={xs:{paddingX:"px-2",paddingY:"py-0.5",fontSize:"text-xs"},sm:{paddingX:"px-2.5",paddingY:"py-0.5",fontSize:"text-sm"},md:{paddingX:"px-3",paddingY:"py-0.5",fontSize:"text-md"},lg:{paddingX:"px-3.5",paddingY:"py-0.5",fontSize:"text-lg"},xl:{paddingX:"px-4",paddingY:"py-1",fontSize:"text-xl"}},d={xs:{height:"h-4",width:"w-4"},sm:{height:"h-4",width:"w-4"},md:{height:"h-4",width:"w-4"},lg:{height:"h-5",width:"w-5"},xl:{height:"h-6",width:"w-6"}},p=(0,c.fn)("Badge"),f=o.forwardRef((e,t)=>{let{color:n,icon:f,size:m=i.u8.SM,tooltip:g,className:h,children:b}=e,v=(0,r._T)(e,["color","icon","size","tooltip","className","children"]),y=f||null,{tooltipProps:E,getReferenceProps:S}=(0,a.l)();return o.createElement("span",Object.assign({ref:(0,c.lq)([t,E.refs.setReference]),className:(0,s.q)(p("root"),"w-max flex-shrink-0 inline-flex justify-center items-center cursor-default rounded-tremor-full",n?(0,s.q)((0,c.bM)(n,l.K.background).bgColor,(0,c.bM)(n,l.K.text).textColor,"bg-opacity-20 dark:bg-opacity-25"):(0,s.q)("bg-tremor-brand-muted text-tremor-brand-emphasis","dark:bg-dark-tremor-brand-muted dark:text-dark-tremor-brand-emphasis"),u[m].paddingX,u[m].paddingY,u[m].fontSize,h)},S,v),o.createElement(a.Z,Object.assign({text:g},E)),y?o.createElement(y,{className:(0,s.q)(p("icon"),"shrink-0 -ml-1 mr-1.5",d[m].height,d[m].width)}):null,o.createElement("p",{className:(0,s.q)(p("text"),"text-sm whitespace-nowrap")},b))});f.displayName="Badge"},61244:function(e,t,n){n.d(t,{Z:function(){return g}});var r=n(69703),o=n(64090),a=n(58437),i=n(54942),l=n(99250),s=n(65492),c=n(2898);let u={xs:{paddingX:"px-1.5",paddingY:"py-1.5"},sm:{paddingX:"px-1.5",paddingY:"py-1.5"},md:{paddingX:"px-2",paddingY:"py-2"},lg:{paddingX:"px-2",paddingY:"py-2"},xl:{paddingX:"px-2.5",paddingY:"py-2.5"}},d={xs:{height:"h-3",width:"w-3"},sm:{height:"h-5",width:"w-5"},md:{height:"h-5",width:"w-5"},lg:{height:"h-7",width:"w-7"},xl:{height:"h-9",width:"w-9"}},p={simple:{rounded:"",border:"",ring:"",shadow:""},light:{rounded:"rounded-tremor-default",border:"",ring:"",shadow:""},shadow:{rounded:"rounded-tremor-default",border:"border",ring:"",shadow:"shadow-tremor-card dark:shadow-dark-tremor-card"},solid:{rounded:"rounded-tremor-default",border:"border-2",ring:"ring-1",shadow:""},outlined:{rounded:"rounded-tremor-default",border:"border",ring:"ring-2",shadow:""}},f=(e,t)=>{switch(e){case"simple":return{textColor:t?(0,s.bM)(t,c.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:"",borderColor:"",ringColor:""};case"light":return{textColor:t?(0,s.bM)(t,c.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:t?(0,l.q)((0,s.bM)(t,c.K.background).bgColor,"bg-opacity-20"):"bg-tremor-brand-muted dark:bg-dark-tremor-brand-muted",borderColor:"",ringColor:""};case"shadow":return{textColor:t?(0,s.bM)(t,c.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:t?(0,l.q)((0,s.bM)(t,c.K.background).bgColor,"bg-opacity-20"):"bg-tremor-background 
dark:bg-dark-tremor-background",borderColor:"border-tremor-border dark:border-dark-tremor-border",ringColor:""};case"solid":return{textColor:t?(0,s.bM)(t,c.K.text).textColor:"text-tremor-brand-inverted dark:text-dark-tremor-brand-inverted",bgColor:t?(0,l.q)((0,s.bM)(t,c.K.background).bgColor,"bg-opacity-20"):"bg-tremor-brand dark:bg-dark-tremor-brand",borderColor:"border-tremor-brand-inverted dark:border-dark-tremor-brand-inverted",ringColor:"ring-tremor-ring dark:ring-dark-tremor-ring"};case"outlined":return{textColor:t?(0,s.bM)(t,c.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",bgColor:t?(0,l.q)((0,s.bM)(t,c.K.background).bgColor,"bg-opacity-20"):"bg-tremor-background dark:bg-dark-tremor-background",borderColor:t?(0,s.bM)(t,c.K.ring).borderColor:"border-tremor-brand-subtle dark:border-dark-tremor-brand-subtle",ringColor:t?(0,l.q)((0,s.bM)(t,c.K.ring).ringColor,"ring-opacity-40"):"ring-tremor-brand-muted dark:ring-dark-tremor-brand-muted"}}},m=(0,s.fn)("Icon"),g=o.forwardRef((e,t)=>{let{icon:n,variant:c="simple",tooltip:g,size:h=i.u8.SM,color:b,className:v}=e,y=(0,r._T)(e,["icon","variant","tooltip","size","color","className"]),E=f(c,b),{tooltipProps:S,getReferenceProps:w}=(0,a.l)();return o.createElement("span",Object.assign({ref:(0,s.lq)([t,S.refs.setReference]),className:(0,l.q)(m("root"),"inline-flex flex-shrink-0 items-center",E.bgColor,E.textColor,E.borderColor,E.ringColor,p[c].rounded,p[c].border,p[c].shadow,p[c].ring,u[h].paddingX,u[h].paddingY,v)},w,y),o.createElement(a.Z,Object.assign({text:g},S)),o.createElement(n,{className:(0,l.q)(m("icon"),"shrink-0",d[h].height,d[h].width)}))});g.displayName="Icon"},2179:function(e,t,n){n.d(t,{Z:function(){return O}});var r=n(69703),o=n(58437),a=n(64090);let i=["preEnter","entering","entered","preExit","exiting","exited","unmounted"],l=e=>({_s:e,status:i[e],isEnter:e<3,isMounted:6!==e,isResolved:2===e||e>4}),s=e=>e?6:5,c=(e,t)=>{switch(e){case 1:case 0:return 2;case 4:case 3:return s(t)}},u=e=>"object"==typeof e?[e.enter,e.exit]:[e,e],d=(e,t)=>setTimeout(()=>{isNaN(document.body.offsetTop)||e(t+1)},0),p=(e,t,n,r,o)=>{clearTimeout(r.current);let a=l(e);t(a),n.current=a,o&&o({current:a})},f=function(){let{enter:e=!0,exit:t=!0,preEnter:n,preExit:r,timeout:o,initialEntered:i,mountOnEnter:f,unmountOnExit:m,onStateChange:g}=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},[h,b]=(0,a.useState)(()=>l(i?2:s(f))),v=(0,a.useRef)(h),y=(0,a.useRef)(),[E,S]=u(o),w=(0,a.useCallback)(()=>{let e=c(v.current._s,m);e&&p(e,b,v,y,g)},[g,m]),x=(0,a.useCallback)(o=>{let a=e=>{switch(p(e,b,v,y,g),e){case 1:E>=0&&(y.current=setTimeout(w,E));break;case 4:S>=0&&(y.current=setTimeout(w,S));break;case 0:case 3:y.current=d(a,e)}},i=v.current.isEnter;"boolean"!=typeof o&&(o=!i),o?i||a(e?n?0:1:2):i&&a(t?r?3:4:s(m))},[w,g,e,t,n,r,E,S,m]);return(0,a.useEffect)(()=>()=>clearTimeout(y.current),[]),[h,x,w]};var m=n(54942),g=n(99250),h=n(65492);let b=e=>{var t=(0,r._T)(e,[]);return a.createElement("svg",Object.assign({},t,{xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"}),a.createElement("path",{fill:"none",d:"M0 0h24v24H0z"}),a.createElement("path",{d:"M18.364 5.636L16.95 7.05A7 7 0 1 0 19 12h2a9 9 0 1 1-2.636-6.364z"}))};var v=n(2898);let 
y={xs:{height:"h-4",width:"w-4"},sm:{height:"h-5",width:"w-5"},md:{height:"h-5",width:"w-5"},lg:{height:"h-6",width:"w-6"},xl:{height:"h-6",width:"w-6"}},E=e=>"light"!==e?{xs:{paddingX:"px-2.5",paddingY:"py-1.5",fontSize:"text-xs"},sm:{paddingX:"px-4",paddingY:"py-2",fontSize:"text-sm"},md:{paddingX:"px-4",paddingY:"py-2",fontSize:"text-md"},lg:{paddingX:"px-4",paddingY:"py-2.5",fontSize:"text-lg"},xl:{paddingX:"px-4",paddingY:"py-3",fontSize:"text-xl"}}:{xs:{paddingX:"",paddingY:"",fontSize:"text-xs"},sm:{paddingX:"",paddingY:"",fontSize:"text-sm"},md:{paddingX:"",paddingY:"",fontSize:"text-md"},lg:{paddingX:"",paddingY:"",fontSize:"text-lg"},xl:{paddingX:"",paddingY:"",fontSize:"text-xl"}},S=(e,t)=>{switch(e){case"primary":return{textColor:t?(0,h.bM)("white").textColor:"text-tremor-brand-inverted dark:text-dark-tremor-brand-inverted",hoverTextColor:t?(0,h.bM)("white").textColor:"text-tremor-brand-inverted dark:text-dark-tremor-brand-inverted",bgColor:t?(0,h.bM)(t,v.K.background).bgColor:"bg-tremor-brand dark:bg-dark-tremor-brand",hoverBgColor:t?(0,h.bM)(t,v.K.darkBackground).hoverBgColor:"hover:bg-tremor-brand-emphasis dark:hover:bg-dark-tremor-brand-emphasis",borderColor:t?(0,h.bM)(t,v.K.border).borderColor:"border-tremor-brand dark:border-dark-tremor-brand",hoverBorderColor:t?(0,h.bM)(t,v.K.darkBorder).hoverBorderColor:"hover:border-tremor-brand-emphasis dark:hover:border-dark-tremor-brand-emphasis"};case"secondary":return{textColor:t?(0,h.bM)(t,v.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",hoverTextColor:t?(0,h.bM)(t,v.K.text).textColor:"hover:text-tremor-brand-emphasis dark:hover:text-dark-tremor-brand-emphasis",bgColor:(0,h.bM)("transparent").bgColor,hoverBgColor:t?(0,g.q)((0,h.bM)(t,v.K.background).hoverBgColor,"hover:bg-opacity-20 dark:hover:bg-opacity-20"):"hover:bg-tremor-brand-faint dark:hover:bg-dark-tremor-brand-faint",borderColor:t?(0,h.bM)(t,v.K.border).borderColor:"border-tremor-brand dark:border-dark-tremor-brand"};case"light":return{textColor:t?(0,h.bM)(t,v.K.text).textColor:"text-tremor-brand dark:text-dark-tremor-brand",hoverTextColor:t?(0,h.bM)(t,v.K.darkText).hoverTextColor:"hover:text-tremor-brand-emphasis dark:hover:text-dark-tremor-brand-emphasis",bgColor:(0,h.bM)("transparent").bgColor,borderColor:"",hoverBorderColor:""}}},w=(0,h.fn)("Button"),x=e=>{let{loading:t,iconSize:n,iconPosition:r,Icon:o,needMargin:i,transitionStatus:l}=e,s=i?r===m.zS.Left?(0,g.q)("-ml-1","mr-1.5"):(0,g.q)("-mr-1","ml-1.5"):"",c=(0,g.q)("w-0 h-0"),u={default:c,entering:c,entered:n,exiting:n,exited:c};return t?a.createElement(b,{className:(0,g.q)(w("icon"),"animate-spin shrink-0",s,u.default,u[l]),style:{transition:"width 150ms"}}):a.createElement(o,{className:(0,g.q)(w("icon"),"shrink-0",n,s)})},O=a.forwardRef((e,t)=>{let{icon:n,iconPosition:i=m.zS.Left,size:l=m.u8.SM,color:s,variant:c="primary",disabled:u,loading:d=!1,loadingText:p,children:b,tooltip:v,className:O}=e,k=(0,r._T)(e,["icon","iconPosition","size","color","variant","disabled","loading","loadingText","children","tooltip","className"]),C=d||u,A=void 0!==n||d,T=d&&p,I=!(!b&&!T),N=(0,g.q)(y[l].height,y[l].width),R="light"!==c?(0,g.q)("rounded-tremor-default border","shadow-tremor-input","dark:shadow-dark-tremor-input"):"",_=S(c,s),P=E(c)[l],{tooltipProps:L,getReferenceProps:M}=(0,o.l)(300),[D,j]=f({timeout:50});return(0,a.useEffect)(()=>{j(d)},[d]),a.createElement("button",Object.assign({ref:(0,h.lq)([t,L.refs.setReference]),className:(0,g.q)(w("root"),"flex-shrink-0 inline-flex justify-center items-center 
group font-medium outline-none",R,P.paddingX,P.paddingY,P.fontSize,_.textColor,_.bgColor,_.borderColor,_.hoverBorderColor,C?"opacity-50 cursor-not-allowed":(0,g.q)(S(c,s).hoverTextColor,S(c,s).hoverBgColor,S(c,s).hoverBorderColor),O),disabled:C},M,k),a.createElement(o.Z,Object.assign({text:v},L)),A&&i!==m.zS.Right?a.createElement(x,{loading:d,iconSize:N,iconPosition:i,Icon:n,transitionStatus:D.status,needMargin:I}):null,T||b?a.createElement("span",{className:(0,g.q)(w("text"),"text-tremor-default whitespace-nowrap")},T?p:b):null,A&&i===m.zS.Right?a.createElement(x,{loading:d,iconSize:N,iconPosition:i,Icon:n,transitionStatus:D.status,needMargin:I}):null)});O.displayName="Button"},47047:function(e,t,n){n.d(t,{Z:function(){return b}});var r=n(69703),o=n(64090);n(50027),n(18174),n(21871);var a=n(41213),i=n(46457),l=n(54518);let s=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",viewBox:"0 0 24 24",fill:"currentColor"},t),o.createElement("path",{d:"M18.031 16.6168L22.3137 20.8995L20.8995 22.3137L16.6168 18.031C15.0769 19.263 13.124 20 11 20C6.032 20 2 15.968 2 11C2 6.032 6.032 2 11 2C15.968 2 20 6.032 20 11C20 13.124 19.263 15.0769 18.031 16.6168ZM16.0247 15.8748C17.2475 14.6146 18 12.8956 18 11C18 7.1325 14.8675 4 11 4C7.1325 4 4 7.1325 4 11C4 14.8675 7.1325 18 11 18C12.8956 18 14.6146 17.2475 15.8748 16.0247L16.0247 15.8748Z"}))};var c=n(8903),u=n(25163),d=n(70129);let p=e=>{var t=(0,r._T)(e,[]);return o.createElement("svg",Object.assign({xmlns:"http://www.w3.org/2000/svg",width:"100%",height:"100%",fill:"none",viewBox:"0 0 24 24",stroke:"currentColor",strokeWidth:"2",strokeLinecap:"round",strokeLinejoin:"round"},t),o.createElement("line",{x1:"18",y1:"6",x2:"6",y2:"18"}),o.createElement("line",{x1:"6",y1:"6",x2:"18",y2:"18"}))};var f=n(99250),m=n(65492),g=n(91753);let h=(0,m.fn)("MultiSelect"),b=o.forwardRef((e,t)=>{let{defaultValue:n,value:m,onValueChange:b,placeholder:v="Select...",placeholderSearch:y="Search",disabled:E=!1,icon:S,children:w,className:x}=e,O=(0,r._T)(e,["defaultValue","value","onValueChange","placeholder","placeholderSearch","disabled","icon","children","className"]),[k,C]=(0,i.Z)(n,m),{reactElementChildren:A,optionsAvailable:T}=(0,o.useMemo)(()=>{let e=o.Children.toArray(w).filter(o.isValidElement);return{reactElementChildren:e,optionsAvailable:(0,g.n0)("",e)}},[w]),[I,N]=(0,o.useState)(""),R=(null!=k?k:[]).length>0,_=(0,o.useMemo)(()=>I?(0,g.n0)(I,A):T,[I,A,T]),P=()=>{N("")};return o.createElement(u.R,Object.assign({as:"div",ref:t,defaultValue:k,value:k,onChange:e=>{null==b||b(e),C(e)},disabled:E,className:(0,f.q)("w-full min-w-[10rem] relative text-tremor-default",x)},O,{multiple:!0}),e=>{let{value:t}=e;return o.createElement(o.Fragment,null,o.createElement(u.R.Button,{className:(0,f.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-tremor-default focus:ring-2 transition duration-100 border pr-8 py-1.5","border-tremor-border shadow-tremor-input focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",S?"pl-11 -ml-0.5":"pl-3",(0,g.um)(t.length>0,E))},S&&o.createElement("span",{className:(0,f.q)("absolute inset-y-0 left-0 flex items-center ml-px pl-2.5")},o.createElement(S,{className:(0,f.q)(h("Icon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("div",{className:"h-6 flex 
items-center"},t.length>0?o.createElement("div",{className:"flex flex-nowrap overflow-x-scroll [&::-webkit-scrollbar]:hidden [scrollbar-width:none] gap-x-1 mr-5 -ml-1.5 relative"},T.filter(e=>t.includes(e.props.value)).map((e,n)=>{var r;return o.createElement("div",{key:n,className:(0,f.q)("max-w-[100px] lg:max-w-[200px] flex justify-center items-center pl-2 pr-1.5 py-1 font-medium","rounded-tremor-small","bg-tremor-background-muted dark:bg-dark-tremor-background-muted","bg-tremor-background-subtle dark:bg-dark-tremor-background-subtle","text-tremor-content-default dark:text-dark-tremor-content-default","text-tremor-content-emphasis dark:text-dark-tremor-content-emphasis")},o.createElement("div",{className:"text-xs truncate "},null!==(r=e.props.children)&&void 0!==r?r:e.props.value),o.createElement("div",{onClick:n=>{n.preventDefault();let r=t.filter(t=>t!==e.props.value);null==b||b(r),C(r)}},o.createElement(p,{className:(0,f.q)(h("clearIconItem"),"cursor-pointer rounded-tremor-full w-3.5 h-3.5 ml-2","text-tremor-content-subtle hover:text-tremor-content","dark:text-dark-tremor-content-subtle dark:hover:text-tremor-content")})))})):o.createElement("span",null,v)),o.createElement("span",{className:(0,f.q)("absolute inset-y-0 right-0 flex items-center mr-2.5")},o.createElement(l.Z,{className:(0,f.q)(h("arrowDownIcon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}))),R&&!E?o.createElement("button",{type:"button",className:(0,f.q)("absolute inset-y-0 right-0 flex items-center mr-8"),onClick:e=>{e.preventDefault(),C([]),null==b||b([])}},o.createElement(c.Z,{className:(0,f.q)(h("clearIconAllItems"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null,o.createElement(d.u,{className:"absolute z-10 w-full",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},o.createElement(u.R.Options,{className:(0,f.q)("divide-y overflow-y-auto outline-none rounded-tremor-default max-h-[228px] left-0 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},o.createElement("div",{className:(0,f.q)("flex items-center w-full px-2.5","bg-tremor-background-muted","dark:bg-dark-tremor-background-muted")},o.createElement("span",null,o.createElement(s,{className:(0,f.q)("flex-none w-4 h-4 mr-2","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("input",{name:"search",type:"input",autoComplete:"off",placeholder:y,className:(0,f.q)("w-full focus:outline-none focus:ring-none bg-transparent text-tremor-default py-2","text-tremor-content-emphasis","dark:text-dark-tremor-content-emphasis"),onKeyDown:e=>{"Space"===e.code&&""!==e.target.value&&e.stopPropagation()},onChange:e=>N(e.target.value),value:I})),o.createElement(a.Z.Provider,Object.assign({},{onBlur:{handleResetSearch:P}},{value:{selectedValue:t}}),_))))})});b.displayName="MultiSelect"},76628:function(e,t,n){n.d(t,{Z:function(){return u}});var r=n(69703);n(50027),n(18174),n(21871);var o=n(41213),a=n(64090),i=n(99250),l=n(65492),s=n(25163);let 
c=(0,l.fn)("MultiSelectItem"),u=a.forwardRef((e,t)=>{let{value:n,className:u,children:d}=e,p=(0,r._T)(e,["value","className","children"]),{selectedValue:f}=(0,a.useContext)(o.Z),m=(0,l.NZ)(n,f);return a.createElement(s.R.Option,Object.assign({className:(0,i.q)(c("root"),"flex justify-start items-center cursor-default text-tremor-default p-2.5","ui-active:bg-tremor-background-muted ui-active:text-tremor-content-strong ui-selected:text-tremor-content-strong text-tremor-content-emphasis","dark:ui-active:bg-dark-tremor-background-muted dark:ui-active:text-dark-tremor-content-strong dark:ui-selected:text-dark-tremor-content-strong dark:ui-selected:bg-dark-tremor-background-muted dark:text-dark-tremor-content-emphasis",u),ref:t,key:n,value:n},p),a.createElement("input",{type:"checkbox",className:(0,i.q)(c("checkbox"),"flex-none focus:ring-none focus:outline-none cursor-pointer mr-2.5","accent-tremor-brand","dark:accent-dark-tremor-brand"),checked:m,readOnly:!0}),a.createElement("span",{className:"whitespace-nowrap truncate"},null!=d?d:n))});u.displayName="MultiSelectItem"},95093:function(e,t,n){n.d(t,{Z:function(){return m}});var r=n(69703),o=n(64090),a=n(54518),i=n(8903),l=n(99250),s=n(65492),c=n(91753),u=n(25163),d=n(70129),p=n(46457);let f=(0,s.fn)("Select"),m=o.forwardRef((e,t)=>{let{defaultValue:n,value:s,onValueChange:m,placeholder:g="Select...",disabled:h=!1,icon:b,enableClear:v=!0,children:y,className:E}=e,S=(0,r._T)(e,["defaultValue","value","onValueChange","placeholder","disabled","icon","enableClear","children","className"]),[w,x]=(0,p.Z)(n,s),O=(0,o.useMemo)(()=>{let e=o.Children.toArray(y).filter(o.isValidElement);return(0,c.sl)(e)},[y]);return o.createElement(u.R,Object.assign({as:"div",ref:t,defaultValue:w,value:w,onChange:e=>{null==m||m(e),x(e)},disabled:h,className:(0,l.q)("w-full min-w-[10rem] relative text-tremor-default",E)},S),e=>{var t;let{value:n}=e;return o.createElement(o.Fragment,null,o.createElement(u.R.Button,{className:(0,l.q)("w-full outline-none text-left whitespace-nowrap truncate rounded-tremor-default focus:ring-2 transition duration-100 border pr-8 py-2","border-tremor-border shadow-tremor-input focus:border-tremor-brand-subtle focus:ring-tremor-brand-muted","dark:border-dark-tremor-border dark:shadow-dark-tremor-input dark:focus:border-dark-tremor-brand-subtle dark:focus:ring-dark-tremor-brand-muted",b?"pl-10":"pl-3",(0,c.um)((0,c.Uh)(n),h))},b&&o.createElement("span",{className:(0,l.q)("absolute inset-y-0 left-0 flex items-center ml-px pl-2.5")},o.createElement(b,{className:(0,l.q)(f("Icon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})),o.createElement("span",{className:"w-[90%] block truncate"},n&&null!==(t=O.get(n))&&void 0!==t?t:g),o.createElement("span",{className:(0,l.q)("absolute inset-y-0 right-0 flex items-center mr-3")},o.createElement(a.Z,{className:(0,l.q)(f("arrowDownIcon"),"flex-none h-5 w-5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}))),v&&w?o.createElement("button",{type:"button",className:(0,l.q)("absolute inset-y-0 right-0 flex items-center mr-8"),onClick:e=>{e.preventDefault(),x(""),null==m||m("")}},o.createElement(i.Z,{className:(0,l.q)(f("clearIcon"),"flex-none h-4 w-4","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")})):null,o.createElement(d.u,{className:"absolute z-10 w-full",enter:"transition ease duration-100 transform",enterFrom:"opacity-0 -translate-y-4",enterTo:"opacity-100 translate-y-0",leave:"transition ease duration-100 
transform",leaveFrom:"opacity-100 translate-y-0",leaveTo:"opacity-0 -translate-y-4"},o.createElement(u.R.Options,{className:(0,l.q)("divide-y overflow-y-auto outline-none rounded-tremor-default max-h-[228px] left-0 border my-1","bg-tremor-background border-tremor-border divide-tremor-border shadow-tremor-dropdown","dark:bg-dark-tremor-background dark:border-dark-tremor-border dark:divide-dark-tremor-border dark:shadow-dark-tremor-dropdown")},y)))})});m.displayName="Select"},27166:function(e,t,n){n.d(t,{Z:function(){return s}});var r=n(69703),o=n(64090),a=n(25163),i=n(99250);let l=(0,n(65492).fn)("SelectItem"),s=o.forwardRef((e,t)=>{let{value:n,icon:s,className:c,children:u}=e,d=(0,r._T)(e,["value","icon","className","children"]);return o.createElement(a.R.Option,Object.assign({className:(0,i.q)(l("root"),"flex justify-start items-center cursor-default text-tremor-default px-2.5 py-2.5","ui-active:bg-tremor-background-muted ui-active:text-tremor-content-strong ui-selected:text-tremor-content-strong ui-selected:bg-tremor-background-muted text-tremor-content-emphasis","dark:ui-active:bg-dark-tremor-background-muted dark:ui-active:text-dark-tremor-content-strong dark:ui-selected:text-dark-tremor-content-strong dark:ui-selected:bg-dark-tremor-background-muted dark:text-dark-tremor-content-emphasis",c),ref:t,key:n,value:n},d),s&&o.createElement(s,{className:(0,i.q)(l("icon"),"flex-none w-5 h-5 mr-1.5","text-tremor-content-subtle","dark:text-dark-tremor-content-subtle")}),o.createElement("span",{className:"whitespace-nowrap truncate"},null!=u?u:n))});s.displayName="SelectItem"},12224:function(e,t,n){n.d(t,{Z:function(){return I}});var r=n(69703),o=n(64090),a=n(83891),i=n(20044),l=n(10641),s=n(92381),c=n(71454),u=n(36601),d=n(37700),p=n(84152),f=n(34797),m=n(18318),g=n(71014),h=n(67409),b=n(39790);let v=(0,o.createContext)(null),y=Object.assign((0,m.yV)(function(e,t){let n=(0,s.M)(),{id:r="headlessui-label-".concat(n),passive:a=!1,...i}=e,l=function e(){let t=(0,o.useContext)(v);if(null===t){let t=Error("You used a