Skip to content

Commit f03b632

Browse files
nirkoplerHoaNQ9
authored and committed
Fine-tuned OpenAI models cost calculation langchain-ai#11715 (langchain-ai#12190)
**Description:** Add cost calculation for fine-tuned models (new and legacy). This is required now that OpenAI has added new models for fine-tuning and separated the input/output costs of fine-tuned models. I also updated the relevant unit tests; see https://platform.openai.com/docs/guides/fine-tuning for more information. - **Issue:** langchain-ai#11715 - **Twitter handle:** @nirkopler
1 parent ad21423 commit f03b632

File tree

2 files changed

+33
-7
lines changed

2 files changed

+33
-7
lines changed

libs/langchain/langchain/callbacks/openai_info.py

Lines changed: 17 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -57,10 +57,19 @@
5757
"text-davinci-003": 0.02,
5858
"text-davinci-002": 0.02,
5959
"code-davinci-002": 0.02,
60-
"ada-finetuned": 0.0016,
61-
"babbage-finetuned": 0.0024,
62-
"curie-finetuned": 0.012,
63-
"davinci-finetuned": 0.12,
60+
# Fine Tuned input
61+
"babbage-002-finetuned": 0.0016,
62+
"davinci-002-finetuned": 0.012,
63+
"gpt-3.5-turbo-0613-finetuned": 0.012,
64+
# Fine Tuned output
65+
"babbage-002-finetuned-completion": 0.0016,
66+
"davinci-002-finetuned-completion": 0.012,
67+
"gpt-3.5-turbo-0613-finetuned-completion": 0.016,
68+
# Legacy fine-tuned models
69+
"ada-finetuned-legacy": 0.0016,
70+
"babbage-finetuned-legacy": 0.0024,
71+
"curie-finetuned-legacy": 0.012,
72+
"davinci-finetuned-legacy": 0.12,
6473
}
6574

6675

@@ -82,11 +91,14 @@ def standardize_model_name(
8291
"""
8392
model_name = model_name.lower()
8493
if "ft-" in model_name:
85-
return model_name.split(":")[0] + "-finetuned"
94+
return model_name.split(":")[0] + "-finetuned-legacy"
95+
if "ft:" in model_name:
96+
return model_name.split(":")[1] + "-finetuned"
8697
elif is_completion and (
8798
model_name.startswith("gpt-4")
8899
or model_name.startswith("gpt-3.5")
89100
or model_name.startswith("gpt-35")
101+
or ("finetuned" in model_name and "legacy" not in model_name)
90102
):
91103
return model_name + "-completion"
92104
else:

libs/langchain/tests/unit_tests/callbacks/test_openai_info.py

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,21 @@ def test_on_llm_end_custom_model(handler: OpenAICallbackHandler) -> None:
4949
assert handler.total_cost == 0
5050

5151

52-
def test_on_llm_end_finetuned_model(handler: OpenAICallbackHandler) -> None:
52+
@pytest.mark.parametrize(
53+
"model_name",
54+
[
55+
"ada:ft-your-org:custom-model-name-2022-02-15-04-21-04",
56+
"babbage:ft-your-org:custom-model-name-2022-02-15-04-21-04",
57+
"curie:ft-your-org:custom-model-name-2022-02-15-04-21-04",
58+
"davinci:ft-your-org:custom-model-name-2022-02-15-04-21-04",
59+
"ft:babbage-002:your-org:custom-model-name:1abcdefg",
60+
"ft:davinci-002:your-org:custom-model-name:1abcdefg",
61+
"ft:gpt-3.5-turbo-0613:your-org:custom-model-name:1abcdefg",
62+
],
63+
)
64+
def test_on_llm_end_finetuned_model(
65+
handler: OpenAICallbackHandler, model_name: str
66+
) -> None:
5367
response = LLMResult(
5468
generations=[],
5569
llm_output={
@@ -58,7 +72,7 @@ def test_on_llm_end_finetuned_model(handler: OpenAICallbackHandler) -> None:
5872
"completion_tokens": 1,
5973
"total_tokens": 3,
6074
},
61-
"model_name": "ada:ft-your-org:custom-model-name-2022-02-15-04-21-04",
75+
"model_name": model_name,
6276
},
6377
)
6478
handler.on_llm_end(response)

0 commit comments

Comments (0)