From e936b2861e75ae6883f3bff9f2847e0a8bb15704 Mon Sep 17 00:00:00 2001
From: Rahul-s-007 <70894220+Rahul-s-007@users.noreply.github.com>
Date: Sun, 26 Mar 2023 03:14:25 +0530
Subject: [PATCH] changed the old model to the new updated model

davinci-codex replaced by code-davinci-002
---
 main.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/main.py b/main.py
index 865256f..fbd0d22 100644
--- a/main.py
+++ b/main.py
@@ -20,7 +20,7 @@ def check_if_not_null(query): # Done

 def pass_prompt_to_ai_fix_error_func(prompt):
     response = openai.Completion.create(
-        engine = "davinci-codex",
+        engine = "code-davinci-002",
         max_tokens = 512,
         prompt = prompt,
         temperature = 0, # Risk taking ability - 0
@@ -44,7 +44,7 @@ def fix_error_func(query,inp_lang): # Done

 def pass_prompt_to_ai_opt_code_func(prompt):
     response = openai.Completion.create(
-        engine = "davinci-codex",
+        engine = "code-davinci-002",
         max_tokens = 512,
         prompt = prompt,
         temperature = 0, # Risk taking ability - 0
@@ -68,7 +68,7 @@ def opt_code_func(query,inp_lang): # Done

 def pass_prompt_to_ai_promt_to_code_func(prompt):
     response = openai.Completion.create(
-        engine = "davinci-codex",
+        engine = "code-davinci-002",
         max_tokens = 512,
         prompt = prompt,
         temperature = 0, # Risk taking ability - 0
@@ -92,7 +92,7 @@ def promt_to_code_func(query): # Done

 def pass_prompt_to_ai_explain_code_func(prompt):
     response = openai.Completion.create(
-        engine = "davinci-codex",
+        engine = "code-davinci-002",
         max_tokens = 512,
         prompt = prompt,
         temperature = 0, # Risk taking ability - 0
@@ -117,7 +117,7 @@ def explain_code_func(query): # Done

 def pass_prompt_to_ai_convert_lang_func(prompt):
     response = openai.Completion.create(
-        engine = "davinci-codex",
+        engine = "code-davinci-002",
         max_tokens = 256,
         prompt = prompt,
         temperature = 0, # Risk taking ability - 0
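
For reference, a minimal standalone sketch of the updated call shape after this patch, assuming the pre-1.0 `openai` Python package that main.py already uses; the API-key setup via an OPENAI_API_KEY environment variable and the `response["choices"][0]["text"]` extraction are illustrative assumptions, since the patch does not show how the repository handles either.

    import os
    import openai

    # Assumption: the key is supplied via an environment variable
    openai.api_key = os.getenv("OPENAI_API_KEY")

    def pass_prompt_to_ai_fix_error_func(prompt):
        # Same completion call as in main.py, now targeting code-davinci-002
        response = openai.Completion.create(
            engine = "code-davinci-002",
            max_tokens = 512,
            prompt = prompt,
            temperature = 0, # Risk taking ability - 0 (deterministic output)
        )
        # Assumption: the generated text is read from the first completion choice
        return response["choices"][0]["text"]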