Commit
+ Reset if called while running
- Removed completion
ecornell committed Mar 4, 2024
1 parent ebccbca commit f4b3334
Showing 4 changed files with 48 additions and 71 deletions.
63 changes: 22 additions & 41 deletions AI-Tools.ahk
@@ -24,6 +24,7 @@ RestoreCursor()


;# globals
_running := false
_settingsCache := Map()
_lastModified := fileGetTime("./settings.ini")
_displayResponse := false
@@ -61,6 +62,12 @@ ShowPopupMenu() {
PromptHandler(promptName, append := false) {
try {

if (_running) {
;MsgBox "Already running. Please wait for the current request to finish."
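; Reload restarts the script, which resets _running and abandons the in-flight request.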
Reload
return
}

global _running := true
global _startTime := A_TickCount

@@ -118,7 +125,6 @@ GetTextFromClip() {
return text
}


GetSetting(section, key, defaultValue := "") {
global _settingsCache
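; Settings are cached keyed on section . key . defaultValue, so repeated lookups
; of the same setting can skip hitting settings.ini.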
if (_settingsCache.Has(section . key . defaultValue)) {
@@ -160,41 +166,21 @@ GetBody(mode, promptName, prompt, input, promptEnd) {

;

if (mode == "mode_chat_completion") {
content := prompt . input . promptEnd
messages := []
prompt_system := GetSetting(promptName, "prompt_system", "")
if (prompt_system != "") {
messages.Push(Map("role", "system", "content", prompt_system))
}
messages.Push(Map("role", "user", "content", content))
body["messages"] := messages
body["max_tokens"] := max_tokens
body["temperature"] := temperature
body["frequency_penalty"] := frequency_penalty
body["presence_penalty"] := presence_penalty
body["top_p"] := top_p
body["model"] := model

} else if (mode == "mode_completion" or mode == "mode_completion_azure") {
fullPrompt := prompt . input . promptEnd
body["prompt"] := fullPrompt
body["max_tokens"] := max_tokens
body["temperature"] := temperature
body["frequency_penalty"] := frequency_penalty
body["presence_penalty"] := presence_penalty
body["top_p"] := top_p
body["best_of"] := best_of
body["stop"] := stop
body["model"] := model

} else if (mode == "mode_edit") {
body["input"] := input
body["instruction"] := prompt
body["temperature"] := temperature
body["top_p"] := top_p
body["model"] := model
content := prompt . input . promptEnd
messages := []
prompt_system := GetSetting(promptName, "prompt_system", "")
if (prompt_system != "") {
messages.Push(Map("role", "system", "content", prompt_system))
}
messages.Push(Map("role", "user", "content", content))
body["messages"] := messages
body["max_tokens"] := max_tokens
body["temperature"] := temperature
body["frequency_penalty"] := frequency_penalty
body["presence_penalty"] := presence_penalty
body["top_p"] := top_p
body["model"] := model

return body
}
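; Rough sketch of the request body built above, once serialized to JSON
; (illustrative values taken from settings.ini.default; the encoding is
; assumed to go through Jxon_Dump):
; {
;   "messages": [
;     {"role": "system", "content": "<prompt_system>"},
;     {"role": "user", "content": "<prompt + input + promptEnd>"}
;   ],
;   "max_tokens": 2000, "temperature": 0.2, "frequency_penalty": 0.0,
;   "presence_penalty": 0.0, "top_p": 1, "model": "gpt-3.5-turbo"
; }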

@@ -236,12 +222,7 @@ HandleResponse(data, mode, promptName, input) {
LogDebug "data ->`n" data

var := Jxon_Load(&data)

if (mode == "mode_chat_completion") {
text := var.Get("choices")[1].Get("message").Get("content")
} else {
text := var.Get("choices")[1].Get("text")
}
text := var.Get("choices")[1].Get("message").Get("content")
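; Sketch of the chat completion response this parses (abridged):
; {"choices": [{"message": {"role": "assistant", "content": "..."}}], ...}
; AHK arrays are 1-based, so choices[1] is the first returned choice.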

if text == "" {
MsgBox "No text was generated. Consider modifying your input."
15 changes: 8 additions & 7 deletions README.md
@@ -56,15 +56,16 @@ To have the script start when windows boots up, select "Start With Windows" from


## Supported OpenAI APIs and Models
OpenAI
OpenAI and Azure OpenAI APIs are supported.

/v1/chat/completions (Default) - gpt-3.5-turbo
/v1/completions - text-davinci-003
/v1/edits - text-davinci-edit-001
API:
/v1/chat/completions (Default - OpenAI)
/openai/deployments/*/chat/completions (Azure)

Azure

/openai/deployments/***/completions - text-davinci-003
Models:
gpt-3.5-turbo
gpt-4
gpt-*
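
A minimal chat completion mode section in settings.ini might look like the
following (illustrative values, based on settings.ini.default):

    [mode_chat_completion]
    endpoint=https://api.openai.com/v1/chat/completions
    model="gpt-3.5-turbo"
    max_tokens=2000
    temperature=0.2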

## Compatibility
Tested on Windows 10 Pro 22H2 64-bit.
10 changes: 9 additions & 1 deletion _jxon.ahk
@@ -195,8 +195,16 @@ Jxon_Dump(obj, indent:="", lvl:=1) {
} Else If (obj is Number)
return obj

Else ; String
Else { ; String
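; Assumption: escape_str() adds the surrounding quotes, so returning the bare
; literals below lets "true"/"false" pass through as JSON booleans rather than
; quoted strings.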

if(obj == "true")
return "true"

if(obj == "false")
return "false"

return escape_str(obj)
}

escape_str(obj) {
obj := StrReplace(obj,"\","\\")
31 changes: 9 additions & 22 deletions settings.ini.default
@@ -11,6 +11,12 @@ default_api_key=
;---------
; ^ = Ctrl, ! = Alt, + = Shift, # = Win
; See: (https://autohotkey.com/docs/Hotkeys.htm)
;
; hotkey_1 : (Required) The hotkey to select the current line and auto-run
; hotkey_1_prompt : (Required) The prompt to use for hotkey_1
; hotkey_2 : (Required) The hotkey to select the current line and display
; the prompt menu
; menu_hotkey : (Required) The hotkey to display the prompt menu
;------------------------------------------------------------------------------
hotkey_1 = ^+j
hotkey_1_prompt = prompt_spelling
@@ -182,30 +188,11 @@ top_p=1
frequency_penalty=0.0
presence_penalty=0.0

; Other modes
[mode_edit]
endpoint=https://api.openai.com/v1/edits
model="text-davinci-edit-001"
temperature=0.2
top_p=1
stop=["###"]

[mode_completion]
endpoint=https://api.openai.com/v1/completions
model="text-davinci-003"
max_tokens=2000
temperature=0.2
top_p=1
best_of=1
frequency_penalty=0.0
presence_penalty=0.0
stop=["###"]

; Azure mode settings - See: https://docs.microsoft.com/en-us/azure/openai/quickstart
[mode_completion_azure]
endpoint=https://***.openai.azure.com/openai/deployments/***/completions?api-version=2022-12-01
[mode_chat_completion_azure]
endpoint=https://****.openai.azure.com/openai/deployments/gpt-35-turbo/chat/completions?api-version=2023-03-15-preview
api_key=***
model="text-davinci-003"
model="gpt-3.5-turbo"
max_tokens=2000
temperature=0.2
top_p=1
