2 changes: 2 additions & 0 deletions NEWS.org
@@ -1,3 +1,5 @@
* Version 0.26.0
- Call tools with =nil= for false JSON values
* Version 0.25.0
- Add =llm-ollama-authed= provider, which is like Ollama but takes a key.
- Set Gemini 2.5 Pro to be the default Gemini model
2 changes: 2 additions & 0 deletions README.org
@@ -271,6 +271,8 @@ The various chat APIs will execute the functions defined in =tools= slot with th

After a tool is called, the client can use the result directly, but if you want to continue the conversation, or get a textual response that accompanies the tool call, just send the prompt back with no modifications. This is because the LLM specifies the tool use to perform and then expects to get back the results of that use. The tools were already executed at the end of the call that returned them, and the result of that execution is stored in the prompt, which is why it should be sent back without further modification.

Tool functions will be called with vectors for JSON array arguments, =nil= for false boolean arguments, and plists for JSON object arguments.
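
For illustration, here is a minimal sketch of a tool that takes a boolean argument; the =my-provider= variable, the tool name, and the prompt text are made up for this example:

#+begin_src emacs-lisp
(let ((prompt (llm-make-chat-prompt
               "Is the sky green?"
               :tools (list (llm-make-tool
                             :name "record_answer"
                             :description "Record whether the statement is true."
                             :args '((:name "is_true"
                                      :description "Whether the statement is true."
                                      :type boolean))
                             ;; A false JSON value from the LLM arrives here as nil.
                             :function (lambda (is-true)
                                         (if is-true "recorded: true" "recorded: false"))
                             :async nil)))))
  (llm-chat my-provider prompt)
  ;; Sending the same, unmodified prompt back gets the textual follow-up.
  (llm-chat my-provider prompt))
#+end_src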

Be aware that there is no guarantee that the tool will be called correctly. While LLMs mostly get this right, they are trained on JavaScript functions, so imitating JavaScript naming conventions is recommended. For example, "write_email" is a better name for a tool than "write-email".

Examples can be found in =llm-tester=. There is also a function in =utilities/elisp-to-tool.el= that generates tool definitions from existing elisp functions.
15 changes: 15 additions & 0 deletions llm-integration-test.el
@@ -331,6 +331,21 @@ else. We really just want to see if it's in the right ballpark."
;; Test that we can send the function back to the provider without error.
(llm-chat provider prompt))))

(llm-def-integration-test llm-boolean-tool-use (provider)
(when (member 'tool-use (llm-capabilities provider))
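    ;; Lyon is not the capital of France, so the LLM should report false,
    ;; which must reach the tool function as nil.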
(llm-chat provider (llm-make-chat-prompt
"Is Lyon the capital of France?"
:tools
(list (llm-make-tool
:function (lambda (result)
(should-not result))
:name "verifier"
:description "Test the LLM's decision on the veracity of the statement."
:args '((:name "llm-decision"
:description "The decision on the statement by the LLM."
:type boolean))
:async nil))))))

(llm-def-integration-test llm-tool-use-multi-output (provider)
(when (member 'tool-use (llm-capabilities provider))
(let* ((prompt (llm-integration-test-tool-use-prompt))
12 changes: 12 additions & 0 deletions llm-provider-utils-test.el
@@ -148,5 +148,17 @@
(should (equal '(:foo 3) (llm-provider-utils-streaming-accumulate '(:foo 1) '(:foo 2))))
(should (equal '(:foo "foo bar baz") (llm-provider-utils-streaming-accumulate '(:foo "foo bar") '(:foo " baz")))))

(ert-deftest llm-provider-utils--normalize-args ()
(should-not (llm-provider-utils--normalize-args :false))
(should-not (llm-provider-utils--normalize-args :json-false))
(should (equal '(1 2 nil)
(llm-provider-utils--normalize-args '(1 2 :json-false))))
(should (equal [1 2 nil]
(llm-provider-utils--normalize-args [1 2 :json-false])))
(should (equal '(1 2 [t nil t])
(llm-provider-utils--normalize-args '(1 2 [t :false t]))))
(should (equal '(:a 1 :b nil)
(llm-provider-utils--normalize-args '(:a 1 :b :json-false)))))

(provide 'llm-provider-utils-test)
;;; llm-provider-utils-test.el ends here
22 changes: 21 additions & 1 deletion llm-provider-utils.el
@@ -25,6 +25,7 @@
(require 'llm-request-plz)
(require 'llm-models)
(require 'seq)
(require 'compat)

(cl-defstruct llm-standard-provider
"A struct indicating that this is a standard provider.
@@ -787,6 +788,24 @@ This transforms the plist so that:
value)
value))))

(defun llm-provider-utils--normalize-args (args)
  "Normalize ARGS to a form that can be passed to the user.

This will convert all :json-false and :false values to nil."
  (cond
   ((vectorp args) (vconcat (mapcar #'llm-provider-utils--normalize-args args)))
   ;; Check plists before general lists, since a plist is also a list.
   ((plistp args) (let (new-plist)
                    (map-do
                     (lambda (key value)
                       (setq new-plist
                             (plist-put new-plist
                                        key
                                        (llm-provider-utils--normalize-args value))))
                     args)
                    new-plist))
   ((listp args) (mapcar #'llm-provider-utils--normalize-args args))
   ((member args '(:json-false :false)) nil)
   (t args)))
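;; For example:
;;   (llm-provider-utils--normalize-args '(:enabled :json-false :tags [1 :false]))
;;   => (:enabled nil :tags [1 nil])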

(defun llm-provider-utils-execute-tool-uses (provider prompt tool-uses multi-output partial-result success-callback)
"Execute TOOL-USES, a list of `llm-provider-utils-tool-use'.

@@ -841,7 +860,8 @@ have returned results.
(if (llm-tool-async tool)
(apply (llm-tool-function tool)
(append (list end-func) call-args))
(funcall end-func (apply (llm-tool-function tool) call-args)))))))
(funcall end-func (apply (llm-tool-function tool)
(llm-provider-utils--normalize-args call-args))))))))


;; This is a useful method for getting out of the request buffer when it's time
2 changes: 1 addition & 1 deletion llm.el
@@ -4,7 +4,7 @@

;; Author: Andrew Hyatt <ahyatt@gmail.com>
;; Homepage: https://github.com/ahyatt/llm
;; Package-Requires: ((emacs "28.1") (plz "0.8") (plz-event-source "0.1.1") (plz-media-type "0.2.1"))
;; Package-Requires: ((emacs "28.1") (plz "0.8") (plz-event-source "0.1.1") (plz-media-type "0.2.1") (compat "29.1"))
;; Package-Version: 0.25.0
;; SPDX-License-Identifier: GPL-3.0-or-later
;;