From 8ccb2099ef223fdc9d334d9473e6ac0c561f7fdf Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Wed, 15 May 2024 11:16:45 -0400 Subject: [PATCH 01/31] 0.6.0 - 0.6.1 --- .gitignore | 3 +- llamascript/__init__.py | 198 +++++++++++++++++++++++++++++++++++++++- llamascript/embedded.py | 49 ---------- llamascript/lang.py | 173 ----------------------------------- setup.py | 12 ++- upload.py | 4 + 6 files changed, 212 insertions(+), 227 deletions(-) delete mode 100644 llamascript/embedded.py delete mode 100644 llamascript/lang.py create mode 100644 upload.py diff --git a/.gitignore b/.gitignore index 33997b1..eb83577 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ dist/ .DS_Store *.egg-info/ -build/ \ No newline at end of file +build/ +__pycache__/ \ No newline at end of file diff --git a/llamascript/__init__.py b/llamascript/__init__.py index c7666fb..3fefcdd 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -1,2 +1,196 @@ -from lang import * -from embedded import * +__version__ = "0.6.1" + +import asyncio +import ollama +import logging +import sys +import subprocess +import os + +# Set up logging +logging.basicConfig(level=logging.WARNING) + + +class llama: + def __init__(self): + self.model = "" + self.data = "" + self.system = [] + + def USE(self, line): + if line.split(" ")[0] == "USE": + self.model = line.split(" ")[1].strip() + else: + raise ValueError("Invalid model") + + def PROMPT(self, line="", p=""): + if p != "": + self.data = p + else: + split_line = line.split(" ", 1) + self.data = split_line[1] if len(split_line) > 1 else "" + + def SYSTEM(self, line="", p=""): + if p != "": + self.system = [{"role": "system", "content": p}] + else: + split_line = line.split(" ", 1) + prompt = split_line[1] if len(split_line) > 1 else "" + self.system = [{"role": "system", "content": prompt}] + + def CHAT(self, stream: bool = False): + for _ in range(3): + try: + response = ollama.chat( + model=self.model, + messages=self.system + [{"role": "user", "content": self.data}], + stream=stream, + ) + if stream: + for message in response: + print(message["message"]["content"], end="") + print() + else: + print(response["message"]["content"]) + break + except Exception as e: + logging.error("Error using model: %s", e) + print("Model not loaded. Trying to load model...") + ollama.pull(self.model) + print("Model loaded. Trying again...") + else: + raise ValueError( + "Model does not exist or could not be loaded. Please try again." 
+ ) + + def INPUT(self, command): + if command == "SYSTEM": + self.SYSTEM(p=input("Enter system prompt: ")) + elif command == "PROMPT": + self.PROMPT(p=input("Enter prompt: ")) + else: + raise ValueError("Invalid command for INPUT") + + def CREATE_MODEL(self, filename, parameters, model_name): + try: + with open(filename, "w") as file: + file.write( + f'FROM {parameters["model"]}\nPARAMETER temperature {parameters["temperature"]}\nSYSTEM """\n{parameters["system_message"]}\n"""\n' + ) + print(f"Modelfile created.") + command = ["ollama", "create", model_name, "-f", "./Modelfile"] + process = subprocess.Popen( + command, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + creationflags=subprocess.CREATE_NO_WINDOW, + ) + stdout, stderr = process.communicate() + print("Model created.") + + if process.returncode != 0: + if stderr is not None: + print(f"Error executing command: {stderr.decode()}") + else: + if stdout is not None: + print(stdout.decode()) + print("Removing Modelfile...") + os.remove(filename) + + except Exception as e: + logging.error("Error creating model file: %s", e) + print(f"Error creating model file {filename}.") + + def REPEAT(self, command, repeat_count): + for _ in range(repeat_count): + self.execute_command(command) + + def execute_command(self, command): + if command.startswith("PROMPT INPUT"): + self.INPUT("PROMPT") + elif command.startswith("CHAT"): + self.CHAT() + else: + raise ValueError("Invalid command to repeat") + + async def read(self, filename): + try: + with open(filename, "r") as file: + lines = file.readlines() + i = 0 + while i < len(lines): + line = lines[i].strip() + if not line: + i += 1 + continue + command = line.split(" ") + if command[0] == "REPEAT": + repeat_count = int(command[1]) if len(command) > 1 else 1 + repeat_commands = [] + i += 1 + while i < len(lines) and not lines[i].strip().startswith("ENDREPEAT"): + repeat_commands.append(lines[i].strip()) + i += 1 + for _ in range(repeat_count): + for repeat_command in repeat_commands: + self.execute_command(repeat_command) + elif command[0] == "USE": + self.USE(line) + elif len(command) > 1 and command[1] == "INPUT": + self.INPUT(command[0]) + elif command[0] == "SYSTEM": + self.SYSTEM(line=line) + elif command[0] == "PROMPT": + self.PROMPT(line=line) + elif command[0] == "SAVE": + if len(command) < 2: + logging.error("No filename provided") + print("No filename provided") + sys.exit(1) + model_name = command[1] + parameters = { + "model": self.model, + "temperature": command[2] if len(command) > 2 else 0.7, + "system_message": self.system[0]["content"], + } + self.CREATE_MODEL("Modelfile", parameters, model_name) + elif command[0] == "CHAT": + if len(command) > 1 and command[1] == "STREAM": + stream = command[1] == True + else: + stream = False + self.CHAT(stream=stream) + elif command[0] == "REPEAT": + repeat_count = int(command[1]) if len(command) > 1 else 1 + repeat_command = " ".join(command[2:]) + self.REPEAT(repeat_command, repeat_count) + else: + raise ValueError("Invalid command") + except FileNotFoundError: + logging.error("File %s not found.", filename) + print(f"File {filename} not found.") + + +import argparse + + +def run(): + parser = argparse.ArgumentParser(description="Run llama script.") + parser.add_argument("file_name", type=str, help="The name of the file to run") + + args = parser.parse_args() + + if not (args.file_name.endswith(".llama") or args.file_name == "llama"): + logging.error("Invalid file type. 
Please provide a .llama or llama file.") + print("Invalid file type. Please provide a .llama or llama file.") + sys.exit(1) + + try: + l = llama() + asyncio.run(l.read(args.file_name)) + except KeyboardInterrupt: + pass + + +if __name__ == "__main__": + run() diff --git a/llamascript/embedded.py b/llamascript/embedded.py deleted file mode 100644 index 762c26a..0000000 --- a/llamascript/embedded.py +++ /dev/null @@ -1,49 +0,0 @@ -from lang import llama - - -class LlamaScriptRunner: - def __init__(self): - self.llama = llama() - - def use(self, model): - try: - self.llama.USE(f"USE {model}") - return True - except ValueError: - return False - - def prompt(self, prompt): - self.llama.PROMPT(f"PROMPT {prompt}") - return True - - def system(self, system_prompt): - self.llama.SYSTEM(f"SYSTEM {system_prompt}") - return True - - def chat(self, stream=False): - try: - self.llama.CHAT(stream) - return True - except ValueError: - return False - - def input(self, command): - try: - self.llama.INPUT(command) - return True - except ValueError: - return False - - def create_model(self, filename, parameters, model_name): - try: - self.llama.CREATE_MODEL(filename, parameters, model_name) - return True - except Exception: - return False - - async def read(self, filename): - try: - await self.llama.read(filename) - return True - except FileNotFoundError: - return False diff --git a/llamascript/lang.py b/llamascript/lang.py deleted file mode 100644 index bee74d1..0000000 --- a/llamascript/lang.py +++ /dev/null @@ -1,173 +0,0 @@ -import asyncio -import ollama -import logging -import sys -import subprocess -import os - -# Set up logging -logging.basicConfig(level=logging.WARNING) - - -class llama: - def __init__(self): - self.model = "" - self.data = "" - self.system = [] - self.ignore = False - - def USE(self, line): - if line.split(" ")[0] == "USE": - self.model = line.split(" ")[1].strip() - else: - raise ValueError("Invalid model") - - def PROMPT(self, line="", p=""): - if p != "": - self.data = p - else: - split_line = line.split(" ", 1) - self.data = split_line[1] if len(split_line) > 1 else "" - - def SYSTEM(self, line="", p=""): - if p != "": - self.system = [{"role": "system", "content": p}] - else: - split_line = line.split(" ", 1) - prompt = split_line[1] if len(split_line) > 1 else "" - self.system = [{"role": "system", "content": prompt}] - - def CHAT(self, stream: bool = False): - for _ in range(3): - try: - response = ollama.chat( - model=self.model, - messages=self.system + [{"role": "user", "content": self.data}], - stream=stream, - ) - if stream: - for message in response: - print(message["message"]["content"], end="") - print() - else: - print(response["message"]["content"]) - break - except Exception as e: - logging.error("Error using model: %s", e) - print("Model not loaded. Trying to load model...") - ollama.pull(self.model) - print("Model loaded. Trying again...") - else: - raise ValueError( - "Model does not exist or could not be loaded. Please try again." 
- ) - - def INPUT(self, command): - if command == "SYSTEM": - self.SYSTEM(p=input("Enter system prompt: ")) - elif command == "PROMPT": - self.PROMPT(p=input("Enter prompt: ")) - else: - raise ValueError("Invalid command for INPUT") - - def CREATE_MODEL(self, filename, parameters, model_name): - try: - with open(filename, "w") as file: - file.write( - f'FROM {parameters["model"]}\nPARAMETER temperature {parameters["temperature"]}\nSYSTEM """\n{parameters["system_message"]}\n"""\n' - ) - print(f"Modelfile created.") - command = ["ollama", "create", model_name, "-f", "./Modelfile"] - process = subprocess.Popen( - command, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - creationflags=subprocess.CREATE_NO_WINDOW, - ) - stdout, stderr = process.communicate() - print("Model created.") - - if process.returncode != 0: - if stderr is not None: - print(f"Error executing command: {stderr.decode()}") - else: - if stdout is not None: - print(stdout.decode()) - print("Removing Modelfile...") - os.remove(filename) - - except Exception as e: - logging.error("Error creating model file: %s", e) - print(f"Error creating model file {filename}.") - - async def read(self, filename): - try: - with open(filename, "r") as file: - for line in file: - line = line.strip() - if not line: - continue - command = line.split(" ") - if command[0] == "IGNORE": - self.ignore = True - elif command[0] == "USE": - self.USE(line) - elif len(command) > 1 and command[1] == "INPUT": - self.INPUT(command[0]) - elif command[0] == "SYSTEM": - self.SYSTEM(line=line) - elif command[0] == "PROMPT": - self.PROMPT(line=line) - elif command[0] == "SAVE": - if len(command) < 2: - logging.error("No filename provided") - print("No filename provided") - sys.exit(1) - model_name = command[1] - parameters = { - "model": self.model, - "temperature": command[2] if len(command) > 2 else 0.7, - "system_message": self.system[0]["content"], - } - self.CREATE_MODEL("Modelfile", parameters, model_name) - elif command[0] == "CHAT": - if len(command) > 1 and command[1] == "STREAM": - stream = command[1] == True - else: - stream = False - if not self.ignore: - print( - '=================\nThanks for using llama, a no-code AI chatbot. Please ensure Ollama (https://ollama.com) is running. To get started, type "USE" followed by the model you want to use. Then, type "PROMPT" followed by the prompt you want to use. Finally, type "CHAT" to chat with the AI. To run a script, type "llamascript" to run your script. To ignore this message, add "IGNORE" to the beginning of your llama file.\n=================' - ) - self.ignore = True - self.CHAT(stream=stream) - else: - raise ValueError("Invalid command") - except FileNotFoundError: - logging.error("File %s not found.", filename) - print(f"File {filename} not found.") - - -import argparse - - -def run(): - parser = argparse.ArgumentParser(description="Run llama script.") - parser.add_argument("file_name", type=str, help="The name of the file to run") - - args = parser.parse_args() - - if not (args.file_name.endswith(".llama") or args.file_name == "llama"): - logging.error("Invalid file type. Please provide a .llama or llama file.") - print("Invalid file type. 
Please provide a .llama or llama file.") - sys.exit(1) - - try: - l = llama() - asyncio.run(l.read(args.file_name)) - except KeyboardInterrupt: - pass - - -if __name__ == "__main__": - run() diff --git a/setup.py b/setup.py index fdc29ee..773ca1c 100644 --- a/setup.py +++ b/setup.py @@ -1,8 +1,16 @@ import setuptools +import os +import re + + +def read_version(): + with open(os.path.join("llamascript", "__init__.py")) as f: + return re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", f.read(), re.M).group(1) + setuptools.setup( name="llamascript", - version="0.5.0", + version=read_version(), author="WolfTheDev", author_email="wolfthedev@gmail.com", description="No-code AI chatbot using Ollama.", @@ -19,7 +27,7 @@ python_requires=">=3.6", entry_points={ "console_scripts": [ - "llamascript=llamascript.lang:run", + "llamascript=llamascript:run", ], }, ) diff --git a/upload.py b/upload.py new file mode 100644 index 0000000..dafae9d --- /dev/null +++ b/upload.py @@ -0,0 +1,4 @@ +import subprocess + +subprocess.run(["python3", "setup.py", "sdist", "bdist_wheel"]) +subprocess.run(["twine", "upload", "dist/*"]) \ No newline at end of file From a5899cfc89eb60c84592494e0b2cbe48cd826896 Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Wed, 15 May 2024 11:24:52 -0400 Subject: [PATCH 02/31] Update README.md --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a0d3c89..8c0dbf6 100644 --- a/README.md +++ b/README.md @@ -6,9 +6,9 @@ [Medium Post](https://medium.com/@wolfthedev/llamascript-simple-ai-builder-74442dc9b090) -[![Black Format](https://github.com/Project-Llama/llamascript/actions/workflows/format.yml/badge.svg)](https://github.com/WolfTheDeveloper/llamascript/actions/workflows/format.yml) -[![Upload to PyPi](https://github.com/Project-Llama/llamascript/actions/workflows/python-publish.yml/badge.svg)](https://github.com/WolfTheDeveloper/llamascript/actions/workflows/python-publish.yml) -[![CodeQL](https://github.com/Project-Llama/llamascript/actions/workflows/github-code-scanning/codeql/badge.svg)](https://github.com/WolfTheDeveloper/llamascript/actions/workflows/github-code-scanning/codeql) +[![Black Format](https://github.com/Project-Llama/llamascript/actions/workflows/format.yml/badge.svg)](https://github.com/Project-Llama/llamascript/actions/workflows/format.yml) +[![Upload to PyPi](https://github.com/Project-Llama/llamascript/actions/workflows/python-publish.yml/badge.svg)](https://github.com/Project-Llama/llamascript/actions/workflows/python-publish.yml) +[![CodeQL](https://github.com/Project-Llama/llamascript/actions/workflows/github-code-scanning/codeql/badge.svg)](https://github.com/Project-Llama/llamascript/actions/workflows/github-code-scanning/codeql) [![VS Code Extension Downloads](https://img.shields.io/visual-studio-marketplace/d/WolfTheDev.llamascript?label=VS-Code%20Downloads)](https://marketplace.visualstudio.com/items?itemName=WolfTheDev.llamascript) ![GitHub commit activity](https://img.shields.io/github/commit-activity/w/Project-Llama/llamascript?label=Commits) From c5578dfdbb27717b21e892e06998d5ed8d3b88c8 Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Sat, 25 May 2024 21:37:16 -0400 Subject: [PATCH 03/31] 0.6.2 - 0.6.3 Fix LlamaScript not running and add `-v / --version` flags. 
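
The 0.6.1 `read()` loop only advanced its line index for blank lines and
inside `REPEAT` blocks, so the first real command was re-executed forever;
the loop now increments `i` after every command, and the `REPEAT`/`ENDREPEAT`
handling is dropped. `CHAT STREAM` previously compared the literal string
"STREAM" against `True` (always false), so streaming never engaged; it now
calls `CHAT(stream=True)` directly. The new `-v`/`--version` flag uses the
argparse `version` action and prints `LlamaScript version 0.6.3`. A sample
script, `llamascript/llama` (added below), exercises the repaired streaming
path:

```llamascript
USE llama3
PROMPT why is the sky blue?
CHAT STREAM
```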
--- llamascript/__init__.py | 50 +++++++++++++++++++++-------------------- llamascript/llama | 3 +++ upload.py | 8 +++++++ 3 files changed, 37 insertions(+), 24 deletions(-) create mode 100644 llamascript/llama diff --git a/llamascript/__init__.py b/llamascript/__init__.py index 3fefcdd..a69846e 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.6.1" +__version__ = "0.6.3" import asyncio import ollama @@ -7,6 +7,12 @@ import subprocess import os +dbg = False + +def debug(message): + if dbg: + print(message) + # Set up logging logging.basicConfig(level=logging.WARNING) @@ -41,11 +47,13 @@ def SYSTEM(self, line="", p=""): def CHAT(self, stream: bool = False): for _ in range(3): try: + debug("Attempting to chat with model...") response = ollama.chat( model=self.model, messages=self.system + [{"role": "user", "content": self.data}], stream=stream, ) + debug("Chat successful.") if stream: for message in response: print(message["message"]["content"], end="") @@ -100,10 +108,7 @@ def CREATE_MODEL(self, filename, parameters, model_name): except Exception as e: logging.error("Error creating model file: %s", e) print(f"Error creating model file {filename}.") - - def REPEAT(self, command, repeat_count): - for _ in range(repeat_count): - self.execute_command(command) + sys.exit(1) def execute_command(self, command): if command.startswith("PROMPT INPUT"): @@ -124,17 +129,7 @@ async def read(self, filename): i += 1 continue command = line.split(" ") - if command[0] == "REPEAT": - repeat_count = int(command[1]) if len(command) > 1 else 1 - repeat_commands = [] - i += 1 - while i < len(lines) and not lines[i].strip().startswith("ENDREPEAT"): - repeat_commands.append(lines[i].strip()) - i += 1 - for _ in range(repeat_count): - for repeat_command in repeat_commands: - self.execute_command(repeat_command) - elif command[0] == "USE": + if command[0] == "USE": self.USE(line) elif len(command) > 1 and command[1] == "INPUT": self.INPUT(command[0]) @@ -155,17 +150,13 @@ async def read(self, filename): } self.CREATE_MODEL("Modelfile", parameters, model_name) elif command[0] == "CHAT": - if len(command) > 1 and command[1] == "STREAM": - stream = command[1] == True + if len(command) > 1 and command[1] == "STREAM":\ + self.CHAT(stream=True) else: - stream = False - self.CHAT(stream=stream) - elif command[0] == "REPEAT": - repeat_count = int(command[1]) if len(command) > 1 else 1 - repeat_command = " ".join(command[2:]) - self.REPEAT(repeat_command, repeat_count) + self.CHAT() else: raise ValueError("Invalid command") + i += 1 except FileNotFoundError: logging.error("File %s not found.", filename) print(f"File {filename} not found.") @@ -177,9 +168,20 @@ async def read(self, filename): def run(): parser = argparse.ArgumentParser(description="Run llama script.") parser.add_argument("file_name", type=str, help="The name of the file to run") + parser.add_argument( + "-v", + "--version", + action="version", + version=f"LlamaScript version {__version__}", + help="Display version information", + ) args = parser.parse_args() + if args.version: + print(f"llamascript version {__version__}") + sys.exit(0) + if not (args.file_name.endswith(".llama") or args.file_name == "llama"): logging.error("Invalid file type. Please provide a .llama or llama file.") print("Invalid file type. 
Please provide a .llama or llama file.") diff --git a/llamascript/llama b/llamascript/llama new file mode 100644 index 0000000..5b63a9f --- /dev/null +++ b/llamascript/llama @@ -0,0 +1,3 @@ +USE llama3 +PROMPT why is the sky blue? +CHAT STREAM \ No newline at end of file diff --git a/upload.py b/upload.py index dafae9d..d6e4b58 100644 --- a/upload.py +++ b/upload.py @@ -1,4 +1,12 @@ import subprocess +import os + +if os.path.exists("dist"): + subprocess.run(["rm", "-r", "dist"]) +if os.path.exists("build"): + subprocess.run(["rm", "-r", "build"]) +if os.path.exists("llamascript.egg-info"): + subprocess.run(["rm", "-r", "llamascript.egg-info"]) subprocess.run(["python3", "setup.py", "sdist", "bdist_wheel"]) subprocess.run(["twine", "upload", "dist/*"]) \ No newline at end of file From ca31de41d42a23ce5e6c5811980b9e774803edd6 Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Sat, 25 May 2024 21:45:35 -0400 Subject: [PATCH 04/31] Update format.yml --- .github/workflows/format.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index 324019f..d4d1ea8 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -1,5 +1,5 @@ name: Format -on: [pull_request, push] +on: [push] jobs: linter_name: name: runner / black From 0559ab2de1e999bc4ee9481c0d0cb4b0543af035 Mon Sep 17 00:00:00 2001 From: zanderlewis <158775116+zanderlewis@users.noreply.github.com> Date: Sun, 26 May 2024 01:46:14 +0000 Subject: [PATCH 05/31] :art: Format Python code with psf/black --- llamascript/__init__.py | 4 +++- upload.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/llamascript/__init__.py b/llamascript/__init__.py index a69846e..75b7432 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -9,10 +9,12 @@ dbg = False + def debug(message): if dbg: print(message) + # Set up logging logging.basicConfig(level=logging.WARNING) @@ -150,7 +152,7 @@ async def read(self, filename): } self.CREATE_MODEL("Modelfile", parameters, model_name) elif command[0] == "CHAT": - if len(command) > 1 and command[1] == "STREAM":\ + if len(command) > 1 and command[1] == "STREAM": self.CHAT(stream=True) else: self.CHAT() diff --git a/upload.py b/upload.py index d6e4b58..3313793 100644 --- a/upload.py +++ b/upload.py @@ -9,4 +9,4 @@ subprocess.run(["rm", "-r", "llamascript.egg-info"]) subprocess.run(["python3", "setup.py", "sdist", "bdist_wheel"]) -subprocess.run(["twine", "upload", "dist/*"]) \ No newline at end of file +subprocess.run(["twine", "upload", "dist/*"]) From 8f2d5033325565c716232919ec7654a3e739604c Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Sat, 25 May 2024 21:49:08 -0400 Subject: [PATCH 06/31] Update format.yml --- .github/workflows/format.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index d4d1ea8..baf5f7d 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -4,6 +4,7 @@ jobs: linter_name: name: runner / black runs-on: ubuntu-latest + if: github.actor != 'github-actions[bot]' steps: - uses: actions/checkout@v4 - name: Check files using the black formatter From 0273eea79862a92ec6c0d4b3d6352d7f385e4f7d Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Sun, 26 May 2024 09:24:31 -0400 Subject: [PATCH 07/31] Update upload.py Add safety message. 
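
upload.py now stops to confirm before doing anything destructive: it removes
`dist/`, `build/`, and `llamascript.egg-info/`, rebuilds the sdist/wheel, and
uploads with twine only when the answer is exactly `Y`; answering `n` (or
anything else) exits without touching the tree. Note that the cleanup shells
out to `rm -r`, so the helper assumes a POSIX environment.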
--- upload.py | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/upload.py b/upload.py index 3313793..f08b11a 100644 --- a/upload.py +++ b/upload.py @@ -1,12 +1,19 @@ import subprocess import os +import sys -if os.path.exists("dist"): - subprocess.run(["rm", "-r", "dist"]) -if os.path.exists("build"): - subprocess.run(["rm", "-r", "build"]) -if os.path.exists("llamascript.egg-info"): - subprocess.run(["rm", "-r", "llamascript.egg-info"]) +i = input("Are you sure you don't want to create a gh release instead? (Y/n): ") -subprocess.run(["python3", "setup.py", "sdist", "bdist_wheel"]) -subprocess.run(["twine", "upload", "dist/*"]) +if i == "n": + sys.exit(0) +elif i == "Y": + if os.path.exists("dist"): + subprocess.run(["rm", "-r", "dist"]) + if os.path.exists("build"): + subprocess.run(["rm", "-r", "build"]) + if os.path.exists("llamascript.egg-info"): + subprocess.run(["rm", "-r", "llamascript.egg-info"]) + + subprocess.run(["python3", "setup.py", "sdist", "bdist_wheel"]) + subprocess.run(["twine", "upload", "dist/*"]) +sys.exit(0) From 135bc196d9b906d1f9cd052737ff95b216506548 Mon Sep 17 00:00:00 2001 From: zanderlewis <158775116+zanderlewis@users.noreply.github.com> Date: Sun, 26 May 2024 13:24:53 +0000 Subject: [PATCH 08/31] :art: Format Python code with psf/black --- upload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/upload.py b/upload.py index f08b11a..fb01eae 100644 --- a/upload.py +++ b/upload.py @@ -13,7 +13,7 @@ subprocess.run(["rm", "-r", "build"]) if os.path.exists("llamascript.egg-info"): subprocess.run(["rm", "-r", "llamascript.egg-info"]) - + subprocess.run(["python3", "setup.py", "sdist", "bdist_wheel"]) subprocess.run(["twine", "upload", "dist/*"]) sys.exit(0) From 440b1cbbd32e0cba624966bba454dc6876403a27 Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Mon, 27 May 2024 17:56:17 -0400 Subject: [PATCH 09/31] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 8c0dbf6..7292a0e 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,8 @@ ![GitHub commit activity](https://img.shields.io/github/commit-activity/w/Project-Llama/llamascript?label=Commits) ![GitHub License](https://img.shields.io/github/license/Project-Llama/llamascript?label=License) +![Discord](https://img.shields.io/badge/Join-Logo?style=flat&logo=discord&label=Discord&color=%235865F2&link=http%3A%2F%2Fdiscord.com%2Finvite%2FRJ9ZDPM2Fx) + LlamaScript is a no-code AI chatbot using Ollama. ## Table of Contents From 07660f05bd83900776e8ceebb0c09eae3ca76a21 Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Mon, 27 May 2024 17:57:03 -0400 Subject: [PATCH 10/31] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7292a0e..070d848 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ ![GitHub commit activity](https://img.shields.io/github/commit-activity/w/Project-Llama/llamascript?label=Commits) ![GitHub License](https://img.shields.io/github/license/Project-Llama/llamascript?label=License) -![Discord](https://img.shields.io/badge/Join-Logo?style=flat&logo=discord&label=Discord&color=%235865F2&link=http%3A%2F%2Fdiscord.com%2Finvite%2FRJ9ZDPM2Fx) +[![Discord](https://img.shields.io/badge/Join-Logo?style=flat&logo=discord&label=Discord&color=%235865F2)](http://discord.com/invite/RJ9ZDPM2Fx) LlamaScript is a no-code AI chatbot using Ollama. 
From 0cba4a497e19cfc8255ea81e4470a7f2b2ce49a4 Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Wed, 29 May 2024 09:28:33 -0400 Subject: [PATCH 11/31] Update issue templates --- .github/ISSUE_TEMPLATE/feature_request.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..bbcbbe7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. From 5be0e9e1f5dce429beb2acc611893b7043d3cccb Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Wed, 29 May 2024 09:30:09 -0400 Subject: [PATCH 12/31] Create CODE_OF_CONDUCT.md --- CODE_OF_CONDUCT.md | 128 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 128 insertions(+) create mode 100644 CODE_OF_CONDUCT.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..2a04c79 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. 
+ +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +wolfthedev@gmail.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. 
From 79c0a39f192305e3057251fa1e9e4b79b28300cc Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Wed, 29 May 2024 09:33:18 -0400 Subject: [PATCH 13/31] Create SECURITY.md --- SECURITY.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..94c27be --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,14 @@ +# Security Policy + +## Supported Versions + +Our supported versions are: + +| Version | Supported | +| ------- | ------------------ | +| 0.6.x | :white_check_mark: | +| < 0.5 | :x: | + +## Reporting a Vulnerability + +Report Vulnerabilities by opening an issue with a title containing `[SECURITY]`. From 8a52cbd58ea0e5c6f3258c8351b3214c37fc8f9f Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Wed, 29 May 2024 10:26:47 -0400 Subject: [PATCH 14/31] Create CONTRIBUTING.md --- CONTRIBUTING.md | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..a33d763 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,8 @@ +# Contributing +You can contribute to LlamaScript by doing the following: + +1. Fork LlamaScript +2. Create your changes on your fork +3. Make sure your fork is not behind on any commits +4. Create a Pull Request +5. Get your Pull Request merged! From b06a7d8e74106197f3c64dc606b5e3ad62d9e013 Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Wed, 29 May 2024 10:27:06 -0400 Subject: [PATCH 15/31] Delete CONTRIBUTORS.md --- CONTRIBUTORS.md | 1 - 1 file changed, 1 deletion(-) delete mode 100644 CONTRIBUTORS.md diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md deleted file mode 100644 index 4287ca8..0000000 --- a/CONTRIBUTORS.md +++ /dev/null @@ -1 +0,0 @@ -# \ No newline at end of file From 6f5cb5d7d56fb8942e6c0e0e197cc1edd151309b Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Wed, 29 May 2024 10:45:28 -0400 Subject: [PATCH 16/31] Fix version issue --- llamascript/__init__.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/llamascript/__init__.py b/llamascript/__init__.py index 75b7432..0a29815 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.6.3" +__version__ = "0.6.4" import asyncio import ollama @@ -180,10 +180,6 @@ def run(): args = parser.parse_args() - if args.version: - print(f"llamascript version {__version__}") - sys.exit(0) - if not (args.file_name.endswith(".llama") or args.file_name == "llama"): logging.error("Invalid file type. Please provide a .llama or llama file.") print("Invalid file type. 
Please provide a .llama or llama file.") From 0ff18b89f5820c6cb83cbd2bed07362a3f35b908 Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Wed, 29 May 2024 11:12:07 -0400 Subject: [PATCH 17/31] Fix Mac and Linux Support --- llamascript/__init__.py | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/llamascript/__init__.py b/llamascript/__init__.py index 0a29815..20063fa 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.6.4" +__version__ = "0.6.5" import asyncio import ollama @@ -6,6 +6,7 @@ import sys import subprocess import os +import platform dbg = False @@ -89,12 +90,19 @@ def CREATE_MODEL(self, filename, parameters, model_name): ) print(f"Modelfile created.") command = ["ollama", "create", model_name, "-f", "./Modelfile"] - process = subprocess.Popen( - command, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - creationflags=subprocess.CREATE_NO_WINDOW, - ) + if platform.system() == "Windows": + process = subprocess.Popen( + command, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + creationflags=subprocess.CREATE_NO_WINDOW, + ) + else: + process = subprocess.Popen( + command, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ) stdout, stderr = process.communicate() print("Model created.") From b0d95efa3c4ca039ccd7848eecdf0fef34da41f6 Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Wed, 29 May 2024 12:42:58 -0400 Subject: [PATCH 18/31] Create dependabot.yml --- .github/dependabot.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..f55afd4 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. 
+# Please see the documentation for all configuration options: +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file + +version: 2 +updates: + - package-ecosystem: "" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "daily" From 5909904a32f393255284a600ccba5983ac1df0ce Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Wed, 29 May 2024 12:43:30 -0400 Subject: [PATCH 19/31] Update dependabot.yml --- .github/dependabot.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index f55afd4..2775ad7 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -5,7 +5,7 @@ version: 2 updates: - - package-ecosystem: "" # See documentation for possible values + - package-ecosystem: "pip" # See documentation for possible values directory: "/" # Location of package manifests schedule: interval: "daily" From 13842909384eca4e2456c8e5458294545f602cc7 Mon Sep 17 00:00:00 2001 From: Zander Date: Mon, 6 Jan 2025 18:28:47 -0500 Subject: [PATCH 20/31] Major Rewrite --- .github/ISSUE_TEMPLATE/feature_request.md | 20 -- .github/dependabot.yml | 9 +- CODE_OF_CONDUCT.md | 128 ---------- README.md | 43 +--- SECURITY.md | 14 -- examples/basic-chatbot/llama | 4 - examples/chatbot.llama | 3 + examples/custom-model/llama | 3 - examples/custom_model.llama | 3 + examples/user-decides/llama | 5 - llamascript/__init__.py | 293 ++++++++++++++-------- llamascript/llama | 3 - requirements.txt | 4 +- setup.py | 8 +- 14 files changed, 216 insertions(+), 324 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/feature_request.md delete mode 100644 CODE_OF_CONDUCT.md delete mode 100644 SECURITY.md delete mode 100644 examples/basic-chatbot/llama create mode 100644 examples/chatbot.llama delete mode 100644 examples/custom-model/llama create mode 100644 examples/custom_model.llama delete mode 100644 examples/user-decides/llama delete mode 100644 llamascript/llama diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index bbcbbe7..0000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: '' -labels: '' -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** -Add any other context or screenshots about the feature request here. diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 2775ad7..b38df29 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,11 +1,6 @@ -# To get started with Dependabot version updates, you'll need to specify which -# package ecosystems to update and where the package manifests are located. 
-# Please see the documentation for all configuration options: -# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file - version: 2 updates: - - package-ecosystem: "pip" # See documentation for possible values - directory: "/" # Location of package manifests + - package-ecosystem: "pip" + directory: "/" schedule: interval: "daily" diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 2a04c79..0000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,128 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or - advances of any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email - address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. -Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -wolfthedev@gmail.com. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. 
- -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. 
diff --git a/README.md b/README.md index 070d848..a71317c 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,6 @@ # LlamaScript - -[Medium Post](https://medium.com/@wolfthedev/llamascript-simple-ai-builder-74442dc9b090) - [![Black Format](https://github.com/Project-Llama/llamascript/actions/workflows/format.yml/badge.svg)](https://github.com/Project-Llama/llamascript/actions/workflows/format.yml) [![Upload to PyPi](https://github.com/Project-Llama/llamascript/actions/workflows/python-publish.yml/badge.svg)](https://github.com/Project-Llama/llamascript/actions/workflows/python-publish.yml) [![CodeQL](https://github.com/Project-Llama/llamascript/actions/workflows/github-code-scanning/codeql/badge.svg)](https://github.com/Project-Llama/llamascript/actions/workflows/github-code-scanning/codeql) @@ -14,8 +11,6 @@ ![GitHub commit activity](https://img.shields.io/github/commit-activity/w/Project-Llama/llamascript?label=Commits) ![GitHub License](https://img.shields.io/github/license/Project-Llama/llamascript?label=License) -[![Discord](https://img.shields.io/badge/Join-Logo?style=flat&logo=discord&label=Discord&color=%235865F2)](http://discord.com/invite/RJ9ZDPM2Fx) - LlamaScript is a no-code AI chatbot using Ollama. ## Table of Contents @@ -23,8 +18,7 @@ LlamaScript is a no-code AI chatbot using Ollama. - [Installation](#installation) - [Usage](#usage) - [License](#license) - - [Roadmap](#roadmap) - - [Examples](examples/) +- [Examples](examples/) ## Installation @@ -35,42 +29,29 @@ pip install llamascript ``` ## Usage -To use LlamaScript, create a llama file (no file extension) with the following commands: - +To use LlamaScript, create a `.llama` file and write your script. Here are the commands you can use: ```llamascript -IGNORE: Use this before the CHAT command to supress the welcome message. -USE : This command loads the specified model. -SYSTEM : This command sets the system prompt. -PROMPT : This command sets the message to be sent to the chatbot. -CHAT: This command sends the message to the chatbot and prints the response. +use(...) # Specify the model to use +prompt(...) # Prompt the user for input +system(...) # System message for the AI +chat(...) # Chat with the user +save(...) # Save the model ``` Here's an example: - ```llamascript -IGNORE -USE llama3 -PROMPT Hello, how are you? -CHAT +use("llama3") +prompt("Why is the sky blue?") +chat() ``` > [!NOTE]\ > For more examples see [here.](examples/) You can then run LlamaScript with the following command: - ```bash -llamascript +llamascript myscript.llama ``` -LlamaScript usually has a file extension of `.llama`, but if it is ran as a main script, it is usually `llama` (no file extension). - ## License -LlamaScript is licensed under the Apache 2.0 License. - -## Roadmap -Things to come in the future: - -- An `API` command to serve on Flask -- Plugins/Extensions handling (Help Wanted) - +LlamaScript is licensed under the Apache 2.0 License. See [LICENSE](LICENSE) for more information. diff --git a/SECURITY.md b/SECURITY.md deleted file mode 100644 index 94c27be..0000000 --- a/SECURITY.md +++ /dev/null @@ -1,14 +0,0 @@ -# Security Policy - -## Supported Versions - -Our supported versions are: - -| Version | Supported | -| ------- | ------------------ | -| 0.6.x | :white_check_mark: | -| < 0.5 | :x: | - -## Reporting a Vulnerability - -Report Vulnerabilities by opening an issue with a title containing `[SECURITY]`. 
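
Alongside the function-call syntax the README documents above, the rewritten
interpreter below also accepts Rust-style attributes of the form
`#[name(value)]`; the parser attaches an attribute to the statement that
follows it, and `chat()` reads a `stream` attribute to enable token
streaming. A minimal sketch, based on the `#[stream(true)]` example in the
new lexer's token table:

```llamascript
use("llama3")
prompt("Why is the sky blue?")
#[stream(true)]
chat()
```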
diff --git a/examples/basic-chatbot/llama b/examples/basic-chatbot/llama deleted file mode 100644 index cd7d1d1..0000000 --- a/examples/basic-chatbot/llama +++ /dev/null @@ -1,4 +0,0 @@ -IGNORE -USE llama3 -PROMPT Why is the sky blue? -CHAT \ No newline at end of file diff --git a/examples/chatbot.llama b/examples/chatbot.llama new file mode 100644 index 0000000..665531f --- /dev/null +++ b/examples/chatbot.llama @@ -0,0 +1,3 @@ +use("llama3") +prompt("Why is the sky blue?") +chat() \ No newline at end of file diff --git a/examples/custom-model/llama b/examples/custom-model/llama deleted file mode 100644 index 4a760de..0000000 --- a/examples/custom-model/llama +++ /dev/null @@ -1,3 +0,0 @@ -USE llama3 -SYSTEM You will respond in pig latin only. -SAVE piglatin 0.7 diff --git a/examples/custom_model.llama b/examples/custom_model.llama new file mode 100644 index 0000000..61ddf83 --- /dev/null +++ b/examples/custom_model.llama @@ -0,0 +1,3 @@ +use("llama3") +system("You will respond in pig latin only.") +save("piglatin", 0.7) \ No newline at end of file diff --git a/examples/user-decides/llama b/examples/user-decides/llama deleted file mode 100644 index 5effcac..0000000 --- a/examples/user-decides/llama +++ /dev/null @@ -1,5 +0,0 @@ -IGNORE -USE llama3 -SYSTEM INPUT -PROMPT INPUT -CHAT diff --git a/llamascript/__init__.py b/llamascript/__init__.py index 20063fa..32ba6d7 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.6.5" +__version__ = "1.0.0" import asyncio import ollama @@ -7,47 +7,176 @@ import subprocess import os import platform +import re +import argparse +import colorama +colorama.init() dbg = False - def debug(message): if dbg: - print(message) + print(f"{colorama.Fore.CYAN}{colorama.Style.BRIGHT}[DEBUG]{colorama.Style.RESET_ALL} {message}") + +def error(message): + print(f"{colorama.Fore.RED}{colorama.Style.BRIGHT}[ERROR]{colorama.Style.RESET_ALL} {message}") +def warning(message): + print(f"{colorama.Fore.YELLOW}{colorama.Style.BRIGHT}[WARNING]{colorama.Style.RESET_ALL} {message}") # Set up logging logging.basicConfig(level=logging.WARNING) +class Lexer: + def __init__(self, input_text): + self.tokens = [] + self.tokenize(input_text) + + def tokenize(self, text): + token_specification = [ + ("ATTRIBUTE", r"#\[(.*?)\]"), # Attributes e.g., #[stream(true)] + ("NUMBER", r"\d+(\.\d*)?"), # Integer or decimal number + ("STRING", r"\".*?\""), # String literals + ("ID", r"[A-Za-z_][A-Za-z0-9_]*"), # Identifiers + ("LPAREN", r"\("), # Left parenthesis + ("RPAREN", r"\)"), # Right parenthesis + ("COMMA", r","), # Comma + ("NEWLINE", r"\n"), # Line endings + ("SKIP", r"[ \t]+"), # Skip over spaces and tabs + ("MISMATCH", r"."), # Any other character + ] + tok_regex = "|".join("(?P<%s>%s)" % pair for pair in token_specification) + for mo in re.finditer(tok_regex, text): + kind = mo.lastgroup + value = mo.group() + if kind == "NUMBER": + value = float(value) if "." 
in value else int(value) + self.tokens.append(("NUMBER", value)) + elif kind in {"ID", "STRING", "LPAREN", "RPAREN", "COMMA", "ATTRIBUTE"}: + self.tokens.append((kind, value)) + elif kind == "NEWLINE": + self.tokens.append(("NEWLINE", value)) + elif kind == "SKIP": + continue + elif kind == "MISMATCH": + error(f"Invalid character: {value}") + sys.exit(1) + +class Parser: + def __init__(self, tokens): + self.tokens = tokens + self.current = 0 + + def parse(self): + ast = [] + current_attributes = {} + while self.current < len(self.tokens): + token = self.tokens[self.current] + if token[0] == "ATTRIBUTE": + current_attributes = self.parse_attribute(token[1][2:-1].strip()) # Remove #[ and ] + self.current += 1 # Skip ATTRIBUTE + elif token[0] == "ID": + ast.append(self.statement(current_attributes)) + current_attributes = {} # Reset after associating + else: + self.current += 1 + return ast + + def statement(self, attributes): + token = self.tokens[self.current] + func_name = token[1].lower() + self.current += 1 # Skip function name + + if self.tokens[self.current][0] != "LPAREN": + error(f"Expected '(' after {func_name}") + sys.exit(1) + + self.current += 1 # Skip '(' + args = self.arguments() + + if self.tokens[self.current][0] != "RPAREN": + error("Expected ')' after arguments") + sys.exit(1) + + self.current += 1 # Skip ')' + return (func_name, args, attributes) + + def arguments(self): + args = [] + while self.current < len(self.tokens) and self.tokens[self.current][0] != "RPAREN": + token = self.tokens[self.current] + if token[0] in {"STRING", "NUMBER"}: + args.append(token[1]) + self.current += 1 + elif token[0] == "COMMA": + self.current += 1 # Skip comma + else: + error(f"Invalid argument `{token[1]}`") + sys.exit(1) + return args + + def parse_attribute(self, attr_str): + match = re.match(r'(\w+)\((.+)\)', attr_str) + if match: + attr_name = match.group(1).lower() + attr_value = match.group(2).strip('"').strip("'") + if attr_value.lower() == "true": + attr_value = True + elif attr_value.lower() == "false": + attr_value = False + return {attr_name: attr_value} + else: + error(f"Invalid attribute: {attr_str}") + sys.exit(1) + +class Interpreter: + def __init__(self, ast, llama_instance): + self.ast = ast + self.llama = llama_instance + + def execute(self): + for node in self.ast: + command = node[0] + args = node[1] + attributes = node[2] + if command == "use": + self.llama.use(args[0], attributes) + elif command == "prompt": + self.llama.prompt(args[0], attributes) + elif command == "system": + self.llama.system_command(args[0], attributes) + elif command == "save": + self.llama.create_model(args[0], { + "model": self.llama.model, + "temperature": args[1], + "system_message": self.llama.system[0]["content"], + }, attributes) + elif command == "chat": + self.llama.chat(attributes) + else: + raise ValueError(f"Unknown command: {command}") -class llama: +class Llama: def __init__(self): self.model = "" self.data = "" self.system = [] - def USE(self, line): - if line.split(" ")[0] == "USE": - self.model = line.split(" ")[1].strip() - else: - raise ValueError("Invalid model") + def use(self, model_name, _): + self.model = model_name.strip('"') + debug(f"Using model: {self.model}") - def PROMPT(self, line="", p=""): - if p != "": - self.data = p - else: - split_line = line.split(" ", 1) - self.data = split_line[1] if len(split_line) > 1 else "" + def prompt(self, prompt_text, _): + self.data = prompt_text + debug(f"Prompt set to: {self.data}") - def SYSTEM(self, line="", p=""): - if p != 
"": - self.system = [{"role": "system", "content": p}] - else: - split_line = line.split(" ", 1) - prompt = split_line[1] if len(split_line) > 1 else "" - self.system = [{"role": "system", "content": prompt}] + def system_command(self, system_content, _): + self.system = [{"role": "system", "content": system_content}] + debug(f"System command set.") - def CHAT(self, stream: bool = False): + def chat(self, attributes): + stream = attributes.get("stream", False) + debug(f"Stream set to: {stream}") for _ in range(3): try: debug("Attempting to chat with model...") @@ -58,6 +187,7 @@ def CHAT(self, stream: bool = False): ) debug("Chat successful.") if stream: + warning("Streaming is a work in progress. Please wait for the final response.") for message in response: print(message["message"]["content"], end="") print() @@ -66,30 +196,21 @@ def CHAT(self, stream: bool = False): break except Exception as e: logging.error("Error using model: %s", e) - print("Model not loaded. Trying to load model...") + debug("Model not loaded. Trying to load model...") ollama.pull(self.model) - print("Model loaded. Trying again...") - else: - raise ValueError( - "Model does not exist or could not be loaded. Please try again." - ) - - def INPUT(self, command): - if command == "SYSTEM": - self.SYSTEM(p=input("Enter system prompt: ")) - elif command == "PROMPT": - self.PROMPT(p=input("Enter prompt: ")) + debug("Model loaded. Trying again...") else: - raise ValueError("Invalid command for INPUT") + error("Error using model. Please try again.") + sys.exit(1) - def CREATE_MODEL(self, filename, parameters, model_name): + def create_model(self, filename, parameters, attributes): try: with open(filename, "w") as file: file.write( f'FROM {parameters["model"]}\nPARAMETER temperature {parameters["temperature"]}\nSYSTEM """\n{parameters["system_message"]}\n"""\n' ) - print(f"Modelfile created.") - command = ["ollama", "create", model_name, "-f", "./Modelfile"] + debug("Modelfile created.") + command = ["ollama", "create", parameters["model"], "-f", "./Modelfile"] if platform.system() == "Windows": process = subprocess.Popen( command, @@ -107,73 +228,30 @@ def CREATE_MODEL(self, filename, parameters, model_name): print("Model created.") if process.returncode != 0: - if stderr is not None: - print(f"Error executing command: {stderr.decode()}") - else: - if stdout is not None: - print(stdout.decode()) - print("Removing Modelfile...") + if stderr: + error(f"Error executing command: {stderr.decode()}") + elif stdout: + debug(stdout.decode()) + debug("Removing Modelfile...") os.remove(filename) except Exception as e: logging.error("Error creating model file: %s", e) - print(f"Error creating model file {filename}.") + error(f"Error creating model file {filename}.") sys.exit(1) - def execute_command(self, command): - if command.startswith("PROMPT INPUT"): - self.INPUT("PROMPT") - elif command.startswith("CHAT"): - self.CHAT() - else: - raise ValueError("Invalid command to repeat") - - async def read(self, filename): + def read(self, filename): try: with open(filename, "r") as file: - lines = file.readlines() - i = 0 - while i < len(lines): - line = lines[i].strip() - if not line: - i += 1 - continue - command = line.split(" ") - if command[0] == "USE": - self.USE(line) - elif len(command) > 1 and command[1] == "INPUT": - self.INPUT(command[0]) - elif command[0] == "SYSTEM": - self.SYSTEM(line=line) - elif command[0] == "PROMPT": - self.PROMPT(line=line) - elif command[0] == "SAVE": - if len(command) < 2: - logging.error("No filename 
provided") - print("No filename provided") - sys.exit(1) - model_name = command[1] - parameters = { - "model": self.model, - "temperature": command[2] if len(command) > 2 else 0.7, - "system_message": self.system[0]["content"], - } - self.CREATE_MODEL("Modelfile", parameters, model_name) - elif command[0] == "CHAT": - if len(command) > 1 and command[1] == "STREAM": - self.CHAT(stream=True) - else: - self.CHAT() - else: - raise ValueError("Invalid command") - i += 1 + content = file.read() + lexer = Lexer(content) + parser = Parser(lexer.tokens) + ast = parser.parse() + interpreter = Interpreter(ast, self) + interpreter.execute() except FileNotFoundError: logging.error("File %s not found.", filename) - print(f"File {filename} not found.") - - -import argparse - + error(f"File {filename} not found.") def run(): parser = argparse.ArgumentParser(description="Run llama script.") @@ -185,20 +263,29 @@ def run(): version=f"LlamaScript version {__version__}", help="Display version information", ) + parser.add_argument( + "-d", + "--debug", + action="store_true", + help="Enable debug mode", + ) args = parser.parse_args() - if not (args.file_name.endswith(".llama") or args.file_name == "llama"): - logging.error("Invalid file type. Please provide a .llama or llama file.") - print("Invalid file type. Please provide a .llama or llama file.") + global dbg + dbg = args.debug + + if not args.file_name.endswith(".llama"): + err_msg = "Invalid file type. Please provide a .llama file." + logging.error(err_msg) + error(err_msg) + sys.exit(1) try: - l = llama() - asyncio.run(l.read(args.file_name)) + l = Llama() + l.read(args.file_name) except KeyboardInterrupt: pass - if __name__ == "__main__": - run() + run() \ No newline at end of file diff --git a/llamascript/llama b/llamascript/llama deleted file mode 100644 index 5b63a9f..0000000 --- a/llamascript/llama +++ /dev/null @@ -1,3 +0,0 @@ -USE llama3 -PROMPT why is the sky blue? 
-CHAT STREAM \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 3fc682f..10feae8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -ollama -asyncio +ollama==0.4.5 +colorama==0.4.6 \ No newline at end of file diff --git a/setup.py b/setup.py index 773ca1c..db5311a 100644 --- a/setup.py +++ b/setup.py @@ -11,14 +11,14 @@ def read_version(): setuptools.setup( name="llamascript", version=read_version(), - author="WolfTheDev", - author_email="wolfthedev@gmail.com", + author="Zander Lewis", + author_email="zander@zanderlewis.dev", description="No-code AI chatbot using Ollama.", long_description=open("README.md").read(), long_description_content_type="text/markdown", - url="https://github.com/WolfTheDeveloper/llamascript", + url="https://github.com/Project-Llama/llamascript", packages=setuptools.find_packages(), - install_requires=["ollama"], + install_requires=["ollama", "colorama"], classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", From 6c431fb131a598f5824e048099adc3052f09563d Mon Sep 17 00:00:00 2001 From: zanderlewis <158775116+zanderlewis@users.noreply.github.com> Date: Mon, 6 Jan 2025 23:29:28 +0000 Subject: [PATCH 21/31] :art: Format Python code with psf/black --- llamascript/__init__.py | 72 ++++++++++++++++++++++++++++------------- 1 file changed, 49 insertions(+), 23 deletions(-) diff --git a/llamascript/__init__.py b/llamascript/__init__.py index 32ba6d7..c8db4ab 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -14,19 +14,30 @@ colorama.init() dbg = False + def debug(message): if dbg: - print(f"{colorama.Fore.CYAN}{colorama.Style.BRIGHT}[DEBUG]{colorama.Style.RESET_ALL} {message}") + print( + f"{colorama.Fore.CYAN}{colorama.Style.BRIGHT}[DEBUG]{colorama.Style.RESET_ALL} {message}" + ) + def error(message): - print(f"{colorama.Fore.RED}{colorama.Style.BRIGHT}[ERROR]{colorama.Style.RESET_ALL} {message}") + print( + f"{colorama.Fore.RED}{colorama.Style.BRIGHT}[ERROR]{colorama.Style.RESET_ALL} {message}" + ) + def warning(message): - print(f"{colorama.Fore.YELLOW}{colorama.Style.BRIGHT}[WARNING]{colorama.Style.RESET_ALL} {message}") + print( + f"{colorama.Fore.YELLOW}{colorama.Style.BRIGHT}[WARNING]{colorama.Style.RESET_ALL} {message}" + ) + # Set up logging logging.basicConfig(level=logging.WARNING) + class Lexer: def __init__(self, input_text): self.tokens = [] @@ -34,16 +45,16 @@ def __init__(self, input_text): def tokenize(self, text): token_specification = [ - ("ATTRIBUTE", r"#\[(.*?)\]"), # Attributes e.g., #[stream(true)] - ("NUMBER", r"\d+(\.\d*)?"), # Integer or decimal number - ("STRING", r"\".*?\""), # String literals - ("ID", r"[A-Za-z_][A-Za-z0-9_]*"), # Identifiers - ("LPAREN", r"\("), # Left parenthesis - ("RPAREN", r"\)"), # Right parenthesis - ("COMMA", r","), # Comma - ("NEWLINE", r"\n"), # Line endings - ("SKIP", r"[ \t]+"), # Skip over spaces and tabs - ("MISMATCH", r"."), # Any other character + ("ATTRIBUTE", r"#\[(.*?)\]"), # Attributes e.g., #[stream(true)] + ("NUMBER", r"\d+(\.\d*)?"), # Integer or decimal number + ("STRING", r"\".*?\""), # String literals + ("ID", r"[A-Za-z_][A-Za-z0-9_]*"), # Identifiers + ("LPAREN", r"\("), # Left parenthesis + ("RPAREN", r"\)"), # Right parenthesis + ("COMMA", r","), # Comma + ("NEWLINE", r"\n"), # Line endings + ("SKIP", r"[ \t]+"), # Skip over spaces and tabs + ("MISMATCH", r"."), # Any other character ] tok_regex = "|".join("(?P<%s>%s)" % pair for pair in token_specification) for mo in re.finditer(tok_regex, 
text): @@ -62,6 +73,7 @@ def tokenize(self, text): error(f"Invalid character: {value}") sys.exit(1) + class Parser: def __init__(self, tokens): self.tokens = tokens @@ -73,7 +85,9 @@ def parse(self): while self.current < len(self.tokens): token = self.tokens[self.current] if token[0] == "ATTRIBUTE": - current_attributes = self.parse_attribute(token[1][2:-1].strip()) # Remove #[ and ] + current_attributes = self.parse_attribute( + token[1][2:-1].strip() + ) # Remove #[ and ] self.current += 1 # Skip ATTRIBUTE elif token[0] == "ID": ast.append(self.statement(current_attributes)) @@ -103,7 +117,9 @@ def statement(self, attributes): def arguments(self): args = [] - while self.current < len(self.tokens) and self.tokens[self.current][0] != "RPAREN": + while ( + self.current < len(self.tokens) and self.tokens[self.current][0] != "RPAREN" + ): token = self.tokens[self.current] if token[0] in {"STRING", "NUMBER"}: args.append(token[1]) @@ -116,7 +132,7 @@ def arguments(self): return args def parse_attribute(self, attr_str): - match = re.match(r'(\w+)\((.+)\)', attr_str) + match = re.match(r"(\w+)\((.+)\)", attr_str) if match: attr_name = match.group(1).lower() attr_value = match.group(2).strip('"').strip("'") @@ -129,6 +145,7 @@ def parse_attribute(self, attr_str): error(f"Invalid attribute: {attr_str}") sys.exit(1) + class Interpreter: def __init__(self, ast, llama_instance): self.ast = ast @@ -146,16 +163,21 @@ def execute(self): elif command == "system": self.llama.system_command(args[0], attributes) elif command == "save": - self.llama.create_model(args[0], { - "model": self.llama.model, - "temperature": args[1], - "system_message": self.llama.system[0]["content"], - }, attributes) + self.llama.create_model( + args[0], + { + "model": self.llama.model, + "temperature": args[1], + "system_message": self.llama.system[0]["content"], + }, + attributes, + ) elif command == "chat": self.llama.chat(attributes) else: raise ValueError(f"Unknown command: {command}") + class Llama: def __init__(self): self.model = "" @@ -187,7 +209,9 @@ def chat(self, attributes): ) debug("Chat successful.") if stream: - warning("Streaming is a work in progress. Please wait for the final response.") + warning( + "Streaming is a work in progress. Please wait for the final response." + ) for message in response: print(message["message"]["content"], end="") print() @@ -253,6 +277,7 @@ def read(self, filename): logging.error("File %s not found.", filename) error(f"File {filename} not found.") + def run(): parser = argparse.ArgumentParser(description="Run llama script.") parser.add_argument("file_name", type=str, help="The name of the file to run") @@ -287,5 +312,6 @@ def run(): except KeyboardInterrupt: pass + if __name__ == "__main__": - run() \ No newline at end of file + run() From e7842a097a5e04cd3d2670d79afe4e785fb393f0 Mon Sep 17 00:00:00 2001 From: Zander Date: Tue, 7 Jan 2025 15:53:47 -0500 Subject: [PATCH 22/31] Comments + Fixed Strings --- examples/chatbot.llama | 12 +++++++++--- llamascript/__init__.py | 18 +++++++++++++++--- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/examples/chatbot.llama b/examples/chatbot.llama index 665531f..3ead331 100644 --- a/examples/chatbot.llama +++ b/examples/chatbot.llama @@ -1,3 +1,9 @@ -use("llama3") -prompt("Why is the sky blue?") -chat() \ No newline at end of file +use("piglatin") // Load the Piglatin module +prompt("Why is the sky blue?") /* Ask a question */ + +// The chatbot will respond in pig latin +chat() + +/* + Done! 
The chatbot will respond in pig latin. +*/ \ No newline at end of file diff --git a/llamascript/__init__.py b/llamascript/__init__.py index c8db4ab..1db1058 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -1,6 +1,5 @@ __version__ = "1.0.0" -import asyncio import ollama import logging import sys @@ -34,6 +33,12 @@ def warning(message): ) +def info(message): + print( + f"{colorama.Fore.GREEN}{colorama.Style.BRIGHT}[INFO]{colorama.Style.RESET_ALL} {message}" + ) + + # Set up logging logging.basicConfig(level=logging.WARNING) @@ -54,6 +59,8 @@ def tokenize(self, text): ("COMMA", r","), # Comma ("NEWLINE", r"\n"), # Line endings ("SKIP", r"[ \t]+"), # Skip over spaces and tabs + ("SLC", r"//.*"), # Single-line comment e.g., // This is a comment + ("MLC", r"/\*(.|\n)*?\*/"), # Multi-line comment e.g., /* This is a comment */ ("MISMATCH", r"."), # Any other character ] tok_regex = "|".join("(?P<%s>%s)" % pair for pair in token_specification) @@ -69,6 +76,8 @@ def tokenize(self, text): self.tokens.append(("NEWLINE", value)) elif kind == "SKIP": continue + elif kind in ("SLC", "MLC"): + continue elif kind == "MISMATCH": error(f"Invalid character: {value}") sys.exit(1) @@ -121,7 +130,10 @@ def arguments(self): self.current < len(self.tokens) and self.tokens[self.current][0] != "RPAREN" ): token = self.tokens[self.current] - if token[0] in {"STRING", "NUMBER"}: + if token[0] == "STRING": + args.append(token[1][1:-1]) # Strip surrounding quotes + self.current += 1 + elif token[0] == "NUMBER": args.append(token[1]) self.current += 1 elif token[0] == "COMMA": self.current += 1 # Skip comma @@ -249,7 +261,7 @@ def create_model(self, filename, parameters, attributes): stderr=subprocess.DEVNULL, ) stdout, stderr = process.communicate() - print("Model created.") + info("Model created.") if process.returncode != 0: if stderr: From cfd17fe5c5de090ca0c17b034974369d09b62031 Mon Sep 17 00:00:00 2001 From: zanderlewis <158775116+zanderlewis@users.noreply.github.com> Date: Tue, 7 Jan 2025 20:54:13 +0000 Subject: [PATCH 23/31] :art: Format Python code with psf/black --- llamascript/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/llamascript/__init__.py b/llamascript/__init__.py index 1db1058..d9643b8 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -60,7 +60,10 @@ def tokenize(self, text): ("NEWLINE", r"\n"), # Line endings ("SKIP", r"[ \t]+"), # Skip over spaces and tabs ("SLC", r"//.*"), # Single-line comment e.g., // This is a comment - ("MLC", r"/\*(.|\n)*?\*/"), # Multi-line comment e.g., /* This is a comment */ + ( + "MLC", + r"/\*(.|\n)*?\*/", + ), # Multi-line comment e.g., /* This is a comment */ ("MISMATCH", r"."), # Any other character ] tok_regex = "|".join("(?P<%s>%s)" % pair for pair in token_specification) From 7fc53f8d38135c3a59b7b13d9fb363f09bef58cf Mon Sep 17 00:00:00 2001 From: Zander Date: Tue, 7 Jan 2025 17:03:11 -0500 Subject: [PATCH 24/31] Small fixes --- README.md | 18 +++++++++--------- llamascript/__init__.py | 27 ++++++++++++--------------- 2 files changed, 21 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index a71317c..bff66a2 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ LlamaScript is a no-code AI chatbot using Ollama. 
- [Installation](#installation) - [Usage](#usage) - [License](#license) -- [Examples](examples/) +- [Examples](https://github.com/Project-Llama/llamascript/blob/main/examples/) ## Installation @@ -29,13 +29,13 @@ pip install llamascript ``` ## Usage -To use LlamaScript, create a `.llama` file and write your script. Here are the commands you can use: +To use LlamaScript, create a `.llama` file and write your script. Here are a few functions you can use: ```llamascript -use(...) # Specify the model to use -prompt(...) # Prompt the user for input -system(...) # System message for the AI -chat(...) # Chat with the user -save(...) # Save the model +use(...) // Specify the model to use +prompt(...) // Prompt the user for input +system(...) // System message for the AI +chat(...) // Chat with the user +save(...) // Save the model ``` Here's an example: @@ -46,7 +46,7 @@ chat() ``` > [!NOTE]\ -> For more examples see [here.](examples/) +> For more examples see [here.](https://github.com/Project-Llama/llamascript/blob/main/examples/) You can then run LlamaScript with the following command: ```bash @@ -54,4 +54,4 @@ llamascript myscript.llama ``` ## License -LlamaScript is licensed under the Apache 2.0 License. See [LICENSE](LICENSE) for more information. +LlamaScript is licensed under the Apache 2.0 License. See [LICENSE](https://github.com/Project-Llama/llamascript/blob/main/LICENSE) for more information. diff --git a/llamascript/__init__.py b/llamascript/__init__.py index d9643b8..d806e75 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -50,21 +50,18 @@ def __init__(self, input_text): def tokenize(self, text): token_specification = [ - ("ATTRIBUTE", r"#\[(.*?)\]"), # Attributes e.g., #[stream(true)] - ("NUMBER", r"\d+(\.\d*)?"), # Integer or decimal number - ("STRING", r"\".*?\""), # String literals - ("ID", r"[A-Za-z_][A-Za-z0-9_]*"), # Identifiers - ("LPAREN", r"\("), # Left parenthesis - ("RPAREN", r"\)"), # Right parenthesis - ("COMMA", r","), # Comma - ("NEWLINE", r"\n"), # Line endings - ("SKIP", r"[ \t]+"), # Skip over spaces and tabs - ("SLC", r"//.*"), # Single-line comment e.g., // This is a comment - ( - "MLC", - r"/\*(.|\n)*?\*/", - ), # Multi-line comment e.g., /* This is a comment */ - ("MISMATCH", r"."), # Any other character + ("ATTRIBUTE", r"#\[(.*?)\]"), + ("NUMBER", r"\d+(\.\d*)?"), + ("STRING", r"\".*?\""), + ("ID", r"[A-Za-z_][A-Za-z0-9_]*"), + ("LPAREN", r"\("), + ("RPAREN", r"\)"), + ("COMMA", r","), + ("NEWLINE", r"\n"), + ("SKIP", r"[ \t]+"), + ("SLC", r"//.*"), + ("MLC", r"/\*(.|\n)*?\*/"), + ("MISMATCH", r"."), ] tok_regex = "|".join("(?P<%s>%s)" % pair for pair in token_specification) for mo in re.finditer(tok_regex, text): From cf8e844289693ed4a9f4e6a2c3f7a9dd9ce75c26 Mon Sep 17 00:00:00 2001 From: Zander Date: Wed, 8 Jan 2025 09:35:14 -0500 Subject: [PATCH 25/31] Add input for prompt --- README.md | 2 +- examples/chatbot.llama | 3 ++- llamascript/__init__.py | 7 +++++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index bff66a2..313cde9 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ [![Upload to PyPi](https://github.com/Project-Llama/llamascript/actions/workflows/python-publish.yml/badge.svg)](https://github.com/Project-Llama/llamascript/actions/workflows/python-publish.yml) [![CodeQL](https://github.com/Project-Llama/llamascript/actions/workflows/github-code-scanning/codeql/badge.svg)](https://github.com/Project-Llama/llamascript/actions/workflows/github-code-scanning/codeql) -[![VS Code Extension 
Downloads](https://img.shields.io/visual-studio-marketplace/d/WolfTheDev.llamascript?label=VS-Code%20Downloads)](https://marketplace.visualstudio.com/items?itemName=WolfTheDev.llamascript) +[![VS Code Extension Downloads](https://img.shields.io/visual-studio-marketplace/d/zanderlewis.llamascript?label=VS-Code%20Downloads)](https://marketplace.visualstudio.com/items?itemName=zanderlewis.llamascript) ![GitHub commit activity](https://img.shields.io/github/commit-activity/w/Project-Llama/llamascript?label=Commits) ![GitHub License](https://img.shields.io/github/license/Project-Llama/llamascript?label=License) diff --git a/examples/chatbot.llama b/examples/chatbot.llama index 3ead331..07b6f3f 100644 --- a/examples/chatbot.llama +++ b/examples/chatbot.llama @@ -1,5 +1,6 @@ use("piglatin") // Load the Piglatin module -prompt("Why is the sky blue?") /* Ask a question */ +#[input(true)] +prompt("Input: ") /* Ask a question */ // The chatbot will respond in pig latin chat() diff --git a/llamascript/__init__.py b/llamascript/__init__.py index d806e75..01cad98 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -200,8 +200,11 @@ def use(self, model_name, _): self.model = model_name.strip('"') debug(f"Using model: {self.model}") - def prompt(self, prompt_text, _): - self.data = prompt_text + def prompt(self, prompt_text, attributes): + if "input" in attributes: + self.data = input(prompt_text) + else: + self.data = prompt_text debug(f"Prompt set to: {self.data}") def system_command(self, system_content, _): From 98cf3d11e694d7c53fc73815717a3ae5aedb2d8a Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Wed, 8 Jan 2025 09:37:04 -0500 Subject: [PATCH 26/31] Fix version --- llamascript/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llamascript/__init__.py b/llamascript/__init__.py index 01cad98..fcbfba7 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -1,4 +1,4 @@ -__version__ = "1.0.0" +__version__ = "1.0.1" import ollama import logging From 676ef0cd9e10246a160ce127fe3ad3eb180769d0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 09:02:21 +0000 Subject: [PATCH 27/31] Bump ollama from 0.4.5 to 0.4.6 Bumps [ollama](https://github.com/ollama/ollama-python) from 0.4.5 to 0.4.6. - [Release notes](https://github.com/ollama/ollama-python/releases) - [Commits](https://github.com/ollama/ollama-python/compare/v0.4.5...v0.4.6) --- updated-dependencies: - dependency-name: ollama dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 10feae8..c3b7b8f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -ollama==0.4.5 +ollama==0.4.6 colorama==0.4.6 \ No newline at end of file From f3039485dee5c8055b82aae0e3c35d3eb89d1e0f Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Thu, 16 Jan 2025 10:43:53 -0500 Subject: [PATCH 28/31] Add input macro to system function Fixes #47 This PR adds the feature of using the input macro with the system function like so: ```llamascript use("llama3.2") prompt("Say 'Hello, World!'") #[input(true)] system("What should the AI do? 
") chat() ``` --- llamascript/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/llamascript/__init__.py b/llamascript/__init__.py index fcbfba7..a619826 100644 --- a/llamascript/__init__.py +++ b/llamascript/__init__.py @@ -207,7 +207,9 @@ def prompt(self, prompt_text, attributes): self.data = prompt_text debug(f"Prompt set to: {self.data}") - def system_command(self, system_content, _): + def system_command(self, system_content, attributes): + if "input" in attributes: + system_content = input(system_content) self.system = [{"role": "system", "content": system_content}] debug(f"System command set.") From a4c26d4c6b97a2faa082c788483bb935021ab836 Mon Sep 17 00:00:00 2001 From: Zander Lewis Date: Thu, 16 Jan 2025 10:46:39 -0500 Subject: [PATCH 29/31] Update format.yml --- .github/workflows/format.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index baf5f7d..7086216 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -12,8 +12,15 @@ jobs: id: action_black with: black_args: "." + - name: Commit formatted code + if: steps.action_black.outputs.is_formatted == 'true' && github.event_name == 'pull_request' + run: | + git config --global user.name "github-actions[bot]" + git config --global user.email "github-actions[bot]@users.noreply.github.com" + git commit -am ":art: Format Python code with psf/black" + git push - name: Create Pull Request - if: steps.action_black.outputs.is_formatted == 'true' + if: steps.action_black.outputs.is_formatted == 'true' && github.event_name != 'pull_request' uses: peter-evans/create-pull-request@v6 with: token: ${{ secrets.GITHUB_TOKEN }} From 988a9e361ccaee7ab8f993022419d6fcaadc5288 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 08:56:11 +0000 Subject: [PATCH 30/31] Bump ollama from 0.4.6 to 0.4.7 Bumps [ollama](https://github.com/ollama/ollama-python) from 0.4.6 to 0.4.7. - [Release notes](https://github.com/ollama/ollama-python/releases) - [Commits](https://github.com/ollama/ollama-python/compare/v0.4.6...v0.4.7) --- updated-dependencies: - dependency-name: ollama dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c3b7b8f..307b417 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -ollama==0.4.6 +ollama==0.4.7 colorama==0.4.6 \ No newline at end of file From 15ac3ab81e61859241ed0b66a232ab57ce9b2210 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 17 Apr 2025 08:32:35 +0000 Subject: [PATCH 31/31] Bump ollama from 0.4.7 to 0.4.8 Bumps [ollama](https://github.com/ollama/ollama-python) from 0.4.7 to 0.4.8. - [Release notes](https://github.com/ollama/ollama-python/releases) - [Commits](https://github.com/ollama/ollama-python/compare/v0.4.7...v0.4.8) --- updated-dependencies: - dependency-name: ollama dependency-version: 0.4.8 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 307b417..e53497f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -ollama==0.4.7 +ollama==0.4.8 colorama==0.4.6 \ No newline at end of file
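Taken together, the patches above settle the LlamaScript 1.0 surface: function-call statements, `//` and `/* ... */` comments, and `#[...]` attribute macros. As a quick reference, here is a minimal sketch of a `.llama` script exercising those pieces; the model name `llama3.2` is only a placeholder for any locally pulled Ollama model, and streaming still prints the work-in-progress warning shown earlier.

```llamascript
use("llama3.2") // Select a locally available Ollama model

#[input(true)]
system("Enter a system prompt: ") /* Read the system message from stdin (patch 28) */

#[input(true)]
prompt("Enter your question: ") // Read the user prompt from stdin (patch 25)

#[stream(true)]
chat() // Stream the response as it arrives
```

Run it with `llamascript myscript.llama`; the `-d`/`--debug` flag added above enables the `[DEBUG]` log lines.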