Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 12 additions & 11 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
venv/
build/
*.egg-info/
*.egg-info/
sessions/
__pycache__/
.pytest_cache/
Expand All @@ -9,16 +9,17 @@ core/__pycache__/
node_modules/
codechat/tree-sitter-c
codechat/tree-sitter-swift
codechat/__pycache__
codechat/events/__pycache__
codechat/domain/__pycache__
codechat/data/__pycache__
codechat/interface/__pycache__
tests/core/__pycache__
tests/interface/__pycache__
tests/data/__pycache__
tests/domain/__pycache__
tests/events/__pycache__
codechat/__pycache__/
codechat/events/__pycache__/
codechat/domain/__pycache__/
codechat/data/__pycache__/
codechat/interface/__pycache__/
tests/core/__pycache__/
tests/interface/__pycache__/
tests/data/__pycache__/
tests/domain/__pycache__/
tests/events/__pycache__/
tests/__pycache__/
dist/
codez_cli.egg-info/
vendor/java-tree-sitter
Expand Down
6 changes: 6 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,12 @@ Whether you're learning a new language, need a quick code snippet, or want to un
* 🌐 **Web-Savvy (Optional):** Enable a web search tool to pull in external knowledge.
* 🧐 **Code Analysis (Swift/C):** Uses `tree-sitter` for deeper insights into Swift and C codebases (requires a small helper library).

## There's More:
- **Model Management**: Easily switch between local LLMs using `/models` or `/model` commands at any time.
- **Automatic Model Download**: If no Ollama models are found, the CLI will prompt you to download and run the default model (`qwen2.5-coder:1.5b-instruct`).
- **Session Memory**: Save, resume, and manage your coding conversations.
- **Extensible & Open Source**: Built for privacy, hackability, and your workflow.

---

## 🚀 Getting Started: Your First Chat in Minutes! 🚀
Expand Down
24 changes: 1 addition & 23 deletions codechat/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,32 +3,10 @@
"""

import typer
from core import repl, summarizer
from core import repl

app = typer.Typer()

@app.command("explain")
def explain(path: str, function: str = None):
    """
    Explain the content of a file or a specific function within that file.

    Args:
        path (str): The file path to explain.
        function (str, optional): The specific function name to explain. If None, explain the whole file.

    This function calls the summarizer to generate a detailed explanation,
    handles errors gracefully, and outputs the result to the CLI.
    All outcomes (success, empty result, missing file, unexpected error)
    are reported via typer.echo rather than raising to the caller.
    """
    try:
        # NOTE(review): summarizer.explain is defined elsewhere in the project;
        # presumably returns a string or a falsy value when nothing could be
        # generated — confirm against core/summarizer.
        result = summarizer.explain(path, function)
        if not result:
            # Falsy result (None/empty) means no explanation was produced.
            typer.echo("No explanation could be generated. Please check the file path and function name.")
        else:
            typer.echo(result)
    except FileNotFoundError:
        typer.echo(f"Error: File not found at path '{path}'. Please provide a valid file path.")
    except Exception as e:
        # Broad catch so a summarizer failure surfaces as a CLI message
        # instead of a traceback.
        typer.echo(f"An unexpected error occurred: {e}")

@app.command("chat")
def chat():
Expand Down
55 changes: 55 additions & 0 deletions core/io_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
"""
Input/output utilities for CodeZ CLI REPL.
"""
from rich.console import Console
from rich.syntax import Syntax
from rich.panel import Panel
from rich.markdown import Markdown
from rich.text import Text

def print_code_snippet(snippet: str, language: str = ""):
    """Render *snippet* in a Rich panel, syntax-highlighted when possible.

    An empty ``language`` is treated as plain ``"text"``. If Rich fails to
    build or render the highlighted view (e.g. unknown lexer), the snippet
    is shown in a plain panel instead.
    """
    out = Console()
    lang = language or "text"
    try:
        highlighted = Syntax(
            snippet,
            lang,
            theme="monokai",
            line_numbers=False,
            word_wrap=True,
        )
        out.print(Panel(highlighted, title="Code Snippet"))
    except Exception:
        # Highlighting failed — fall back to an unhighlighted panel.
        out.print(Panel(snippet, title="Snippet"))

def multiline_code_input(prompt_session=None):
    """Interactively collect a fenced code block and return it as one string.

    The user opens the block with a line containing only ``` , enters the
    code, and closes it with another ``` line. Anything typed before the
    opening fence is discarded. Input comes from ``prompt_session``
    (prompt_toolkit) when provided, otherwise from Rich's console input.
    """
    instruction_text = Markdown("""\
Enter your code snippet below.
- Type ` ``` ` on a new line to **start** the block.
- Paste or type your code.
- Use **Shift+Enter** for new lines within the block if using the default console input.
- Type ` ``` ` on a new line again to **finish** the block.
""")
    console = Console()
    console.print(Panel(instruction_text, title="[bold cyan]Multiline Code Input[/bold cyan]", border_style="cyan", expand=False))

    collected = []
    inside_fence = False

    while True:
        if prompt_session:
            # prompt_toolkit prompts don't understand Rich console markup,
            # so the session branch uses a plain-text prompt string.
            entry = prompt_session.prompt(">>> (code) ", multiline=True)
        else:
            entry = console.input("[bold sky_blue1]> (code)[/bold sky_blue1] ")

        if entry.strip() == "```":
            if inside_fence:
                break  # closing fence — we're done
            inside_fence = True  # opening fence — start collecting
            continue
        if inside_fence:
            collected.append(entry)

    return "\n".join(collected)
40 changes: 39 additions & 1 deletion core/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,13 @@
import re

OLLAMA_GITHUB_URL = "https://github.com/ollama/ollama"
DEFAULT_MODEL = "qwen2.5-coder:1.5b-instruct"

def query_ollama(prompt: str, model: str = "deepseek-r1:latest"):
def query_ollama(prompt: str, model: str = DEFAULT_MODEL):
"""
Query the Ollama LLM with the given prompt and model.
You can change the model at any time using the /models or /model command in the CLI.
"""
result = subprocess.run(
["ollama", "run", model, prompt],
capture_output=True,
Expand All @@ -17,6 +22,10 @@ def query_ollama(prompt: str, model: str = "deepseek-r1:latest"):
return output

def get_ollama_models():
"""
Returns a tuple: (list_of_models, error_message)
If no models are found, prompts the user to download the default model.
"""
try:
result = subprocess.run([
"ollama", "list"
Expand All @@ -27,9 +36,38 @@ def get_ollama_models():
for line in result.stdout.strip().split('\n'):
if line and not line.startswith('NAME'):
models.append(line.split()[0])
# If no models found, prompt user to download the default model
if not models:
print(
f"[CodeZ CLI] No Ollama models found on your system.\n"
f"Would you like to download and run the default model ([bold]{DEFAULT_MODEL}[/bold]) now? [y/N]"
)
try:
user_input = input("Download and run default model? [y/N]: ").strip().lower()
except EOFError:
user_input = "n"
if user_input in ["y", "yes"]:
print(f"[CodeZ CLI] Downloading and running default model: {DEFAULT_MODEL} ...")
try:
pull_result = subprocess.run(
["ollama", "run", DEFAULT_MODEL, "Hello!"],
capture_output=True,
text=True
)
if pull_result.returncode == 0:
print(f"[CodeZ CLI] Model '{DEFAULT_MODEL}' downloaded and ready.")
# Try listing again
return get_ollama_models()
else:
return None, f"Failed to download model '{DEFAULT_MODEL}'. Please try manually: ollama run {DEFAULT_MODEL}"
except Exception as e:
return None, f"Error downloading model '{DEFAULT_MODEL}': {e}"
else:
return None, f"No models found. Please download a model using: ollama run {DEFAULT_MODEL}"
return models, None
except FileNotFoundError:
return None, f"Ollama not found. Please install it from {OLLAMA_GITHUB_URL}"


def fetch_webpage(query, urls):
    """Stubbed web-search hook: no network access in this environment.

    Returns a dict with a single ``"content"`` key holding a simulated
    result message that echoes *query*. The ``urls`` argument is accepted
    for interface compatibility but is ignored.
    """
    simulated = (
        "[Web search not available in this environment. "
        "Simulated result for: {}]".format(query)
    )
    return {"content": simulated}
Loading