Skip to content

Commit

Permalink
feat: ask to download model
Browse files Browse the repository at this point in the history
  • Loading branch information
ex3ndr committed Dec 22, 2023
1 parent 1d22c2a commit 665b7f4
Show file tree
Hide file tree
Showing 3 changed files with 25 additions and 2 deletions.
5 changes: 5 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,10 +42,15 @@ Currently Llama Coder supports only Codellama. Model is quantized in different w
* m - slow on MacOS
* g - slow on older NVidia cards (pre 30xx)

## Troubleshooting

Most problems can be diagnosed from the plugin's log in the VS Code extension output panel.

## Changelog

## [0.0.10]
- Adding ability to pick a custom model
- Asking the user whether they want to download the model if it is not available

## [0.0.9]
- Adding deepseek 1b model and making it default
Expand Down
2 changes: 1 addition & 1 deletion src/extension.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ export function activate(context: vscode.ExtensionContext) {
context.subscriptions.push(statusBarItem);

// Create provider
const provider = new PromptProvider(statusBarItem);
const provider = new PromptProvider(statusBarItem, context);
let disposable = vscode.languages.registerInlineCompletionItemProvider({ pattern: '**', }, provider);
context.subscriptions.push(disposable);
}
Expand Down
20 changes: 19 additions & 1 deletion src/prompts/provider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,11 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {

lock = new AsyncLock();
statusbar: vscode.StatusBarItem;
context: vscode.ExtensionContext;

constructor(statusbar: vscode.StatusBarItem) {
constructor(statusbar: vscode.StatusBarItem, context: vscode.ExtensionContext) {
this.statusbar = statusbar;
this.context = context;
}

async provideInlineCompletionItems(document: vscode.TextDocument, position: vscode.Position, context: vscode.InlineCompletionContext, token: vscode.CancellationToken): Promise<vscode.InlineCompletionItem[] | vscode.InlineCompletionList | undefined | null> {
Expand Down Expand Up @@ -87,6 +89,22 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {

// Download model if not exists
if (!modelExists) {

// Check if user asked to ignore download
if (this.context.globalState.get('llama-coder-download-ignored')) {
info(`Ignoring since user asked to ignore download.`);
return;
}

// Ask for download
let download = await vscode.window.showInformationMessage(`Model ${inferenceConfig.modelName} is not downloaded. Do you want to download it? Answering "No" would require you to manually download model.`, 'Yes', 'No');
if (download === 'No') {
info(`Ignoring since user asked to ignore download.`);
this.context.globalState.update('llama-coder-download-ignored', true);
return;
}

// Perform download
this.statusbar.text = `$(sync~spin) Downloading`;
await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName);
this.statusbar.text = `$(sync~spin) Llama Coder`;
Expand Down

0 comments on commit 665b7f4

Please sign in to comment.