Merge pull request ex3ndr#43 from Sinan-Karakaya/main
FEAT: Added support for Bearer token in header for protected endpoints
ex3ndr authored Apr 7, 2024
2 parents d6aaa95 + f7bd142 commit 38544c1
Showing 8 changed files with 32 additions and 18 deletions.
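In short: every HTTP request the extension sends to the Ollama endpoint now carries an Authorization: Bearer <token> header whenever the new inference.bearerToken setting is non-empty. A minimal sketch of the pattern the diff applies (the helper name is illustrative; the commit inlines this logic at each fetch call site):

// Build fetch headers, attaching the bearer token only when one is configured.
// `buildHeaders` is a hypothetical helper for illustration, not part of the commit.
function buildHeaders(bearerToken: string): Record<string, string> {
    return bearerToken
        ? { 'Content-Type': 'application/json', Authorization: `Bearer ${bearerToken}` }
        : { 'Content-Type': 'application/json' };
}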
5 changes: 5 additions & 0 deletions package.json
@@ -76,6 +76,11 @@
         "description": "Ollama Server Endpoint. Empty for local instance. Example: http://192.168.0.100:11434",
         "order": 1
       },
+      "inference.bearerToken": {
+        "type": "string",
+        "default": "",
+        "description": "Auth Bearer token that should be used for secure requests. Leave empty if not desired."
+      },
       "inference.model": {
         "type": "string",
         "enum": [
2 changes: 2 additions & 0 deletions src/config.ts
@@ -15,6 +15,7 @@ class Config {
         if (endpoint === '') {
             endpoint = 'http://127.0.0.1:11434';
         }
+        let bearerToken = config.get('bearerToken') as string;

         // Load general paremeters
         let maxLines = config.get('maxLines') as number;
@@ -39,6 +40,7 @@

         return {
             endpoint,
+            bearerToken,
             maxLines,
             maxTokens,
             temperature,
18 changes: 10 additions & 8 deletions src/modules/lineGenerator.ts
@@ -1,14 +1,16 @@
-export async function* lineGenerator(url: string, data: any): AsyncGenerator<string> {
-
+export async function* lineGenerator(url: string, data: any, bearerToken: string): AsyncGenerator<string> {
     // Request
     const controller = new AbortController();
     let res = await fetch(url, {
-        method: 'POST',
-        body: JSON.stringify(data),
-        headers: {
-            "Content-Type": "application/json",
-        },
-        signal: controller.signal
+        method: 'POST',
+        body: JSON.stringify(data),
+        headers: bearerToken ? {
+            'Content-Type': 'application/json',
+            Authorization: `Bearer ${bearerToken}`,
+        } : {
+            'Content-Type': 'application/json',
+        },
+        signal: controller.signal,
     });
     if (!res.ok || !res.body) {
         throw Error('Unable to connect to backend');
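A note on the header construction above: repeating 'Content-Type' in both branches of the ternary works, but an object spread would keep the shared header in one place. A hypothetical equivalent, not what the commit ships:

// Same behavior as the ternary in lineGenerator.ts, written with a conditional spread.
let res = await fetch(url, {
    method: 'POST',
    body: JSON.stringify(data),
    headers: {
        'Content-Type': 'application/json',
        ...(bearerToken ? { Authorization: `Bearer ${bearerToken}` } : {}),
    },
    signal: controller.signal,
});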
9 changes: 6 additions & 3 deletions src/modules/ollamaCheckModel.ts
@@ -1,9 +1,12 @@
 import { info } from "./log";

-export async function ollamaCheckModel(endpoint: string, model: string) {
-
+export async function ollamaCheckModel(endpoint: string, model: string, bearerToken: string) {
     // Check if exists
-    let res = await fetch(endpoint + '/api/tags');
+    let res = await fetch(endpoint + '/api/tags', {
+        headers: bearerToken ? {
+            Authorization: `Bearer ${bearerToken}`,
+        } : {},
+    });
     if (!res.ok) {
         info(await res.text());
         info(endpoint + '/api/tags');
4 changes: 2 additions & 2 deletions src/modules/ollamaDownloadModel.ts
@@ -1,9 +1,9 @@
 import { lineGenerator } from "./lineGenerator";
 import { info } from "./log";

-export async function ollamaDownloadModel(endpoint: string, model: string) {
+export async function ollamaDownloadModel(endpoint: string, model: string, bearerToken: string) {
     info('Downloading model from ollama: ' + model);
-    for await (let line of lineGenerator(endpoint + '/api/pull', { name: model })) {
+    for await (let line of lineGenerator(endpoint + '/api/pull', { name: model }, bearerToken)) {
         info('[DOWNLOAD] ' + line);
     }
 }
4 changes: 2 additions & 2 deletions src/modules/ollamaTokenGenerator.ts
@@ -7,8 +7,8 @@ export type OllamaToken = {
     done: boolean
 };

-export async function* ollamaTokenGenerator(url: string, data: any): AsyncGenerator<OllamaToken> {
-    for await (let line of lineGenerator(url, data)) {
+export async function* ollamaTokenGenerator(url: string, data: any, bearerToken: string): AsyncGenerator<OllamaToken> {
+    for await (let line of lineGenerator(url, data, bearerToken)) {
         info('Receive line: ' + line);
         let parsed: OllamaToken;
         try {
3 changes: 2 additions & 1 deletion src/prompts/autocomplete.ts
@@ -5,6 +5,7 @@ import { ModelFormat, adaptPrompt } from './processors/models';

 export async function autocomplete(args: {
     endpoint: string,
+    bearerToken: string,
     model: string,
     format: ModelFormat,
     prefix: string,
@@ -33,7 +34,7 @@
     let res = '';
     let totalLines = 1;
     let blockStack: ('[' | '(' | '{')[] = [];
-    outer: for await (let tokens of ollamaTokenGenerator(args.endpoint + '/api/generate', data)) {
+    outer: for await (let tokens of ollamaTokenGenerator(args.endpoint + '/api/generate', data, args.bearerToken)) {
         if (args.canceled && args.canceled()) {
             break;
         }
5 changes: 3 additions & 2 deletions src/prompts/provider.ts
@@ -122,7 +122,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
             try {

                 // Check model exists
-                let modelExists = await ollamaCheckModel(inferenceConfig.endpoint, inferenceConfig.modelName);
+                let modelExists = await ollamaCheckModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
                 if (token.isCancellationRequested) {
                     info(`Canceled after AI completion.`);
                     return;
@@ -147,7 +147,7 @@

                     // Perform download
                     this.update('sync~spin', 'Downloading');
-                    await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName);
+                    await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
                     this.update('sync~spin', 'Llama Coder')
                 }
                 if (token.isCancellationRequested) {
@@ -161,6 +161,7 @@
                     prefix: prepared.prefix,
                     suffix: prepared.suffix,
                     endpoint: inferenceConfig.endpoint,
+                    bearerToken: inferenceConfig.bearerToken,
                     model: inferenceConfig.modelName,
                     format: inferenceConfig.modelFormat,
                     maxLines: inferenceConfig.maxLines,
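To sanity-check a protected endpoint with the same header the extension now sends, a standalone snippet along these lines should work (URL and token are placeholders; assumes a runtime with global fetch and top-level await):

// Query the Ollama tag list the way ollamaCheckModel now does (hypothetical values).
const endpoint = 'http://192.168.0.100:11434';
const bearerToken = 'YOUR_TOKEN';
const res = await fetch(endpoint + '/api/tags', {
    headers: bearerToken ? { Authorization: `Bearer ${bearerToken}` } : {},
});
console.log(res.ok ? await res.json() : `${res.status} ${await res.text()}`);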
