-
Notifications
You must be signed in to change notification settings - Fork 6
Gemini #100
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Gemini #100
Changes from all commits
4f5dfcc
710c834
ffefa0e
3a6d3d3
88e19e1
60298fe
e2831c3
04b3ecc
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -65,12 +65,13 @@ | |
"@jupyterlab/rendermime": "^4.4.0", | ||
"@jupyterlab/settingregistry": "^4.4.0", | ||
"@jupyterlab/ui-components": "^4.4.0", | ||
"@langchain/anthropic": "^0.3.9", | ||
"@langchain/community": "^0.3.44", | ||
"@langchain/core": "^0.3.57", | ||
"@langchain/mistralai": "^0.1.1", | ||
"@langchain/ollama": "^0.2.0", | ||
"@langchain/openai": "^0.4.4", | ||
"@langchain/anthropic": "^0.3.22", | ||
"@langchain/community": "^0.3.46", | ||
"@langchain/core": "^0.3.58", | ||
"@langchain/google-genai": "^0.2.12", | ||
"@langchain/mistralai": "^0.2.1", | ||
"@langchain/ollama": "^0.2.2", | ||
"@langchain/openai": "^0.5.13", | ||
"@lumino/coreutils": "^2.1.2", | ||
"@lumino/polling": "^2.1.2", | ||
"@lumino/signaling": "^2.1.2", | ||
|
@@ -115,6 +116,9 @@ | |
"typescript": "~5.8.3", | ||
"yjs": "^13.5.0" | ||
}, | ||
"resolutions": { | ||
"zod": "^3.25.56" | ||
}, | ||
Comment on lines
+119
to
+121
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @jtpio I didn't pay much attention to it, but do you think it should be kept? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Just pushed a new commit in #117 and it now seems to be building fine. But likely requires the update of the other langchain packages. |
||
"sideEffects": [ | ||
"style/*.css", | ||
"style/index.js" | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,61 @@ | ||
import { | ||
CompletionHandler, | ||
IInlineCompletionContext | ||
} from '@jupyterlab/completer'; | ||
import { ChatGoogleGenerativeAI } from '@langchain/google-genai'; | ||
import { AIMessage, SystemMessage } from '@langchain/core/messages'; | ||
|
||
import { BaseCompleter } from '../../base-completer'; | ||
|
||
export class GeminiCompleter extends BaseCompleter { | ||
constructor(options: BaseCompleter.IOptions) { | ||
super(options); | ||
this._completer = new ChatGoogleGenerativeAI({ | ||
model: 'gemini-pro', | ||
...options.settings | ||
}); | ||
} | ||
|
||
async fetch( | ||
request: CompletionHandler.IRequest, | ||
context: IInlineCompletionContext | ||
) { | ||
const { text, offset: cursorOffset } = request; | ||
const prompt = text.slice(0, cursorOffset); | ||
|
||
const trimmedPrompt = prompt.trim(); | ||
|
||
const messages = [ | ||
new SystemMessage(this.systemPrompt), | ||
new AIMessage(trimmedPrompt) | ||
]; | ||
|
||
try { | ||
const response = await this._completer.invoke(messages); | ||
const items = []; | ||
|
||
// Gemini can return string or complex content, a list of string/images/other. | ||
if (typeof response.content === 'string') { | ||
items.push({ | ||
insertText: response.content | ||
}); | ||
} else { | ||
response.content.forEach(content => { | ||
if (content.type !== 'text') { | ||
return; | ||
} | ||
items.push({ | ||
insertText: content.text, | ||
filterText: prompt.substring(trimmedPrompt.length) | ||
}); | ||
}); | ||
} | ||
return { items }; | ||
} catch (error) { | ||
console.error('Error fetching completions', error); | ||
return { items: [] }; | ||
} | ||
} | ||
|
||
protected _completer: ChatGoogleGenerativeAI; | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,9 @@ | ||
export default ` | ||
<i class="fas fa-exclamation-triangle"></i> This extension is still very much experimental. It is not an official Google extension. | ||
|
||
1. Go to <https://aistudio.google.com> and create an API key. | ||
|
||
2. Open the JupyterLab settings and go to the **Ai providers** section to select the \`Gemini\` | ||
provider and add your API key (required). | ||
3. Open the chat, or use the inline completer. | ||
`; |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,64 @@ | ||
{ | ||
"$schema": "http://json-schema.org/draft-07/schema#", | ||
"type": "object", | ||
"properties": { | ||
"temperature": { | ||
"type": "number", | ||
"description": "Amount of randomness injected into the response. Ranges from 0 to 1. Use temp closer to 0 for analytical / multiple choice, and temp closer to 1 for creative and generative tasks." | ||
}, | ||
"topK": { | ||
"type": "number", | ||
"description": "Only sample from the top K options for each subsequent token. Used to remove \"long tail\" low probability responses." | ||
}, | ||
"topP": { | ||
"type": "number", | ||
"description": "Nucleus sampling parameter. Only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation." | ||
}, | ||
"maxOutputTokens": { | ||
"type": "number", | ||
"description": "The maximum number of tokens to generate in the response." | ||
}, | ||
"stopSequences": { | ||
"type": "array", | ||
"items": { | ||
"type": "string" | ||
}, | ||
"description": "A list of strings upon which to stop generating. You probably want something like [\"\\n\\nHuman:\"] for chat conversations." | ||
}, | ||
"streaming": { | ||
"type": "boolean", | ||
"description": "Whether to stream the results or not" | ||
}, | ||
"apiKey": { | ||
"type": "string", | ||
"description": "Google AI Studio API key" | ||
}, | ||
"model": { | ||
"type": "string", | ||
"description": "Model name to use (e.g., gemini-pro, gemini-2.0-flash, etc.)", | ||
"default": "gemini-pro" | ||
}, | ||
"baseURL": { | ||
"type": "string", | ||
"description": "Base URL for the Google AI API" | ||
}, | ||
"safetySettings": { | ||
"type": "array", | ||
"description": "Safety settings for content filtering", | ||
"items": { | ||
"type": "object", | ||
"properties": { | ||
"category": { | ||
"type": "string" | ||
}, | ||
"threshold": { | ||
"type": "string" | ||
} | ||
} | ||
} | ||
} | ||
}, | ||
"additionalProperties": false, | ||
"description": "Input to Google Generative AI Chat class.", | ||
"definitions": {} | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The latest version of
@langchain/google-genai
seems to depend on newer releases of @langchain/core
, so bumping the other langchain dependencies here.