Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
894 changes: 543 additions & 351 deletions package-lock.json

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion packages/insomnia/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,7 @@
"vite": "^7.1.3"
},
"optionalDependencies": {
"@kong/insomnia-plugin-ai": "^1.0.7",
"@kong/insomnia-plugin-ai": "^1.0.9",
"@kong/insomnia-plugin-external-vault": "0.1.4-dev.20251224090833"
},
"dev": {
Expand Down
2 changes: 1 addition & 1 deletion packages/insomnia/src/common/constants.ts
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ export const CHECK_FOR_UPDATES_INTERVAL = 1000 * 60 * 60 * 24;

export const ACCEPTED_NODE_CA_FILE_EXTS = ['.pem', '.crt', '.cer', '.p12'];

export const LLM_BACKENDS = ['gguf', 'claude', 'openai', 'gemini'] as const;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

https://github.com/Kong/insomnia/blob/develop/packages/insomnia/src/plugins/types.ts#L3-L25 is used in a few places and is missing the new backend type, along with the url field. Right now, the generate commit diffs and mcp sampling both use this interface and would likely fail if the url backend were configured

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good catch. Let me update this interface, along with some other fields in this file, to reflect what the LLM URL option can support.

export const LLM_BACKENDS = ['gguf', 'claude', 'openai', 'gemini', 'url'] as const;

// Available editor key map
export enum EditorKeyMap {
Expand Down
235 changes: 235 additions & 0 deletions packages/insomnia/src/main/__tests__/llm-config-service.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,235 @@
// Unit tests for the main-process llm-config-service, with a focus on the new
// 'url' backend and its url/baseURL fields. Persistence is backed by the
// models.pluginData store (mocked below), so tests stub reads (all/getByKey)
// and assert on the key/value writes (upsertByKey/removeByKey) instead of
// touching real disk state.
import { beforeEach, describe, expect, it, vi } from 'vitest';

import * as models from '../../models';
import {
  clearActiveBackend,
  getActiveBackend,
  getAllConfigurations,
  getBackendConfig,
  setActiveBackend,
  updateBackendConfig,
} from '../llm-config-service';

// Replace the pluginData model with spies. NOTE: vi.mock calls are hoisted
// above the imports by vitest, so these factories must stay self-contained
// (no references to variables declared in this file).
vi.mock('../../models', () => ({
  pluginData: {
    getByKey: vi.fn(),
    upsertByKey: vi.fn(),
    removeByKey: vi.fn(),
    all: vi.fn(),
  },
}));

// The service module pulls in Electron APIs at import time; stub just the
// pieces it needs (app.getPath, net.fetch) so it can load under plain Node.
vi.mock('electron', () => ({
  app: {
    getPath: vi.fn(() => '/mock/user/data'),
  },
  net: {
    fetch: vi.fn(() => Promise.resolve({ ok: true })),
  },
}));

// Sentry is initialized in the main process; neutralize it for tests.
vi.mock('@sentry/electron/main', () => ({
  init: vi.fn(),
  captureException: vi.fn(),
  captureMessage: vi.fn(),
}));

// Minimal stand-in for a persisted pluginData row ({ key, value }).
// Keys follow the service's `${backend}.${field}` convention.
const mockPluginData = (key: string, value: string) => ({ key, value }) as any;

describe('llm-config-service', () => {
  beforeEach(() => {
    // Reset call history and stubbed return values between tests.
    vi.clearAllMocks();
  });

  // Reading a single backend's config back out of the key/value store.
  describe('getBackendConfig()', () => {
    it('should retrieve url field from storage', async () => {
      vi.mocked(models.pluginData.all).mockResolvedValue([
        mockPluginData('url.model', 'gpt-4'),
        mockPluginData('url.url', 'https://api.example.com/v1'),
      ]);

      const config = await getBackendConfig('url');

      expect(config).toEqual({
        backend: 'url',
        model: 'gpt-4',
        url: 'https://api.example.com/v1',
      });
    });

    it('should retrieve baseURL field from storage', async () => {
      vi.mocked(models.pluginData.all).mockResolvedValue([
        mockPluginData('url.model', 'claude-3'),
        mockPluginData('url.baseURL', 'https://custom-llm.com'),
      ]);

      const config = await getBackendConfig('url');

      expect(config).toEqual({
        backend: 'url',
        model: 'claude-3',
        baseURL: 'https://custom-llm.com',
      });
    });

    // url and baseURL are distinct fields and must not overwrite each other.
    it('should handle both url and baseURL fields', async () => {
      vi.mocked(models.pluginData.all).mockResolvedValue([
        mockPluginData('url.url', 'https://api.example.com/v1'),
        mockPluginData('url.baseURL', 'https://base.example.com'),
        mockPluginData('url.model', 'test-model'),
      ]);

      const config = await getBackendConfig('url');

      expect(config.url).toBe('https://api.example.com/v1');
      expect(config.baseURL).toBe('https://base.example.com');
      expect(config.model).toBe('test-model');
    });

    // With no stored rows, only the backend discriminator comes back.
    it('should return empty config for unconfigured backend', async () => {
      vi.mocked(models.pluginData.all).mockResolvedValue([]);

      const config = await getBackendConfig('url');

      expect(config).toEqual({
        backend: 'url',
      });
    });
  });

  // Writing config fields into the store under the 'insomnia-llm' plugin key.
  describe('updateBackendConfig()', () => {
    it('should save url field to storage', async () => {
      await updateBackendConfig('url', {
        url: 'https://api.example.com/v1',
        model: 'gpt-4',
      });

      expect(models.pluginData.upsertByKey).toHaveBeenCalledWith(
        'insomnia-llm',
        'url.url',
        'https://api.example.com/v1'
      );
      expect(models.pluginData.upsertByKey).toHaveBeenCalledWith(
        'insomnia-llm',
        'url.model',
        'gpt-4'
      );
    });

    it('should save baseURL field to storage', async () => {
      await updateBackendConfig('url', {
        baseURL: 'https://custom-llm.com',
        model: 'claude-3',
      });

      expect(models.pluginData.upsertByKey).toHaveBeenCalledWith(
        'insomnia-llm',
        'url.baseURL',
        'https://custom-llm.com'
      );
    });

    // Only the supplied fields are written — no spurious upserts for
    // fields absent from the partial config.
    it('should handle partial config updates', async () => {
      await updateBackendConfig('url', {
        url: 'https://new-url.com/v1',
      });

      expect(models.pluginData.upsertByKey).toHaveBeenCalledWith(
        'insomnia-llm',
        'url.url',
        'https://new-url.com/v1'
      );
      expect(models.pluginData.upsertByKey).toHaveBeenCalledTimes(1);
    });

    // The 'backend' discriminator is derived, not persisted; it must never
    // be written as a 'url.backend' row.
    it('should not save backend field', async () => {
      await updateBackendConfig('url', {
        backend: 'url',
        url: 'https://api.example.com/v1',
      });

      const calls = vi.mocked(models.pluginData.upsertByKey).mock.calls;
      const backendFieldCall = calls.find(call => call[1] === 'url.backend');
      expect(backendFieldCall).toBeUndefined();
    });
  });

  // Listing every configured backend; a backend counts as "configured" when
  // it has a model, apiKey, or url value stored.
  describe('getAllConfigurations()', () => {
    it('should include url backend in configurations', async () => {
      vi.mocked(models.pluginData.all).mockResolvedValue([
        mockPluginData('url.model', 'gpt-4'),
        mockPluginData('url.url', 'https://api.example.com/v1'),
        mockPluginData('gguf.model', 'llama-3'),
      ]);

      const configs = await getAllConfigurations();

      const urlConfig = configs.find(c => c.backend === 'url');
      expect(urlConfig).toBeDefined();
      expect(urlConfig?.url).toBe('https://api.example.com/v1');
      expect(urlConfig?.model).toBe('gpt-4');
    });

    it('should filter out unconfigured backends', async () => {
      vi.mocked(models.pluginData.all).mockResolvedValue([
        mockPluginData('claude.model', 'claude-3-opus'),
        mockPluginData('claude.apiKey', 'sk-ant-123'),
      ]);

      const configs = await getAllConfigurations();

      // Should only return claude since it's the only one configured
      expect(configs).toHaveLength(1);
      expect(configs[0].backend).toBe('claude');
    });

    // A url backend with no model is still "configured" — the url field
    // alone is enough to pass the filter.
    it('should include backend with only url field set', async () => {
      vi.mocked(models.pluginData.all).mockResolvedValue([
        mockPluginData('url.url', 'https://api.example.com/v1'),
      ]);

      const configs = await getAllConfigurations();

      const urlConfig = configs.find(c => c.backend === 'url');
      expect(urlConfig).toBeDefined();
      expect(urlConfig?.url).toBe('https://api.example.com/v1');
    });
  });

  // The active backend is tracked under a single 'model.active' key.
  describe('Active backend management', () => {
    it('should set url as active backend', async () => {
      await setActiveBackend('url');

      expect(models.pluginData.upsertByKey).toHaveBeenCalledWith(
        'insomnia-llm',
        'model.active',
        'url'
      );
    });

    it('should get url as active backend', async () => {
      vi.mocked(models.pluginData.getByKey).mockResolvedValue({ value: 'url' } as any);

      const active = await getActiveBackend();

      expect(active).toBe('url');
    });

    // Missing row maps to an explicit null, not undefined.
    it('should return null when no active backend', async () => {
      vi.mocked(models.pluginData.getByKey).mockResolvedValue(undefined as any);

      const active = await getActiveBackend();

      expect(active).toBeNull();
    });

    it('should clear active backend', async () => {
      await clearActiveBackend();

      expect(models.pluginData.removeByKey).toHaveBeenCalledWith(
        'insomnia-llm',
        'model.active'
      );
    });
  });
});
19 changes: 10 additions & 9 deletions packages/insomnia/src/main/llm-config-service.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ import path from 'node:path';

import { app } from 'electron';

import type { LLM_BACKENDS } from '~/common/constants';
import { LLM_BACKENDS } from '~/common/constants';
import { SegmentEvent, trackSegmentEvent } from '~/main/analytics';
import { ipcMainHandle } from '~/main/ipc/electron';

Expand All @@ -17,6 +17,8 @@ export interface LLMConfig {
model: string;
modelDir?: string;
apiKey?: string;
url?: string;
baseURL?: string;
temperature?: number;
topP?: number;
topK?: number;
Expand Down Expand Up @@ -51,7 +53,9 @@ export const getBackendConfig = async (backend: LLMBackend): Promise<Partial<LLM

switch (field) {
case 'model':
case 'apiKey': {
case 'apiKey':
case 'url':
case 'baseURL': {
config[field] = value;
break;
}
Expand Down Expand Up @@ -91,9 +95,8 @@ export const updateBackendConfig = async (backend: LLMBackend, config: Partial<L
};

export const getAllConfigurations = async (): Promise<LLMConfig[]> => {
const backends: LLMBackend[] = ['gguf', 'claude', 'openai', 'gemini'];
const configs = await Promise.all(
backends.map(
LLM_BACKENDS.map(
async backend =>
({
...(await getBackendConfig(backend)),
Expand All @@ -102,7 +105,7 @@ export const getAllConfigurations = async (): Promise<LLMConfig[]> => {
),
);

return configs.filter(config => config.model || config.apiKey);
return configs.filter(config => config.model || config.apiKey || config.url);
};

export const getCurrentConfig = async (): Promise<LLMConfig | null> => {
Expand Down Expand Up @@ -149,10 +152,8 @@ export const registerLLMConfigServiceAPI = () => {
);
ipcMainHandle('llm.getAllConfigurations', async () => getAllConfigurations());
ipcMainHandle('llm.getCurrentConfig', async () => getCurrentConfig());
ipcMainHandle('llm.getAIFeatureEnabled', async (_, feature: 'aiMockServers' | 'aiCommitMessages') =>
getAIFeatureEnabled(feature),
);
ipcMainHandle('llm.setAIFeatureEnabled', async (_, feature: 'aiMockServers' | 'aiCommitMessages', enabled: boolean) =>
ipcMainHandle('llm.getAIFeatureEnabled', async (_, feature: AIFeatureNames) => getAIFeatureEnabled(feature));
ipcMainHandle('llm.setAIFeatureEnabled', async (_, feature: AIFeatureNames, enabled: boolean) =>
setAIFeatureEnabled(feature, enabled),
);
};
16 changes: 9 additions & 7 deletions packages/insomnia/src/main/network/mcp.ts
Original file line number Diff line number Diff line change
Expand Up @@ -373,13 +373,15 @@ const performConnection = async (context: ConnectionContext) => {
mcpClient.setNotificationHandler(CancelledNotificationSchema, notification => {
const serverRequestId = notification.params.requestId;
// handle server request cancellation
if (mcpServerElicitationRequests.has(serverRequestId)) {
console.log('Received server request cancellation notification for elicitation request', serverRequestId);
mcpServerElicitationRequests.delete(serverRequestId);
}
if (mcpServerSamplingRequests.has(serverRequestId)) {
console.log('Received server request cancellation notification for sampling request', serverRequestId);
mcpServerSamplingRequests.delete(serverRequestId);
if (serverRequestId !== undefined) {
if (mcpServerElicitationRequests.has(serverRequestId)) {
console.log('Received server request cancellation notification for elicitation request', serverRequestId);
mcpServerElicitationRequests.delete(serverRequestId);
}
if (mcpServerSamplingRequests.has(serverRequestId)) {
console.log('Received server request cancellation notification for sampling request', serverRequestId);
mcpServerSamplingRequests.delete(serverRequestId);
}
}
});
const originClientRequest = mcpClient.request.bind(mcpClient);
Expand Down
9 changes: 5 additions & 4 deletions packages/insomnia/src/plugins/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,20 +3,21 @@
export interface ModelConfig {
// ModelBackendConfig
model: string;
backend: 'gguf' | 'claude' | 'openai' | 'gemini';
backend: 'gguf' | 'claude' | 'openai' | 'gemini' | 'url';
maxTokens?: number;

apiKey?: string; // gemini, openai, claude

// openai
// openai, url
baseURL?: string;
url?: string;
organization?: string;

// openai, gemini, gguf
// openai, gemini, url, gguf
topP?: number;
temperature?: number;

// gguf, gemini
// gguf, gemini, url
topK?: number;

// gguf
Expand Down
6 changes: 4 additions & 2 deletions packages/insomnia/src/ui/components/mcp/event-view.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -92,8 +92,10 @@ export const MessageEventView = ({ event }: Props) => {
const getElicitationFormSchema = () => {
if (ElicitRequestSchema.safeParse(eventData).success) {
const parsedElicitRequest = ElicitRequestSchema.parse(eventData);
const requestSchema = parsedElicitRequest.params.requestedSchema;
return requestSchema as RJSFSchema;
if ('requestedSchema' in parsedElicitRequest.params) {
const requestSchema = parsedElicitRequest.params.requestedSchema;
return requestSchema as RJSFSchema;
}
}
return {};
};
Expand Down
Loading
Loading