929 changes: 929 additions & 0 deletions docs/reference.md

Large diffs are not rendered by default.

10 changes: 6 additions & 4 deletions examples/chat-basic/server/app.js
@@ -68,8 +68,10 @@ app.get('/api/library-info', (req, res) => {

 app.listen(PORT, () => {
   console.log(`Server running on http://localhost:${PORT}`);
-  console.log(`Make sure to set your API key in environment variables:`);
-  console.log(` - OPENAI_API_KEY (for OpenAI)`);
-  console.log(` - ANTHROPIC_API_KEY (for Anthropic)`);
-  console.log(` - GEMINI_API_KEY (for Gemini)`);
+  if (!process.env.OPENAI_API_KEY && !process.env.ANTHROPIC_API_KEY && !process.env.GEMINI_API_KEY) {
+    console.log(`Make sure to set your API key in environment variables:`);
+    console.log(` - OPENAI_API_KEY (for OpenAI)`);
+    console.log(` - ANTHROPIC_API_KEY (for Anthropic)`);
+    console.log(` - GEMINI_API_KEY (for Gemini)`);
+  }
 });
17 changes: 17 additions & 0 deletions examples/playground/.vscode/launch.json
@@ -0,0 +1,17 @@
{
  "version": "0.2.0",
  "configurations": [
    {
      "type": "node",
      "request": "launch",
      "name": "Launch Server",
      "runtimeExecutable": "npm",
      "runtimeArgs": ["run", "dev"],
      "cwd": "${workspaceFolder}/examples/playground",
      "console": "integratedTerminal",
      "internalConsoleOptions": "neverOpen",
      "skipFiles": ["<node_internals>/**"]
    }
  ]
}

8 changes: 8 additions & 0 deletions examples/playground/.vscode/settings.json
@@ -0,0 +1,8 @@
{
  "files.associations": {
    "*.html": "html"
  },
  "liveServer.settings.port": 8080,
  "liveServer.settings.CustomBrowser": "default"
}

31 changes: 31 additions & 0 deletions examples/playground/.vscode/tasks.json
@@ -0,0 +1,31 @@
{
  "version": "2.0.0",
  "tasks": [
    {
      "label": "Start Dev Server",
      "type": "shell",
      "command": "npm run dev",
      "problemMatcher": [],
      "isBackground": true,
      "presentation": {
        "reveal": "always",
        "panel": "new"
      },
      "runOptions": {
        "runOn": "default"
      }
    },
    {
      "label": "Start Production Server",
      "type": "shell",
      "command": "npm start",
      "problemMatcher": [],
      "isBackground": true,
      "presentation": {
        "reveal": "always",
        "panel": "new"
      }
    }
  ]
}

90 changes: 90 additions & 0 deletions examples/playground/README.md
@@ -0,0 +1,90 @@
# AI Agent Playground - made with Resilient LLM

A simple playground to test and build with [`ResilientLLM`](https://github.com/gitcommitshow/resilient-llm).

![Demo Screenshot](./demo.jpg)

## Features

This project is a starting point for testing your prompts, workflows, and ResilientLLM behavior. We recommend trying it before starting your own AI agent project.

- TK

## Project Structure

```
server/          # Backend files
└── app.js       # Express server with ResilientLLM
client/          # Frontend files
├── index.html   # Main HTML file (shows key integration functions)
├── styles.css   # Styling
├── api.js       # API integration with the Express backend
├── messages.js  # Message display and management
└── ui.js        # UI components and interactions
```
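
The backend wiring is small. Here is a minimal sketch of the shape of `server/app.js` — the `ResilientLLM` constructor options and `chat()` call are assumptions for illustration (see `docs/reference.md` for the actual API); the `/api/chat` request and response shapes match what `client/api.js` expects.

```js
// Minimal sketch of server/app.js.
// NOTE: the constructor options and llm.chat() below are assumed names —
// check docs/reference.md for the real ResilientLLM API.
import express from 'express';
import ResilientLLM from 'resilient-llm';

const app = express();
const PORT = process.env.PORT || 3000;

// Service and model come from the environment variables set in step 3
const llm = new ResilientLLM({
  aiService: process.env.AI_SERVICE, // e.g. 'openai'
  aiModel: process.env.AI_MODEL      // e.g. 'gpt-4o-mini'
});

app.use(express.json());
app.use(express.static('client')); // serve the frontend files

app.post('/api/chat', async (req, res) => {
  try {
    const { conversationHistory, llmOptions } = req.body;
    // ResilientLLM handles rate limiting, retries with backoff,
    // and circuit breaking around this call
    const response = await llm.chat(conversationHistory, llmOptions);
    res.json({ success: true, response });
  } catch (error) {
    res.status(500).json({ success: false, error: error.message });
  }
});

app.listen(PORT, () => console.log(`Server running on http://localhost:${PORT}`));
```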

## Quick Start

### 1. Clone and Setup

```bash
git clone https://github.com/gitcommitshow/resilient-llm
cd resilient-llm/examples/playground
```

### 2. Install Dependencies

```bash
npm install
```

### 3. Set Environment Variables

Set your API key and choose the default LLM service and model:

```bash
# OpenAI
export OPENAI_API_KEY=your_key_here
export AI_SERVICE=openai
export AI_MODEL=gpt-4o-mini

# Or Anthropic
export ANTHROPIC_API_KEY=your_key_here
export AI_SERVICE=anthropic
export AI_MODEL=claude-3-5-sonnet-20240620

# Or Gemini
export GEMINI_API_KEY=your_key_here
export AI_SERVICE=gemini
export AI_MODEL=gemini-2.0-flash
```

### 4. Start the Server

```bash
npm run dev
```

The server will start on `http://localhost:3000` and automatically serve the client files.

### 5. Open in Browser

Navigate to **`http://localhost:3000`** in your browser.

<details>
<summary><strong>Want to preview in the VSCode/Cursor editor directly?</strong></summary>

- Install [Live Preview extension](https://marketplace.cursorapi.com/items/?itemName=ms-vscode.live-server)
- Right-click on `client/index.html` → **"Show Preview"**

**Note:** The server must be running for the preview to work, as it serves the client files and handles API requests.

</details>

----

🐞 Discovered a bug? [Create an issue](https://github.com/gitcommitshow/resilient-llm/issues/new)

## License

MIT License
62 changes: 62 additions & 0 deletions examples/playground/client/api.js
@@ -0,0 +1,62 @@
// API Integration with ResilientLLM
// This file handles communication with the ResilientLLM backend

const API_URL = 'http://localhost:3000/api/chat';

/**
* Build conversation history from messages array
* Formats messages for ResilientLLM API
* @param {Array} messages - Array of message objects with role and text
* @returns {Array} - Formatted conversation history
*/
function buildConversationHistory(messages) {
  return messages.map(msg => ({
    role: msg.role,
    content: msg.text
  }));
}
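
// Example (illustrative): converts the UI's message shape to the API's.
//   buildConversationHistory([{ role: 'user', text: 'Hello!' }])
//   // → [{ role: 'user', content: 'Hello!' }]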

/**
* Call the backend API to get LLM response
*
* ResilientLLM handles all the complexity automatically:
* - Rate limiting (requests per minute, tokens per minute)
* - Automatic retries with exponential backoff
* - Circuit breaker for service resilience
* - Token estimation
* - Error handling and recovery
*
* @param {Array} conversationHistory - Array of messages with role and content
* @param {Object} llmOptions - Optional LLM configuration options
* @returns {Promise<string>} - The AI response text
*/
async function getAIResponse(conversationHistory, llmOptions = {}) {
  try {
    const response = await fetch(API_URL, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        conversationHistory: conversationHistory,
        llmOptions: llmOptions
      })
    });

    if (!response.ok) {
      const errorData = await response.json().catch(() => ({}));
      throw new Error(errorData.error || `HTTP error! status: ${response.status}`);
    }

    const data = await response.json();
    if (data.success && data.response) {
      return data.response;
    } else {
      throw new Error(data.error || 'No response from server');
    }
  } catch (error) {
    console.error('Error calling API:', error);
    throw error;
  }
}
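
// Usage sketch (hypothetical call site — the real wiring lives in
// messages.js and ui.js):
//   const history = buildConversationHistory(messages);
//   const reply = await getAIResponse(history);
//   // render `reply`, or catch and display the thrown error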
