22 changes: 21 additions & 1 deletion .env.local.example
@@ -33,9 +33,29 @@ OPENROUTER_API_KEY=

# WorkOS API credentials
# Get started at: https://workos.com/
# NEXT_PUBLIC_AUTH_MODE=workos
# WORKOS_API_KEY='sk_example_123456789'
# WORKOS_CLIENT_ID='client_123456789'
# WORKOS_COOKIE_PASSWORD="<your password>" # generate a secure password

# WorkOS redirect URI (configure in WorkOS dashboard)
# NEXT_PUBLIC_WORKOS_REDIRECT_URI="http://localhost:3000/callback"
# NEXT_PUBLIC_WORKOS_REDIRECT_URI="http://localhost:3000/callback"

# =============================================================================
# RATE LIMITING (Optional - Upstash Redis)
# =============================================================================

# Upstash Redis credentials for rate limiting
# Get started at: https://upstash.com/
# UPSTASH_REDIS_REST_URL="https://your-redis-url.upstash.io"
# UPSTASH_REDIS_REST_TOKEN="your-redis-token"
# RATE_LIMIT_REQUESTS=10

# =============================================================================
# ANALYTICS (Optional - PostHog)
# =============================================================================

# PostHog analytics configuration
# Get started at: https://posthog.com/
# NEXT_PUBLIC_POSTHOG_KEY="phc_your_project_key_here"
# NEXT_PUBLIC_POSTHOG_HOST="https://app.posthog.com"
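
The Upstash variables above feed the `checkRateLimit` helper that the new chat route imports from `@/lib/rate-limit` (see `app/api/chat/route.ts` below). That module is not part of this diff, so the following is a minimal sketch, assuming `@upstash/ratelimit` with a sliding window and a hypothetical `rate_limit:chat` error code:

```ts
// lib/rate-limit.ts (sketch) -- the PR only shows checkRateLimit being
// imported and called; this is one plausible implementation wired to the
// Upstash variables from .env.local.example above.
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";
import { ChatSDKError } from "@/lib/errors";

const maxRequests = Number(process.env.RATE_LIMIT_REQUESTS ?? 10);

// Only enforce limits when Upstash credentials are configured.
const ratelimit =
  process.env.UPSTASH_REDIS_REST_URL && process.env.UPSTASH_REDIS_REST_TOKEN
    ? new Ratelimit({
        redis: Redis.fromEnv(), // reads UPSTASH_REDIS_REST_URL / _TOKEN
        limiter: Ratelimit.slidingWindow(maxRequests, "1 m"), // window size is an assumption
      })
    : null;

export async function checkRateLimit(userID: string): Promise<void> {
  if (!ratelimit) return; // rate limiting is optional
  const { success } = await ratelimit.limit(userID);
  if (!success) {
    // Error code string is hypothetical; the PR only shows ChatSDKError("offline:chat", ...)
    throw new ChatSDKError("rate_limit:chat", "Too many requests");
  }
}
```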
2 changes: 1 addition & 1 deletion .gitignore
@@ -32,7 +32,7 @@ yarn-error.log*

# env files (can opt-in for committing if needed)
.env
.env*.local
.env.local
!.env*.example

# vercel
85 changes: 45 additions & 40 deletions README.md
@@ -1,56 +1,61 @@
# HackerAI - The AI Pentest Assistant
# HackerAI

## Prerequisites
_Your AI-Powered Penetration Testing Assistant_

Before running the application, you need to obtain API keys for the following services:
---

### Required API Keys
## 🚀 Quick Start

1. **OpenRouter API Key** - For LLM (Large Language Model) usage
- Sign up at [https://openrouter.ai/](https://openrouter.ai/)
- Get your API key from the dashboard
- This enables the AI to access various models including Claude, GPT, etc.
### 1. Install Dependencies

2. **E2B API Key** - For secure sandbox environments
- Sign up at [https://e2b.dev/](https://e2b.dev/)
- Get your API key from the dashboard
- This allows the AI to execute terminal commands and Python code safely in isolated containers
```bash
pnpm install
```

## Getting Started
### 2. Configure Environment

1. **Install dependencies:**
Create `.env.local` from the example file:

```bash
pnpm i
```
```bash
cp .env.local.example .env.local
```

2. **Set up environment variables:**
- Copy `.env.local.example` to `.env.local`
- Add the following required configuration:
```
# OpenRouter API key (Required)
# Get your API key at: https://openrouter.ai/
OPENROUTER_API_KEY=your_openrouter_api_key_here
```
Add your OpenRouter API key:

HackerAI can execute terminal commands locally (default) or in sandbox. For sandbox mode, you'll need an E2B API key:
```env
OPENROUTER_API_KEY=your_openrouter_api_key_here
```

```
# Switch to sandbox mode (optional)
TERMINAL_EXECUTION_MODE=sandbox
### 3. Launch Application

# E2B API key for sandbox execution
# Get your API key at: https://e2b.dev/
E2B_API_KEY=your_e2b_api_key_here
```
```bash
pnpm dev
```

3. **Run the development server:**
Visit **[http://localhost:3000](http://localhost:3000)** and start your penetration testing journey! 🎯

```bash
pnpm dev
```
---

4. **Open the application:**
- Navigate to [http://localhost:3000](http://localhost:3000) in your browser
## 🔑 API Configuration

The AI assistant is now ready to help with your penetration testing tasks!
### Required

| Service | Purpose | Get API Key |
| -------------- | ------------------------------ | --------------------------------------- |
| **OpenRouter** | LLM access (Claude, GPT, etc.) | [openrouter.ai](https://openrouter.ai/) |

### Optional - Sandbox Mode

| Service | Purpose | Get API Key |
| ------- | ------------------------- | --------------------------- |
| **E2B** | Secure isolated execution | [e2b.dev](https://e2b.dev/) |

> 💡 **Default Behavior**: Terminal commands execute locally on your machine
> 🔒 **Sandbox Mode**: Add E2B key for isolated container execution

#### Enable Sandbox Mode

```env
TERMINAL_EXECUTION_MODE=sandbox
E2B_API_KEY=your_e2b_api_key_here
```
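
The sandbox toggle documented above is read by the chat route in the next file, which falls back to local execution when `TERMINAL_EXECUTION_MODE` is unset. The `ExecutionMode` union and the terminal helper below are illustrative assumptions (the PR only shows the env-var fallback and an imported `@/types` type), sketched with the E2B SDK for the sandbox branch:

```ts
// Sketch: how the execution-mode toggle from .env.local might drive a
// terminal tool. Only the env-var fallback is shown in this PR
// (app/api/chat/route.ts); runTerminalCommand and the E2B calls are
// illustrative assumptions.
import { exec } from "node:child_process";
import { promisify } from "node:util";
import { Sandbox } from "@e2b/code-interpreter";

export type ExecutionMode = "local" | "sandbox";

const executionMode: ExecutionMode =
  (process.env.TERMINAL_EXECUTION_MODE as ExecutionMode) || "local";

const execAsync = promisify(exec);

// Hypothetical helper: run a command either on the host or in an E2B sandbox.
export async function runTerminalCommand(command: string): Promise<string> {
  if (executionMode === "sandbox") {
    const sandbox = await Sandbox.create(); // uses E2B_API_KEY from the environment
    try {
      const result = await sandbox.commands.run(command);
      return result.stdout;
    } finally {
      await sandbox.kill();
    }
  }
  // Default: execute locally on the developer's machine.
  const { stdout } = await execAsync(command);
  return stdout;
}
```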
201 changes: 115 additions & 86 deletions app/api/chat/route.ts
@@ -11,106 +11,135 @@ import { systemPrompt } from "@/lib/system-prompt";
import { truncateMessagesToTokenLimit } from "@/lib/token-utils";
import { createTools } from "@/lib/ai/tools";
import { pauseSandbox } from "@/lib/ai/tools/utils/sandbox";
import { isWorkOSConfigured } from "@/lib/auth-utils";
import { authkit } from "@workos-inc/authkit-nextjs";
import { getUserID } from "@/lib/auth/server";
import { generateTitleFromUserMessage } from "@/lib/actions";
import { NextRequest } from "next/server";
import { myProvider } from "@/lib/ai/providers";
import type { ChatMode, ExecutionMode } from "@/types";
import { checkRateLimit } from "@/lib/rate-limit";
import { ChatSDKError } from "@/lib/errors";
import PostHogClient from "@/app/posthog";

// Allow streaming responses up to 300 seconds
export const maxDuration = 300;

export async function POST(req: NextRequest) {
const { messages, mode }: { messages: UIMessage[]; mode: ChatMode } =
await req.json();
try {
const { messages, mode }: { messages: UIMessage[]; mode: ChatMode } =
await req.json();

const model = "agent-model";
// Get user ID from authenticated session or fallback to anonymous
const userID = await getUserID(req);

// Get user ID from authenticated session or fallback to anonymous
const getUserID = async (): Promise<string> => {
if (!isWorkOSConfigured()) return "anonymous";
// Check rate limit for the user
await checkRateLimit(userID);

try {
const { session } = await authkit(req);
return session?.user?.id || "anonymous";
} catch (error) {
console.error("Failed to get user session:", error);
return "anonymous";
// Determine execution mode from environment variable
const executionMode: ExecutionMode =
(process.env.TERMINAL_EXECUTION_MODE as ExecutionMode) || "local";

// Truncate messages to stay within token limit (processing is now done on frontend)
const truncatedMessages = truncateMessagesToTokenLimit(messages);

const model = myProvider.languageModel("agent-model");

// Capture analytics event
const posthog = PostHogClient();
if (posthog) {
posthog.capture({
distinctId: userID,
event: "hackerai-" + mode,
});
}
};

const userID = await getUserID();

// Determine execution mode from environment variable
const executionMode: ExecutionMode =
(process.env.TERMINAL_EXECUTION_MODE as ExecutionMode) || "local";

// Truncate messages to stay within token limit (processing is now done on frontend)
const truncatedMessages = truncateMessagesToTokenLimit(messages);

const stream = createUIMessageStream({
execute: async ({ writer }) => {
// Create tools with user context, mode, and writer
const { tools, getSandbox } = createTools(
userID,
writer,
mode,
executionMode,
);

// Generate title in parallel if this is the start of a conversation
const titlePromise =
truncatedMessages.length === 1
? (async () => {
try {
const chatTitle = await generateTitleFromUserMessage(
truncatedMessages,
req.signal,
);

writer.write({
type: "data-title",
data: { chatTitle },
transient: true,

const stream = createUIMessageStream({
execute: async ({ writer }) => {
// Create tools with user context, mode, and writer
const { tools, getSandbox } = createTools(
userID,
writer,
mode,
executionMode,
);

// Generate title in parallel if this is the start of a conversation
const titlePromise =
truncatedMessages.length === 1
? (async () => {
try {
const chatTitle = await generateTitleFromUserMessage(
truncatedMessages,
req.signal,
);

writer.write({
type: "data-title",
data: { chatTitle },
transient: true,
});
} catch (error) {
// Log error but don't propagate to keep main stream resilient
console.error(
"Failed to generate or write chat title:",
error,
);
}
})()
: Promise.resolve();

const result = streamText({
model: model,
system: systemPrompt(model.modelId, executionMode),
messages: convertToModelMessages(truncatedMessages),
tools,
abortSignal: req.signal,
experimental_transform: smoothStream({ chunking: "word" }),
stopWhen: stepCountIs(25),
onChunk: async (chunk) => {
if (chunk.chunk.type === "tool-call") {
if (posthog) {
posthog.capture({
distinctId: userID,
event: "hackerai-" + chunk.chunk.toolName,
});
} catch (error) {
// Log error but don't propagate to keep main stream resilient
console.error("Failed to generate or write chat title:", error);
}
})()
: Promise.resolve();

const result = streamText({
model: myProvider.languageModel("agent-model"),
system: systemPrompt(model, executionMode),
messages: convertToModelMessages(truncatedMessages),
tools,
abortSignal: req.signal,
experimental_transform: smoothStream({ chunking: "word" }),
stopWhen: stepCountIs(25),
onError: async (error) => {
console.error("Error:", error);

// Perform same cleanup as onFinish to prevent resource leaks
const sandbox = getSandbox();
if (sandbox) {
await pauseSandbox(sandbox);
}
await titlePromise;
},
onFinish: async () => {
const sandbox = getSandbox();
if (sandbox) {
await pauseSandbox(sandbox);
}
await titlePromise;
},
});
}
},
onError: async (error) => {
console.error("Error:", error);

writer.merge(result.toUIMessageStream());
},
});
// Perform same cleanup as onFinish to prevent resource leaks
const sandbox = getSandbox();
if (sandbox) {
await pauseSandbox(sandbox);
}
await titlePromise;
},
onFinish: async () => {
const sandbox = getSandbox();
if (sandbox) {
await pauseSandbox(sandbox);
}
await titlePromise;
},
});

writer.merge(result.toUIMessageStream());
},
});

return createUIMessageStreamResponse({ stream });
} catch (error) {
// Handle rate limiting and other ChatSDKErrors
if (error instanceof ChatSDKError) {
return error.toResponse();
}

return createUIMessageStreamResponse({ stream });
// Handle unexpected errors
console.error("Unexpected error in chat route:", error);
const unexpectedError = new ChatSDKError(
"offline:chat",
error instanceof Error ? error.message : "Unknown error occurred",
);
return unexpectedError.toResponse();
}
}
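
The route above only captures analytics when `PostHogClient()` from `@/app/posthog` returns a client; that module is not included in this diff. A minimal sketch, assuming the `posthog-node` package and the `NEXT_PUBLIC_POSTHOG_*` variables added to `.env.local.example`:

```ts
// app/posthog.ts (sketch) -- not part of this diff; one way the optional
// PostHog client used above could be constructed with posthog-node.
import { PostHog } from "posthog-node";

export default function PostHogClient(): PostHog | null {
  const key = process.env.NEXT_PUBLIC_POSTHOG_KEY;
  if (!key) return null; // analytics stays disabled unless the key is set

  return new PostHog(key, {
    host: process.env.NEXT_PUBLIC_POSTHOG_HOST ?? "https://app.posthog.com",
    // Flush immediately so short-lived serverless invocations don't drop events.
    flushAt: 1,
    flushInterval: 0,
  });
}
```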
4 changes: 2 additions & 2 deletions app/callback/route.ts
@@ -1,10 +1,10 @@
import { handleAuth } from "@workos-inc/authkit-nextjs";
import { NextResponse } from "next/server";
import { isWorkOSConfigured } from "@/lib/auth-utils";
import { isWorkOSEnabled } from "@/lib/auth/client";

// Redirect the user to `/` after successful sign in
// The redirect can be customized: `handleAuth({ returnPathname: '/foo' })`
export const GET = isWorkOSConfigured()
export const GET = isWorkOSEnabled()
? handleAuth()
: () =>
NextResponse.redirect(