2 changes: 1 addition & 1 deletion .env.local.example
@@ -23,7 +23,6 @@ NEXT_PUBLIC_WORKOS_REDIRECT_URI=http://localhost:3000/callback

 OPENROUTER_API_KEY=your_openrouter_api_key_here
 OPENAI_API_KEY=your_openai_api_key_here
-AI_GATEWAY_API_KEY=your_ai_gateway_api_key_here

 # =============================================================================
 # OPTIONAL CONFIGURATIONS
@@ -46,6 +45,7 @@ AI_GATEWAY_API_KEY=your_ai_gateway_api_key_here
 # UPSTASH_REDIS_REST_TOKEN="your-redis-token"
 # PRO_RATE_LIMIT_REQUESTS=
 # FREE_RATE_LIMIT_REQUESTS=
+# AGENT_MODE_RATE_LIMIT_REQUESTS=

 # Analytics (PostHog)
 # NEXT_PUBLIC_POSTHOG_KEY="phc_your_project_key_here"
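The new `AGENT_MODE_RATE_LIMIT_REQUESTS` knob pairs with the `checkRateLimit(userId, isPro, mode)` call added in `app/api/chat/route.ts` below. The limiter itself is not part of this PR; the following is only a rough sketch of what a mode-aware `checkRateLimit` could look like, assuming the Upstash variables above, `@upstash/ratelimit` with a daily sliding window, and made-up default quotas:

```ts
// Hypothetical sketch, not the PR's actual implementation.
import { Ratelimit } from "@upstash/ratelimit";
import { Redis } from "@upstash/redis";

const redis = Redis.fromEnv(); // reads UPSTASH_REDIS_REST_URL / UPSTASH_REDIS_REST_TOKEN

export async function checkRateLimit(
  userId: string,
  isPro: boolean,
  mode?: string,
) {
  // Agent mode gets its own budget; otherwise the quota depends on the plan.
  const requests =
    mode === "agent"
      ? Number(process.env.AGENT_MODE_RATE_LIMIT_REQUESTS ?? 20) // assumed default
      : isPro
        ? Number(process.env.PRO_RATE_LIMIT_REQUESTS ?? 200) // assumed default
        : Number(process.env.FREE_RATE_LIMIT_REQUESTS ?? 50); // assumed default

  const limiter = new Ratelimit({
    redis,
    limiter: Ratelimit.slidingWindow(requests, "1 d"),
    prefix: `ratelimit:${mode ?? "chat"}`,
  });

  const { success } = await limiter.limit(userId);
  if (!success) throw new Error("Rate limit exceeded");
}
```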
25 changes: 10 additions & 15 deletions app/api/chat/route.ts
@@ -28,7 +28,7 @@ import {
} from "@/lib/db/actions";
import { v4 as uuidv4 } from "uuid";
import { processChatMessages } from "@/lib/chat/chat-processor";
import { myProvider } from "@/lib/ai/providers";
import { createTrackedProvider } from "@/lib/ai/providers";

export const maxDuration = 300;

@@ -71,6 +71,7 @@ export async function POST(req: NextRequest) {
     userId,
     newMessages: messages,
     regenerate,
+    isPro,
   });

   // Handle initial chat setup, regeneration, and save user message
@@ -82,22 +83,20 @@ export async function POST(req: NextRequest) {
     chat,
   });

-  // Check rate limit for the user
-  await checkRateLimit(userId, isPro);
+  // Check rate limit for the user with mode
+  await checkRateLimit(userId, isPro, mode);

-  // Process chat messages with moderation and analytics
-  const posthog = PostHogClient();
+  // Process chat messages with moderation
   const { executionMode, processedMessages, selectedModel } =
     await processChatMessages({
       messages: truncatedMessages,
       mode,
       userID: userId,
-      posthog,
     });

   // Get user customization to check memory preference (outside stream to avoid duplicate calls)
   const userCustomization = await getUserCustomization({ userId });
   const memoryEnabled = userCustomization?.include_memory_entries ?? true;
+  const posthog = PostHogClient();

   const stream = createUIMessageStream({
     execute: async ({ writer }) => {
@@ -120,22 +119,18 @@
             )
           : Promise.resolve(undefined);

+      const trackedProvider = createTrackedProvider(userId, chatId, isPro);
+
       const result = streamText({
-        model: myProvider.languageModel(selectedModel),
+        model: trackedProvider.languageModel(selectedModel),
         system: await systemPrompt(
           userId,
           isPro,
           mode,
           executionMode,
           userCustomization,
         ),
         messages: convertToModelMessages(processedMessages),
-        providerOptions: {
-          ...(!isPro && {
-            gateway: {
-              order: ["novita"],
-            },
-          }),
-        },
         tools,
         abortSignal: controller.signal,
         headers: getAIHeaders(),
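The route now builds a per-request provider via `createTrackedProvider(userId, chatId, isPro)` instead of the static `myProvider`. Its implementation in `lib/ai/providers` is not included in this diff; a hypothetical sketch of the shape the route relies on, where the `track()` helper and the wrapping of the old `myProvider` are assumptions:

```ts
// Hypothetical sketch of lib/ai/providers, not the PR's actual code.
// Assumes the pre-existing myProvider (the old import) is still defined in
// this module, and uses a stand-in track() helper for telemetry.
import type { LanguageModel } from "ai";

declare const myProvider: { languageModel(id: string): LanguageModel };

function track(event: string, props: Record<string, unknown>) {
  console.log(event, props); // stand-in for PostHog/usage accounting
}

export function createTrackedProvider(
  userId: string,
  chatId: string,
  isPro: boolean,
) {
  return {
    // Same call shape the route uses: trackedProvider.languageModel(selectedModel)
    languageModel(modelId: string): LanguageModel {
      track("model_requested", { userId, chatId, isPro, modelId });
      return myProvider.languageModel(modelId);
    },
  };
}
```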
14 changes: 10 additions & 4 deletions app/components/ChatInput.tsx
@@ -32,7 +32,11 @@ import { ScrollToBottomButton } from "./ScrollToBottomButton";
 import { AttachmentButton } from "./AttachmentButton";
 import { useFileUpload } from "../hooks/useFileUpload";
 import { useEffect, useRef, useState } from "react";
-import { countInputTokens, MAX_TOKENS } from "@/lib/token-utils";
+import {
+  countInputTokens,
+  MAX_TOKENS_PRO,
+  MAX_TOKENS_FREE,
+} from "@/lib/token-utils";
 import { toast } from "sonner";

 interface ChatInputProps {
@@ -137,12 +141,14 @@ export const ChatInput = ({
     const pastedText = clipboardData.getData("text");

     if (pastedText) {
-      // Check token limit for the pasted text only
+      // Check token limit for the pasted text only based on user plan
       const tokenCount = countInputTokens(pastedText, []);
-      if (tokenCount > MAX_TOKENS) {
+      const maxTokens = hasProPlan ? MAX_TOKENS_PRO : MAX_TOKENS_FREE;
+      if (tokenCount > maxTokens) {
         e.preventDefault();
+        const planText = hasProPlan ? "" : " (Free plan limit)";
         toast.error("Content is too long to paste", {
-          description: `The content you're trying to paste is too large (${tokenCount.toLocaleString()} tokens). Please copy a smaller amount.`,
+          description: `The content you're trying to paste is too large (${tokenCount.toLocaleString()} tokens). Please copy a smaller amount${planText}.`,
         });
         return;
       }
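Both this component and `useChatHandlers.ts` further down now pick a limit from `MAX_TOKENS_PRO` / `MAX_TOKENS_FREE` instead of a single `MAX_TOKENS`. The updated `lib/token-utils` is not part of the diff; a plausible sketch follows, in which the two limits and the chars-per-token heuristic are pure assumptions:

```ts
// Hypothetical sketch of lib/token-utils; values and heuristic are assumptions.
export const MAX_TOKENS_PRO = 32_000; // assumed Pro-plan input budget
export const MAX_TOKENS_FREE = 8_000; // assumed Free-plan input budget

interface UploadedFileLike {
  uploaded: boolean;
  url?: string;
}

export function countInputTokens(
  text: string,
  files: UploadedFileLike[],
): number {
  // Crude estimate: ~4 characters per token, plus a flat cost per attachment.
  const textTokens = Math.ceil(text.length / 4);
  const fileTokens = files.length * 512; // assumed flat per-file cost
  return textTokens + fileTokens;
}
```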
6 changes: 3 additions & 3 deletions app/components/MessageErrorState.tsx
@@ -6,12 +6,12 @@ import { redirectToPricing } from "@/app/hooks/usePricingDialog";

 interface MessageErrorStateProps {
   error: Error;
-  onRegenerate: () => void;
+  onRetry: () => void;
 }

 export const MessageErrorState = ({
   error,
-  onRegenerate,
+  onRetry,
 }: MessageErrorStateProps) => {
   const { hasProPlan } = useGlobalState();
   const isRateLimitError =
@@ -31,7 +31,7 @@ export const MessageErrorState = ({
         )}
       </div>
       <div className="flex gap-2">
-        <Button variant="destructive" size="sm" onClick={onRegenerate}>
+        <Button variant="destructive" size="sm" onClick={onRetry}>
           {isRateLimitError ? "Try Again" : "Retry"}
         </Button>
         {isRateLimitError && !hasProPlan && (
6 changes: 3 additions & 3 deletions app/components/Messages.tsx
@@ -29,6 +29,7 @@ interface MessagesProps {
   messages: ChatMessage[];
   setMessages: Dispatch<SetStateAction<ChatMessage[]>>;
   onRegenerate: () => void;
+  onRetry: () => void;
   onEditMessage: (messageId: string, newContent: string) => Promise<void>;
   status: ChatStatus;
   error: Error | null;
@@ -48,6 +49,7 @@ export const Messages = ({
   messages,
   setMessages,
   onRegenerate,
+  onRetry,
   onEditMessage,
   status,
   error,
@@ -329,9 +331,7 @@ export const Messages = ({
       })}

       {/* Error state */}
-      {error && (
-        <MessageErrorState error={error} onRegenerate={onRegenerate} />
-      )}
+      {error && <MessageErrorState error={error} onRetry={onRetry} />}
     </div>
   </div>
 );
7 changes: 3 additions & 4 deletions app/components/PricingDialog.tsx
@@ -121,10 +121,9 @@ const PlanCard: React.FC<PlanCardProps> = ({
 };

 const PricingDialog: React.FC<PricingDialogProps> = ({ isOpen, onClose }) => {
-  const { user, loading } = useAuth();
+  const { user } = useAuth();
   const { hasProPlan, isCheckingProPlan } = useGlobalState();
-  const { upgradeLoading, handleUpgrade, upgradeError } = useUpgrade();
-  const isMobile = useIsMobile();
+  const { upgradeLoading, handleUpgrade } = useUpgrade();

   const handleSignIn = () => {
     window.location.href = "/login";
@@ -258,7 +257,7 @@ const PricingDialog: React.FC<PricingDialogProps> = ({ isOpen, onClose }) => {
       {/* Pro Plan */}
       <PlanCard
         planName="Pro"
-        price={20}
+        price={25}
         description="More access to advanced intelligence"
         features={proFeatures}
         buttonText={proButtonConfig.text}
26 changes: 16 additions & 10 deletions app/components/chat.tsx
@@ -234,16 +234,21 @@ export const Chat = ({ chatId: routeChatId }: { chatId?: string }) => {
   }, [handleDragEnter, handleDragLeave, handleDragOver, handleDrop]);

   // Chat handlers
-  const { handleSubmit, handleStop, handleRegenerate, handleEditMessage } =
-    useChatHandlers({
-      chatId,
-      messages,
-      resetSidebarAutoOpenRef,
-      sendMessage,
-      stop,
-      regenerate,
-      setMessages,
-    });
+  const {
+    handleSubmit,
+    handleStop,
+    handleRegenerate,
+    handleRetry,
+    handleEditMessage,
+  } = useChatHandlers({
+    chatId,
+    messages,
+    resetSidebarAutoOpenRef,
+    sendMessage,
+    stop,
+    regenerate,
+    setMessages,
+  });

   const handleScrollToBottom = () => scrollToBottom({ force: true });

@@ -315,6 +320,7 @@ export const Chat = ({ chatId: routeChatId }: { chatId?: string }) => {
           messages={messages}
           setMessages={setMessages}
           onRegenerate={handleRegenerate}
+          onRetry={handleRetry}
           onEditMessage={handleEditMessage}
           status={status}
           error={error || null}
26 changes: 22 additions & 4 deletions app/hooks/useChatHandlers.ts
@@ -4,7 +4,11 @@ import { api } from "@/convex/_generated/api";
 import { useGlobalState } from "../contexts/GlobalState";
 import type { ChatMessage } from "@/types";
 import { Id } from "@/convex/_generated/dataModel";
-import { countInputTokens, MAX_TOKENS } from "@/lib/token-utils";
+import {
+  countInputTokens,
+  MAX_TOKENS_PRO,
+  MAX_TOKENS_FREE,
+} from "@/lib/token-utils";
 import { toast } from "sonner";

 interface UseChatHandlersProps {
@@ -42,6 +46,7 @@ export const useChatHandlers = ({
     hasActiveChat,
     setHasActiveChat,
     isUploadingFiles,
+    hasProPlan,
   } = useGlobalState();

   const deleteLastAssistantMessage = useMutation(
@@ -63,12 +68,14 @@ export const useChatHandlers = ({
     // Allow submission if there's text input or uploaded files
     const hasValidFiles = uploadedFiles.some((f) => f.uploaded && f.url);
     if (input.trim() || hasValidFiles) {
-      // Check token limit before sending
+      // Check token limit before sending based on user plan
       const tokenCount = countInputTokens(input, uploadedFiles);
-      if (tokenCount > MAX_TOKENS) {
+      const maxTokens = hasProPlan ? MAX_TOKENS_PRO : MAX_TOKENS_FREE;
+      if (tokenCount > maxTokens) {
         const hasFiles = uploadedFiles.length > 0;
+        const planText = hasProPlan ? "" : " (Free plan limit)";
         toast.error("Message is too long", {
-          description: `Your message is too large (${tokenCount.toLocaleString()} tokens). Please make it shorter${hasFiles ? " or remove some files" : ""}.`,
+          description: `Your message is too large (${tokenCount.toLocaleString()} tokens). Please make it shorter${hasFiles ? " or remove some files" : ""}${planText}.`,
         });
         return;
       }
@@ -162,6 +169,16 @@ export const useChatHandlers = ({
     });
   };

+  const handleRetry = async () => {
+    regenerate({
+      body: {
+        mode,
+        todos,
+        regenerate: true,
+      },
+    });
+  };
+
   const handleEditMessage = async (messageId: string, newContent: string) => {
     await regenerateWithNewContent({
       messageId: messageId as Id<"messages">,
@@ -199,6 +216,7 @@ export const useChatHandlers = ({
     handleSubmit,
     handleStop,
     handleRegenerate,
+    handleRetry,
     handleEditMessage,
   };
 };
15 changes: 9 additions & 6 deletions app/layout.tsx
@@ -7,6 +7,7 @@ import { Toaster } from "@/components/ui/sonner";
 import { GlobalStateProvider } from "./contexts/GlobalState";
 import { TodoBlockProvider } from "./contexts/TodoBlockContext";
 import { ConvexClientProvider } from "@/components/ConvexClientProvider";
+import { PostHogProvider } from "./providers";

 const geistSans = Geist({
   variable: "--font-geist-sans",
@@ -92,12 +93,14 @@ export default function RootLayout({
 }>) {
   const content = (
     <GlobalStateProvider>
-      <TodoBlockProvider>
-        <TooltipProvider>
-          {children}
-          <Toaster />
-        </TooltipProvider>
-      </TodoBlockProvider>
+      <PostHogProvider>
+        <TodoBlockProvider>
+          <TooltipProvider>
+            {children}
+            <Toaster />
+          </TooltipProvider>
+        </TodoBlockProvider>
+      </PostHogProvider>
     </GlobalStateProvider>
   );

19 changes: 19 additions & 0 deletions app/providers.tsx
@@ -0,0 +1,19 @@
"use client";

import posthog from "posthog-js";
import { PostHogProvider as PHProvider } from "posthog-js/react";
import { useEffect } from "react";

export function PostHogProvider({ children }: { children: React.ReactNode }) {
useEffect(() => {
if (!process.env.NEXT_PUBLIC_POSTHOG_KEY) return;

posthog.init(process.env.NEXT_PUBLIC_POSTHOG_KEY, {
api_host: `${process.env.NEXT_PUBLIC_POSTHOG_HOST}`,
capture_pageview: false, // Disable automatic pageview capture, as we capture manually
autocapture: false, // Disable automatic event capture, as we capture manually
});
}, []);

return <PHProvider client={posthog}>{children}</PHProvider>;
}
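The comments in this new file note that pageviews and events are captured manually, but the capture side is not in the diff. A minimal sketch of what manual pageview capture could look like under the App Router; the component itself is an assumption, while `$pageview` and `$current_url` are standard PostHog names:

```tsx
"use client";

// Hypothetical companion to app/providers.tsx; not included in this PR.
import posthog from "posthog-js";
import { usePathname, useSearchParams } from "next/navigation";
import { useEffect } from "react";

export function PostHogPageView() {
  const pathname = usePathname();
  const searchParams = useSearchParams();

  useEffect(() => {
    if (!pathname || !process.env.NEXT_PUBLIC_POSTHOG_KEY) return;
    // Rebuild the full URL and emit an explicit $pageview event.
    let url = window.origin + pathname;
    const query = searchParams?.toString();
    if (query) url += `?${query}`;
    posthog.capture("$pageview", { $current_url: url });
  }, [pathname, searchParams]);

  return null;
}
```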
8 changes: 5 additions & 3 deletions convex/memories.ts
@@ -4,12 +4,13 @@ import { validateServiceKey } from "./chats";

 /**
  * Get memories for backend processing (with service key)
- * Enforces 10,000 token limit by removing old memories if needed
+ * Enforces token limit based on user plan (10k for pro, 5k for free)
  */
 export const getMemoriesForBackend = query({
   args: {
     serviceKey: v.optional(v.string()),
     userId: v.string(),
+    isPro: v.boolean(),
   },
   returns: v.array(
     v.object({
@@ -31,12 +32,13 @@ export const getMemoriesForBackend = query({
.order("desc")
.collect();

// Calculate total tokens and enforce 10,000 token limit
// Calculate total tokens and enforce token limit based on user plan
const tokenLimit = args.isPro ? 10000 : 5000;
let totalTokens = 0;
const validMemories = [];

for (const memory of memories) {
if (totalTokens + memory.tokens <= 10000) {
if (totalTokens + memory.tokens <= tokenLimit) {
totalTokens += memory.tokens;
validMemories.push(memory);
} else {
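Callers of `getMemoriesForBackend` must now pass `isPro` so the query can apply the 10k or 5k budget. The call site is not shown in this diff; a caller-side sketch under assumed client setup and env-var names:

```ts
// Hypothetical caller-side sketch; the real call site is not in this diff.
import { ConvexHttpClient } from "convex/browser";
import { api } from "@/convex/_generated/api";

const convex = new ConvexHttpClient(process.env.NEXT_PUBLIC_CONVEX_URL!);

export async function fetchMemoriesForPrompt(userId: string, isPro: boolean) {
  // isPro selects the server-enforced memory budget (10k vs 5k tokens).
  return convex.query(api.memories.getMemoriesForBackend, {
    serviceKey: process.env.CONVEX_SERVICE_KEY, // assumed env-var name
    userId,
    isPro,
  });
}
```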