3 changes: 1 addition & 2 deletions .env.example
@@ -1,2 +1 @@
- REACT_APP_JUDGE0_KEY=judge0cekey
- REACT_APP_OPEN_AI_KEY=openAIkey
+ REACT_APP_JUDGE0_KEY=judge0cekey
42 changes: 17 additions & 25 deletions src/Components/Main.jsx
@@ -1,4 +1,4 @@
- import { useEffect, useRef, useState } from "react";
+ import { useContext, useEffect, useRef, useState } from "react";

import { assoc } from "ramda";

@@ -11,25 +11,27 @@ import CustomInput from "./CustomInput";
import LanguageSelector from "./LanguageSelector";
import { LANGUAGE_OPTIONS } from "./LanguageSelector/constants";
import OutputTerminal from "./OutputTerminal";
- import { decodeString, encodeString } from "./utils";
+ import { decodeString, encodeString, webLlmEngineInput } from "./utils";
import { OUTPUT_STATUES, DEFAULT_OUTPUT_VALUE } from "./contants";
import CustomInputHeader from "./CustomInput/Header";
import OutputTerminalHeader from "./OutputTerminal/Header";
import Header from "./Header";
import CodeActions from "./CodeActions";
- import ChatGptModal from "./ChatGptModal";
+ import RefactorModal from "./RefactorModal";
+ import { AppState } from "../Hooks/utils";

const Main = ({ webLlmEngine }) => {
const outputRef = useRef(null);
const editorRef = useRef(null);

+ const { showWebLlmModal, engineOutput, engineStreamLoading } =
+ useContext(AppState);

const [selectedLanguage, setSelectedLanguage] = useState(LANGUAGE_OPTIONS[0]);
const [value, setValue] = useState(selectedLanguage?.stub);
const [input, setInput] = useState();
const [output, setOutput] = useState(DEFAULT_OUTPUT_VALUE);
const [isLoading, setIsLoading] = useState(false);
- const [showModal, setShowModal] = useState(false);
- const [chatGptOutput, setChatGptOutput] = useState("");

const { mutateAsync: runCode } = useCreateSubmissionsApi();
const { mutateAsync: getOutput } = useGetSubmissionsApi();
@@ -95,30 +97,24 @@ const Main = ({ webLlmEngine }) => {
const selectedValue = getSelectedRangeOfValue();
if (!selectedValue && selectedValue === "") return;

- setShowModal(true);
+ showWebLlmModal.value = true;
+ engineStreamLoading.value = true;
setIsLoading(true);

const engine = await webLlmEngine;

- const webLlmOutput = await engine.chat.completions.create({
- messages: [
- {
- role: "system",
- content: `You are a chatbot that can refactor any code.
- Always return the code block in markdown style with comments about the refactored code.
- Always suggest the output in the requested language itself with a single code block.`,
- },
- { role: "user", content: `Refactor code snippet ${selectedValue}` },
- ],
- temperature: 0.5,
- stream: true, // <-- Enable streaming
- });
+ engine.interruptGenerate();
+ const webLlmOutput = await engine.chat.completions.create(
+ webLlmEngineInput(selectedValue),
+ );

engineOutput.value = "";
for await (const chunk of webLlmOutput) {
const reply = chunk.choices[0]?.delta.content || "";
- setChatGptOutput((answer) => answer + reply);
+ engineOutput.value += reply;
setIsLoading(false);
}
+ engineStreamLoading.value = false;
} catch (err) {
console.log(err);
}
@@ -169,14 +165,10 @@ const Main = ({ webLlmEngine }) => {
</>
)}
</div>
- <ChatGptModal
- showModal={showModal}
- setShowModal={setShowModal}
- text={chatGptOutput}
+ <RefactorModal
setValue={setValue}
getSelectedValue={getSelectedRangeOfValue}
isLoading={isLoading}
- setChatGptOutput={setChatGptOutput}
/>
</>
);
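Taken together, the Main.jsx changes drop the local showModal / chatGptOutput state and stream the WebLLM output into signals shared through AppState. A condensed sketch of the resulting flow, with the handler name illustrative and the isLoading spinner state omitted; engine, the signals, and webLlmEngineInput are assumed to come from the surrounding component as shown in the diff:

const refactorSelection = async (selectedValue) => {
  showWebLlmModal.value = true;      // open RefactorModal right away
  engineStreamLoading.value = true;  // keeps "Paste Code" disabled while streaming

  const engine = await webLlmEngine;
  engine.interruptGenerate();        // cancel any generation still in flight

  const stream = await engine.chat.completions.create(
    webLlmEngineInput(selectedValue),
  );

  engineOutput.value = "";           // reset before appending new chunks
  for await (const chunk of stream) {
    engineOutput.value += chunk.choices[0]?.delta.content || "";
  }
  engineStreamLoading.value = false; // streaming finished, "Paste Code" becomes clickable
};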
@@ -1,33 +1,32 @@
import { Modal, Spin } from "antd";
import MDEditor from "@uiw/react-md-editor";
import rehypeSanitize from "rehype-sanitize";
+ import { useContext } from "react";
+ import { AppState } from "../../Hooks/utils";
+ import { useSignals } from "@preact/signals-react/runtime";

+ const RefactorModal = ({ setValue, getSelectedValue, isLoading }) => {
+ useSignals();
+ const { showWebLlmModal, engineOutput, isEngineStreamLoading } =
+ useContext(AppState);

- const ChatGptModal = ({
- showModal,
- setShowModal,
- text,
- setValue,
- getSelectedValue,
- isLoading,
- setChatGptOutput,
- }) => {
const extractCodeFromBlock = (blockString) =>
[...blockString.matchAll(/```(?:[a-z]+)?\n([\s\S]+?)\n```/g)].map(
(match) => match[1],
);

const pasteCode = () => {
const selectedValue = getSelectedValue();
- const code = extractCodeFromBlock(text);
+ const code = extractCodeFromBlock(engineOutput.value);

setValue((prevValue) => prevValue.replace(selectedValue, code));
setChatGptOutput("");
setShowModal(false);
engineOutput.value = "";
showWebLlmModal.value = false;
};

const handleCancel = () => {
setChatGptOutput("");
setShowModal(false);
engineOutput.value = "";
showWebLlmModal.value = false;
};

const Footer = [
@@ -42,6 +41,7 @@ const ChatGptModal = ({
key="pasteCode"
className="disabled:opacity-75 disabled:cursor-not-allowed border border-gray-200 bg-blue-700 text-white rounded-md px-4 py-2 md:m-2 mt-2 transition duration-500 ease select-none hover:bg-blue-500 focus:outline-none focus:shadow-outline"
onClick={pasteCode}
+ disabled={isEngineStreamLoading.value}
>
Paste Code
</button>,
@@ -50,7 +50,7 @@
return (
<Modal
title="WebLLM Phi-3 refactored code"
- open={showModal}
+ open={showWebLlmModal.value}
footer={!isLoading ? Footer : null}
width={1000}
onCancel={handleCancel}
@@ -61,7 +61,7 @@ const ChatGptModal = ({
</div>
) : (
<MDEditor.Markdown
- source={text}
+ source={engineOutput.value}
style={{ padding: 10 }}
previewOptions={{
rehypePlugins: [[rehypeSanitize]],
@@ -71,4 +71,4 @@ const ChatGptModal = ({
</Modal>
);
};
- export default ChatGptModal;
+ export default RefactorModal;
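For context, extractCodeFromBlock (unchanged in this PR) is what pasteCode relies on: it pulls every fenced block out of the model's markdown reply and returns them as an array. A quick illustration with a made-up reply string:

const reply = 'Refactored version:\n```js\nconst sum = (a, b) => a + b;\n```';
extractCodeFromBlock(reply); // => ["const sum = (a, b) => a + b;"]

pasteCode then substitutes that result for the selected range in the editor value and resets the engineOutput and showWebLlmModal signals.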
14 changes: 14 additions & 0 deletions src/Components/utils.js
@@ -3,3 +3,17 @@ export const encodeString = (str) =>

export const decodeString = (str) =>
decodeURIComponent(escape(window.atob(str)));

+ export const webLlmEngineInput = (code) => ({
+ messages: [
+ {
+ role: "system",
+ content: `You are a chatbot that can refactor any code.
+ Always return the code block in markdown style with comments about the refactored code.
+ Always suggest the output in the requested language itself with a single code block.`,
+ },
+ { role: "user", content: `Refactor code snippet ${code}` },
+ ],
+ temperature: 0.5,
+ stream: true, // <-- Enable streaming
+ });
24 changes: 19 additions & 5 deletions src/Hooks/utils.js
@@ -19,6 +19,16 @@ export const createAppState = () => {
const startProgress = signal(0);
const endProgress = signal(100);
const isLoading = signal(true);
+ const showWebLlmModal = signal(false);
+ const engineOutput = signal("");
+ const engineStreamLoading = signal(false);

+ const isModelLoading = computed(() => isLoading.value);
+ const percent = computed(() =>
+ Math.floor((startProgress?.value / endProgress?.value) * 100),
+ );
+ const isEngineStreamLoading = computed(() => engineStreamLoading.value);

const webLlmEngine = CreateWebWorkerMLCEngine(
new Worker(new URL("../worker.js", import.meta.url), {
type: "module",
@@ -29,12 +39,16 @@ export const createAppState = () => {
initProgressCallback(progress, startProgress, endProgress, isLoading),
},
);
- const isModelLoading = computed(() => isLoading.value);
- const percent = computed(() =>
- Math.floor((startProgress?.value / endProgress?.value) * 100),
- );

- return { percent, isModelLoading, webLlmEngine };
+ return {
+ percent,
+ isModelLoading,
+ webLlmEngine,
+ showWebLlmModal,
+ engineOutput,
+ engineStreamLoading,
+ isEngineStreamLoading,
+ };
};

export const AppState = createContext();
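Any component can read these signals through the AppState context; useSignals() from @preact/signals-react subscribes the component to whatever signals it reads, which is the same pattern RefactorModal follows above. A minimal consumer sketch, with the component name and import path purely illustrative:

import { useContext } from "react";
import { useSignals } from "@preact/signals-react/runtime";
import { AppState } from "../Hooks/utils";

const StreamStatus = () => {
  useSignals();
  const { isEngineStreamLoading, percent } = useContext(AppState);

  // Re-renders whenever either signal changes.
  return isEngineStreamLoading.value ? (
    <span>Refactoring…</span>
  ) : (
    <span>Model loaded: {percent.value}%</span>
  );
};

export default StreamStatus;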