package main

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/charmbracelet/bubbles/textarea"
	"github.com/charmbracelet/bubbles/viewport"
	tea "github.com/charmbracelet/bubbletea"
	"github.com/charmbracelet/lipgloss"
	"github.com/muesli/reflow/wordwrap"
	"github.com/teilomillet/gollm"
)

// --- STYLING ---
var (
	// Styles for chat messages
	senderStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("5"))            // User (Purple)
	botStyle    = lipgloss.NewStyle().Foreground(lipgloss.Color("6"))            // AI (Cyan)
	errorStyle  = lipgloss.NewStyle().Foreground(lipgloss.Color("9")).Bold(true) // Error messages

	// A slight border for the chat viewport
	viewportStyle = lipgloss.NewStyle().
			Border(lipgloss.RoundedBorder()).
			BorderForeground(lipgloss.Color("8")). // Gray
			Padding(1)
)

// StartChat runs the interactive TUI chat session over the project summary
// held in buf, then saves the conversation to disk when the user quits.
func StartChat(buf *bytes.Buffer) {
	// Create and run the Bubble Tea program.
	// tea.WithAltScreen() provides a full-window TUI experience.
	p := tea.NewProgram(initialModel(NewAI(), buf.String()), tea.WithAltScreen(), tea.WithMouseCellMotion())

	finalModel, err := p.Run()
	if err != nil {
		log.Fatalf("❌ Oh no, there's been an error: %v", err)
	}

	// More than one message means there was a conversation (the welcome
	// message plus at least one exchange), so persist the chat log.
	if m, ok := finalModel.(model); ok && len(m.messages) > 1 {
		// Create a timestamped filename.
		timestamp := time.Now().Format("2006-01-02_15-04-05")
		filename := fmt.Sprintf("chatlog_%s.md", timestamp)

		var output bytes.Buffer
		output.WriteString("# Summarize Chat Log " + timestamp + "\n\n")
		for _, message := range m.messages {
			output.WriteString(message)
			output.WriteString("\n")
		}

		// Write the chat history to the configured output directory.
		outPath := filepath.Join(*figs.String(kOutputDir), filename)
		if writeErr := os.WriteFile(outPath, output.Bytes(), 0644); writeErr != nil {
			fmt.Printf("\n❌ Could not save chat log: %v\n", writeErr)
		} else {
			fmt.Printf("\n📝 Chat log saved to %s\n", outPath)
		}
	}
}

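// Ignoring the ANSI color codes that lipgloss embeds in the rendered lines,
// a saved chat log looks roughly like this (illustrative content only):
//
//	# Summarize Chat Log 2006-01-02_15-04-05
//
//	You: how do I build this project?
//	Summarize AI: Commander, run `go build ./...` from the repository root.
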
// --- BUBBLETEA MESSAGES ---
// We use custom messages to communicate between our async LLM calls and the UI.

// aiResponseMsg is sent when the AI has successfully generated a response.
type aiResponseMsg string

// errorMsg is sent when an error occurs during the AI call.
type errorMsg struct{ err error }

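// A tea.Cmd is just a func() tea.Msg that the Bubble Tea runtime executes off
// the UI loop; whatever message it returns is delivered back to Update. A
// minimal sketch of the pattern used below (pingCmd is a hypothetical helper,
// not part of this file):
//
//	func pingCmd() tea.Cmd {
//		return func() tea.Msg { return aiResponseMsg("pong") }
//	}
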
// --- BUBBLETEA MODEL ---
// The model is the single source of truth for the state of the application.
type model struct {
	llm          gollm.LLM // The gollm client interface value (not a pointer to it).
	viewport     viewport.Model
	textarea     textarea.Model
	messages     []string // Rendered chat lines shown in the viewport.
	summary      string   // The project summary the chat is grounded in.
	isGenerating bool     // True while a request to the LLM is in flight.
	err          error
	ctx          context.Context
	chatHistory  []string // Raw conversation turns passed to the LLM as context.
}

// initialModel creates the starting state of the application. The llm
// parameter is the gollm.LLM interface value.
func initialModel(llm gollm.LLM, summary string) model {
	if llm == nil {
		errMsg := "LLM is nil. Please try again later."
		return model{
			llm:          nil,
			messages:     []string{errorStyle.Render(errMsg)},
			chatHistory:  []string{},
			isGenerating: false,
			err:          errors.New("nil LLM"),
			ctx:          context.Background(),
		}
	}

	// Configure the text area for user input.
	ta := textarea.New()
	ta.Placeholder = "Send a message... (press Enter to send, Esc to quit)"
	ta.Focus()
	ta.Prompt = "┃ "
	ta.SetHeight(1)
	// Remove the default behavior of Enter creating a new line.
	ta.KeyMap.InsertNewline.SetEnabled(false)

	// The viewport is the scrolling area for the chat history.
	vp := viewport.New(0, 0) // Width and height are set dynamically on the first WindowSizeMsg.

	if len(summary) == 0 {
		errMsg := "No project summary available. Please provide a valid summary to start the chat."
		return model{
			llm:          llm,
			textarea:     ta,
			viewport:     vp,
			summary:      summary,
			messages:     []string{errorStyle.Render(errMsg)},
			chatHistory:  []string{},
			isGenerating: false,
			err:          errors.New("empty summary"),
			ctx:          context.Background(),
		}
	}

	msg := fmt.Sprintf("Welcome to Summarize AI Chat! We've analyzed your project workspace and are ready to chat with you about %d bytes!", len(summary))

	return model{
		llm:          llm,
		textarea:     ta,
		viewport:     vp,
		summary:      summary,
		messages:     []string{msg},
		chatHistory:  []string{},
		isGenerating: false,
		err:          nil,
		ctx:          context.Background(),
	}
}

// generateResponseCmd is a Bubble Tea command that calls the LLM in a goroutine,
// so the UI never blocks while waiting for the AI. The command receives a copy
// of the model: nothing it mutates here is persisted, so the prompt context is
// rebuilt from scratch on every turn and durable state lives in Update.
func (m model) generateResponseCmd() tea.Cmd {
	return func() tea.Msg {
		userInput := m.textarea.Value()

		// Build the system context: preamble, breaker marker, an optional replay
		// of the chat log, and finally the project summary itself.
		var wc strings.Builder
		breaker := "---ARM-GO-SUMMARIZE-BREAK-POINT---"
		wc.WriteString("Your name is Summarize in this engagement. This is a comprehensive one-page rendering of the " +
			"entire directory (recursively), limited to a specific subset of files by extension choice and a strings.Contains() avoid list, " +
			"that is used to generate the following summary.\n\n" +
			"You are communicating with the user and shall refer to them as Commander. You are speaking to them in a " +
			"Go Bubble Tea TUI chat terminal that is ")
		wc.WriteString(strconv.Itoa(m.viewport.Width))
		wc.WriteString(" (int) wide and ")
		wc.WriteString(strconv.Itoa(m.viewport.Height))
		wc.WriteString(" (int) tall with ")
		wc.WriteString(strconv.Itoa(m.viewport.VisibleLineCount()))
		wc.WriteString(" (int) visible lines in the viewport. Each of your responses should fit in the terminal " +
			"window. Be aware that your response will be formatted using wordwrap.String(<message>, m.viewport.Width) in the Bubble Tea TUI, so ensure your message fits within the viewport width.\n\n" +
			"The user expects you to be professional and to keep focused on the language that you detect from the summary.\n" +
			"Be prepared to answer questions about security, best practices, and any security concerns that you have over " +
			"the code. However, do not get distracted. Always follow the lead of the DevOps engineer. Do not be afraid to " +
			"offend. Your brutal honesty is welcome here and iron sharpens iron. Here is the summary now:\n" +
			breaker + "\n")

		if len(m.chatHistory) > 1 {
			// A conversation is already underway: replay the visible chat log so
			// the model can build on its previous answers.
			wc.WriteString("You are now continuing this conversation. This is the chat log:\n")
			for i, v := range m.messages {
				fmt.Fprintf(&wc, "line %d: %s\n", i+1, v)
			}
			wc.WriteString("\nThe summarized project is:\n")
		}

		wc.WriteString(m.summary)
		wc.WriteString("\n")

		var systemPrompt strings.Builder
		systemPrompt.WriteString("This is a summary of a project; respond to user prompts using the contents of this project.\n\n")
		systemPrompt.WriteString(wc.String())
		systemPrompt.WriteString("\n")

		prompt := gollm.NewPrompt(userInput,
			gollm.WithContext(strings.Join(m.chatHistory, "\n")),
			gollm.WithSystemPrompt(systemPrompt.String(), gollm.CacheTypeEphemeral),
			gollm.WithMaxLength(7777),
			gollm.WithDirectives(
				"Be concise and offer complete solutions",
				"Act as Commander Data from the USS Starship Enterprise acting as an AI Agent assisting the user",
				"Refer to the user as Commander",
				"Speak as if you were on a Military Base as a member of the USS Starship Enterprise",
				"Speak as if you are on duty with fellow crew mates",
				"When replying to followup requests, build on your previous answer",
				"When a mistake is identified by the user, use the full previous response to modify and return",
				"Do not be afraid to offend and always give an honest answer in as few words as possible",
			),
			gollm.WithOutput(
				fmt.Sprintf("Do not apply any formatting to the output text except for line breaks and spaces. "+
					"Commands and code should be indented by 4 spaces on the left and right side of the line, and "+
					"the text will render inside of a Go Bubble Tea TUI window that is %d wide and %d tall.",
					m.viewport.Width-5, m.viewport.Height-5),
			),
		)

		response, err := m.llm.Generate(m.ctx, prompt)
		if err != nil {
			return errorMsg{err} // On error, return an error message.
		}

		return aiResponseMsg(response + "\n\n") // On success, return the AI's response.
	}
}

// --- BUBBLETEA LIFECYCLE ---

// Init is called once when the program starts. It can return an initial command.
func (m model) Init() tea.Cmd {
	return textarea.Blink // Start with a blinking cursor in the textarea.
}

// Update is the core of the application. It's called whenever a message (event) occurs.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var (
		taCmd tea.Cmd
		vpCmd tea.Cmd
	)

	// Handle updates for the textarea and viewport components.
	m.textarea, taCmd = m.textarea.Update(msg)
	m.viewport, vpCmd = m.viewport.Update(msg)

	switch msg := msg.(type) {
	// Handle key presses
	case tea.KeyMsg:
		switch msg.Type {
		case tea.KeyCtrlC, tea.KeyEsc:
			return m, tea.Quit
		case tea.KeyEnter:
			// Don't send if the AI is already working or the input is empty.
			if m.isGenerating || m.textarea.Value() == "" {
				return m, nil
			}

			// Add the user's message to the visible history and to the prompt
			// context, then set the generating flag.
			userInput := m.textarea.Value()
			m.messages = append(m.messages, senderStyle.Render("You: ")+userInput)
			m.chatHistory = append(m.chatHistory, userInput)
			m.isGenerating = true
			m.err = nil // Clear any previous error.

			// Create the command to call the LLM (it captures a copy of the
			// model as it stands right now), then reset the input.
			cmd := m.generateResponseCmd()
			m.textarea.Reset()
			m.viewport.SetContent(wordwrap.String(strings.Join(m.messages, "\n"), m.viewport.Width))
			m.viewport.GotoBottom() // Scroll to the latest message.

			return m, cmd
		}

	// Handle window resizing
	case tea.WindowSizeMsg:
		// Adjust the layout to the new window size. lipgloss styles are values,
		// so the resized style must be assigned back.
		viewportStyle = viewportStyle.Width(msg.Width - 2)   // Subtract border width
		viewportStyle = viewportStyle.Height(msg.Height - 4) // Subtract textarea, help text, and border
		m.viewport.Width = msg.Width - 2
		m.viewport.Height = msg.Height - 4
		m.textarea.SetWidth(msg.Width)
		m.viewport.SetContent(wordwrap.String(strings.Join(m.messages, "\n"), m.viewport.Width)) // Re-render content

	// Handle the AI's response
	case aiResponseMsg:
		m.isGenerating = false
		m.messages = append(m.messages, botStyle.Render("Summarize AI: ")+string(msg))
		m.chatHistory = append(m.chatHistory, string(msg))
		m.viewport.SetContent(wordwrap.String(strings.Join(m.messages, "\n"), m.viewport.Width))
		m.viewport.GotoBottom()

	// Handle any errors from the AI call
	case errorMsg:
		m.isGenerating = false
		m.err = msg.err
	}

	return m, tea.Batch(taCmd, vpCmd) // Return any commands from the components.
}

// View renders the UI. It's called after every Update.
func (m model) View() string {
	var bottomLine string
	if m.isGenerating {
		bottomLine = "🤔 Thinking..."
	} else if m.err != nil {
		bottomLine = errorStyle.Render(fmt.Sprintf("Error: %v", m.err))
	} else {
		bottomLine = m.textarea.View()
	}

	// Join the viewport and the bottom line (textarea or status) vertically.
	return lipgloss.JoinVertical(
		lipgloss.Left,
		viewportStyle.Render(m.viewport.View()),
		bottomLine,
	)
}
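
// Example wiring — a minimal sketch only. It assumes NewAI, figs, and
// kOutputDir are provided elsewhere in this package, and generateSummary is a
// hypothetical producer of the summary text handed to StartChat:
//
//	func main() {
//		var buf bytes.Buffer
//		buf.WriteString(generateSummary()) // hypothetical: build the project summary first
//		StartChat(&buf)
//	}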