// chat.go — AI chat conversation controller for the readysite website.
package controllers

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"strings"
	"time"

	"github.com/readysite/readysite/pkg/application"
	"github.com/readysite/readysite/pkg/assistant"
	"github.com/readysite/readysite/website/internal/access"
	"github.com/readysite/readysite/website/internal/assist"
	"github.com/readysite/readysite/website/internal/helpers"
	"github.com/readysite/readysite/website/models"
)

// Limits applied to AI chat conversations.
const (
	// MaxTitleLength is the maximum length for conversation titles.
	MaxTitleLength = 50

	// MaxToolIterations is the maximum number of tool execution iterations per AI response.
	MaxToolIterations = 25
)

// Chat returns the route name and a fresh chat controller instance for
// registration with the application.
func Chat() (string, *ChatController) {
	controller := &ChatController{}
	return "chat", controller
}

// ChatController handles AI chat conversations.
// It embeds application.BaseController for rendering and streaming helpers,
// and is copied per-request via Handle (see below) for request isolation.
type ChatController struct {
	application.BaseController
}

// Setup registers routes.
// Delegates entirely to the embedded BaseController; the chat controller
// needs no additional setup of its own.
func (c *ChatController) Setup(app *application.App) {
	c.BaseController.Setup(app)
}

// Handle implements Controller interface with value receiver for request isolation.
// The value receiver copies the controller, so assigning the request to the
// copy cannot race with other in-flight requests sharing the registered instance.
func (c ChatController) Handle(r *http.Request) application.Controller {
	c.Request = r
	return &c
}

// Conversation returns the current conversation from path or query parameter.
// For workspace views (where path/query may not have conversation ID), use CurrentConversation instead.
// Returns nil when no conversation is resolvable from the request.
func (c *ChatController) Conversation() *models.Conversation {
	return assist.GetConversation(c.Request)
}

// Conversations returns all conversations ordered by most recent.
// NOTE(review): no per-user filtering is visible here — confirm access
// control happens upstream (middleware or assist.GetAllConversations).
func (c *ChatController) Conversations() []*models.Conversation {
	return assist.GetAllConversations()
}

// CurrentConversation returns the current conversation for workspace views.
// Checks path param first, then falls back to user's most recent conversation.
// Returns nil when the request carries no authenticated user.
func (c *ChatController) CurrentConversation() *models.Conversation {
	if user := access.GetUserFromJWT(c.Request); user != nil {
		return assist.GetCurrentConversation(c.Request, user.ID)
	}
	return nil
}

// Messages returns all messages for the current conversation.
// Uses CurrentConversation to work on workspace views without path params.
// An empty/nil result means no conversation or no messages.
func (c *ChatController) Messages() []*models.Message {
	return assist.GetMessages(c.CurrentConversation())
}

// StreamingMessageID returns the ID of the message that should connect to SSE.
// Used by templates to attach an EventSource to the in-flight assistant reply.
func (c *ChatController) StreamingMessageID() string {
	return assist.StreamingMessageID(c.CurrentConversation())
}

// CanUndo returns true if the last AI action can be undone.
// Delegates to assist on the current conversation (nil-safe by delegation).
func (c *ChatController) CanUndo() bool {
	return assist.ConvCanUndo(c.CurrentConversation())
}

// CanRedo returns true if there's an undone action that can be redone.
// Delegates to assist on the current conversation (nil-safe by delegation).
func (c *ChatController) CanRedo() bool {
	return assist.ConvCanRedo(c.CurrentConversation())
}

// AIConfigured returns true if AI is properly configured.
// Templates use this to decide whether to show the chat input at all.
func (c *ChatController) AIConfigured() bool {
	return assist.IsConfigured()
}

// AIProvider returns the current AI provider name.
func (c *ChatController) AIProvider() string {
	return assist.GetProviderName()
}

// Panel returns just the chat panel partial for a conversation (for HTMX swaps).
// Conversation resolution happens in the template via the controller's
// accessor methods (CurrentConversation, Messages, etc.).
func (c *ChatController) Panel(w http.ResponseWriter, r *http.Request) {
	c.Render(w, r, "partials/chat-panel.html", nil)
}

// NewPanel creates a new empty conversation and returns the panel.
//
// The conversation starts with the placeholder title "New Conversation",
// which SendMessage replaces with the first message's text.
func (c *ChatController) NewPanel(w http.ResponseWriter, r *http.Request) {
	user := access.GetUserFromJWT(r)
	if user == nil {
		c.RenderError(w, r, fmt.Errorf("Unauthorized"))
		return
	}

	conv := &models.Conversation{
		UserID: user.ID,
		Title:  "New Conversation",
	}

	convID, err := models.Conversations.Insert(conv)
	if err != nil {
		// Log the underlying error (consistent with the other handlers in
		// this file) but surface only a generic message to the client.
		log.Printf("[CHAT] NewPanel: failed to create conv err=%v", err)
		c.RenderError(w, r, fmt.Errorf("Failed to create conversation"))
		return
	}

	log.Printf("[CHAT] NewPanel: created conv=%s", convID)
	// Expose the new ID as a path value so CurrentConversation finds it.
	r.SetPathValue("id", convID)
	c.Render(w, r, "partials/chat-panel.html", nil)
}

// NewConversationWithMessage creates a new conversation with an initial message.
// The opening message doubles as the conversation title (truncated), and an
// empty pending assistant message is queued for the SSE stream to fill in.
func (c *ChatController) NewConversationWithMessage(w http.ResponseWriter, r *http.Request) {
	user := access.GetUserFromJWT(r)
	if user == nil {
		c.RenderError(w, r, fmt.Errorf("Unauthorized"))
		return
	}

	content := r.FormValue("content")
	if content == "" {
		c.RenderError(w, r, fmt.Errorf("Message content required"))
		return
	}

	log.Printf("[CHAT] NewConversationWithMessage: creating with content len=%d", len(content))

	// The first message becomes the conversation title, truncated to fit.
	conversation := &models.Conversation{
		UserID: user.ID,
		Title:  helpers.TruncateTitle(content, MaxTitleLength),
	}

	conversationID, err := models.Conversations.Insert(conversation)
	if err != nil {
		log.Printf("[CHAT] NewConversationWithMessage: failed to create conv err=%v", err)
		c.RenderError(w, r, fmt.Errorf("Failed to create conversation"))
		return
	}

	// Persist the user's opening message.
	userMessage := &models.Message{
		ConversationID: conversationID,
		Role:           assist.RoleUser,
		Content:        content,
		Status:         "complete",
	}
	if _, err := models.Messages.Insert(userMessage); err != nil {
		log.Printf("[CHAT] NewConversationWithMessage: failed to save user message err=%v", err)
		c.RenderError(w, r, fmt.Errorf("Failed to save message"))
		return
	}

	// Queue an empty assistant reply; the Stream handler picks it up by its
	// "pending" status and streams the AI response into it.
	assistantMessage := &models.Message{
		ConversationID: conversationID,
		Role:           assist.RoleAssistant,
		Content:        "",
		Status:         "pending",
	}
	if _, err := models.Messages.Insert(assistantMessage); err != nil {
		log.Printf("[CHAT] NewConversationWithMessage: failed to create assistant message err=%v", err)
		c.RenderError(w, r, fmt.Errorf("Failed to create response"))
		return
	}

	log.Printf("[CHAT] NewConversationWithMessage: created conv=%s user_msg=%s assistant_msg=%s", conversationID, userMessage.ID, assistantMessage.ID)

	// Expose the new conversation ID as a path value so CurrentConversation
	// resolves it, then render the full panel.
	r.SetPathValue("id", conversationID)
	c.Render(w, r, "partials/chat-panel.html", nil)
}

// SendMessage adds a user message and triggers AI response.
// Returns partial HTML with inline EventSource (no full page refresh).
//
// On success the response partial contains the saved user message and a
// pending assistant message; the client opens the Stream SSE endpoint to
// receive the AI reply.
func (c *ChatController) SendMessage(w http.ResponseWriter, r *http.Request) {
	// Rate limit by client IP. When behind a proxy, X-Forwarded-For may be a
	// comma-separated chain ("client, proxy1, proxy2"); only the first entry
	// identifies the client. Using the raw header as the key let a client
	// dodge the limiter by varying the proxy chain.
	// NOTE(review): X-Forwarded-For is client-controlled unless a trusted
	// proxy overwrites it — confirm the deployment always sits behind one.
	key := r.RemoteAddr
	if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
		first, _, _ := strings.Cut(xff, ",")
		key = strings.TrimSpace(first)
	}
	if !access.ChatLimiter.Allow(key) {
		c.RenderError(w, r, fmt.Errorf("Too many messages. Please wait a moment."))
		return
	}

	convID := r.PathValue("id")
	log.Printf("[CHAT] SendMessage: conv=%s", convID)

	conv, err := models.Conversations.Get(convID)
	if err != nil {
		log.Printf("[CHAT] SendMessage: conv not found err=%v", err)
		c.RenderError(w, r, fmt.Errorf("Conversation not found"))
		return
	}

	content := r.FormValue("content")
	if content == "" {
		c.RenderError(w, r, fmt.Errorf("Message content required"))
		return
	}

	// Save user message.
	userMsg := &models.Message{
		ConversationID: convID,
		Role:           assist.RoleUser,
		Content:        content,
		Status:         "complete",
	}
	if _, err := models.Messages.Insert(userMsg); err != nil {
		log.Printf("[CHAT] SendMessage: failed to save user message err=%v", err)
		c.RenderError(w, r, fmt.Errorf("Failed to save message"))
		return
	}

	// Create pending assistant message for the Stream handler to fill in.
	assistantMsg := &models.Message{
		ConversationID: convID,
		Role:           assist.RoleAssistant,
		Content:        "",
		Status:         "pending",
	}
	if _, err := models.Messages.Insert(assistantMsg); err != nil {
		log.Printf("[CHAT] SendMessage: failed to create assistant message err=%v", err)
		c.RenderError(w, r, fmt.Errorf("Failed to create response"))
		return
	}

	// Update conversation (title if first message, always update UpdatedAt).
	if conv.Title == "New Conversation" {
		conv.Title = helpers.TruncateTitle(content, MaxTitleLength)
	}
	// Always update the conversation to refresh UpdatedAt for GetCurrentConversation.
	models.Conversations.Update(conv)

	log.Printf("[CHAT] SendMessage: created user_msg=%s assistant_msg=%s", userMsg.ID, assistantMsg.ID)

	// Return partial HTML with inline EventSource (NOT full refresh).
	c.Render(w, r, "partials/chat-messages-new.html", map[string]any{
		"UserMessage":      userMsg,
		"AssistantMessage": assistantMsg,
		"ConversationID":   convID,
	})
}

// Stream handles SSE streaming of AI response with tool loop.
//
// It locates the pending assistant message created by SendMessage /
// NewConversationWithMessage, marks it "streaming", and then loops: stream
// model output to the client, execute any requested tool calls, feed the
// results back into the message history, and repeat — up to MaxToolIterations
// rounds. Every terminal path persists the accumulated content/tool calls on
// the assistant message and ends the SSE stream with a "done" event.
func (c *ChatController) Stream(w http.ResponseWriter, r *http.Request) {
	convID := r.PathValue("id")
	log.Printf("[CHAT] Stream: starting conv=%s", convID)

	// Always start SSE stream first to ensure proper content-type
	// (all errors below are reported over the stream, not as HTTP errors).
	stream := c.BaseController.Stream(w)

	conv, err := models.Conversations.Get(convID)
	if err != nil {
		log.Printf("[CHAT] Stream: conv not found err=%v", err)
		stream.Send("error", "Conversation not found")
		stream.Send("done", "")
		return
	}

	// Find the pending or streaming assistant message. "streaming" is included
	// so a client reconnect can pick up a message a dropped connection left
	// mid-flight.
	assistantMsg, err := models.Messages.First("WHERE ConversationID = ? AND Status IN ('pending', 'streaming') ORDER BY CreatedAt DESC", convID)
	if err != nil || assistantMsg == nil {
		log.Printf("[CHAT] Stream: no pending message for conv=%s err=%v", convID, err)
		stream.Send("error", "No pending message - response may have already completed")
		stream.Send("done", "")
		return
	}

	log.Printf("[CHAT] Stream: found msg=%s status=%s", assistantMsg.ID, assistantMsg.Status)

	// Update status to streaming so duplicate streams / reconnects can see it.
	assistantMsg.Status = "streaming"
	models.Messages.Update(assistantMsg)

	// Get AI assistant
	ai, aiErr := assist.GetAssistant()

	// Check if AI is configured
	if aiErr != nil {
		log.Printf("[CHAT] Stream: AI not configured err=%v", aiErr)
		assistantMsg.Status = "error"
		assistantMsg.Content = aiErr.Error()
		models.Messages.Update(assistantMsg)
		stream.Send("error", aiErr.Error())
		stream.Send("done", "")
		return
	}

	ctx := r.Context()
	executor := assist.NewExecutor(assistantMsg)

	// Build initial message history
	stream.Send("status", "Reading conversation history...")
	messages := assist.BuildMessages(conv)

	// Get model - check for per-conversation override
	model := assist.GetModel()
	if convModel := assist.GetConversationModel(conv); convModel != "" {
		model = convModel
	}

	// Tool loop - limited iterations to prevent infinite loops.
	// fullContent accumulates text across ALL iterations; allToolCalls
	// accumulates every executed tool call for persistence on the message.
	var allToolCalls []assist.ToolCall
	var fullContent string

	for iteration := 0; iteration < MaxToolIterations; iteration++ {
		// Check if context is cancelled (client disconnected); persist what we
		// have so far as "cancelled" rather than losing it.
		select {
		case <-ctx.Done():
			log.Printf("[CHAT] Stream: context cancelled, aborting")
			assistantMsg.Content = fullContent
			if len(allToolCalls) > 0 {
				assist.SetToolCalls(assistantMsg, allToolCalls)
			}
			assistantMsg.Status = "cancelled"
			models.Messages.Update(assistantMsg)
			return
		default:
		}

		log.Printf("[CHAT] Stream: iteration=%d messages=%d", iteration, len(messages))

		// Send thinking status with variety
		switch iteration {
		case 0:
			stream.Send("status", "Thinking...")
		case 1:
			stream.Send("status", "Analyzing results...")
		case 2:
			stream.Send("status", "Processing...")
		default:
			stream.Send("status", "Working...")
		}

		// Start AI request with retry logic for rate limits
		var aiStream *assistant.StreamReader
		var err error
		for retry := 0; retry < 3; retry++ {
			aiStream, err = ai.Stream(ctx, assistant.ChatRequest{
				Model:     model,
				System:    assist.BuildSystemPrompt(conv),
				Messages:  messages,
				Tools:     assist.All(),
				MaxTokens: 4096,
			})
			if err == nil {
				break
			}

			// Check if it's a rate limit error (429)
			if apiErr, ok := err.(*assistant.APIError); ok && apiErr.StatusCode == 429 {
				// Quadratic backoff: 10*(retry+1)^2 = 10s, 40s, 90s.
				// NOTE(review): the final (retry==2) sleep is wasted — the loop
				// exits right after it without another attempt. Also this sleep
				// does not observe ctx cancellation.
				backoff := time.Duration(10*(retry+1)*(retry+1)) * time.Second
				log.Printf("[CHAT] Stream: rate limited, waiting %v before retry %d/3", backoff, retry+1)
				stream.Send("status", fmt.Sprintf("Rate limited, waiting %v...", backoff))
				time.Sleep(backoff)
				continue
			}
			break // Non-rate-limit error, don't retry
		}

		if err != nil {
			log.Printf("[CHAT] Stream: failed to start AI stream err=%v", err)
			assistantMsg.Status = "error"
			assistantMsg.Content = "Failed to start AI stream: " + err.Error()
			models.Messages.Update(assistantMsg)
			stream.Send("error", "Failed to start AI stream: "+err.Error())
			stream.Send("done", "")
			return
		}

		// Process stream events for this iteration
		var iterationContent string
		var toolCalls []assist.ToolCall
		// Map Anthropic's content block index to our toolCalls array index
		toolIndexMap := make(map[int]int)

		for {
			event := aiStream.Next()
			if event == nil {
				break
			}

			switch event.Type {
			case assistant.EventContentDelta:
				stream.Send("content", event.Content)
				iterationContent += event.Content
				fullContent += event.Content

			case assistant.EventToolCallStart:
				log.Printf("[CHAT] Stream: tool_call_start name=%s index=%d", event.ToolCall.Name, event.ToolIndex)
				toolCall := assist.ToolCall{
					ID:   event.ToolCall.ID,
					Name: event.ToolCall.Name,
				}
				localIndex := len(toolCalls)
				toolCalls = append(toolCalls, toolCall)
				toolIndexMap[event.ToolIndex] = localIndex
				// Send tool_start with JSON for better UI.
				// NOTE(review): JSON built via Sprintf is not escaped — a tool
				// name/ID containing a quote would emit invalid JSON. Confirm
				// tool names are always plain identifiers.
				stream.Send("tool_start", fmt.Sprintf(`{"name":"%s","id":"%s"}`, event.ToolCall.Name, event.ToolCall.ID))

			case assistant.EventToolCallDelta:
				// Arguments arrive as streamed JSON fragments; append them to
				// the matching tool call found via the index map.
				if localIdx, ok := toolIndexMap[event.ToolIndex]; ok && localIdx < len(toolCalls) {
					toolCalls[localIdx].Arguments += event.ToolCall.Arguments
					log.Printf("[CHAT] Stream: tool_call_delta index=%d localIdx=%d args_len=%d", event.ToolIndex, localIdx, len(toolCalls[localIdx].Arguments))
				}

			case assistant.EventDone:
				// Close this stream
				aiStream.Close()

				// If no tool calls, we're done — persist and finish.
				if len(toolCalls) == 0 {
					log.Printf("[CHAT] Stream: complete, no tool calls")
					assistantMsg.Content = fullContent
					if len(allToolCalls) > 0 {
						assist.SetToolCalls(assistantMsg, allToolCalls)
					}
					assistantMsg.Status = "complete"
					models.Messages.Update(assistantMsg)
					// Update conversation to refresh UpdatedAt
					models.Conversations.Update(conv)

					// Trigger summarization in background if needed
					if assist.ShouldSummarize(conv) {
						go func() {
							if err := assist.CreateSummary(conv); err != nil {
								log.Printf("[CHAT] Summarization failed: %v", err)
							}
						}()
					}

					stream.Send("done", "")
					return
				}

				// Execute tool calls
				log.Printf("[CHAT] Stream: executing %d tool calls", len(toolCalls))

				// Build tool call messages for API
				apiToolCalls := make([]assistant.ToolCall, len(toolCalls))
				for i, tc := range toolCalls {
					apiToolCalls[i] = assistant.ToolCall{
						ID:        tc.ID,
						Name:      tc.Name,
						Arguments: tc.Arguments,
					}
				}

				// Add assistant message with tool calls to conversation
				messages = append(messages, assistant.NewAssistantToolCallMessage(iterationContent, apiToolCalls))

				// Execute each tool and add results
				for i, tc := range toolCalls {
					log.Printf("[CHAT] Stream: executing tool=%s", tc.Name)
					stream.Send("status", fmt.Sprintf("Running %s...", humanizeToolName(tc.Name)))
					stream.Send("tool_executing", fmt.Sprintf(`{"name":"%s","id":"%s"}`, tc.Name, tc.ID))

					// Validate that arguments are valid JSON before executing
					args := tc.Arguments
					if args == "" {
						args = "{}" // Default to empty object for tools with no required params
					}
					if !json.Valid([]byte(args)) {
						log.Printf("[CHAT] Stream: tool=%s has invalid JSON arguments: %s", tc.Name, args)
						toolCalls[i].Error = "incomplete arguments (streaming may have been interrupted)"
						stream.Send("tool_error", fmt.Sprintf(`{"name":"%s","id":"%s","error":"incomplete arguments"}`, tc.Name, tc.ID))
						// Add error result to messages for next iteration
						messages = append(messages, assistant.NewToolResultMessage(tc.ID, "Error: tool call had incomplete/invalid JSON arguments - please retry"))
						continue
					}

					result, err := executor.Execute(assistant.ToolCall{
						ID:        tc.ID,
						Name:      tc.Name,
						Arguments: args,
					})

					if err != nil {
						log.Printf("[CHAT] Stream: tool=%s error=%v", tc.Name, err)
						toolCalls[i].Error = err.Error()
						result = "Error: " + err.Error()
						// NOTE(review): err.Error() is interpolated unescaped
						// into the JSON payload; a message containing a quote
						// breaks the client-side parse — confirm acceptable.
						stream.Send("tool_error", fmt.Sprintf(`{"name":"%s","id":"%s","error":"%s"}`, tc.Name, tc.ID, err.Error()))
					} else {
						log.Printf("[CHAT] Stream: tool=%s success", tc.Name)
						toolCalls[i].Result = result
						stream.Send("tool_done", fmt.Sprintf(`{"name":"%s","id":"%s"}`, tc.Name, tc.ID))

						// Handle navigation: navigate_user results carry a URL
						// the client should move to.
						if tc.Name == "navigate_user" {
							var navResult struct {
								URL string `json:"url"`
							}
							if json.Unmarshal([]byte(result), &navResult) == nil && navResult.URL != "" {
								log.Printf("[CHAT] Stream: navigate url=%s", navResult.URL)
								stream.Send("navigate", navResult.URL)
							}
						}
					}

					// Add tool result to messages for next iteration
					messages = append(messages, assistant.NewToolResultMessage(tc.ID, result))
				}

				// Track all tool calls
				allToolCalls = append(allToolCalls, toolCalls...)

				// Delay before next API call to avoid rate limiting (30k tokens/min limit)
				// 5 second base delay to stay under token limits.
				// NOTE(review): this sleep does not observe ctx cancellation;
				// a disconnected client still pays the delay before the next
				// iteration's ctx check.
				stream.Send("status", "Preparing next step...")
				time.Sleep(5 * time.Second)

				// Continue to next iteration (will make another API call).
				// goto jumps out of the inner event loop and resumes the
				// outer for loop in one step.
				goto nextIteration

			case assistant.EventError:
				log.Printf("[CHAT] Stream: EventError err=%v", event.Error)
				aiStream.Close()
				assistantMsg.Status = "error"
				assistantMsg.Content = "Error: " + event.Error.Error()
				models.Messages.Update(assistantMsg)
				stream.Send("error", event.Error.Error())
				stream.Send("done", "")
				return
			}
		}

		// Reached only if the provider stream ended without an EventDone
		// (e.g. connection closed early): stop looping and finalize below.
		aiStream.Close()
		break

	nextIteration:
		continue
	}

	// Final update: reached after an early stream end or after exhausting
	// MaxToolIterations; persist whatever content/tool calls accumulated.
	assistantMsg.Content = fullContent
	if len(allToolCalls) > 0 {
		assist.SetToolCalls(assistantMsg, allToolCalls)
	}
	assistantMsg.Status = "complete"
	models.Messages.Update(assistantMsg)
	// Update conversation to refresh UpdatedAt for GetCurrentConversation
	models.Conversations.Update(conv)
	log.Printf("[CHAT] Stream: final update msg=%s", assistantMsg.ID)

	// Trigger summarization in background if needed
	if assist.ShouldSummarize(conv) {
		go func() {
			if err := assist.CreateSummary(conv); err != nil {
				log.Printf("[CHAT] Summarization failed: %v", err)
			}
		}()
	}

	stream.Send("done", "")
}

// Delete deletes a conversation and all its messages.
// Cleanup cascades mutations -> messages -> conversation; individual delete
// errors are ignored (best-effort), then the user is redirected to /admin.
func (c *ChatController) Delete(w http.ResponseWriter, r *http.Request) {
	conversationID := r.PathValue("id")
	log.Printf("[CHAT] Delete: conv=%s", conversationID)

	conversation, err := models.Conversations.Get(conversationID)
	if err != nil {
		c.RenderError(w, r, fmt.Errorf("Conversation not found"))
		return
	}

	// Remove each message along with any mutations it produced.
	messages, _ := conversation.Messages()
	for _, message := range messages {
		mutations, _ := message.Mutations()
		for _, mutation := range mutations {
			models.Mutations.Delete(mutation)
		}
		models.Messages.Delete(message)
	}

	models.Conversations.Delete(conversation)
	log.Printf("[CHAT] Delete: deleted conv=%s", conversationID)
	c.Redirect(w, r, "/admin")
}

// Undo undoes the last AI action.
// Delegates the actual rollback to assist, then refreshes the page so the
// client reflects the reverted state.
func (c *ChatController) Undo(w http.ResponseWriter, r *http.Request) {
	convID := r.PathValue("id")
	log.Printf("[CHAT] Undo: conv=%s", convID)

	conv, err := models.Conversations.Get(convID)
	if err != nil {
		c.RenderError(w, r, fmt.Errorf("Conversation not found"))
		return
	}

	assist.UndoConversation(conv)
	c.Refresh(w, r)
}

// Redo redoes an undone action.
// Delegates to assist, then refreshes the page so the client reflects the
// re-applied state.
func (c *ChatController) Redo(w http.ResponseWriter, r *http.Request) {
	convID := r.PathValue("id")
	log.Printf("[CHAT] Redo: conv=%s", convID)

	conv, err := models.Conversations.Get(convID)
	if err != nil {
		c.RenderError(w, r, fmt.Errorf("Conversation not found"))
		return
	}

	assist.RedoConversation(conv)
	c.Refresh(w, r)
}

// humanizeToolName converts a tool name like "create_page" to "creating page"
// for user-facing status messages. Known tools get a curated label; anything
// else falls back to the snake_case name with underscores turned into spaces.
func humanizeToolName(name string) string {
	labels := map[string]string{
		"create_page":       "creating page",
		"update_page":       "updating page",
		"delete_page":       "deleting page",
		"create_collection": "creating collection",
		"update_collection": "updating collection",
		"delete_collection": "deleting collection",
		"create_document":   "creating document",
		"update_document":   "updating document",
		"delete_document":   "deleting document",
		"list_pages":        "listing pages",
		"get_page":          "reading page",
		"list_collections":  "listing collections",
		"get_collection":    "reading collection",
		"list_documents":    "listing documents",
		"get_document":      "reading document",
		"list_files":        "listing files",
		"get_file":          "reading file",
		"read_file":         "reading file content",
		"navigate_user":     "navigating",
		"get_settings":      "reading settings",
		"update_settings":   "updating settings",
	}
	if label, ok := labels[name]; ok {
		return label
	}
	// Unknown tool: best-effort humanization of the snake_case name.
	return strings.ReplaceAll(name, "_", " ")
}
