Assistant Layer
AI provider abstraction for chat completions, tool calling, and streaming.
Design Principles
| Principle | Rationale |
|---|---|
| Provider-agnostic | Same code works with OpenAI, Anthropic, or mock |
| Tool calling built-in | Define tools once, works across providers |
| Streaming first-class | SSE streaming with event types |
| Fluent API | ToolBuilder for readable tool definitions |
| Testable | Mock provider for unit tests |
Providers
| Provider | Import | API Key |
|---|---|---|
| OpenAI | providers/openai | Required |
| Anthropic | providers/anthropic | Required |
| Mock | providers/mock | Not needed |
OpenAI
import "github.com/readysite/readysite/pkg/assistant/providers/openai"
ai, err := openai.New(apiKey)
// With options
ai, err := openai.New(apiKey,
openai.WithBaseURL("https://custom-endpoint.com/v1"), // Azure, proxies
openai.WithHTTPClient(customClient),
)
Anthropic
import "github.com/readysite/readysite/pkg/assistant/providers/anthropic"
ai, err := anthropic.New(apiKey)
// With options
ai, err := anthropic.New(apiKey,
anthropic.WithBaseURL("https://custom-endpoint.com/v1"),
anthropic.WithAPIVersion("2023-06-01"),
)
Mock (Testing)
import "github.com/readysite/readysite/pkg/assistant/providers/mock"
ai := mock.New(mock.Config{
Response: "Hello!", // Static response
ToolCalls: []assistant.ToolCall{...}, // Tool calls to return
StreamDelay: 10 * time.Millisecond, // Delay between stream events
Error: nil, // Error to return
})
Core Types
Backend Interface
type Backend interface {
Chat(ctx context.Context, req ChatRequest) (*ChatResponse, error)
Stream(ctx context.Context, req ChatRequest) (*StreamReader, error)
}
ChatRequest
type ChatRequest struct {
Model string // e.g., "gpt-4o", "claude-sonnet-4-5-20250929"
Messages []Message // Conversation history
Tools []Tool // Available tools
MaxTokens int // Max tokens in response
Temperature float64 // Sampling temperature (0-2)
System string // System prompt
}
ChatResponse
type ChatResponse struct {
Content string // Text content
ToolCalls []ToolCall // Tool calls requested by the model
FinishReason string // "stop", "tool_calls", or "length"
Usage Usage // Token usage
}
type Usage struct {
PromptTokens int
CompletionTokens int
TotalTokens int
}
Messages
Role Constants
assistant.RoleSystem = "system"
assistant.RoleUser = "user"
assistant.RoleAssistant = "assistant"
assistant.RoleTool = "tool"
Constructors
msg := assistant.NewUserMessage("Hello")
msg := assistant.NewSystemMessage("You are helpful.")
msg := assistant.NewAssistantMessage("Hi there!")
msg := assistant.NewAssistantToolCallMessage("Let me help.", toolCalls)
msg := assistant.NewToolResultMessage(toolCallID, resultJSON)
Conversation Flow
messages := []assistant.Message{
assistant.NewSystemMessage("You are a helpful assistant."),
assistant.NewUserMessage("What's the weather?"),
}
resp, err := ai.Chat(ctx, assistant.ChatRequest{
Model: "gpt-4o",
Messages: messages,
})
// If the model called tools, add the response and tool results
if len(resp.ToolCalls) > 0 {
messages = append(messages, assistant.NewAssistantToolCallMessage(resp.Content, resp.ToolCalls))
for _, tc := range resp.ToolCalls {
result := executeToolCall(tc)
messages = append(messages, assistant.NewToolResultMessage(tc.ID, result))
}
// Continue the conversation
resp, err = ai.Chat(ctx, assistant.ChatRequest{
Model: "gpt-4o",
Messages: messages,
})
}
Tool Definitions
ToolBuilder (Fluent API)
tool := assistant.NewTool("create_page", "Create a new page").
String("title", "Page title", true).
String("slug", "URL slug", true).
Bool("published", "Whether published", false).
Build()
Builder Methods
| Method | Type | Description |
|---|---|---|
| String(name, desc, required) | string | Text parameter |
| Int(name, desc, required) | integer | Integer parameter |
| Number(name, desc, required) | number | Float parameter |
| Bool(name, desc, required) | boolean | Boolean parameter |
| Enum(name, desc, values, required) | string | String with allowed values |
| Array(name, desc, itemType, required) | array | Array of items |
| Build() | Tool | Returns the built tool |
Parsing Tool Arguments
for _, tc := range resp.ToolCalls {
switch tc.Name {
case "create_page":
var args struct {
Title string `json:"title"`
Slug string `json:"slug"`
Published bool `json:"published"`
}
if err := tc.ParseArguments(&args); err != nil {
// handle error
}
// Use args.Title, args.Slug, etc.
}
}
Streaming
Basic Streaming
stream, err := ai.Stream(ctx, assistant.ChatRequest{
Model: "gpt-4o",
Messages: messages,
})
if err != nil {
return err
}
defer stream.Close()
// Collect all events into a final response
resp, err := stream.Collect()
Event-by-Event Processing
stream, err := ai.Stream(ctx, req)
defer stream.Close()
for {
event := stream.Next()
if event == nil {
break
}
switch event.Type {
case assistant.EventContentDelta:
fmt.Print(event.Content) // Stream text to user
case assistant.EventToolCallStart:
// New tool call started
case assistant.EventToolCallDelta:
// Tool call arguments chunk
case assistant.EventDone:
// Stream complete, event.Usage may be set
case assistant.EventError:
log.Printf("Stream error: %v", event.Error)
}
}
StreamReader Methods
| Method | Description |
|---|---|
| Next() | Next event (nil when done) |
| Close() | Close the stream |
| Content() | All accumulated text content |
| ToolCalls() | All accumulated tool calls |
| Done() | Whether stream is complete |
| Err() | Any error that occurred |
| Collect() | Collect all events, return *ChatResponse |
Error Handling
Error Types
assistant.ErrNoAPIKey // API key is required
assistant.ErrRateLimited // Rate limited
assistant.ErrContextCanceled // Context canceled
assistant.ErrInvalidModel // Invalid model
assistant.ErrEmptyResponse // Empty response
API Errors
resp, err := ai.Chat(ctx, req)
if err != nil {
var apiErr *assistant.APIError
if errors.As(err, &apiErr) {
log.Printf("API error %d: %s", apiErr.StatusCode, apiErr.Message)
}
if assistant.IsRateLimited(err) {
// Back off and retry
}
if assistant.IsAuthError(err) {
// Invalid API key
}
}
Complete Example
package main
import (
"context"
"fmt"
"log"
"github.com/readysite/readysite/pkg/assistant"
"github.com/readysite/readysite/pkg/assistant/providers/openai"
)
func main() {
	// Construct the OpenAI-backed client; New validates the key and
	// returns an error rather than deferring failure to the first call.
	ai, err := openai.New("sk-...")
	if err != nil {
		log.Fatal(err)
	}
	// Define tools via the fluent ToolBuilder: each String/Enum call adds
	// a parameter (name, description, required), and Build() finalizes
	// the Tool value.
	tools := []assistant.Tool{
		assistant.NewTool("get_weather", "Get current weather").
			String("city", "City name", true).
			Enum("units", "Temperature units", []string{"celsius", "fahrenheit"}, false).
			Build(),
	}
	// Chat with tools: a single blocking completion request. The model
	// may answer directly or request a tool call (see resp.ToolCalls in
	// the conversation-flow example above).
	resp, err := ai.Chat(context.Background(), assistant.ChatRequest{
		Model: "gpt-4o",
		Messages: []assistant.Message{
			assistant.NewUserMessage("What's the weather in Paris?"),
		},
		Tools: tools,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Content)
	// Usage.TotalTokens aggregates prompt + completion tokens for the call.
	fmt.Printf("Tokens used: %d\n", resp.Usage.TotalTokens)
}
File Structure
pkg/assistant/
├── assistant.go # Backend interface, ChatRequest, ChatResponse
├── message.go # Message types and constructors
├── tool.go # Tool, Parameters, ToolCall, ToolBuilder
├── stream.go # StreamEvent, StreamReader
├── errors.go # Error types and helpers
├── assistant_test.go # Tests
└── providers/
├── openai/openai.go # OpenAI implementation
├── anthropic/anthropic.go # Anthropic implementation
└── mock/mock.go # Mock for testing