I happen to be catching up on season two of the SAC TV series lately, and the Ghost in the Shell setting suits this project's background well, so the new repository is named GhOst.
0x01 Implementing Command-Line LLM API Calls
OK, let's first initialize the project structure. The goal of the first phase is command-line interaction with the LLM API. The planned command is ghost, as in ghost -p "hello", with viper managing the API and key configuration. The test API is http://localhost:3000/v1 and the test key is sk-Ak627xIaxSfgaNNA96aeRdLijcNQyK7HEaafC12HDQRCgNaN.
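To make that concrete, here is a minimal sketch of the viper plumbing. The config path $HOME/.ghost/config.yaml and the key names base_url, api_key, and model are placeholders of mine, not necessarily what the repo ends up using:

package config

import "github.com/spf13/viper"

// Config holds the LLM endpoint settings loaded from the config file.
type Config struct {
    BaseURL string
    APIKey  string
    Model   string
}

// Load reads $HOME/.ghost/config.yaml, falling back to the local test endpoint.
func Load() (*Config, error) {
    viper.SetConfigName("config")
    viper.SetConfigType("yaml")
    viper.AddConfigPath("$HOME/.ghost")
    viper.SetDefault("base_url", "http://localhost:3000/v1")

    if err := viper.ReadInConfig(); err != nil {
        return nil, err
    }
    return &Config{
        BaseURL: viper.GetString("base_url"),
        APIKey:  viper.GetString("api_key"),
        Model:   viper.GetString("model"),
    }, nil
}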
// model is the state of our TUI application.
type model struct {
    textarea textarea.Model
    content  string
    loading  bool
    err      error
}
// Update handles incoming messages and updates the model accordingly.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
    var cmds []tea.Cmd
    var cmd tea.Cmd

    switch msg := msg.(type) {
    case tea.KeyMsg:
        switch msg.Type {
        case tea.KeyCtrlC, tea.KeyEsc:
            return m, tea.Quit
            // Later, we will handle tea.KeyEnter to send the prompt.
        }
    }

    // Pass input to the textarea component.
    m.textarea, cmd = m.textarea.Update(msg)
    cmds = append(cmds, cmd)

    return m, tea.Batch(cmds...)
}
// View renders the UI based on the model's state.
func (m model) View() string {
    return fmt.Sprintf(
        "Ask GhOst a question:\n\n%s\n\n%s",
        m.textarea.View(),
        "(Ctrl+C to quit)",
    )
}
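Init and the program entry point are not shown above; here is a minimal sketch of the wiring, following the stock Bubble Tea pattern (the repo's actual main may differ):

package main

import (
    "fmt"
    "os"

    "github.com/charmbracelet/bubbles/textarea"
    tea "github.com/charmbracelet/bubbletea"
)

// Init starts the textarea's cursor blinking.
func (m model) Init() tea.Cmd {
    return textarea.Blink
}

func main() {
    ti := textarea.New()
    ti.Placeholder = "Ask away..."
    ti.Focus()

    if _, err := tea.NewProgram(model{textarea: ti}).Run(); err != nil {
        fmt.Println("error running program:", err)
        os.Exit(1)
    }
}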
Starting the program now gives an input prompt like this:
D:\Projects\my-project\GhOst git:[main] go run main.go

Ask GhOst a question:
Next, extend the model to hold the LLM client, the model name, the message history, and a viewport for displaying the conversation:

// model is the state of our TUI application.
type model struct {
    llmClient *llm.Client
    llmModel  string
    messages  []llm.Message
    textarea  textarea.Model
    viewport  viewport.Model
    loading   bool
    err       error
}
// The viewport will be initialized with the correct size via a WindowSizeMsg.
vp := viewport.New(0, 0)

return model{
    llmClient: client,
    llmModel:  modelName,
    textarea:  ti,
    viewport:  vp,
    messages:  []llm.Message{},
}
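Because the viewport starts at 0x0, Update needs a branch that resizes it once the terminal reports its dimensions. Here is a sketch of that branch, modeled on Bubble Tea's chat example; the exact height arithmetic (reserving four rows for padding) is my assumption:

case tea.WindowSizeMsg:
    // Both components take the full width; the viewport gets whatever
    // height remains after the textarea and some padding.
    m.textarea.SetWidth(msg.Width)
    m.viewport.Width = msg.Width
    m.viewport.Height = msg.Height - m.textarea.Height() - 4
    m.viewport.SetContent(m.renderConversation())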
Then update the Enter-key handling in Update:
case tea.KeyMsg:
    switch msg.Type {
    case tea.KeyCtrlC, tea.KeyEsc:
        return m, tea.Quit
    case tea.KeyEnter:
        // Get the value, but trim the newline that the component adds by default.
        prompt := strings.TrimSpace(m.textarea.Value())
        if prompt != "" && !m.loading {
            m.loading = true
            m.messages = append(m.messages, llm.Message{Role: "user", Content: prompt})
            m.textarea.Reset()
            m.viewport.SetContent(m.renderConversation())
            m.viewport.GotoBottom()
            return m, m.waitForLLMResponse
        }
    }
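This handler leans on two helpers not shown yet: renderConversation, which formats the history for the viewport, and waitForLLMResponse, which performs the blocking API call as a tea.Cmd. A rough sketch of both; the llmResponseMsg type and the client's Complete method (sketched further below) are my assumptions:

// renderConversation formats the message history for display in the viewport.
func (m model) renderConversation() string {
    var b strings.Builder
    for _, msg := range m.messages {
        if msg.Role == "user" {
            b.WriteString("You: " + msg.Content + "\n\n")
        } else {
            b.WriteString("GhOst: " + msg.Content + "\n\n")
        }
    }
    return b.String()
}

// llmResponseMsg delivers the assistant's reply (or an error) back to Update.
type llmResponseMsg struct {
    content string
    err     error
}

// waitForLLMResponse runs the blocking completion call off the UI loop and
// hands the result back as a message. Its func() tea.Msg shape is what lets
// Update return it directly as a tea.Cmd.
func (m model) waitForLLMResponse() tea.Msg {
    content, err := m.llmClient.Complete(m.llmModel, m.messages)
    return llmResponseMsg{content: content, err: err}
}

The request and response types behind that call live in the llm package: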
// CompletionRequest is the request body for a non-streaming chat completion.
type CompletionRequest struct {
    Model    string    `json:"model"`
    Messages []Message `json:"messages"`
    Stream   bool      `json:"stream,omitempty"`
}
// StreamChoice is a single choice in a streaming chat completion response.
type StreamChoice struct {
    Delta struct {
        Content string `json:"content"`
    } `json:"delta"`
    FinishReason string `json:"finish_reason"`
}
// StreamCompletionResponse is the response body for a streaming chat completion.
type StreamCompletionResponse struct {
    Choices []StreamChoice `json:"choices"`
}
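With those types in place, the non-streaming call itself is straightforward. A sketch, assuming a Client that holds an HTTP client, the base URL, and the API key from the config (the field names are mine; the repo's llm.Client may differ), plus a CompletionResponse type not shown in the post:

// Client wraps the HTTP transport and endpoint config (assumed layout).
type Client struct {
    httpClient *http.Client
    baseURL    string
    apiKey     string
}

// CompletionResponse mirrors the non-streaming response shape (assumed).
type CompletionResponse struct {
    Choices []struct {
        Message Message `json:"message"`
    } `json:"choices"`
}

// Complete posts a chat completion request and returns the first choice.
func (c *Client) Complete(model string, messages []Message) (string, error) {
    body, err := json.Marshal(CompletionRequest{Model: model, Messages: messages})
    if err != nil {
        return "", err
    }
    req, err := http.NewRequest("POST", c.baseURL+"/chat/completions", bytes.NewReader(body))
    if err != nil {
        return "", err
    }
    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("Authorization", "Bearer "+c.apiKey)

    resp, err := c.httpClient.Do(req)
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()

    var out CompletionResponse
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
        return "", err
    }
    if len(out.Choices) == 0 {
        return "", fmt.Errorf("no choices in response")
    }
    return out.Choices[0].Message.Content, nil
}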
Before wiring up streaming, it's worth recalling how Bubble Tea defines commands and messages (quoted from the bubbletea source):

// Cmd is an IO operation that returns a message when it's complete. If it's
// nil it's considered a no-op. Use it for things like HTTP requests, timers,
// saving and loading from disk, and so on.
//
// Note that there's almost never a reason to use a command to send a message
// to another part of your program. That can almost always be done in the
// update function.
type Cmd func() Msg
// Msg contain data from the result of a IO operation. Msgs trigger the update
// function and, henceforth, the UI.
type Msg interface{}
// Stream is a channel of messages from the LLM stream.
type Stream chan tea.Msg
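With Stream defined as a channel of tea.Msg, the streaming flow splits into two halves: a goroutine that reads the SSE body and sends each content delta into the channel, and a command that receives one message at a time and re-arms itself. A sketch, with streamChunkMsg and streamDoneMsg as illustrative message types of mine:

// streamChunkMsg carries one content delta; streamDoneMsg signals completion.
type streamChunkMsg string
type streamDoneMsg struct{}

// pumpStream reads "data: ..." SSE lines from the response body, decodes each
// chunk, and forwards the content delta onto the stream.
func pumpStream(body io.ReadCloser, s Stream) {
    defer body.Close()
    scanner := bufio.NewScanner(body)
    for scanner.Scan() {
        line := strings.TrimSpace(scanner.Text())
        if !strings.HasPrefix(line, "data: ") {
            continue
        }
        payload := strings.TrimPrefix(line, "data: ")
        if payload == "[DONE]" {
            break
        }
        var chunk StreamCompletionResponse
        if err := json.Unmarshal([]byte(payload), &chunk); err != nil {
            continue // skip malformed chunks in this sketch
        }
        if len(chunk.Choices) > 0 {
            s <- streamChunkMsg(chunk.Choices[0].Delta.Content)
        }
    }
    s <- streamDoneMsg{}
}

// waitForStreamMsg blocks until the next message arrives on the stream. Update
// must return it again after each chunk to keep listening.
func waitForStreamMsg(s Stream) tea.Cmd {
    return func() tea.Msg {
        return <-s
    }
}

In Update, a streamChunkMsg would append to the last assistant message and return waitForStreamMsg again, while streamDoneMsg would clear the loading flag.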