Files
ask/main.go
2026-03-21 16:27:49 +08:00

91 lines
1.9 KiB
Go

package main
import (
"context"
"io"
"log"
"net/http"
"os"
"strings"
llm_api "gitea.starryskymeow.cn/xkm/llm-api"
"github.com/alecthomas/kong"
kongtoml "github.com/alecthomas/kong-toml"
)
// cli defines the command-line flags and positional arguments.
// kong derives flag names from the field names (e.g. ResponseApi -> --response-api)
// and kong-toml reads matching keys from the config file, so renaming a field
// here changes the user-facing interface.
var cli struct {
	// Path to the TOML config file; parsed first so the second kong.Parse
	// pass in main can load defaults from it. NOTE(review): assumes kong
	// expands the leading "~" in configuration paths — confirm.
	Config string `short:"c" default:"~/.config/ask.toml" help:"Path to config file"`
	BaseURL string `short:"b" help:"LLM API base URL" default:"https://api.openai.com/v1"`
	// Selects which endpoint main calls: /v1/responses when true,
	// /v1/chat/completions when false.
	ResponseApi bool `default:"false" help:"Use /v1/responses or /v1/chat/completions"`
	ApiKey string `short:"k" help:"LLM API Key"`
	Model string `short:"m" help:"LLM model" default:"gpt-5-nano"`
	ReasoningEffort string `help:"LLM reasoning effort (note that some LLMs may not support certain settings)" default:"minimal"`
	// Pointer so that "unset" (nil) is distinguishable from an explicit 0.
	Temperature *float64 `help:"LLM Temperature"`
	// Positional words joined with spaces in main; when absent, main reads
	// the prompt from stdin instead.
	Src []string `arg:"" optional:"" name:"src" help:"Text to ask, or leave empty to use stdin"`
}
// main reads a prompt from the command line (or stdin), streams a chat
// completion from an OpenAI-compatible API, and copies the response to stdout.
func main() {
	// Two-pass parse: the first pass only exists to learn the value of
	// --config / -c; the second pass re-parses with the TOML loader pointed
	// at that path so config-file values become defaults for the flags.
	_ = kong.Parse(&cli)
	_ = kong.Parse(
		&cli,
		kong.Configuration(kongtoml.Loader, cli.Config),
	)
	src := strings.Join(cli.Src, " ")
	if src == "" {
		// No positional args: read the whole prompt from stdin.
		// Check the read error before using the bytes (the original used
		// the value first and panicked after; log.Fatal keeps error
		// handling consistent with the rest of the function).
		stdin, err := io.ReadAll(os.Stdin)
		if err != nil {
			log.Fatal(err)
		}
		src = string(stdin)
	}
	// The two endpoints take identical arguments; build the shared message
	// list once instead of duplicating it in both branches.
	messages := []llm_api.OpenaiChatMessage{
		{
			Role:    "user",
			Content: src,
		},
	}
	var pr io.ReadCloser
	var err error
	if cli.ResponseApi {
		pr, err = llm_api.OpenaiStreamChatResponses(
			context.Background(),
			http.DefaultClient,
			cli.BaseURL,
			cli.ApiKey,
			cli.Model,
			cli.ReasoningEffort,
			nil,
			messages,
		)
	} else {
		pr, err = llm_api.OpenaiStreamChatCompletions(
			context.Background(),
			http.DefaultClient,
			cli.BaseURL,
			cli.ApiKey,
			cli.Model,
			cli.ReasoningEffort,
			nil,
			messages,
		)
	}
	if err != nil {
		log.Fatal(err)
	}
	defer pr.Close()
	// Stream the response body straight to stdout.
	if _, err := io.Copy(os.Stdout, pr); err != nil {
		log.Fatal(err)
	}
}