Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -53,3 +53,5 @@ temp/
bin

obol-agent
.private_keys/
.claude/worktrees/
51 changes: 46 additions & 5 deletions cmd/obol/model.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ func modelCommand(cfg *config.Config) *cli.Command {
modelSyncCommand(cfg),
modelPullCommand(),
modelListCommand(cfg),
modelRemoveCommand(cfg),
},
}
}
Expand Down Expand Up @@ -87,27 +88,43 @@ func modelSetupCommand(cfg *config.Config) *cli.Command {

func setupOllama(cfg *config.Config, u *ui.UI, models []string) error {
if len(models) == 0 {
// Discover from running Ollama
// Diagnostic: check Ollama connectivity
u.Info("Checking Ollama connectivity...")
ollamaModels, err := model.ListOllamaModels()
if err != nil {
return fmt.Errorf("Ollama is not running: %w\n\nInstall from https://ollama.ai and try again", err)
u.Errorf("Ollama not reachable")
u.Print("")
u.Print(" Hint: Is Ollama running? Try: ollama serve")
u.Print(" Hint: Using a custom host? Set OLLAMA_HOST=http://your-host:port")
u.Print(" Hint: Install from https://ollama.ai")
return fmt.Errorf("Ollama is not running: %w", err)
}
u.Success("Ollama is reachable")

if len(ollamaModels) == 0 {
return fmt.Errorf("Ollama is running but has no models. Pull one first:\n ollama pull qwen3.5:9b")
u.Warn("No models pulled in Ollama")
u.Print("")
u.Print(" Hint: Pull a model with: ollama pull qwen3.5:9b")
u.Print(" Hint: Or run: obol model pull")
return fmt.Errorf("Ollama is running but has no models")
}
u.Successf("Found %d pulled model(s)", len(ollamaModels))

for _, m := range ollamaModels {
name := m.Name
if strings.HasSuffix(name, ":latest") {
name = strings.TrimSuffix(name, ":latest")
}
models = append(models, name)
}
u.Infof("Found %d Ollama model(s): %s", len(models), strings.Join(models, ", "))
u.Infof("Models: %s", strings.Join(models, ", "))
}

if err := model.ConfigureLiteLLM(cfg, u, "ollama", "", models); err != nil {
return err
}

u.Successf("Ollama configured. To change later, run: obol model setup (or obol model remove <name>)")
return syncOpenClawModels(cfg, u)
}

Expand Down Expand Up @@ -135,8 +152,13 @@ func setupCloudProvider(cfg *config.Config, u *ui.UI, provider, apiKey string, m
}

if err := model.ConfigureLiteLLM(cfg, u, provider, apiKey, models); err != nil {
u.Print("")
u.Print(" Hint: Configuration stored in: litellm-config ConfigMap (llm namespace)")
return err
}

u.Print("")
u.Successf("Model configured. To change later, run: obol model setup (or obol model remove <name>)")
return syncOpenClawModels(cfg, u)
}

Expand Down Expand Up @@ -178,7 +200,7 @@ func modelSetupCustomCommand(cfg *config.Config) *cli.Command {
Action: func(ctx context.Context, cmd *cli.Command) error {
u := getUI(cmd)
name := cmd.String("name")
endpoint := cmd.String("endpoint")
endpoint := model.WarnAndStripV1Suffix(cmd.String("endpoint"))
modelName := cmd.String("model")
apiKey := cmd.String("api-key")

Expand Down Expand Up @@ -319,6 +341,25 @@ func modelListCommand(cfg *config.Config) *cli.Command {
}
}

// modelRemoveCommand builds the "model remove" subcommand, which deletes a
// named model from the LiteLLM gateway and then re-syncs the OpenClaw
// model list so instances stop offering the removed model.
func modelRemoveCommand(cfg *config.Config) *cli.Command {
	action := func(ctx context.Context, cmd *cli.Command) error {
		u := getUI(cmd)

		// The model to remove is the first positional argument.
		name := cmd.Args().First()
		if name == "" {
			return fmt.Errorf("model name is required\n\nUsage: obol model remove <model-name>\n\nList configured models with: obol model list")
		}

		if err := model.RemoveModel(cfg, u, name); err != nil {
			return err
		}

		// Propagate the updated model list to OpenClaw instances.
		return syncOpenClawModels(cfg, u)
	}

	return &cli.Command{
		Name:      "remove",
		Usage:     "Remove a model from the LiteLLM gateway",
		ArgsUsage: "<model-name>",
		Action:    action,
	}
}

func providerInfo(id string) model.ProviderInfo {
providers, _ := model.GetAvailableProviders(nil)
for _, p := range providers {
Expand Down
19 changes: 17 additions & 2 deletions cmd/obol/openclaw.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,14 +55,29 @@ func openclawCommand(cfg *config.Config) *cli.Command {
},
{
Name: "token",
Usage: "Retrieve gateway token for an OpenClaw instance",
Usage: "Retrieve or regenerate gateway token for an OpenClaw instance",
ArgsUsage: "[instance-name]",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "regenerate",
Usage: "Delete and regenerate the gateway token (restarts the instance)",
},
},
Action: func(ctx context.Context, cmd *cli.Command) error {
id, _, err := openclaw.ResolveInstance(cfg, cmd.Args().Slice())
if err != nil {
return err
}
return openclaw.Token(cfg, id, getUI(cmd))
u := getUI(cmd)
if cmd.Bool("regenerate") {
newToken, err := openclaw.RegenerateToken(cfg, id, u)
if err != nil {
return err
}
u.Print(newToken)
return nil
}
return openclaw.Token(cfg, id, u)
},
},
{
Expand Down
2 changes: 2 additions & 0 deletions internal/agent/agent.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,8 @@ func Init(cfg *config.Config, u *ui.UI) error {
return fmt.Errorf("failed to inject HEARTBEAT.md: %w", err)
}

u.Print("")
u.Success("Agent initialized. To reconfigure, you can safely re-run: obol agent init")
return nil
}

Expand Down
89 changes: 89 additions & 0 deletions internal/model/model.go
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,82 @@ func ConfigureLiteLLM(cfg *config.Config, u *ui.UI, provider, apiKey string, mod
return nil
}

// RemoveModel removes a model entry from the LiteLLM ConfigMap and restarts the deployment.
//
// All entries whose model_name equals modelName are dropped from the
// config.yaml stored in the ConfigMap; an error is returned if no entry
// matches. After patching, the LiteLLM deployment is restarted so the
// gateway reloads the new config. A rollout that does not confirm within
// 90s is reported as a warning rather than an error, because the ConfigMap
// change has already been applied by that point.
func RemoveModel(cfg *config.Config, u *ui.UI, modelName string) error {
	kubectlBinary := filepath.Join(cfg.BinDir, "kubectl")
	kubeconfigPath := filepath.Join(cfg.ConfigDir, "kubeconfig.yaml")

	// A missing kubeconfig means the local cluster has never been brought up.
	if _, err := os.Stat(kubeconfigPath); os.IsNotExist(err) {
		return fmt.Errorf("cluster not running. Run 'obol stack up' first")
	}

	// Read current config (the escaped jsonpath key selects data["config.yaml"]).
	raw, err := kubectl.Output(kubectlBinary, kubeconfigPath,
		"get", "configmap", configMapName, "-n", namespace, "-o", "jsonpath={.data.config\\.yaml}")
	if err != nil {
		return fmt.Errorf("failed to read LiteLLM config: %w", err)
	}

	var litellmConfig LiteLLMConfig
	if err := yaml.Unmarshal([]byte(raw), &litellmConfig); err != nil {
		return fmt.Errorf("failed to parse config.yaml: %w", err)
	}

	// Find and remove matching entries; duplicate entries under the same
	// model name are all removed in one pass.
	var kept []ModelEntry
	removed := 0
	for _, entry := range litellmConfig.ModelList {
		if entry.ModelName == modelName {
			removed++
			continue
		}
		kept = append(kept, entry)
	}

	if removed == 0 {
		return fmt.Errorf("model %q not found in LiteLLM config", modelName)
	}

	// NOTE(review): if the last model is removed, kept is nil — confirm that
	// LiteLLM accepts the resulting empty/null model_list in config.yaml.
	litellmConfig.ModelList = kept

	// Marshal back to YAML
	updated, err := yaml.Marshal(&litellmConfig)
	if err != nil {
		return fmt.Errorf("failed to marshal config: %w", err)
	}

	// Build ConfigMap patch. json.Marshal of the YAML string yields a
	// correctly escaped JSON string literal to embed in the merge patch.
	escapedYAML, err := json.Marshal(string(updated))
	if err != nil {
		return fmt.Errorf("failed to escape YAML: %w", err)
	}
	patchJSON := fmt.Sprintf(`{"data":{"config.yaml":%s}}`, escapedYAML)

	u.Infof("Removing model %q from LiteLLM config", modelName)
	if err := kubectl.Run(kubectlBinary, kubeconfigPath,
		"patch", "configmap", configMapName, "-n", namespace,
		"-p", patchJSON, "--type=merge"); err != nil {
		return fmt.Errorf("failed to patch ConfigMap: %w", err)
	}

	// Restart deployment so the gateway picks up the patched ConfigMap.
	u.Info("Restarting LiteLLM")
	if err := kubectl.Run(kubectlBinary, kubeconfigPath,
		"rollout", "restart", fmt.Sprintf("deployment/%s", deployName), "-n", namespace); err != nil {
		return fmt.Errorf("failed to restart LiteLLM: %w", err)
	}

	// Best-effort wait: a rollout timeout is only warned about, since the
	// removal itself has already been applied above.
	if err := kubectl.Run(kubectlBinary, kubeconfigPath,
		"rollout", "status", fmt.Sprintf("deployment/%s", deployName), "-n", namespace,
		"--timeout=90s"); err != nil {
		u.Warnf("LiteLLM rollout not confirmed: %v", err)
	} else {
		u.Successf("Model %q removed", modelName)
	}

	return nil
}

// AddCustomEndpoint adds a custom OpenAI-compatible endpoint to LiteLLM
// after validating it works.
func AddCustomEndpoint(cfg *config.Config, u *ui.UI, name, endpoint, modelName, apiKey string) error {
Expand Down Expand Up @@ -639,6 +715,19 @@ func decodeBase64(s string) (string, error) {
return string(decoded[:n]), nil
}

// WarnAndStripV1Suffix checks if an endpoint URL has a trailing /v1 suffix,
// warns the user, and returns the stripped URL. For OpenAI-compatible providers,
// LiteLLM auto-appends /v1, causing double /v1/v1 if the user includes it.
func WarnAndStripV1Suffix(endpoint string) string {
	// Drop trailing slashes only for the suffix check; the caller's original
	// string is returned untouched when no /v1 suffix is present.
	trimmed := strings.TrimRight(endpoint, "/")
	if !strings.HasSuffix(trimmed, "/v1") {
		return endpoint
	}
	stripped := strings.TrimSuffix(trimmed, "/v1")
	fmt.Printf(" Warning: stripping trailing /v1 from endpoint URL (LiteLLM adds it automatically)\n")
	fmt.Printf(" %s → %s\n", trimmed, stripped)
	return stripped
}

// localhostToClusterEndpoint translates localhost URLs to k3d-internal URLs
// so that services running on the host are reachable from inside the k3d cluster.
func localhostToClusterEndpoint(endpoint string) string {
Expand Down
57 changes: 57 additions & 0 deletions internal/openclaw/openclaw.go
Original file line number Diff line number Diff line change
Expand Up @@ -737,6 +737,59 @@ func Token(cfg *config.Config, id string, u *ui.UI) error {
return nil
}

// RegenerateToken forces a new OpenClaw gateway token for instance id: it
// deletes the token secret, restarts the openclaw deployment so a fresh
// token is generated, waits for the rollout, then polls until the new token
// can be read back.
//
// Returns the new token, or an error if a kubectl step fails or the token
// never becomes readable within the polling window.
func RegenerateToken(cfg *config.Config, id string, u *ui.UI) (string, error) {
	namespace := fmt.Sprintf("%s-%s", appName, id)
	kubeconfigPath := filepath.Join(cfg.ConfigDir, "kubeconfig.yaml")
	if _, err := os.Stat(kubeconfigPath); os.IsNotExist(err) {
		return "", fmt.Errorf("cluster not running. Run 'obol stack up' first")
	}

	kubectlBinary := filepath.Join(cfg.BinDir, "kubectl")

	// Delete the existing secret so a fresh token is generated on restart.
	u.Info("Deleting existing gateway token...")
	deleteCmd := exec.Command(kubectlBinary, "delete", "secret",
		"-n", namespace,
		"-l", fmt.Sprintf("app.kubernetes.io/name=%s", appName),
		"--ignore-not-found")
	deleteCmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath))
	if out, err := deleteCmd.CombinedOutput(); err != nil {
		return "", fmt.Errorf("failed to delete secret: %w\n%s", err, string(out))
	}

	// Restart the deployment to regenerate the token.
	u.Info("Restarting OpenClaw to regenerate token...")
	restartCmd := exec.Command(kubectlBinary, "rollout", "restart",
		"deployment/openclaw", "-n", namespace)
	restartCmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath))
	if out, err := restartCmd.CombinedOutput(); err != nil {
		return "", fmt.Errorf("failed to restart deployment: %w\n%s", err, string(out))
	}

	// Wait for rollout to complete.
	u.Info("Waiting for new pod to start...")
	waitCmd := exec.Command(kubectlBinary, "rollout", "status",
		"deployment/openclaw", "-n", namespace, "--timeout=120s")
	waitCmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath))
	if out, err := waitCmd.CombinedOutput(); err != nil {
		return "", fmt.Errorf("rollout not confirmed: %w\n%s", err, string(out))
	}

	// The token secret is created asynchronously after the new pod starts, so
	// a single fixed sleep is both racy (the secret may take longer) and
	// wasteful (it may be ready immediately). Poll for it instead, up to a
	// 30-second deadline.
	var newToken string
	deadline := time.Now().Add(30 * time.Second)
	for {
		var err error
		newToken, err = getToken(cfg, id)
		if err == nil {
			break
		}
		if time.Now().After(deadline) {
			return "", fmt.Errorf("token regenerated but could not retrieve it: %w", err)
		}
		time.Sleep(2 * time.Second)
	}

	u.Success("Token regenerated successfully")
	return newToken, nil
}

// findOpenClawBinary locates the openclaw CLI binary.
// Search order: PATH, then cfg.BinDir.
func findOpenClawBinary(cfg *config.Config) (string, error) {
Expand Down Expand Up @@ -2115,6 +2168,8 @@ func promptForDirectProvider(reader *bufio.Reader, providerName, display, defaul
if baseURL == "" {
baseURL = defaultBaseURL
}
// Strip trailing /v1 — LiteLLM auto-appends it for OpenAI-compatible providers.
baseURL = model.WarnAndStripV1Suffix(baseURL)

return buildDirectProviderOverlay(providerName, baseURL, defaultAPI, defaultAPIKeyEnvVar, modelID, modelName, apiKey), nil
}
Expand All @@ -2127,6 +2182,8 @@ func promptForCustomProvider(reader *bufio.Reader) (*ImportResult, error) {
if baseURL == "" {
return nil, fmt.Errorf("custom base URL is required")
}
// Strip trailing /v1 — LiteLLM auto-appends it for OpenAI-compatible providers.
baseURL = model.WarnAndStripV1Suffix(baseURL)

fmt.Printf("Custom model ID: ")
modelID, _ := reader.ReadString('\n')
Expand Down
9 changes: 8 additions & 1 deletion internal/tunnel/login.go
Original file line number Diff line number Diff line change
Expand Up @@ -113,11 +113,18 @@ func Login(cfg *config.Config, u *ui.UI, opts LoginOptions) error {
return fmt.Errorf("tunnel created, but failed to save local state: %w", err)
}

tunnelURL := fmt.Sprintf("https://%s", hostname)

// Inject AGENT_BASE_URL into obol-agent overlay if deployed.
if err := SyncAgentBaseURL(cfg, fmt.Sprintf("https://%s", hostname)); err != nil {
if err := SyncAgentBaseURL(cfg, tunnelURL); err != nil {
u.Warnf("could not sync AGENT_BASE_URL to obol-agent: %v", err)
}

// Write tunnel URL to ConfigMap so the frontend can read it.
if err := SyncTunnelConfigMap(cfg, tunnelURL); err != nil {
u.Warnf("could not sync tunnel URL to frontend ConfigMap: %v", err)
}

u.Blank()
u.Success("Tunnel login complete")
u.Printf("Persistent URL: https://%s", hostname)
Expand Down
9 changes: 8 additions & 1 deletion internal/tunnel/provision.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,11 +120,18 @@ func Provision(cfg *config.Config, u *ui.UI, opts ProvisionOptions) error {
return fmt.Errorf("tunnel provisioned, but failed to save local state: %w", err)
}

tunnelURL := fmt.Sprintf("https://%s", hostname)

// Inject AGENT_BASE_URL into obol-agent overlay if deployed.
if err := SyncAgentBaseURL(cfg, fmt.Sprintf("https://%s", hostname)); err != nil {
if err := SyncAgentBaseURL(cfg, tunnelURL); err != nil {
u.Warnf("could not sync AGENT_BASE_URL to obol-agent: %v", err)
}

// Write tunnel URL to ConfigMap so the frontend can read it.
if err := SyncTunnelConfigMap(cfg, tunnelURL); err != nil {
u.Warnf("could not sync tunnel URL to frontend ConfigMap: %v", err)
}

u.Blank()
u.Success("Tunnel provisioned")
u.Printf("Persistent URL: https://%s", hostname)
Expand Down
Loading
Loading