|
6 | 6 | import { instances, refreshState } from "$lib/stores/app.svelte"; |
7 | 7 | import { onMount } from "svelte"; |
8 | 8 |
|
9 | | - const apiUrl = browser ? window.location.origin : "http://localhost:52415"; |
| 9 | + const apiUrl = browser ? window.location.origin.replace("localhost", "127.0.0.1") : "http://127.0.0.1:52415"; |
10 | 10 |
|
11 | 11 | const instancesData = $derived(instances()); |
12 | 12 |
|
|
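A quick way to confirm the API URL above is reachable before wiring up any of the tools below (a sketch; it assumes exo is serving on the default port 52415 and exposes the standard OpenAI-style model listing, which the OpenAI-compatible base URL used by the integrations implies):

# List the models exo is serving; the placeholder key "x" matches the configs below
curl -H "Authorization: Bearer x" http://127.0.0.1:52415/v1/models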
86 | 86 | let codexModel = $state(""); |
87 | 87 | let codexMcpPath = $state("/Users/username"); |
88 | 88 | let openClawModel = $state(""); |
89 | | - let openClawToolsProfile = $state("coding"); |
90 | 89 | $effect(() => { |
91 | 90 | const def = modelsBySize.length > 0 ? modelsBySize[0] : "your-model-id"; |
92 | 91 | codexModel = def; |
|
185 | 184 | const openClawConfig = $derived( |
186 | 185 | JSON.stringify( |
187 | 186 | { |
188 | | - model: openClawModel, |
189 | | - modelProvider: { |
190 | | - name: "exo", |
191 | | - baseURL: `${apiUrl}/v1`, |
192 | | - apiKey: "x", |
| 187 | + gateway: { mode: "local" }, |
| 188 | + models: { |
| 189 | + providers: { |
| 190 | + exo: { |
| 191 | + baseUrl: `${apiUrl}/v1`, |
| 192 | + apiKey: "x", |
| 193 | + api: "openai-completions", |
| 194 | + models: [ |
| 195 | + { |
| 196 | + id: openClawModel, |
| 197 | + name: "exo local", |
| 198 | + input: (modelCapabilities[openClawModel] || []).includes("vision") |
| 199 | + ? ["text", "image"] |
| 200 | + : ["text"], |
| 201 | + }, |
| 202 | + ], |
| 203 | + }, |
| 204 | + }, |
| 205 | + }, |
| 206 | + agents: { |
| 207 | + defaults: { |
| 208 | + model: `exo/${openClawModel}`, |
| 209 | + }, |
193 | 210 | }, |
194 | | - toolsProfile: openClawToolsProfile, |
195 | 211 | }, |
196 | 212 | null, |
197 | 213 | 2, |
|
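For reference, with the page's own "your-model-id" placeholder and a local cluster, the openClawConfig derived above serializes to the equivalent of the JSON below (illustrative only; the id, input types, and base URL come from the selected model and apiUrl). The heredoc is just a convenient way to create the file and would replace an existing config, so merge by hand if you already have one:

# Sketch: write the generated config to the path shown in the Config File card
mkdir -p ~/.openclaw
cat > ~/.openclaw/openclaw.json <<'EOF'
{
  "gateway": { "mode": "local" },
  "models": {
    "providers": {
      "exo": {
        "baseUrl": "http://127.0.0.1:52415/v1",
        "apiKey": "x",
        "api": "openai-completions",
        "models": [
          { "id": "your-model-id", "name": "exo local", "input": ["text"] }
        ]
      }
    }
  },
  "agents": { "defaults": { "model": "exo/your-model-id" } }
}
EOF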
202 | 218 | `OLLAMA_HOST=${apiUrl}/ollama ollama run ${modelsBySize.length > 0 ? modelsBySize[0] : "your-model-id"}`, |
203 | 219 | ); |
204 | 220 |
|
205 | | - const n8nConfig = $derived.by(() => { |
206 | | - const steps = [ |
207 | | - "1. In n8n, go to Credentials → New Credential → OpenAI API", |
| 221 | + const openWebUiCommand = $derived( |
| 222 | + [ |
| 223 | + `docker run -d -p 3000:8080 \\`, |
| 224 | +       `  -e OLLAMA_BASE_URL=${apiUrl.replace("127.0.0.1", "host.docker.internal").replace("localhost", "host.docker.internal")}/ollama \\`, |
| 225 | + ` -v open-webui:/app/backend/data \\`, |
| 226 | + ` --name open-webui \\`, |
| 227 | + ` ghcr.io/open-webui/open-webui:main`, |
| 228 | + ].join("\n"), |
| 229 | + ); |
| 230 | +
|
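One caveat for the Docker-based tools on this page (Open WebUI here, n8n below): host.docker.internal resolves out of the box on Docker Desktop for macOS and Windows, but not on plain Linux. A sketch of the extra flag that maps it, assuming Docker 20.10 or newer and the default exo port:

# Linux only: make host.docker.internal point at the host from inside the container
docker run -d -p 3000:8080 \
  --add-host=host.docker.internal:host-gateway \
  -e OLLAMA_BASE_URL=http://host.docker.internal:52415/ollama \
  -v open-webui:/app/backend/data \
  --name open-webui \
  ghcr.io/open-webui/open-webui:main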
| 231 | + const n8nDockerCommand = $derived( |
| 232 | + [ |
| 233 | + `docker run -d -p 5678:5678 \\`, |
| 234 | + ` -v n8n_data:/home/node/.n8n \\`, |
| 235 | + ` --name n8n \\`, |
| 236 | + ` docker.n8n.io/n8nio/n8n`, |
| 237 | + ].join("\n"), |
| 238 | + ); |
| 239 | +
|
| 240 | + const n8nCredentialSteps = $derived( |
| 241 | + [ |
| 242 | + `1. Go to Credentials → Add Credential → search "OpenAI API"`, |
208 | 243 | `2. Set API Key to: x`, |
209 | | - `3. Set Base URL to: ${apiUrl}/v1`, |
210 | | - "4. Save the credential", |
211 | | - `5. In your AI Agent or LLM Chain node, use the OpenAI Chat Model sub-node`, |
212 | | - `6. Enter model name: ${modelsBySize.length > 0 ? modelsBySize[0] : "your-model-id"}`, |
213 | | - ]; |
214 | | - return steps.join("\n"); |
215 | | - }); |
| 244 | + `3. Set Base URL to: ${apiUrl.replace("127.0.0.1", "host.docker.internal").replace("localhost", "host.docker.internal")}/v1`, |
| 245 | + `4. Save the credential`, |
| 246 | + ].join("\n"), |
| 247 | + ); |
| 248 | +
|
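Before building the workflow, it can help to confirm that the endpoint the credential points at actually answers chat requests. From the host use 127.0.0.1 (inside the n8n container the same service is reached via host.docker.internal). A sketch, using the page's "your-model-id" placeholder and the dummy key "x":

# Minimal OpenAI-style chat completion against the exo cluster
curl http://127.0.0.1:52415/v1/chat/completions \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer x" \
  -d '{"model": "your-model-id", "messages": [{"role": "user", "content": "Say hello"}]}'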
| 249 | + const n8nWorkflowSteps = $derived( |
| 250 | + [ |
| 251 | + `1. Create a new workflow → "Start from Scratch"`, |
| 252 | + `2. Add an "AI Agent" or "Basic LLM Chain" node`, |
| 253 | + `3. Inside it, add an "OpenAI Chat Model" sub-node`, |
| 254 | + `4. Select the OpenAI credential you just created`, |
| 255 | + `5. Set Model to "From list" and pick your model (e.g. ${modelsBySize.length > 0 ? modelsBySize[0] : "your-model-id"})`, |
| 256 | + `6. Optionally toggle "Use Responses API", add Built-in Tools, or click "Add Option" for sampling settings`, |
| 257 | + `7. Connect a "Chat Trigger" node for interactive chat`, |
| 258 | + `8. On the Chat Trigger, enable "Allow File Uploads" for vision`, |
| 259 | + ].join("\n"), |
| 260 | + ); |
216 | 261 |
|
217 | 262 | const tabs = [ |
218 | 263 | "Claude Code", |
219 | 264 | "OpenCode", |
220 | 265 | "Codex", |
221 | 266 | "OpenClaw", |
222 | | - "Ollama", |
| 267 | + "Open WebUI", |
223 | 268 | "n8n", |
224 | 269 | ] as const; |
225 | 270 | type Tab = (typeof tabs)[number]; |
|
430 | 475 | language="bash" |
431 | 476 | /> |
432 | 477 | {:else if activeTab === "OpenClaw"} |
433 | | - <div class="flex gap-3 text-xs"> |
434 | | - {#if runningModels.length > 1} |
435 | | - <div> |
436 | | - <span |
437 | | - class="text-exo-light-gray/50 text-[10px] uppercase tracking-wider block mb-1" |
438 | | - >Model</span |
439 | | - > |
440 | | - <select bind:value={openClawModel} class={selectClass}> |
441 | | - {#each runningModels as model} |
442 | | - <option value={model}>{model.split("/").pop()}</option> |
443 | | - {/each} |
444 | | - </select> |
445 | | - </div> |
446 | | - {/if} |
447 | | - <div> |
448 | | - <span |
449 | | - class="text-exo-light-gray/50 text-[10px] uppercase tracking-wider block mb-1" |
450 | | - >Tools Profile</span |
451 | | - > |
452 | | - <select bind:value={openClawToolsProfile} class={selectClass}> |
453 | | - {#each ["minimal", "coding", "messaging", "full"] as profile} |
454 | | - <option value={profile}>{profile}</option> |
| 478 | + {#if runningModels.length > 1} |
| 479 | + <div class="text-xs"> |
| 480 | + <span class="text-exo-light-gray/50 text-[10px] uppercase tracking-wider block mb-1">Model</span> |
| 481 | + <select bind:value={openClawModel} class={selectClass}> |
| 482 | + {#each runningModels as model} |
| 483 | + <option value={model}>{model.split("/").pop()}</option> |
455 | 484 | {/each} |
456 | 485 | </select> |
457 | 486 | </div> |
458 | | - </div> |
| 487 | + {/if} |
459 | 488 | <IntegrationCard |
460 | 489 | title="Config File" |
461 | 490 | subtitle="~/.openclaw/openclaw.json" |
462 | | - description="Add this to your OpenClaw config." |
| 491 | + description="Add this to your OpenClaw config. If you haven't installed OpenClaw yet, run: npm install -g openclaw@latest" |
463 | 492 | config={openClawConfig} |
464 | 493 | /> |
465 | | - {:else if activeTab === "Ollama"} |
466 | 494 | <IntegrationCard |
467 | | - title="Shell Command" |
| 495 | + title="Setup Commands" |
| 496 | + subtitle="Run in terminal" |
| 497 | +         description="After saving the config, run these commands to repair model metadata, start the gateway, and open the dashboard." |
| 498 | + config={`openclaw doctor --fix${(modelCapabilities[openClawModel] || []).includes("vision") ? `\nopenclaw models set-image exo/${openClawModel}` : ""}\nopenclaw gateway &\nopenclaw dashboard`} |
| 499 | + language="bash" |
| 500 | + /> |
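For a vision-capable model the card above renders the optional set-image line; for text-only models it is omitted. Illustrative output with the placeholder id:

openclaw doctor --fix
openclaw models set-image exo/your-model-id   # emitted only when the selected model reports vision capability
openclaw gateway &
openclaw dashboard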
| 501 | + {:else if activeTab === "Open WebUI"} |
| 502 | + <IntegrationCard |
| 503 | + title="1. Start Open WebUI" |
| 504 | + subtitle="Run in terminal" |
| 505 | +         description="Start Open WebUI in Docker, pointed at your exo cluster's Ollama-compatible endpoint." |
| 506 | + config={openWebUiCommand} |
| 507 | + language="bash" |
| 508 | + /> |
| 509 | + <IntegrationCard |
| 510 | + title="2. Open & Select Model" |
| 511 | + subtitle="http://localhost:3000" |
| 512 | + description={`Open http://localhost:3000 in your browser. Select the running model from the dropdown at the top: ${runningModels.length > 0 ? runningModels.join(", ") : "no models running"}`} |
| 513 | + config={"open http://localhost:3000"} |
| 514 | + language="bash" |
| 515 | + /> |
| 516 | + <IntegrationCard |
| 517 | + title="Ollama CLI" |
468 | 518 | subtitle="Run in terminal" |
469 | | - description="Set OLLAMA_HOST to point the Ollama CLI at your exo cluster." |
| 519 | + description="Or use the Ollama CLI directly." |
470 | 520 | config={ollamaCommand} |
471 | 521 | language="bash" |
472 | 522 | /> |
473 | 523 | {:else if activeTab === "n8n"} |
474 | 524 | <IntegrationCard |
475 | | - title="Credential Setup" |
476 | | - subtitle="n8n UI" |
477 | | - description="Configure an OpenAI credential in n8n to use your exo cluster." |
478 | | - config={n8nConfig} |
| 525 | + title="1. Start n8n" |
| 526 | + subtitle="Run in terminal" |
| 527 | + description="Start n8n with Docker. If you already have n8n running, skip this step." |
| 528 | + config={n8nDockerCommand} |
| 529 | + language="bash" |
| 530 | + /> |
| 531 | + <IntegrationCard |
| 532 | + title="2. Open n8n" |
| 533 | + subtitle="http://localhost:5678" |
| 534 | + description="Open n8n in your browser. If this is your first time, complete the setup and select 'Start from Scratch' when prompted." |
| 535 | + config={"open http://localhost:5678"} |
| 536 | + language="bash" |
| 537 | + /> |
| 538 | + <IntegrationCard |
| 539 | + title="3. Add OpenAI Credential" |
| 540 | + subtitle="n8n UI → Credentials" |
| 541 | + description="Create an OpenAI credential pointing at your exo cluster." |
| 542 | + config={n8nCredentialSteps} |
| 543 | + /> |
| 544 | + <IntegrationCard |
| 545 | + title="4. Build a Workflow" |
| 546 | + subtitle="n8n UI → Workflows" |
| 547 | + description="Create a workflow that uses your exo-powered model." |
| 548 | + config={n8nWorkflowSteps} |
479 | 549 | /> |
480 | 550 | {/if} |
481 | 551 | </div> |
|