- llm.yaml + example: replace localhost URLs with Docker service names (ollama:11434, vllm:8000, vision:8002); replace personal model names (meghan-cover-writer, llama3.1:8b) with llama3.2:3b
- user.yaml.example: update service hosts to Docker names (ollama, vllm, searxng) and change the searxng port from 8888 (host-mapped) to 8080 (internal)
- wizard step 5: fix hardcoded localhost defaults; the wizard runs inside Docker, so service-name defaults are required for connection tests to pass (see the sketch below)
- scrapers/companyScraper.py: bundle the scraper so the Dockerfile COPY succeeds
- setup.sh: remove the host Ollama install (it conflicts with Docker Ollama on port 11434); the Docker entrypoint handles the model download automatically
- README + setup.sh banner: add the Circuit Forge mission statement
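The wizard fix is easiest to see in code. Below is a minimal sketch of the kind of connection test a setup wizard would run against an OpenAI-compatible endpoint; the function name and defaults are illustrative assumptions, not the project's actual wizard code. The point is that inside a container, localhost resolves to the wizard's own container, so only a Docker service name such as ollama can reach the backend.

```python
# Illustrative connection test (assumed helper; not the project's wizard code).
# Inside Docker the default must be the compose service name: a localhost
# default would probe the wizard's own container and always fail.
import urllib.request


def check_backend(base_url: str = "http://ollama:11434/v1", timeout: float = 5.0) -> bool:
    """Return True if an OpenAI-compatible /models endpoint answers."""
    try:
        with urllib.request.urlopen(f"{base_url}/models", timeout=timeout) as resp:
            return resp.status == 200
    except OSError:
        return False


if __name__ == "__main__":
    print("ollama reachable:", check_backend())
```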
llm.yaml · 66 lines · 1.7 KiB · Text
backends:
  anthropic:
    api_key_env: ANTHROPIC_API_KEY
    enabled: false
    model: claude-sonnet-4-6
    type: anthropic
    supports_images: true
  claude_code:
    api_key: any
    base_url: http://localhost:3009/v1
    enabled: false
    model: claude-code-terminal
    type: openai_compat
    supports_images: true
  github_copilot:
    api_key: any
    base_url: http://localhost:3010/v1
    enabled: false
    model: gpt-4o
    type: openai_compat
    supports_images: false
  ollama:
    api_key: ollama
    base_url: http://ollama:11434/v1  # Docker service name; use localhost:11434 outside Docker
    enabled: true
    model: llama3.2:3b
    type: openai_compat
    supports_images: false
  ollama_research:
    api_key: ollama
    base_url: http://ollama:11434/v1  # Docker service name; use localhost:11434 outside Docker
    enabled: true
    model: llama3.2:3b
    type: openai_compat
    supports_images: false
  vllm:
    api_key: ''
    base_url: http://vllm:8000/v1  # Docker service name; use localhost:8000 outside Docker
    enabled: true
    model: __auto__
    type: openai_compat
    supports_images: false
  vision_service:
    base_url: http://vision:8002  # Docker service name; use localhost:8002 outside Docker
    enabled: false
    type: vision_service
    supports_images: true
fallback_order:
  - ollama
  - claude_code
  - vllm
  - github_copilot
  - anthropic
research_fallback_order:
  - claude_code
  - vllm
  - ollama_research
  - github_copilot
  - anthropic
vision_fallback_order:
  - vision_service
  - claude_code
  - anthropic
  # Note: the 'ollama' writer backend is intentionally excluded: research must
  # never use the fine-tuned writer model, and excluding it also avoids evicting
  # the writer from GPU memory while a cover-letter task is in flight.
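For context on how the three fallback lists fit together, here is a hedged sketch of a consumer for this file: parse the YAML, walk the requested fallback list, and return the first enabled backend that answers a health probe. All helper names are assumptions for illustration; the repo's actual dispatch code may differ.

```python
# Illustrative llm.yaml consumer (helper names are assumptions, not the
# project's API). Returns the first enabled, reachable backend in a list.
import urllib.request

import yaml  # pip install pyyaml


def load_config(path: str = "llm.yaml") -> dict:
    with open(path) as fh:
        return yaml.safe_load(fh)


def is_reachable(backend: dict, timeout: float = 5.0) -> bool:
    base_url = backend.get("base_url")
    if base_url is None:  # SDK-based backends (e.g. anthropic) have no URL to probe
        return True
    try:
        # OpenAI-compatible backends expose GET <base_url>/models
        with urllib.request.urlopen(f"{base_url}/models", timeout=timeout) as resp:
            return resp.status == 200
    except OSError:
        return False


def pick_backend(cfg: dict, order_key: str = "fallback_order") -> str:
    """Name of the first enabled, reachable backend in the given list."""
    for name in cfg.get(order_key, []):
        backend = cfg["backends"].get(name, {})
        if backend.get("enabled") and is_reachable(backend):
            return name
    raise RuntimeError(f"no backend in {order_key} is available")


if __name__ == "__main__":
    cfg = load_config()
    # Research uses its own list so it never lands on the writer
    # backend ('ollama'), per the note above.
    print(pick_backend(cfg, "research_fallback_order"))
```

With the shipped defaults, research resolves to vllm when it is up and otherwise falls through to ollama_research; the disabled claude_code entry is skipped outright.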