# peregrine/config/llm.yaml.example
#
# Example LLM configuration. Copy to llm.yaml and adjust for your deployment.
# LLM backends. Each entry declares how to reach one provider; the
# *_fallback_order lists below reference these entries by key.
backends:
  anthropic:
    api_key_env: ANTHROPIC_API_KEY
    enabled: false
    model: claude-sonnet-4-6
    type: anthropic
    supports_images: true
  claude_code:
    api_key: any
    base_url: http://localhost:3009/v1
    enabled: false
    model: claude-code-terminal
    type: openai_compat
    supports_images: true
  github_copilot:
    api_key: any
    base_url: http://localhost:3010/v1
    enabled: false
    model: gpt-4o
    type: openai_compat
    supports_images: false
  ollama:
    api_key: ollama
    base_url: http://ollama:11434/v1  # Docker service name; use localhost:11434 outside Docker
    enabled: true
    model: llama3.2:3b
    type: openai_compat
    supports_images: false
  ollama_research:
    api_key: ollama
    base_url: http://ollama:11434/v1  # Docker service name; use localhost:11434 outside Docker
    enabled: true
    model: llama3.2:3b
    type: openai_compat
    supports_images: false
  vllm:
    api_key: ''
    base_url: http://vllm:8000/v1  # Docker service name; use localhost:8000 outside Docker
    enabled: true
    model: __auto__
    type: openai_compat
    supports_images: false
  vision_service:
    base_url: http://localhost:8002
    enabled: false
    type: vision_service
    supports_images: true
# Preferred backend order for general LLM tasks.
fallback_order:
  - ollama
  - claude_code
  - vllm
  - github_copilot
  - anthropic
# Preferred backend order for research tasks.
# Note: 'ollama' (alex-cover-writer) is intentionally excluded — research
# must never use the fine-tuned writer model, and this also avoids evicting
# the writer from GPU memory while a cover letter task is in flight.
research_fallback_order:
  - claude_code
  - vllm
  - ollama_research
  - github_copilot
  - anthropic
# Preferred backend order for vision (image) tasks; only backends declared
# with supports_images: true appear here.
vision_fallback_order:
  - vision_service
  - claude_code
  - anthropic
# ── Scheduler — LLM batch queue optimizer ─────────────────────────────────────
# The scheduler batches LLM tasks by model type to avoid GPU model switching.
# VRAM budgets are conservative peak estimates (GB) for each task type.
# Increase if your models are larger; decrease if tasks share GPU memory well.
scheduler:
  vram_budgets:
    cover_letter: 2.5  # alex-cover-writer:latest (~2GB GGUF + headroom)
    company_research: 5.0  # llama3.1:8b or vllm model
    wizard_generate: 2.5  # same model family as cover_letter
  max_queue_depth: 500  # max pending tasks per type before drops (with logged warning)