config(llm): add cf_orch block to vllm backend

This commit is contained in:
pyr0ball 2026-04-02 12:20:41 -07:00
parent 13cd4c0d8a
commit 7c9dcd2620

View file

@@ -30,7 +30,7 @@ backends:
api_key: ollama
base_url: http://host.docker.internal:11434/v1
enabled: true
-model: llama3.2:3b
+model: llama3.1:8b
supports_images: false
type: openai_compat
vision_service:
@@ -45,6 +45,12 @@ backends:
model: __auto__
supports_images: false
type: openai_compat
+cf_orch:
+service: vllm
+model_candidates:
+- Ouro-2.6B-Thinking
+- Ouro-1.4B
+ttl_s: 300
vllm_research:
api_key: ''
base_url: http://host.docker.internal:8000/v1