fix: ollama docker_owned=True; finetune gets own profile to avoid build on start
- preflight: ollama was incorrectly marked docker_owned=False — Docker does define an ollama service, so external detection now correctly disables it via compose.override.yml when host Ollama is already running
- compose.yml: finetune moves from the [cpu, single-gpu, dual-gpu] profiles to its own [finetune] profile so it is never built during 'make start' (the pytorch/cuda base image is 3.7GB+ and unnecessary for the UI)
- compose.yml: remove depends_on ollama from finetune — it reaches Ollama via the OLLAMA_URL env var, which works whether Ollama runs in Docker or on the host
- Makefile: the finetune target uses --profile finetune plus the compose.gpu.yml overlay
This commit is contained in:
parent
3518d63ec2
commit
010abe6339
3 changed files with 3 additions and 6 deletions
2
Makefile
2
Makefile
|
|
@ -55,7 +55,7 @@ prepare-training: ## Scan docs_dir for cover letters and build training JSONL
|
|||
|
||||
finetune: ## Fine-tune your personal cover letter model (run prepare-training first)
|
||||
@echo "Starting fine-tune (30-90 min on GPU, much longer on CPU)..."
|
||||
$(COMPOSE) $(COMPOSE_FILES) --profile $(PROFILE) run --rm finetune
|
||||
$(COMPOSE) $(COMPOSE_FILES) -f compose.gpu.yml --profile finetune run --rm finetune
|
||||
|
||||
clean: ## Remove containers, images, and data volumes (DESTRUCTIVE)
|
||||
@echo "WARNING: This will delete all Peregrine containers and data."
|
||||
|
|
|
|||
|
|
@ -92,8 +92,5 @@ services:
|
|||
- OLLAMA_URL=http://ollama:11434
|
||||
- OLLAMA_MODELS_MOUNT=/ollama-models
|
||||
- OLLAMA_MODELS_OLLAMA_PATH=/root/.ollama
|
||||
depends_on:
|
||||
ollama:
|
||||
condition: service_started
|
||||
profiles: [cpu, single-gpu, dual-gpu]
|
||||
profiles: [finetune]
|
||||
restart: "no"
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ _SERVICES: dict[str, tuple[str, int, str, bool, bool]] = {
|
|||
"searxng": ("searxng_port", 8888, "SEARXNG_PORT", True, True),
|
||||
"vllm": ("vllm_port", 8000, "VLLM_PORT", True, True),
|
||||
"vision": ("vision_port", 8002, "VISION_PORT", True, True),
|
||||
"ollama": ("ollama_port", 11434, "OLLAMA_PORT", False, True),
|
||||
"ollama": ("ollama_port", 11434, "OLLAMA_PORT", True, True),
|
||||
}
|
||||
|
||||
# LLM yaml backend keys → url suffix, keyed by service name
|
||||
|
|
|
|||
Loading…
Reference in a new issue