Backend - dev-api.py: Q&A suggest endpoint, Log Contact, cf-orch node detection in wizard hardware step, canonical search_profiles format (profiles:[...]), connections settings endpoints, Resume Library endpoints - db_migrate.py: migrations 002/003/004 — ATS columns, resume review, final resume struct - discover.py: _normalize_profiles() for legacy wizard YAML format compat - resume_optimizer.py: section-by-section resume parsing + scoring - task_runner.py: Q&A and contact-log task types - company_research.py: accessibility brief column wiring - generate_cover_letter.py: restore _candidate module-level binding Frontend - InterviewPrepView.vue: Q&A chat tab, Log Contact form, MarkdownView rendering - InterviewCard.vue: new reusable card component for interviews kanban - InterviewsView.vue: rejected analytics section with stage breakdown chips - ResumeProfileView.vue: sync with new resume store shape - SearchPrefsView.vue: cf-orch toggle, profile format migration - SystemSettingsView.vue: connections settings wiring - ConnectionsSettingsView.vue: new view for integration connections - MarkdownView.vue: new component for safe markdown rendering - ApplyWorkspace.vue: a11y — h1→h2 demotion, aria-expanded on Q&A toggle, confirmation dialog on Reject action (#98 #99 #100) - peregrine.css: explicit [data-theme="dark"] token block for light-OS users (#101), :focus-visible outline (#97) - wizard.css: cf-orch hardware step styles - WizardHardwareStep.vue: cf-orch node display, profile selection with orch option - WizardLayout.vue: hardware step wiring Infra - compose.yml / compose.cloud.yml: cf-orch agent sidecar, llm.cloud.yaml mount - Dockerfile.cfcore: cf-core editable install in image build - HANDOFF-xanderland.md: Podman/systemd setup guide for beta tester - podman-standalone.sh: standalone Podman run script Tests - test_dev_api_settings.py: remove stale worktree path bootstrap (credential_store now in main repo); fix job_boards fixture to use non-empty list - test_wizard_api.py: update profiles assertion to superset check (cf-orch added); update step6 assertion to canonical profiles[].titles format
172 lines
5.3 KiB
YAML
172 lines
5.3 KiB
YAML
# compose.yml — Peregrine by Circuit Forge LLC
# Profiles: remote | cpu | single-gpu | dual-gpu-ollama | dual-gpu-vllm | dual-gpu-mixed | finetune
services:

  # Streamlit UI container. The docker socket + docker CLI mounts suggest it
  # launches sibling containers — NOTE(review): confirm against app code.
  app:
    build:
      context: ..
      dockerfile: peregrine/Dockerfile.cfcore
    # Folded scalar (>): line breaks become spaces, so this is one shell line.
    command: >
      bash -c "streamlit run app/app.py
      --server.port=8501
      --server.headless=true
      --server.fileWatcherType=none
      2>&1 | tee /app/data/.streamlit.log"
    ports:
      - "${STREAMLIT_PORT:-8501}:8501"
    volumes:
      - ./config:/app/config
      - ./data:/app/data
      - ${DOCS_DIR:-~/Documents/JobSearch}:/docs
      - /var/run/docker.sock:/var/run/docker.sock
      - /usr/bin/docker:/usr/bin/docker:ro
    environment:
      - STAGING_DB=/app/data/staging.db
      - DOCS_DIR=/docs
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
      - OPENAI_COMPAT_URL=${OPENAI_COMPAT_URL:-}
      - OPENAI_COMPAT_KEY=${OPENAI_COMPAT_KEY:-}
      - PEREGRINE_GPU_COUNT=${PEREGRINE_GPU_COUNT:-0}
      - PEREGRINE_GPU_NAMES=${PEREGRINE_GPU_NAMES:-}
      - RECOMMENDED_PROFILE=${RECOMMENDED_PROFILE:-remote}
      - STREAMLIT_SERVER_BASE_URL_PATH=${STREAMLIT_BASE_URL_PATH:-}
      - FORGEJO_API_TOKEN=${FORGEJO_API_TOKEN:-}
      - FORGEJO_REPO=${FORGEJO_REPO:-}
      - FORGEJO_API_URL=${FORGEJO_API_URL:-}
      - PYTHONUNBUFFERED=1
      - PYTHONLOGGING=WARNING
      - PEREGRINE_CADDY_PROXY=1
    # Wait for the searxng healthcheck (defined below) before starting.
    depends_on:
      searxng:
        condition: service_healthy
    extra_hosts:
      - "host.docker.internal:host-gateway"
    restart: unless-stopped
api:
|
|
build:
|
|
context: ..
|
|
dockerfile: peregrine/Dockerfile.cfcore
|
|
command: >
|
|
bash -c "uvicorn dev_api:app --host 0.0.0.0 --port 8601"
|
|
volumes:
|
|
- ./config:/app/config
|
|
- ./data:/app/data
|
|
- ${DOCS_DIR:-~/Documents/JobSearch}:/docs
|
|
environment:
|
|
- STAGING_DB=/app/data/staging.db
|
|
- DOCS_DIR=/docs
|
|
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
|
|
- OPENAI_COMPAT_URL=${OPENAI_COMPAT_URL:-}
|
|
- OPENAI_COMPAT_KEY=${OPENAI_COMPAT_KEY:-}
|
|
- PEREGRINE_GPU_COUNT=${PEREGRINE_GPU_COUNT:-0}
|
|
- PEREGRINE_GPU_NAMES=${PEREGRINE_GPU_NAMES:-}
|
|
- CF_ORCH_URL=${CF_ORCH_URL:-http://host.docker.internal:7700}
|
|
- PYTHONUNBUFFERED=1
|
|
extra_hosts:
|
|
- "host.docker.internal:host-gateway"
|
|
restart: unless-stopped
|
|
|
|
web:
|
|
build:
|
|
context: .
|
|
dockerfile: docker/web/Dockerfile
|
|
ports:
|
|
- "${VUE_PORT:-8506}:80"
|
|
depends_on:
|
|
- api
|
|
restart: unless-stopped
|
|
|
|
searxng:
|
|
image: searxng/searxng:latest
|
|
ports:
|
|
- "${SEARXNG_PORT:-8888}:8080"
|
|
volumes:
|
|
- ./docker/searxng:/etc/searxng:ro
|
|
healthcheck:
|
|
test: ["CMD", "wget", "-q", "--spider", "http://localhost:8080/"]
|
|
interval: 10s
|
|
timeout: 5s
|
|
retries: 3
|
|
restart: unless-stopped
|
|
|
|
ollama:
|
|
image: ollama/ollama:latest
|
|
ports:
|
|
- "${OLLAMA_PORT:-11434}:11434"
|
|
volumes:
|
|
- ${OLLAMA_MODELS_DIR:-~/models/ollama}:/root/.ollama
|
|
- ./docker/ollama/entrypoint.sh:/entrypoint.sh
|
|
environment:
|
|
- OLLAMA_MODELS=/root/.ollama
|
|
- DEFAULT_OLLAMA_MODEL=${OLLAMA_DEFAULT_MODEL:-llama3.2:3b}
|
|
entrypoint: ["/bin/bash", "/entrypoint.sh"]
|
|
profiles: [cpu, single-gpu, dual-gpu-ollama, dual-gpu-vllm, dual-gpu-mixed]
|
|
restart: unless-stopped
|
|
|
|
ollama_research:
|
|
image: ollama/ollama:latest
|
|
ports:
|
|
- "${OLLAMA_RESEARCH_PORT:-11435}:11434"
|
|
volumes:
|
|
- ${OLLAMA_MODELS_DIR:-~/models/ollama}:/root/.ollama
|
|
- ./docker/ollama/entrypoint.sh:/entrypoint.sh
|
|
environment:
|
|
- OLLAMA_MODELS=/root/.ollama
|
|
- DEFAULT_OLLAMA_MODEL=${OLLAMA_RESEARCH_MODEL:-llama3.2:3b}
|
|
entrypoint: ["/bin/bash", "/entrypoint.sh"]
|
|
profiles: [dual-gpu-ollama, dual-gpu-mixed]
|
|
restart: unless-stopped
|
|
|
|
vision:
|
|
build:
|
|
context: .
|
|
dockerfile: scripts/vision_service/Dockerfile
|
|
ports:
|
|
- "${VISION_PORT:-8002}:8002"
|
|
environment:
|
|
- VISION_MODEL=${VISION_MODEL:-vikhyatk/moondream2}
|
|
- VISION_REVISION=${VISION_REVISION:-2025-01-09}
|
|
profiles: [single-gpu, dual-gpu-ollama, dual-gpu-vllm, dual-gpu-mixed]
|
|
restart: unless-stopped
|
|
|
|
cf-orch-agent:
|
|
build:
|
|
context: ..
|
|
dockerfile: peregrine/Dockerfile.cfcore
|
|
command: ["/bin/sh", "/app/docker/cf-orch-agent/start.sh"]
|
|
ports:
|
|
- "${CF_ORCH_AGENT_PORT:-7701}:7701"
|
|
environment:
|
|
- CF_ORCH_COORDINATOR_URL=${CF_ORCH_COORDINATOR_URL:-http://host.docker.internal:7700}
|
|
- CF_ORCH_NODE_ID=${CF_ORCH_NODE_ID:-peregrine}
|
|
- CF_ORCH_AGENT_PORT=${CF_ORCH_AGENT_PORT:-7701}
|
|
- CF_ORCH_ADVERTISE_HOST=${CF_ORCH_ADVERTISE_HOST:-}
|
|
- PYTHONUNBUFFERED=1
|
|
extra_hosts:
|
|
- "host.docker.internal:host-gateway"
|
|
deploy:
|
|
resources:
|
|
reservations:
|
|
devices:
|
|
- driver: nvidia
|
|
count: all
|
|
capabilities: [gpu]
|
|
profiles: [single-gpu, dual-gpu-ollama, dual-gpu-vllm, dual-gpu-mixed]
|
|
restart: unless-stopped
|
|
|
|
finetune:
|
|
build:
|
|
context: .
|
|
dockerfile: Dockerfile.finetune
|
|
volumes:
|
|
- ${DOCS_DIR:-~/Documents/JobSearch}:/docs
|
|
- ${OLLAMA_MODELS_DIR:-~/models/ollama}:/ollama-models
|
|
- ./config:/app/config
|
|
environment:
|
|
- DOCS_DIR=/docs
|
|
- OLLAMA_URL=http://ollama:11434
|
|
- OLLAMA_MODELS_MOUNT=/ollama-models
|
|
- OLLAMA_MODELS_OLLAMA_PATH=/root/.ollama
|
|
profiles: [finetune]
|
|
restart: "no"
|