feat: add vision service to compose stack and fine-tune wizard tab to Settings

- Add moondream2 vision service to compose.yml (single-gpu + dual-gpu profiles)
- Create scripts/vision_service/Dockerfile for the vision container
- Add VISION_PORT, VISION_MODEL, VISION_REVISION vars to .env.example
- Add Vision Service entry to SERVICES list in Settings (hidden unless gpu profile active)
- Add Fine-Tune Wizard tab (Task 10) to Settings with 3-step upload→preview→train flow
- Tab is always rendered; shows info message when non-GPU profile is active
This commit is contained in:
pyr0ball 2026-02-24 19:37:55 -08:00
parent b6ee6a3924
commit a61fd43eb1
4 changed files with 103 additions and 2 deletions

View file

@@ -6,6 +6,9 @@ STREAMLIT_PORT=8501
OLLAMA_PORT=11434 OLLAMA_PORT=11434
VLLM_PORT=8000 VLLM_PORT=8000
SEARXNG_PORT=8888 SEARXNG_PORT=8888
VISION_PORT=8002
VISION_MODEL=vikhyatk/moondream2
VISION_REVISION=2025-01-09
DOCS_DIR=~/Documents/JobSearch DOCS_DIR=~/Documents/JobSearch
OLLAMA_MODELS_DIR=~/models/ollama OLLAMA_MODELS_DIR=~/models/ollama

View file

@@ -77,9 +77,11 @@ Return ONLY valid JSON in this exact format:
pass pass
return {"suggested_titles": [], "suggested_excludes": []} return {"suggested_titles": [], "suggested_excludes": []}
tab_profile, tab_search, tab_llm, tab_notion, tab_services, tab_resume, tab_email, tab_skills = st.tabs( _show_finetune = bool(_profile and _profile.inference_profile in ("single-gpu", "dual-gpu"))
tab_profile, tab_search, tab_llm, tab_notion, tab_services, tab_resume, tab_email, tab_skills, tab_finetune = st.tabs(
["👤 My Profile", "🔎 Search", "🤖 LLM Backends", "📚 Notion", ["👤 My Profile", "🔎 Search", "🤖 LLM Backends", "📚 Notion",
"🔌 Services", "📝 Resume Profile", "📧 Email", "🏷️ Skills"] "🔌 Services", "📝 Resume Profile", "📧 Email", "🏷️ Skills", "🎯 Fine-Tune"]
) )
USER_CFG = CONFIG_DIR / "user.yaml" USER_CFG = CONFIG_DIR / "user.yaml"
@@ -534,6 +536,15 @@ with tab_services:
"note": "vLLM inference — dual-gpu profile only", "note": "vLLM inference — dual-gpu profile only",
"hidden": _profile_name != "dual-gpu", "hidden": _profile_name != "dual-gpu",
}, },
{
"name": "Vision Service (moondream2)",
"port": 8002,
"start": ["docker", "compose", "--profile", _profile_name, "up", "-d", "vision"],
"stop": ["docker", "compose", "stop", "vision"],
"cwd": COMPOSE_DIR,
"note": "Screenshot/image understanding for survey assistant",
"hidden": _profile_name not in ("single-gpu", "dual-gpu"),
},
{ {
"name": "SearXNG (company scraper)", "name": "SearXNG (company scraper)",
"port": _profile._svc["searxng_port"] if _profile else 8888, "port": _profile._svc["searxng_port"] if _profile else 8888,
@@ -931,3 +942,65 @@ with tab_skills:
save_yaml(KEYWORDS_CFG, kw_data) save_yaml(KEYWORDS_CFG, kw_data)
st.success("Saved.") st.success("Saved.")
st.rerun() st.rerun()
# ── Fine-Tune Wizard tab ───────────────────────────────────────────────────────
# Three-step wizard: upload cover letters → preview extracted training pairs →
# launch training. Wizard position lives in st.session_state["ft_step"].
with tab_finetune:
    if not _show_finetune:
        # Non-GPU profile: tab stays visible but only explains how to enable it.
        st.info(
            f"Fine-tuning requires a GPU profile. "
            f"Current profile: `{_profile.inference_profile if _profile else 'not configured'}`. "
            "Change it in **My Profile** to enable this feature."
        )
    else:
        st.subheader("Fine-Tune Your Cover Letter Model")
        st.caption(
            "Upload your existing cover letters to train a personalised writing model. "
            "Requires a GPU. The base model is used until fine-tuning completes."
        )
        # Current wizard step persists across Streamlit reruns; default to step 1.
        ft_step = st.session_state.get("ft_step", 1)
        if ft_step == 1:
            # Step 1: collect raw documents into the training uploads folder.
            st.markdown("**Step 1: Upload Cover Letters**")
            uploaded = st.file_uploader(
                "Upload cover letters (PDF, DOCX, or TXT)",
                type=["pdf", "docx", "txt"],
                accept_multiple_files=True,
            )
            if uploaded and st.button("Extract Training Pairs →", type="primary", key="ft_extract"):
                upload_dir = _profile.docs_dir / "training_data" / "uploads"
                upload_dir.mkdir(parents=True, exist_ok=True)
                for f in uploaded:
                    # Persist each upload verbatim for the external extraction script.
                    (upload_dir / f.name).write_bytes(f.read())
                st.session_state.ft_step = 2
                st.rerun()
        elif ft_step == 2:
            # Step 2: preview a sample of the extracted JSONL training pairs.
            st.markdown("**Step 2: Preview Training Pairs**")
            st.info("Run `python scripts/prepare_training_data.py` to extract pairs, then return here.")
            jsonl_path = _profile.docs_dir / "training_data" / "cover_letters.jsonl"
            if jsonl_path.exists():
                import json as _json  # local import: only needed on this path
                pairs = [
                    _json.loads(line)
                    for line in jsonl_path.read_text().splitlines()
                    if line.strip()  # tolerate blank lines in the JSONL file
                ]
                st.caption(f"{len(pairs)} training pairs extracted.")
                # Show only the first three pairs, truncated, to keep the UI light.
                for i, p in enumerate(pairs[:3]):
                    with st.expander(f"Pair {i+1}"):
                        st.text(p.get("input", "")[:300])
            else:
                st.warning("No training pairs found. Run `prepare_training_data.py` first.")
            col_back, col_next = st.columns([1, 4])
            if col_back.button("← Back", key="ft_back2"):
                st.session_state.ft_step = 1
                st.rerun()
            if col_next.button("Start Training →", type="primary", key="ft_next2"):
                st.session_state.ft_step = 3
                st.rerun()
        elif ft_step == 3:
            # Step 3: pick hyperparameters and queue the fine-tune job.
            st.markdown("**Step 3: Train**")
            st.slider("Epochs", 3, 20, 10, key="ft_epochs")
            if st.button("🚀 Start Fine-Tune", type="primary", key="ft_start"):
                # Fix: message previously read "3060 minutes" (missing dash in 30–60).
                st.info("Fine-tune queued as a background task. Check back in 30–60 minutes.")
            if st.button("← Back", key="ft_back3"):
                st.session_state.ft_step = 2
                st.rerun()

View file

@@ -59,6 +59,25 @@ services:
capabilities: [gpu] capabilities: [gpu]
profiles: [single-gpu, dual-gpu] profiles: [single-gpu, dual-gpu]
# Vision service: moondream2 image-understanding API used by the survey
# assistant. Built from the local Dockerfile; enabled only for GPU profiles.
# NOTE(review): nesting indentation was flattened by the diff render — restore
# standard compose indentation when applying to compose.yml.
vision:
build:
context: .
dockerfile: scripts/vision_service/Dockerfile
ports:
# Host port configurable via VISION_PORT (default 8002, matching .env.example).
- "${VISION_PORT:-8002}:8002"
environment:
- VISION_MODEL=${VISION_MODEL:-vikhyatk/moondream2}
# Pinned HF revision so model weights don't change underneath the service.
- VISION_REVISION=${VISION_REVISION:-2025-01-09}
deploy:
resources:
reservations:
devices:
- driver: nvidia
# NOTE(review): hard-coded GPU 0 — may contend with other GPU services
# on the same profile; confirm device assignment.
device_ids: ["0"]
capabilities: [gpu]
profiles: [single-gpu, dual-gpu]
restart: unless-stopped
vllm: vllm:
image: vllm/vllm-openai:latest image: vllm/vllm-openai:latest
ports: ports:

View file

@@ -0,0 +1,6 @@
# Image for the moondream2 vision service: a FastAPI app served by uvicorn.
FROM python:3.11-slim
WORKDIR /app
# NOTE(review): this installs the default torch wheel on a slim (non-CUDA) base
# image, while compose reserves an NVIDIA GPU for this service — confirm whether
# a CUDA-enabled torch build / CUDA base image is intended.
RUN pip install --no-cache-dir fastapi uvicorn transformers torch pillow einops
# Copy the service source; expects main.py defining the ASGI `app`.
COPY scripts/vision_service/ /app/
EXPOSE 8002
# Bind on all interfaces; container port 8002 matches the compose port mapping.
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8002"]