Compare commits

...

7 commits

Author SHA1 Message Date
8b1d576e43 Merge pull request 'feat(vue): open Vue SPA to all tiers; fix cloud navigation and feedback button' (#64) from feature/vue-streamlit-parity into main
Some checks failed
CI / test (push) Failing after 28s
2026-04-02 17:46:47 -07:00
3313eade49 test: update cover letter mock signature to include user_yaml_path param
Some checks failed
CI / test (pull_request) Failing after 31s
2026-04-02 17:43:01 -07:00
b06d596d4c feat(vue): open Vue SPA to all tiers; fix cloud nav and feedback button
Some checks failed
CI / test (pull_request) Failing after 1m16s
- Lower vue_ui_beta gate to "free" so all licensed users can access the
  new UI without a paid subscription
- Remove "Paid tier" wording from the Try New UI banner
- Fix Vue SPA navigation in cloud/demo deployments: add VITE_BASE_PATH
  build arg so Vite sets the correct subpath base, and pass
  import.meta.env.BASE_URL to createWebHistory() so router links
  emit /peregrine/... paths that Caddy can match
- Fix the missing feedback button on the cloud instance by passing
  FORGEJO_API_TOKEN through compose.cloud.yml
- Remove vLLM container from compose.yml (vLLM dropped from stack;
  cf-research service in cfcore covers the use case)
- Fix cloud config path in Apply page (use get_config_dir() so per-user
  cloud data roots resolve correctly for user.yaml and resume YAML)
- Refactor generate_cover_letter._build_system_context and
  _build_mission_notes to accept explicit profile arg (enables
  per-user cover letter generation in cloud multi-tenant mode)
- Add API proxy block to nginx.conf (Vue web container can now call
  /api/ directly without Vite dev proxy)
- Update .env.example: remove vLLM vars, add research model + tuning
  vars for external vLLM deployments
- Update llm.yaml: switch vllm base_url to host.docker.internal
  (vLLM now runs outside Docker stack)

Closes #63 (feedback button)
Related: #8 (Vue SPA), #50–#62 (parity milestone)
2026-04-02 17:41:35 -07:00
66dc42a407 fix(preflight): remove vllm from Docker adoption list
vllm is now managed by cf-orch as a host process — no Docker service
defined in compose.yml. Preflight was detecting port 8000 (llm_server)
and generating a vllm stub in compose.override.yml with no image,
causing `docker compose up` to error on startup.
2026-04-02 16:57:06 -07:00
bc80922d61 chore(llm): swap model_candidates order — Qwen2.5-3B first, Phi-4-mini fallback
Phi-4-mini's cached modeling_phi3.py imports SlidingWindowCache which
was removed in transformers 5.x. Qwen2.5-3B uses built-in qwen2 arch
and works cleanly. Reorder so Qwen is tried first.
2026-04-02 16:36:38 -07:00
11fb3a07b4 chore(llm): switch vllm model_candidates from Ouro to Phi-4-mini + Qwen2.5-3B
Ouro models incompatible with transformers 5.x bundled in cf env.
Phi-4-mini-instruct tried first (stronger benchmarks, 7.2GB);
Qwen2.5-3B-Instruct as VRAM-constrained fallback (5.8GB).
2026-04-02 15:34:59 -07:00
7c9dcd2620 config(llm): add cf_orch block to vllm backend 2026-04-02 12:20:41 -07:00
16 changed files with 101 additions and 60 deletions

View file

@ -12,8 +12,11 @@ VISION_REVISION=2025-01-09
DOCS_DIR=~/Documents/JobSearch DOCS_DIR=~/Documents/JobSearch
OLLAMA_MODELS_DIR=~/models/ollama OLLAMA_MODELS_DIR=~/models/ollama
VLLM_MODELS_DIR=~/models/vllm VLLM_MODELS_DIR=~/models/vllm # override with full path to your model dir
VLLM_MODEL=Ouro-1.4B VLLM_MODEL=Ouro-1.4B # cover letters — fast 1.4B model
VLLM_RESEARCH_MODEL=Ouro-2.6B-Thinking # research — reasoning 2.6B model; restart vllm to switch
VLLM_MAX_MODEL_LEN=4096 # increase to 8192 for Thinking models with long CoT
VLLM_GPU_MEM_UTIL=0.75 # lower to 0.6 if sharing GPU with other services
OLLAMA_DEFAULT_MODEL=llama3.2:3b OLLAMA_DEFAULT_MODEL=llama3.2:3b
# API keys (required for remote profile) # API keys (required for remote profile)

View file

@ -200,7 +200,7 @@ def render_banner(yaml_path: Path, tier: str) -> None:
col1, col2, col3 = st.columns([8, 1, 1]) col1, col2, col3 = st.columns([8, 1, 1])
with col1: with col1:
st.info("✨ **New Peregrine UI available** — try the modern Vue interface (Beta, Paid tier)") st.info("✨ **New Peregrine UI available** — try the modern Vue interface (Beta)")
with col2: with col2:
if st.button("Try it", key="_ui_banner_try"): if st.button("Try it", key="_ui_banner_try"):
switch_ui(yaml_path, to="vue", tier=tier) switch_ui(yaml_path, to="vue", tier=tier)

View file

@ -15,28 +15,28 @@ import streamlit.components.v1 as components
import yaml import yaml
from scripts.user_profile import UserProfile from scripts.user_profile import UserProfile
_USER_YAML = Path(__file__).parent.parent.parent / "config" / "user.yaml"
_profile = UserProfile(_USER_YAML) if UserProfile.exists(_USER_YAML) else None
_name = _profile.name if _profile else "Job Seeker"
from scripts.db import ( from scripts.db import (
DEFAULT_DB, init_db, get_jobs_by_status, DEFAULT_DB, init_db, get_jobs_by_status,
update_cover_letter, mark_applied, update_job_status, update_cover_letter, mark_applied, update_job_status,
get_task_for_job, get_task_for_job,
) )
from scripts.task_runner import submit_task from scripts.task_runner import submit_task
from app.cloud_session import resolve_session, get_db_path from app.cloud_session import resolve_session, get_db_path, get_config_dir
from app.telemetry import log_usage_event from app.telemetry import log_usage_event
DOCS_DIR = _profile.docs_dir if _profile else Path.home() / "Documents" / "JobSearch"
RESUME_YAML = Path(__file__).parent.parent.parent / "config" / "plain_text_resume.yaml"
st.title("🚀 Apply Workspace") st.title("🚀 Apply Workspace")
resolve_session("peregrine") resolve_session("peregrine")
init_db(get_db_path()) init_db(get_db_path())
_CONFIG_DIR = get_config_dir()
_USER_YAML = _CONFIG_DIR / "user.yaml"
_profile = UserProfile(_USER_YAML) if UserProfile.exists(_USER_YAML) else None
_name = _profile.name if _profile else "Job Seeker"
DOCS_DIR = _profile.docs_dir if _profile else Path.home() / "Documents" / "JobSearch"
RESUME_YAML = _CONFIG_DIR / "plain_text_resume.yaml"
# ── PDF generation ───────────────────────────────────────────────────────────── # ── PDF generation ─────────────────────────────────────────────────────────────
def _make_cover_letter_pdf(job: dict, cover_letter: str, output_dir: Path) -> Path: def _make_cover_letter_pdf(job: dict, cover_letter: str, output_dir: Path) -> Path:
from reportlab.lib.pagesizes import letter from reportlab.lib.pagesizes import letter

View file

@ -64,8 +64,8 @@ FEATURES: dict[str, str] = {
"apple_calendar_sync": "paid", "apple_calendar_sync": "paid",
"slack_notifications": "paid", "slack_notifications": "paid",
# Beta UI access — stays gated (access management, not compute) # Beta UI access — open to all tiers (access management, not compute)
"vue_ui_beta": "paid", "vue_ui_beta": "free",
} }
# Features that unlock when the user supplies any LLM backend (local or BYOK). # Features that unlock when the user supplies any LLM backend (local or BYOK).

View file

@ -37,6 +37,7 @@ services:
- PEREGRINE_CADDY_PROXY=1 - PEREGRINE_CADDY_PROXY=1
- CF_ORCH_URL=http://host.docker.internal:7700 - CF_ORCH_URL=http://host.docker.internal:7700
- DEMO_MODE=false - DEMO_MODE=false
- FORGEJO_API_TOKEN=${FORGEJO_API_TOKEN:-}
depends_on: depends_on:
searxng: searxng:
condition: service_healthy condition: service_healthy
@ -48,6 +49,8 @@ services:
build: build:
context: . context: .
dockerfile: docker/web/Dockerfile dockerfile: docker/web/Dockerfile
args:
VITE_BASE_PATH: /peregrine/
ports: ports:
- "8508:80" - "8508:80"
restart: unless-stopped restart: unless-stopped

View file

@ -42,6 +42,8 @@ services:
build: build:
context: . context: .
dockerfile: docker/web/Dockerfile dockerfile: docker/web/Dockerfile
args:
VITE_BASE_PATH: /peregrine/
ports: ports:
- "8507:80" - "8507:80"
restart: unless-stopped restart: unless-stopped

View file

@ -1,5 +1,5 @@
# compose.yml — Peregrine by Circuit Forge LLC # compose.yml — Peregrine by Circuit Forge LLC
# Profiles: remote | cpu | single-gpu | dual-gpu-ollama | dual-gpu-vllm | dual-gpu-mixed # Profiles: remote | cpu | single-gpu | dual-gpu-ollama
services: services:
app: app:
@ -129,23 +129,6 @@ services:
profiles: [single-gpu, dual-gpu-ollama, dual-gpu-vllm, dual-gpu-mixed] profiles: [single-gpu, dual-gpu-ollama, dual-gpu-vllm, dual-gpu-mixed]
restart: unless-stopped restart: unless-stopped
vllm:
image: vllm/vllm-openai:latest
ports:
- "${VLLM_PORT:-8000}:8000"
volumes:
- ${VLLM_MODELS_DIR:-~/models/vllm}:/models
command: >
--model /models/${VLLM_MODEL:-Ouro-1.4B}
--trust-remote-code
--max-model-len 4096
--gpu-memory-utilization 0.75
--enforce-eager
--max-num-seqs 8
--cpu-offload-gb ${CPU_OFFLOAD_GB:-0}
profiles: [dual-gpu-vllm, dual-gpu-mixed]
restart: unless-stopped
finetune: finetune:
build: build:
context: . context: .

View file

@ -28,9 +28,9 @@ backends:
type: openai_compat type: openai_compat
ollama_research: ollama_research:
api_key: ollama api_key: ollama
base_url: http://host.docker.internal:11434/v1 base_url: http://ollama_research:11434/v1
enabled: true enabled: true
model: llama3.2:3b model: llama3.1:8b
supports_images: false supports_images: false
type: openai_compat type: openai_compat
vision_service: vision_service:
@ -45,6 +45,11 @@ backends:
model: __auto__ model: __auto__
supports_images: false supports_images: false
type: openai_compat type: openai_compat
cf_orch:
service: vllm
model_candidates:
- Qwen2.5-3B-Instruct
ttl_s: 300
vllm_research: vllm_research:
api_key: '' api_key: ''
base_url: http://host.docker.internal:8000/v1 base_url: http://host.docker.internal:8000/v1

View file

@ -4,6 +4,8 @@ WORKDIR /app
COPY web/package*.json ./ COPY web/package*.json ./
RUN npm ci --prefer-offline RUN npm ci --prefer-offline
COPY web/ ./ COPY web/ ./
ARG VITE_BASE_PATH=/
ENV VITE_BASE_PATH=${VITE_BASE_PATH}
RUN npm run build RUN npm run build
# Stage 2: serve # Stage 2: serve

View file

@ -5,9 +5,13 @@ server {
root /usr/share/nginx/html; root /usr/share/nginx/html;
index index.html; index index.html;
# SPA fallback # Proxy API calls to the FastAPI backend service
location / { location /api/ {
try_files $uri $uri/ /index.html; proxy_pass http://api:8601;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_read_timeout 120s;
} }
# Cache static assets # Cache static assets
@ -15,4 +19,9 @@ server {
expires 1y; expires 1y;
add_header Cache-Control "public, immutable"; add_header Cache-Control "public, immutable";
} }
# SPA fallback must come after API and assets
location / {
try_files $uri $uri/ /index.html;
}
} }

View file

@ -26,13 +26,14 @@ LETTERS_DIR = _profile.docs_dir if _profile else Path.home() / "Documents" / "Jo
LETTER_GLOB = "*Cover Letter*.md" LETTER_GLOB = "*Cover Letter*.md"
# Background injected into every prompt so the model has the candidate's facts # Background injected into every prompt so the model has the candidate's facts
def _build_system_context() -> str: def _build_system_context(profile=None) -> str:
if not _profile: p = profile or _profile
if not p:
return "You are a professional cover letter writer. Write in first person." return "You are a professional cover letter writer. Write in first person."
parts = [f"You are writing cover letters for {_profile.name}. {_profile.career_summary}"] parts = [f"You are writing cover letters for {p.name}. {p.career_summary}"]
if _profile.candidate_voice: if p.candidate_voice:
parts.append( parts.append(
f"Voice and personality: {_profile.candidate_voice} " f"Voice and personality: {p.candidate_voice} "
"Write in a way that reflects these authentic traits — not as a checklist, " "Write in a way that reflects these authentic traits — not as a checklist, "
"but as a natural expression of who this person is." "but as a natural expression of who this person is."
) )
@ -125,15 +126,17 @@ _MISSION_DEFAULTS: dict[str, str] = {
} }
def _build_mission_notes() -> dict[str, str]: def _build_mission_notes(profile=None, candidate_name: str | None = None) -> dict[str, str]:
"""Merge user's custom mission notes with generic defaults.""" """Merge user's custom mission notes with generic defaults."""
prefs = _profile.mission_preferences if _profile else {} p = profile or _profile
name = candidate_name or _candidate
prefs = p.mission_preferences if p else {}
notes = {} notes = {}
for industry, default_note in _MISSION_DEFAULTS.items(): for industry, default_note in _MISSION_DEFAULTS.items():
custom = (prefs.get(industry) or "").strip() custom = (prefs.get(industry) or "").strip()
if custom: if custom:
notes[industry] = ( notes[industry] = (
f"Mission alignment — {_candidate} shared: \"{custom}\". " f"Mission alignment — {name} shared: \"{custom}\". "
"Para 3 should warmly and specifically reflect this authentic connection." "Para 3 should warmly and specifically reflect this authentic connection."
) )
else: else:
@ -144,12 +147,15 @@ def _build_mission_notes() -> dict[str, str]:
_MISSION_NOTES = _build_mission_notes() _MISSION_NOTES = _build_mission_notes()
def detect_mission_alignment(company: str, description: str) -> str | None: def detect_mission_alignment(
company: str, description: str, mission_notes: dict | None = None
) -> str | None:
"""Return a mission hint string if company/JD matches a preferred industry, else None.""" """Return a mission hint string if company/JD matches a preferred industry, else None."""
notes = mission_notes if mission_notes is not None else _MISSION_NOTES
text = f"{company} {description}".lower() text = f"{company} {description}".lower()
for industry, signals in _MISSION_SIGNALS.items(): for industry, signals in _MISSION_SIGNALS.items():
if any(sig in text for sig in signals): if any(sig in text for sig in signals):
return _MISSION_NOTES[industry] return notes[industry]
return None return None
@ -190,10 +196,14 @@ def build_prompt(
examples: list[dict], examples: list[dict],
mission_hint: str | None = None, mission_hint: str | None = None,
is_jobgether: bool = False, is_jobgether: bool = False,
system_context: str | None = None,
candidate_name: str | None = None,
) -> str: ) -> str:
parts = [SYSTEM_CONTEXT.strip(), ""] ctx = system_context if system_context is not None else SYSTEM_CONTEXT
name = candidate_name or _candidate
parts = [ctx.strip(), ""]
if examples: if examples:
parts.append(f"=== STYLE EXAMPLES ({_candidate}'s past letters) ===\n") parts.append(f"=== STYLE EXAMPLES ({name}'s past letters) ===\n")
for i, ex in enumerate(examples, 1): for i, ex in enumerate(examples, 1):
parts.append(f"--- Example {i} ({ex['company']}) ---") parts.append(f"--- Example {i} ({ex['company']}) ---")
parts.append(ex["text"]) parts.append(ex["text"])
@ -231,13 +241,14 @@ def build_prompt(
return "\n".join(parts) return "\n".join(parts)
def _trim_to_letter_end(text: str) -> str: def _trim_to_letter_end(text: str, profile=None) -> str:
"""Remove repetitive hallucinated content after the first complete sign-off. """Remove repetitive hallucinated content after the first complete sign-off.
Fine-tuned models sometimes loop after completing the letter. This cuts at Fine-tuned models sometimes loop after completing the letter. This cuts at
the first closing + candidate name so only the intended letter is saved. the first closing + candidate name so only the intended letter is saved.
""" """
candidate_first = (_profile.name.split()[0] if _profile else "").strip() p = profile or _profile
candidate_first = (p.name.split()[0] if p else "").strip()
pattern = ( pattern = (
r'(?:Warm regards|Sincerely|Best regards|Kind regards|Thank you)[,.]?\s*\n+\s*' r'(?:Warm regards|Sincerely|Best regards|Kind regards|Thank you)[,.]?\s*\n+\s*'
+ (re.escape(candidate_first) if candidate_first else r'\w+(?:\s+\w+)?') + (re.escape(candidate_first) if candidate_first else r'\w+(?:\s+\w+)?')
@ -257,6 +268,8 @@ def generate(
feedback: str = "", feedback: str = "",
is_jobgether: bool = False, is_jobgether: bool = False,
_router=None, _router=None,
config_path: "Path | None" = None,
user_yaml_path: "Path | None" = None,
) -> str: ) -> str:
"""Generate a cover letter and return it as a string. """Generate a cover letter and return it as a string.
@ -264,15 +277,29 @@ def generate(
and requested changes are appended to the prompt so the LLM revises rather and requested changes are appended to the prompt so the LLM revises rather
than starting from scratch. than starting from scratch.
user_yaml_path overrides the module-level profile; it is required in cloud mode
so each user's name/voice/mission prefs are used instead of the global default.
_router is an optional pre-built LLMRouter (used in tests to avoid real LLM calls). _router is an optional pre-built LLMRouter (used in tests to avoid real LLM calls).
""" """
# Per-call profile override (cloud mode: each user has their own user.yaml)
if user_yaml_path and Path(user_yaml_path).exists():
_prof = UserProfile(Path(user_yaml_path))
else:
_prof = _profile
sys_ctx = _build_system_context(_prof)
mission_notes = _build_mission_notes(_prof, candidate_name=(_prof.name if _prof else None))
candidate_name = _prof.name if _prof else _candidate
corpus = load_corpus() corpus = load_corpus()
examples = find_similar_letters(description or f"{title} {company}", corpus) examples = find_similar_letters(description or f"{title} {company}", corpus)
mission_hint = detect_mission_alignment(company, description) mission_hint = detect_mission_alignment(company, description, mission_notes=mission_notes)
if mission_hint: if mission_hint:
print(f"[cover-letter] Mission alignment detected for {company}", file=sys.stderr) print(f"[cover-letter] Mission alignment detected for {company}", file=sys.stderr)
prompt = build_prompt(title, company, description, examples, prompt = build_prompt(title, company, description, examples,
mission_hint=mission_hint, is_jobgether=is_jobgether) mission_hint=mission_hint, is_jobgether=is_jobgether,
system_context=sys_ctx, candidate_name=candidate_name)
if previous_result: if previous_result:
prompt += f"\n\n---\nPrevious draft:\n{previous_result}" prompt += f"\n\n---\nPrevious draft:\n{previous_result}"
@ -281,8 +308,9 @@ def generate(
if _router is None: if _router is None:
sys.path.insert(0, str(Path(__file__).parent.parent)) sys.path.insert(0, str(Path(__file__).parent.parent))
from scripts.llm_router import LLMRouter from scripts.llm_router import LLMRouter, CONFIG_PATH
_router = LLMRouter() resolved = config_path if (config_path and Path(config_path).exists()) else CONFIG_PATH
_router = LLMRouter(resolved)
print(f"[cover-letter] Generating for: {title} @ {company}", file=sys.stderr) print(f"[cover-letter] Generating for: {title} @ {company}", file=sys.stderr)
print(f"[cover-letter] Style examples: {[e['company'] for e in examples]}", file=sys.stderr) print(f"[cover-letter] Style examples: {[e['company'] for e in examples]}", file=sys.stderr)
@ -292,7 +320,7 @@ def generate(
# max_tokens=1200 caps generation at ~900 words — enough for any cover letter # max_tokens=1200 caps generation at ~900 words — enough for any cover letter
# and prevents fine-tuned models from looping into repetitive garbage output. # and prevents fine-tuned models from looping into repetitive garbage output.
result = _router.complete(prompt, max_tokens=1200) result = _router.complete(prompt, max_tokens=1200)
return _trim_to_letter_end(result) return _trim_to_letter_end(result, _prof)
def main() -> None: def main() -> None:

View file

@ -47,7 +47,7 @@ OVERRIDE_YML = ROOT / "compose.override.yml"
_SERVICES: dict[str, tuple[str, int, str, bool, bool]] = { _SERVICES: dict[str, tuple[str, int, str, bool, bool]] = {
"streamlit": ("streamlit_port", 8501, "STREAMLIT_PORT", True, False), "streamlit": ("streamlit_port", 8501, "STREAMLIT_PORT", True, False),
"searxng": ("searxng_port", 8888, "SEARXNG_PORT", True, True), "searxng": ("searxng_port", 8888, "SEARXNG_PORT", True, True),
"vllm": ("vllm_port", 8000, "VLLM_PORT", True, True), # vllm removed — now managed by cf-orch (host process), not a Docker service
"vision": ("vision_port", 8002, "VISION_PORT", True, True), "vision": ("vision_port", 8002, "VISION_PORT", True, True),
"ollama": ("ollama_port", 11434, "OLLAMA_PORT", True, True), "ollama": ("ollama_port", 11434, "OLLAMA_PORT", True, True),
"ollama_research": ("ollama_research_port", 11435, "OLLAMA_RESEARCH_PORT", True, True), "ollama_research": ("ollama_research_port", 11435, "OLLAMA_RESEARCH_PORT", True, True),
@ -65,7 +65,6 @@ _LLM_BACKENDS: dict[str, list[tuple[str, str]]] = {
_DOCKER_INTERNAL: dict[str, tuple[str, int]] = { _DOCKER_INTERNAL: dict[str, tuple[str, int]] = {
"ollama": ("ollama", 11434), "ollama": ("ollama", 11434),
"ollama_research": ("ollama_research", 11434), # container-internal port is always 11434 "ollama_research": ("ollama_research", 11434), # container-internal port is always 11434
"vllm": ("vllm", 8000),
"vision": ("vision", 8002), "vision": ("vision", 8002),
"searxng": ("searxng", 8080), # searxng internal port differs from host port "searxng": ("searxng", 8080), # searxng internal port differs from host port
} }

View file

@ -179,6 +179,9 @@ def _run_task(db_path: Path, task_id: int, task_type: str, job_id: int,
import json as _json import json as _json
p = _json.loads(params or "{}") p = _json.loads(params or "{}")
from scripts.generate_cover_letter import generate from scripts.generate_cover_letter import generate
_cfg_dir = Path(db_path).parent / "config"
_user_llm_cfg = _cfg_dir / "llm.yaml"
_user_yaml = _cfg_dir / "user.yaml"
result = generate( result = generate(
job.get("title", ""), job.get("title", ""),
job.get("company", ""), job.get("company", ""),
@ -186,6 +189,8 @@ def _run_task(db_path: Path, task_id: int, task_type: str, job_id: int,
previous_result=p.get("previous_result", ""), previous_result=p.get("previous_result", ""),
feedback=p.get("feedback", ""), feedback=p.get("feedback", ""),
is_jobgether=job.get("source") == "jobgether", is_jobgether=job.get("source") == "jobgether",
config_path=_user_llm_cfg,
user_yaml_path=_user_yaml,
) )
update_cover_letter(db_path, job_id, result) update_cover_letter(db_path, job_id, result)

View file

@ -80,7 +80,8 @@ class TestTaskRunnerCoverLetterParams:
captured = {} captured = {}
def mock_generate(title, company, description="", previous_result="", feedback="", def mock_generate(title, company, description="", previous_result="", feedback="",
is_jobgether=False, _router=None): is_jobgether=False, _router=None, config_path=None,
user_yaml_path=None):
captured.update({ captured.update({
"title": title, "company": company, "title": title, "company": company,
"previous_result": previous_result, "feedback": feedback, "previous_result": previous_result, "feedback": feedback,

View file

@ -3,7 +3,7 @@ import { useAppConfigStore } from '../stores/appConfig'
import { settingsGuard } from './settingsGuard' import { settingsGuard } from './settingsGuard'
export const router = createRouter({ export const router = createRouter({
history: createWebHistory(), history: createWebHistory(import.meta.env.BASE_URL),
routes: [ routes: [
{ path: '/', component: () => import('../views/HomeView.vue') }, { path: '/', component: () => import('../views/HomeView.vue') },
{ path: '/review', component: () => import('../views/JobReviewView.vue') }, { path: '/review', component: () => import('../views/JobReviewView.vue') },

View file

@ -3,6 +3,7 @@ import vue from '@vitejs/plugin-vue'
import UnoCSS from 'unocss/vite' import UnoCSS from 'unocss/vite'
export default defineConfig({ export default defineConfig({
base: process.env.VITE_BASE_PATH || '/',
plugins: [vue(), UnoCSS()], plugins: [vue(), UnoCSS()],
server: { server: {
host: '0.0.0.0', host: '0.0.0.0',