fix: repair beta installer path for Docker-first deployment

- llm.yaml + example: replace localhost URLs with Docker service names
  (ollama:11434, vllm:8000, vision:8002); replace personal model names
  (alex-cover-writer, llama3.1:8b) with llama3.2:3b
- user.yaml.example: update service hosts to Docker names (ollama, vllm,
  searxng) and searxng port from 8888 (host-mapped) to 8080 (internal)
- wizard step 5: fix hardcoded localhost defaults — wizard runs inside
  Docker, so service name defaults are required for connection tests to pass
- scrapers/companyScraper.py: bundle scraper so Dockerfile COPY succeeds
- setup.sh: remove host Ollama install (conflicts with Docker Ollama on
  port 11434); Docker entrypoint handles model download automatically
- README + setup.sh banner: add Circuit Forge mission statement
This commit is contained in:
pyr0ball 2026-02-25 16:03:10 -08:00
parent 8e804761a4
commit 7620a2ab8d
8 changed files with 1059 additions and 98 deletions

5
.gitignore vendored
View file

@@ -22,3 +22,8 @@ config/user.yaml
config/.backup-* config/.backup-*
config/integrations/*.yaml config/integrations/*.yaml
!config/integrations/*.yaml.example !config/integrations/*.yaml.example
# companyScraper runtime artifacts
scrapers/.cache/
scrapers/.debug/
scrapers/raw_scrapes/

View file

@@ -2,6 +2,8 @@
**AI-powered job search pipeline — by [Circuit Forge LLC](https://circuitforge.io)** **AI-powered job search pipeline — by [Circuit Forge LLC](https://circuitforge.io)**
> *"Don't be evil, for real and forever."*
Automates the full job search lifecycle: discovery → matching → cover letters → applications → interview prep. Automates the full job search lifecycle: discovery → matching → cover letters → applications → interview prep.
Privacy-first, local-first. Your data never leaves your machine. Privacy-first, local-first. Your data never leaves your machine.

View file

@@ -403,9 +403,9 @@ elif step == 5:
st.caption("Change only if services run on non-default ports or remote hosts.") st.caption("Change only if services run on non-default ports or remote hosts.")
svc = dict(saved_yaml.get("services", {})) svc = dict(saved_yaml.get("services", {}))
for svc_name, default_host, default_port in [ for svc_name, default_host, default_port in [
("ollama", "localhost", 11434), ("ollama", "ollama", 11434), # Docker service name
("vllm", "localhost", 8000), ("vllm", "vllm", 8000), # Docker service name
("searxng", "localhost", 8888), ("searxng", "searxng", 8080), # Docker internal port (host-mapped: 8888)
]: ]:
c1, c2 = st.columns([3, 1]) c1, c2 = st.columns([3, 1])
svc[f"{svc_name}_host"] = c1.text_input( svc[f"{svc_name}_host"] = c1.text_input(

View file

@@ -21,26 +21,26 @@ backends:
type: openai_compat type: openai_compat
ollama: ollama:
api_key: ollama api_key: ollama
base_url: http://localhost:11434/v1 base_url: http://ollama:11434/v1
enabled: true enabled: true
model: alex-cover-writer:latest model: llama3.2:3b
supports_images: false supports_images: false
type: openai_compat type: openai_compat
ollama_research: ollama_research:
api_key: ollama api_key: ollama
base_url: http://localhost:11434/v1 base_url: http://ollama:11434/v1
enabled: true enabled: true
model: llama3.1:8b model: llama3.2:3b
supports_images: false supports_images: false
type: openai_compat type: openai_compat
vision_service: vision_service:
base_url: http://localhost:8002 base_url: http://vision:8002
enabled: true enabled: true
supports_images: true supports_images: true
type: vision_service type: vision_service
vllm: vllm:
api_key: '' api_key: ''
base_url: http://localhost:8000/v1 base_url: http://vllm:8000/v1
enabled: true enabled: true
model: __auto__ model: __auto__
supports_images: false supports_images: false

View file

@@ -21,21 +21,21 @@ backends:
supports_images: false supports_images: false
ollama: ollama:
api_key: ollama api_key: ollama
base_url: http://localhost:11434/v1 base_url: http://ollama:11434/v1 # Docker service name; use localhost:11434 outside Docker
enabled: true enabled: true
model: alex-cover-writer:latest model: llama3.2:3b
type: openai_compat type: openai_compat
supports_images: false supports_images: false
ollama_research: ollama_research:
api_key: ollama api_key: ollama
base_url: http://localhost:11434/v1 base_url: http://ollama:11434/v1 # Docker service name; use localhost:11434 outside Docker
enabled: true enabled: true
model: llama3.1:8b model: llama3.2:3b
type: openai_compat type: openai_compat
supports_images: false supports_images: false
vllm: vllm:
api_key: '' api_key: ''
base_url: http://localhost:8000/v1 base_url: http://vllm:8000/v1 # Docker service name; use localhost:8000 outside Docker
enabled: true enabled: true
model: __auto__ model: __auto__
type: openai_compat type: openai_compat

View file

@@ -44,15 +44,15 @@ inference_profile: "remote" # remote | cpu | single-gpu | dual-gpu
services: services:
streamlit_port: 8501 streamlit_port: 8501
ollama_host: localhost ollama_host: ollama # Docker service name; use "localhost" if running outside Docker
ollama_port: 11434 ollama_port: 11434
ollama_ssl: false ollama_ssl: false
ollama_ssl_verify: true ollama_ssl_verify: true
vllm_host: localhost vllm_host: vllm # Docker service name; use "localhost" if running outside Docker
vllm_port: 8000 vllm_port: 8000
vllm_ssl: false vllm_ssl: false
vllm_ssl_verify: true vllm_ssl_verify: true
searxng_host: localhost searxng_host: searxng # Docker service name; use "localhost" if running outside Docker
searxng_port: 8888 searxng_port: 8080 # internal Docker port; use 8888 for host-mapped access
searxng_ssl: false searxng_ssl: false
searxng_ssl_verify: true searxng_ssl_verify: true

1026
scrapers/companyScraper.py Executable file

File diff suppressed because it is too large Load diff

View file

@ -204,81 +204,9 @@ install_nvidia_toolkit() {
success "NVIDIA Container Toolkit installed." success "NVIDIA Container Toolkit installed."
} }
# ── Ollama ─────────────────────────────────────────────────────────────────────
# Install Ollama on the host, ensure its service is running, and pull the
# default model named by OLLAMA_DEFAULT_MODEL in .env (falling back to
# llama3.2:3b). Relies on helpers defined elsewhere in this script:
# cmd_exists, info, success, warn, and the $OS / $SUDO globals.
# Best-effort throughout: failures warn and return rather than abort setup.
install_ollama() {
    # ── Install ───────────────────────────────────────────────────────────────
    if cmd_exists ollama; then
        success "Ollama already installed: $(ollama --version 2>/dev/null)"
    else
        info "Installing Ollama…"
        case "$OS" in
            Linux)
                # Official installer script; assumes curl is already present.
                curl -fsSL https://ollama.com/install.sh | sh ;;
            Darwin)
                if cmd_exists brew; then
                    brew install ollama
                else
                    warn "Homebrew not found — skipping Ollama. Install from: https://ollama.com/download"
                    return
                fi ;;
        esac
        success "Ollama installed."
    fi

    # ── Start service ─────────────────────────────────────────────────────────
    if [[ "$OS" == "Linux" ]] && command -v systemctl &>/dev/null; then
        $SUDO systemctl enable ollama 2>/dev/null || true
        if ! systemctl is-active --quiet ollama 2>/dev/null; then
            info "Starting Ollama service…"
            $SUDO systemctl start ollama 2>/dev/null || true
        fi
        info "Waiting for Ollama to be ready…"
        # Poll `ollama list` as a readiness probe (succeeds once the daemon answers).
        local i=0
        until ollama list &>/dev/null; do
            sleep 1; i=$((i+1))
            if [[ $i -ge 30 ]]; then
                warn "Ollama service timed out. Run: sudo systemctl start ollama"
                return
            fi
        done
        success "Ollama service running."
    elif [[ "$OS" == "Darwin" ]]; then
        if ! ollama list &>/dev/null; then
            info "Starting Ollama…"
            # Prefer brew services; otherwise background a bare `ollama serve`.
            # Note: `& }` (not `&;`) — a trailing `&;` is a bash syntax error.
            brew services start ollama 2>/dev/null \
                || { ollama serve &>/tmp/ollama.log & }
            local i=0
            until ollama list &>/dev/null; do
                sleep 1; i=$((i+1))
                if [[ $i -ge 15 ]]; then
                    warn "Ollama did not start. Run: ollama serve"
                    return
                fi
            done
        fi
        success "Ollama service running."
    fi

    # ── Pull default model ────────────────────────────────────────────────────
    local script_dir model
    script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
    # Read OLLAMA_DEFAULT_MODEL from the .env next to this script; .env is
    # created by setup_env, so that must run before this function.
    model="$(grep -E '^OLLAMA_DEFAULT_MODEL=' "${script_dir}/.env" 2>/dev/null \
        | cut -d= -f2 | tr -d '[:space:]')"
    [[ -z "$model" ]] && model="llama3.2:3b"
    if ollama show "${model}" &>/dev/null; then
        success "Default model already present: ${model}"
    else
        info "Pulling default model: ${model} (this may take several minutes)…"
        if ollama pull "${model}"; then
            success "Default model ready: ${model}"
        else
            warn "Model pull failed — run manually: ollama pull ${model}"
        fi
    fi
}
# ── Environment setup ────────────────────────────────────────────────────────── # ── Environment setup ──────────────────────────────────────────────────────────
# Note: Ollama runs as a Docker container — the compose.yml ollama service
# handles model download automatically on first start (see docker/ollama/entrypoint.sh).
setup_env() { setup_env() {
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
if [[ ! -f "$SCRIPT_DIR/.env" ]]; then if [[ ! -f "$SCRIPT_DIR/.env" ]]; then
@@ -292,10 +220,11 @@ setup_env() {
# ── Main ─────────────────────────────────────────────────────────────────────── # ── Main ───────────────────────────────────────────────────────────────────────
main() { main() {
echo "" echo ""
echo -e "${BLUE}╔══════════════════════════════════════════╗${NC}" echo -e "${BLUE}╔══════════════════════════════════════════════════════╗${NC}"
echo -e "${BLUE}║ Peregrine — Dependency Installer ║${NC}" echo -e "${BLUE}║ Peregrine — Dependency Installer ║${NC}"
echo -e "${BLUE}║ by Circuit Forge LLC ║${NC}" echo -e "${BLUE}║ by Circuit Forge LLC ║${NC}"
echo -e "${BLUE}╚══════════════════════════════════════════╝${NC}" echo -e "${BLUE}║ \"Don't be evil, for real and forever.\" ║${NC}"
echo -e "${BLUE}╚══════════════════════════════════════════════════════╝${NC}"
echo "" echo ""
install_git install_git
@@ -305,8 +234,7 @@ main() {
check_compose check_compose
install_nvidia_toolkit install_nvidia_toolkit
fi fi
setup_env # creates .env before install_ollama reads OLLAMA_DEFAULT_MODEL setup_env
install_ollama
echo "" echo ""
success "All dependencies installed." success "All dependencies installed."