feat: prompt for model weights directory during install

Interactive prompt lets users with split-drive setups point Ollama and
vLLM model dirs at a dedicated storage drive. Reads current .env value
as default so re-runs are idempotent. Skips prompts in non-interactive
(piped) mode. Creates the target directory immediately and updates .env
in-place via portable awk (Linux + macOS). Also simplifies next-steps
output since model paths are now configured at install time.
This commit is contained in:
pyr0ball 2026-02-25 16:08:14 -08:00
parent 7620a2ab8d
commit f8eb4e9cfd

View file

@ -217,6 +217,58 @@ setup_env() {
fi fi
} }
# ── Model weights storage ───────────────────────────────────────────────────────
_update_env_key() {
  # Portable in-place KEY=VALUE update for .env files (Linux + macOS —
  # works with both GNU and BSD awk; avoids the non-portable `sed -i`).
  # Appends the key if not already present.
  #   $1 - env file path   $2 - key name   $3 - new value
  local file="$1" key="$2" val="$3"
  # Create the file if missing: otherwise awk fails to open it, the `&&`
  # short-circuits, and the update is silently dropped on a fresh checkout.
  [[ -f "$file" ]] || : > "$file"
  awk -v k="$key" -v v="$val" '
    BEGIN { found = 0 }
    # Literal prefix match ("KEY=" at column 1) instead of building a regex
    # from the key, so regex metacharacters in a key cannot mis-match.
    index($0, k "=") == 1 { print k "=" v; found = 1; next }
    { print }
    END { if (!found) print k "=" v }
  ' "$file" > "${file}.tmp" && mv -- "${file}.tmp" "$file"
}
# Prompt for one model-weights directory: read the current value from .env as
# the default, expand a leading ~, create the directory, and persist the choice.
#   $1 - .env path
#   $2 - env key (e.g. OLLAMA_MODELS_DIR)
#   $3 - fallback default shown when the key is absent from .env
#   $4 - human-readable engine label used in the prompt text
_prompt_model_dir() {
  local env_file="$1" key="$2" fallback="$3" label="$4"
  local current input
  current="$(grep -E "^${key}=" "$env_file" 2>/dev/null | cut -d= -f2-)"
  [[ -z "$current" ]] && current="$fallback"
  read -rp " ${label} models dir [${current}]: " input || input=""
  input="${input:-$current}"
  # Expand a leading ~ so mkdir and the container runtime see a real path.
  input="${input/#\~/$HOME}"
  mkdir -p "$input" 2>/dev/null || warn "Could not create $input — ensure it exists before 'make start'"
  _update_env_key "$env_file" "$key" "$input"
  success "${key}=${input}"
}

configure_model_paths() {
  # Interactively point OLLAMA_MODELS_DIR / VLLM_MODELS_DIR at a storage
  # drive. Re-runs are idempotent: current .env values become the defaults.
  local env_file
  env_file="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.env"
  # Skip prompts when stdin is not a terminal (e.g. curl | bash)
  if [[ ! -t 0 ]]; then
    info "Non-interactive — using default model paths from .env"
    return
  fi
  echo ""
  info "Model weights storage"
  echo -e " AI models can be 230+ GB each. If you have a separate data drive,"
  echo -e " point these at it now. Press Enter to keep the value shown in [brackets]."
  echo ""
  _prompt_model_dir "$env_file" "OLLAMA_MODELS_DIR" "~/models/ollama" "Ollama"
  _prompt_model_dir "$env_file" "VLLM_MODELS_DIR" "~/models/vllm" "vLLM"
  echo ""
}
# ── Main ─────────────────────────────────────────────────────────────────────── # ── Main ───────────────────────────────────────────────────────────────────────
main() { main() {
echo "" echo ""
@ -235,15 +287,17 @@ main() {
install_nvidia_toolkit install_nvidia_toolkit
fi fi
setup_env setup_env
configure_model_paths
echo "" echo ""
success "All dependencies installed." success "All dependencies installed."
echo "" echo ""
echo -e " ${GREEN}Next steps:${NC}" echo -e " ${GREEN}Next steps:${NC}"
echo -e " 1. Edit ${YELLOW}.env${NC} to set your preferred ports and model paths" echo -e " 1. Start Peregrine:"
echo -e " 2. Start Peregrine:" echo -e " ${YELLOW}make start${NC} # remote/API-only (no local GPU)"
echo -e " ${YELLOW}make start${NC} (auto-detects Docker or Podman)" echo -e " ${YELLOW}make start PROFILE=cpu${NC} # local Ollama inference (CPU)"
echo -e " 3. Open ${YELLOW}http://localhost:8501${NC} — the setup wizard will guide you" echo -e " 2. Open ${YELLOW}http://localhost:8501${NC} — the setup wizard will guide you"
echo -e " (Tip: edit ${YELLOW}.env${NC} any time to adjust ports or model paths)"
echo "" echo ""
if groups "$USER" 2>/dev/null | grep -q docker; then if groups "$USER" 2>/dev/null | grep -q docker; then
true true