Compare commits

..

No commits in common. "main" and "v0.2.0" have entirely different histories.
main ... v0.2.0

112 changed files with 506 additions and 10238 deletions

View file

@ -1,28 +0,0 @@
# git-cliff configuration: changelog template + commit classification rules.
[changelog]
header = ""
# Tera template: commits grouped by parser group; each entry links to the
# Forgejo commit page using the short (7-char) commit id.
body = """
{% for group, commits in commits | group_by(attribute="group") %}
### {{ group | upper_first }}
{% for commit in commits %}
- {{ commit.message | upper_first }} ([{{ commit.id | truncate(length=7, end="") }}](https://git.opensourcesolarpunk.com/Circuit-Forge/snipe/commit/{{ commit.id }}))
{%- endfor %}
{% endfor %}
"""
trim = true
# Commit parsing: conventional commits only; unconventional commits filtered out.
[git]
conventional_commits = true
filter_unconventional = true
split_commits = false
# Map conventional-commit prefixes to changelog sections; chore/ci are omitted.
commit_parsers = [
{ message = "^feat", group = "Features" },
{ message = "^fix", group = "Bug Fixes" },
{ message = "^perf", group = "Performance" },
{ message = "^refactor", group = "Refactoring" },
{ message = "^docs", group = "Documentation" },
{ message = "^test", group = "Testing" },
{ message = "^chore", skip = true },
{ message = "^ci", skip = true },
]
filter_commits = false
# Only tags shaped like v1.2.3 are treated as releases.
tag_pattern = "v[0-9].*"

View file

@ -19,25 +19,6 @@ EBAY_SANDBOX_CERT_ID=
# production | sandbox
EBAY_ENV=production
# ── eBay OAuth — Authorization Code (user account connection) ─────────────────
# Enables paid-tier users to connect their personal eBay account for instant
# trust scoring via Trading API GetUser (account age + per-category feedback).
# Without this, Snipe falls back to Shopping API + Playwright scraping.
#
# Setup steps:
# 1. Go to https://developer.ebay.com/my/keys → select your Production app
# 2. Under "Auth Accepted URL / RuName", create a new entry:
# - Callback URL: https://your-domain/api/ebay/callback
# (e.g. https://menagerie.circuitforge.tech/snipe/api/ebay/callback)
# - Snipe generates the redirect automatically — just register the URL above
# 3. Copy the RuName value (looks like "YourName-AppName-PRD-xxx-yyy")
# and paste it as EBAY_RUNAME below.
# 4. Set EBAY_OAUTH_REDIRECT_URI to the same HTTPS callback URL.
#
# Self-hosted: your callback URL must be HTTPS and publicly reachable.
# EBAY_RUNAME=YourName-AppName-PRD-xxxxxxxx-xxxxxxxx
# EBAY_OAUTH_REDIRECT_URI=https://your-domain/api/ebay/callback
# ── eBay Account Deletion Webhook ──────────────────────────────────────────────
# Register endpoint at https://developer.ebay.com/my/notification — required for
# production key activation. Set EBAY_NOTIFICATION_ENDPOINT to the public HTTPS
@ -51,9 +32,6 @@ EBAY_WEBHOOK_VERIFY_SIGNATURES=true
# ── Database ───────────────────────────────────────────────────────────────────
SNIPE_DB=data/snipe.db
# Product identifier reported in cf-orch coordinator analytics for per-app breakdown
CF_APP_NAME=snipe
# ── Cloud mode (managed / menagerie instance only) ─────────────────────────────
# Leave unset for self-hosted / local use. When set, per-user DB isolation
# and Heimdall licensing are enabled. compose.cloud.yml sets CLOUD_MODE=true
@ -76,17 +54,13 @@ CF_APP_NAME=snipe
# own ID; the CF cloud instance uses CF's campaign ID (disclosed in the UI).
# EBAY_AFFILIATE_CAMPAIGN_ID=
# ── LLM inference (Search with AI / photo analysis) ──────────────────────────
# For self-hosted use, create config/llm.yaml from config/llm.yaml.example.
# config/llm.yaml is the preferred way to configure backends (supports cf-orch,
# multiple fallback backends, per-backend model selection).
#
# As a quick alternative, circuitforge-core LLMRouter also auto-detects backends
# from these env vars when no llm.yaml is present:
# ── LLM inference (vision / photo analysis) ──────────────────────────────────
# circuitforge-core LLMRouter auto-detects backends from these env vars
# (no llm.yaml required). Backends are tried in this priority order:
# 1. ANTHROPIC_API_KEY → Claude API (cloud; requires Paid tier key)
# 2. OPENAI_API_KEY → OpenAI-compatible endpoint
# 3. OLLAMA_HOST → local Ollama (default: http://localhost:11434)
# Leave all unset to disable LLM features (Search with AI won't be available).
# Leave all unset to disable LLM features (photo analysis won't run).
# ANTHROPIC_API_KEY=
# ANTHROPIC_MODEL=claude-haiku-4-5-20251001
@ -98,22 +72,9 @@ CF_APP_NAME=snipe
# OLLAMA_HOST=http://localhost:11434
# OLLAMA_MODEL=llava:7b
# CF Orchestrator — routes vision/LLM tasks to a cf-orch coordinator for VRAM management.
# Self-hosted: point at a local cf-orch coordinator if you have one running.
# Cloud (internal): managed coordinator at orch.circuitforge.tech.
# Leave unset to run vision tasks inline (no VRAM coordination).
# CF_ORCH_URL=http://10.1.10.71:7700
#
# cf-orch agent (compose --profile orch) — coordinator URL for the sidecar agent.
# Defaults to CF_ORCH_URL if unset.
# CF_ORCH_COORDINATOR_URL=http://10.1.10.71:7700
# ── Community DB (optional) ──────────────────────────────────────────────────
# When set, seller trust signals (confirmed scammers added to blocklist) are
# published to the shared community PostgreSQL for cross-user signal aggregation.
# Managed instances: set automatically by cf-orch. Self-hosted: leave unset.
# Requires cf-community-postgres container (cf-orch compose stack).
# COMMUNITY_DB_URL=postgresql://cf_community:<password>@localhost:5432/cf_community
# CF Orchestrator — managed inference for Paid+ cloud users (internal use only).
# Self-hosted users leave this unset; it has no effect without a valid allocation token.
# CF_ORCH_URL=https://orch.circuitforge.tech
# ── In-app feedback (beta) ────────────────────────────────────────────────────
# When set, a feedback FAB appears in the UI and routes submissions to Forgejo.

View file

@ -1,57 +0,0 @@
# Forgejo CI — Python lint + tests and Vue typecheck + unit tests.
# NOTE: indentation was lost in the pasted diff; structure reconstructed
# to canonical Actions workflow shape (same keys, same values).
name: CI

on:
  push:
    branches: [main, 'feature/**', 'fix/**']
  pull_request:
    branches: [main]

jobs:
  python:
    name: Python tests
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
          cache: pip
      # circuitforge-core is a sibling on dev machines but a public GitHub
      # mirror in CI — install from there to avoid path-dependency issues.
      - name: Install circuitforge-core
        run: pip install --no-cache-dir git+https://github.com/CircuitForgeLLC/circuitforge-core.git
      - name: Install snipe (dev extras)
        run: pip install --no-cache-dir -e ".[dev]"
      - name: Lint
        run: ruff check .
      - name: Test
        run: pytest tests/ -v --tb=short

  frontend:
    name: Frontend typecheck + tests
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: web
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: npm
          cache-dependency-path: web/package-lock.json
      - name: Install dependencies
        run: npm ci
      - name: Typecheck + build
        run: npm run build
      - name: Unit tests
        run: npm run test

View file

@ -1,30 +0,0 @@
# Mirror workflow — pushes main and release tags to the GitHub and
# Codeberg mirrors. Indentation reconstructed (lost in the pasted diff).
name: Mirror

on:
  push:
    branches: [main]
    tags: ['v*']

jobs:
  mirror:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          # Full history is required for a --mirror push.
          fetch-depth: 0
      - name: Mirror to GitHub
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_MIRROR_TOKEN }}
          REPO: ${{ github.event.repository.name }}
        run: |
          git remote add github "https://x-access-token:${GITHUB_TOKEN}@github.com/CircuitForgeLLC/${REPO}.git"
          git push github --mirror
      - name: Mirror to Codeberg
        env:
          CODEBERG_TOKEN: ${{ secrets.CODEBERG_MIRROR_TOKEN }}
          REPO: ${{ github.event.repository.name }}
        run: |
          git remote add codeberg "https://CircuitForge:${CODEBERG_TOKEN}@codeberg.org/CircuitForge/${REPO}.git"
          git push codeberg --mirror

View file

@ -1,92 +0,0 @@
# Release workflow — on v* tags: generate a git-cliff changelog, build and
# push multi-arch api/web images to the Forgejo registry, create a Forgejo
# release. Indentation reconstructed (lost in the pasted diff).
name: Release

on:
  push:
    tags: ['v*']

env:
  # Forgejo container registry (BSL product — not pushing to public GHCR)
  # cf-agents#3: revisit public registry policy before enabling GHCR push
  REGISTRY: git.opensourcesolarpunk.com
  IMAGE_API: git.opensourcesolarpunk.com/circuit-forge/snipe-api
  IMAGE_WEB: git.opensourcesolarpunk.com/circuit-forge/snipe-web

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      # ── Changelog ──────────────────────────────────────────────────────
      - name: Generate changelog
        uses: orhun/git-cliff-action@v3
        id: cliff
        with:
          config: .cliff.toml
          args: --latest --strip header
        env:
          OUTPUT: CHANGES.md

      # ── Docker ─────────────────────────────────────────────────────────
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to Forgejo registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.FORGEJO_RELEASE_TOKEN }}

      # API image — built with circuitforge-core sibling from GitHub mirror
      - name: Checkout circuitforge-core
        uses: actions/checkout@v4
        with:
          repository: CircuitForgeLLC/circuitforge-core
          path: circuitforge-core
      - name: Build and push API image
        uses: docker/build-push-action@v6
        with:
          context: .
          file: Dockerfile
          push: true
          platforms: linux/amd64,linux/arm64
          tags: |
            ${{ env.IMAGE_API }}:${{ github.ref_name }}
            ${{ env.IMAGE_API }}:latest
          cache-from: type=gha
          cache-to: type=gha,mode=max
      - name: Build and push web image
        uses: docker/build-push-action@v6
        with:
          context: .
          file: docker/web/Dockerfile
          push: true
          platforms: linux/amd64,linux/arm64
          tags: |
            ${{ env.IMAGE_WEB }}:${{ github.ref_name }}
            ${{ env.IMAGE_WEB }}:latest
          cache-from: type=gha
          cache-to: type=gha,mode=max

      # ── Forgejo Release ────────────────────────────────────────────────
      - name: Create Forgejo release
        env:
          FORGEJO_TOKEN: ${{ secrets.FORGEJO_RELEASE_TOKEN }}
          REPO: ${{ github.event.repository.name }}
          TAG: ${{ github.ref_name }}
          NOTES: ${{ steps.cliff.outputs.content }}
        run: |
          curl -sS -X POST \
            "https://git.opensourcesolarpunk.com/api/v1/repos/Circuit-Forge/${REPO}/releases" \
            -H "Authorization: token ${FORGEJO_TOKEN}" \
            -H "Content-Type: application/json" \
            -d "$(jq -n --arg tag "$TAG" --arg body "$NOTES" \
              '{tag_name: $tag, name: $tag, body: $body}')"

View file

@ -1,62 +0,0 @@
# Snipe CI — runs on GitHub mirror for public credibility badge.
# Forgejo (.forgejo/workflows/ci.yml) is the canonical CI — keep these in sync.
# No Forgejo-specific secrets used here; circuitforge-core is public on Forgejo.
#
# Note: playwright browser binaries are not installed here — tests using
# headed Chromium (Kasada bypass) are skipped in CI via pytest marks.
# Indentation reconstructed (lost in the pasted diff).
name: CI

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  backend:
    name: Backend (Python)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.11'
          cache: pip
      - name: Install circuitforge-core
        run: pip install git+https://git.opensourcesolarpunk.com/Circuit-Forge/circuitforge-core.git@main
      - name: Install dependencies
        run: pip install -e ".[dev]"
      - name: Lint
        run: ruff check .
      - name: Test
        run: pytest tests/ -v --tb=short -m "not browser"

  frontend:
    name: Frontend (Vue)
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: web
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: npm
          cache-dependency-path: web/package-lock.json
      - name: Install dependencies
        run: npm ci
      - name: Type check
        run: npx vue-tsc --noEmit
      - name: Test
        run: npm run test

2
.gitignore vendored
View file

@ -9,5 +9,3 @@ data/
.superpowers/
web/node_modules/
web/dist/
config/llm.yaml
.worktrees/

View file

@ -6,140 +6,6 @@ Versions follow [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
---
## [0.5.1] — 2026-04-16
### Added
**Reported sellers tracking** — after bulk-reporting sellers to eBay Trust & Safety, cards show a muted "Reported to eBay" badge so users know not to re-report the same seller.
- Migration 012: `reported_sellers` table in user DB (UNIQUE on platform + seller ID, preserves first-report timestamp on re-report).
- `Store.mark_reported` / `list_reported` methods.
- `POST /api/reported` + `GET /api/reported` endpoints.
- `reported` Pinia store: optimistic local update, best-effort server persistence.
- `ListingCard`: accepts `sellerReported` prop; shows `.card__reported-badge` when true.
- `App.vue`: loads reported store at startup alongside blocklist.
**Community blocklist share toggle** — Settings > Community section (signed-in users only, default OFF).
- Toggle persisted as `community.blocklist_share` via existing user preferences path system.
- Backend `add_to_blocklist` now gates community signal publishing on opt-in preference; privacy-by-architecture: sharing is never implicit.
### Fixed
- SSE live score push (snipe#1) verified working end-to-end: enrichment thread correctly streams re-scored trust scores via `SimpleQueue → StreamingResponse` generator, terminates with `event: done`. Closed.
---
## [0.5.0] — 2026-04-16
### Added
**Listing detail page** — full trust breakdown for any individual listing (closes placeholder)
- `ListingView.vue` rewritten from "coming soon" stub into a full trust breakdown view.
- SVG trust ring: `stroke-dasharray` fill proportional to composite score (0–100), colour-coded `lv-ring--high/mid/low` (≥80 / 50–79 / <50).
- Five-signal breakdown table: account age, feedback count, feedback ratio, price vs. market, category history — each row shows score, max, and a plain-English label.
- Red flag badges: hard flags (`.lv-flag--hard`) for `new_account`, `suspicious_price`, `duplicate_photo`, `zero_feedback`, `established_bad_actor`; soft flags (`.lv-flag--soft`) for `scratch_dent_mentioned`, `long_on_market`, `significant_price_drop`, `account_under_30_days`.
- Triple Red easter egg: new/under-30-days account + suspicious price + photo/actor/zero-feedback/scratch flag combination triggers pulsing red glow animation.
- Partial score warning: `score_is_partial` flag shows `.lv-verdict__partial` notice and "pending" in affected signal rows.
- Seller panel: username, account age, feedback count/ratio, category history JSON, inline block-seller form.
- Photo carousel: thumbnail strip with keyboard-navigable main image.
- Not-found state for direct URL navigation when store is empty.
- `getListing(platformListingId)` getter added to search store.
- `ListingCard.vue`: "Details" link wired to `/listing/:id` route.
**Theme override** — user-controlled dark/light/system toggle in Settings
- `useTheme` composable: module-level `mode` ref, `setMode()` writes `data-theme` attribute + localStorage, `restore()` re-reads localStorage on hard reload.
- `theme.css`: explicit `[data-theme="dark"]` and `[data-theme="light"]` attribute selector blocks so user preference beats OS media query. Snipe mode override preserved.
- `SettingsView.vue`: new Appearance section with System/Dark/Light segmented button group.
- `App.vue`: `restoreTheme()` called in `onMounted` alongside snipe mode restore.
**Frontend test suite** — 32 Vitest tests, all green
- `useTheme.test.ts` (7 tests): defaults, setMode, data-theme attribute, localStorage persistence, restore() behaviour.
- `searchStore.test.ts` (7 tests): getListing() edge cases, pipe characters in IDs, trustScores/sellers map lookups.
- `ListingView.test.ts` (18 tests): not-found state, title/price/score/signals/seller rendering, hard/soft flag badges, no-flags, triple-red class, partial/pending signals, ring colour classes.
### Fixed
- `useTheme.restore()` re-reads from localStorage instead of cached module-level ref — prevented correct theme restore after a `setMode()` call in the same JS session.
- Landing hero subtitle rewritten with narrative opener ("Seen a listing that looks almost too good to pass up?") — universal framing, no category assumptions.
- eBay cancellation callout CTA updated to "Search above to score listings before you commit" — direct action vs. passive notice.
- Tile descriptions: concrete examples added ("40% below median", quoted "scratch and dent") for instant domain recognition.
---
## [0.4.0] — 2026-04-14
### Added
**Search with AI** — natural language to eBay search filters (closes #29, Paid+ tier)
- `QueryTranslator`: sends a free-text prompt to a local LLM (via cf-orch, defaulting to `llama3.1:8b`) with a domain-aware system prompt and eBay Taxonomy category hints. Returns structured `SearchParamsResponse` (keywords, price range, condition, category, sort order, pages).
- `EbayCategoryCache`: bootstraps from a seed list; refreshes from the eBay Browse API Taxonomy endpoint on a 7-day TTL. `get_relevant(query)` injects the 10 closest categories into the system prompt to reduce hallucinated filter values.
- `POST /api/search/build` — tier-gated endpoint (paid+) that accepts `{"prompt": "..."}` and returns populated `SearchParamsResponse`. Wired to `LLMRouter` via the Peregrine-style shim.
- `LLMQueryPanel.vue`: collapsible panel above the search form with a text area, a "Search with AI" button, and an auto-run toggle. A11y (accessibility): `aria-expanded`, `aria-controls`, `aria-live="polite"` on status, keyboard-navigable, `prefers-reduced-motion` guard on collapse animation.
- `useLLMQueryBuilder` composable: manages `buildQuery()` state machine (`idle | loading | done | error`), exposes `autoRun` flag, calls `populateFromLLM()` on the search store.
- `SettingsView`: new "Search with AI" section with the auto-run toggle persisted to user preferences.
- `search.ts`: `populateFromLLM()` merges LLM-returned filters into the store; guards `v-model.number` empty-string edge case (cleared price inputs sent `NaN` to the API).
**Preferences system**
- `Store.get_user_preference` / `set_user_preference` / `get_all_preferences`: dot-path read/write over a singleton `user_preferences` JSON row (immutable update pattern via `circuitforge_core.preferences.paths`).
- `Store.save_community_signal`: persists trust feedback signals to `community_signals` table.
- `preferencesStore` (Pinia): loaded after session bootstrap; `load()` / `set()` / `get()` surface preferences to Vue components.
**Community module** (closes #31 #32 #33)
- `corrections` router wired: `POST /api/community/signal` now lands in SQLite `community_signals`.
- `COMMUNITY_DB_URL` env var documented in `.env.example`.
### Fixed
- `useTrustFeedback`: prefixes fetch URL with `VITE_API_BASE` so feedback signals route correctly under menagerie reverse proxy.
- `App.vue`: skip-to-main link moved before `<AppNav>` so keyboard users reach it as the first focusable element (WCAG 2.4.1 bypass-blocks compliance).
- `@/` path alias removed from Vue components (Vite config had no alias configured; replaced with relative imports to fix production build).
- `search.ts`: LLM-populated filters now sync back into `SearchView` local state so the form reflects the AI-generated values immediately.
- Python import ordering pass (isort) across adapters, trust modules, tasks, and test files.
### Closed
- `#29` LLM query builder — shipped.
- `#31` `#32` `#33` Community corrections router — shipped.
---
## [0.3.0] — 2026-04-14
### Added
**Infrastructure and DevOps**
- `.forgejo/workflows/ci.yml` — Python lint (ruff) + pytest + Vue typecheck + vitest on every PR/push to main. Installs circuitforge-core from GitHub mirror so the CI runner doesn't need the sibling directory.
- `.forgejo/workflows/release.yml` — Docker build and push (api + web images) to Forgejo container registry on `v*` tags. Builds both images multi-arch (amd64 + arm64). Creates a Forgejo release with git-cliff changelog notes.
- `.forgejo/workflows/mirror.yml` — Mirror push to GitHub and Codeberg on main/tags.
- `install.sh` — Full rewrite following the CircuitForge installer pattern: colored output, `--docker` / `--bare-metal` / `--help` flags, auto-detection of Docker/conda/Python/Node/Chromium/Xvfb, license key prompting, structured named functions.
- `docs/nginx-self-hosted.conf` — Sample nginx config for bare-metal self-hosted deployments (SPA fallback, SSE proxy settings, long-term asset caching).
- `docs/getting-started/installation.md` — No-Docker install section: bare-metal instructions, nginx setup, Chromium/Xvfb note.
- `compose.override.yml` — `cf-orch-agent` sidecar service for routing vision tasks to a cf-orch GPU coordinator (`--profile orch` opt-in). `CF_ORCH_COORDINATOR_URL` env var documented.
- `.env.example` — `CF_ORCH_URL` and `CF_ORCH_COORDINATOR_URL` comments expanded with self-hosted coordinator guidance.
**Screenshots** (post CSS fix)
- Retook all docs screenshots (`01-hero`, `02-results`, `03-steal-badge`, `hero`) after the color-mix token fix so tints match the theme in both dark and light mode.
### Closed
- `#1` SSE live score push — already fully implemented in 0.2.0; closed.
- `#22` Forgejo Actions CI/CD — shipped.
- `#24` nginx config for no-Docker self-hosting — shipped.
- `#25` Self-hosted installer script — shipped.
- `#15` cf-orch agent in compose stack — shipped.
- `#27` MCP server — already shipped in 0.2.0; closed.
---
## [0.2.0] — 2026-04-12
### Added

View file

@ -4,8 +4,6 @@
**Status:** Active — eBay listing intelligence MVP complete (search, trust scoring, affiliate links, feedback FAB, vision task scheduling). Auction sniping engine and multi-platform support are next.
**[Documentation](https://docs.circuitforge.tech/snipe/)** · [circuitforge.tech](https://circuitforge.tech)
## Quick install (self-hosted)
**Requirements:** Docker with Compose plugin, Git. No API keys needed to get started.
@ -64,21 +62,6 @@ The name is the origin of the word "sniping" — common snipes are notoriously e
---
## Screenshots
**Landing page — no account required**
![Snipe landing hero showing search bar and three feature tiles: Seller trust score, Price vs. market, Red flag detection](docs/screenshots/01-hero.png)
**Search results with trust scores**
![Search results for vintage film camera listings, each card showing a trust score badge, seller feedback, price, and market comparison](docs/screenshots/02-results.png)
**STEAL badge — price significantly below market**
![Listing cards with STEAL badge highlighting listings priced well below completed sales median](docs/screenshots/03-steal-badge.png)
> Red flag and Triple Red screenshots coming — captured opportunistically from real scammy listings.
---
## Implemented: eBay Listing Intelligence
### Search & filtering

View file

@ -16,6 +16,8 @@ FastAPI usage:
"""
from __future__ import annotations
import hashlib
import hmac
import logging
import os
import re
@ -69,13 +71,13 @@ class SessionFeatures:
photo_analysis: bool
shared_scammer_db: bool
shared_image_db: bool
llm_query_builder: bool
def compute_features(tier: str) -> SessionFeatures:
"""Compute feature flags from tier. Evaluated server-side; sent to frontend."""
local = tier == "local"
paid_plus = local or tier in ("paid", "premium", "ultra")
premium_plus = local or tier in ("premium", "ultra")
return SessionFeatures(
saved_searches=True, # all tiers get saved searches
@ -86,35 +88,16 @@ def compute_features(tier: str) -> SessionFeatures:
photo_analysis=paid_plus,
shared_scammer_db=paid_plus,
shared_image_db=paid_plus,
llm_query_builder=paid_plus,
)
# ── JWT validation ────────────────────────────────────────────────────────────
def _extract_session_token(header_value: str) -> str:
"""Extract cf_session value from a Cookie or X-CF-Session header string.
Returns the JWT token string, or "" if no valid session token is found.
Cookie strings like "snipe_guest=abc123" (no cf_session key) return ""
so the caller falls through to the guest/anonymous path rather than
passing a non-JWT string to validate_session_jwt().
"""
"""Extract cf_session value from a Cookie or X-CF-Session header string."""
# X-CF-Session may be the raw JWT or the full cookie string
m = re.search(r'(?:^|;)\s*cf_session=([^;]+)', header_value)
if m:
return m.group(1).strip()
# Only treat as a raw JWT if it has exactly three base64url segments (header.payload.sig).
# Cookie strings like "snipe_guest=abc123" must NOT be forwarded to JWT validation.
stripped = header_value.strip()
if re.match(r'^[A-Za-z0-9\-_]+\.[A-Za-z0-9\-_]+\.[A-Za-z0-9\-_=]+$', stripped):
return stripped # bare JWT forwarded directly by Caddy
return "" # not a JWT and no cf_session cookie — treat as unauthenticated
def _extract_guest_token(cookie_header: str) -> str | None:
"""Extract snipe_guest UUID from the Cookie header, if present."""
m = re.search(r'(?:^|;)\s*snipe_guest=([^;]+)', cookie_header)
return m.group(1).strip() if m else None
return m.group(1).strip() if m else header_value.strip()
def validate_session_jwt(token: str) -> str:
@ -195,18 +178,6 @@ def _user_db_path(user_id: str) -> Path:
return path
def _anon_db_path() -> Path:
"""Shared pool DB for unauthenticated visitors.
All anonymous searches write listing data here. Seller and market comp
data accumulates in shared_db as normal, growing the anti-scammer corpus
with every public search regardless of auth state.
"""
path = CLOUD_DATA_ROOT / "anonymous" / "snipe" / "user.db"
path.parent.mkdir(parents=True, exist_ok=True)
return path
# ── FastAPI dependency ────────────────────────────────────────────────────────
def get_session(request: Request) -> CloudUser:
@ -215,8 +186,6 @@ def get_session(request: Request) -> CloudUser:
Local mode: returns a fully-privileged "local" user pointing at SNIPE_DB.
Cloud mode: validates X-CF-Session JWT, provisions Heimdall license,
resolves tier, returns per-user DB paths.
Unauthenticated cloud visitors: returns a free-tier anonymous user so
search and scoring work without an account.
"""
if not CLOUD_MODE:
return CloudUser(
@ -226,30 +195,16 @@ def get_session(request: Request) -> CloudUser:
user_db=_LOCAL_SNIPE_DB,
)
cookie_header = request.headers.get("cookie", "")
raw_header = request.headers.get("x-cf-session", "") or cookie_header
raw_header = (
request.headers.get("x-cf-session", "")
or request.headers.get("cookie", "")
)
if not raw_header:
# No session at all — check for a guest UUID cookie set by /api/session
guest_uuid = _extract_guest_token(cookie_header)
user_id = f"guest:{guest_uuid}" if guest_uuid else "anonymous"
return CloudUser(
user_id=user_id,
tier="free",
shared_db=_shared_db_path(),
user_db=_anon_db_path(),
)
raise HTTPException(status_code=401, detail="Not authenticated")
token = _extract_session_token(raw_header)
if not token:
guest_uuid = _extract_guest_token(cookie_header)
user_id = f"guest:{guest_uuid}" if guest_uuid else "anonymous"
return CloudUser(
user_id=user_id,
tier="free",
shared_db=_shared_db_path(),
user_db=_anon_db_path(),
)
raise HTTPException(status_code=401, detail="Not authenticated")
user_id = validate_session_jwt(token)
_ensure_provisioned(user_id)

View file

@ -26,14 +26,13 @@ from pathlib import Path
from typing import Optional
import requests
from fastapi import APIRouter, Header, HTTPException, Request
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives.asymmetric.ec import ECDSA
from cryptography.hazmat.primitives.hashes import SHA1
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from fastapi import APIRouter, Header, HTTPException, Request
from app.db.store import Store
from app.platforms.ebay.auth import EbayTokenManager
log = logging.getLogger(__name__)
@ -41,24 +40,6 @@ router = APIRouter()
_DB_PATH = Path(os.environ.get("SNIPE_DB", "data/snipe.db"))
# ── App-level token manager ───────────────────────────────────────────────────
# Lazily initialized from env vars; shared across all webhook requests.
# The Notification public_key endpoint requires a Bearer app token.
_app_token_manager: EbayTokenManager | None = None
def _get_app_token() -> str | None:
"""Return a valid eBay app-level Bearer token, or None if creds are absent."""
global _app_token_manager
client_id = (os.environ.get("EBAY_APP_ID") or os.environ.get("EBAY_CLIENT_ID", "")).strip()
client_secret = (os.environ.get("EBAY_CERT_ID") or os.environ.get("EBAY_CLIENT_SECRET", "")).strip()
if not client_id or not client_secret:
return None
if _app_token_manager is None:
_app_token_manager = EbayTokenManager(client_id, client_secret)
return _app_token_manager.get_token()
# ── Public-key cache ──────────────────────────────────────────────────────────
# eBay key rotation is rare; 1-hour TTL is appropriate.
_KEY_CACHE_TTL = 3600
@ -77,14 +58,7 @@ def _fetch_public_key(kid: str) -> bytes:
return cached[0]
key_url = _EBAY_KEY_URL.format(kid=kid)
headers: dict[str, str] = {}
app_token = _get_app_token()
if app_token:
headers["Authorization"] = f"Bearer {app_token}"
else:
log.warning("public_key fetch: no app credentials — request will likely fail")
resp = requests.get(key_url, headers=headers, timeout=10)
resp = requests.get(key_url, timeout=10)
if not resp.ok:
log.error("public key fetch failed: %s %s — body: %s", resp.status_code, key_url, resp.text[:500])
resp.raise_for_status()
@ -94,42 +68,6 @@ def _fetch_public_key(kid: str) -> bytes:
return pem_bytes
# ── GET — webhook health check ───────────────────────────────────────────────
@router.get("/api/ebay/webhook-health")
def ebay_webhook_health() -> dict:
    """Lightweight health check for eBay webhook compliance monitoring.

    Returns 200 + a status dict when the webhook is fully configured;
    raises a 500 HTTPException listing the missing env vars otherwise.
    Intended for Uptime Kuma or similar uptime monitors.
    (Indentation reconstructed from the flattened diff.)
    """
    token = os.environ.get("EBAY_NOTIFICATION_TOKEN", "")
    endpoint = os.environ.get("EBAY_NOTIFICATION_ENDPOINT", "")
    # App creds accept either the EBAY_APP_ID/EBAY_CERT_ID names or the
    # EBAY_CLIENT_ID/EBAY_CLIENT_SECRET aliases.
    client_id = (os.environ.get("EBAY_APP_ID") or os.environ.get("EBAY_CLIENT_ID", "")).strip()
    client_secret = (os.environ.get("EBAY_CERT_ID") or os.environ.get("EBAY_CLIENT_SECRET", "")).strip()
    missing = [
        name for name, val in [
            ("EBAY_NOTIFICATION_TOKEN", token),
            ("EBAY_NOTIFICATION_ENDPOINT", endpoint),
            ("EBAY_APP_ID / EBAY_CLIENT_ID", client_id),
            ("EBAY_CERT_ID / EBAY_CLIENT_SECRET", client_secret),
        ] if not val
    ]
    if missing:
        log.error("ebay_webhook_health: missing config: %s", missing)
        raise HTTPException(
            status_code=500,
            detail=f"Webhook misconfigured — missing: {missing}",
        )
    return {
        "status": "ok",
        "endpoint": endpoint,
        "signature_verification": os.environ.get("EBAY_WEBHOOK_VERIFY_SIGNATURES", "true"),
    }
# ── GET — challenge verification ──────────────────────────────────────────────
@router.get("/api/ebay/account-deletion")

File diff suppressed because it is too large Load diff

View file

@ -1,11 +0,0 @@
-- Community trust signals: user feedback on individual trust scores.
-- confirmed=1 → "this score looks right"; confirmed=0 → "this score is wrong".
-- Lives in shared_db so signals aggregate across all users.
CREATE TABLE IF NOT EXISTS community_signals (
    id          INTEGER PRIMARY KEY AUTOINCREMENT,
    seller_id   TEXT    NOT NULL,
    confirmed   INTEGER NOT NULL CHECK (confirmed IN (0, 1)),
    recorded_at TEXT    NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

-- Fast lookup of all signals for a given seller.
CREATE INDEX IF NOT EXISTS idx_community_signals_seller
    ON community_signals (seller_id);

View file

@ -1,9 +0,0 @@
-- Per-user preferences as a single JSON blob.
-- Stored in user_db (one DB file per user) — never in shared.db.
-- PRIMARY KEY CHECK (id = 1) enforces exactly one row: a singleton table.
-- Dot-path reads/writes go through cf-core preferences.paths (get_path / set_path).
CREATE TABLE IF NOT EXISTS user_preferences (
    id         INTEGER PRIMARY KEY CHECK (id = 1),
    prefs_json TEXT NOT NULL DEFAULT '{}',
    updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ', 'now'))
);

View file

@ -1,23 +0,0 @@
-- LLM output corrections for the SFT training pipeline
-- (cf-core make_corrections_router). Holds thumbs-up/down feedback and
-- explicit corrections on LLM-generated content. Used once #29 (LLM query
-- builder) ships; safe to pre-create now.
CREATE TABLE IF NOT EXISTS corrections (
    id               INTEGER PRIMARY KEY AUTOINCREMENT,
    item_id          TEXT    NOT NULL DEFAULT '',
    product          TEXT    NOT NULL,
    correction_type  TEXT    NOT NULL,
    input_text       TEXT    NOT NULL,
    original_output  TEXT    NOT NULL,
    corrected_output TEXT    NOT NULL DEFAULT '',
    rating           TEXT    NOT NULL DEFAULT 'down',
    context          TEXT    NOT NULL DEFAULT '{}',
    opted_in         INTEGER NOT NULL DEFAULT 0,
    created_at       TEXT    NOT NULL DEFAULT (datetime('now'))
);

-- Filter corrections by product.
CREATE INDEX IF NOT EXISTS idx_corrections_product
    ON corrections (product);

-- Filter corrections by training opt-in status.
CREATE INDEX IF NOT EXISTS idx_corrections_opted_in
    ON corrections (opted_in);

View file

@ -1,16 +0,0 @@
-- app/db/migrations/011_ebay_categories.sql
-- eBay category leaf node cache. Refreshed weekly via EbayCategoryCache.refresh().
-- Seeded with a small bootstrap table when no eBay API credentials are configured.
-- MIT License
CREATE TABLE IF NOT EXISTS ebay_categories (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    category_id TEXT NOT NULL UNIQUE,    -- eBay's own category identifier
    name TEXT NOT NULL,                  -- leaf category display name
    full_path TEXT NOT NULL,             -- "Consumer Electronics > ... > Leaf Name"
    is_leaf INTEGER NOT NULL DEFAULT 1,  -- SQLite stores bool as int
    refreshed_at TEXT NOT NULL           -- ISO8601 timestamp of last cache refresh
);
-- Category-hint lookups match on name; keep it indexed.
CREATE INDEX IF NOT EXISTS idx_ebay_cat_name
    ON ebay_categories (name);

View file

@ -1,12 +0,0 @@
-- Sellers the user has reported to the platform's Trust & Safety.
-- UNIQUE(platform, platform_seller_id) pairs with INSERT OR IGNORE in the
-- store layer so the first-report timestamp survives repeat reports.
CREATE TABLE IF NOT EXISTS reported_sellers (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    platform TEXT NOT NULL,                      -- e.g. 'ebay'
    platform_seller_id TEXT NOT NULL,            -- platform-native seller id
    username TEXT,                               -- display name, if known
    reported_at TEXT DEFAULT CURRENT_TIMESTAMP,  -- time of first report (UTC)
    reported_by TEXT NOT NULL DEFAULT 'user', -- user | bulk_action
    UNIQUE(platform, platform_seller_id)
);
CREATE INDEX IF NOT EXISTS idx_reported_sellers_lookup
    ON reported_sellers(platform, platform_seller_id);

View file

@ -1,6 +1,5 @@
"""Dataclasses for all Snipe domain objects."""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Optional

View file

@ -1,6 +1,5 @@
"""Thin SQLite read/write layer for all Snipe models."""
from __future__ import annotations
import json
from datetime import datetime, timezone
from pathlib import Path
@ -8,7 +7,7 @@ from typing import Optional
from circuitforge_core.db import get_connection, run_migrations
from .models import Listing, MarketComp, SavedSearch, ScammerEntry, Seller, TrustScore
from .models import Listing, Seller, TrustScore, MarketComp, SavedSearch, ScammerEntry
MIGRATIONS_DIR = Path(__file__).parent / "migrations"
@ -382,88 +381,6 @@ class Store:
for r in rows
]
# --- Reported Sellers ---
def mark_reported(
    self,
    platform: str,
    platform_seller_id: str,
    username: Optional[str] = None,
    reported_by: str = "user",
) -> None:
    """Record that the user has filed an eBay T&S report for this seller.

    INSERT OR IGNORE keeps the earliest row (and its reported_at timestamp)
    when the same (platform, seller) pair is reported again.
    """
    sql = (
        "INSERT OR IGNORE INTO reported_sellers "
        "(platform, platform_seller_id, username, reported_by) "
        "VALUES (?,?,?,?)"
    )
    values = (platform, platform_seller_id, username, reported_by)
    self._conn.execute(sql, values)
    self._conn.commit()
def list_reported(self, platform: str = "ebay") -> list[str]:
    """Return every platform_seller_id that has been reported on *platform*."""
    cursor = self._conn.execute(
        "SELECT platform_seller_id FROM reported_sellers WHERE platform=?",
        (platform,),
    )
    return [seller_id for (seller_id,) in cursor.fetchall()]
def save_community_signal(self, seller_id: str, confirmed: bool) -> None:
    """Record a user's trust-score feedback signal into the shared DB."""
    flag = 1 if confirmed else 0  # SQLite stores the boolean as 0/1
    self._conn.execute(
        "INSERT INTO community_signals (seller_id, confirmed) VALUES (?, ?)",
        (seller_id, flag),
    )
    self._conn.commit()
# --- User Preferences ---
def get_user_preference(self, path: str, default=None):
    """Read a preference value at dot-separated path (e.g. 'affiliate.opt_out').

    Reads from the singleton user_preferences row; returns *default* if the
    table is empty or the path is not set.
    """
    # Imported lazily so the store module does not hard-depend on cf-core's
    # preference helpers at import time.
    from circuitforge_core.preferences.paths import get_path
    row = self._conn.execute(
        "SELECT prefs_json FROM user_preferences WHERE id=1"
    ).fetchone()
    if not row:
        # Singleton row absent — nothing has ever been written.
        return default
    # get_path walks the parsed JSON tree along the dot-separated path.
    return get_path(json.loads(row[0]), path, default=default)
def set_user_preference(self, path: str, value) -> None:
    """Write *value* at dot-separated path (immutable JSON update).

    Creates the singleton row on first write; merges subsequent updates
    so sibling paths are preserved.
    """
    # Lazy import mirrors get_user_preference — avoids a module-load dependency.
    from circuitforge_core.preferences.paths import set_path
    row = self._conn.execute(
        "SELECT prefs_json FROM user_preferences WHERE id=1"
    ).fetchone()
    # Start from the current tree (or empty) so untouched sibling paths survive.
    prefs = json.loads(row[0]) if row else {}
    updated = set_path(prefs, path, value)
    # Upsert the singleton row; updated_at is refreshed (UTC) on every write.
    self._conn.execute(
        "INSERT INTO user_preferences (id, prefs_json, updated_at) "
        "VALUES (1, ?, strftime('%Y-%m-%dT%H:%M:%SZ', 'now')) "
        "ON CONFLICT(id) DO UPDATE SET "
        " prefs_json = excluded.prefs_json, "
        " updated_at = excluded.updated_at",
        (json.dumps(updated),),
    )
    self._conn.commit()
def get_all_preferences(self) -> dict:
    """Return all preferences as a plain dict (empty dict if not yet set)."""
    row = self._conn.execute(
        "SELECT prefs_json FROM user_preferences WHERE id=1"
    ).fetchone()
    if not row:
        return {}
    return json.loads(row[0])
def get_market_comp(self, platform: str, query_hash: str) -> Optional[MarketComp]:
row = self._conn.execute(
"SELECT platform, query_hash, median_price, sample_count, expires_at, id, fetched_at "

View file

@ -1,5 +0,0 @@
# app/llm/__init__.py
# BSL 1.1 License
from .query_translator import QueryTranslator, QueryTranslatorError, SearchParamsResponse
__all__ = ["QueryTranslator", "QueryTranslatorError", "SearchParamsResponse"]

View file

@ -1,167 +0,0 @@
# app/llm/query_translator.py
# BSL 1.1 License
"""LLM query builder — translates natural language to eBay SearchFilters.
The QueryTranslator calls LLMRouter.complete() (synchronous) with a domain-aware
system prompt. The prompt includes category hints injected from EbayCategoryCache.
The LLM returns a single JSON object matching SearchParamsResponse.
"""
from __future__ import annotations
import json
import logging
from dataclasses import dataclass
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from app.platforms.ebay.categories import EbayCategoryCache
log = logging.getLogger(__name__)
class QueryTranslatorError(Exception):
    """Raised when the LLM output cannot be parsed into SearchParamsResponse."""

    def __init__(self, message: str, raw: str = "") -> None:
        # Keep the raw LLM text for debugging/logging by the caller.
        self.raw = raw
        super().__init__(message)
@dataclass(frozen=True)
class SearchParamsResponse:
    """Parsed LLM response — maps 1:1 to the /api/search query parameters.

    Frozen so a parsed response cannot be mutated after validation.
    """
    base_query: str
    must_include_mode: str  # "all" | "any" | "groups"
    must_include: str  # raw filter string
    must_exclude: str  # comma-separated exclusion terms
    max_price: Optional[float]
    min_price: Optional[float]
    condition: list[str]  # subset of ["new", "used", "for_parts"]
    category_id: Optional[str]  # eBay category ID string, or None
    explanation: str  # one-sentence plain-language summary
_VALID_MODES = {"all", "any", "groups"}
_VALID_CONDITIONS = {"new", "used", "for_parts"}


def _parse_response(raw: str) -> SearchParamsResponse:
    """Parse the LLM's raw text output into a SearchParamsResponse.

    Raises QueryTranslatorError if the JSON is malformed or required fields
    are missing.
    """
    try:
        data = json.loads(raw.strip())
    except json.JSONDecodeError as exc:
        raise QueryTranslatorError(f"LLM returned unparseable JSON: {exc}", raw=raw) from exc

    try:
        base_query = str(data["base_query"]).strip()
        if not base_query:
            # Re-use the KeyError path so the error is reported uniformly below.
            raise KeyError("base_query is empty")

        mode = str(data.get("must_include_mode", "all"))
        if mode not in _VALID_MODES:
            mode = "all"  # unknown mode degrades to the strictest default

        raw_max = data.get("max_price")
        raw_min = data.get("min_price")
        max_price = None if raw_max is None else float(data["max_price"])
        min_price = None if raw_min is None else float(data["min_price"])

        conditions = [c for c in data.get("condition", []) if c in _VALID_CONDITIONS]

        raw_category = data.get("category_id")
        category_id = str(data["category_id"]) if raw_category else None

        explanation = str(data.get("explanation", "")).strip()
        must_include = str(data.get("must_include", ""))
        must_exclude = str(data.get("must_exclude", ""))
    except (KeyError, TypeError, ValueError) as exc:
        raise QueryTranslatorError(
            f"LLM response missing or invalid field: {exc}", raw=raw
        ) from exc

    return SearchParamsResponse(
        base_query=base_query,
        must_include_mode=mode,
        must_include=must_include,
        must_exclude=must_exclude,
        max_price=max_price,
        min_price=min_price,
        condition=conditions,
        category_id=category_id,
        explanation=explanation,
    )
# ── System prompt template ────────────────────────────────────────────────────
# Filled via .format(category_hints=...) in QueryTranslator.translate — the
# only substitution slot is {category_hints}; everything else is literal text
# sent to the LLM verbatim, so do not edit it casually.
_SYSTEM_PROMPT_TEMPLATE = """\
You are a search assistant for Snipe, an eBay listing intelligence tool.
Your job is to translate a natural-language description of what someone is looking for
into a structured eBay search configuration.
Return ONLY a JSON object with these exact fields no preamble, no markdown, no extra keys:
base_query (string) Primary search term, short e.g. "RTX 3080", "vintage Leica"
must_include_mode (string) One of: "all" (AND), "any" (OR), "groups" (CNF: pipe=OR within group, comma=AND between groups)
must_include (string) Filter string per mode leave blank if nothing to filter
must_exclude (string) Comma-separated terms to exclude e.g. "mining,for parts,broken"
max_price (number|null) Maximum price in USD, or null
min_price (number|null) Minimum price in USD, or null
condition (array) Any of: "new", "used", "for_parts" empty array means any condition
category_id (string|null) eBay category ID from the list below, or null if no match
explanation (string) One plain sentence summarizing what you built
eBay "groups" mode syntax example: to find a GPU that is BOTH (nvidia OR amd) AND (16gb OR 8gb):
must_include_mode: "groups"
must_include: "nvidia|amd, 16gb|8gb"
Phrase "like new", "open box", "refurbished" -> condition: ["used"]
Phrase "broken", "for parts", "not working" -> condition: ["for_parts"]
If unsure about condition, use an empty array.
Available eBay categories (use category_id verbatim if one fits otherwise omit):
{category_hints}
If none match, omit category_id (set to null). Respond with valid JSON only. No commentary outside the JSON object.
"""
# ── QueryTranslator ───────────────────────────────────────────────────────────
class QueryTranslator:
    """Translates natural-language search descriptions into SearchParamsResponse.

    Args:
        category_cache: An EbayCategoryCache instance (may have empty cache).
        llm_router: An LLMRouter instance from circuitforge_core.
    """

    def __init__(self, category_cache: "EbayCategoryCache", llm_router: object) -> None:
        self._cache = category_cache
        self._llm_router = llm_router

    def _category_hints(self, natural_language: str) -> str:
        """Build the category-hint section of the system prompt."""
        # Seed the lookup with up to 10 leading words, skipping very short tokens.
        keywords = [w for w in natural_language.split()[:10] if len(w) > 2]
        hints = self._cache.get_relevant(keywords, limit=30)
        if not hints:
            hints = self._cache.get_all_for_prompt(limit=40)
        if not hints:
            return "(no categories cached — omit category_id)"
        return "\n".join(f"{cid}: {path}" for cid, path in hints)

    def translate(self, natural_language: str) -> SearchParamsResponse:
        """Translate a natural-language query into a SearchParamsResponse.

        Raises QueryTranslatorError if the LLM fails or returns bad JSON.
        """
        system_prompt = _SYSTEM_PROMPT_TEMPLATE.format(
            category_hints=self._category_hints(natural_language)
        )
        try:
            raw = self._llm_router.complete(
                natural_language,
                system=system_prompt,
                max_tokens=512,
            )
        except Exception as exc:
            raise QueryTranslatorError(f"LLM backend error: {exc}", raw="") from exc
        return _parse_response(raw)

View file

@ -1,36 +0,0 @@
# app/llm/router.py
# BSL 1.1 License
"""
Snipe LLMRouter shim tri-level config path priority.
Config lookup order:
1. <repo>/config/llm.yaml per-install local override
2. ~/.config/circuitforge/llm.yaml user-level config (circuitforge-core default)
3. env-var auto-config (ANTHROPIC_API_KEY, OPENAI_API_KEY, OLLAMA_HOST, CF_ORCH_URL)
"""
from pathlib import Path
from circuitforge_core.llm import LLMRouter as _CoreLLMRouter
_REPO_CONFIG = Path(__file__).parent.parent.parent / "config" / "llm.yaml"
_USER_CONFIG = Path.home() / ".config" / "circuitforge" / "llm.yaml"
class LLMRouter(_CoreLLMRouter):
    """Snipe-specific LLMRouter with tri-level config resolution.

    Explicit ``config_path`` bypasses the lookup (useful in tests).
    """

    def __init__(self, config_path: Path | None = None) -> None:
        if config_path is not None:
            super().__init__(config_path)
            return
        # Try repo-local config first, then the user-level config.
        for candidate in (_REPO_CONFIG, _USER_CONFIG):
            if candidate.exists():
                super().__init__(candidate)
                return
        # No yaml anywhere — circuitforge-core env-var auto-config takes over.
        super().__init__()

View file

View file

@ -1,110 +0,0 @@
"""Condense Snipe API search results into LLM-friendly format.
Raw Snipe responses are verbose full listing dicts, nested seller objects,
redundant fields. This module trims to what an LLM needs for reasoning:
title, price, market delta, trust summary, GPU inference score, url.
Results are sorted by a composite key: trust × gpu_inference_score / price.
This surfaces high-trust, VRAM-rich, underpriced boards at the top.
"""
from __future__ import annotations
import json
from typing import Any
from app.mcp.gpu_scoring import parse_gpu, score_gpu
def format_results(
    response: dict[str, Any],
    vram_weight: float = 0.6,
    arch_weight: float = 0.4,
    top_n: int = 20,
) -> dict[str, Any]:
    """Return a condensed, LLM-ready summary of a Snipe search response.

    Args:
        response: Raw /api/search JSON (keys read: listings, trust_scores,
            sellers, market_price, adapter_used).
        vram_weight: Weight of VRAM in the GPU inference score.
        arch_weight: Weight of architecture tier in the GPU inference score.
        top_n: Maximum number of results returned after sorting.
    """
    listings: list[dict] = response.get("listings", [])
    trust_map: dict = response.get("trust_scores", {})
    seller_map: dict = response.get("sellers", {})
    market_price: float | None = response.get("market_price")
    condensed = []
    for listing in listings:
        lid = listing.get("platform_listing_id", "")
        title = listing.get("title", "")
        price = float(listing.get("price") or 0)
        # Trust scores are keyed by listing id; sellers by seller platform id.
        trust = trust_map.get(lid, {})
        seller_id = listing.get("seller_platform_id", "")
        seller = seller_map.get(seller_id, {})
        gpu_info = _gpu_info(title, vram_weight, arch_weight)
        trust_score = trust.get("composite_score", 0) or 0
        inference_score = gpu_info["inference_score"] if gpu_info else 0.0
        condensed.append({
            "id": lid,
            "title": title,
            "price": price,
            "vs_market": _vs_market(price, market_price),
            "trust_score": trust_score,
            "trust_partial": bool(trust.get("score_is_partial")),
            "red_flags": _parse_flags(trust.get("red_flags_json", "[]")),
            "seller_age_days": seller.get("account_age_days"),
            "seller_feedback": seller.get("feedback_count"),
            "gpu": gpu_info,
            "url": listing.get("url", ""),
            # Sort key — not included in output
            "_sort_key": _composite_key(trust_score, inference_score, price),
        })
    # Best value first (see _composite_key), then strip the internal key.
    condensed.sort(key=lambda r: r["_sort_key"], reverse=True)
    for r in condensed:
        del r["_sort_key"]
    # Count listings whose title yielded no recognizable GPU model.
    no_gpu = sum(1 for r in condensed if r["gpu"] is None)
    return {
        "total_found": len(listings),
        "showing": min(top_n, len(condensed)),
        "market_price": market_price,
        "adapter": response.get("adapter_used"),
        "no_gpu_detected": no_gpu,
        "results": condensed[:top_n],
    }
def _gpu_info(title: str, vram_weight: float, arch_weight: float) -> dict | None:
    """Parse and score the GPU named in *title*; None when no GPU is detected."""
    spec = parse_gpu(title)
    if spec is None:
        return None
    scored = score_gpu(spec, vram_weight, arch_weight)
    info = {
        "model": spec.model,
        "vram_gb": spec.vram_gb,
        "arch": spec.arch_name,
        "vendor": spec.vendor,
        "vram_score": scored.vram_score,
        "arch_score": scored.arch_score,
        "inference_score": scored.inference_score,
    }
    return info
def _vs_market(price: float, market_price: float | None) -> str | None:
if not market_price or price <= 0:
return None
delta_pct = ((market_price - price) / market_price) * 100
if delta_pct >= 0:
return f"{delta_pct:.0f}% below market (${market_price:.0f} median)"
return f"{abs(delta_pct):.0f}% above market (${market_price:.0f} median)"
def _composite_key(trust_score: float, inference_score: float, price: float) -> float:
"""Higher = better value. Zero price or zero trust scores near zero."""
if price <= 0 or trust_score <= 0:
return 0.0
return (trust_score * (inference_score or 50.0)) / price
def _parse_flags(flags_json: str) -> list[str]:
try:
return json.loads(flags_json) or []
except (ValueError, TypeError):
return []

View file

@ -1,143 +0,0 @@
"""GPU architecture and VRAM scoring for laptop mainboard inference-value ranking.
Parses GPU model names from eBay listing titles and scores them on two axes:
- vram_score: linear 0100, anchored at 24 GB = 100
- arch_score: linear 0100, architecture tier 15 (5 = newest)
inference_score = (vram_score × vram_weight + arch_score × arch_weight)
/ (vram_weight + arch_weight)
Patterns are matched longest-first to prevent "RTX 3070" matching before "RTX 3070 Ti".
"""
from __future__ import annotations
import re
from dataclasses import dataclass
@dataclass(frozen=True)
class GpuSpec:
    # Static facts about one GPU model; frozen so DB entries are immutable.
    model: str  # canonical name, e.g. "RTX 3070 Ti"
    vram_gb: int  # typical laptop-variant VRAM in GB
    arch_tier: int  # 1–5; 5 = newest generation
    arch_name: str  # human-readable, e.g. "Ampere"
    vendor: str  # "nvidia" | "amd" | "intel"
@dataclass
class GpuMatch:
    # Scoring result for one parsed GPU; all scores are 0–100 (see score_gpu).
    spec: GpuSpec
    vram_score: float  # linear in VRAM, 24 GB anchors at 100
    arch_score: float  # linear in arch tier, tier 5 = 100
    inference_score: float  # weighted blend of vram_score and arch_score
# ── GPU database ──────────────────────────────────────────────────────────────
# Laptop VRAM often differs from desktop; using common laptop variants.
# Listed longest-name-first within each family to guide sort order.
# Ordering within each family is cosmetic — _build_patterns() re-sorts by
# model-name length, so "RTX 3070 Ti" is always tried before "RTX 3070".
_GPU_DB: list[GpuSpec] = [
    # NVIDIA Ada Lovelace — tier 5
    GpuSpec("RTX 4090", 16, 5, "Ada Lovelace", "nvidia"),
    GpuSpec("RTX 4080", 12, 5, "Ada Lovelace", "nvidia"),
    GpuSpec("RTX 4070 Ti", 12, 5, "Ada Lovelace", "nvidia"),
    GpuSpec("RTX 4070", 8, 5, "Ada Lovelace", "nvidia"),
    GpuSpec("RTX 4060 Ti", 8, 5, "Ada Lovelace", "nvidia"),
    GpuSpec("RTX 4060", 8, 5, "Ada Lovelace", "nvidia"),
    GpuSpec("RTX 4050", 6, 5, "Ada Lovelace", "nvidia"),
    # NVIDIA Ampere — tier 4
    GpuSpec("RTX 3090", 24, 4, "Ampere", "nvidia"),  # rare laptop variant
    GpuSpec("RTX 3080 Ti", 16, 4, "Ampere", "nvidia"),
    GpuSpec("RTX 3080", 8, 4, "Ampere", "nvidia"),  # most laptop 3080s = 8 GB
    GpuSpec("RTX 3070 Ti", 8, 4, "Ampere", "nvidia"),
    GpuSpec("RTX 3070", 8, 4, "Ampere", "nvidia"),
    GpuSpec("RTX 3060", 6, 4, "Ampere", "nvidia"),
    GpuSpec("RTX 3050 Ti", 4, 4, "Ampere", "nvidia"),
    GpuSpec("RTX 3050", 4, 4, "Ampere", "nvidia"),
    # NVIDIA Turing — tier 3
    GpuSpec("RTX 2080", 8, 3, "Turing", "nvidia"),
    GpuSpec("RTX 2070", 8, 3, "Turing", "nvidia"),
    GpuSpec("RTX 2060", 6, 3, "Turing", "nvidia"),
    GpuSpec("GTX 1660 Ti", 6, 3, "Turing", "nvidia"),
    GpuSpec("GTX 1660", 6, 3, "Turing", "nvidia"),
    GpuSpec("GTX 1650 Ti", 4, 3, "Turing", "nvidia"),
    GpuSpec("GTX 1650", 4, 3, "Turing", "nvidia"),
    # NVIDIA Pascal — tier 2
    GpuSpec("GTX 1080", 8, 2, "Pascal", "nvidia"),
    GpuSpec("GTX 1070", 8, 2, "Pascal", "nvidia"),
    GpuSpec("GTX 1060", 6, 2, "Pascal", "nvidia"),
    GpuSpec("GTX 1050 Ti", 4, 2, "Pascal", "nvidia"),
    GpuSpec("GTX 1050", 4, 2, "Pascal", "nvidia"),
    # AMD RDNA3 — tier 5
    GpuSpec("RX 7900M", 16, 5, "RDNA3", "amd"),
    GpuSpec("RX 7700S", 8, 5, "RDNA3", "amd"),
    GpuSpec("RX 7600M XT", 8, 5, "RDNA3", "amd"),
    GpuSpec("RX 7600S", 8, 5, "RDNA3", "amd"),
    GpuSpec("RX 7600M", 8, 5, "RDNA3", "amd"),
    # AMD RDNA2 — tier 4
    GpuSpec("RX 6850M XT", 12, 4, "RDNA2", "amd"),
    GpuSpec("RX 6800S", 12, 4, "RDNA2", "amd"),
    GpuSpec("RX 6800M", 12, 4, "RDNA2", "amd"),
    GpuSpec("RX 6700S", 10, 4, "RDNA2", "amd"),
    GpuSpec("RX 6700M", 10, 4, "RDNA2", "amd"),
    GpuSpec("RX 6650M", 8, 4, "RDNA2", "amd"),
    GpuSpec("RX 6600S", 8, 4, "RDNA2", "amd"),
    GpuSpec("RX 6600M", 8, 4, "RDNA2", "amd"),
    GpuSpec("RX 6500M", 4, 4, "RDNA2", "amd"),
    # AMD RDNA1 — tier 3
    GpuSpec("RX 5700M", 8, 3, "RDNA1", "amd"),
    GpuSpec("RX 5600M", 6, 3, "RDNA1", "amd"),
    GpuSpec("RX 5500M", 4, 3, "RDNA1", "amd"),
    # Intel Arc Alchemist — tier 4 (improving ROCm/IPEX-LLM support)
    GpuSpec("Arc A770M", 16, 4, "Alchemist", "intel"),
    GpuSpec("Arc A550M", 8, 4, "Alchemist", "intel"),
    GpuSpec("Arc A370M", 4, 4, "Alchemist", "intel"),
    GpuSpec("Arc A350M", 4, 4, "Alchemist", "intel"),
]
def _build_patterns() -> list[tuple[re.Pattern[str], GpuSpec]]:
    """Compile regexes sorted longest-model-name first so a longer name
    (e.g. "RTX 3070 Ti") is tried before its prefix ("RTX 3070")."""
    ordered = sorted(_GPU_DB, key=lambda s: len(s.model), reverse=True)
    compiled = []
    for spec in ordered:
        # Accept "RTX 3070", "RTX3070" and "RTX-3070" alike.
        pattern_text = re.escape(spec.model).replace(r"\ ", r"[\s\-]?")
        compiled.append((re.compile(pattern_text, re.IGNORECASE), spec))
    return compiled


_PATTERNS: list[tuple[re.Pattern[str], GpuSpec]] = _build_patterns()
def parse_gpu(title: str) -> GpuSpec | None:
    """Return the first GPU model found in a listing title, or None."""
    hits = (spec for pattern, spec in _PATTERNS if pattern.search(title))
    return next(hits, None)
def score_gpu(spec: GpuSpec, vram_weight: float, arch_weight: float) -> GpuMatch:
    """Compute normalized inference value scores for a GPU spec.

    vram_score: linear scale, 24 GB anchors at 100. Capped at 100.
    arch_score: linear scale, tier 1 = 0, tier 5 = 100.
    inference_score: weighted average of both, normalized to the total weight.
    """
    vram_score = min(100.0, (spec.vram_gb / 24.0) * 100.0)
    arch_score = ((spec.arch_tier - 1) / 4.0) * 100.0
    total_weight = vram_weight + arch_weight
    if total_weight > 0:
        weighted_sum = vram_score * vram_weight + arch_score * arch_weight
        inference_score = weighted_sum / total_weight
    else:
        # Degenerate weights — nothing to average.
        inference_score = 0.0
    return GpuMatch(
        spec=spec,
        vram_score=round(vram_score, 1),
        arch_score=round(arch_score, 1),
        inference_score=round(inference_score, 1),
    )

View file

@ -1,262 +0,0 @@
"""Snipe MCP Server — eBay search with trust scoring and GPU inference-value ranking.
Exposes three tools to Claude:
snipe_search search eBay via Snipe, GPU-scored and trust-ranked
snipe_enrich deep seller/listing enrichment for a specific result
snipe_save persist a productive search for ongoing monitoring
Run with:
python -m app.mcp.server
(from /Library/Development/CircuitForge/snipe with cf conda env active)
Configure in Claude Code ~/.claude.json:
"snipe": {
"command": "/devl/miniconda3/envs/cf/bin/python",
"args": ["-m", "app.mcp.server"],
"cwd": "/Library/Development/CircuitForge/snipe",
"env": { "SNIPE_API_URL": "http://localhost:8510" }
}
"""
from __future__ import annotations
import asyncio
import json
import os
import httpx
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import TextContent, Tool
_SNIPE_API = os.environ.get("SNIPE_API_URL", "http://localhost:8510")
_TIMEOUT = 120.0
server = Server("snipe")
@server.list_tools()
async def list_tools() -> list[Tool]:
    """Advertise the three Snipe tools (search / enrich / save) to the MCP client."""
    return [
        # Tool 1: condensed, trust- and GPU-ranked eBay search.
        Tool(
            name="snipe_search",
            description=(
                "Search eBay listings via Snipe. Returns results condensed for LLM reasoning, "
                "sorted by composite value: trust_score × gpu_inference_score / price. "
                "GPU inference_score weights VRAM and architecture tier — tune with vram_weight/arch_weight. "
                "Use must_include_mode='groups' with pipe-separated OR alternatives for broad GPU coverage "
                "(e.g. 'rtx 3060|rtx 3070|rtx 3080'). "
                "Laptop Motherboard category ID: 177946."
            ),
            inputSchema={
                "type": "object",
                "required": ["query"],
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "Base eBay search keywords, e.g. 'laptop motherboard'",
                    },
                    "must_include": {
                        "type": "string",
                        "description": (
                            "Comma-separated AND groups; use | for OR within a group. "
                            "E.g. 'rtx 3060|rtx 3070|rx 6700m, 8gb|12gb|16gb'"
                        ),
                    },
                    "must_include_mode": {
                        "type": "string",
                        "enum": ["all", "any", "groups"],
                        "default": "groups",
                        "description": "groups: pipe=OR comma=AND. Recommended for multi-GPU searches.",
                    },
                    "must_exclude": {
                        "type": "string",
                        "description": (
                            "Comma-separated terms to exclude. "
                            "Suggested: 'broken,cracked,no post,for parts,parts only,untested,"
                            "lcd,screen,chassis,housing,bios locked'"
                        ),
                    },
                    "max_price": {
                        "type": "number",
                        "default": 0,
                        "description": "Max price USD (0 = no limit)",
                    },
                    "min_price": {
                        "type": "number",
                        "default": 0,
                        "description": "Min price USD (0 = no limit)",
                    },
                    "pages": {
                        "type": "integer",
                        "default": 2,
                        "description": "Pages of eBay results to fetch (1 page ≈ 50 listings)",
                    },
                    "category_id": {
                        "type": "string",
                        "default": "",
                        "description": (
                            "eBay category ID. "
                            "177946 = Laptop Motherboards & System Boards. "
                            "27386 = Graphics Cards (PCIe, for price comparison). "
                            "Leave empty to search all categories."
                        ),
                    },
                    "vram_weight": {
                        "type": "number",
                        "default": 0.6,
                        "description": (
                            "01. Weight of VRAM in GPU inference score. "
                            "Higher = VRAM is primary ranking factor. "
                            "Use 1.0 to rank purely by VRAM (ignores arch generation)."
                        ),
                    },
                    "arch_weight": {
                        "type": "number",
                        "default": 0.4,
                        "description": (
                            "01. Weight of architecture generation in GPU inference score. "
                            "Higher = prefer newer GPU arch (Ada > Ampere > Turing etc.). "
                            "Use 0.0 to ignore arch and rank purely by VRAM."
                        ),
                    },
                    "top_n": {
                        "type": "integer",
                        "default": 20,
                        "description": "Max results to return after sorting",
                    },
                },
            },
        ),
        # Tool 2: slow-path enrichment for one seller/listing pair.
        Tool(
            name="snipe_enrich",
            description=(
                "Deep-dive enrichment for a specific seller + listing. "
                "Runs BTF scraping and category history to fill partial trust scores (~20s). "
                "Use when snipe_search returns trust_partial=true on a promising listing."
            ),
            inputSchema={
                "type": "object",
                "required": ["seller_id", "listing_id"],
                "properties": {
                    "seller_id": {
                        "type": "string",
                        "description": "eBay seller platform ID (from snipe_search result seller_id field)",
                    },
                    "listing_id": {
                        "type": "string",
                        "description": "eBay listing platform ID (from snipe_search result id field)",
                    },
                    "query": {
                        "type": "string",
                        "default": "",
                        "description": "Original search query — provides market comp context for re-scoring",
                    },
                },
            },
        ),
        # Tool 3: persist a search for monitoring in the Snipe UI.
        Tool(
            name="snipe_save",
            description="Persist a productive search for ongoing monitoring in the Snipe UI.",
            inputSchema={
                "type": "object",
                "required": ["name", "query"],
                "properties": {
                    "name": {
                        "type": "string",
                        "description": "Human-readable label, e.g. 'RTX 3070+ laptop boards under $250'",
                    },
                    "query": {
                        "type": "string",
                        "description": "The eBay search query string",
                    },
                    "filters_json": {
                        "type": "string",
                        "default": "{}",
                        "description": "JSON string of filter params to preserve (max_price, must_include, etc.)",
                    },
                },
            },
        ),
    ]
@server.call_tool()
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
    """Dispatch an MCP tool invocation to its handler coroutine."""
    handlers = {
        "snipe_search": _search,
        "snipe_enrich": _enrich,
        "snipe_save": _save,
    }
    handler = handlers.get(name)
    if handler is None:
        return [TextContent(type="text", text=f"Unknown tool: {name}")]
    return await handler(arguments)
async def _search(args: dict) -> list[TextContent]:
    """Run /api/search and condense the response for LLM consumption."""
    from app.mcp.formatters import format_results

    # Build params — omit empty strings and zero numerics, but always keep q.
    raw = {
        "q": args.get("query", ""),
        "must_include": args.get("must_include", ""),
        "must_include_mode": args.get("must_include_mode", "groups"),
        "must_exclude": args.get("must_exclude", ""),
        "max_price": args.get("max_price", 0),
        "min_price": args.get("min_price", 0),
        "pages": args.get("pages", 2),
        "category_id": args.get("category_id", ""),
    }
    params = {}
    for key, value in raw.items():
        if key == "q" or (value != "" and value != 0):
            params[key] = value

    async with httpx.AsyncClient(timeout=_TIMEOUT) as client:
        resp = await client.get(f"{_SNIPE_API}/api/search", params=params)
        resp.raise_for_status()

    formatted = format_results(
        resp.json(),
        vram_weight=float(args.get("vram_weight", 0.6)),
        arch_weight=float(args.get("arch_weight", 0.4)),
        top_n=int(args.get("top_n", 20)),
    )
    return [TextContent(type="text", text=json.dumps(formatted, indent=2))]
async def _enrich(args: dict) -> list[TextContent]:
    """POST /api/enrich for one seller + listing and return the raw JSON."""
    query_params = {
        "seller": args["seller_id"],
        "listing_id": args["listing_id"],
        "query": args.get("query", ""),
    }
    async with httpx.AsyncClient(timeout=_TIMEOUT) as client:
        resp = await client.post(f"{_SNIPE_API}/api/enrich", params=query_params)
        resp.raise_for_status()
    return [TextContent(type="text", text=json.dumps(resp.json(), indent=2))]
async def _save(args: dict) -> list[TextContent]:
    """POST /api/saved-searches to persist a search; returns a confirmation line."""
    body = {
        "name": args["name"],
        "query": args["query"],
        "filters_json": args.get("filters_json", "{}"),
    }
    async with httpx.AsyncClient(timeout=_TIMEOUT) as client:
        resp = await client.post(f"{_SNIPE_API}/api/saved-searches", json=body)
        resp.raise_for_status()
        data = resp.json()
    return [TextContent(type="text", text=f"Saved (id={data.get('id')}): {args['name']}")]
async def _main() -> None:
    """Serve the MCP protocol over stdio until the client disconnects."""
    async with stdio_server() as (read_stream, write_stream):
        init_options = server.create_initialization_options()
        await server.run(read_stream, write_stream, init_options)


if __name__ == "__main__":
    asyncio.run(_main())

View file

@ -1,10 +1,8 @@
"""PlatformAdapter abstract base and shared types."""
from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Optional
from app.db.models import Listing, Seller

View file

@ -1,12 +1,10 @@
"""eBay Browse API adapter."""
from __future__ import annotations
import hashlib
import logging
from dataclasses import replace
from datetime import datetime, timedelta, timezone
from typing import Optional
import requests
log = logging.getLogger(__name__)
@ -20,7 +18,7 @@ _SHOPPING_API_MAX_PER_SEARCH = 5 # sellers enriched per search call
_SHOPPING_API_INTER_REQUEST_DELAY = 0.5 # seconds between successive calls
_SELLER_ENRICH_TTL_HOURS = 24 # skip re-enrichment within this window
from app.db.models import Listing, MarketComp, Seller
from app.db.models import Listing, Seller, MarketComp
from app.db.store import Store
from app.platforms import PlatformAdapter, SearchFilters
from app.platforms.ebay.auth import EbayTokenManager

View file

@ -1,10 +1,8 @@
"""eBay OAuth2 client credentials token manager."""
from __future__ import annotations
import base64
import time
from typing import Optional
import requests
EBAY_OAUTH_URLS = {

View file

@ -1,394 +0,0 @@
"""Pre-warmed Chromium browser pool for the eBay scraper.
Eliminates cold-start latency (5-10s per call) by keeping a small pool of
long-lived Playwright browser instances with fresh contexts ready to serve.
Key design:
- Pool slots: ``(xvfb_proc, pw_instance, browser, context, display_num, last_used_ts)``
One headed Chromium browser per slot keeps the Kasada fingerprint clean.
- Thread safety: ``queue.Queue`` with blocking get (timeout=3s before fresh fallback).
- Replenishment: after each use, the dirty context is closed and a new context is
opened on the *same* browser, then returned to the queue. Browser launch overhead
is only paid at startup and during idle-cleanup replenishment.
- Idle cleanup: daemon thread closes slots idle for >5 minutes to avoid memory leaks
when the service is quiet.
- Graceful degradation: if Playwright / Xvfb is unavailable (host-side test env),
``fetch_html`` falls back to launching a fresh browser per call same behavior
as before this module existed.
Pool size is controlled via ``BROWSER_POOL_SIZE`` env var (default: 2).
"""
from __future__ import annotations
import itertools
import logging
import os
import queue
import subprocess
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass, field
from typing import Optional
log = logging.getLogger(__name__)
# Reuse the same display counter namespace as scraper.py to avoid collisions.
# Pool uses :100-:199; scraper.py fallback uses :200-:299.
_pool_display_counter = itertools.cycle(range(100, 200))
_IDLE_TIMEOUT_SECS = 300 # 5 minutes
_CLEANUP_INTERVAL_SECS = 60
_QUEUE_TIMEOUT_SECS = 3.0
_CHROMIUM_ARGS = ["--no-sandbox", "--disable-dev-shm-usage"]
_USER_AGENT = (
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36"
)
_VIEWPORT = {"width": 1280, "height": 800}
@dataclass
class _PooledBrowser:
    """One slot in the browser pool."""
    xvfb: subprocess.Popen  # Xvfb process owning this slot's X display
    pw: object  # playwright instance (sync_playwright().start())
    browser: object  # playwright Browser
    ctx: object  # playwright BrowserContext (fresh per use)
    display_num: int  # X display number, allocated from the :100-:199 range
    last_used_ts: float = field(default_factory=time.time)  # drives idle cleanup
def _launch_slot() -> "_PooledBrowser":
    """Launch a new Xvfb display + headed Chromium browser + fresh context.

    Raises on failure — callers must catch and handle gracefully.
    """
    # Imported lazily so module import succeeds when Playwright is absent.
    from playwright.sync_api import sync_playwright
    from playwright_stealth import Stealth  # noqa: F401 — imported here to confirm availability
    display_num = next(_pool_display_counter)
    display = f":{display_num}"
    env = os.environ.copy()
    env["DISPLAY"] = display  # Chromium renders onto this slot's Xvfb display
    xvfb = subprocess.Popen(
        ["Xvfb", display, "-screen", "0", "1280x800x24"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    # Small grace period for Xvfb to bind the display socket.
    time.sleep(0.3)
    pw = sync_playwright().start()
    try:
        browser = pw.chromium.launch(
            headless=False,  # headed per module docstring (fingerprint hygiene)
            env=env,
            args=_CHROMIUM_ARGS,
        )
        ctx = browser.new_context(
            user_agent=_USER_AGENT,
            viewport=_VIEWPORT,
        )
    except Exception:
        # Tear down the half-built slot before re-raising.
        pw.stop()
        xvfb.terminate()
        xvfb.wait()
        raise
    return _PooledBrowser(
        xvfb=xvfb,
        pw=pw,
        browser=browser,
        ctx=ctx,
        display_num=display_num,
        last_used_ts=time.time(),
    )
def _close_slot(slot: _PooledBrowser) -> None:
"""Cleanly close a pool slot: context → browser → Playwright → Xvfb."""
try:
slot.ctx.close()
except Exception:
pass
try:
slot.browser.close()
except Exception:
pass
try:
slot.pw.stop()
except Exception:
pass
try:
slot.xvfb.terminate()
slot.xvfb.wait(timeout=5)
except Exception:
pass
def _replenish_slot(slot: _PooledBrowser) -> _PooledBrowser:
    """Swap the used browser context on *slot* for a fresh one.

    Only the BrowserContext is recreated; the xvfb/pw/browser members are
    carried over unchanged, so callers skip the cost of a full browser
    launch on every fetch. Returns a new _PooledBrowser record with an
    updated last-used timestamp.
    """
    try:
        # Best-effort: a context that already died must not abort replenish.
        slot.ctx.close()
    except Exception:
        pass
    fresh_ctx = slot.browser.new_context(
        user_agent=_USER_AGENT,
        viewport=_VIEWPORT,
    )
    replacement_fields = {
        "xvfb": slot.xvfb,
        "pw": slot.pw,
        "browser": slot.browser,
        "ctx": fresh_ctx,
        "display_num": slot.display_num,
        "last_used_ts": time.time(),
    }
    return _PooledBrowser(**replacement_fields)
class BrowserPool:
    """Thread-safe pool of pre-warmed Playwright browser contexts.

    ``fetch_html`` borrows a slot from an internal queue, navigates with it,
    then re-queues the slot with a fresh context. A daemon thread reaps
    slots idle for more than ``_IDLE_TIMEOUT_SECS``. When the pool is empty
    or Playwright is unavailable, callers transparently fall back to a fully
    fresh per-call browser.
    """
    def __init__(self, size: int = 2) -> None:
        """Create a pool that will pre-warm *size* slots on ``start()``."""
        self._size = size
        self._q: queue.Queue[_PooledBrowser] = queue.Queue()
        self._lock = threading.Lock()
        self._started = False
        self._stopped = False
        self._playwright_available: Optional[bool] = None  # cached after first check
    # ------------------------------------------------------------------
    # Lifecycle
    # ------------------------------------------------------------------
    def start(self) -> None:
        """Pre-warm ``self._size`` browser slots.

        Blocks until every slot has either launched or failed:
        ``ThreadPoolExecutor.__exit__`` waits for all submitted warm-ups.
        Run it from a background thread when the caller must not block
        (FastAPI startup does exactly that). Safe to call multiple times
        (no-op after the first). Launch failures are logged, never raised.
        """
        with self._lock:
            if self._started:
                return
            self._started = True
        if not self._check_playwright():
            log.warning(
                "BrowserPool: Playwright / Xvfb not available — "
                "pool disabled, falling back to per-call fresh browser."
            )
            return
        def _warm_one(_: int) -> None:
            # One slot per worker; swallow failures so a bad launch cannot
            # poison the executor or the caller.
            try:
                slot = _launch_slot()
                self._q.put(slot)
                log.debug("BrowserPool: slot :%d ready", slot.display_num)
            except Exception as exc:
                log.warning("BrowserPool: pre-warm failed: %s", exc)
        # The context manager joins all workers on exit, so by the time we
        # fall out of this block every slot is warmed or logged as failed.
        # (The previous as_completed() pass-loop was redundant for the same
        # reason and has been removed.)
        with ThreadPoolExecutor(max_workers=self._size) as ex:
            for i in range(self._size):
                ex.submit(_warm_one, i)
        _idle_cleaner = threading.Thread(
            target=self._idle_cleanup_loop, daemon=True, name="browser-pool-idle-cleaner"
        )
        _idle_cleaner.start()
        log.info("BrowserPool: started with %d slots", self._q.qsize())
    def stop(self) -> None:
        """Drain and close all pool slots. Called at FastAPI shutdown."""
        with self._lock:
            self._stopped = True
        closed = 0
        while True:
            try:
                slot = self._q.get_nowait()
            except queue.Empty:
                break
            _close_slot(slot)
            closed += 1
        log.info("BrowserPool: stopped, closed %d slot(s)", closed)
    # ------------------------------------------------------------------
    # Core fetch
    # ------------------------------------------------------------------
    def fetch_html(self, url: str, delay: float = 1.0) -> str:
        """Navigate to *url* and return the rendered HTML.

        Sleeps *delay* seconds (politeness throttle), borrows a browser
        context from the pool (blocking up to ``_QUEUE_TIMEOUT_SECS``),
        fetches the page, then replenishes the slot with a fresh context.
        Falls back to a fully fresh browser if the pool is empty after the
        timeout, the pooled fetch fails, or Playwright is unavailable.

        Raises:
            RuntimeError: from the fresh-browser path when Playwright is
                not installed at all.
        """
        time.sleep(delay)
        slot: Optional[_PooledBrowser] = None
        try:
            slot = self._q.get(timeout=_QUEUE_TIMEOUT_SECS)
        except queue.Empty:
            log.debug("BrowserPool: pool empty after %.1fs — using fresh browser", _QUEUE_TIMEOUT_SECS)
        if slot is not None:
            try:
                html = self._fetch_with_slot(slot, url)
                # Replenish: close dirty context, open a fresh one, re-queue.
                try:
                    fresh_slot = _replenish_slot(slot)
                    self._q.put(fresh_slot)
                except Exception as exc:
                    log.warning("BrowserPool: replenish failed, slot discarded: %s", exc)
                    _close_slot(slot)
                return html
            except Exception as exc:
                log.warning("BrowserPool: pooled fetch failed (%s) — closing slot", exc)
                _close_slot(slot)
                # Fall through to fresh browser below.
        # Fallback: fresh browser (same code as old scraper._fetch_url).
        return self._fetch_fresh(url)
    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------
    def _check_playwright(self) -> bool:
        """Return True if playwright + playwright_stealth are importable.

        NOTE: only import availability is checked here — Xvfb presence is
        not verified; a missing Xvfb binary surfaces later as a slot launch
        failure. The result is cached for the pool's lifetime.
        """
        if self._playwright_available is not None:
            return self._playwright_available
        try:
            import playwright  # noqa: F401
            from playwright_stealth import Stealth  # noqa: F401
            self._playwright_available = True
        except ImportError:
            self._playwright_available = False
        return self._playwright_available
    def _fetch_with_slot(self, slot: _PooledBrowser, url: str) -> str:
        """Open a new page on *slot.ctx*, navigate to *url*, return HTML.

        The page is always closed (best-effort) even when navigation fails,
        so a failed fetch cannot leak pages into the pooled context.
        """
        from playwright_stealth import Stealth
        page = slot.ctx.new_page()
        try:
            Stealth().apply_stealth_sync(page)
            page.goto(url, wait_until="domcontentloaded", timeout=30_000)
            page.wait_for_timeout(2000)  # let any JS challenges resolve
            return page.content()
        finally:
            try:
                page.close()
            except Exception:
                pass
    def _fetch_fresh(self, url: str) -> str:
        """Launch a fully fresh browser, fetch *url*, close everything.

        Raises:
            RuntimeError: if playwright / playwright-stealth are missing.
        """
        import subprocess as _subprocess
        try:
            from playwright.sync_api import sync_playwright
            from playwright_stealth import Stealth
        except ImportError as exc:
            raise RuntimeError(
                "Playwright not installed — cannot fetch eBay pages. "
                "Install playwright and playwright-stealth in the Docker image."
            ) from exc
        display_num = next(_pool_display_counter)
        display = f":{display_num}"
        env = os.environ.copy()
        env["DISPLAY"] = display
        xvfb = _subprocess.Popen(
            ["Xvfb", display, "-screen", "0", "1280x800x24"],
            stdout=_subprocess.DEVNULL,
            stderr=_subprocess.DEVNULL,
        )
        try:
            with sync_playwright() as pw:
                browser = pw.chromium.launch(
                    headless=False,
                    env=env,
                    args=_CHROMIUM_ARGS,
                )
                ctx = browser.new_context(
                    user_agent=_USER_AGENT,
                    viewport=_VIEWPORT,
                )
                page = ctx.new_page()
                Stealth().apply_stealth_sync(page)
                page.goto(url, wait_until="domcontentloaded", timeout=30_000)
                page.wait_for_timeout(2000)
                html = page.content()
                browser.close()
        finally:
            xvfb.terminate()
            try:
                # Bounded wait — consistent with _close_slot; an unbounded
                # wait() here could hang the caller if Xvfb ignores SIGTERM.
                xvfb.wait(timeout=5)
            except Exception:
                pass
        return html
    def _idle_cleanup_loop(self) -> None:
        """Daemon thread: drain slots idle for >5 minutes every 60 seconds."""
        while not self._stopped:
            time.sleep(_CLEANUP_INTERVAL_SECS)
            if self._stopped:
                break
            now = time.time()
            idle_cutoff = now - _IDLE_TIMEOUT_SECS
            # Drain the entire queue, keep non-idle slots, close idle ones.
            kept: list[_PooledBrowser] = []
            closed = 0
            while True:
                try:
                    slot = self._q.get_nowait()
                except queue.Empty:
                    break
                if slot.last_used_ts < idle_cutoff:
                    _close_slot(slot)
                    closed += 1
                else:
                    kept.append(slot)
            for slot in kept:
                self._q.put(slot)
            if closed:
                log.info("BrowserPool: idle cleanup closed %d slot(s)", closed)
# ---------------------------------------------------------------------------
# Module-level singleton
# ---------------------------------------------------------------------------
_pool: Optional[BrowserPool] = None  # singleton instance — created lazily by get_pool()
_pool_lock = threading.Lock()  # serializes singleton creation across threads
def get_pool() -> BrowserPool:
    """Return the module-level BrowserPool singleton (creates it if needed).

    Pool size is read from the ``BROWSER_POOL_SIZE`` env var (default: 2).
    Call ``get_pool().start()`` at FastAPI startup to pre-warm slots.
    """
    global _pool
    if _pool is not None:
        return _pool
    # Double-checked: re-test under the lock so two racing callers cannot
    # both construct a pool.
    with _pool_lock:
        if _pool is None:
            pool_size = int(os.environ.get("BROWSER_POOL_SIZE", "2"))
            _pool = BrowserPool(pool_size)
    return _pool

View file

@ -1,254 +0,0 @@
# app/platforms/ebay/categories.py
# MIT License
"""eBay category cache — fetches leaf categories from the Taxonomy API and stores them
in the local SQLite DB for injection into LLM query-builder prompts.
Refreshed weekly. Falls back to a hardcoded bootstrap table when no eBay API
credentials are configured (scraper-only users still get usable category hints).
"""
from __future__ import annotations
import logging
import sqlite3
from datetime import datetime, timedelta, timezone
from typing import Optional
import requests
log = logging.getLogger(__name__)
# Bootstrap table — common categories for self-hosters without eBay API credentials.
# category_id values are stable eBay leaf IDs (US marketplace, as of 2026).
_BOOTSTRAP_CATEGORIES: list[tuple[str, str, str]] = [
("27386", "Graphics Cards", "Consumer Electronics > Computers > Components > Graphics/Video Cards"),
("164", "CPUs/Processors", "Consumer Electronics > Computers > Components > CPUs/Processors"),
("170083","RAM", "Consumer Electronics > Computers > Components > Memory (RAM)"),
("175669","Solid State Drives", "Consumer Electronics > Computers > Components > Drives > Solid State Drives"),
("177089","Hard Drives", "Consumer Electronics > Computers > Components > Drives > Hard Drives"),
("179142","Laptops", "Consumer Electronics > Computers > Laptops & Netbooks"),
("171957","Desktop Computers", "Consumer Electronics > Computers > Desktops & All-in-Ones"),
("293", "Consumer Electronics","Consumer Electronics"),
("625", "Cameras", "Consumer Electronics > Cameras & Photography > Digital Cameras"),
("15052", "Vintage Cameras", "Consumer Electronics > Cameras & Photography > Vintage Movie Cameras"),
("11724", "Audio Equipment", "Consumer Electronics > TV, Video & Home Audio > Home Audio"),
("3676", "Vinyl Records", "Music > Records"),
("870", "Musical Instruments","Musical Instruments & Gear"),
("31388", "Video Game Consoles","Video Games & Consoles > Video Game Consoles"),
("139971","Video Games", "Video Games & Consoles > Video Games"),
("139973","Video Game Accessories", "Video Games & Consoles > Video Game Accessories"),
("14308", "Networking Gear", "Computers/Tablets & Networking > Home Networking & Connectivity"),
("182062","Smartphones", "Cell Phones & Smartphones"),
("9394", "Tablets", "Computers/Tablets & Networking > Tablets & eBook Readers"),
("11233", "Collectibles", "Collectibles"),
]
class EbayCategoryCache:
    """Caches eBay leaf categories in SQLite for LLM prompt injection.

    Args:
        conn: An open sqlite3.Connection with migration 011 already applied
            (i.e. the ``ebay_categories`` table exists).
    """
    def __init__(self, conn: sqlite3.Connection) -> None:
        self._conn = conn
    def is_stale(self, max_age_days: int = 7) -> bool:
        """Return True if the cache is empty or all entries are older than max_age_days.

        An unparseable ``refreshed_at`` timestamp is treated as stale.
        """
        cur = self._conn.execute("SELECT MAX(refreshed_at) FROM ebay_categories")
        row = cur.fetchone()
        if not row or not row[0]:
            return True  # empty table → definitely stale
        try:
            latest = datetime.fromisoformat(row[0])
            if latest.tzinfo is None:
                # SQLite CURRENT_TIMESTAMP strings are naive — assume UTC.
                latest = latest.replace(tzinfo=timezone.utc)
            return datetime.now(timezone.utc) - latest > timedelta(days=max_age_days)
        except ValueError:
            return True
    def _seed_bootstrap(self) -> None:
        """Insert the hardcoded bootstrap categories. Idempotent (ON CONFLICT IGNORE)."""
        now = datetime.now(timezone.utc).isoformat()
        self._conn.executemany(
            "INSERT OR IGNORE INTO ebay_categories"
            " (category_id, name, full_path, is_leaf, refreshed_at)"
            " VALUES (?, ?, ?, 1, ?)",
            [(cid, name, path, now) for cid, name, path in _BOOTSTRAP_CATEGORIES],
        )
        self._conn.commit()
        log.info("EbayCategoryCache: seeded %d bootstrap categories.", len(_BOOTSTRAP_CATEGORIES))
    def get_relevant(
        self,
        keywords: list[str],
        limit: int = 30,
    ) -> list[tuple[str, str]]:
        """Return (category_id, full_path) pairs matching any keyword.

        Matches case-insensitively against both name and full_path via SQL
        LIKE. NOTE: LIKE wildcards (``%``/``_``) inside a keyword are not
        escaped, so such a keyword widens its match.

        Args:
            keywords: Substrings to search for; an empty list short-circuits
                to an empty result.
            limit: Maximum number of rows returned.
        """
        if not keywords:
            return []
        # One parenthesised (name OR full_path) group per keyword, all OR'd
        # together — parentheses make the intended grouping explicit.
        conditions = " OR ".join(
            "(LOWER(name) LIKE ? OR LOWER(full_path) LIKE ?)" for _ in keywords
        )
        params: list[str | int] = []  # str patterns + trailing int LIMIT bound
        for kw in keywords:
            like = f"%{kw.lower()}%"
            params.extend([like, like])
        params.append(limit)  # binds the LIMIT ? placeholder (always last)
        cur = self._conn.execute(
            f"SELECT category_id, full_path FROM ebay_categories"
            f" WHERE {conditions} ORDER BY name LIMIT ?",
            params,
        )
        return [(row[0], row[1]) for row in cur.fetchall()]
    def get_all_for_prompt(self, limit: int = 80) -> list[tuple[str, str]]:
        """Return up to `limit` (category_id, full_path) pairs, sorted by name.

        Used when no keyword context is available.
        """
        cur = self._conn.execute(
            "SELECT category_id, full_path FROM ebay_categories ORDER BY name LIMIT ?",
            (limit,),
        )
        return [(row[0], row[1]) for row in cur.fetchall()]
    def refresh(
        self,
        token_manager: Optional["EbayTokenManager"] = None,
        community_store: Optional[object] = None,
    ) -> int:
        """Fetch the eBay category tree and upsert leaf nodes into SQLite.

        Args:
            token_manager: An `EbayTokenManager` instance for the Taxonomy API.
                If None, falls back to seeding the hardcoded bootstrap table.
            community_store: Optional SnipeCommunityStore instance.
                If provided and token_manager is set, publish leaves after a
                successful Taxonomy API fetch.
                If provided and token_manager is None, fetch from community
                before falling back to the hardcoded bootstrap (requires
                >= 10 rows).

        Returns:
            Number of leaf categories stored.
        """
        if token_manager is None:
            # No API credentials: try the community store first, then the
            # hardcoded bootstrap table as a last resort.
            if community_store is not None:
                try:
                    community_cats = community_store.fetch_categories()
                    if len(community_cats) >= 10:
                        now = datetime.now(timezone.utc).isoformat()
                        self._conn.executemany(
                            "INSERT OR REPLACE INTO ebay_categories"
                            " (category_id, name, full_path, is_leaf, refreshed_at)"
                            " VALUES (?, ?, ?, 1, ?)",
                            [(cid, name, path, now) for cid, name, path in community_cats],
                        )
                        self._conn.commit()
                        log.info(
                            "EbayCategoryCache: loaded %d categories from community store.",
                            len(community_cats),
                        )
                        return len(community_cats)
                    log.info(
                        "EbayCategoryCache: community store has %d categories (< 10) — falling back to bootstrap.",
                        len(community_cats),
                    )
                except Exception:
                    log.warning(
                        "EbayCategoryCache: community store fetch failed — falling back to bootstrap.",
                        exc_info=True,
                    )
            self._seed_bootstrap()
            cur = self._conn.execute("SELECT COUNT(*) FROM ebay_categories")
            return cur.fetchone()[0]
        try:
            token = token_manager.get_token()
            headers = {"Authorization": f"Bearer {token}"}
            # Step 1: get default tree ID for EBAY_US
            id_resp = requests.get(
                "https://api.ebay.com/commerce/taxonomy/v1/get_default_category_tree_id",
                params={"marketplace_id": "EBAY_US"},
                headers=headers,
                timeout=30,
            )
            id_resp.raise_for_status()
            tree_id = id_resp.json()["categoryTreeId"]
            # Step 2: fetch full tree (large response — may take several seconds)
            tree_resp = requests.get(
                f"https://api.ebay.com/commerce/taxonomy/v1/category_tree/{tree_id}",
                headers=headers,
                timeout=120,
            )
            tree_resp.raise_for_status()
            tree = tree_resp.json()
            leaves: list[tuple[str, str, str]] = []
            _extract_leaves(tree["rootCategoryNode"], path="", leaves=leaves)
            now = datetime.now(timezone.utc).isoformat()
            self._conn.executemany(
                "INSERT OR REPLACE INTO ebay_categories"
                " (category_id, name, full_path, is_leaf, refreshed_at)"
                " VALUES (?, ?, ?, 1, ?)",
                [(cid, name, path, now) for cid, name, path in leaves],
            )
            self._conn.commit()
            log.info(
                "EbayCategoryCache: refreshed %d leaf categories from eBay Taxonomy API.",
                len(leaves),
            )
            # Publish to community store if available (best-effort).
            if community_store is not None:
                try:
                    community_store.publish_categories(leaves)
                except Exception:
                    log.warning(
                        "EbayCategoryCache: failed to publish categories to community store.",
                        exc_info=True,
                    )
            return len(leaves)
        except Exception:
            log.warning(
                "EbayCategoryCache: Taxonomy API refresh failed — falling back to bootstrap.",
                exc_info=True,
            )
            self._seed_bootstrap()
            cur = self._conn.execute("SELECT COUNT(*) FROM ebay_categories")
            return cur.fetchone()[0]
def _extract_leaves(
node: dict,
path: str,
leaves: list[tuple[str, str, str]],
) -> None:
"""Recursively walk the eBay category tree, collecting leaf node tuples.
Args:
node: A categoryTreeNode dict from the eBay Taxonomy API response.
path: The ancestor breadcrumb, e.g. "Consumer Electronics > Computers".
leaves: Accumulator list of (category_id, name, full_path) tuples.
"""
cat = node["category"]
cat_id: str = cat["categoryId"]
cat_name: str = cat["categoryName"]
full_path = f"{path} > {cat_name}" if path else cat_name
if node.get("leafCategoryTreeNode", False):
leaves.append((cat_id, cat_name, full_path))
return # leaf — no children to recurse into
for child in node.get("childCategoryTreeNodes", []):
_extract_leaves(child, full_path, leaves)

View file

@ -1,10 +1,8 @@
"""Convert raw eBay API responses into Snipe domain objects."""
from __future__ import annotations
import json
from datetime import datetime, timezone
from typing import Optional
from app.db.models import Listing, Seller

View file

@ -16,7 +16,7 @@ import json
import logging
import re
import time
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime, timedelta, timezone
from typing import Optional
@ -291,7 +291,7 @@ class ScrapedEbayAdapter(PlatformAdapter):
self._delay = delay
def _fetch_url(self, url: str) -> str:
"""Core Playwright fetch — stealthed headed Chromium via pre-warmed browser pool.
"""Core Playwright fetch — stealthed headed Chromium via Xvfb.
Shared by both search (_get) and BTF item-page enrichment (_fetch_item_html).
Results cached for _HTML_CACHE_TTL seconds.
@ -300,8 +300,41 @@ class ScrapedEbayAdapter(PlatformAdapter):
if cached and time.time() < cached[1]:
return cached[0]
from app.platforms.ebay.browser_pool import get_pool # noqa: PLC0415 — lazy import
html = get_pool().fetch_html(url, delay=self._delay)
time.sleep(self._delay)
import subprocess, os
display_num = next(_display_counter)
display = f":{display_num}"
xvfb = subprocess.Popen(
["Xvfb", display, "-screen", "0", "1280x800x24"],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
)
env = os.environ.copy()
env["DISPLAY"] = display
try:
from playwright.sync_api import sync_playwright # noqa: PLC0415 — lazy: only needed in Docker
from playwright_stealth import Stealth # noqa: PLC0415
with sync_playwright() as pw:
browser = pw.chromium.launch(
headless=False,
env=env,
args=["--no-sandbox", "--disable-dev-shm-usage"],
)
ctx = browser.new_context(
user_agent=_HEADERS["User-Agent"],
viewport={"width": 1280, "height": 800},
)
page = ctx.new_page()
Stealth().apply_stealth_sync(page)
page.goto(url, wait_until="domcontentloaded", timeout=30_000)
page.wait_for_timeout(2000) # let any JS challenges resolve
html = page.content()
browser.close()
finally:
xvfb.terminate()
xvfb.wait()
_html_cache[url] = (html, time.time() + _HTML_CACHE_TTL)
return html

View file

@ -19,6 +19,7 @@ import logging
from pathlib import Path
import requests
from circuitforge_core.db import get_connection
from circuitforge_core.llm import LLMRouter

View file

@ -5,11 +5,9 @@ from __future__ import annotations
from pathlib import Path
from circuitforge_core.tasks.scheduler import (
TaskScheduler, # re-export for tests
)
from circuitforge_core.tasks.scheduler import (
TaskScheduler,
get_scheduler as _base_get_scheduler,
reset_scheduler, # re-export for lifespan teardown
reset_scheduler, # re-export for tests
)
from app.tasks.runner import LLM_TASK_TYPES, VRAM_BUDGETS, run_task

View file

@ -14,8 +14,7 @@ Intentionally ungated (free for all):
- saved_searches retention feature; friction cost outweighs gate value
"""
from __future__ import annotations
from circuitforge_core.tiers import can_use as _core_can_use # noqa: F401
from circuitforge_core.tiers import can_use as _core_can_use, TIERS # noqa: F401
# Feature key → minimum tier required.
FEATURES: dict[str, str] = {
@ -26,7 +25,6 @@ FEATURES: dict[str, str] = {
"reverse_image_search": "paid",
"ebay_oauth": "paid", # full trust scores via eBay Trading API
"background_monitoring": "paid", # limited at Paid; see LIMITS below
"llm_query_builder": "paid", # inline natural-language → filter translator
# Premium tier
"auto_bidding": "premium",

View file

@ -1,12 +1,10 @@
import hashlib
import math
from app.db.models import Listing, TrustScore
from app.db.store import Store
from .aggregator import Aggregator
from .metadata import MetadataScorer
from .photo import PhotoScorer
from .aggregator import Aggregator
from app.db.models import Seller, Listing, TrustScore
from app.db.store import Store
import hashlib
import math
class TrustScorer:

View file

@ -1,10 +1,8 @@
"""Composite score and red flag extraction."""
from __future__ import annotations
import json
from datetime import datetime, timezone
from typing import Optional
from app.db.models import Seller, TrustScore
HARD_FILTER_AGE_DAYS = 7
@ -60,9 +58,9 @@ def _days_since(iso: Optional[str]) -> Optional[int]:
dt = datetime.fromisoformat(iso.replace("Z", "+00:00"))
# Normalize to naive UTC so both paths (timezone-aware ISO and SQLite
# CURRENT_TIMESTAMP naive strings) compare correctly.
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
return (datetime.now(timezone.utc) - dt).days
if dt.tzinfo is not None:
dt = dt.replace(tzinfo=None)
return (datetime.utcnow() - dt).days
except ValueError:
return None

View file

@ -1,9 +1,7 @@
"""Five metadata trust signals, each scored 020."""
from __future__ import annotations
import json
from typing import Optional
from app.db.models import Seller
ELECTRONICS_CATEGORIES = {"ELECTRONICS", "COMPUTERS_TABLETS", "VIDEO_GAMES", "CELL_PHONES"}

View file

@ -1,9 +1,7 @@
"""Perceptual hash deduplication within a result set (free tier, v0.1)."""
from __future__ import annotations
import io
from typing import Optional
import io
import requests
try:

View file

@ -1,24 +1,19 @@
"""Main search + results page."""
from __future__ import annotations
import logging
import os
from pathlib import Path
import streamlit as st
from circuitforge_core.config import load_env
from app.db.store import Store
from app.platforms import PlatformAdapter, SearchFilters
from app.trust import TrustScorer
from app.ui.components.easter_eggs import (
auction_hours_remaining,
check_snipe_mode,
inject_steal_css,
render_snipe_mode_banner,
)
from app.ui.components.filters import FilterState, build_filter_options, render_filter_sidebar
from app.ui.components.filters import build_filter_options, render_filter_sidebar, FilterState
from app.ui.components.listing_row import render_listing_row
from app.ui.components.easter_eggs import (
inject_steal_css, check_snipe_mode, render_snipe_mode_banner,
auction_hours_remaining,
)
log = logging.getLogger(__name__)

View file

@ -22,6 +22,7 @@ import streamlit as st
from app.db.models import Listing, TrustScore
# ---------------------------------------------------------------------------
# 1. Konami → Snipe Mode
# ---------------------------------------------------------------------------

View file

@ -1,12 +1,9 @@
"""Build dynamic filter options from a result set and render the Streamlit sidebar."""
from __future__ import annotations
import json
from dataclasses import dataclass, field
from typing import Optional
import streamlit as st
from app.db.models import Listing, TrustScore

View file

@ -1,17 +1,13 @@
"""Render a single listing row with trust score, badges, and error states."""
from __future__ import annotations
import json
from typing import Optional
import streamlit as st
from app.db.models import Listing, Seller, TrustScore
from app.db.models import Listing, TrustScore, Seller
from app.ui.components.easter_eggs import (
auction_hours_remaining,
is_steal,
render_auction_notice,
render_steal_banner,
is_steal, render_steal_banner, render_auction_notice, auction_hours_remaining,
)

View file

@ -1,8 +1,6 @@
"""First-run wizard: collect eBay credentials and write .env."""
from __future__ import annotations
from pathlib import Path
import streamlit as st
from circuitforge_core.wizard import BaseWizard

View file

@ -20,16 +20,9 @@ services:
CLOUD_MODE: "true"
CLOUD_DATA_ROOT: /devl/snipe-cloud-data
# DIRECTUS_JWT_SECRET, HEIMDALL_URL, HEIMDALL_ADMIN_TOKEN — set in .env (never commit)
# CF_ORCH_URL routes LLM query builder through cf-orch for VRAM-aware scheduling.
# Override in .env to use a different coordinator URL.
CF_ORCH_URL: "http://host.docker.internal:7700"
CF_APP_NAME: snipe
extra_hosts:
- "host.docker.internal:host-gateway"
# No network_mode: host — isolated on snipe-cloud-net; nginx reaches it via 'api:8510'
volumes:
- /devl/snipe-cloud-data:/devl/snipe-cloud-data
- ./config/llm.cloud.yaml:/app/snipe/config/llm.yaml:ro
networks:
- snipe-cloud-net

View file

@ -4,9 +4,7 @@
# What this adds over compose.yml:
# - Live source mounts so code changes take effect without rebuilding images
# - RELOAD=true to enable uvicorn --reload for the API
# - cf-orch-agent sidecar for local GPU task routing (opt-in: --profile orch)
#
# NOTE: circuitforge-core is NOT mounted here — use `./manage.sh build` to
# - NOTE: circuitforge-core is NOT mounted here — use `./manage.sh build` to
# pick up cf-core changes. Mounting it as a bind volume would break self-hosted
# installs that don't have the sibling directory.
services:
@ -17,32 +15,3 @@ services:
- ./tests:/app/snipe/tests
environment:
- RELOAD=true
# Point the LLM/vision task scheduler at the local cf-orch coordinator.
# Only has effect when CF_ORCH_URL is set (uncomment in .env, or set inline).
# - CF_ORCH_URL=http://10.1.10.71:7700
# cf-orch agent — routes trust_photo_analysis vision tasks to the GPU coordinator.
# Only starts when you pass --profile orch:
# docker compose --profile orch up
#
# Requires a running cf-orch coordinator. Default: Heimdall at 10.1.10.71:7700.
# Override via CF_ORCH_COORDINATOR_URL in .env.
#
# To use a locally-built cf-orch image instead of the published one:
# build:
# context: ../circuitforge-orch
# dockerfile: Dockerfile
cf-orch-agent:
image: ghcr.io/circuitforgellc/cf-orch:latest
command: >
agent
--coordinator ${CF_ORCH_COORDINATOR_URL:-http://10.1.10.71:7700}
--node-id snipe-dev
--host 0.0.0.0
--port 7701
--advertise-host 127.0.0.1
environment:
CF_COORDINATOR_URL: ${CF_ORCH_COORDINATOR_URL:-http://10.1.10.71:7700}
restart: on-failure
profiles:
- orch

View file

@ -1,38 +0,0 @@
# config/llm.cloud.yaml
# Snipe — LLM config for the managed cloud instance (menagerie)
#
# Mounted read-only into the cloud API container at /app/config/llm.yaml
# (see compose.cloud.yml). Personal fine-tunes and local-only backends
# (claude_code, copilot) are intentionally excluded here.
#
# CF Orchestrator routes both ollama and vllm allocations for VRAM-aware
# scheduling. CF_ORCH_URL must be set in .env for allocations to resolve;
# if cf-orch is unreachable the backend falls back to its static base_url.
#
# Model choice for query builder: llama3.1:8b
# - Reliable instruction following and JSON output
# - No creative fine-tuning drift (unlike writer models in the pool)
# - Fits comfortably in 8 GB VRAM alongside other services
backends:
ollama:
type: openai_compat
base_url: http://host.docker.internal:11434/v1
api_key: ollama
model: llama3.1:8b
enabled: true
supports_images: false
cf_orch:
service: ollama
ttl_s: 300
anthropic:
type: anthropic
api_key_env: ANTHROPIC_API_KEY
model: claude-haiku-4-5-20251001
enabled: false
supports_images: false
fallback_order:
- ollama
- anthropic

View file

@ -1,60 +0,0 @@
# config/llm.yaml.example
# Snipe — LLM backend configuration
#
# Copy to config/llm.yaml and edit for your setup.
# The query builder ("Search with AI") uses the text fallback_order.
#
# Backends are tried in fallback_order until one succeeds.
# Set enabled: false to skip a backend without removing it.
#
# CF Orchestrator (cf-orch): when CF_ORCH_URL is set in the environment and a
# backend has a cf_orch block, allocations are routed through cf-orch for
# VRAM-aware scheduling. Omit cf_orch to hit the backend directly.
backends:
anthropic:
type: anthropic
api_key_env: ANTHROPIC_API_KEY
model: claude-haiku-4-5-20251001
enabled: false
supports_images: false
openai:
type: openai_compat
base_url: https://api.openai.com/v1
api_key_env: OPENAI_API_KEY
model: gpt-4o-mini
enabled: false
supports_images: false
ollama:
type: openai_compat
base_url: http://localhost:11434/v1
api_key: ollama
model: llama3.1:8b
enabled: true
supports_images: false
# Uncomment to route through cf-orch for VRAM-aware scheduling:
# cf_orch:
# service: ollama
# ttl_s: 300
# ── cf-orch trunk services ─────────────────────────────────────────────────
# Allocate via cf-orch; the router calls the allocated service directly.
# Set CF_ORCH_URL (env) or url below to activate.
cf_text:
type: openai_compat
enabled: false
base_url: http://localhost:8008/v1
model: __auto__
api_key: any
supports_images: false
cf_orch:
service: cf-text
model_candidates: []
ttl_s: 3600
fallback_order:
- anthropic
- openai
- ollama

View file

@ -1,39 +0,0 @@
# eBay API Keys (Optional)
Snipe works without any credentials using its Playwright scraper fallback. Adding eBay API credentials unlocks faster searches and higher rate limits.
## What API keys enable
| Feature | Without keys | With keys |
|---------|-------------|-----------|
| Listing search | Playwright scraper | eBay Browse API (faster, higher limits) |
| Market comps (completed sales) | Not available | eBay Marketplace Insights API |
| Seller account data | BTF scraper (Xvfb) | BTF scraper (same — eBay API doesn't expose join date) |
## Getting credentials
1. Create a developer account at [developer.ebay.com](https://developer.ebay.com/my/keys)
2. Create a new application (choose **Production**)
3. Copy your **App ID (Client ID)** and **Cert ID (Client Secret)**
## Configuration
Add your credentials to `.env`:
```bash
EBAY_APP_ID=YourAppID-...
EBAY_CERT_ID=YourCertID-...
```
Then restart:
```bash
./manage.sh restart
```
## Verifying
After restart, the search bar shows **API** as available in the data source selector. The auto mode will use the API by default.
!!! note
The Marketplace Insights API (for completed sales comps) requires an approved eBay developer account. New accounts may not have access. Snipe gracefully falls back to Browse API results when Insights returns 403 or 404.

View file

@ -1,102 +0,0 @@
# Installation
## Requirements
- Docker with Compose plugin
- Git
- No API keys required to get started
## One-line install
```bash
bash <(curl -fsSL https://git.opensourcesolarpunk.com/Circuit-Forge/snipe/raw/branch/main/install.sh)
```
This clones the repo to `~/snipe` and starts the stack. Open **http://localhost:8509** when it completes.
## Manual install
Snipe's API image is built from a context that includes `circuitforge-core`. Both repos must sit as siblings:
```
workspace/
├── snipe/ ← this repo
└── circuitforge-core/ ← required sibling
```
```bash
mkdir snipe-workspace && cd snipe-workspace
git clone https://git.opensourcesolarpunk.com/Circuit-Forge/snipe.git
git clone https://git.opensourcesolarpunk.com/Circuit-Forge/circuitforge-core.git
cd snipe
cp .env.example .env
./manage.sh start
```
## Managing the stack
```bash
./manage.sh start # build and start all containers
./manage.sh stop # stop containers
./manage.sh restart # rebuild and restart
./manage.sh status # container health
./manage.sh logs # tail logs
./manage.sh open # open in browser
```
## Updating
```bash
git pull
./manage.sh restart
```
## Ports
| Service | Default port |
|---------|-------------|
| Web UI | 8509 |
| API | 8510 |
Both ports are configurable in `.env`.
---
## No-Docker install (bare metal)
Run `install.sh --bare-metal` to skip Docker and install via conda or venv instead.
This sets up the Python environment, builds the Vue frontend, and writes helper scripts.
**Requirements:** Python 3.11+, Node.js 20+, `xvfb` (for the eBay scraper).
```bash
bash <(curl -fsSL https://git.opensourcesolarpunk.com/Circuit-Forge/snipe/raw/branch/main/install.sh) --bare-metal
```
After install, you get two scripts:
| Script | What it does |
|--------|-------------|
| `./start-local.sh` | Start the FastAPI API on port 8510 |
| `./serve-ui.sh` | Serve the built frontend with `python3 -m http.server 8509` (dev only) |
`serve-ui.sh` is single-threaded and suitable for testing only. For a real deployment, use nginx.
### nginx config (production bare-metal)
Install nginx, copy the sample config, and reload:
```bash
sudo cp docs/nginx-self-hosted.conf /etc/nginx/sites-available/snipe
sudo ln -s /etc/nginx/sites-available/snipe /etc/nginx/sites-enabled/snipe
# Edit the file — update `root` to your actual web/dist path
sudo nginx -t && sudo systemctl reload nginx
```
See [`docs/nginx-self-hosted.conf`](../nginx-self-hosted.conf) for the full config with TLS notes.
### Chromium / Xvfb note
Snipe uses headed Chromium via Xvfb to bypass Kasada (the anti-bot layer on eBay seller profile pages). If Chromium is not detected, the scraper falls back to the eBay Browse API — add `EBAY_APP_ID` / `EBAY_CERT_ID` to `.env` so that fallback has credentials.
The installer detects and installs Xvfb automatically on Debian/Ubuntu/Fedora. Chromium is installed via `playwright install chromium`. macOS is not supported for the scraper path.

View file

@ -1,39 +0,0 @@
# Quick Start
## 1. Run a search
Type a query into the search bar and press **Search** or hit Enter.
!!! tip
Start broad (`vintage camera`) then narrow with keyword filters once you see results. The must-include and must-exclude fields let you refine without re-searching from scratch.
## 2. Read the trust badge
Each listing card shows a trust badge in the top-right corner:
| Badge | Meaning |
|-------|---------|
| Green (70–100) | Established seller, no major concerns |
| Yellow (40–69) | Some signals missing or marginal |
| Red (0–39) | Multiple red flags — proceed carefully |
| `STEAL` label | Price significantly below market median |
A spinning indicator below the badge means enrichment is still in progress (account age is being fetched). Scores update automatically when enrichment completes.
## 3. Check red flags
Red flag pills appear below the listing title when Snipe detects a concern. Hover or tap a flag for a plain-language explanation.
## 4. Click through to eBay
Listing titles link directly to eBay. In cloud mode, links include an affiliate code that supports Snipe's development at no cost to you. You can opt out in Settings.
## 5. Filter results
Use the sidebar filters to narrow results without re-running the eBay search:
- **Min trust score** — slider to hide low-confidence listings
- **Min account age / Min feedback** — hide new or low-volume sellers
- **Hide listings checkboxes** — hide new accounts, suspicious prices, duplicate photos, damage mentions, long-on-market, significant price drop
These filters apply instantly to the current result set. Use the search bar to change the underlying eBay query.

View file

@ -1,33 +0,0 @@
# Snipe
**eBay trust scoring before you bid.**
![Snipe landing hero](screenshots/01-hero.png)
Snipe scores eBay listings and sellers for trustworthiness before you place a bid. Paste a search query, get results with trust scores, and know exactly which listings are worth your time.
## What it catches
- **New accounts** selling high-value items with no track record
- **Suspicious prices** — listings priced far below completed sales
- **Duplicate photos** — images copy-pasted from other listings (perceptual hash deduplication)
- **Damage buried in titles** — scratch, dent, untested, for parts, and similar
- **Known bad actors** — sellers on the community blocklist
## How it works
![Search results with trust scores](screenshots/02-results.png)
Each listing gets a composite trust score from 0–100 based on five seller signals: account age, feedback count, feedback ratio, price vs. market, and category history. Red flags are surfaced alongside the score, not buried in it.
## Free, no account required
Search and scoring work without creating an account. Community features (reporting sellers, importing blocklists) require a free account.
## Quick links
- [Installation](getting-started/installation.md)
- [Understanding trust scores](user-guide/trust-scores.md)
- [Red flags reference](user-guide/red-flags.md)
- [Cloud demo](https://menagerie.circuitforge.tech/snipe)
- [Source code](https://git.opensourcesolarpunk.com/Circuit-Forge/snipe)

View file

@ -1,58 +0,0 @@
# nginx config for Snipe — bare-metal self-hosted (no Docker).
#
# Usage:
# sudo cp docs/nginx-self-hosted.conf /etc/nginx/sites-available/snipe
# # Edit: update `root` to your actual web/dist path and `server_name` to your hostname
# sudo ln -s /etc/nginx/sites-available/snipe /etc/nginx/sites-enabled/snipe
# sudo nginx -t && sudo systemctl reload nginx
#
# Assumes:
# - The Snipe FastAPI API is running on 127.0.0.1:8510 (./start-local.sh)
# - The Vue frontend was built by install.sh into web/dist/
# - TLS termination is handled separately (Caddy, certbot, or upstream proxy)
#
# For TLS with Let's Encrypt, run:
# sudo certbot --nginx -d your.domain.com
# Certbot will add the ssl_certificate lines automatically.
server {
    listen 80;
    server_name your.domain.com; # replace or use _ for catch-all

    # Path to the Vue production build — update to match your install directory
    root /home/youruser/snipe/snipe/web/dist;
    index index.html;

    # Proxy all /api/ requests to the FastAPI backend
    location /api/ {
        proxy_pass http://127.0.0.1:8510;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # SSE (Server-Sent Events) — live trust score updates
        # These are long-lived streaming responses; disable buffering.
        # HTTP/1.1 + an empty Connection header are required so nginx keeps
        # the upstream connection open instead of closing the event stream
        # (the proxy module defaults to HTTP/1.0 with "Connection: close").
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_buffering off;
        proxy_cache off;
        proxy_read_timeout 120s;
    }

    # index.html — never cache; ensures clients always get the latest entry point
    # after a deployment (JS/CSS chunks are content-hashed so they cache forever)
    location = /index.html {
        add_header Cache-Control "no-cache, no-store, must-revalidate";
        try_files $uri /index.html;
    }

    # SPA fallback — all unknown paths serve index.html so Vue Router handles routing
    location / {
        try_files $uri $uri/ /index.html;
    }

    # Long-term cache for content-hashed static assets
    location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff2?)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }
}

View file

@ -1 +0,0 @@
(function(){var s=document.createElement("script");s.defer=true;s.dataset.domain="docs.circuitforge.tech,circuitforge.tech";s.dataset.api="https://analytics.circuitforge.tech/api/event";s.src="https://analytics.circuitforge.tech/js/script.js";document.head.appendChild(s);})();

View file

@ -1,66 +0,0 @@
# Architecture
## Stack
| Layer | Technology |
|-------|-----------|
| Frontend | Vue 3, Pinia, UnoCSS |
| API | FastAPI (Python), host networking |
| Database | SQLite (WAL mode) |
| Scraper | Playwright + Chromium + Xvfb |
| Container | Docker Compose |
## Data flow
```mermaid
graph LR
User -->|search query| VueSPA
VueSPA -->|GET /api/search| FastAPI
FastAPI -->|Browse API or Playwright| eBay
eBay --> FastAPI
FastAPI -->|score_batch| TrustScorer
TrustScorer --> FastAPI
FastAPI -->|BTF enrich queue| XvfbScraper
XvfbScraper -->|seller profile| eBayProfile
eBayProfile --> XvfbScraper
XvfbScraper -->|account_age update| SQLite
FastAPI -->|SSE push| VueSPA
```
## Database layout
Snipe uses two SQLite databases in cloud mode:
| Database | Contents |
|----------|---------|
| `shared.db` | Sellers, listings, market comps, community signals, scammer blocklist |
| `user.db` | Trust scores, saved searches, user preferences, background tasks |
In local (self-hosted) mode, everything uses a single `snipe.db`.
WAL (Write-Ahead Logging) mode is enabled on all connections for concurrent reader safety.
## Seller enrichment pipeline
eBay's Browse API returns listings without seller account ages. Snipe fetches account ages by loading the seller's eBay profile page in a headed Chromium instance via Xvfb.
Each enrichment session uses a unique Xvfb display number (`:200`–`:299`, cycling) to prevent lock file collisions across parallel sessions. Kasada bot protection blocks headless Chrome and curl-based requests — only a full headed browser session passes.
## Affiliate URL wrapping
All listing URLs are wrapped with an eBay Partner Network (EPN) affiliate code before being returned to the frontend. Resolution order:
1. User opted out → plain URL
2. User has BYOK EPN ID (Premium) → wrap with user's ID
3. CF affiliate ID configured in `.env` → wrap with CF's ID
4. Not configured → plain URL
## Licensing
| Layer | License |
|-------|---------|
| Discovery pipeline (scraper, trust scoring, search) | MIT |
| AI features (photo analysis, description reasoning) | BSL 1.1 |
| Fine-tuned model weights | Proprietary |
BSL 1.1 is free for personal non-commercial self-hosting. SaaS re-hosting requires a commercial license. Converts to MIT after 4 years.

View file

@ -1,31 +0,0 @@
# Tier System
Snipe uses CircuitForge's three-tier model.
## Tiers
| Tier | Price | Key features |
|------|-------|-------------|
| **Free** | Free | Search, trust scoring, red flags, blocklist, market comps, affiliate links, saved searches |
| **Paid** | $5/mo or $129 lifetime | Photo analysis, background monitoring (up to 5 searches), serial number check |
| **Premium** | $10/mo or $249 lifetime | All Paid features, background monitoring (up to 25), custom affiliate ID (BYOK EPN) |
## Free tier philosophy
Snipe's core trust-scoring pipeline — the part that actually catches scammers — is entirely free and requires no account. This is intentional.
More users = more community blocklist data = better protection for everyone. The free tier drives the network effect that makes the paid features more valuable.
## Self-hosted
Running Snipe yourself? All features are available with no tier gates in local mode. Bring your own LLM (Ollama compatible) to unlock photo analysis and description reasoning on your own hardware.
## BYOK (Bring Your Own Key)
Premium subscribers can supply:
- **Local LLM endpoint** — any OpenAI-compatible server (Ollama, vLLM, LM Studio) unlocks AI features on Free tier
- **eBay Partner Network campaign ID** — your affiliate revenue instead of Snipe's
## Cloud trial
15-day free trial of Paid tier on first signup. No credit card required.

View file

@ -1,84 +0,0 @@
# Trust Score Algorithm
## Signal scoring
Each signal contributes 0–20 points to the composite score.
### account_age
| Days old | Score |
|----------|-------|
| < 7 | 0 (triggers `new_account` hard flag) |
| 7–30 | 5 |
| 30–90 | 10 |
| 90–365 | 15 |
| > 365 | 20 |
Data source: eBay profile page (BTF scraper via headed Chromium + Xvfb — eBay API does not expose account registration date).
### feedback_count
| Count | Score |
|-------|-------|
| 0 | 0 (triggers `zero_feedback` hard flag, score capped at 35) |
| 1–9 | 5 |
| 10–49 | 10 |
| 50–199 | 15 |
| 200+ | 20 |
### feedback_ratio
| Ratio | Score |
|-------|-------|
| < 80% (with 20+ reviews) | 0 (triggers `established_bad_actor`) |
| < 90% | 5 |
| 90–94% | 10 |
| 95–98% | 15 |
| 99–100% | 20 |
### price_vs_market
Compares listing price to the median of recent completed sales from eBay Marketplace Insights API.
| Price vs. median | Score |
|-----------------|-------|
| < 40% | 0 (triggers `suspicious_price` flag) |
| 40–59% | 5 |
| 60–79% | 10 |
| 80–120% | 20 (normal range) |
| 121–149% | 15 |
| 150%+ | 10 |
`suspicious_price` flag is suppressed when the market price distribution is too wide (standard deviation > 50% of median) — this prevents false positives on heterogeneous search results.
When no market data is available, this signal returns `None` and is excluded from the composite.
### category_history
Derived from the seller's recent listing history (categories of their sold items):
| Result | Score |
|--------|-------|
| Seller has history in this category | 20 |
| Seller sells cross-category (generalist) | 10 |
| No category history available | None (excluded from composite) |
## Composite calculation
```
composite = (sum of available signal scores) / (20 × count of available signals) × 100
```
This ensures missing signals don't penalize a seller — only available signals count toward the denominator.
## Zero-feedback cap
When `feedback_count == 0`, the composite is hard-capped at **35** after the standard calculation. A 0-feedback seller cannot score above 35 regardless of other signals.
## Partial scores
A score is marked **partial** when one or more signals are `None` (not yet available). The score is recalculated and the partial flag is cleared when enrichment completes.
## Red flag override
Red flags are evaluated independently of the composite score. A seller can have a high composite score and still trigger red flags — for example, a long-established seller with a suspicious-priced listing and duplicate photos.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 118 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 191 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 118 KiB

View file

@ -1,34 +0,0 @@
# Community Blocklist
The blocklist is a shared database of sellers flagged by Snipe users. When a blocklisted seller appears in search results, their listing card is marked with an `established_bad_actor` flag.
## Viewing the blocklist
Navigate to **Blocklist** in the sidebar to see all reported sellers, with usernames, platforms, and optional reasons.
## Reporting a seller
On any listing card, click the **Block** button (shield icon) to report the seller. You can optionally add a reason (e.g. "sent counterfeit item", "never shipped").
!!! note
In cloud mode, blocking requires a signed-in account. Anonymous users can view the blocklist but cannot report sellers.
## Importing a blocklist
The Blocklist view has an **Import CSV** button. The accepted format:
```csv
platform,platform_seller_id,username,reason
ebay,seller123,seller123,counterfeit item
ebay,badactor99,badactor99,
```
The `reason` column is optional. `platform` defaults to `ebay` if omitted.
## Exporting the blocklist
Click **Export CSV** in the Blocklist view to download the current blocklist. Use this to back up, share with others, or import into another Snipe instance.
## Blocklist sync (roadmap)
Batch reporting to eBay's Trust & Safety team is on the roadmap (issue #4). This would allow community-flagged sellers to be reported directly to eBay from within Snipe.

View file

@ -1,58 +0,0 @@
# Red Flags
Red flags appear as pills on listing cards when Snipe detects a concern. Each flag is independent — a listing can have multiple flags at once.
## Hard red flags
These override the composite score display with a strong visual warning.
### `zero_feedback`
Seller has received zero feedback. Score is capped at 35.
### `new_account`
Account registered within the last 7 days. Extremely high fraud indicator for high-value listings.
### `established_bad_actor`
Feedback ratio below 80% with 20 or more reviews. A sustained pattern of negative feedback from an established seller.
## Soft flags
Shown as warnings — not automatic disqualifiers, but worth investigating.
### `account_under_30_days`
Account is less than 30 days old. Less severe than `new_account` but worth noting for high-value items.
### `low_feedback_count`
Fewer than 10 feedback ratings total. Seller is new to eBay or rarely transacts.
### `suspicious_price`
Listing price is more than 50% below the market median from recent completed sales.
!!! note
    This flag is suppressed automatically when the search returns a heterogeneous price range — for example, a search that mixes laptop generations spanning $200–$2,000. In that case, the median is not meaningful and flagging would produce false positives.
### `duplicate_photo`
The same image (by perceptual hash) appears on another listing. Common in scams where photos are lifted from legitimate listings.
### `scratch_dent_mentioned`
The title or description contains keywords indicating cosmetic damage, functional problems, or evasive language:
- Damage: *scratch, dent, crack, chip, broken, damaged*
- Functional: *untested, for parts, parts only, as-is, not working*
- Evasive: *read description, see description, sold as-is*
### `long_on_market`
The listing has been seen 5 or more times over 14 or more days without selling. A listing that isn't moving may be overpriced or have undisclosed problems.
### `significant_price_drop`
The current price is more than 20% below the price when Snipe first saw this listing. Sudden drops can indicate seller desperation — or a motivated seller — depending on context.
## Triple Red
When a listing hits all three of these simultaneously:
- `new_account` OR `account_under_30_days`
- `suspicious_price`
- `duplicate_photo` OR `zero_feedback` OR `established_bad_actor` OR `scratch_dent_mentioned`
The card gets a **pulsing red border glow** to make it impossible to miss in a crowded results grid.

View file

@ -1,56 +0,0 @@
# Searching
## Basic search
Type a query and press **Search**. Snipe fetches listings from eBay and scores each seller in parallel.
Result count depends on the **Pages to fetch** setting (1 page = up to 200 listings). More pages means a more complete picture but a longer wait.
## Keyword modes
The must-include field has three modes:
| Mode | Behavior |
|------|---------|
| **All** | Every term must appear in results (eBay AND search) |
| **Any** | At least one term must appear (eBay OR search) |
| **Groups** | Comma-separated groups, each searched separately and merged |
Groups mode is the most powerful. Use it to search for variations that eBay's relevance ranking might drop:
```
16gb, 32gb
RTX 4090, 4090 founders
```
This sends two separate eBay queries and deduplicates the results by listing ID.
## Must-exclude
Terms in the must-exclude field are forwarded to eBay on re-search. Common uses:
```
broken, parts only, for parts, untested, cracked
```
!!! note
Must-exclude applies on re-search (it goes to eBay). The **Hide listings: Scratch/dent mentioned** sidebar filter applies instantly to current results using Snipe's own detection logic, which is more comprehensive than eBay's keyword exclusion.
## Filters sidebar
The sidebar has two sections:
**eBay Search** — settings forwarded to eBay on re-search:
- Category filter
- Price range (min/max)
- Pages to fetch
- Data source (Auto / API / Scraper)
**Filter Results** — applied instantly to current results:
- Min trust score slider
- Min account age / Min feedback count
- Hide listings checkboxes
## Saved searches
Click the bookmark icon next to the Search button to save a search with its current filter settings. Saved searches appear in the **Saved** view and can be re-run with one click, restoring all filters.

View file

@ -1,25 +0,0 @@
# Settings
Navigate to **Settings** in the sidebar to access preferences.
## Community
### Trust score feedback
Shows "This score looks right / wrong" buttons on each listing card. Your feedback is recorded anonymously and used to improve trust scoring for all users.
This is opt-in and enabled by default.
## Affiliate Links (cloud accounts only)
### Opt out of affiliate links
When enabled, listing links go directly to eBay without an affiliate code. Your purchases won't generate revenue for Snipe's development.
By default, Snipe includes an affiliate code in eBay links at no cost to you — you pay the same price either way.
### Custom affiliate ID (Premium)
Premium subscribers can supply their own eBay Partner Network (EPN) campaign ID. When set, your eBay purchases through Snipe links generate revenue for your own EPN account instead of Snipe's.
This requires an active EPN account at [partnernetwork.ebay.com](https://partnernetwork.ebay.com).

View file

@ -1,39 +0,0 @@
# Trust Scores
## How scoring works
Each listing gets a composite trust score from 0–100, built from five signals:
| Signal | Max points | What it measures |
|--------|-----------|-----------------|
| `account_age` | 20 | Days since the seller's eBay account was registered |
| `feedback_count` | 20 | Total feedback received (volume proxy for experience) |
| `feedback_ratio` | 20 | Percentage of positive feedback |
| `price_vs_market` | 20 | How the listing price compares to recent completed sales |
| `category_history` | 20 | Whether the seller has a history in this item category |
The composite score is the sum of available signals divided by the maximum possible from available signals. Missing signals don't penalize the seller — they reduce the max rather than adding a zero.
## Score bands
| Score | Label | Meaning |
|-------|-------|---------|
| 70–100 | Green | Established seller, no major concerns |
| 40–69 | Yellow | Some signals marginal or missing |
| 0–39 | Red | Multiple red flags — proceed carefully |
## Zero-feedback cap
A seller with zero feedback is hard-capped at a composite score of **35**, regardless of other signals. Zero feedback is the single strongest indicator of a fraudulent or new account, and it would be misleading to allow such a seller to score higher based on price alignment alone.
## Partial scores
When account age hasn't yet been enriched (the BTF scraper is still running), the score is marked **partial** and shown with a spinning indicator. Partial scores are based on available signals only and update automatically when enrichment completes — typically within 30–60 seconds per seller.
## STEAL badge
The **STEAL** badge appears when a listing's price is significantly below the market median from recently completed sales. This is a useful signal for buyers, but it can also indicate a scam — always cross-reference with the trust score and red flags.
## Market comps
Market price data comes from eBay's Marketplace Insights API (completed sales). When this API is unavailable (requires an approved eBay developer account), Snipe falls back to listing prices from the Browse API, which is less accurate. The market price shown in search results reflects whichever source was available.

View file

@ -1,384 +1,226 @@
#!/usr/bin/env bash
# Snipe — self-hosted installer
# Snipe — self-hosted install script
#
# Supports two install paths:
# Docker (recommended) — everything in containers, no system Python deps required
# Bare metal — conda or pip venv + uvicorn, for machines without Docker
# No-Docker — conda or venv + direct uvicorn, for machines without Docker
#
# Usage:
# bash install.sh # interactive (auto-detects Docker)
# bash install.sh --docker # Docker Compose setup only
# bash install.sh --bare-metal # conda or venv + uvicorn
# bash install.sh --help
# bash install.sh # installs to ~/snipe
# bash install.sh /opt/snipe # custom install directory
# bash install.sh ~/snipe --no-docker # force no-Docker path even if Docker present
#
# No account or API key required. eBay credentials are optional (faster searches).
# Requirements (Docker path): Docker with Compose plugin, Git
# Requirements (no-Docker path): Python 3.11+, Node.js 20+, Git, xvfb (system)
set -euo pipefail
# ── Terminal colours ───────────────────────────────────────────────────────────
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
BOLD='\033[1m'
DIM='\033[2m'
NC='\033[0m'
info() { echo -e "${BLUE}${NC} $*"; }
ok() { echo -e "${GREEN}${NC} $*"; }
warn() { echo -e "${YELLOW}${NC} $*"; }
error() { echo -e "${RED}${NC} $*" >&2; }
header() { echo; echo -e "${BOLD}$*${NC}"; printf '%0.s─' {1..60}; echo; }
dim() { echo -e "${DIM}$*${NC}"; }
ask() { echo -e "${CYAN}?${NC} ${BOLD}$*${NC}"; }
fail() { error "$*"; exit 1; }
# ── Paths ──────────────────────────────────────────────────────────────────────
SNIPE_CONFIG_DIR="${HOME}/.config/circuitforge"
SNIPE_ENV_FILE="${SNIPE_CONFIG_DIR}/snipe.env"
SNIPE_VENV_DIR="${SNIPE_CONFIG_DIR}/venv"
INSTALL_DIR="${1:-$HOME/snipe}"
FORCE_NO_DOCKER="${2:-}"
FORGEJO="https://git.opensourcesolarpunk.com/Circuit-Forge"
CONDA_ENV="cf"
# Default install directory. Overridable:
# SNIPE_DIR=/opt/snipe bash install.sh
SNIPE_INSTALL_DIR="${SNIPE_DIR:-${HOME}/snipe}"
info() { echo " [snipe] $*"; }
ok() { echo "$*"; }
warn() { echo "! $*"; }
fail() { echo "$*" >&2; exit 1; }
hr() { echo "────────────────────────────────────────────────────────"; }
# ── Argument parsing ───────────────────────────────────────────────────────────
MODE_FORCE=""
for arg in "$@"; do
case "$arg" in
--bare-metal) MODE_FORCE="bare-metal" ;;
--docker) MODE_FORCE="docker" ;;
--help|-h)
echo "Usage: bash install.sh [--docker|--bare-metal|--help]"
echo
echo " --docker Docker Compose install (recommended)"
echo " --bare-metal conda or pip venv + uvicorn"
echo " --help Show this message"
echo
echo " Set SNIPE_DIR=/path to change the install directory (default: ~/snipe)"
exit 0
;;
*) echo "Unknown argument: $arg" >&2; exit 1 ;;
esac
done
echo ""
echo " Snipe — self-hosted installer"
echo " Install directory: $INSTALL_DIR"
echo ""
# ── Banner ─────────────────────────────────────────────────────────────────────
echo
echo -e "${BOLD} 🎯 Snipe — eBay listing intelligence${NC}"
echo -e "${DIM} Bid with confidence. Privacy-first, no account required.${NC}"
echo -e "${DIM} Part of the Circuit Forge LLC suite (BSL 1.1)${NC}"
echo
# ── System checks ──────────────────────────────────────────────────────────────
header "System checks"
# ── Detect capabilities ──────────────────────────────────────────────────────
HAS_DOCKER=false
HAS_CONDA=false
HAS_CONDA_CMD=""
HAS_PYTHON=false
HAS_NODE=false
HAS_CHROMIUM=false
HAS_XVFB=false
command -v git >/dev/null 2>&1 || fail "Git is required. Install: sudo apt-get install git"
ok "Git found"
docker compose version >/dev/null 2>&1 && HAS_DOCKER=true
if $HAS_DOCKER; then ok "Docker (Compose plugin) found"; fi
conda --version >/dev/null 2>&1 && HAS_CONDA=true
python3 --version >/dev/null 2>&1 && HAS_PYTHON=true
node --version >/dev/null 2>&1 && HAS_NODE=true
command -v git >/dev/null 2>&1 || fail "Git is required. Install with: sudo apt-get install git"
# Detect conda / mamba / micromamba in preference order
for _c in conda mamba micromamba; do
if command -v "$_c" >/dev/null 2>&1; then
HAS_CONDA=true
HAS_CONDA_CMD="$_c"
ok "Conda manager found: $_c"
break
fi
done
# Honour --no-docker flag
[[ "$FORCE_NO_DOCKER" == "--no-docker" ]] && HAS_DOCKER=false
# Python 3.11+ check
if command -v python3 >/dev/null 2>&1; then
_py_ok=$(python3 -c "import sys; print(sys.version_info >= (3,11))" 2>/dev/null || echo "False")
if [[ "$_py_ok" == "True" ]]; then
HAS_PYTHON=true
ok "Python 3.11+ found ($(python3 --version))"
else
warn "Python found but version is below 3.11 ($(python3 --version)) — bare-metal path may fail"
fi
fi
command -v node >/dev/null 2>&1 && HAS_NODE=true
if $HAS_NODE; then ok "Node.js found ($(node --version))"; fi
# Chromium / Google Chrome — needed for the Kasada-bypass scraper
for _chrome in google-chrome chromium-browser chromium; do
if command -v "$_chrome" >/dev/null 2>&1; then
HAS_CHROMIUM=true
ok "Chromium/Chrome found: $_chrome"
break
fi
done
if ! $HAS_CHROMIUM; then
warn "Chromium / Google Chrome not found."
warn "Snipe uses headed Chromium + Xvfb to bypass eBay's Kasada anti-bot."
warn "The installer will install Chromium via Playwright. If that fails,"
warn "add eBay API credentials to .env to use the API adapter instead."
fi
# Xvfb — virtual framebuffer for headed Chromium on headless servers
command -v Xvfb >/dev/null 2>&1 && HAS_XVFB=true
if $HAS_XVFB; then ok "Xvfb found"; fi
# ── Mode selection ─────────────────────────────────────────────────────────────
header "Install mode"
INSTALL_MODE=""
if [[ -n "$MODE_FORCE" ]]; then
INSTALL_MODE="$MODE_FORCE"
info "Mode forced: $INSTALL_MODE"
elif $HAS_DOCKER; then
INSTALL_MODE="docker"
ok "Docker available — using Docker install (recommended)"
dim " Pass --bare-metal to override"
if $HAS_DOCKER; then
INSTALL_PATH="docker"
ok "Docker found — using Docker install path (recommended)"
elif $HAS_PYTHON; then
INSTALL_MODE="bare-metal"
warn "Docker not found — using bare-metal install"
INSTALL_PATH="python"
warn "Docker not found — using no-Docker path (conda or venv)"
else
fail "Docker or Python 3.11+ is required. Install Docker: https://docs.docker.com/get-docker/"
fi
# ── Clone repos ───────────────────────────────────────────────────────────────
header "Clone repositories"
# ── Clone repos ──────────────────────────────────────────────────────────────
# compose.yml and the Dockerfile both use context: .. (parent directory), so
# snipe/ and circuitforge-core/ must be siblings inside SNIPE_INSTALL_DIR.
REPO_DIR="$SNIPE_INSTALL_DIR"
SNIPE_DIR_ACTUAL="$REPO_DIR/snipe"
CORE_DIR="$REPO_DIR/circuitforge-core"
# snipe/ and circuitforge-core/ must be siblings inside INSTALL_DIR.
SNIPE_DIR="$INSTALL_DIR/snipe"
CORE_DIR="$INSTALL_DIR/circuitforge-core"
_clone_or_pull() {
    # Fetch a repository into a target directory: fast-forward an existing
    # checkout, otherwise clone it fresh.
    #   $1 = human-readable label   $2 = git URL   $3 = destination path
    local name="$1" repo_url="$2" target="$3"
    if [[ ! -d "$target/.git" ]]; then
        info "Cloning $name..."
        mkdir -p "$(dirname "$target")"
        git clone "$repo_url" "$target"
    else
        info "$name already cloned — pulling latest..."
        git -C "$target" pull --ff-only
    fi
    ok "$name → $target"
}
if [[ -d "$SNIPE_DIR" ]]; then
info "Snipe already cloned — pulling latest..."
git -C "$SNIPE_DIR" pull --ff-only
else
info "Cloning Snipe..."
mkdir -p "$INSTALL_DIR"
git clone "$FORGEJO/snipe.git" "$SNIPE_DIR"
fi
ok "Snipe → $SNIPE_DIR"
_clone_or_pull "snipe" "$FORGEJO/snipe.git" "$SNIPE_DIR_ACTUAL"
_clone_or_pull "circuitforge-core" "$FORGEJO/circuitforge-core.git" "$CORE_DIR"
if [[ -d "$CORE_DIR" ]]; then
info "circuitforge-core already cloned — pulling latest..."
git -C "$CORE_DIR" pull --ff-only
else
info "Cloning circuitforge-core (shared library)..."
git clone "$FORGEJO/circuitforge-core.git" "$CORE_DIR"
fi
ok "circuitforge-core → $CORE_DIR"
# ── Config file ────────────────────────────────────────────────────────────────
header "Configuration"
# ── Configure environment ────────────────────────────────────────────────────
ENV_FILE="$SNIPE_DIR_ACTUAL/.env"
ENV_FILE="$SNIPE_DIR/.env"
if [[ ! -f "$ENV_FILE" ]]; then
cp "$SNIPE_DIR_ACTUAL/.env.example" "$ENV_FILE"
# Disable webhook signature verification for local installs
# (no production eBay key yet — the endpoint won't be registered)
cp "$SNIPE_DIR/.env.example" "$ENV_FILE"
# Safe defaults for local installs — no eBay registration, no Heimdall
sed -i 's/^EBAY_WEBHOOK_VERIFY_SIGNATURES=true/EBAY_WEBHOOK_VERIFY_SIGNATURES=false/' "$ENV_FILE"
ok ".env created from .env.example"
echo
dim " Snipe works out of the box with no API keys (scraper mode)."
dim " Add EBAY_APP_ID / EBAY_CERT_ID later for faster searches (optional)."
dim " Edit: $ENV_FILE"
echo
echo ""
info "Snipe works out of the box with no API keys."
info "Add EBAY_APP_ID / EBAY_CERT_ID later for faster searches (optional)."
echo ""
else
info ".env already exists — skipping (delete to reset defaults)"
info ".env already exists — skipping (delete it to reset)"
fi
# ── License key (optional) ─────────────────────────────────────────────────────
header "CircuitForge license key (optional)"
dim " Snipe is free to self-host. A Paid/Premium key unlocks cloud features"
dim " (photo analysis, eBay OAuth). Skip this if you don't have one."
echo
ask "Enter your license key, or press Enter to skip:"
read -r _license_key || true
cd "$SNIPE_DIR"
if [[ -n "${_license_key:-}" ]]; then
_key_re='^CFG-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}-[A-Z0-9]{4}$'
if echo "$_license_key" | grep -qP "$_key_re" 2>/dev/null || \
echo "$_license_key" | grep -qE "$_key_re" 2>/dev/null; then
# Append / uncomment Heimdall vars in .env
if grep -q "^# HEIMDALL_URL=" "$ENV_FILE" 2>/dev/null; then
sed -i "s|^# HEIMDALL_URL=.*|HEIMDALL_URL=https://license.circuitforge.tech|" "$ENV_FILE"
else
echo "HEIMDALL_URL=https://license.circuitforge.tech" >> "$ENV_FILE"
fi
# Write or replace CF_LICENSE_KEY
if grep -q "^CF_LICENSE_KEY=" "$ENV_FILE" 2>/dev/null; then
sed -i "s|^CF_LICENSE_KEY=.*|CF_LICENSE_KEY=${_license_key}|" "$ENV_FILE"
else
echo "CF_LICENSE_KEY=${_license_key}" >> "$ENV_FILE"
fi
ok "License key saved to .env"
else
warn "Key format not recognised (expected CFG-XXXX-XXXX-XXXX-XXXX) — skipping."
warn "Edit $ENV_FILE to add it manually."
fi
else
info "No license key entered — self-hosted free tier."
fi
# ── Docker install path ───────────────────────────────────────────────────────
# ── Docker install ─────────────────────────────────────────────────────────────
_install_docker() {
header "Docker install"
cd "$SNIPE_DIR_ACTUAL"
if [[ "$INSTALL_PATH" == "docker" ]]; then
info "Building Docker images (~1 GB download on first run)..."
docker compose build
info "Starting Snipe..."
docker compose up -d
echo
echo ""
ok "Snipe is running!"
printf '%0.s─' {1..60}; echo
echo -e " ${GREEN}Web UI:${NC} http://localhost:8509"
echo -e " ${GREEN}API:${NC} http://localhost:8510/docs"
echo
echo -e " ${DIM}Manage: cd $SNIPE_DIR_ACTUAL && ./manage.sh {start|stop|restart|logs|test}${NC}"
printf '%0.s─' {1..60}; echo
echo
}
hr
echo " Web UI: http://localhost:8509"
echo " API: http://localhost:8510/docs"
echo ""
echo " Manage: cd $SNIPE_DIR && ./manage.sh {start|stop|restart|logs|test}"
hr
echo ""
exit 0
fi
# ── Bare-metal install ─────────────────────────────────────────────────────────
_install_xvfb() {
if $HAS_XVFB; then return; fi
# ── No-Docker install path ───────────────────────────────────────────────────
# System deps: Xvfb is required for Playwright (Kasada bypass via headed Chromium)
if ! command -v Xvfb >/dev/null 2>&1; then
info "Installing Xvfb (required for eBay scraper)..."
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get install -y --no-install-recommends xvfb
ok "Xvfb installed"
elif command -v dnf >/dev/null 2>&1; then
sudo dnf install -y xorg-x11-server-Xvfb
ok "Xvfb installed"
elif command -v brew >/dev/null 2>&1; then
warn "macOS: Xvfb not available via Homebrew."
warn "The scraper (Kasada bypass) will not work on macOS."
warn "macOS: Xvfb not available. The scraper fallback may fail."
warn "Add eBay API credentials to .env to use the API adapter instead."
else
warn "Could not install Xvfb automatically. Install it with your system package manager."
warn " Debian/Ubuntu: sudo apt-get install xvfb"
warn " Fedora/RHEL: sudo dnf install xorg-x11-server-Xvfb"
warn "Could not install Xvfb automatically. Install it with your package manager."
fi
}
fi
_setup_python_env() {
if $HAS_CONDA; then
info "Setting up conda environment (manager: $HAS_CONDA_CMD)..."
_env_name="cf"
if "$HAS_CONDA_CMD" env list 2>/dev/null | grep -q "^${_env_name} "; then
info "Conda env '$_env_name' already exists — updating packages..."
else
"$HAS_CONDA_CMD" create -n "$_env_name" python=3.11 -y
fi
"$HAS_CONDA_CMD" run -n "$_env_name" pip install --quiet -e "$CORE_DIR"
"$HAS_CONDA_CMD" run -n "$_env_name" pip install --quiet -e "$SNIPE_DIR_ACTUAL"
"$HAS_CONDA_CMD" run -n "$_env_name" playwright install chromium
"$HAS_CONDA_CMD" run -n "$_env_name" playwright install-deps chromium
PYTHON_BIN="$HAS_CONDA_CMD run -n $_env_name"
ok "Conda environment '$_env_name' ready"
# ── Python environment setup ─────────────────────────────────────────────────
if $HAS_CONDA; then
info "Setting up conda environment '$CONDA_ENV'..."
if conda env list | grep -q "^$CONDA_ENV "; then
info "Conda env '$CONDA_ENV' already exists — updating..."
conda run -n "$CONDA_ENV" pip install --quiet -e "$CORE_DIR"
conda run -n "$CONDA_ENV" pip install --quiet -e "$SNIPE_DIR"
else
info "Setting up pip venv at $SNIPE_VENV_DIR ..."
mkdir -p "$SNIPE_CONFIG_DIR"
python3 -m venv "$SNIPE_VENV_DIR"
"$SNIPE_VENV_DIR/bin/pip" install --quiet -e "$CORE_DIR"
"$SNIPE_VENV_DIR/bin/pip" install --quiet -e "$SNIPE_DIR_ACTUAL"
"$SNIPE_VENV_DIR/bin/playwright" install chromium
"$SNIPE_VENV_DIR/bin/playwright" install-deps chromium
PYTHON_BIN="$SNIPE_VENV_DIR/bin"
ok "Python venv ready at $SNIPE_VENV_DIR"
conda create -n "$CONDA_ENV" python=3.11 -y
conda run -n "$CONDA_ENV" pip install --quiet -e "$CORE_DIR"
conda run -n "$CONDA_ENV" pip install --quiet -e "$SNIPE_DIR"
fi
}
conda run -n "$CONDA_ENV" playwright install chromium
conda run -n "$CONDA_ENV" playwright install-deps chromium
PYTHON_RUN="conda run -n $CONDA_ENV"
ok "Conda environment '$CONDA_ENV' ready"
else
info "Setting up Python venv at $SNIPE_DIR/.venv ..."
python3 -m venv "$SNIPE_DIR/.venv"
"$SNIPE_DIR/.venv/bin/pip" install --quiet -e "$CORE_DIR"
"$SNIPE_DIR/.venv/bin/pip" install --quiet -e "$SNIPE_DIR"
"$SNIPE_DIR/.venv/bin/playwright" install chromium
"$SNIPE_DIR/.venv/bin/playwright" install-deps chromium
PYTHON_RUN="$SNIPE_DIR/.venv/bin"
ok "Python venv ready at $SNIPE_DIR/.venv"
fi
_build_frontend() {
if ! $HAS_NODE; then
warn "Node.js not found — skipping frontend build."
warn "Install Node.js 20+ from https://nodejs.org and re-run install.sh."
warn "Until then, access the API at http://localhost:8510/docs"
return
fi
# ── Frontend ─────────────────────────────────────────────────────────────────
if $HAS_NODE; then
info "Building Vue frontend..."
cd "$SNIPE_DIR_ACTUAL/web"
cd "$SNIPE_DIR/web"
npm ci --prefer-offline --silent
npm run build
cd "$SNIPE_DIR_ACTUAL"
cd "$SNIPE_DIR"
ok "Frontend built → web/dist/"
}
else
warn "Node.js not found — skipping frontend build."
warn "Install Node.js 20+ from https://nodejs.org and re-run install.sh to build the UI."
warn "Until then, you can access the API directly at http://localhost:8510/docs"
fi
_write_start_scripts() {
# start-local.sh — launches the FastAPI server
cat > "$SNIPE_DIR_ACTUAL/start-local.sh" << 'STARTSCRIPT'
# ── Write start/stop scripts ─────────────────────────────────────────────────
cat > "$SNIPE_DIR/start-local.sh" << 'STARTSCRIPT'
#!/usr/bin/env bash
# Start Snipe API (bare-metal / no-Docker mode)
# Start Snipe without Docker (API only — run from the snipe/ directory)
set -euo pipefail
cd "$(dirname "$0")"
if [[ -f "$HOME/.config/circuitforge/venv/bin/uvicorn" ]]; then
UVICORN="$HOME/.config/circuitforge/venv/bin/uvicorn"
elif command -v conda >/dev/null 2>&1 && conda env list 2>/dev/null | grep -q "^cf "; then
if [[ -f .venv/bin/uvicorn ]]; then
UVICORN=".venv/bin/uvicorn"
elif command -v conda >/dev/null 2>&1 && conda env list | grep -q "^cf "; then
UVICORN="conda run -n cf uvicorn"
elif command -v mamba >/dev/null 2>&1 && mamba env list 2>/dev/null | grep -q "^cf "; then
UVICORN="mamba run -n cf uvicorn"
else
echo "No Snipe Python environment found. Run install.sh first." >&2; exit 1
echo "No Python env found. Run install.sh first." >&2; exit 1
fi
mkdir -p data
echo "Starting Snipe API http://localhost:8510 ..."
exec $UVICORN api.main:app --host 0.0.0.0 --port 8510 "${@}"
echo "Starting Snipe API on http://localhost:8510 ..."
$UVICORN api.main:app --host 0.0.0.0 --port 8510 "${@}"
STARTSCRIPT
chmod +x "$SNIPE_DIR_ACTUAL/start-local.sh"
chmod +x "$SNIPE_DIR/start-local.sh"
# serve-ui.sh — serves the built Vue frontend (dev only)
cat > "$SNIPE_DIR_ACTUAL/serve-ui.sh" << 'UISCRIPT'
# Frontend serving (if built)
cat > "$SNIPE_DIR/serve-ui.sh" << 'UISCRIPT'
#!/usr/bin/env bash
# Serve the pre-built Vue frontend (dev only — use nginx for production).
# See docs/nginx-self-hosted.conf for a production nginx config.
# Serve the pre-built Vue frontend on port 8509 (dev only — use nginx for production)
cd "$(dirname "$0")/web/dist"
echo "Serving Snipe UI → http://localhost:8509 (Ctrl+C to stop)"
exec python3 -m http.server 8509
python3 -m http.server 8509
UISCRIPT
chmod +x "$SNIPE_DIR_ACTUAL/serve-ui.sh"
chmod +x "$SNIPE_DIR/serve-ui.sh"
ok "Start scripts written"
}
_install_bare_metal() {
header "Bare-metal install"
_install_xvfb
_setup_python_env
_build_frontend
_write_start_scripts
echo
ok "Snipe installed (bare-metal mode)"
printf '%0.s─' {1..60}; echo
echo -e " ${GREEN}Start API:${NC} cd $SNIPE_DIR_ACTUAL && ./start-local.sh"
echo -e " ${GREEN}Serve UI:${NC} cd $SNIPE_DIR_ACTUAL && ./serve-ui.sh ${DIM}(separate terminal)${NC}"
echo -e " ${GREEN}API docs:${NC} http://localhost:8510/docs"
echo -e " ${GREEN}Web UI:${NC} http://localhost:8509 ${DIM}(after ./serve-ui.sh)${NC}"
echo
echo -e " ${DIM}For production, configure nginx to proxy /api/ to localhost:8510${NC}"
echo -e " ${DIM}and serve web/dist/ as the document root.${NC}"
echo -e " ${DIM}See: $SNIPE_DIR_ACTUAL/docs/nginx-self-hosted.conf${NC}"
printf '%0.s─' {1..60}; echo
echo
}
# ── Main ───────────────────────────────────────────────────────────────────────
main() {
if [[ "$INSTALL_MODE" == "docker" ]]; then
_install_docker
else
_install_bare_metal
fi
}
main
echo ""
ok "Snipe installed (no-Docker mode)"
hr
echo " Start API: cd $SNIPE_DIR && ./start-local.sh"
echo " Serve UI: cd $SNIPE_DIR && ./serve-ui.sh (separate terminal)"
echo " API docs: http://localhost:8510/docs"
echo " Web UI: http://localhost:8509 (after ./serve-ui.sh)"
echo ""
echo " For production, point nginx at web/dist/ and proxy /api/ to localhost:8510"
hr
echo ""

View file

@ -1,66 +0,0 @@
site_name: Snipe
site_description: eBay trust scoring before you bid — catch scammers, flag suspicious prices, surface duplicate photos.
site_author: Circuit Forge LLC
site_url: https://docs.circuitforge.tech/snipe
repo_url: https://git.opensourcesolarpunk.com/Circuit-Forge/snipe
repo_name: Circuit-Forge/snipe
theme:
name: material
palette:
- scheme: default
primary: deep orange
accent: orange
toggle:
icon: material/brightness-7
name: Switch to dark mode
- scheme: slate
primary: deep orange
accent: orange
toggle:
icon: material/brightness-4
name: Switch to light mode
features:
- navigation.tabs
- navigation.sections
- navigation.expand
- navigation.top
- search.suggest
- search.highlight
- content.code.copy
markdown_extensions:
- admonition
- pymdownx.details
- pymdownx.superfences:
custom_fences:
- name: mermaid
class: mermaid
format: !!python/name:pymdownx.superfences.fence_code_format
- pymdownx.highlight:
anchor_linenums: true
- pymdownx.tabbed:
alternate_style: true
- tables
- toc:
permalink: true
nav:
- Home: index.md
- Getting Started:
- Installation: getting-started/installation.md
- Quick Start: getting-started/quick-start.md
- eBay API Keys (Optional): getting-started/ebay-api.md
- User Guide:
- Searching: user-guide/searching.md
- Trust Scores: user-guide/trust-scores.md
- Red Flags: user-guide/red-flags.md
- Community Blocklist: user-guide/blocklist.md
- Settings: user-guide/settings.md
- Reference:
- Trust Score Algorithm: reference/trust-scoring.md
- Tier System: reference/tier-system.md
- Architecture: reference/architecture.md
extra_javascript:
- plausible.js

View file

@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "snipe"
version = "0.3.0"
version = "0.2.0"
description = "Auction listing monitor and trust scorer"
requires-python = ">=3.11"
dependencies = [
@ -25,33 +25,9 @@ dependencies = [
"PyJWT>=2.8",
]
[project.optional-dependencies]
dev = [
"pytest>=8.0",
"pytest-cov>=5.0",
"ruff>=0.4",
"httpx>=0.27", # FastAPI test client
]
[tool.setuptools.packages.find]
where = ["."]
include = ["app*", "api*"]
[tool.pytest.ini_options]
testpaths = ["tests"]
markers = [
"browser: tests that require a headed Chromium browser (Xvfb + playwright install required)",
]
[tool.ruff]
line-length = 100
target-version = "py311"
[tool.ruff.lint]
select = ["E", "F", "W", "I"]
ignore = [
"E501", # line length — handled by formatter
"E402", # module-import-not-at-top — intentional for conditional/lazy imports
"E701", # multiple-statements-colon — `if x: return y` is accepted style
"E741", # ambiguous variable name — l/q used intentionally for listing/query
]

View file

@ -1,8 +1,6 @@
"""Streamlit entrypoint."""
from pathlib import Path
import streamlit as st
from app.wizard import SnipeSetupWizard
st.set_page_config(
@ -18,7 +16,6 @@ if not wizard.is_configured():
st.stop()
from app.ui.components.easter_eggs import inject_konami_detector
inject_konami_detector()
with st.sidebar:
@ -30,5 +27,4 @@ with st.sidebar:
)
from app.ui.Search import render
render(audio_enabled=audio_enabled)

View file

@ -1,9 +1,8 @@
from datetime import datetime, timedelta, timezone
import pytest
from app.db.models import Listing, MarketComp, Seller
from datetime import datetime, timedelta, timezone
from pathlib import Path
from app.db.store import Store
from app.db.models import Listing, Seller, TrustScore, MarketComp
@pytest.fixture

View file

@ -1,466 +0,0 @@
"""Tests for app.platforms.ebay.browser_pool.
All tests run without real Chromium / Xvfb / Playwright.
Playwright, Xvfb subprocess calls, and Stealth are mocked throughout.
"""
from __future__ import annotations
import queue
import subprocess
import threading
import time
from typing import Any
from unittest.mock import MagicMock, patch, call
import pytest
# ---------------------------------------------------------------------------
# Helpers to reset the module-level singleton between tests
# ---------------------------------------------------------------------------
def _reset_pool_singleton():
"""Force the module-level _pool singleton back to None."""
import app.platforms.ebay.browser_pool as _mod
_mod._pool = None
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def reset_singleton():
"""Reset the singleton before and after every test."""
_reset_pool_singleton()
yield
_reset_pool_singleton()
def _make_fake_slot():
"""Build a mock _PooledBrowser with all necessary attributes."""
from app.platforms.ebay.browser_pool import _PooledBrowser
xvfb = MagicMock(spec=subprocess.Popen)
pw = MagicMock()
browser = MagicMock()
ctx = MagicMock()
slot = _PooledBrowser(
xvfb=xvfb,
pw=pw,
browser=browser,
ctx=ctx,
display_num=100,
last_used_ts=time.time(),
)
return slot
# ---------------------------------------------------------------------------
# Singleton tests
# ---------------------------------------------------------------------------
class TestGetPoolSingleton:
def test_returns_same_instance(self):
from app.platforms.ebay.browser_pool import get_pool, BrowserPool
p1 = get_pool()
p2 = get_pool()
assert p1 is p2
def test_returns_browser_pool_instance(self):
from app.platforms.ebay.browser_pool import get_pool, BrowserPool
assert isinstance(get_pool(), BrowserPool)
def test_default_size_is_two(self):
from app.platforms.ebay.browser_pool import get_pool
pool = get_pool()
assert pool._size == 2
def test_custom_size_from_env(self, monkeypatch):
monkeypatch.setenv("BROWSER_POOL_SIZE", "5")
from app.platforms.ebay.browser_pool import get_pool
pool = get_pool()
assert pool._size == 5
# ---------------------------------------------------------------------------
# start() / stop() lifecycle tests
# ---------------------------------------------------------------------------
class TestLifecycle:
def test_start_is_noop_when_playwright_unavailable(self):
"""Pool should handle missing Playwright gracefully — no error raised."""
from app.platforms.ebay.browser_pool import BrowserPool
pool = BrowserPool(size=2)
with patch.object(pool, "_check_playwright", return_value=False):
pool.start() # must not raise
# Pool queue is empty — no slots launched.
assert pool._q.empty()
def test_start_only_runs_once(self):
"""Calling start() twice must not double-warm."""
from app.platforms.ebay.browser_pool import BrowserPool
pool = BrowserPool(size=1)
with patch.object(pool, "_check_playwright", return_value=False):
pool.start()
pool.start()
assert pool._started is True
def test_stop_drains_queue(self):
"""stop() should close every slot in the queue."""
from app.platforms.ebay.browser_pool import BrowserPool
pool = BrowserPool(size=2)
slot1 = _make_fake_slot()
slot2 = _make_fake_slot()
pool._q.put(slot1)
pool._q.put(slot2)
with patch("app.platforms.ebay.browser_pool._close_slot") as mock_close:
pool.stop()
assert mock_close.call_count == 2
assert pool._q.empty()
assert pool._stopped is True
def test_stop_on_empty_pool_is_safe(self):
from app.platforms.ebay.browser_pool import BrowserPool
pool = BrowserPool(size=2)
pool.stop() # must not raise
# ---------------------------------------------------------------------------
# fetch_html — pool hit path
# ---------------------------------------------------------------------------
class TestFetchHtmlPoolHit:
def test_uses_pooled_slot_and_replenishes(self):
"""fetch_html should borrow a slot, call _fetch_with_slot, replenish."""
from app.platforms.ebay.browser_pool import BrowserPool
pool = BrowserPool(size=1)
slot = _make_fake_slot()
pool._q.put(slot)
fresh_slot = _make_fake_slot()
with (
patch.object(pool, "_fetch_with_slot", return_value="<html>ok</html>") as mock_fetch,
patch("app.platforms.ebay.browser_pool._replenish_slot", return_value=fresh_slot) as mock_replenish,
patch("time.sleep"),
):
html = pool.fetch_html("https://www.ebay.com/sch/i.html?_nkw=test", delay=0)
assert html == "<html>ok</html>"
mock_fetch.assert_called_once_with(slot, "https://www.ebay.com/sch/i.html?_nkw=test")
mock_replenish.assert_called_once_with(slot)
# Fresh slot returned to queue
assert pool._q.get_nowait() is fresh_slot
def test_delay_is_respected(self):
"""fetch_html must call time.sleep(delay)."""
from app.platforms.ebay.browser_pool import BrowserPool
pool = BrowserPool(size=1)
slot = _make_fake_slot()
pool._q.put(slot)
with (
patch.object(pool, "_fetch_with_slot", return_value="<html/>"),
patch("app.platforms.ebay.browser_pool._replenish_slot", return_value=_make_fake_slot()),
patch("app.platforms.ebay.browser_pool.time") as mock_time,
):
pool.fetch_html("https://example.com", delay=1.5)
mock_time.sleep.assert_called_once_with(1.5)
# ---------------------------------------------------------------------------
# fetch_html — pool empty / fallback path
# ---------------------------------------------------------------------------
class TestFetchHtmlFallback:
def test_falls_back_to_fresh_browser_when_pool_empty(self):
"""When pool is empty after timeout, _fetch_fresh should be called."""
from app.platforms.ebay.browser_pool import BrowserPool
pool = BrowserPool(size=1)
# Queue is empty — no slots available.
with (
patch.object(pool, "_fetch_fresh", return_value="<html>fresh</html>") as mock_fresh,
patch("time.sleep"),
# Make Queue.get raise Empty after a short wait.
patch.object(pool._q, "get", side_effect=queue.Empty),
):
html = pool.fetch_html("https://www.ebay.com/sch/i.html?_nkw=widget", delay=0)
assert html == "<html>fresh</html>"
mock_fresh.assert_called_once_with("https://www.ebay.com/sch/i.html?_nkw=widget")
def test_falls_back_when_pooled_fetch_raises(self):
"""If _fetch_with_slot raises, the slot is closed and _fetch_fresh is used."""
from app.platforms.ebay.browser_pool import BrowserPool
pool = BrowserPool(size=1)
slot = _make_fake_slot()
pool._q.put(slot)
with (
patch.object(pool, "_fetch_with_slot", side_effect=RuntimeError("Chromium crashed")),
patch.object(pool, "_fetch_fresh", return_value="<html>recovered</html>") as mock_fresh,
patch("app.platforms.ebay.browser_pool._close_slot") as mock_close,
patch("time.sleep"),
):
html = pool.fetch_html("https://www.ebay.com/", delay=0)
assert html == "<html>recovered</html>"
mock_close.assert_called_once_with(slot)
mock_fresh.assert_called_once()
# ---------------------------------------------------------------------------
# ImportError graceful fallback
# ---------------------------------------------------------------------------
class TestImportErrorHandling:
def test_check_playwright_returns_false_on_import_error(self):
"""_check_playwright should cache False when playwright is not installed."""
from app.platforms.ebay.browser_pool import BrowserPool
pool = BrowserPool(size=2)
with patch.dict("sys.modules", {"playwright": None, "playwright_stealth": None}):
# Force re-check by clearing the cached value.
pool._playwright_available = None
result = pool._check_playwright()
assert result is False
assert pool._playwright_available is False
def test_start_logs_warning_when_playwright_missing(self, caplog):
"""start() should log a warning and not crash when Playwright is absent."""
import logging
from app.platforms.ebay.browser_pool import BrowserPool
pool = BrowserPool(size=1)
pool._playwright_available = False # simulate missing
with patch.object(pool, "_check_playwright", return_value=False):
with caplog.at_level(logging.WARNING, logger="app.platforms.ebay.browser_pool"):
pool.start()
assert any("not available" in r.message for r in caplog.records)
def test_fetch_fresh_raises_runtime_error_when_playwright_missing(self):
"""_fetch_fresh must raise RuntimeError (not ImportError) when PW absent."""
from app.platforms.ebay.browser_pool import BrowserPool
pool = BrowserPool(size=1)
with patch.dict("sys.modules", {"playwright": None, "playwright.sync_api": None}):
with pytest.raises(RuntimeError, match="Playwright not installed"):
pool._fetch_fresh("https://www.ebay.com/")
# ---------------------------------------------------------------------------
# Idle cleanup
# ---------------------------------------------------------------------------
class TestIdleCleanup:
def test_idle_cleanup_closes_stale_slots(self):
"""_idle_cleanup_loop should close slots whose last_used_ts is too old."""
from app.platforms.ebay.browser_pool import BrowserPool, _IDLE_TIMEOUT_SECS
pool = BrowserPool(size=2)
stale_slot = _make_fake_slot()
stale_slot.last_used_ts = time.time() - (_IDLE_TIMEOUT_SECS + 60)
fresh_slot = _make_fake_slot()
fresh_slot.last_used_ts = time.time()
pool._q.put(stale_slot)
pool._q.put(fresh_slot)
closed_slots = []
def fake_close(s):
closed_slots.append(s)
with patch("app.platforms.ebay.browser_pool._close_slot", side_effect=fake_close):
# Run one cleanup tick directly (not the full loop).
now = time.time()
idle_cutoff = now - _IDLE_TIMEOUT_SECS
kept = []
while True:
try:
s = pool._q.get_nowait()
except queue.Empty:
break
if s.last_used_ts < idle_cutoff:
fake_close(s)
else:
kept.append(s)
for s in kept:
pool._q.put(s)
assert stale_slot in closed_slots
assert fresh_slot not in closed_slots
assert pool._q.qsize() == 1
def test_idle_cleanup_loop_stops_when_pool_stopped(self):
"""Cleanup daemon should exit when _stopped is True."""
from app.platforms.ebay.browser_pool import BrowserPool, _CLEANUP_INTERVAL_SECS
pool = BrowserPool(size=1)
pool._stopped = True
# The loop should return after one iteration of the while check.
# Use a very short sleep mock so the test doesn't actually wait 60s.
sleep_calls = []
def fake_sleep(secs):
sleep_calls.append(secs)
with patch("app.platforms.ebay.browser_pool.time") as mock_time:
mock_time.time.return_value = time.time()
mock_time.sleep.side_effect = fake_sleep
# Run in a thread with a short timeout to confirm it exits.
t = threading.Thread(target=pool._idle_cleanup_loop)
t.start()
t.join(timeout=2.0)
assert not t.is_alive(), "idle cleanup loop did not exit when _stopped=True"
# ---------------------------------------------------------------------------
# _replenish_slot helper
# ---------------------------------------------------------------------------
class TestReplenishSlot:
def test_replenish_closes_old_context_and_opens_new(self):
from app.platforms.ebay.browser_pool import _replenish_slot, _PooledBrowser
old_ctx = MagicMock()
new_ctx = MagicMock()
browser = MagicMock()
browser.new_context.return_value = new_ctx
slot = _PooledBrowser(
xvfb=MagicMock(),
pw=MagicMock(),
browser=browser,
ctx=old_ctx,
display_num=101,
last_used_ts=time.time() - 10,
)
result = _replenish_slot(slot)
old_ctx.close.assert_called_once()
browser.new_context.assert_called_once()
assert result.ctx is new_ctx
assert result.browser is browser
assert result.xvfb is slot.xvfb
# last_used_ts is refreshed
assert result.last_used_ts > slot.last_used_ts
# ---------------------------------------------------------------------------
# _close_slot helper
# ---------------------------------------------------------------------------
class TestCloseSlot:
def test_close_slot_closes_all_components(self):
from app.platforms.ebay.browser_pool import _close_slot, _PooledBrowser
xvfb = MagicMock(spec=subprocess.Popen)
pw = MagicMock()
browser = MagicMock()
ctx = MagicMock()
slot = _PooledBrowser(
xvfb=xvfb, pw=pw, browser=browser, ctx=ctx,
display_num=102, last_used_ts=time.time(),
)
_close_slot(slot)
ctx.close.assert_called_once()
browser.close.assert_called_once()
pw.stop.assert_called_once()
xvfb.terminate.assert_called_once()
xvfb.wait.assert_called_once()
def test_close_slot_ignores_exceptions(self):
"""_close_slot must not raise even if components throw."""
from app.platforms.ebay.browser_pool import _close_slot, _PooledBrowser
xvfb = MagicMock(spec=subprocess.Popen)
xvfb.terminate.side_effect = OSError("already dead")
xvfb.wait.side_effect = OSError("already dead")
pw = MagicMock()
pw.stop.side_effect = RuntimeError("stopped")
browser = MagicMock()
browser.close.side_effect = RuntimeError("gone")
ctx = MagicMock()
ctx.close.side_effect = RuntimeError("gone")
slot = _PooledBrowser(
xvfb=xvfb, pw=pw, browser=browser, ctx=ctx,
display_num=103, last_used_ts=time.time(),
)
_close_slot(slot) # must not raise
# ---------------------------------------------------------------------------
# Scraper integration — _fetch_url uses pool
# ---------------------------------------------------------------------------
class TestScraperUsesPool:
def test_fetch_url_delegates_to_pool(self):
"""ScrapedEbayAdapter._fetch_url must use the pool, not launch its own browser."""
from app.platforms.ebay.browser_pool import BrowserPool
from app.platforms.ebay.scraper import ScrapedEbayAdapter
from app.db.store import Store
store = MagicMock(spec=Store)
adapter = ScrapedEbayAdapter(store, delay=0)
fake_pool = MagicMock(spec=BrowserPool)
fake_pool.fetch_html.return_value = "<html>pooled</html>"
with patch("app.platforms.ebay.browser_pool.get_pool", return_value=fake_pool):
# Clear the cache so fetch_url actually hits the pool.
import app.platforms.ebay.scraper as scraper_mod
scraper_mod._html_cache.clear()
html = adapter._fetch_url("https://www.ebay.com/sch/i.html?_nkw=test")
assert html == "<html>pooled</html>"
fake_pool.fetch_html.assert_called_once_with(
"https://www.ebay.com/sch/i.html?_nkw=test", delay=0
)
def test_fetch_url_uses_cache_before_pool(self):
"""_fetch_url should return cached HTML without hitting the pool."""
from app.platforms.ebay.scraper import ScrapedEbayAdapter, _html_cache, _HTML_CACHE_TTL
from app.db.store import Store
store = MagicMock(spec=Store)
adapter = ScrapedEbayAdapter(store, delay=0)
url = "https://www.ebay.com/sch/i.html?_nkw=cached"
_html_cache[url] = ("<html>cached</html>", time.time() + _HTML_CACHE_TTL)
fake_pool = MagicMock()
with patch("app.platforms.ebay.browser_pool.get_pool", return_value=fake_pool):
html = adapter._fetch_url(url)
assert html == "<html>cached</html>"
fake_pool.fetch_html.assert_not_called()
# Cleanup
_html_cache.pop(url, None)

View file

@ -1,9 +1,7 @@
import time
from unittest.mock import MagicMock, patch
import pytest
import requests
from unittest.mock import patch, MagicMock
import pytest
from app.platforms.ebay.auth import EbayTokenManager

View file

@ -1,6 +1,4 @@
import pytest
from api.main import _extract_ebay_item_id
from app.platforms.ebay.normaliser import normalise_listing, normalise_seller
@ -57,48 +55,3 @@ def test_normalise_seller_maps_fields():
assert seller.feedback_count == 300
assert seller.feedback_ratio == pytest.approx(0.991, abs=0.001)
assert seller.account_age_days > 0
# ── _extract_ebay_item_id ─────────────────────────────────────────────────────
class TestExtractEbayItemId:
"""Unit tests for the URL-to-item-ID normaliser."""
def test_itm_url_with_title_slug(self):
url = "https://www.ebay.com/itm/Sony-WH-1000XM5-Headphones/123456789012"
assert _extract_ebay_item_id(url) == "123456789012"
def test_itm_url_without_title_slug(self):
url = "https://www.ebay.com/itm/123456789012"
assert _extract_ebay_item_id(url) == "123456789012"
def test_itm_url_no_www(self):
url = "https://ebay.com/itm/123456789012"
assert _extract_ebay_item_id(url) == "123456789012"
def test_itm_url_with_query_params(self):
url = "https://www.ebay.com/itm/123456789012?hash=item1234abcd"
assert _extract_ebay_item_id(url) == "123456789012"
def test_pay_ebay_rxo_with_itemId_query_param(self):
url = "https://pay.ebay.com/rxo?action=view&sessionid=abc123&itemId=123456789012"
assert _extract_ebay_item_id(url) == "123456789012"
def test_pay_ebay_rxo_path_with_itemId(self):
url = "https://pay.ebay.com/rxo/view?itemId=123456789012"
assert _extract_ebay_item_id(url) == "123456789012"
def test_non_ebay_url_returns_none(self):
assert _extract_ebay_item_id("https://amazon.com/dp/B08N5WRWNW") is None
def test_plain_keyword_returns_none(self):
assert _extract_ebay_item_id("rtx 4090 gpu") is None
def test_empty_string_returns_none(self):
assert _extract_ebay_item_id("") is None
def test_ebay_url_no_item_id_returns_none(self):
assert _extract_ebay_item_id("https://www.ebay.com/sch/i.html?_nkw=gpu") is None
def test_pay_ebay_no_item_id_returns_none(self):
assert _extract_ebay_item_id("https://pay.ebay.com/rxo?action=view&sessionid=abc") is None

View file

@ -3,18 +3,16 @@
Uses a minimal HTML fixture mirroring eBay's current s-card markup.
No HTTP requests are made all tests operate on the pure parsing functions.
"""
from datetime import timedelta
import pytest
from bs4 import BeautifulSoup
from datetime import timedelta
from app.platforms.ebay.scraper import (
_extract_seller_from_card,
_parse_price,
_parse_time_left,
scrape_listings,
scrape_sellers,
_parse_price,
_parse_time_left,
_extract_seller_from_card,
)
from bs4 import BeautifulSoup
# ---------------------------------------------------------------------------
# Minimal eBay search results HTML fixture (li.s-card schema)

View file

@ -1,83 +0,0 @@
"""Integration tests for POST /api/search/build."""
from __future__ import annotations
import json
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from fastapi.testclient import TestClient
@pytest.fixture
def client(tmp_path):
"""TestClient with a fresh DB and mocked LLMRouter/category cache."""
import os
os.environ["SNIPE_DB"] = str(tmp_path / "snipe.db")
# Import app AFTER setting SNIPE_DB so the DB path is picked up
from api.main import app
return TestClient(app, raise_server_exceptions=False)
def _good_llm_response() -> str:
return json.dumps({
"base_query": "RTX 3080",
"must_include_mode": "groups",
"must_include": "rtx|geforce, 3080",
"must_exclude": "mining",
"max_price": 300.0,
"min_price": None,
"condition": ["used"],
"category_id": "27386",
"explanation": "Used RTX 3080 under $300.",
})
def test_build_endpoint_success(client):
with patch("api.main._get_query_translator") as mock_get_t:
mock_t = MagicMock()
from app.llm.query_translator import SearchParamsResponse
mock_t.translate.return_value = SearchParamsResponse(
base_query="RTX 3080",
must_include_mode="groups",
must_include="rtx|geforce, 3080",
must_exclude="mining",
max_price=300.0,
min_price=None,
condition=["used"],
category_id="27386",
explanation="Used RTX 3080 under $300.",
)
mock_get_t.return_value = mock_t
resp = client.post(
"/api/search/build",
json={"natural_language": "used RTX 3080 under $300 no mining"},
)
assert resp.status_code == 200
data = resp.json()
assert data["base_query"] == "RTX 3080"
assert data["explanation"] == "Used RTX 3080 under $300."
def test_build_endpoint_llm_unavailable(client):
with patch("api.main._get_query_translator") as mock_get_t:
mock_get_t.return_value = None # no translator configured
resp = client.post(
"/api/search/build",
json={"natural_language": "GPU"},
)
assert resp.status_code == 503
def test_build_endpoint_bad_json(client):
with patch("api.main._get_query_translator") as mock_get_t:
from app.llm.query_translator import QueryTranslatorError
mock_t = MagicMock()
mock_t.translate.side_effect = QueryTranslatorError("unparseable", raw="garbage output")
mock_get_t.return_value = mock_t
resp = client.post(
"/api/search/build",
json={"natural_language": "GPU"},
)
assert resp.status_code == 422
assert "raw" in resp.json()["detail"]

View file

@ -1,231 +0,0 @@
"""Tests for GET /api/search/async (fire-and-forget search + SSE streaming).
Verifies:
- Returns HTTP 202 with session_id and status: "queued"
- session_id is registered in _update_queues immediately
- Actual scraping is not performed (mocked out)
- Empty query path returns a completed session with done event
"""
from __future__ import annotations
import os
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from fastapi.testclient import TestClient
# ── Fixtures ──────────────────────────────────────────────────────────────────
@pytest.fixture
def client(tmp_path):
"""TestClient with a fresh tmp DB. Must set SNIPE_DB *before* importing app."""
os.environ["SNIPE_DB"] = str(tmp_path / "snipe.db")
from api.main import app
return TestClient(app, raise_server_exceptions=False)
def _make_mock_listing():
"""Return a minimal mock listing object that satisfies the search pipeline."""
m = MagicMock()
m.platform_listing_id = "123456789"
m.seller_platform_id = "test_seller"
m.title = "Test GPU"
m.price = 100.0
m.currency = "USD"
m.condition = "Used"
m.url = "https://www.ebay.com/itm/123456789"
m.photo_urls = []
m.listing_age_days = 5
m.buying_format = "fixed_price"
m.ends_at = None
m.fetched_at = None
m.trust_score_id = None
m.id = 1
m.category_name = None
return m
# ── Core contract tests ───────────────────────────────────────────────────────
def test_async_search_returns_202(client):
    """GET /api/search/async?q=... returns HTTP 202 with session_id and status."""
    with (
        patch("api.main._make_adapter") as mock_adapter_factory,
        patch("api.main._trigger_scraper_enrichment"),
        patch("api.main.TrustScorer") as mock_scorer_cls,
    ):
        # Adapter returns no listings/comps — only the HTTP contract matters here.
        mock_adapter = MagicMock()
        mock_adapter.search.return_value = []
        mock_adapter.get_completed_sales.return_value = None
        mock_adapter_factory.return_value = mock_adapter
        mock_scorer = MagicMock()
        mock_scorer.score_batch.return_value = []
        mock_scorer_cls.return_value = mock_scorer
        resp = client.get("/api/search/async?q=test+gpu")
        assert resp.status_code == 202
        data = resp.json()
        # Contract: fire-and-forget returns a session handle for the SSE stream.
        assert "session_id" in data
        assert data["status"] == "queued"
        assert isinstance(data["session_id"], str)
        assert len(data["session_id"]) > 0
def test_async_search_registers_session_id(client):
    """session_id returned by 202 response must appear in _update_queues immediately."""
    with (
        patch("api.main._make_adapter") as adapter_factory,
        patch("api.main._trigger_scraper_enrichment"),
        patch("api.main.TrustScorer") as scorer_cls,
    ):
        adapter = MagicMock()
        adapter.search.return_value = []
        adapter.get_completed_sales.return_value = None
        adapter_factory.return_value = adapter
        scorer = MagicMock()
        scorer.score_batch.return_value = []
        scorer_cls.return_value = scorer
        response = client.get("/api/search/async?q=test+gpu")
        assert response.status_code == 202
        sid = response.json()["session_id"]
        # The queue must be registered so the SSE endpoint can open it.
        from api.main import _update_queues
        assert sid in _update_queues
def test_async_search_empty_query(client):
    """Empty query returns 202 with a pre-loaded done sentinel, no scraping needed.

    The endpoint short-circuits: the session queue is pre-filled with an empty
    "listings" event followed by the None sentinel that closes the SSE stream.
    """
    resp = client.get("/api/search/async?q=")
    assert resp.status_code == 202
    data = resp.json()
    assert data["status"] == "queued"
    assert "session_id" in data
    from api.main import _update_queues
    sid = data["session_id"]
    assert sid in _update_queues
    q = _update_queues[sid]
    # First item should be the empty listings event
    first = q.get_nowait()
    assert first is not None
    assert first["type"] == "listings"
    assert first["listings"] == []
    # Second item should be the sentinel
    sentinel = q.get_nowait()
    assert sentinel is None
def test_async_search_no_real_chromium(client):
    """Async search endpoint must not launch real Chromium in tests.

    Verifies that the background scraper is submitted to the executor but,
    because the adapter factory is patched, no real Playwright/Xvfb process is
    spawned. Uses a broad patch on Store to avoid sqlite3 DB path issues in
    the thread pool.
    """
    import threading
    scrape_called = threading.Event()
    def _fake_search(query, filters):
        # Record that the background worker actually invoked the adapter.
        scrape_called.set()
        return []
    with (
        patch("api.main._make_adapter") as mock_adapter_factory,
        patch("api.main._trigger_scraper_enrichment"),
        patch("api.main.TrustScorer") as mock_scorer_cls,
        patch("api.main.Store") as mock_store_cls,
    ):
        mock_adapter = MagicMock()
        mock_adapter.search.side_effect = _fake_search
        mock_adapter.get_completed_sales.return_value = None
        mock_adapter_factory.return_value = mock_adapter
        mock_scorer = MagicMock()
        mock_scorer.score_batch.return_value = []
        mock_scorer_cls.return_value = mock_scorer
        # Store is fully mocked: the worker runs in a thread pool where the
        # tmp-path SQLite DB may not be initialised.
        mock_store = MagicMock()
        mock_store.get_listings_staged.return_value = {}
        mock_store.refresh_seller_categories.return_value = 0
        mock_store.save_listings.return_value = None
        mock_store.save_trust_scores.return_value = None
        mock_store.get_market_comp.return_value = None
        mock_store.get_seller.return_value = None
        mock_store.get_user_preference.return_value = None
        mock_store_cls.return_value = mock_store
        resp = client.get("/api/search/async?q=rtx+3080")
        assert resp.status_code == 202
        # Give the background worker a moment to run (it's in a thread pool)
        scrape_called.wait(timeout=5.0)
        # If we get here without a real Playwright process, the test passes.
        assert scrape_called.is_set(), "Background search worker never ran"
def test_async_search_query_params_forwarded(client):
    """All filter params accepted by /api/search are also accepted here."""
    with (
        patch("api.main._make_adapter") as adapter_factory,
        patch("api.main._trigger_scraper_enrichment"),
        patch("api.main.TrustScorer") as scorer_cls,
    ):
        adapter = MagicMock()
        adapter.search.return_value = []
        adapter.get_completed_sales.return_value = None
        adapter_factory.return_value = adapter
        scorer = MagicMock()
        scorer.score_batch.return_value = []
        scorer_cls.return_value = scorer
        # Every filter the sync endpoint knows about, in one query string.
        params = (
            "q=rtx+3080",
            "max_price=400",
            "min_price=100",
            "pages=2",
            "must_include=rtx,3080",
            "must_include_mode=all",
            "must_exclude=mining",
            "category_id=27386",
            "adapter=auto",
        )
        url = "/api/search/async?" + "&".join(params)
        assert client.get(url).status_code == 202
def test_async_search_session_id_is_uuid(client):
    """session_id must be a valid UUID v4 string."""
    import uuid as _uuid
    with (
        patch("api.main._make_adapter") as adapter_factory,
        patch("api.main._trigger_scraper_enrichment"),
        patch("api.main.TrustScorer") as scorer_cls,
    ):
        adapter = MagicMock()
        adapter.search.return_value = []
        adapter.get_completed_sales.return_value = None
        adapter_factory.return_value = adapter
        scorer = MagicMock()
        scorer.score_batch.return_value = []
        scorer_cls.return_value = scorer
        response = client.get("/api/search/async?q=test")
        assert response.status_code == 202
        sid = response.json()["session_id"]
        # uuid.UUID raises on malformed input; the round-trip must be exact.
        assert str(_uuid.UUID(sid)) == sid

View file

@ -1,218 +0,0 @@
"""Unit tests for EbayCategoryCache."""
from __future__ import annotations
import sqlite3
from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from app.platforms.ebay.categories import EbayCategoryCache
BOOTSTRAP_MIN = 10 # bootstrap must seed at least this many rows
@pytest.fixture
def db(tmp_path):
    """Fresh file-backed SQLite DB (under tmp_path) with migrations applied.

    Note: despite the file on disk, each test gets its own tmp_path, so the
    database is fully isolated per test.
    """
    from circuitforge_core.db import get_connection, run_migrations
    conn = get_connection(tmp_path / "test.db")
    run_migrations(conn, Path("app/db/migrations"))
    return conn
def test_is_stale_empty_db(db):
    """A cache with zero rows is always considered stale."""
    assert EbayCategoryCache(db).is_stale() is True
def test_is_stale_fresh(db):
    # A row refreshed "now" keeps the cache fresh (within the TTL window).
    now = datetime.now(timezone.utc).isoformat()
    db.execute(
        "INSERT INTO ebay_categories (category_id, name, full_path, is_leaf, refreshed_at)"
        " VALUES (?, ?, ?, 1, ?)",
        ("12345", "Graphics Cards", "Consumer Electronics > GPUs > Graphics Cards", now),
    )
    db.commit()
    cache = EbayCategoryCache(db)
    assert cache.is_stale() is False
def test_is_stale_old(db):
    """A row refreshed more than a week ago marks the cache stale."""
    stamp = (datetime.now(timezone.utc) - timedelta(days=8)).isoformat()
    row = ("12345", "Graphics Cards", "Consumer Electronics > GPUs > Graphics Cards", stamp)
    db.execute(
        "INSERT INTO ebay_categories (category_id, name, full_path, is_leaf, refreshed_at)"
        " VALUES (?, ?, ?, 1, ?)",
        row,
    )
    db.commit()
    assert EbayCategoryCache(db).is_stale() is True
def test_seed_bootstrap_populates_rows(db):
    """_seed_bootstrap inserts at least BOOTSTRAP_MIN category rows."""
    cache = EbayCategoryCache(db)
    cache._seed_bootstrap()
    (count,) = db.execute("SELECT COUNT(*) FROM ebay_categories").fetchone()
    assert count >= BOOTSTRAP_MIN
def test_get_relevant_keyword_match(db):
    # Keyword lookup against the bootstrap seed; rows start with category_id.
    cache = EbayCategoryCache(db)
    cache._seed_bootstrap()
    results = cache.get_relevant(["GPU", "graphics"], limit=5)
    ids = [r[0] for r in results]
    assert "27386" in ids  # Graphics Cards
def test_get_relevant_no_match(db):
    """Keywords matching no category return an empty list."""
    cache = EbayCategoryCache(db)
    cache._seed_bootstrap()
    assert cache.get_relevant(["zzznomatch_xyzxyz"], limit=5) == []
def test_get_relevant_respects_limit(db):
    """The limit parameter caps the number of returned rows."""
    cache = EbayCategoryCache(db)
    cache._seed_bootstrap()
    matched = cache.get_relevant(["electronics"], limit=3)
    assert len(matched) <= 3
def test_get_all_for_prompt_returns_rows(db):
    # get_all_for_prompt feeds the LLM system prompt; rows are 2-tuples.
    cache = EbayCategoryCache(db)
    cache._seed_bootstrap()
    results = cache.get_all_for_prompt(limit=10)
    assert len(results) > 0
    # Each entry is (category_id, full_path)
    assert all(len(r) == 2 for r in results)
def _make_tree_response() -> dict:
"""Minimal eBay Taxonomy API tree response with two leaf nodes."""
return {
"categoryTreeId": "0",
"rootCategoryNode": {
"category": {"categoryId": "6000", "categoryName": "Root"},
"leafCategoryTreeNode": False,
"childCategoryTreeNodes": [
{
"category": {"categoryId": "6001", "categoryName": "Electronics"},
"leafCategoryTreeNode": False,
"childCategoryTreeNodes": [
{
"category": {"categoryId": "6002", "categoryName": "GPUs"},
"leafCategoryTreeNode": True,
"childCategoryTreeNodes": [],
},
{
"category": {"categoryId": "6003", "categoryName": "CPUs"},
"leafCategoryTreeNode": True,
"childCategoryTreeNodes": [],
},
],
}
],
},
}
def test_refresh_inserts_leaf_nodes(db):
    # refresh() walks the Taxonomy tree and stores only the leaf categories.
    mock_tm = MagicMock()
    mock_tm.get_token.return_value = "fake-token"
    tree_resp = MagicMock()
    tree_resp.raise_for_status = MagicMock()
    tree_resp.json.return_value = _make_tree_response()
    id_resp = MagicMock()
    id_resp.raise_for_status = MagicMock()
    id_resp.json.return_value = {"categoryTreeId": "0"}
    with patch("app.platforms.ebay.categories.requests.get") as mock_get:
        # Call order: first GET resolves the tree id, second fetches the tree.
        mock_get.side_effect = [id_resp, tree_resp]
        cache = EbayCategoryCache(db)
        count = cache.refresh(mock_tm)
    assert count == 2  # two leaf nodes in our fake tree
    cur = db.execute("SELECT category_id FROM ebay_categories ORDER BY category_id")
    ids = {row[0] for row in cur.fetchall()}
    assert "6002" in ids
    assert "6003" in ids
def test_refresh_no_token_manager_seeds_bootstrap(db):
    """Without a token manager, refresh() falls back to the bootstrap seed."""
    seeded = EbayCategoryCache(db).refresh(token_manager=None)
    assert seeded >= BOOTSTRAP_MIN
def test_refresh_api_error_logs_warning(db, caplog):
    """On a Taxonomy API failure, refresh() warns and falls back to the bootstrap seed."""
    import logging
    mock_tm = MagicMock()
    mock_tm.get_token.return_value = "fake-token"
    with patch("app.platforms.ebay.categories.requests.get") as mock_get:
        mock_get.side_effect = Exception("network error")
        cache = EbayCategoryCache(db)
        with caplog.at_level(logging.WARNING, logger="app.platforms.ebay.categories"):
            count = cache.refresh(mock_tm)
    # Falls back to bootstrap on API error
    assert count >= BOOTSTRAP_MIN
    # The test name promises a warning is logged — actually assert it.
    assert any(r.levelno >= logging.WARNING for r in caplog.records), (
        "refresh() should log a warning when the Taxonomy API call fails"
    )
def test_refresh_publishes_to_community_when_creds_available(db):
    """After a successful Taxonomy API refresh, categories are published to community store."""
    mock_tm = MagicMock()
    mock_tm.get_token.return_value = "fake-token"
    id_resp = MagicMock()
    id_resp.raise_for_status = MagicMock()
    id_resp.json.return_value = {"categoryTreeId": "0"}
    tree_resp = MagicMock()
    tree_resp.raise_for_status = MagicMock()
    tree_resp.json.return_value = _make_tree_response()
    mock_community = MagicMock()
    mock_community.publish_categories.return_value = 2
    with patch("app.platforms.ebay.categories.requests.get") as mock_get:
        # Call order: tree-id lookup first, then the tree fetch.
        mock_get.side_effect = [id_resp, tree_resp]
        cache = EbayCategoryCache(db)
        cache.refresh(mock_tm, community_store=mock_community)
    mock_community.publish_categories.assert_called_once()
    # First positional arg: the rows just stored (2 leaves in the fake tree).
    published = mock_community.publish_categories.call_args[0][0]
    assert len(published) == 2
def test_refresh_fetches_from_community_when_no_creds(db):
    """Without creds, community categories are used when available (>= 10 rows)."""
    mock_community = MagicMock()
    # 15 rows — above the >=10 threshold, so community data wins over bootstrap.
    mock_community.fetch_categories.return_value = [
        (str(i), f"Cat {i}", f"Path > Cat {i}") for i in range(15)
    ]
    cache = EbayCategoryCache(db)
    count = cache.refresh(token_manager=None, community_store=mock_community)
    assert count == 15
    cur = db.execute("SELECT COUNT(*) FROM ebay_categories")
    assert cur.fetchone()[0] == 15
def test_refresh_falls_back_to_bootstrap_when_community_sparse(db):
    """Falls back to bootstrap if community returns fewer than 10 rows."""
    sparse_community = MagicMock()
    sparse_community.fetch_categories.return_value = [("1", "Only One", "Path > Only One")]
    seeded = EbayCategoryCache(db).refresh(token_manager=None, community_store=sparse_community)
    assert seeded >= BOOTSTRAP_MIN

View file

@ -4,10 +4,12 @@ from __future__ import annotations
from collections.abc import Callable
from unittest.mock import MagicMock, patch
from circuitforge_core.api.feedback import make_feedback_router
from fastapi import FastAPI
from fastapi.testclient import TestClient
from circuitforge_core.api.feedback import make_feedback_router
# ── Test app factory ──────────────────────────────────────────────────────────
def _make_client(demo_mode_fn: Callable[[], bool] | None = None) -> TestClient:

View file

@ -1,76 +0,0 @@
"""Tests for PATCH /api/preferences display.currency validation."""
from __future__ import annotations
import os
from pathlib import Path
from unittest.mock import patch
import pytest
from fastapi.testclient import TestClient
@pytest.fixture
def client(tmp_path):
    """TestClient with a patched local DB path.

    api.cloud_session._LOCAL_SNIPE_DB is set at module import time, so we
    cannot rely on setting SNIPE_DB before import when other tests have already
    triggered the module load. Patch the module-level variable directly so
    the session dependency points at our fresh tmp DB for the duration of this
    fixture.
    """
    db_path = tmp_path / "snipe.db"
    # Ensure the DB is initialised so the Store can create its tables.
    import api.cloud_session as _cs
    from circuitforge_core.db import get_connection, run_migrations
    conn = get_connection(db_path)
    run_migrations(conn, Path("app/db/migrations"))
    conn.close()  # the app opens its own connection via the patched path
    from api.main import app
    # patch.object is reverted automatically when the fixture generator closes.
    with patch.object(_cs, "_LOCAL_SNIPE_DB", db_path):
        yield TestClient(app, raise_server_exceptions=False)
def test_set_display_currency_valid(client):
    """Accepted ISO 4217 codes are stored and returned."""
    # Exercise the full supported-currency whitelist in one pass.
    for code in ("USD", "GBP", "EUR", "CAD", "AUD", "JPY", "CHF", "MXN", "BRL", "INR"):
        resp = client.patch("/api/preferences", json={"path": "display.currency", "value": code})
        assert resp.status_code == 200, f"Expected 200 for {code}, got {resp.status_code}: {resp.text}"
        data = resp.json()
        assert data.get("display", {}).get("currency") == code
def test_set_display_currency_normalises_lowercase(client):
    """Lowercase code is accepted and normalised to uppercase."""
    resp = client.patch("/api/preferences", json={"path": "display.currency", "value": "eur"})
    body = resp.json()
    assert resp.status_code == 200
    assert body["display"]["currency"] == "EUR"
def test_set_display_currency_unsupported_returns_400(client):
    """Unsupported currency code returns 400 with a clear message."""
    resp = client.patch("/api/preferences", json={"path": "display.currency", "value": "XYZ"})
    assert resp.status_code == 400
    detail = resp.json().get("detail", "")
    # The message must name the rejected code and hint at the supported set.
    assert "XYZ" in detail
    assert "Supported" in detail or "supported" in detail
def test_set_display_currency_empty_string_returns_400(client):
    """Empty string is not a valid currency code."""
    payload = {"path": "display.currency", "value": ""}
    assert client.patch("/api/preferences", json=payload).status_code == 400
def test_set_display_currency_none_returns_400(client):
    """None is not a valid currency code."""
    payload = {"path": "display.currency", "value": None}
    assert client.patch("/api/preferences", json=payload).status_code == 400
def test_other_preference_paths_unaffected(client):
    """Unrelated preference paths still work normally after currency validation added."""
    # Regression guard: validation must be scoped to display.currency only.
    resp = client.patch("/api/preferences", json={"path": "affiliate.opt_out", "value": True})
    assert resp.status_code == 200
    assert resp.json().get("affiliate", {}).get("opt_out") is True

View file

@ -1,170 +0,0 @@
"""Unit tests for QueryTranslator — LLMRouter mocked at boundary."""
from __future__ import annotations
import json
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
from app.llm.query_translator import QueryTranslator, QueryTranslatorError, SearchParamsResponse, _parse_response
# ── _parse_response ───────────────────────────────────────────────────────────
def test_parse_response_happy_path():
    # Well-formed LLM JSON maps field-for-field onto the parsed response object.
    raw = json.dumps({
        "base_query": "RTX 3080",
        "must_include_mode": "groups",
        "must_include": "rtx|geforce, 3080",
        "must_exclude": "mining,for parts",
        "max_price": 300.0,
        "min_price": None,
        "condition": ["used"],
        "category_id": "27386",
        "explanation": "Searching for used RTX 3080 GPUs under $300.",
    })
    result = _parse_response(raw)
    assert result.base_query == "RTX 3080"
    assert result.must_include_mode == "groups"
    assert result.max_price == 300.0
    assert result.min_price is None
    assert result.condition == ["used"]
    assert result.category_id == "27386"
    assert "RTX 3080" in result.explanation
def test_parse_response_missing_optional_fields():
    """Optional fields may be null/empty without breaking parsing."""
    payload = {
        "base_query": "vintage camera",
        "must_include_mode": "all",
        "must_include": "",
        "must_exclude": "",
        "max_price": None,
        "min_price": None,
        "condition": [],
        "category_id": None,
        "explanation": "Searching for vintage cameras.",
    }
    parsed = _parse_response(json.dumps(payload))
    assert parsed.category_id is None
    assert parsed.max_price is None
    assert parsed.condition == []
def test_parse_response_invalid_json():
    # Non-JSON model output must be wrapped in QueryTranslatorError ("unparseable").
    with pytest.raises(QueryTranslatorError, match="unparseable"):
        _parse_response("this is not json {{{ garbage")
def test_parse_response_missing_required_field():
    """base_query is required — omitting it must raise QueryTranslatorError."""
    payload = {
        "must_include_mode": "all",
        "must_include": "",
        "must_exclude": "",
        "max_price": None,
        "min_price": None,
        "condition": [],
        "category_id": None,
        "explanation": "oops",
    }
    with pytest.raises(QueryTranslatorError):
        _parse_response(json.dumps(payload))
# ── QueryTranslator (integration with mocked LLMRouter) ──────────────────────
from app.platforms.ebay.categories import EbayCategoryCache
from circuitforge_core.db import get_connection, run_migrations
@pytest.fixture
def db_with_categories(tmp_path):
    # Migrated tmp DB pre-seeded with the bootstrap category set.
    conn = get_connection(tmp_path / "test.db")
    run_migrations(conn, Path("app/db/migrations"))
    cache = EbayCategoryCache(conn)
    cache._seed_bootstrap()
    return conn
def _make_translator(db_conn, llm_response: str) -> QueryTranslator:
    """Build a QueryTranslator whose LLMRouter is a MagicMock returning llm_response."""
    # EbayCategoryCache is already imported at module level; the previous
    # function-local re-import was redundant.
    cache = EbayCategoryCache(db_conn)
    mock_router = MagicMock()
    mock_router.complete.return_value = llm_response
    return QueryTranslator(category_cache=cache, llm_router=mock_router)
def test_translate_returns_search_params(db_with_categories):
    # End-to-end translate() with a canned LLM response: output fields round-trip.
    llm_out = json.dumps({
        "base_query": "RTX 3080",
        "must_include_mode": "groups",
        "must_include": "rtx|geforce, 3080",
        "must_exclude": "mining,for parts",
        "max_price": 300.0,
        "min_price": None,
        "condition": ["used"],
        "category_id": "27386",
        "explanation": "Searching for used RTX 3080 GPUs under $300.",
    })
    t = _make_translator(db_with_categories, llm_out)
    result = t.translate("used RTX 3080 under $300 no mining")
    assert result.base_query == "RTX 3080"
    assert result.max_price == 300.0
def test_translate_injects_category_hints(db_with_categories):
    """The system prompt sent to the LLM must contain category_id hints."""
    llm_out = json.dumps({
        "base_query": "GPU",
        "must_include_mode": "all",
        "must_include": "",
        "must_exclude": "",
        "max_price": None,
        "min_price": None,
        "condition": [],
        "category_id": None,
        "explanation": "Searching for GPUs.",
    })
    t = _make_translator(db_with_categories, llm_out)
    t.translate("GPU")
    call_args = t._llm_router.complete.call_args
    # The system prompt may be passed by keyword or as the second positional arg.
    system_prompt = call_args.kwargs.get("system") or call_args.args[1]
    # Bootstrap seeds "27386" for Graphics Cards — should appear in the prompt
    assert "27386" in system_prompt
def test_translate_empty_category_cache_still_works(tmp_path):
    """No crash when category cache is empty — prompt uses fallback text."""
    from circuitforge_core.db import get_connection, run_migrations
    conn = get_connection(tmp_path / "empty.db")
    run_migrations(conn, Path("app/db/migrations"))
    # Do NOT seed bootstrap — empty cache
    llm_out = json.dumps({
        "base_query": "vinyl",
        "must_include_mode": "all",
        "must_include": "",
        "must_exclude": "",
        "max_price": None,
        "min_price": None,
        "condition": [],
        "category_id": None,
        "explanation": "Searching for vinyl records.",
    })
    t = _make_translator(conn, llm_out)
    result = t.translate("vinyl records")
    assert result.base_query == "vinyl"
    call_args = t._llm_router.complete.call_args
    # The system prompt may be passed by keyword or as the second positional arg.
    system_prompt = call_args.kwargs.get("system") or call_args.args[1]
    assert "If none match" in system_prompt
def test_translate_llm_error_raises_query_translator_error(db_with_categories):
    """LLM backend failures are wrapped in QueryTranslatorError."""
    # EbayCategoryCache is imported at module level; the function-local
    # re-import was redundant and has been removed.
    cache = EbayCategoryCache(db_with_categories)
    mock_router = MagicMock()
    mock_router.complete.side_effect = RuntimeError("all backends exhausted")
    t = QueryTranslator(category_cache=cache, llm_router=mock_router)
    with pytest.raises(QueryTranslatorError, match="LLM backend"):
        t.translate("used GPU")

View file

@ -1,402 +0,0 @@
"""Tests for the short-TTL search result cache in api/main.py.
Covers:
- _cache_key stability (same inputs same key)
- _cache_key uniqueness (different inputs different keys)
- cache hit path returns early without scraping (async worker)
- cache miss path stores result in _search_result_cache
- refresh=True bypasses cache read (still writes fresh result)
- TTL expiry: expired entries are not returned as hits
- _evict_expired_cache removes expired entries
"""
from __future__ import annotations
import os
import queue as _queue
import time
from pathlib import Path
from unittest.mock import MagicMock, patch
import pytest
# ── Helpers ───────────────────────────────────────────────────────────────────
def _clear_cache():
    """Reset module-level cache state between tests."""
    import api.main as _main
    _main._search_result_cache.clear()
    # Reset the eviction throttle so each test may trigger eviction immediately.
    _main._last_eviction_ts = 0.0
@pytest.fixture(autouse=True)
def isolated_cache():
    """Ensure each test starts with an empty cache."""
    _clear_cache()
    yield
    # Clean up afterwards too, so leaked entries never cross test boundaries.
    _clear_cache()
@pytest.fixture
def client(tmp_path):
    """TestClient backed by a fresh tmp DB."""
    # SNIPE_DB must be set before api.main is imported (read at import time).
    os.environ["SNIPE_DB"] = str(tmp_path / "snipe.db")
    from api.main import app
    from fastapi.testclient import TestClient
    return TestClient(app, raise_server_exceptions=False)
def _make_mock_listing(listing_id: str = "123456789", seller_id: str = "test_seller"):
"""Return a MagicMock listing (for use where asdict() is NOT called on it)."""
m = MagicMock()
m.platform_listing_id = listing_id
m.seller_platform_id = seller_id
m.title = "Test GPU"
m.price = 100.0
m.currency = "USD"
m.condition = "Used"
m.url = f"https://www.ebay.com/itm/{listing_id}"
m.photo_urls = []
m.listing_age_days = 5
m.buying_format = "fixed_price"
m.ends_at = None
m.fetched_at = None
m.trust_score_id = None
m.id = 1
m.category_name = None
return m
def _make_real_listing(listing_id: str = "123456789", seller_id: str = "test_seller"):
    """Return a real Listing dataclass instance (for use where asdict() is called).

    Presumably needed because dataclasses.asdict() cannot serialise a MagicMock
    — TODO confirm against the serialisation path in api.main.
    """
    from app.db.models import Listing
    return Listing(
        platform="ebay",
        platform_listing_id=listing_id,
        title="Test GPU",
        price=100.0,
        currency="USD",
        condition="Used",
        seller_platform_id=seller_id,
        url=f"https://www.ebay.com/itm/{listing_id}",
        photo_urls=[],
        listing_age_days=5,
        buying_format="fixed_price",
        id=None,
    )
# ── _cache_key unit tests ─────────────────────────────────────────────────────
def test_cache_key_stable_for_same_inputs():
    """The same parameter set always produces the same key."""
    from api.main import _cache_key
    params = ("rtx 3080", 400.0, 100.0, 2, "rtx,3080", "all", "mining", "27386")
    assert _cache_key(*params) == _cache_key(*params)
def test_cache_key_case_normalised():
    """Query is normalised to lower-case + stripped before hashing."""
    from api.main import _cache_key
    upper = _cache_key("RTX 3080", None, None, 1, "", "all", "", "")
    lower = _cache_key("rtx 3080", None, None, 1, "", "all", "", "")
    assert upper == lower
def test_cache_key_differs_on_query_change():
    """Different query strings must produce different keys."""
    from api.main import _cache_key
    keys = {
        _cache_key("rtx 3080", None, None, 1, "", "all", "", ""),
        _cache_key("gtx 1080", None, None, 1, "", "all", "", ""),
    }
    assert len(keys) == 2
def test_cache_key_differs_on_price_filter():
    """Different max_price must produce a different key."""
    from api.main import _cache_key
    lower_cap = _cache_key("gpu", 400.0, None, 1, "", "all", "", "")
    higher_cap = _cache_key("gpu", 500.0, None, 1, "", "all", "", "")
    assert lower_cap != higher_cap
def test_cache_key_differs_on_min_price():
    """Different min_price must produce a different key."""
    from api.main import _cache_key
    floor_50 = _cache_key("gpu", None, 50.0, 1, "", "all", "", "")
    floor_100 = _cache_key("gpu", None, 100.0, 1, "", "all", "", "")
    assert floor_50 != floor_100
def test_cache_key_differs_on_pages():
    """Different page count must produce a different key."""
    from api.main import _cache_key
    one_page = _cache_key("gpu", None, None, 1, "", "all", "", "")
    two_pages = _cache_key("gpu", None, None, 2, "", "all", "", "")
    assert one_page != two_pages
def test_cache_key_differs_on_must_include():
    """Different must_include terms must produce a different key."""
    from api.main import _cache_key
    with_rtx = _cache_key("gpu", None, None, 1, "rtx", "all", "", "")
    with_gtx = _cache_key("gpu", None, None, 1, "gtx", "all", "", "")
    assert with_rtx != with_gtx
def test_cache_key_differs_on_must_exclude():
    """Different must_exclude terms must produce a different key."""
    from api.main import _cache_key
    excl_mining = _cache_key("gpu", None, None, 1, "", "all", "mining", "")
    excl_defect = _cache_key("gpu", None, None, 1, "", "all", "defective", "")
    assert excl_mining != excl_defect
def test_cache_key_differs_on_category_id():
    """Different category_id must produce a different key."""
    from api.main import _cache_key
    cat_a = _cache_key("gpu", None, None, 1, "", "all", "", "27386")
    cat_b = _cache_key("gpu", None, None, 1, "", "all", "", "12345")
    assert cat_a != cat_b
def test_cache_key_is_16_chars():
    """Key must be exactly 16 hex characters."""
    from api.main import _cache_key
    key = _cache_key("gpu", None, None, 1, "", "all", "", "")
    assert len(key) == 16
    assert set(key) <= set("0123456789abcdef")
# ── TTL / eviction unit tests ─────────────────────────────────────────────────
def test_expired_entry_is_not_returned_as_hit():
    """An entry past its TTL must not be treated as a cache hit."""
    import api.main as _main
    from api.main import _cache_key
    key = _cache_key("gpu", None, None, 1, "", "all", "", "")
    # Seed an entry whose expiry is already in the past.
    expired_payload = {"listings": [], "market_price": None}
    _main._search_result_cache[key] = (expired_payload, time.time() - 1.0)
    entry = _main._search_result_cache.get(key)
    assert entry is not None
    _payload, expiry = entry
    # Mirrors the hit-check in main.py: expiry in the past means no hit.
    assert expiry <= time.time(), "Entry should be expired"
def test_evict_expired_cache_removes_stale_entries():
    """_evict_expired_cache must remove entries whose expiry has passed."""
    import api.main as _main
    from api.main import _cache_key, _evict_expired_cache
    key_expired = _cache_key("old query", None, None, 1, "", "all", "", "")
    key_valid = _cache_key("new query", None, None, 1, "", "all", "", "")
    _main._search_result_cache[key_expired] = (
        {"listings": [], "market_price": None},
        time.time() - 10.0,  # already expired
    )
    _main._search_result_cache[key_valid] = (
        {"listings": [], "market_price": 99.0},
        time.time() + 300.0,  # valid for 5 min
    )
    # Reset throttle so eviction runs immediately.
    _main._last_eviction_ts = 0.0
    _evict_expired_cache()
    # Only the expired entry is dropped; live entries survive eviction.
    assert key_expired not in _main._search_result_cache
    assert key_valid in _main._search_result_cache
def test_evict_is_rate_limited():
    """_evict_expired_cache should skip eviction if called within 60 s."""
    import api.main as _main
    from api.main import _cache_key, _evict_expired_cache
    key_expired = _cache_key("stale", None, None, 1, "", "all", "", "")
    _main._search_result_cache[key_expired] = (
        {"listings": [], "market_price": None},
        time.time() - 5.0,
    )
    # Pretend eviction just ran.
    _main._last_eviction_ts = time.time()
    _evict_expired_cache()
    # Entry should still be present because eviction was throttled.
    assert key_expired in _main._search_result_cache
# ── Integration tests — async endpoint cache hit ──────────────────────────────
def test_async_cache_hit_skips_scraper(client, tmp_path):
    """On a warm cache hit the scraper adapter must not be called."""
    import threading
    import api.main as _main
    from api.main import _cache_key
    # Pre-seed a valid cache entry.
    # NOTE: key args must match the endpoint's default filter params exactly.
    key = _cache_key("rtx 3080", None, None, 1, "", "all", "", "")
    _main._search_result_cache[key] = (
        {"listings": [], "market_price": 250.0},
        time.time() + 300.0,
    )
    scraper_called = threading.Event()
    def _fake_search(query, filters):
        scraper_called.set()
        return []
    with (
        patch("api.main._make_adapter") as mock_adapter_factory,
        patch("api.main._trigger_scraper_enrichment"),
        patch("api.main.TrustScorer") as mock_scorer_cls,
        patch("api.main.Store") as mock_store_cls,
    ):
        mock_adapter = MagicMock()
        mock_adapter.search.side_effect = _fake_search
        mock_adapter.get_completed_sales.return_value = None
        mock_adapter_factory.return_value = mock_adapter
        mock_scorer = MagicMock()
        mock_scorer.score_batch.return_value = []
        mock_scorer_cls.return_value = mock_scorer
        mock_store = MagicMock()
        mock_store.get_listings_staged.return_value = {}
        mock_store.refresh_seller_categories.return_value = 0
        mock_store.save_listings.return_value = None
        mock_store.save_trust_scores.return_value = None
        mock_store.get_market_comp.return_value = None
        mock_store.get_seller.return_value = None
        mock_store.get_user_preference.return_value = None
        mock_store_cls.return_value = mock_store
        resp = client.get("/api/search/async?q=rtx+3080")
        assert resp.status_code == 202
        # Give the background worker a moment to run.
        # NOTE(review): this waits the full 3 s on the expected negative path —
        # consider a shorter timeout if suite runtime matters.
        scraper_called.wait(timeout=3.0)
        # Scraper must NOT have been called on a cache hit.
        assert not scraper_called.is_set(), "Scraper was called despite a warm cache hit"
def test_async_cache_miss_stores_result(client, tmp_path):
    """After a cache miss the result must be stored in _search_result_cache."""
    import threading
    import api.main as _main
    from api.main import _cache_key
    search_done = threading.Event()
    # A real Listing instance is used here (not a MagicMock) because this code
    # path serialises the listing — see _make_real_listing.
    real_listing = _make_real_listing()
    def _fake_search(query, filters):
        return [real_listing]
    with (
        patch("api.main._make_adapter") as mock_adapter_factory,
        patch("api.main._trigger_scraper_enrichment") as mock_enrich,
        patch("api.main.TrustScorer") as mock_scorer_cls,
        patch("api.main.Store") as mock_store_cls,
    ):
        mock_adapter = MagicMock()
        mock_adapter.search.side_effect = _fake_search
        mock_adapter.get_completed_sales.return_value = None
        mock_adapter_factory.return_value = mock_adapter
        mock_scorer = MagicMock()
        mock_scorer.score_batch.return_value = []
        mock_scorer_cls.return_value = mock_scorer
        mock_store = MagicMock()
        mock_store.get_listings_staged.return_value = {
            real_listing.platform_listing_id: real_listing
        }
        mock_store.refresh_seller_categories.return_value = 0
        mock_store.save_listings.return_value = None
        mock_store.save_trust_scores.return_value = None
        mock_store.get_market_comp.return_value = None
        mock_store.get_seller.return_value = None
        mock_store.get_user_preference.return_value = None
        mock_store_cls.return_value = mock_store
        def _enrich_side_effect(*args, **kwargs):
            # Used as a completion signal: the worker reaches enrichment after
            # processing results (and, per the assertions below, after caching).
            search_done.set()
        mock_enrich.side_effect = _enrich_side_effect
        resp = client.get("/api/search/async?q=rtx+3080")
        assert resp.status_code == 202
        # Wait until the background worker reaches _trigger_scraper_enrichment.
        search_done.wait(timeout=5.0)
        assert search_done.is_set(), "Background search worker never completed"
        key = _cache_key("rtx 3080", None, None, 1, "", "all", "", "")
        assert key in _main._search_result_cache, "Result was not stored in cache after miss"
        payload, expiry = _main._search_result_cache[key]
        assert expiry > time.time(), "Cache entry has already expired"
        assert "listings" in payload
# ── Integration tests — async endpoint refresh=True ──────────────────────────
def test_async_refresh_bypasses_cache_read(client, tmp_path):
    """refresh=True must bypass cache read and invoke the scraper."""
    import threading
    import api.main as _main
    from api.main import _cache_key
    # Seed a valid cache entry so we can confirm it is bypassed.
    key = _cache_key("rtx 3080", None, None, 1, "", "all", "", "")
    _main._search_result_cache[key] = (
        {"listings": [], "market_price": 100.0},
        time.time() + 300.0,
    )
    scraper_called = threading.Event()
    def _fake_search(query, filters):
        # Fires only if the worker ignored the warm cache entry above.
        scraper_called.set()
        return []
    with (
        patch("api.main._make_adapter") as mock_adapter_factory,
        patch("api.main._trigger_scraper_enrichment"),
        patch("api.main.TrustScorer") as mock_scorer_cls,
        patch("api.main.Store") as mock_store_cls,
    ):
        mock_adapter = MagicMock()
        mock_adapter.search.side_effect = _fake_search
        mock_adapter.get_completed_sales.return_value = None
        mock_adapter_factory.return_value = mock_adapter
        mock_scorer = MagicMock()
        mock_scorer.score_batch.return_value = []
        mock_scorer_cls.return_value = mock_scorer
        mock_store = MagicMock()
        mock_store.get_listings_staged.return_value = {}
        mock_store.refresh_seller_categories.return_value = 0
        mock_store.save_listings.return_value = None
        mock_store.save_trust_scores.return_value = None
        mock_store.get_market_comp.return_value = None
        mock_store.get_seller.return_value = None
        mock_store.get_user_preference.return_value = None
        mock_store_cls.return_value = mock_store
        resp = client.get("/api/search/async?q=rtx+3080&refresh=true")
        assert resp.status_code == 202
        scraper_called.wait(timeout=5.0)
        assert scraper_called.is_set(), "Scraper was not called even though refresh=True"

View file

@ -4,7 +4,7 @@ from __future__ import annotations
import json
import sqlite3
from pathlib import Path
from unittest.mock import patch
from unittest.mock import MagicMock, patch
import pytest

View file

@ -1,4 +1,4 @@
from app.tiers import can_use
from app.tiers import can_use, FEATURES, LOCAL_VISION_UNLOCKABLE
def test_metadata_scoring_is_free():
@ -22,13 +22,3 @@ def test_saved_searches_are_free():
# Ungated: retention feature — friction cost outweighs gate value (see tiers.py)
assert can_use("saved_searches", tier="free") is True
assert can_use("saved_searches", tier="paid") is True
def test_llm_query_builder_is_paid():
assert can_use("llm_query_builder", tier="free") is False
assert can_use("llm_query_builder", tier="paid") is True
def test_llm_query_builder_local_vision_does_not_unlock():
# local vision unlocks photo features only, not LLM query builder
assert can_use("llm_query_builder", tier="free", has_local_vision=True) is False

View file

@ -1,14 +1,6 @@
from datetime import datetime, timedelta, timezone
from app.db.models import Seller
from app.trust.aggregator import Aggregator
_ALL_20 = {k: 20 for k in ["account_age", "feedback_count", "feedback_ratio", "price_vs_market", "category_history"]}
def _iso_days_ago(n: int) -> str:
return (datetime.now(timezone.utc) - timedelta(days=n)).isoformat()
def test_composite_sum_of_five_signals():
agg = Aggregator()
@ -140,119 +132,3 @@ def test_new_account_not_flagged_when_age_absent():
result = agg.aggregate(scores, photo_hash_duplicate=False, seller=scraper_seller)
assert "new_account" not in result.red_flags_json
assert "account_under_30_days" not in result.red_flags_json
# ── zero_feedback ─────────────────────────────────────────────────────────────
def test_zero_feedback_adds_flag():
"""seller.feedback_count == 0 must add zero_feedback flag."""
agg = Aggregator()
seller = Seller(
platform="ebay", platform_seller_id="u", username="u",
account_age_days=365, feedback_count=0, feedback_ratio=1.0,
category_history_json="{}",
)
result = agg.aggregate(_ALL_20.copy(), photo_hash_duplicate=False, seller=seller)
assert "zero_feedback" in result.red_flags_json
def test_zero_feedback_caps_composite_at_35():
"""Even with perfect other signals (all 20/20), zero feedback caps composite at 35."""
agg = Aggregator()
seller = Seller(
platform="ebay", platform_seller_id="u", username="u",
account_age_days=365, feedback_count=0, feedback_ratio=1.0,
category_history_json="{}",
)
result = agg.aggregate(_ALL_20.copy(), photo_hash_duplicate=False, seller=seller)
assert result.composite_score <= 35
# ── long_on_market ────────────────────────────────────────────────────────────
def test_long_on_market_flagged_when_thresholds_met():
"""times_seen >= 5 AND listing age >= 14 days → long_on_market fires."""
agg = Aggregator()
result = agg.aggregate(
_ALL_20.copy(), photo_hash_duplicate=False, seller=None,
times_seen=5, first_seen_at=_iso_days_ago(20),
)
assert "long_on_market" in result.red_flags_json
def test_long_on_market_not_flagged_when_too_few_sightings():
"""times_seen < 5 must NOT trigger long_on_market even if listing is old."""
agg = Aggregator()
result = agg.aggregate(
_ALL_20.copy(), photo_hash_duplicate=False, seller=None,
times_seen=4, first_seen_at=_iso_days_ago(30),
)
assert "long_on_market" not in result.red_flags_json
def test_long_on_market_not_flagged_when_too_recent():
"""times_seen >= 5 but only seen for < 14 days → long_on_market must NOT fire."""
agg = Aggregator()
result = agg.aggregate(
_ALL_20.copy(), photo_hash_duplicate=False, seller=None,
times_seen=10, first_seen_at=_iso_days_ago(5),
)
assert "long_on_market" not in result.red_flags_json
# ── significant_price_drop ────────────────────────────────────────────────────
def test_significant_price_drop_flagged():
"""price >= 20% below price_at_first_seen → significant_price_drop fires."""
agg = Aggregator()
result = agg.aggregate(
_ALL_20.copy(), photo_hash_duplicate=False, seller=None,
price=75.00, price_at_first_seen=100.00, # 25% drop
)
assert "significant_price_drop" in result.red_flags_json
def test_significant_price_drop_not_flagged_when_drop_is_small():
"""< 20% drop must NOT trigger significant_price_drop."""
agg = Aggregator()
result = agg.aggregate(
_ALL_20.copy(), photo_hash_duplicate=False, seller=None,
price=95.00, price_at_first_seen=100.00, # 5% drop
)
assert "significant_price_drop" not in result.red_flags_json
def test_significant_price_drop_not_flagged_when_no_prior_price():
"""price_at_first_seen=None (first sighting) must NOT fire significant_price_drop."""
agg = Aggregator()
result = agg.aggregate(
_ALL_20.copy(), photo_hash_duplicate=False, seller=None,
price=50.00, price_at_first_seen=None,
)
assert "significant_price_drop" not in result.red_flags_json
# ── established retailer ──────────────────────────────────────────────────────
def test_established_retailer_suppresses_duplicate_photo():
"""feedback_count >= 1000 (established retailer) must suppress duplicate_photo flag."""
agg = Aggregator()
retailer = Seller(
platform="ebay", platform_seller_id="u", username="u",
account_age_days=1800, feedback_count=5000, feedback_ratio=0.99,
category_history_json="{}",
)
result = agg.aggregate(_ALL_20.copy(), photo_hash_duplicate=True, seller=retailer)
assert "duplicate_photo" not in result.red_flags_json
def test_non_retailer_does_not_suppress_duplicate_photo():
"""feedback_count < 1000 — duplicate_photo must still fire when hash matches."""
agg = Aggregator()
seller = Seller(
platform="ebay", platform_seller_id="u", username="u",
account_age_days=365, feedback_count=50, feedback_ratio=0.99,
category_history_json="{}",
)
result = agg.aggregate(_ALL_20.copy(), photo_hash_duplicate=True, seller=seller)
assert "duplicate_photo" in result.red_flags_json

View file

@ -4,8 +4,10 @@ from __future__ import annotations
import json
from datetime import datetime, timedelta, timezone
import pytest
from app.db.models import Listing, TrustScore
from app.ui.components.easter_eggs import auction_hours_remaining, is_steal
from app.ui.components.easter_eggs import is_steal, auction_hours_remaining
def _listing(**kwargs) -> Listing:

View file

@ -5,32 +5,12 @@
<!-- Emoji favicon: target reticle — inline SVG to avoid a separate file -->
<link rel="icon" href="data:image/svg+xml,<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 100 100'><text y='.9em' font-size='90'>🎯</text></svg>" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Snipe — eBay trust scoring before you bid</title>
<meta name="description" content="Score eBay listings and sellers for trustworthiness before you bid. Catches new accounts, suspicious prices, duplicate photos, and established scammers. Free, no account required." />
<meta name="theme-color" content="#e89122" />
<meta property="og:site_name" content="CircuitForge" />
<meta property="og:title" content="Snipe — eBay trust scoring before you bid" />
<meta property="og:description" content="Score eBay listings and sellers for trustworthiness before you bid. Free, no account required." />
<meta property="og:type" content="website" />
<meta property="og:url" content="https://menagerie.circuitforge.tech/snipe" />
<meta property="og:image" content="https://menagerie.circuitforge.tech/snipe/og-image.png" />
<meta property="og:image:width" content="1200" />
<meta property="og:image:height" content="630" />
<meta property="og:image:alt" content="Snipe — eBay trust scoring before you bid. Free. No account required." />
<meta name="twitter:card" content="summary_large_image" />
<meta name="twitter:title" content="Snipe — eBay trust scoring before you bid" />
<meta name="twitter:description" content="Free eBay trust scorer. Catches scammers before you bid. No account required." />
<meta name="twitter:image" content="https://menagerie.circuitforge.tech/snipe/og-image.png" />
<link rel="canonical" href="https://menagerie.circuitforge.tech/snipe" />
<!-- FOFT guard: prevents dark flash before CSS bundle loads.
theme.css overrides both html and body backgrounds via var(--color-surface)
once loaded, so this only applies for the brief pre-bundle window. -->
<title>Snipe</title>
<!-- Inline background prevents blank flash before CSS bundle loads -->
<!-- Matches --color-surface dark tactical theme from theme.css -->
<style>
html, body { margin: 0; background: #0d1117; min-height: 100vh; }
</style>
<!-- Plausible analytics: cookie-free, GDPR-compliant, self-hosted.
Skips localhost/127.0.0.1. Reports to hostname + circuitforge.tech rollup. -->
<script>(function(){if(/localhost|127\.0\.0\.1/.test(location.hostname))return;var s=document.createElement('script');s.defer=true;s.dataset.domain=location.hostname+',circuitforge.tech';s.dataset.api='https://analytics.circuitforge.tech/api/event';s.src='https://analytics.circuitforge.tech/js/script.js';document.head.appendChild(s);})();</script>
</head>
<body>
<!-- Mount target only — App.vue root must NOT use id="app". Gotcha #1. -->

Binary file not shown.

Before

Width:  |  Height:  |  Size: 30 KiB

View file

@ -2,10 +2,10 @@
<!-- Root uses .app-root class, NOT id="app" index.html owns #app.
Nested #app elements cause ambiguous CSS specificity. Gotcha #1. -->
<div class="app-root" :class="{ 'rich-motion': motion.rich.value }">
<!-- Skip to main content must be first focusable element before the nav -->
<a href="#main-content" class="skip-link">Skip to main content</a>
<AppNav />
<main class="app-main" id="main-content" tabindex="-1">
<!-- Skip to main content link (screen reader / keyboard nav) -->
<a href="#main-content" class="skip-link">Skip to main content</a>
<RouterView />
</main>
@ -19,33 +19,24 @@ import { onMounted } from 'vue'
import { RouterView, useRoute } from 'vue-router'
import { useMotion } from './composables/useMotion'
import { useSnipeMode } from './composables/useSnipeMode'
import { useTheme } from './composables/useTheme'
import { useKonamiCode } from './composables/useKonamiCode'
import { useSessionStore } from './stores/session'
import { useBlocklistStore } from './stores/blocklist'
import { usePreferencesStore } from './stores/preferences'
import { useReportedStore } from './stores/reported'
import AppNav from './components/AppNav.vue'
import FeedbackButton from './components/FeedbackButton.vue'
const motion = useMotion()
const { activate, restore } = useSnipeMode()
const { restore: restoreTheme } = useTheme()
const session = useSessionStore()
const blocklistStore = useBlocklistStore()
const preferencesStore = usePreferencesStore()
const reportedStore = useReportedStore()
const route = useRoute()
useKonamiCode(activate)
onMounted(async () => {
restore() // re-apply snipe mode from localStorage on hard reload
restoreTheme() // re-apply explicit theme override on hard reload
await session.bootstrap() // fetch tier + feature flags from API
blocklistStore.fetchBlocklist() // pre-load so card block buttons reflect state immediately
preferencesStore.load() // load user preferences after session resolves
reportedStore.load() // pre-load reported sellers so cards show badge immediately
onMounted(() => {
restore() // re-apply snipe mode from localStorage on hard reload
session.bootstrap() // fetch tier + feature flags from API
blocklistStore.fetchBlocklist() // pre-load so card block buttons reflect state immediately
})
</script>

View file

@ -1,255 +0,0 @@
import { mount } from '@vue/test-utils'
import { createPinia, setActivePinia } from 'pinia'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import type { Listing, TrustScore, Seller } from '../stores/search'
import { useSearchStore } from '../stores/search'
// ── Mock vue-router — ListingView reads route.params.id ──────────────────────
const mockRouteId = { value: 'test-listing-id' }
vi.mock('vue-router', () => ({
useRoute: () => ({ params: { id: mockRouteId.value } }),
RouterLink: { template: '<a><slot /></a>' },
}))
// ── Helpers ──────────────────────────────────────────────────────────────────
function makeListing(id: string, overrides: Partial<Listing> = {}): Listing {
return {
id: null, platform: 'ebay', platform_listing_id: id,
title: 'NVIDIA RTX 4090 24GB — Used Excellent', price: 849.99,
currency: 'USD', condition: 'used_excellent', seller_platform_id: 'seller1',
url: 'https://ebay.com/itm/test', photo_urls: ['https://example.com/img.jpg'],
listing_age_days: 3, buying_format: 'fixed_price', ends_at: null,
fetched_at: null, trust_score_id: null, ...overrides,
}
}
function makeTrust(score: number, flags: string[] = [], partial = false): TrustScore {
return {
id: null, listing_id: 1, composite_score: score,
account_age_score: 18, feedback_count_score: 20, feedback_ratio_score: 20,
price_vs_market_score: 15, category_history_score: 14,
photo_hash_duplicate: false, photo_analysis_json: null,
red_flags_json: JSON.stringify(flags), score_is_partial: partial, scored_at: null,
}
}
function makeSeller(overrides: Partial<Seller> = {}): Seller {
return {
id: null, platform: 'ebay', platform_seller_id: 'seller1',
username: 'techdeals_rog', account_age_days: 720, feedback_count: 4711,
feedback_ratio: 0.997, category_history_json: '{}', fetched_at: null,
...overrides,
}
}
async function mountView(storeSetup?: (store: ReturnType<typeof useSearchStore>) => void) {
setActivePinia(createPinia())
const store = useSearchStore()
if (storeSetup) storeSetup(store)
const { default: ListingView } = await import('../views/ListingView.vue')
return mount(ListingView, {
global: { plugins: [] },
})
}
// ── Tests ────────────────────────────────────────────────────────────────────
describe('ListingView — not found', () => {
beforeEach(() => {
mockRouteId.value = 'missing-id'
sessionStorage.clear()
})
it('shows not-found state when listing is absent from store', async () => {
const wrapper = await mountView()
expect(wrapper.text()).toContain('Listing not found')
expect(wrapper.text()).toContain('Return to search')
})
it('does not render the trust section when listing is absent', async () => {
const wrapper = await mountView()
expect(wrapper.find('.lv-trust').exists()).toBe(false)
})
})
describe('ListingView — listing present', () => {
const ID = 'test-listing-id'
beforeEach(() => {
mockRouteId.value = ID
sessionStorage.clear()
})
it('renders the listing title', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(85))
store.sellers.set('seller1', makeSeller())
})
expect(wrapper.text()).toContain('NVIDIA RTX 4090 24GB')
})
it('renders the formatted price', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(85))
})
expect(wrapper.text()).toContain('$849.99')
})
it('shows the composite trust score in the ring', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(72))
})
expect(wrapper.find('.lv-ring__score').text()).toBe('72')
})
it('renders all five signal rows in the table', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(80))
store.sellers.set('seller1', makeSeller())
})
const rows = wrapper.findAll('.lv-signals__row')
expect(rows).toHaveLength(5)
})
it('shows score values in signal table', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(80))
store.sellers.set('seller1', makeSeller())
})
// feedback_count_score = 20
expect(wrapper.text()).toContain('20 / 20')
})
it('shows seller username', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(80))
store.sellers.set('seller1', makeSeller({ username: 'gpu_warehouse' }))
})
expect(wrapper.text()).toContain('gpu_warehouse')
})
})
describe('ListingView — red flags', () => {
const ID = 'test-listing-id'
beforeEach(() => {
mockRouteId.value = ID
sessionStorage.clear()
})
it('renders hard flag badge for new_account', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(40, ['new_account']))
})
const flags = wrapper.findAll('.lv-flag--hard')
expect(flags.length).toBeGreaterThan(0)
expect(wrapper.text()).toContain('New account')
})
it('renders soft flag badge for scratch_dent_mentioned', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(55, ['scratch_dent_mentioned']))
})
const flags = wrapper.findAll('.lv-flag--soft')
expect(flags.length).toBeGreaterThan(0)
expect(wrapper.text()).toContain('Damage mentioned')
})
it('shows no flag badges when red_flags_json is empty', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(90, []))
})
expect(wrapper.find('.lv-flag').exists()).toBe(false)
})
it('applies triple-red class when account + price + photo flags all present', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(12, [
'new_account', 'suspicious_price', 'duplicate_photo',
]))
})
expect(wrapper.find('.lv-layout--triple-red').exists()).toBe(true)
})
it('does not apply triple-red class when only two flag categories present', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(30, ['new_account', 'suspicious_price']))
})
expect(wrapper.find('.lv-layout--triple-red').exists()).toBe(false)
})
})
describe('ListingView — partial/pending signals', () => {
const ID = 'test-listing-id'
beforeEach(() => {
mockRouteId.value = ID
sessionStorage.clear()
})
it('shows pending for account age when seller.account_age_days is null', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(60, [], true))
store.sellers.set('seller1', makeSeller({ account_age_days: null }))
})
expect(wrapper.text()).toContain('pending')
})
it('shows partial warning text when score_is_partial is true', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(60, [], true))
store.sellers.set('seller1', makeSeller({ account_age_days: null }))
})
expect(wrapper.find('.lv-verdict__partial').exists()).toBe(true)
})
})
describe('ListingView — ring colour class', () => {
const ID = 'test-listing-id'
beforeEach(() => {
mockRouteId.value = ID
sessionStorage.clear()
})
it('applies lv-ring--high for score >= 80', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(82))
})
expect(wrapper.find('.lv-ring--high').exists()).toBe(true)
})
it('applies lv-ring--mid for score 5079', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(63))
})
expect(wrapper.find('.lv-ring--mid').exists()).toBe(true)
})
it('applies lv-ring--low for score < 50', async () => {
const wrapper = await mountView(store => {
store.results.push(makeListing(ID))
store.trustScores.set(ID, makeTrust(22))
})
expect(wrapper.find('.lv-ring--low').exists()).toBe(true)
})
})

View file

@ -1,110 +0,0 @@
import { createPinia, setActivePinia } from 'pinia'
import { beforeEach, describe, expect, it } from 'vitest'
import { useSearchStore } from '../stores/search'
import type { Listing, TrustScore, Seller } from '../stores/search'
function makeListing(id: string, overrides: Partial<Listing> = {}): Listing {
return {
id: null,
platform: 'ebay',
platform_listing_id: id,
title: `Listing ${id}`,
price: 100,
currency: 'USD',
condition: 'used',
seller_platform_id: 'seller1',
url: `https://ebay.com/itm/${id}`,
photo_urls: [],
listing_age_days: 1,
buying_format: 'fixed_price',
ends_at: null,
fetched_at: null,
trust_score_id: null,
...overrides,
}
}
function makeTrust(score: number, flags: string[] = []): TrustScore {
return {
id: null,
listing_id: 1,
composite_score: score,
account_age_score: 20,
feedback_count_score: 20,
feedback_ratio_score: 20,
price_vs_market_score: 20,
category_history_score: 20,
photo_hash_duplicate: false,
photo_analysis_json: null,
red_flags_json: JSON.stringify(flags),
score_is_partial: false,
scored_at: null,
}
}
describe('useSearchStore.getListing', () => {
beforeEach(() => {
setActivePinia(createPinia())
sessionStorage.clear()
})
it('returns undefined when results are empty', () => {
const store = useSearchStore()
expect(store.getListing('abc')).toBeUndefined()
})
it('returns the listing when present in results', () => {
const store = useSearchStore()
const listing = makeListing('v1|123|0')
store.results.push(listing)
expect(store.getListing('v1|123|0')).toEqual(listing)
})
it('returns undefined for an id not in results', () => {
const store = useSearchStore()
store.results.push(makeListing('v1|123|0'))
expect(store.getListing('v1|999|0')).toBeUndefined()
})
it('returns the correct listing when multiple are present', () => {
const store = useSearchStore()
store.results.push(makeListing('v1|001|0', { title: 'First' }))
store.results.push(makeListing('v1|002|0', { title: 'Second' }))
store.results.push(makeListing('v1|003|0', { title: 'Third' }))
expect(store.getListing('v1|002|0')?.title).toBe('Second')
})
it('handles URL-encoded pipe characters in listing IDs', () => {
const store = useSearchStore()
// The route param arrives decoded from vue-router; store uses decoded string
const listing = makeListing('v1|157831011297|0')
store.results.push(listing)
expect(store.getListing('v1|157831011297|0')).toEqual(listing)
})
})
describe('useSearchStore trust and seller maps', () => {
beforeEach(() => {
setActivePinia(createPinia())
sessionStorage.clear()
})
it('trustScores map returns trust by platform_listing_id', () => {
const store = useSearchStore()
const trust = makeTrust(85, ['low_feedback_count'])
store.trustScores.set('v1|123|0', trust)
expect(store.trustScores.get('v1|123|0')?.composite_score).toBe(85)
})
it('sellers map returns seller by seller_platform_id', () => {
const store = useSearchStore()
const seller: Seller = {
id: null, platform: 'ebay', platform_seller_id: 'sellerA',
username: 'powertech99', account_age_days: 720,
feedback_count: 1200, feedback_ratio: 0.998,
category_history_json: '{}', fetched_at: null,
}
store.sellers.set('sellerA', seller)
expect(store.sellers.get('sellerA')?.username).toBe('powertech99')
})
})

View file

@ -1,140 +0,0 @@
import { beforeEach, describe, expect, it, vi } from 'vitest'
// Reset module-level cache and fetch mock between tests
beforeEach(async () => {
vi.restoreAllMocks()
// Reset module-level cache so each test starts clean
const mod = await import('../composables/useCurrency')
mod._resetCacheForTest()
})
const MOCK_RATES: Record<string, number> = {
USD: 1,
GBP: 0.79,
EUR: 0.92,
JPY: 151.5,
CAD: 1.36,
}
function mockFetchSuccess(rates = MOCK_RATES) {
vi.stubGlobal('fetch', vi.fn().mockResolvedValue({
ok: true,
json: async () => ({ rates }),
}))
}
function mockFetchFailure() {
vi.stubGlobal('fetch', vi.fn().mockRejectedValue(new Error('Network error')))
}
describe('convertFromUSD', () => {
it('returns the same amount for USD (no conversion)', async () => {
mockFetchSuccess()
const { convertFromUSD } = await import('../composables/useCurrency')
const result = await convertFromUSD(100, 'USD')
expect(result).toBe(100)
// fetch should not be called for USD passthrough
expect(fetch).not.toHaveBeenCalled()
})
it('converts USD to GBP using fetched rates', async () => {
mockFetchSuccess()
const { convertFromUSD, _resetCacheForTest } = await import('../composables/useCurrency')
_resetCacheForTest()
const result = await convertFromUSD(100, 'GBP')
expect(result).toBeCloseTo(79, 1)
})
it('converts USD to JPY using fetched rates', async () => {
mockFetchSuccess()
const { convertFromUSD, _resetCacheForTest } = await import('../composables/useCurrency')
_resetCacheForTest()
const result = await convertFromUSD(10, 'JPY')
expect(result).toBeCloseTo(1515, 1)
})
it('returns the original amount when rates are unavailable (network failure)', async () => {
mockFetchFailure()
const { convertFromUSD, _resetCacheForTest } = await import('../composables/useCurrency')
_resetCacheForTest()
const result = await convertFromUSD(100, 'EUR')
expect(result).toBe(100)
})
it('returns the original amount when the currency code is unknown', async () => {
mockFetchSuccess({ USD: 1, EUR: 0.92 }) // no XYZ rate
const { convertFromUSD, _resetCacheForTest } = await import('../composables/useCurrency')
_resetCacheForTest()
const result = await convertFromUSD(50, 'XYZ')
expect(result).toBe(50)
})
it('only calls fetch once when called concurrently (deduplication)', async () => {
mockFetchSuccess()
const { convertFromUSD, _resetCacheForTest } = await import('../composables/useCurrency')
_resetCacheForTest()
await Promise.all([
convertFromUSD(100, 'GBP'),
convertFromUSD(200, 'EUR'),
convertFromUSD(50, 'CAD'),
])
expect((fetch as ReturnType<typeof vi.fn>).mock.calls.length).toBe(1)
})
})
describe('formatPrice', () => {
it('formats USD amount with dollar sign', async () => {
mockFetchSuccess()
const { formatPrice, _resetCacheForTest } = await import('../composables/useCurrency')
_resetCacheForTest()
const result = await formatPrice(99.99, 'USD')
expect(result).toMatch(/^\$99\.99$|^\$100$/) // Intl rounding may vary
expect(result).toContain('$')
})
it('formats GBP amount with correct symbol', async () => {
mockFetchSuccess()
const { formatPrice, _resetCacheForTest } = await import('../composables/useCurrency')
_resetCacheForTest()
const result = await formatPrice(100, 'GBP')
// GBP 79 — expect pound sign or "GBP" prefix
expect(result).toMatch(/[£]|GBP/)
})
it('formats JPY without decimal places (Intl rounds to zero decimals)', async () => {
mockFetchSuccess()
const { formatPrice, _resetCacheForTest } = await import('../composables/useCurrency')
_resetCacheForTest()
const result = await formatPrice(10, 'JPY')
// 10 * 151.5 = 1515 JPY — no decimal places for JPY
expect(result).toMatch(/¥1,515|JPY.*1,515|¥1515/)
})
it('falls back gracefully on network failure, showing USD', async () => {
mockFetchFailure()
const { formatPrice, _resetCacheForTest } = await import('../composables/useCurrency')
_resetCacheForTest()
// With failed rates, conversion returns original amount and uses Intl with target currency
// This may throw if Intl doesn't know EUR — but the function should not throw
const result = await formatPrice(50, 'EUR')
expect(typeof result).toBe('string')
expect(result.length).toBeGreaterThan(0)
})
})
describe('formatPriceUSD', () => {
it('formats a USD amount synchronously', async () => {
const { formatPriceUSD } = await import('../composables/useCurrency')
const result = formatPriceUSD(1234.5)
// Intl output varies by runtime locale data; check structure not exact string
expect(result).toContain('$')
expect(result).toContain('1,234')
})
it('formats zero as a USD string', async () => {
const { formatPriceUSD } = await import('../composables/useCurrency')
const result = formatPriceUSD(0)
expect(result).toContain('$')
expect(result).toMatch(/\$0/)
})
})

View file

@ -1,63 +0,0 @@
import { beforeEach, describe, expect, it } from 'vitest'
// Re-import after each test to get a fresh module-level ref
// (vi.resetModules() ensures module-level state is cleared between describe blocks)
describe('useTheme', () => {
beforeEach(() => {
localStorage.clear()
delete document.documentElement.dataset.theme
})
it('defaults to system when localStorage is empty', async () => {
const { useTheme } = await import('../composables/useTheme')
const { mode } = useTheme()
expect(mode.value).toBe('system')
})
it('setMode(dark) sets data-theme=dark on html element', async () => {
const { useTheme } = await import('../composables/useTheme')
const { setMode } = useTheme()
setMode('dark')
expect(document.documentElement.dataset.theme).toBe('dark')
})
it('setMode(light) sets data-theme=light on html element', async () => {
const { useTheme } = await import('../composables/useTheme')
const { setMode } = useTheme()
setMode('light')
expect(document.documentElement.dataset.theme).toBe('light')
})
it('setMode(system) removes data-theme attribute', async () => {
const { useTheme } = await import('../composables/useTheme')
const { setMode } = useTheme()
setMode('dark')
setMode('system')
expect(document.documentElement.dataset.theme).toBeUndefined()
})
it('setMode persists to localStorage', async () => {
const { useTheme } = await import('../composables/useTheme')
const { setMode } = useTheme()
setMode('dark')
expect(localStorage.getItem('snipe:theme')).toBe('dark')
})
it('restore() re-applies dark from localStorage', async () => {
localStorage.setItem('snipe:theme', 'dark')
// Dynamically import a fresh module to simulate hard reload
const { useTheme } = await import('../composables/useTheme')
const { restore } = useTheme()
restore()
expect(document.documentElement.dataset.theme).toBe('dark')
})
it('restore() with system mode leaves data-theme absent', async () => {
localStorage.setItem('snipe:theme', 'system')
const { useTheme } = await import('../composables/useTheme')
const { restore } = useTheme()
restore()
expect(document.documentElement.dataset.theme).toBeUndefined()
})
})

View file

@ -2,12 +2,6 @@
Dark tactical theme: near-black surfaces, amber accent, trust-signal colours.
ALL color/font/spacing tokens live here nowhere else.
Snipe Mode easter egg: activated by Konami code (cf-snipe-mode in localStorage).
Planned theme variants (add as [data-theme="<name>"] blocks using the same token set):
solarized-dark Ethan Schoonover's Solarized dark palette, amber accent
solarized-light Solarized light palette, amber accent
high-contrast WCAG AAA minimum contrast ratios, no mid-grey text
colorblind Deuteranopia-safe trust signal colours (blue/orange instead of green/red)
*/
/* Snipe dark tactical (default)
@ -44,7 +38,6 @@
--color-error: #f85149;
--color-warning: #d29922;
--color-info: #58a6ff;
--color-accent: #a478ff; /* purple — csv import badge, secondary accent */
/* Typography */
--font-display: 'Fraunces', Georgia, serif;
@ -56,7 +49,6 @@
--space-2: 0.5rem;
--space-3: 0.75rem;
--space-4: 1rem;
--space-5: 1.25rem;
--space-6: 1.5rem;
--space-8: 2rem;
--space-12: 3rem;
@ -86,34 +78,8 @@
Warm cream surfaces with the same amber accent.
Snipe Mode data attribute overrides this via higher specificity.
*/
/* Explicit dark override — beats OS preference when user forces dark in Settings */
[data-theme="dark"]:not([data-snipe-mode="active"]) {
--color-surface: #0d1117;
--color-surface-2: #161b22;
--color-surface-raised: #1c2129;
--color-border: #30363d;
--color-border-light: #21262d;
--color-text: #e6edf3;
--color-text-muted: #8b949e;
--color-text-inverse: #0d1117;
--app-primary: #f59e0b;
--app-primary-hover: #d97706;
--app-primary-light: rgba(245, 158, 11, 0.12);
--trust-high: #3fb950;
--trust-mid: #d29922;
--trust-low: #f85149;
--color-success: #3fb950;
--color-error: #f85149;
--color-warning: #d29922;
--color-info: #58a6ff;
--color-accent: #a478ff;
--shadow-sm: 0 1px 3px rgba(0,0,0,0.4), 0 1px 2px rgba(0,0,0,0.3);
--shadow-md: 0 4px 12px rgba(0,0,0,0.5), 0 2px 4px rgba(0,0,0,0.3);
--shadow-lg: 0 10px 30px rgba(0,0,0,0.6), 0 4px 8px rgba(0,0,0,0.3);
}
@media (prefers-color-scheme: light) {
:root:not([data-theme="dark"]):not([data-snipe-mode="active"]) {
:root:not([data-snipe-mode="active"]) {
/* Surfaces — warm cream, like a tactical field notebook */
--color-surface: #f8f5ee;
--color-surface-2: #f0ece3;
@ -143,7 +109,6 @@
--color-error: #dc2626;
--color-warning: #b45309;
--color-info: #2563eb;
--color-accent: #7c3aed; /* purple — deeper for contrast on cream */
/* Shadows — lighter, warm tint */
--shadow-sm: 0 1px 3px rgba(60, 45, 20, 0.12), 0 1px 2px rgba(60, 45, 20, 0.08);
@ -152,32 +117,6 @@
}
}
/* Explicit light override — beats OS preference when user forces light in Settings */
[data-theme="light"]:not([data-snipe-mode="active"]) {
--color-surface: #f8f5ee;
--color-surface-2: #f0ece3;
--color-surface-raised: #e8e3d8;
--color-border: #c8bfae;
--color-border-light: #dbd3c4;
--color-text: #1c1a16;
--color-text-muted: #6b6357;
--color-text-inverse: #f8f5ee;
--app-primary: #d97706;
--app-primary-hover: #b45309;
--app-primary-light: rgba(217, 119, 6, 0.12);
--trust-high: #16a34a;
--trust-mid: #b45309;
--trust-low: #dc2626;
--color-success: #16a34a;
--color-error: #dc2626;
--color-warning: #b45309;
--color-info: #2563eb;
--color-accent: #7c3aed;
--shadow-sm: 0 1px 3px rgba(60,45,20,0.12), 0 1px 2px rgba(60,45,20,0.08);
--shadow-md: 0 4px 12px rgba(60,45,20,0.15), 0 2px 4px rgba(60,45,20,0.1);
--shadow-lg: 0 10px 30px rgba(60,45,20,0.2), 0 4px 8px rgba(60,45,20,0.1);
}
/* ── Snipe Mode easter egg theme ─────────────────── */
/* Activated by Konami code; stored as 'cf-snipe-mode' in localStorage */
/* Applied: document.documentElement.dataset.snipeMode = 'active' */
@ -218,7 +157,7 @@ html {
-moz-osx-font-smoothing: grayscale;
}
body { margin: 0; min-height: 100vh; background: var(--color-surface); }
body { margin: 0; min-height: 100vh; }
h1, h2, h3, h4, h5, h6 {
font-family: var(--font-display);

View file

@ -140,13 +140,11 @@ import { ref, computed, onMounted } from 'vue'
const props = defineProps<{ currentView?: string }>()
const apiBase = (import.meta.env.VITE_API_BASE as string) ?? ''
// Probe once on mount — hidden until confirmed enabled, so the button never flashes
const enabled = ref(false)
onMounted(async () => {
try {
const res = await fetch(`${apiBase}/api/feedback/status`)
const res = await fetch('/api/feedback/status')
if (res.ok) {
const data = await res.json()
enabled.value = data.enabled === true
@ -207,7 +205,7 @@ async function submit() {
loading.value = true
submitError.value = ''
try {
const res = await fetch(`${apiBase}/api/feedback`, {
const res = await fetch('/api/feedback', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
@ -239,18 +237,18 @@ async function submit() {
/* ── Floating action button ─────────────────────────────────────────── */
.feedback-fab {
position: fixed;
right: var(--space-4);
bottom: calc(68px + var(--space-4)); /* above mobile bottom nav */
right: var(--spacing-md);
bottom: calc(68px + var(--spacing-md)); /* above mobile bottom nav */
z-index: 190;
display: flex;
align-items: center;
gap: var(--space-2);
padding: 9px var(--space-4);
background: var(--color-surface-raised);
gap: var(--spacing-xs);
padding: 9px var(--spacing-md);
background: var(--color-bg-elevated);
border: 1px solid var(--color-border);
border-radius: 999px;
color: var(--color-text-muted);
font-size: 0.8125rem;
color: var(--color-text-secondary);
font-size: var(--font-size-sm);
font-family: var(--font-body);
font-weight: 500;
cursor: pointer;
@ -258,9 +256,9 @@ async function submit() {
transition: background 0.15s, color 0.15s, box-shadow 0.15s, border-color 0.15s;
}
.feedback-fab:hover {
background: var(--color-surface-2);
color: var(--color-text);
border-color: var(--app-primary);
background: var(--color-bg-card);
color: var(--color-text-primary);
border-color: var(--color-border-focus);
box-shadow: var(--shadow-lg);
}
.feedback-fab-icon { width: 15px; height: 15px; flex-shrink: 0; }
@ -269,7 +267,7 @@ async function submit() {
/* On desktop, bottom nav is gone — drop to standard corner */
@media (min-width: 769px) {
.feedback-fab {
bottom: var(--space-6);
bottom: var(--spacing-lg);
}
}
@ -288,13 +286,13 @@ async function submit() {
@media (min-width: 500px) {
.feedback-overlay {
align-items: center;
padding: var(--space-4);
padding: var(--spacing-md);
}
}
/* ── Modal ────────────────────────────────────────────────────────────── */
.feedback-modal {
background: var(--color-surface-raised);
background: var(--color-bg-elevated);
border: 1px solid var(--color-border);
border-radius: var(--radius-lg) var(--radius-lg) 0 0;
width: 100%;
@ -302,7 +300,7 @@ async function submit() {
overflow-y: auto;
display: flex;
flex-direction: column;
box-shadow: var(--shadow-lg);
box-shadow: var(--shadow-xl);
}
@media (min-width: 500px) {
@ -318,13 +316,13 @@ async function submit() {
display: flex;
align-items: center;
justify-content: space-between;
padding: var(--space-4) var(--space-4) var(--space-3);
padding: var(--spacing-md) var(--spacing-md) var(--spacing-sm);
border-bottom: 1px solid var(--color-border);
flex-shrink: 0;
}
.feedback-title {
font-family: var(--font-display);
font-size: 1.125rem;
font-size: var(--font-size-lg);
font-weight: 600;
margin: 0;
}
@ -339,23 +337,23 @@ async function submit() {
align-items: center;
justify-content: center;
}
.feedback-close:hover { color: var(--color-text); }
.feedback-close:hover { color: var(--color-text-primary); }
.feedback-body {
padding: var(--space-4);
padding: var(--spacing-md);
flex: 1;
overflow-y: auto;
display: flex;
flex-direction: column;
gap: var(--space-4);
gap: var(--spacing-md);
}
.feedback-footer {
display: flex;
align-items: center;
justify-content: flex-end;
gap: var(--space-3);
padding: var(--space-3) var(--space-4);
gap: var(--spacing-sm);
padding: var(--spacing-sm) var(--spacing-md);
border-top: 1px solid var(--color-border);
flex-shrink: 0;
}
@ -364,23 +362,23 @@ async function submit() {
resize: vertical;
min-height: 80px;
font-family: var(--font-body);
font-size: 0.8125rem;
font-size: var(--font-size-sm);
}
.form-required { color: var(--color-error); margin-left: 2px; }
.feedback-error {
color: var(--color-error);
font-size: 0.8125rem;
font-size: var(--font-size-sm);
margin: 0;
}
.feedback-success {
color: var(--color-success);
font-size: 0.8125rem;
padding: var(--space-3) var(--space-4);
background: color-mix(in srgb, var(--color-success) 10%, transparent);
border: 1px solid color-mix(in srgb, var(--color-success) 30%, transparent);
font-size: var(--font-size-sm);
padding: var(--spacing-sm) var(--spacing-md);
background: var(--color-success-bg);
border: 1px solid var(--color-success-border);
border-radius: var(--radius-md);
}
.feedback-link { color: var(--color-success); font-weight: 600; text-decoration: underline; }
@ -389,15 +387,15 @@ async function submit() {
.feedback-summary {
display: flex;
flex-direction: column;
gap: var(--space-2);
padding: var(--space-3) var(--space-4);
background: var(--color-surface-2);
gap: var(--spacing-xs);
padding: var(--spacing-sm) var(--spacing-md);
background: var(--color-bg-secondary);
border-radius: var(--radius-md);
border: 1px solid var(--color-border);
}
.feedback-summary-row {
display: flex;
gap: var(--space-4);
gap: var(--spacing-md);
align-items: flex-start;
}
.feedback-summary-row > :first-child { min-width: 72px; flex-shrink: 0; }
@ -406,115 +404,8 @@ async function submit() {
word-break: break-word;
}
.mt-md { margin-top: var(--space-4); }
.mt-xs { margin-top: var(--space-2); }
/* ── Form elements ────────────────────────────────────────────────────── */
.form-group {
display: flex;
flex-direction: column;
gap: var(--space-2);
}
.form-label {
font-size: 0.8125rem;
font-weight: 600;
color: var(--color-text-muted);
text-transform: uppercase;
letter-spacing: 0.06em;
}
.form-input {
width: 100%;
padding: var(--space-2) var(--space-3);
background: var(--color-surface-2);
border: 1px solid var(--color-border);
border-radius: var(--radius-md);
color: var(--color-text);
font-family: var(--font-body);
font-size: 0.875rem;
line-height: 1.5;
transition: border-color 0.15s;
box-sizing: border-box;
}
.form-input:focus {
outline: none;
border-color: var(--app-primary);
}
.form-input::placeholder { color: var(--color-text-muted); opacity: 0.7; }
/* ── Buttons ──────────────────────────────────────────────────────────── */
.btn {
display: inline-flex;
align-items: center;
justify-content: center;
gap: var(--space-2);
padding: var(--space-2) var(--space-4);
border-radius: var(--radius-md);
font-family: var(--font-body);
font-size: 0.875rem;
font-weight: 500;
cursor: pointer;
transition: background 0.15s, color 0.15s, border-color 0.15s;
white-space: nowrap;
}
.btn:disabled { opacity: 0.5; cursor: not-allowed; }
.btn-primary {
background: var(--app-primary);
color: #fff;
border: 1px solid var(--app-primary);
}
.btn-primary:hover:not(:disabled) { filter: brightness(1.1); }
.btn-ghost {
background: transparent;
color: var(--color-text-muted);
border: 1px solid var(--color-border);
}
.btn-ghost:hover:not(:disabled) {
background: var(--color-surface-2);
color: var(--color-text);
border-color: var(--app-primary);
}
/* ── Filter chips ─────────────────────────────────────────────────────── */
.filter-chip-row {
display: flex;
flex-wrap: wrap;
gap: var(--space-2);
}
.btn-chip {
padding: 5px var(--space-3);
background: var(--color-surface-2);
border: 1px solid var(--color-border);
border-radius: 999px;
font-family: var(--font-body);
font-size: 0.8125rem;
font-weight: 500;
color: var(--color-text-muted);
cursor: pointer;
transition: background 0.15s, color 0.15s, border-color 0.15s;
}
.btn-chip.active,
.btn-chip:hover {
background: color-mix(in srgb, var(--app-primary) 15%, transparent);
border-color: var(--app-primary);
color: var(--app-primary);
}
/* ── Card ─────────────────────────────────────────────────────────────── */
.card {
background: var(--color-surface-2);
border: 1px solid var(--color-border);
border-radius: var(--radius-md);
}
/* ── Text utilities ───────────────────────────────────────────────────── */
.text-muted { color: var(--color-text-muted); }
.text-sm { font-size: 0.8125rem; line-height: 1.5; }
.font-semibold { font-weight: 600; }
.mt-md { margin-top: var(--spacing-md); }
.mt-xs { margin-top: var(--spacing-xs); }
/* Transition */
.modal-fade-enter-active, .modal-fade-leave-active { transition: opacity 0.2s ease; }

View file

@ -1,282 +0,0 @@
<!-- web/src/components/LLMQueryPanel.vue -->
<!-- BSL 1.1 License -->
<template>
<div class="llm-panel-wrapper">
<button
type="button"
class="llm-panel-toggle"
:class="{ 'llm-panel-toggle--open': isOpen }"
:aria-expanded="String(isOpen)"
aria-controls="llm-panel"
@click="toggle"
>
Search with AI
<span class="llm-panel-toggle__chevron" aria-hidden="true">{{ isOpen ? '▲' : '▾' }}</span>
</button>
<section
id="llm-panel"
class="llm-panel"
:class="{ 'llm-panel--open': isOpen }"
:hidden="!isOpen"
>
<label for="llm-input" class="llm-panel__label">
Describe what you're looking for
</label>
<textarea
id="llm-input"
ref="textareaRef"
v-model="inputText"
class="llm-panel__textarea"
rows="2"
placeholder="e.g. used RTX 3080 under $300, no mining cards or for-parts listings"
:disabled="isLoading"
@keydown.escape.prevent="handleEscape"
@keydown.ctrl.enter.prevent="onSearch"
/>
<div class="llm-panel__actions">
<button
type="button"
class="llm-panel__search-btn"
:disabled="isLoading || !inputText.trim()"
@click="onSearch"
>
{{ isLoading ? 'Searching…' : 'Search' }}
</button>
<span
role="status"
aria-live="polite"
class="llm-panel__status-pill"
:class="`llm-panel__status-pill--${status}`"
>
<span v-if="status === 'thinking'">
<span class="llm-panel__spinner" aria-hidden="true" />
Thinking
</span>
<span v-else-if="status === 'done'">Filters ready</span>
<span v-else-if="status === 'error'">Error</span>
</span>
</div>
<p v-if="error" class="llm-panel__error" role="alert">
{{ error }}
</p>
<p v-if="status === 'done' && explanation" class="llm-panel__explanation">
{{ explanation }}
</p>
<label class="llm-panel__autorun">
<input
type="checkbox"
:checked="autoRun"
@change="setAutoRun(($event.target as HTMLInputElement).checked)"
/>
Run search automatically
</label>
</section>
</div>
</template>
<script setup lang="ts">
import { ref, nextTick, watch } from 'vue'
import { useLLMQueryBuilder } from '../composables/useLLMQueryBuilder'
// Panel open/loading/status state lives in the composable (module-level refs),
// so it persists even if this component unmounts and remounts.
const {
isOpen,
isLoading,
status,
explanation,
error,
autoRun,
toggle,
setAutoRun,
buildQuery,
} = useLLMQueryBuilder()
// Local-only state: the free-text prompt and a handle for focus management.
const inputText = ref('')
const textareaRef = ref<HTMLTextAreaElement | null>(null)
// Move keyboard focus into the textarea whenever the panel opens.
// nextTick is required because the textarea is hidden until isOpen flips.
watch(isOpen, async (open) => {
if (open) {
await nextTick()
textareaRef.value?.focus()
}
})
// Submit the prompt to the query builder (bound to the button and Ctrl+Enter).
async function onSearch() {
await buildQuery(inputText.value)
}
// Escape closes the panel and returns focus to the toggle button so keyboard
// users are not left focused on a now-hidden element.
function handleEscape() {
toggle()
const toggleBtn = document.querySelector<HTMLButtonElement>('[aria-controls="llm-panel"]')
toggleBtn?.focus()
}
</script>
<style scoped>
.llm-panel-wrapper {
width: 100%;
}
/* Toggle — muted at rest, amber on hover/open. Matches sidebar toolbar buttons. */
.llm-panel-toggle {
display: inline-flex;
align-items: center;
gap: var(--space-2);
padding: var(--space-2) var(--space-3);
background: var(--color-surface-raised);
border: 1px solid var(--color-border);
border-radius: var(--radius-md);
color: var(--color-text-muted);
font-size: 0.8rem;
font-weight: 500;
cursor: pointer;
transition: background var(--transition), border-color var(--transition), color var(--transition);
margin-bottom: var(--space-2);
}
.llm-panel-toggle:hover {
background: var(--app-primary-light);
border-color: var(--app-primary);
color: var(--app-primary);
}
.llm-panel-toggle--open {
background: var(--app-primary-light);
border-color: var(--app-primary);
color: var(--app-primary);
}
/* Panel */
.llm-panel {
display: none;
flex-direction: column;
gap: var(--space-3);
padding: var(--space-4);
background: var(--color-surface-raised);
border: 1px solid var(--color-border);
border-radius: var(--radius-md);
margin-bottom: var(--space-3);
}
.llm-panel--open {
display: flex;
}
.llm-panel__label {
font-size: 0.8rem;
font-weight: 500;
color: var(--color-text-muted);
text-transform: uppercase;
letter-spacing: 0.05em;
}
.llm-panel__textarea {
width: 100%;
padding: var(--space-2) var(--space-3);
background: var(--color-surface);
border: 1px solid var(--color-border);
border-radius: var(--radius-sm);
color: var(--color-text);
font-size: 0.9rem;
resize: vertical;
font-family: inherit;
}
.llm-panel__textarea:focus {
outline: 2px solid var(--app-primary);
outline-offset: 1px;
border-color: var(--app-primary);
}
.llm-panel__actions {
display: flex;
align-items: center;
gap: var(--space-3);
flex-wrap: wrap;
}
/* Search button — same amber style as the main Search button */
.llm-panel__search-btn {
padding: var(--space-2) var(--space-4);
background: var(--app-primary);
color: var(--color-text-inverse);
border: none;
border-radius: var(--radius-sm);
font-weight: 600;
font-size: 0.875rem;
cursor: pointer;
transition: background var(--transition);
}
.llm-panel__search-btn:hover:not(:disabled) {
background: var(--app-primary-hover);
}
.llm-panel__search-btn:disabled {
opacity: 0.4;
cursor: not-allowed;
}
.llm-panel__status-pill {
font-size: 0.8rem;
color: var(--color-text-muted);
font-family: var(--font-mono);
}
.llm-panel__status-pill--idle {
visibility: hidden;
}
.llm-panel__status-pill--done {
color: var(--color-success);
}
.llm-panel__status-pill--error {
color: var(--color-error);
}
@media (prefers-reduced-motion: no-preference) {
.llm-panel__spinner {
display: inline-block;
width: 0.75em;
height: 0.75em;
border: 2px solid var(--app-primary);
border-top-color: transparent;
border-radius: 50%;
animation: llm-spin 0.7s linear infinite;
vertical-align: middle;
margin-right: 0.25em;
}
}
@keyframes llm-spin {
to { transform: rotate(360deg); }
}
.llm-panel__error {
font-size: 0.85rem;
color: var(--color-error);
margin: 0;
}
.llm-panel__explanation {
font-size: 0.85rem;
color: var(--color-text-muted);
margin: 0;
font-style: italic;
}
.llm-panel__autorun {
display: flex;
align-items: center;
gap: var(--space-2);
font-size: 0.8rem;
color: var(--color-text-muted);
cursor: pointer;
}
</style>

View file

@ -5,25 +5,10 @@
'steal-card': isSteal,
'listing-card--auction': isAuction && hoursRemaining !== null && hoursRemaining > 1,
'listing-card--triple-red': tripleRed,
'listing-card--selected': selected,
}"
@click="selectMode ? $emit('toggle') : undefined"
>
<!-- Thumbnail -->
<div class="card__thumb">
<!-- Selection checkbox always in DOM; shown on hover or when in select mode -->
<button
v-show="selectMode || selected"
class="card__select-btn"
:class="{ 'card__select-btn--checked': selected }"
:aria-pressed="selected"
:aria-label="selected ? 'Deselect listing' : 'Select listing'"
@click.stop="$emit('toggle')"
>
<svg v-if="selected" viewBox="0 0 12 12" fill="currentColor" width="10" height="10">
<path d="M1.5 6L4.5 9L10.5 3" stroke="currentColor" stroke-width="1.8" fill="none" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
</button>
<img
v-if="listing.photo_urls.length"
:src="listing.photo_urls[0]"
@ -40,13 +25,9 @@
<!-- Main info -->
<div class="card__body">
<!-- Title row -->
<a
:href="listing.url"
target="_blank"
rel="noopener noreferrer"
class="card__title"
@click="selectMode && $event.preventDefault()"
>{{ listing.title }}</a>
<a :href="listing.url" target="_blank" rel="noopener noreferrer" class="card__title">
{{ listing.title }}
</a>
<!-- Format + condition badges -->
<div class="card__badges">
@ -81,9 +62,6 @@
{{ flagLabel(flag) }}
</span>
</div>
<p v-if="sellerReported" class="card__reported-badge" aria-label="You reported this seller to eBay">
Reported to eBay
</p>
<p v-if="pendingSignalNames.length" class="card__score-pending">
Updating: {{ pendingSignalNames.join(', ') }}
</p>
@ -99,7 +77,6 @@
v-model="blockReason"
class="card__block-reason"
placeholder="Reason (optional)"
aria-label="Reason for blocking (optional)"
maxlength="200"
@keydown.enter="onBlock"
@keydown.esc="blockingOpen = false"
@ -119,8 +96,6 @@
class="card__trust"
:class="[trustClass, { 'card__trust--partial': trust?.score_is_partial }]"
:title="trustBadgeTitle"
:aria-label="trustBadgeTitle"
role="img"
>
<span class="card__trust-num">{{ trust?.composite_score ?? '?' }}</span>
<span class="card__trust-label">Trust</span>
@ -140,7 +115,6 @@
class="card__enrich-btn"
:class="{ 'card__enrich-btn--spinning': enriching, 'card__enrich-btn--error': enrichError }"
:title="enrichError ? 'Enrichment failed — try again' : 'Refresh score now'"
:aria-label="enrichError ? 'Enrichment failed, try again' : 'Refresh trust score'"
:disabled="enriching"
@click.stop="onEnrich"
>{{ enrichError ? '✗' : '↻' }}</button>
@ -150,15 +124,12 @@
class="card__block-btn"
:class="{ 'card__block-btn--active': isBlocked }"
:title="isBlocked ? 'Seller is blocked' : 'Block this seller'"
:aria-label="isBlocked ? `${seller.username} is blocked` : `Block seller ${seller.username}`"
:aria-pressed="isBlocked"
@click.stop="isBlocked ? null : (blockingOpen = !blockingOpen)"
></button>
</div>
<!-- Trust feedback: opt-in signal buttons (off by default, enabled in Settings) -->
<!-- Trust feedback: calm "looks right / wrong" signal buttons -->
<TrustFeedbackButtons
v-if="trustSignalEnabled"
:seller-id="`ebay::${listing.seller_platform_id}`"
:trust="trust"
/>
@ -177,43 +148,25 @@
<span v-if="marketPrice" class="card__market-price" title="Median market price">
market ~{{ formattedMarket }}
</span>
<RouterLink
:to="`/listing/${listing.platform_listing_id}`"
class="card__detail-link"
:aria-label="`View trust breakdown for: ${listing.title}`"
@click.stop
>Details</RouterLink>
</div>
</div>
</article>
</template>
<script setup lang="ts">
import { computed, ref, watch } from 'vue'
import { RouterLink } from 'vue-router'
import { computed, ref } from 'vue'
import type { Listing, TrustScore, Seller } from '../stores/search'
import { useSearchStore } from '../stores/search'
import { useBlocklistStore } from '../stores/blocklist'
import TrustFeedbackButtons from './TrustFeedbackButtons.vue'
import { useTrustSignalPref } from '../composables/useTrustSignalPref'
import { formatPrice, formatPriceUSD } from '../composables/useCurrency'
import { usePreferencesStore } from '../stores/preferences'
const { enabled: trustSignalEnabled } = useTrustSignalPref()
const prefsStore = usePreferencesStore()
const props = defineProps<{
listing: Listing
trust: TrustScore | null
seller: Seller | null
marketPrice: number | null
selected?: boolean
selectMode?: boolean
sellerReported?: boolean
}>()
const emit = defineEmits<{ toggle: [] }>()
const store = useSearchStore()
const blocklist = useBlocklistStore()
const enriching = ref(false)
@ -382,26 +335,15 @@ const isSteal = computed(() => {
return props.listing.price < props.marketPrice * 0.8
})
// Async price display — show USD synchronously while rates load, then update
const formattedPrice = ref(formatPriceUSD(props.listing.price))
const formattedMarket = ref(props.marketPrice ? formatPriceUSD(props.marketPrice) : '')
const formattedPrice = computed(() => {
const sym = props.listing.currency === 'USD' ? '$' : props.listing.currency + ' '
return `${sym}${props.listing.price.toLocaleString('en-US', { minimumFractionDigits: 0, maximumFractionDigits: 2 })}`
})
async function _updatePrices() {
const currency = prefsStore.displayCurrency
formattedPrice.value = await formatPrice(props.listing.price, currency)
if (props.marketPrice) {
formattedMarket.value = await formatPrice(props.marketPrice, currency)
} else {
formattedMarket.value = ''
}
}
// Update when the listing, marketPrice, or display currency changes
watch(
[() => props.listing.price, () => props.marketPrice, () => prefsStore.displayCurrency],
() => { _updatePrices() },
{ immediate: true },
)
const formattedMarket = computed(() => {
if (!props.marketPrice) return ''
return `$${props.marketPrice.toLocaleString('en-US', { maximumFractionDigits: 0 })}`
})
</script>
<style scoped>
@ -423,55 +365,17 @@ watch(
box-shadow: var(--shadow-md);
}
/* Selection */
.listing-card--selected {
border-color: var(--app-primary);
box-shadow: 0 0 0 2px color-mix(in srgb, var(--app-primary) 30%, transparent);
}
.listing-card:hover .card__select-btn {
display: flex !important; /* reveal on hover even when v-show hides it */
}
.card__select-btn {
position: absolute;
top: var(--space-2);
left: var(--space-2);
z-index: 5;
width: 20px;
height: 20px;
border-radius: var(--radius-sm);
border: 1.5px solid var(--color-border);
background: var(--color-surface-raised);
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
padding: 0;
transition: background 120ms ease, border-color 120ms ease, opacity 120ms ease;
color: #fff;
opacity: 0.7;
}
.card__select-btn:hover,
.card__select-btn--checked { opacity: 1; }
.card__select-btn:hover { border-color: var(--app-primary); }
.card__select-btn--checked {
background: var(--app-primary);
border-color: var(--app-primary);
}
/* Thumbnail */
.card__thumb {
width: 80px;
height: 80px;
border-radius: var(--radius-md);
overflow: visible; /* allow checkbox to poke out */
overflow: hidden;
flex-shrink: 0;
background: var(--color-surface-raised);
display: flex;
align-items: center;
justify-content: center;
position: relative;
}
.card__img {
@ -538,26 +442,15 @@ watch(
}
.card__flag-badge {
background: color-mix(in srgb, var(--color-error) 15%, transparent);
background: rgba(248, 81, 73, 0.15);
color: var(--color-error);
border: 1px solid color-mix(in srgb, var(--color-error) 30%, transparent);
border: 1px solid rgba(248, 81, 73, 0.3);
padding: 1px var(--space-2);
border-radius: var(--radius-sm);
font-size: 0.6875rem;
font-weight: 600;
}
.card__reported-badge {
font-size: 0.6875rem;
color: var(--color-text-muted);
background: color-mix(in srgb, var(--color-text-muted) 10%, transparent);
border: 1px solid color-mix(in srgb, var(--color-text-muted) 20%, transparent);
border-radius: var(--radius-sm);
padding: 1px var(--space-2);
margin: 0;
display: inline-block;
}
.card__partial-warning {
font-size: 0.75rem;
color: var(--color-warning);
@ -670,7 +563,6 @@ watch(
}
.listing-card:hover .card__block-btn { opacity: 0.5; }
.listing-card:hover .card__block-btn:hover { opacity: 1; color: var(--color-error); border-color: var(--color-error); }
.card__block-btn:focus-visible { opacity: 0.6; outline: 2px solid var(--app-primary); outline-offset: 2px; }
.card__block-btn--active { opacity: 1 !important; color: var(--color-error); border-color: var(--color-error); cursor: default; }
/* Block popover */
@ -769,16 +661,6 @@ watch(
font-family: var(--font-mono);
}
.card__detail-link {
display: block;
font-size: 0.7rem;
color: var(--app-primary);
text-decoration: none;
margin-top: var(--space-1);
transition: opacity 150ms ease;
}
.card__detail-link:hover { opacity: 0.75; }
/* ── Triple Red easter egg ──────────────────────────────────────────────── */
/* Fires when: (new_account | account_under_30d) + suspicious_price + hard flag */
.listing-card--triple-red {
@ -806,7 +688,7 @@ watch(
.listing-card--triple-red:hover {
animation: none;
border-color: var(--color-error);
box-shadow: 0 0 10px 2px color-mix(in srgb, var(--color-error) 35%, transparent);
box-shadow: 0 0 10px 2px rgba(248, 81, 73, 0.35);
}
.listing-card--triple-red:hover::after {
@ -816,12 +698,12 @@ watch(
@keyframes triple-red-glow {
0%, 100% {
border-color: color-mix(in srgb, var(--color-error) 50%, transparent);
box-shadow: 0 0 5px 1px color-mix(in srgb, var(--color-error) 20%, transparent);
border-color: rgba(248, 81, 73, 0.5);
box-shadow: 0 0 5px 1px rgba(248, 81, 73, 0.2);
}
50% {
border-color: var(--color-error);
box-shadow: 0 0 14px 3px color-mix(in srgb, var(--color-error) 45%, transparent);
box-shadow: 0 0 14px 3px rgba(248, 81, 73, 0.45);
}
}

View file

@ -1,102 +0,0 @@
/**
 * useCurrency — live exchange rate conversion from USD to a target display currency.
*
* Rates are fetched lazily on first use from open.er-api.com (free, no key required).
* A module-level cache with a 1-hour TTL prevents redundant network calls.
* On fetch failure the composable falls back silently to USD display.
*/
// Free USD-based exchange-rate endpoint; no API key required.
const ER_API_URL = 'https://open.er-api.com/v6/latest/USD'
const CACHE_TTL_MS = 60 * 60 * 1000 // 1 hour
// Cached rate table plus the timestamp it was fetched at (for TTL checks).
interface RateCache {
rates: Record<string, number>
fetchedAt: number
}
// Module-level cache shared across all composable instances
let _cache: RateCache | null = null
// In-flight fetch promise — lets concurrent callers share one network request.
let _inflight: Promise<Record<string, number>> | null = null
/**
 * Fetch the USD-based rate table, serving from the module cache when fresh
 * and deduplicating concurrent callers onto a single in-flight request.
 * On network/HTTP failure, falls back to stale cached rates if any exist,
 * otherwise returns an empty table (callers then display USD unchanged).
 */
async function _fetchRates(): Promise<Record<string, number>> {
  // Cache hit within the TTL — no network call needed.
  if (_cache && Date.now() - _cache.fetchedAt < CACHE_TTL_MS) {
    return _cache.rates
  }
  // Another caller already kicked off a fetch — piggyback on it.
  if (_inflight) return _inflight
  const request = (async () => {
    try {
      const response = await fetch(ER_API_URL)
      if (!response.ok) throw new Error(`ER-API responded ${response.status}`)
      const payload = await response.json()
      const table: Record<string, number> = payload.rates ?? {}
      _cache = { rates: table, fetchedAt: Date.now() }
      return table
    } catch {
      // Prefer stale data over nothing; empty object means USD passthrough.
      return _cache?.rates ?? {}
    } finally {
      // Clear the in-flight marker so the next call can retry or refresh.
      _inflight = null
    }
  })()
  _inflight = request
  return request
}
/**
 * Convert an amount in USD into the target currency at the latest fetched rate.
 * The amount passes through unchanged when the target is USD, or when the rate
 * table has no entry for it (rates unavailable or unknown currency code).
 */
export async function convertFromUSD(amountUSD: number, targetCurrency: string): Promise<number> {
  if (targetCurrency === 'USD') {
    return amountUSD
  }
  const table = await _fetchRates()
  const factor = table[targetCurrency]
  // Missing/zero rate → USD passthrough rather than a bogus conversion.
  return factor ? amountUSD * factor : amountUSD
}
/**
 * Format a USD amount as a localized currency string in the target currency.
 * Exchange rates are fetched lazily; when they are unavailable the underlying
 * conversion passes the USD amount through unchanged.
 *
 * Because this is async, callers typically seed a ref with the synchronous
 * USD formatter first, then update it once this promise resolves.
 */
export async function formatPrice(amountUSD: number, currency: string): Promise<string> {
  const value = await convertFromUSD(amountUSD, currency)
  try {
    const formatter = new Intl.NumberFormat('en-US', {
      style: 'currency',
      currency,
      minimumFractionDigits: 0,
      maximumFractionDigits: 2,
    })
    return formatter.format(value)
  } catch {
    // Intl rejected the currency code — fall back to "<CODE> <number>".
    const digits = value.toLocaleString('en-US', { minimumFractionDigits: 0, maximumFractionDigits: 2 })
    return `${currency} ${digits}`
  }
}
/**
 * Synchronous USD-only formatter, used before exchange rates have loaded.
 */
export function formatPriceUSD(amountUSD: number): string {
  const usdFormatter = new Intl.NumberFormat('en-US', {
    style: 'currency',
    currency: 'USD',
    minimumFractionDigits: 0,
    maximumFractionDigits: 2,
  })
  return usdFormatter.format(amountUSD)
}
/**
 * Test-only hook: clears the module-level rate cache and any in-flight fetch
 * so each test case starts from a cold-cache state.
 */
export function _resetCacheForTest(): void {
  _inflight = null
  _cache = null
}

View file

@ -1,92 +0,0 @@
// web/src/composables/useLLMQueryBuilder.ts
// BSL 1.1 License
/**
* State and API call logic for the LLM query builder panel.
*/
import { ref } from 'vue'
import { useSearchStore, type SearchParamsResult } from '../stores/search'
// Lifecycle of a single query-build request, driving the status pill in the UI.
export type BuildStatus = 'idle' | 'thinking' | 'done' | 'error'
// localStorage key for the "run search automatically" preference.
const LS_AUTORUN_KEY = 'snipe:llm-autorun'
// Module-level refs so state persists across component re-renders
const isOpen = ref(false) // panel expanded/collapsed
const isLoading = ref(false) // request currently in flight
const status = ref<BuildStatus>('idle')
const explanation = ref<string>('') // server's human-readable summary of the built query
const error = ref<string | null>(null) // last error message; null when none
const autoRun = ref<boolean>(localStorage.getItem(LS_AUTORUN_KEY) === 'true')
export function useLLMQueryBuilder() {
const store = useSearchStore()
function toggle() {
isOpen.value = !isOpen.value
if (!isOpen.value) {
status.value = 'idle'
error.value = null
explanation.value = ''
}
}
function setAutoRun(value: boolean) {
autoRun.value = value
localStorage.setItem(LS_AUTORUN_KEY, value ? 'true' : 'false')
}
async function buildQuery(naturalLanguage: string): Promise<SearchParamsResult | null> {
if (!naturalLanguage.trim()) return null
isLoading.value = true
status.value = 'thinking'
error.value = null
explanation.value = ''
try {
const resp = await fetch('/api/search/build', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ natural_language: naturalLanguage.trim() }),
})
if (!resp.ok) {
const data = await resp.json().catch(() => ({}))
const msg = typeof data.detail === 'string'
? data.detail
: (data.detail?.message ?? `Server error (${resp.status})`)
throw new Error(msg)
}
const params: SearchParamsResult = await resp.json()
store.populateFromLLM(params)
explanation.value = params.explanation
status.value = 'done'
if (autoRun.value) {
await store.search(params.base_query, store.filters)
}
return params
} catch (err: unknown) {
const msg = err instanceof Error ? err.message : 'Something went wrong.'
error.value = msg
status.value = 'error'
return null
} finally {
isLoading.value = false
}
}
return {
isOpen,
isLoading,
status,
explanation,
error,
autoRun,
toggle,
setAutoRun,
buildQuery,
}
}

Some files were not shown because too many files have changed in this diff Show more