Compare commits

..

No commits in common. "main" and "a17dcc8c55bfcd8aaa09bd717bd90e8e1700716c" have entirely different histories.

257 changed files with 1492 additions and 35327 deletions

View file

@ -1,44 +0,0 @@
# git-cliff changelog configuration for Kiwi
# See: https://git-cliff.org/docs/configuration
[changelog]
header = """
# Changelog\n
"""
body = """
{% if version %}\
## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
{% else %}\
## [Unreleased]
{% endif %}\
{% for group, commits in commits | group_by(attribute="group") %}
### {{ group | upper_first }}
{% for commit in commits %}
- {% if commit.scope %}**{{ commit.scope }}:** {% endif %}{{ commit.message | upper_first }}\
{% endfor %}
{% endfor %}\n
"""
trim = true
[git]
conventional_commits = true
filter_unconventional = true
split_commits = false
commit_preprocessors = []
commit_parsers = [
{ message = "^feat", group = "Features" },
{ message = "^fix", group = "Bug Fixes" },
{ message = "^perf", group = "Performance" },
{ message = "^refactor", group = "Refactoring" },
{ message = "^docs", group = "Documentation" },
{ message = "^test", group = "Testing" },
{ message = "^chore", group = "Chores" },
{ message = "^ci", group = "CI/CD" },
{ message = "^revert", group = "Reverts" },
]
filter_commits = false
tag_pattern = "v[0-9].*"
skip_tags = ""
ignore_tags = ""
topo_order = false
sort_commits = "oldest"

View file

@ -11,33 +11,6 @@ DATA_DIR=./data
# Database (defaults to DATA_DIR/kiwi.db) # Database (defaults to DATA_DIR/kiwi.db)
# DB_PATH=./data/kiwi.db # DB_PATH=./data/kiwi.db
# Pipeline data directory for downloaded parquets (used by download_datasets.py)
# Override to store large datasets on a separate drive or NAS
# KIWI_PIPELINE_DATA_DIR=./data/pipeline
# CF-core resource coordinator (VRAM lease management)
# Set to the coordinator URL when running alongside cf-core orchestration
# COORDINATOR_URL=http://localhost:7700
# IP this machine advertises to the coordinator (must be reachable from coordinator host)
# CF_ORCH_ADVERTISE_HOST=10.1.10.71
# CF-core hosted coordinator (managed cloud GPU inference — Paid+ tier)
# Set CF_ORCH_URL to use a hosted cf-orch coordinator instead of self-hosting.
# CF_LICENSE_KEY is read automatically by CFOrchClient for bearer auth.
# CF_ORCH_URL=https://orch.circuitforge.tech
# CF_LICENSE_KEY=CFG-KIWI-xxxx-xxxx-xxxx
# LLM backend — env-var auto-config (no llm.yaml needed for bare-metal users)
# LLMRouter checks these in priority order:
# 1. Anthropic cloud — set ANTHROPIC_API_KEY
# 2. OpenAI cloud — set OPENAI_API_KEY
# 3. Local Ollama — set OLLAMA_HOST (+ optionally OLLAMA_MODEL)
# All three are optional; leave unset to rely on a local llm.yaml instead.
# ANTHROPIC_API_KEY=sk-ant-...
# OPENAI_API_KEY=sk-...
# OLLAMA_HOST=http://localhost:11434
# OLLAMA_MODEL=llama3.2
# Processing # Processing
USE_GPU=true USE_GPU=true
GPU_MEMORY_LIMIT=6144 GPU_MEMORY_LIMIT=6144
@ -51,53 +24,14 @@ ENABLE_OCR=false
DEBUG=false DEBUG=false
CLOUD_MODE=false CLOUD_MODE=false
DEMO_MODE=false DEMO_MODE=false
# Product identifier reported in cf-orch coordinator analytics for per-app breakdown
CF_APP_NAME=kiwi
# USE_ORCH_SCHEDULER: use coordinator-aware multi-GPU scheduler instead of local FIFO.
# Unset = auto-detect: true if CLOUD_MODE or circuitforge_orch is installed (paid+ local).
# Set false to force LocalScheduler even when cf-orch is present.
# USE_ORCH_SCHEDULER=false
# Cloud mode (set in compose.cloud.yml; also set here for reference) # Cloud mode (set in compose.cloud.yml; also set here for reference)
# CLOUD_DATA_ROOT=/devl/kiwi-cloud-data # CLOUD_DATA_ROOT=/devl/kiwi-cloud-data
# KIWI_DB=data/kiwi.db # local-mode DB path override # KIWI_DB=data/kiwi.db # local-mode DB path override
# DEV ONLY: bypass JWT auth for these IPs/CIDRs (LAN testing without Caddy in the path).
# NEVER set in production.
# IMPORTANT: Docker port mapping NATs source IPs to the bridge gateway. When hitting
# localhost:8515 (host → Docker → nginx → API), nginx sees 192.168.80.1, not 127.0.0.1.
# Include the Docker bridge CIDR to allow localhost and LAN access through nginx.
# Run: docker network inspect kiwi-cloud_kiwi-cloud-net | grep Subnet
# Example: CLOUD_AUTH_BYPASS_IPS=10.1.10.0/24,127.0.0.1,::1,192.168.80.0/20
# CLOUD_AUTH_BYPASS_IPS=
# Heimdall license server (required for cloud tier resolution) # Heimdall license server (required for cloud tier resolution)
# HEIMDALL_URL=https://license.circuitforge.tech # HEIMDALL_URL=https://license.circuitforge.tech
# HEIMDALL_ADMIN_TOKEN= # HEIMDALL_ADMIN_TOKEN=
# Directus JWT (must match cf-directus SECRET env var exactly, including base64 == padding) # Directus JWT (must match cf-directus SECRET env var)
# DIRECTUS_JWT_SECRET= # DIRECTUS_JWT_SECRET=
# E2E test account (Directus — free tier, used by automated tests)
# E2E_TEST_EMAIL=e2e@circuitforge.tech
# E2E_TEST_PASSWORD=
# E2E_TEST_USER_ID=
# In-app feedback → Forgejo issue creation
# FORGEJO_API_TOKEN=
# FORGEJO_REPO=Circuit-Forge/kiwi
# FORGEJO_API_URL=https://git.opensourcesolarpunk.com/api/v1
# Affiliate links (optional — plain URLs are shown if unset)
# Amazon Associates tag (circuitforge_core.affiliates, retailer="amazon")
# AMAZON_ASSOCIATES_TAG=circuitforge-20
# Instacart affiliate ID (circuitforge_core.affiliates, retailer="instacart")
# INSTACART_AFFILIATE_ID=circuitforge
# Walmart Impact network affiliate ID (inline, path-based redirect)
# WALMART_AFFILIATE_ID=
# Community PostgreSQL — shared across CF products (cloud only; leave unset for local dev)
# Points at cf-orch's cf-community-postgres container (port 5434 on the orch host).
# When unset, community write paths fail soft with a plain-language message.
# COMMUNITY_DB_URL=postgresql://cf_community:changeme@cf-orch-host:5434/cf_community
# COMMUNITY_PSEUDONYM_SALT=change-this-to-a-random-32-char-string

View file

@ -1,62 +0,0 @@
# Kiwi CI — lint, type-check, test on PR/push
# Full-stack: FastAPI (Python) + Vue 3 SPA (Node)
# Adapted from Circuit-Forge/cf-agents workflows/ci.yml (cf-agents#4 tracks the
# upstream ci-fullstack.yml variant; update this file when that lands).
#
# Note: frontend has no test suite yet — CI runs typecheck only.
# Add `npm run test` when vitest is wired (kiwi#XX).
#
# circuitforge-core is not on PyPI — installed from Forgejo git (public repo).
name: CI
on:
push:
branches: [main, 'feature/**', 'fix/**']
pull_request:
branches: [main]
jobs:
backend:
name: Backend (Python)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
cache: pip
- name: Install circuitforge-core
run: pip install git+https://git.opensourcesolarpunk.com/Circuit-Forge/circuitforge-core.git@main
- name: Install dependencies
run: pip install -e ".[dev]" || pip install -e . pytest pytest-asyncio httpx ruff
- name: Lint
run: ruff check .
- name: Test
run: pytest tests/ -v --tb=short
frontend:
name: Frontend (Vue)
runs-on: ubuntu-latest
defaults:
run:
working-directory: frontend
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: '20'
cache: npm
cache-dependency-path: frontend/package-lock.json
- name: Install dependencies
run: npm ci
- name: Type check
run: npx vue-tsc --noEmit

View file

@ -1,34 +0,0 @@
# Mirror push to GitHub and Codeberg on every push to main or tag.
# Copied from Circuit-Forge/cf-agents workflows/mirror.yml
# Required secrets: GITHUB_MIRROR_TOKEN, CODEBERG_MIRROR_TOKEN
name: Mirror
on:
push:
branches: [main]
tags: ['v*']
jobs:
mirror:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Mirror to GitHub
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_MIRROR_TOKEN }}
REPO: ${{ github.event.repository.name }}
run: |
git remote add github "https://x-access-token:${GITHUB_TOKEN}@github.com/CircuitForgeLLC/${REPO}.git"
git push github --mirror
- name: Mirror to Codeberg
env:
CODEBERG_TOKEN: ${{ secrets.CODEBERG_MIRROR_TOKEN }}
REPO: ${{ github.event.repository.name }}
run: |
git remote add codeberg "https://CircuitForge:${CODEBERG_TOKEN}@codeberg.org/CircuitForge/${REPO}.git"
git push codeberg --mirror

View file

@ -1,71 +0,0 @@
# Tag-triggered release workflow.
# Generates changelog and creates Forgejo release on v* tags.
# Copied from Circuit-Forge/cf-agents workflows/release.yml
#
# Docker push is intentionally disabled — BSL 1.1 registry policy not yet resolved.
# Tracked in Circuit-Forge/cf-agents#3. Re-enable the Docker steps when that lands.
#
# Required secrets: FORGEJO_RELEASE_TOKEN
# (GHCR_TOKEN not needed until Docker push is enabled)
name: Release
on:
push:
tags: ['v*']
jobs:
release:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
# ── Changelog ────────────────────────────────────────────────────────────
- name: Generate changelog
uses: orhun/git-cliff-action@v3
id: cliff
with:
config: .cliff.toml
args: --latest --strip header
env:
OUTPUT: CHANGES.md
# ── Docker (disabled — BSL registry policy pending cf-agents#3) ──────────
# - name: Set up QEMU
# uses: docker/setup-qemu-action@v3
# - name: Set up Buildx
# uses: docker/setup-buildx-action@v3
# - name: Log in to GHCR
# uses: docker/login-action@v3
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GHCR_TOKEN }}
# - name: Build and push Docker image
# uses: docker/build-push-action@v6
# with:
# context: .
# push: true
# platforms: linux/amd64,linux/arm64
# tags: |
# ghcr.io/circuitforgellc/kiwi:${{ github.ref_name }}
# ghcr.io/circuitforgellc/kiwi:latest
# cache-from: type=gha
# cache-to: type=gha,mode=max
# ── Forgejo Release ───────────────────────────────────────────────────────
- name: Create Forgejo release
env:
FORGEJO_TOKEN: ${{ secrets.FORGEJO_RELEASE_TOKEN }}
REPO: ${{ github.event.repository.name }}
TAG: ${{ github.ref_name }}
NOTES: ${{ steps.cliff.outputs.content }}
run: |
curl -sS -X POST \
"https://git.opensourcesolarpunk.com/api/v1/repos/Circuit-Forge/${REPO}/releases" \
-H "Authorization: token ${FORGEJO_TOKEN}" \
-H "Content-Type: application/json" \
-d "$(jq -n --arg tag "$TAG" --arg body "$NOTES" \
'{tag_name: $tag, name: $tag, body: $body}')"

View file

@ -1,59 +0,0 @@
# Kiwi CI — runs on GitHub mirror for public credibility badge.
# Forgejo (.forgejo/workflows/ci.yml) is the canonical CI — keep these in sync.
# No Forgejo-specific secrets used here; circuitforge-core is public on Forgejo.
#
# Note: frontend has no test suite yet — CI runs typecheck only.
# Add 'npm run test' when vitest is wired.
name: CI
on:
push:
branches: [main]
pull_request:
branches: [main]
jobs:
backend:
name: Backend (Python)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
with:
python-version: '3.11'
cache: pip
- name: Install circuitforge-core
run: pip install git+https://git.opensourcesolarpunk.com/Circuit-Forge/circuitforge-core.git@main
- name: Install dependencies
run: pip install -e . pytest pytest-asyncio httpx ruff
- name: Lint
run: ruff check .
- name: Test
run: pytest tests/ -v --tb=short
frontend:
name: Frontend (Vue)
runs-on: ubuntu-latest
defaults:
run:
working-directory: frontend
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: '20'
cache: npm
cache-dependency-path: frontend/package-lock.json
- name: Install dependencies
run: npm ci
- name: Type check
run: npx vue-tsc --noEmit

9
.gitignore vendored
View file

@ -1,7 +1,4 @@
# CLAUDE.md — gitignored per BSL 1.1 commercial policy
CLAUDE.md
# Superpowers brainstorming artifacts # Superpowers brainstorming artifacts
.superpowers/ .superpowers/
@ -22,9 +19,3 @@ dist/
# Data directories # Data directories
data/ data/
# Test artifacts (MagicMock sqlite files from pytest)
<MagicMock*
# Playwright / debug screenshots
debug-screenshots/

View file

@ -1,34 +0,0 @@
# Kiwi gitleaks config — extends base CircuitForge config with local rules
[extend]
path = "/Library/Development/CircuitForge/circuitforge-hooks/gitleaks.toml"
# ── Global allowlist ──────────────────────────────────────────────────────────
# Amazon grocery department IDs (rh=n:<10-digit>) false-positive as phone
# numbers. locale_config.py is a static lookup table with no secrets.
[allowlist]
# Amazon grocery dept IDs (rh=n:<digits>) false-positive as phone numbers.
regexes = [
'''rh=n:\d{8,12}''',
]
# ── Test fixture allowlists ───────────────────────────────────────────────────
[[rules]]
id = "cf-generic-env-token"
description = "Generic KEY=<token> in env-style assignment — catches FORGEJO_API_TOKEN=hex etc."
regex = '''(?i)(token|secret|key|password|passwd|pwd|api_key)\s*[=:]\s*['"]?[A-Za-z0-9\-_]{20,}['"]?'''
[rules.allowlist]
paths = [
'.*test.*',
]
regexes = [
'api_key:\s*ollama',
'api_key:\s*any',
'your-[a-z\-]+-here',
'replace-with-',
'xxxx',
'test-fixture-',
'CFG-KIWI-TEST-',
]

View file

@ -1,7 +0,0 @@
# Findings suppressed here are historical false positives or already-rotated secrets.
# .env was accidentally included in the initial commit; it is now gitignored.
# Rotate DIRECTUS_JWT_SECRET if it has not been changed since 2026-03-30.
# c166e5216 (chore: initial commit) — .env included by mistake
c166e5216af532a08112ef87e8542cd51c184115:.env:generic-api-key:25
c166e5216af532a08112ef87e8542cd51c184115:.env:cf-generic-env-token:25

View file

@ -11,23 +11,13 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
COPY circuitforge-core/ ./circuitforge-core/ COPY circuitforge-core/ ./circuitforge-core/
RUN conda run -n base pip install --no-cache-dir -e ./circuitforge-core RUN conda run -n base pip install --no-cache-dir -e ./circuitforge-core
# Install circuitforge-orch — needed for the cf-orch-agent sidecar (compose.override.yml)
COPY circuitforge-orch/ ./circuitforge-orch/
# Create kiwi conda env and install app # Create kiwi conda env and install app
COPY kiwi/environment.yml . COPY kiwi/environment.yml .
RUN conda env create -f environment.yml RUN conda env create -f environment.yml
COPY kiwi/ ./kiwi/ COPY kiwi/ ./kiwi/
# Install cf-core into the kiwi env BEFORE installing kiwi (kiwi lists it as a dep)
# Remove gitignored config files that may exist locally — defense-in-depth.
# The parent .dockerignore should exclude these, but an explicit rm guarantees
# they never end up in the cloud image regardless of .dockerignore placement.
RUN rm -f /app/kiwi/.env
# Install cf-core and cf-orch into the kiwi env BEFORE installing kiwi
RUN conda run -n kiwi pip install --no-cache-dir -e /app/circuitforge-core RUN conda run -n kiwi pip install --no-cache-dir -e /app/circuitforge-core
RUN conda run -n kiwi pip install --no-cache-dir -e /app/circuitforge-orch
WORKDIR /app/kiwi WORKDIR /app/kiwi
RUN conda run -n kiwi pip install --no-cache-dir -e . RUN conda run -n kiwi pip install --no-cache-dir -e .

View file

@ -1,28 +0,0 @@
Business Source License 1.1
Licensor: Circuit Forge LLC
Licensed Work: Kiwi — Pantry tracking and leftover recipe suggestions
Copyright (c) 2026 Circuit Forge LLC
Additional Use Grant: You may use the Licensed Work for personal,
non-commercial pantry tracking and recipe suggestion
purposes only.
Change Date: 2030-01-01
Change License: MIT License
For the full Business Source License 1.1 text, see:
https://mariadb.com/bsl11/
---
This license applies to the following components of Kiwi:
- app/services/recipe/recipe_engine.py
- app/services/recipe/assembly_recipes.py
- app/services/recipe/llm_recipe.py
- app/services/expiration_predictor.py
- app/tasks/scheduler.py
- app/tasks/runner.py
- app/tiers.py
- app/cloud_session.py
- frontend/src/components/RecipesView.vue
- frontend/src/stores/recipes.ts

View file

@ -1,34 +0,0 @@
MIT License
Copyright (c) 2026 Circuit Forge LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---
This license applies to the following components of Kiwi:
- app/api/endpoints/inventory.py
- app/api/endpoints/ocr.py
- app/db/store.py
- app/db/migrations/
- app/core/config.py
- scripts/pipeline/
- scripts/download_datasets.py
- scripts/backfill_texture_profiles.py

View file

@ -6,11 +6,7 @@
Scan barcodes, photograph receipts, and get recipe ideas based on what you already have — before it expires. Scan barcodes, photograph receipts, and get recipe ideas based on what you already have — before it expires.
**LLM support is optional.** Inventory tracking, barcode scanning, expiry alerts, CSV export, and receipt upload all work without any LLM configured. AI features (receipt OCR, recipe suggestions, meal planning) activate when a backend is available and are BYOK-unlockable at any tier. **Status:** Pre-alpha · CircuitForge LLC
**Status:** Beta · CircuitForge LLC
**[Documentation](https://docs.circuitforge.tech/kiwi/)** · [circuitforge.tech](https://circuitforge.tech)
--- ---
@ -18,14 +14,9 @@ Scan barcodes, photograph receipts, and get recipe ideas based on what you alrea
- **Inventory tracking** — add items by barcode scan, receipt upload, or manually - **Inventory tracking** — add items by barcode scan, receipt upload, or manually
- **Expiry alerts** — know what's about to go bad - **Expiry alerts** — know what's about to go bad
- **Recipe browser** — browse the full recipe corpus by cuisine, meal type, dietary preference, or main ingredient; pantry match percentage shown inline (Free) - **Receipt OCR** — extract line items from receipt photos automatically (Paid tier)
- **Saved recipes** — bookmark any recipe with notes, a 0–5 star rating, and free-text style tags (Free); organize into named collections (Paid) - **Recipe suggestions** — LLM-powered ideas based on what's expiring (Paid tier, BYOK-unlockable)
- **Receipt OCR** — extract line items from receipt photos automatically (Paid tier, BYOK-unlockable) - **Leftover mode** — prioritize nearly-expired items in recipe ranking (Premium tier)
- **Recipe suggestions** — four levels from pantry-match to full LLM generation (Paid tier, BYOK-unlockable)
- **Style auto-classifier** — LLM suggests style tags (comforting, hands-off, quick, etc.) for saved recipes (Paid tier, BYOK-unlockable)
- **Leftover mode** — prioritize nearly-expired items in recipe ranking (Free, 5/day; unlimited at Paid+)
- **LLM backend config** — configure inference via `circuitforge-core` env-var system; BYOK unlocks Paid AI features at any tier
- **Feedback FAB** — in-app feedback button; status probed on load, hidden if CF feedback endpoint unreachable
## Stack ## Stack
@ -61,16 +52,11 @@ cp .env.example .env
| Receipt upload | ✓ | ✓ | ✓ | | Receipt upload | ✓ | ✓ | ✓ |
| Expiry alerts | ✓ | ✓ | ✓ | | Expiry alerts | ✓ | ✓ | ✓ |
| CSV export | ✓ | ✓ | ✓ | | CSV export | ✓ | ✓ | ✓ |
| Recipe browser (domain/category) | ✓ | ✓ | ✓ |
| Save recipes + notes + star rating | ✓ | ✓ | ✓ |
| Style tags (manual, free-text) | ✓ | ✓ | ✓ |
| Receipt OCR | BYOK | ✓ | ✓ | | Receipt OCR | BYOK | ✓ | ✓ |
| Recipe suggestions (L1–L4) | BYOK | ✓ | ✓ | | Recipe suggestions | BYOK | ✓ | ✓ |
| Named recipe collections | — | ✓ | ✓ |
| LLM style auto-classifier | — | BYOK | ✓ |
| Meal planning | — | ✓ | ✓ | | Meal planning | — | ✓ | ✓ |
| Multi-household | — | — | ✓ | | Multi-household | — | — | ✓ |
| Leftover mode (5/day) | ✓ | ✓ | ✓ | | Leftover mode | — | — | ✓ |
BYOK = bring your own LLM backend (configure `~/.config/circuitforge/llm.yaml`) BYOK = bring your own LLM backend (configure `~/.config/circuitforge/llm.yaml`)

View file

@ -3,5 +3,5 @@
Kiwi: Pantry tracking and leftover recipe suggestions. Kiwi: Pantry tracking and leftover recipe suggestions.
""" """
__version__ = "0.2.0" __version__ = "0.1.0"
__author__ = "Alan 'pyr0ball' Weinstock" __author__ = "Alan 'pyr0ball' Weinstock"

View file

@ -1,358 +0,0 @@
# app/api/endpoints/community.py
# MIT License
from __future__ import annotations
import asyncio
import logging
import re
import sqlite3
from datetime import datetime, timezone
from fastapi import APIRouter, Depends, HTTPException, Request, Response
from app.cloud_session import CloudUser, get_session
from app.core.config import settings
from app.db.store import Store
from app.services.community.feed import posts_to_rss
# Module-level logger and the router mounted at /community.
logger = logging.getLogger(__name__)
router = APIRouter(prefix="/community", tags=["community"])

# Process-wide community store singleton. Stays None until
# init_community_store() is called with a configured COMMUNITY_DB_URL;
# every endpoint below fails soft (or 503s) while it is None.
_community_store = None


def _get_community_store():
    """Return the module-level community store, or None when not configured."""
    return _community_store
def init_community_store(community_db_url: str | None) -> None:
    """Initialize the shared community store from a database DSN.

    Called once at startup. When *community_db_url* is falsy the store is left
    as None and write features are disabled (read endpoints fail soft).
    The heavy imports are deferred so instances without the community extras
    can still import this module.

    Args:
        community_db_url: DSN for the shared community database, or None.
    """
    global _community_store
    if not community_db_url:
        logger.info(
            "COMMUNITY_DB_URL not set — community write features disabled. "
            "Browse still works via cloud feed."
        )
        return
    from circuitforge_core.community import CommunityDB
    from app.services.community.community_store import KiwiCommunityStore

    db = CommunityDB(dsn=community_db_url)
    db.run_migrations()  # bring schema up to date before first use
    _community_store = KiwiCommunityStore(db)
    logger.info("Community store initialized.")
def _visible(post, session=None) -> bool:
"""Return False for premium-tier posts when the session is not paid/premium."""
tier = getattr(post, "tier", None)
if tier == "premium":
if session is None or getattr(session, "tier", None) not in ("paid", "premium"):
return False
return True
@router.get("/posts")
async def list_posts(
post_type: str | None = None,
dietary_tags: str | None = None,
allergen_exclude: str | None = None,
page: int = 1,
page_size: int = 20,
):
store = _get_community_store()
if store is None:
return {
"posts": [],
"total": 0,
"page": page,
"page_size": page_size,
"note": "Community DB not available on this instance.",
}
dietary = [t.strip() for t in dietary_tags.split(",")] if dietary_tags else None
allergen_ex = [t.strip() for t in allergen_exclude.split(",")] if allergen_exclude else None
offset = (page - 1) * min(page_size, 100)
posts = await asyncio.to_thread(
store.list_posts,
limit=min(page_size, 100),
offset=offset,
post_type=post_type,
dietary_tags=dietary,
allergen_exclude=allergen_ex,
)
visible = [_post_to_dict(p) for p in posts if _visible(p)]
return {"posts": visible, "total": len(visible), "page": page, "page_size": page_size}
@router.get("/posts/{slug}")
async def get_post(slug: str, request: Request):
store = _get_community_store()
if store is None:
raise HTTPException(status_code=503, detail="Community DB not available on this instance.")
post = await asyncio.to_thread(store.get_post_by_slug, slug)
if post is None:
raise HTTPException(status_code=404, detail="Post not found.")
accept = request.headers.get("accept", "")
if "application/activity+json" in accept or "application/ld+json" in accept:
from app.services.community.ap_compat import post_to_ap_json_ld
base_url = str(request.base_url).rstrip("/")
return post_to_ap_json_ld(_post_to_dict(post), base_url=base_url)
return _post_to_dict(post)
@router.get("/feed.rss")
async def get_rss_feed(request: Request):
store = _get_community_store()
posts_data: list[dict] = []
if store is not None:
posts = await asyncio.to_thread(store.list_posts, limit=50)
posts_data = [_post_to_dict(p) for p in posts]
base_url = str(request.base_url).rstrip("/")
rss = posts_to_rss(posts_data, base_url=base_url)
return Response(content=rss, media_type="application/rss+xml; charset=utf-8")
@router.get("/local-feed")
async def local_feed():
store = _get_community_store()
if store is None:
return []
posts = await asyncio.to_thread(store.list_posts, limit=50)
return [_post_to_dict(p) for p in posts]
@router.get("/hall-of-chaos")
async def hall_of_chaos():
"""Hidden easter egg endpoint -- returns the 10 most chaotic bloopers."""
store = _get_community_store()
if store is None:
return {"posts": [], "chaos_level": 0}
posts = await asyncio.to_thread(
store.list_posts, limit=10, post_type="recipe_blooper"
)
return {
"posts": [_post_to_dict(p) for p in posts],
"chaos_level": len(posts),
}
# Accepted values for the "post_type" field of a publish request.
_VALID_POST_TYPES = {"plan", "recipe_success", "recipe_blooper"}
# Hard length caps for user-supplied text fields.
_MAX_TITLE_LEN = 200
_MAX_TEXT_LEN = 2000


def _validate_publish_body(body: dict) -> None:
    """Raise HTTPException(422) for any invalid fields in a publish request."""
    if body.get("post_type", "plan") not in _VALID_POST_TYPES:
        allowed = ", ".join(sorted(_VALID_POST_TYPES))
        raise HTTPException(
            status_code=422,
            detail=f"post_type must be one of: {allowed}",
        )
    if len(body.get("title") or "") > _MAX_TITLE_LEN:
        raise HTTPException(status_code=422, detail=f"title exceeds {_MAX_TITLE_LEN} character limit.")
    # Free-text fields share one generous cap.
    for field in ("description", "outcome_notes", "recipe_name"):
        value = body.get(field)
        if value and len(str(value)) > _MAX_TEXT_LEN:
            raise HTTPException(status_code=422, detail=f"{field} exceeds {_MAX_TEXT_LEN} character limit.")
    # Photos must be served over TLS.
    photo_url = body.get("photo_url")
    if photo_url and not str(photo_url).startswith("https://"):
        raise HTTPException(status_code=422, detail="photo_url must be an https:// URL.")
@router.post("/posts", status_code=201)
async def publish_post(body: dict, session: CloudUser = Depends(get_session)):
from app.tiers import can_use
if not can_use("community_publish", session.tier, session.has_byok):
raise HTTPException(status_code=402, detail="Community publishing requires Paid tier.")
_validate_publish_body(body)
store = _get_community_store()
if store is None:
raise HTTPException(
status_code=503,
detail="This Kiwi instance is not connected to a community database. "
"Publishing is only available on cloud instances.",
)
from app.services.community.community_store import get_or_create_pseudonym
def _get_pseudonym():
s = Store(session.db)
try:
return get_or_create_pseudonym(
store=s,
directus_user_id=session.user_id,
requested_name=body.get("pseudonym_name"),
)
finally:
s.close()
try:
pseudonym = await asyncio.to_thread(_get_pseudonym)
except ValueError as exc:
raise HTTPException(status_code=422, detail=str(exc)) from exc
recipe_ids = [slot["recipe_id"] for slot in body.get("slots", []) if slot.get("recipe_id")]
from app.services.community.element_snapshot import compute_snapshot
def _snapshot():
s = Store(session.db)
try:
return compute_snapshot(recipe_ids=recipe_ids, store=s)
finally:
s.close()
snapshot = await asyncio.to_thread(_snapshot)
post_type = body.get("post_type", "plan")
slug_title = re.sub(r"[^a-z0-9]+", "-", (body.get("title") or "plan").lower()).strip("-")
today = datetime.now(timezone.utc).strftime("%Y-%m-%d")
slug = f"kiwi-{_post_type_prefix(post_type)}-{pseudonym.lower().replace(' ', '')}-{today}-{slug_title}"[:120]
from circuitforge_core.community.models import CommunityPost
post = CommunityPost(
slug=slug,
pseudonym=pseudonym,
post_type=post_type,
published=datetime.now(timezone.utc),
title=(body.get("title") or "Untitled")[:_MAX_TITLE_LEN],
description=body.get("description"),
photo_url=body.get("photo_url"),
slots=body.get("slots", []),
recipe_id=body.get("recipe_id"),
recipe_name=body.get("recipe_name"),
level=body.get("level"),
outcome_notes=body.get("outcome_notes"),
seasoning_score=snapshot.seasoning_score,
richness_score=snapshot.richness_score,
brightness_score=snapshot.brightness_score,
depth_score=snapshot.depth_score,
aroma_score=snapshot.aroma_score,
structure_score=snapshot.structure_score,
texture_profile=snapshot.texture_profile,
dietary_tags=list(snapshot.dietary_tags),
allergen_flags=list(snapshot.allergen_flags),
flavor_molecules=list(snapshot.flavor_molecules),
fat_pct=snapshot.fat_pct,
protein_pct=snapshot.protein_pct,
moisture_pct=snapshot.moisture_pct,
)
try:
inserted = await asyncio.to_thread(store.insert_post, post)
except sqlite3.IntegrityError as exc:
raise HTTPException(
status_code=409,
detail="A post with this title already exists today. Try a different title.",
) from exc
return _post_to_dict(inserted)
@router.delete("/posts/{slug}", status_code=204)
async def delete_post(slug: str, session: CloudUser = Depends(get_session)):
store = _get_community_store()
if store is None:
raise HTTPException(status_code=503, detail="Community DB not available.")
def _get_pseudonym():
s = Store(session.db)
try:
return s.get_current_pseudonym(session.user_id)
finally:
s.close()
pseudonym = await asyncio.to_thread(_get_pseudonym)
if not pseudonym:
raise HTTPException(status_code=400, detail="No pseudonym set. Cannot delete posts.")
deleted = await asyncio.to_thread(store.delete_post, slug=slug, pseudonym=pseudonym)
if not deleted:
raise HTTPException(status_code=404, detail="Post not found or you are not the author.")
@router.post("/posts/{slug}/fork", status_code=201)
async def fork_post(slug: str, session: CloudUser = Depends(get_session)):
store = _get_community_store()
if store is None:
raise HTTPException(status_code=503, detail="Community DB not available.")
post = await asyncio.to_thread(store.get_post_by_slug, slug)
if post is None:
raise HTTPException(status_code=404, detail="Post not found.")
if post.post_type != "plan":
raise HTTPException(status_code=400, detail="Only plan posts can be forked as a meal plan.")
required_slot_keys = {"day", "meal_type", "recipe_id"}
if any(not required_slot_keys.issubset(slot) for slot in post.slots):
raise HTTPException(status_code=400, detail="Post contains malformed slots and cannot be forked.")
from datetime import date
week_start = date.today().strftime("%Y-%m-%d")
def _create_plan():
s = Store(session.db)
try:
meal_types = list({slot["meal_type"] for slot in post.slots})
plan = s.create_meal_plan(week_start=week_start, meal_types=meal_types or ["dinner"])
for slot in post.slots:
s.assign_recipe_to_slot(
plan_id=plan["id"],
day_of_week=slot["day"],
meal_type=slot["meal_type"],
recipe_id=slot["recipe_id"],
)
return plan
finally:
s.close()
plan = await asyncio.to_thread(_create_plan)
return {"plan_id": plan["id"], "week_start": plan["week_start"], "forked_from": slug}
@router.post("/posts/{slug}/fork-adapt", status_code=201)
async def fork_adapt_post(slug: str, session: CloudUser = Depends(get_session)):
from app.tiers import can_use
if not can_use("community_fork_adapt", session.tier, session.has_byok):
raise HTTPException(status_code=402, detail="Fork with adaptation requires Paid tier or BYOK.")
# Stub: full LLM adaptation deferred
raise HTTPException(status_code=501, detail="Fork-adapt not yet implemented.")
def _post_to_dict(post) -> dict:
return {
"slug": post.slug,
"pseudonym": post.pseudonym,
"post_type": post.post_type,
"published": post.published.isoformat() if hasattr(post.published, "isoformat") else str(post.published),
"title": post.title,
"description": post.description,
"photo_url": post.photo_url,
"slots": list(post.slots),
"recipe_id": post.recipe_id,
"recipe_name": post.recipe_name,
"level": post.level,
"outcome_notes": post.outcome_notes,
"element_profiles": {
"seasoning_score": post.seasoning_score,
"richness_score": post.richness_score,
"brightness_score": post.brightness_score,
"depth_score": post.depth_score,
"aroma_score": post.aroma_score,
"structure_score": post.structure_score,
"texture_profile": post.texture_profile,
},
"dietary_tags": list(post.dietary_tags),
"allergen_flags": list(post.allergen_flags),
"flavor_molecules": list(post.flavor_molecules),
"fat_pct": post.fat_pct,
"protein_pct": post.protein_pct,
"moisture_pct": post.moisture_pct,
}
def _post_type_prefix(post_type: str) -> str:
return {"plan": "plan", "recipe_success": "success", "recipe_blooper": "blooper"}.get(post_type, "post")

View file

@ -1,11 +1,9 @@
"""Export endpoints — CSV and JSON export of user data.""" """Export endpoints — CSV/Excel of receipt and inventory data."""
from __future__ import annotations from __future__ import annotations
import asyncio import asyncio
import csv import csv
import io import io
import json
from datetime import datetime, timezone
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
from fastapi.responses import StreamingResponse from fastapi.responses import StreamingResponse
@ -47,33 +45,3 @@ async def export_inventory_csv(store: Store = Depends(get_store)):
media_type="text/csv", media_type="text/csv",
headers={"Content-Disposition": "attachment; filename=inventory.csv"}, headers={"Content-Disposition": "attachment; filename=inventory.csv"},
) )
@router.get("/json")
async def export_full_json(store: Store = Depends(get_store)):
    """Export full pantry inventory + saved recipes as a single JSON file.

    Intended for data portability — users can import this into another
    Kiwi instance or keep it as an offline backup.

    Returns a StreamingResponse with a dated attachment filename
    (kiwi-export-YYYYMMDD.json).
    """
    inventory, saved = await asyncio.gather(
        asyncio.to_thread(store.list_inventory),
        asyncio.to_thread(store.get_saved_recipes),
    )
    now = datetime.now(timezone.utc)
    export_doc = {
        "kiwi_export": {
            "version": "1.0",
            "exported_at": now.isoformat(),
            "inventory": [dict(row) for row in inventory],
            "saved_recipes": [dict(row) for row in saved],
        }
    }
    # default=str stringifies non-JSON types (dates, Decimal) from DB rows.
    body = json.dumps(export_doc, default=str, indent=2)
    filename = f"kiwi-export-{now.strftime('%Y%m%d')}.json"
    # Fix: the computed filename was previously unused and the header sent a
    # literal placeholder instead of the dated name.
    return StreamingResponse(
        iter([body]),
        media_type="application/json",
        headers={"Content-Disposition": f"attachment; filename={filename}"},
    )

View file

@ -1,9 +0,0 @@
"""Feedback router — provided by circuitforge-core."""
from circuitforge_core.api import make_feedback_router
from app.core.config import settings
router = make_feedback_router(
repo="Circuit-Forge/kiwi",
product="kiwi",
demo_mode_fn=lambda: settings.DEMO_MODE,
)

View file

@ -1,103 +0,0 @@
"""Screenshot attachment endpoint for in-app feedback.
After the cf-core feedback router creates a Forgejo issue, the frontend
can call POST /feedback/attach to upload a screenshot and pin it as a
comment on that issue.
The endpoint is separate from the cf-core router so Kiwi owns it
without modifying shared infrastructure.
"""
from __future__ import annotations
import base64
import os
import requests
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field
router = APIRouter()

# Forgejo API root; overridable via env for self-hosted/test instances.
_FORGEJO_BASE = os.environ.get(
    "FORGEJO_API_URL", "https://git.opensourcesolarpunk.com/api/v1"
)
# Repository whose issues receive the screenshot comments.
_REPO = "Circuit-Forge/kiwi"
_MAX_BYTES = 5 * 1024 * 1024  # 5 MB
class AttachRequest(BaseModel):
    """Request body for attaching a screenshot to an existing Forgejo issue."""
    # Number of the issue the screenshot comment will be pinned to.
    issue_number: int
    filename: str = Field(default="screenshot.png", max_length=80)
    image_b64: str  # data URI or raw base64
class AttachResponse(BaseModel):
    """Response: URL of the Forgejo comment that embeds the uploaded screenshot."""
    comment_url: str
def _forgejo_headers() -> dict[str, str]:
token = os.environ.get("FORGEJO_API_TOKEN", "")
return {"Authorization": f"token {token}"}
def _decode_image(image_b64: str) -> tuple[bytes, str]:
"""Return (raw_bytes, mime_type) from a base64 string or data URI."""
if image_b64.startswith("data:"):
header, _, data = image_b64.partition(",")
mime = header.split(";")[0].split(":")[1] if ":" in header else "image/png"
else:
data = image_b64
mime = "image/png"
return base64.b64decode(data), mime
@router.post("/attach", response_model=AttachResponse)
def attach_screenshot(payload: AttachRequest) -> AttachResponse:
    """Upload a screenshot to a Forgejo issue as a comment with embedded image.

    The image is uploaded as an issue asset, then referenced in a comment
    so it is visible inline when the issue is viewed.

    Raises:
        HTTPException 503: FORGEJO_API_TOKEN is not configured.
        HTTPException 413: decoded image exceeds the 5 MB cap.
        HTTPException 502: either Forgejo API call failed.
    """
    token = os.environ.get("FORGEJO_API_TOKEN", "")
    if not token:
        raise HTTPException(status_code=503, detail="Feedback not configured.")
    raw_bytes, mime = _decode_image(payload.image_b64)
    # Size check runs on decoded bytes, not the (larger) base64 text.
    if len(raw_bytes) > _MAX_BYTES:
        raise HTTPException(
            status_code=413,
            detail=f"Screenshot exceeds 5 MB limit ({len(raw_bytes) // 1024} KB received).",
        )
    # Upload image as issue asset
    asset_resp = requests.post(
        f"{_FORGEJO_BASE}/repos/{_REPO}/issues/{payload.issue_number}/assets",
        headers=_forgejo_headers(),
        files={"attachment": (payload.filename, raw_bytes, mime)},
        timeout=20,
    )
    if not asset_resp.ok:
        # Truncate upstream error bodies so detail stays readable.
        raise HTTPException(
            status_code=502,
            detail=f"Forgejo asset upload failed: {asset_resp.text[:200]}",
        )
    asset_url = asset_resp.json().get("browser_download_url", "")
    # Pin as a comment so the image is visible inline
    comment_body = f"**Screenshot attached by reporter:**\n\n![screenshot]({asset_url})"
    comment_resp = requests.post(
        f"{_FORGEJO_BASE}/repos/{_REPO}/issues/{payload.issue_number}/comments",
        headers={**_forgejo_headers(), "Content-Type": "application/json"},
        json={"body": comment_body},
        timeout=15,
    )
    if not comment_resp.ok:
        raise HTTPException(
            status_code=502,
            detail=f"Forgejo comment failed: {comment_resp.text[:200]}",
        )
    comment_url = comment_resp.json().get("html_url", "")
    return AttachResponse(comment_url=comment_url)

View file

@ -1,217 +0,0 @@
"""Household management endpoints — shared pantry for Premium users."""
from __future__ import annotations
import logging
import os
import secrets
from datetime import datetime, timedelta, timezone
import sqlite3
import requests
from fastapi import APIRouter, Depends, HTTPException
from app.cloud_session import CloudUser, CLOUD_DATA_ROOT, HEIMDALL_URL, HEIMDALL_ADMIN_TOKEN, get_session
from app.db.store import Store
from app.models.schemas.household import (
HouseholdAcceptRequest,
HouseholdAcceptResponse,
HouseholdCreateResponse,
HouseholdInviteResponse,
HouseholdMember,
HouseholdRemoveMemberRequest,
HouseholdStatusResponse,
MessageResponse,
)
log = logging.getLogger(__name__)
router = APIRouter()

# Invite tokens expire after this many days.
_INVITE_TTL_DAYS = 7
# Public base URL used to build invite deep links for the frontend.
_KIWI_BASE_URL = os.environ.get("KIWI_BASE_URL", "https://menagerie.circuitforge.tech/kiwi")
def _require_premium(session: CloudUser = Depends(get_session)) -> CloudUser:
    """FastAPI dependency: allow only Premium-or-better tiers (local counts)."""
    allowed_tiers = ("premium", "ultra", "local")
    if session.tier in allowed_tiers:
        return session
    raise HTTPException(status_code=403, detail="Household features require Premium tier.")
def _require_household_owner(session: CloudUser = Depends(_require_premium)) -> CloudUser:
    """FastAPI dependency: additionally require the caller to own a household."""
    is_owner = bool(session.is_household_owner) and bool(session.household_id)
    if not is_owner:
        raise HTTPException(status_code=403, detail="Only the household owner can perform this action.")
    return session
def _household_store(household_id: str) -> Store:
    """Open the household DB directly (used during invite acceptance).

    Sets row_factory so dict-style column access works on raw conn queries.
    """
    household_dir = CLOUD_DATA_ROOT / f"household_{household_id}"
    household_dir.mkdir(parents=True, exist_ok=True)
    store = Store(household_dir / "kiwi.db")
    store.conn.row_factory = sqlite3.Row
    return store
def _heimdall_post(path: str, body: dict) -> dict:
    """Call Heimdall admin API. Returns response dict or raises HTTPException.

    Returns {} without calling out when HEIMDALL_ADMIN_TOKEN is unset
    (dev mode). Raises 502 on a non-OK response or a network failure.
    """
    if not HEIMDALL_ADMIN_TOKEN:
        log.warning("HEIMDALL_ADMIN_TOKEN not set — household Heimdall call skipped")
        return {}
    try:
        resp = requests.post(
            f"{HEIMDALL_URL}{path}",
            json=body,
            headers={"Authorization": f"Bearer {HEIMDALL_ADMIN_TOKEN}"},
            timeout=10,
        )
        if not resp.ok:
            # HTTPException is not a RequestException, so this propagates.
            raise HTTPException(status_code=502, detail=f"Heimdall error: {resp.text}")
        return resp.json()
    except requests.RequestException as exc:
        # Chain the network error so the original cause survives (PEP 3134).
        raise HTTPException(status_code=502, detail=f"Heimdall unreachable: {exc}") from exc
@router.post("/create", response_model=HouseholdCreateResponse)
async def create_household(session: CloudUser = Depends(_require_premium)):
    """Create a new household. The calling user becomes owner.

    Raises 409 if the caller already belongs to a household, and 500 when
    Heimdall is configured but responds without a household_id.
    """
    if session.household_id:
        raise HTTPException(status_code=409, detail="You are already in a household.")
    data = _heimdall_post("/admin/household/create", {"owner_user_id": session.user_id})
    household_id = data.get("household_id")
    if not household_id:
        # Heimdall returned OK but without a household_id — treat as server error.
        # Fall back to a local stub only when HEIMDALL_ADMIN_TOKEN is unset (dev mode).
        if HEIMDALL_ADMIN_TOKEN:
            raise HTTPException(status_code=500, detail="Heimdall did not return a household_id.")
        household_id = "local-household"
    return HouseholdCreateResponse(
        household_id=household_id,
        message="Household created. Share an invite link to add members.",
    )
@router.get("/status", response_model=HouseholdStatusResponse)
async def household_status(session: CloudUser = Depends(_require_premium)):
    """Return current user's household membership status.

    The member list comes from Heimdall's admin API; any failure there
    degrades to an empty list rather than failing the request.
    """
    if not session.household_id:
        return HouseholdStatusResponse(in_household=False)
    members: list[HouseholdMember] = []
    if HEIMDALL_ADMIN_TOKEN:
        try:
            resp = requests.get(
                f"{HEIMDALL_URL}/admin/household/{session.household_id}",
                headers={"Authorization": f"Bearer {HEIMDALL_ADMIN_TOKEN}"},
                timeout=5,
            )
            if resp.ok:
                raw = resp.json()
                for m in raw.get("members", []):
                    members.append(HouseholdMember(
                        user_id=m["user_id"],
                        joined_at=m.get("joined_at", ""),
                        # Owner is whichever member matches the household's owner_user_id.
                        is_owner=m["user_id"] == raw.get("owner_user_id"),
                    ))
        except Exception as exc:
            # Best-effort: member details are informational, so log and continue.
            log.warning("Could not fetch household members: %s", exc)
    return HouseholdStatusResponse(
        in_household=True,
        household_id=session.household_id,
        is_owner=session.is_household_owner,
        members=members,
    )
@router.post("/invite", response_model=HouseholdInviteResponse)
async def create_invite(session: CloudUser = Depends(_require_household_owner)):
    """Generate a one-time invite token valid for 7 days.

    The token row is written to the household DB; accept_invite validates
    against the same table.
    """
    token = secrets.token_hex(32)
    expires_at = (datetime.now(timezone.utc) + timedelta(days=_INVITE_TTL_DAYS)).isoformat()
    store = Store(session.db)
    try:
        store.conn.execute(
            """INSERT INTO household_invites (token, household_id, created_by, expires_at)
            VALUES (?, ?, ?, ?)""",
            (token, session.household_id, session.user_id, expires_at),
        )
        store.conn.commit()
    finally:
        store.close()
    # Deep link handled by the frontend's #/join route.
    invite_url = f"{_KIWI_BASE_URL}/#/join?household_id={session.household_id}&token={token}"
    return HouseholdInviteResponse(token=token, invite_url=invite_url, expires_at=expires_at)
@router.post("/accept", response_model=HouseholdAcceptResponse)
async def accept_invite(
    body: HouseholdAcceptRequest,
    session: CloudUser = Depends(get_session),
):
    """Accept a household invite. Opens the household DB directly to validate the token.

    Validation order: token exists for this household (404), not already
    used (410), not expired (410). On success the token is marked used and
    membership is registered with Heimdall.
    """
    if session.household_id:
        raise HTTPException(status_code=409, detail="You are already in a household.")
    hh_store = _household_store(body.household_id)
    now = datetime.now(timezone.utc).isoformat()
    try:
        row = hh_store.conn.execute(
            """SELECT token, expires_at, used_at FROM household_invites
            WHERE token = ? AND household_id = ?""",
            (body.token, body.household_id),
        ).fetchone()
        if not row:
            raise HTTPException(status_code=404, detail="Invite not found.")
        if row["used_at"] is not None:
            raise HTTPException(status_code=410, detail="Invite already used.")
        # ISO-8601 timestamps in the same format compare correctly as strings.
        if row["expires_at"] < now:
            raise HTTPException(status_code=410, detail="Invite has expired.")
        hh_store.conn.execute(
            "UPDATE household_invites SET used_at = ?, used_by = ? WHERE token = ?",
            (now, session.user_id, body.token),
        )
        hh_store.conn.commit()
    finally:
        hh_store.close()
    # Membership itself lives in Heimdall; this is a no-op stub in dev mode.
    _heimdall_post("/admin/household/add-member", {
        "household_id": body.household_id,
        "user_id": session.user_id,
    })
    return HouseholdAcceptResponse(
        message="You have joined the household. Reload the app to switch to the shared pantry.",
        household_id=body.household_id,
    )
@router.post("/leave", response_model=MessageResponse)
async def leave_household(session: CloudUser = Depends(_require_premium)) -> MessageResponse:
    """Leave the current household (non-owners only).

    Raises 400 when the caller is not in a household or is the owner —
    owners must delete the household instead.
    """
    if not session.household_id:
        raise HTTPException(status_code=400, detail="You are not in a household.")
    if session.is_household_owner:
        raise HTTPException(status_code=400, detail="The household owner cannot leave. Delete the household instead.")
    payload = {
        "household_id": session.household_id,
        "user_id": session.user_id,
    }
    _heimdall_post("/admin/household/remove-member", payload)
    return MessageResponse(message="You have left the household. Reload the app to return to your personal pantry.")
@router.post("/remove-member", response_model=MessageResponse)
async def remove_member(
    body: HouseholdRemoveMemberRequest,
    session: CloudUser = Depends(_require_household_owner),
) -> MessageResponse:
    """Remove a member from the household (owner only).

    Owners removing themselves must use /leave; that request is rejected
    with 400.
    """
    if body.user_id == session.user_id:
        raise HTTPException(status_code=400, detail="Use /leave to remove yourself.")
    payload = {
        "household_id": session.household_id,
        "user_id": body.user_id,
    }
    _heimdall_post("/admin/household/remove-member", payload)
    return MessageResponse(message=f"Member {body.user_id} removed from household.")

View file

@ -1,185 +0,0 @@
"""Kiwi — /api/v1/imitate/samples endpoint for Avocet Imitate tab.
Returns the actual assembled prompt Kiwi sends to its LLM for recipe generation,
including the full pantry context (expiry-first ordering), dietary constraints
(from user_settings if present), and the Level 3 format instructions.
"""
from __future__ import annotations
from fastapi import APIRouter, Depends
from app.cloud_session import get_session, CloudUser
from app.db.store import Store
router = APIRouter()
# Plain-text output template appended to Level 3 (structured) prompts.
_LEVEL3_FORMAT = [
    "",
    "Reply using EXACTLY this plain-text format — no markdown, no bold, no extra commentary:",
    "Title: <name of the dish>",
    "Ingredients: <comma-separated list>",
    "Directions:",
    "1. <first step>",
    "2. <second step>",
    "3. <continue for each step>",
    "Notes: <optional tips>",
]

# Shorter template for Level 4 (wildcard/creative) prompts.
_LEVEL4_FORMAT = [
    "",
    "Reply using EXACTLY this plain-text format — no markdown, no bold:",
    "Title: <name of the dish>",
    "Ingredients: <comma-separated list>",
    "Directions:",
    "1. <first step>",
    "2. <second step>",
    "Notes: <optional tips>",
]
def _read_user_settings(store: Store) -> dict:
"""Read all key/value pairs from user_settings table."""
try:
rows = store.conn.execute("SELECT key, value FROM user_settings").fetchall()
return {r["key"]: r["value"] for r in rows}
except Exception:
return {}
def _build_recipe_prompt(
    pantry_names: list[str],
    expiring_names: list[str],
    constraints: list[str],
    allergies: list[str],
    level: int = 3,
) -> str:
    """Assemble the recipe generation prompt matching Kiwi's Level 3/4 format."""
    # Expiring items first, then remaining pantry items (deduped)
    expiring_set = set(expiring_names)
    ordered = list(expiring_names)
    ordered.extend(name for name in pantry_names if name not in expiring_set)
    if not ordered:
        ordered = pantry_names
    joined = ', '.join(ordered)
    if level == 4:
        parts = [
            "Surprise me with a creative, unexpected recipe.",
            "Only use ingredients that make culinary sense together. "
            "Do not force flavoured/sweetened items (vanilla yoghurt, flavoured syrups, jam) into savoury dishes.",
            f"Ingredients available: {joined}",
        ]
        if constraints:
            parts.append(f"Constraints: {', '.join(constraints)}")
        if allergies:
            parts.append(f"Must NOT contain: {', '.join(allergies)}")
        parts.append("Treat any mystery ingredient as a wildcard — use your imagination.")
        parts.extend(_LEVEL4_FORMAT)
    else:
        parts = [
            "You are a creative chef. Generate a recipe using the ingredients below.",
            "IMPORTANT: When you use a pantry item, list it in Ingredients using its exact name "
            "from the pantry list. Do not add adjectives, quantities, or cooking states "
            "(e.g. use 'butter', not 'unsalted butter' or '2 tbsp butter').",
            "IMPORTANT: Only use pantry items that make culinary sense for the dish. "
            "Do NOT force flavoured/sweetened items (vanilla yoghurt, fruit yoghurt, jam, "
            "dessert sauces, flavoured syrups) into savoury dishes.",
            "IMPORTANT: Do not default to the same ingredient repeatedly across dishes. "
            "If a pantry item does not genuinely improve this specific dish, leave it out.",
            "",
            f"Pantry items: {joined}",
        ]
        if expiring_names:
            parts.append(
                f"Priority — use these soon (expiring): {', '.join(expiring_names)}"
            )
        if constraints:
            parts.append(f"Dietary constraints: {', '.join(constraints)}")
        if allergies:
            parts.append(f"IMPORTANT — must NOT contain: {', '.join(allergies)}")
        parts.extend(_LEVEL3_FORMAT)
    return "\n".join(parts)
@router.get("/samples")
async def imitate_samples(
    limit: int = 5,
    level: int = 3,
    session: CloudUser = Depends(get_session),
):
    """Return assembled recipe generation prompts for Avocet's Imitate tab.

    Each sample includes:
      system_prompt — empty (Kiwi uses no system context)
      input_text — full Level 3/4 prompt with pantry items, expiring items,
          dietary constraints, and format instructions
      output_text — empty (no prior LLM output stored per-request)

    level: 3 (structured with element biasing context) or 4 (wildcard creative)
    limit: max number of distinct prompt variants to return (varies by pantry state)
    """
    # Clamp caller-supplied limit to [1, 10].
    limit = max(1, min(limit, 10))
    store = Store(session.db)
    # Full pantry for context
    all_items = store.list_inventory()
    pantry_names = [r["product_name"] for r in all_items if r.get("product_name")]
    # Expiring items as priority ingredients
    expiring = store.expiring_soon(days=14)
    expiring_names = [r["product_name"] for r in expiring if r.get("product_name")]
    # Dietary constraints from user_settings (keys: constraints, allergies)
    settings = _read_user_settings(store)
    import json as _json
    try:
        constraints = _json.loads(settings.get("dietary_constraints", "[]")) or []
    except Exception:
        constraints = []
    try:
        allergies = _json.loads(settings.get("dietary_allergies", "[]")) or []
    except Exception:
        allergies = []
    # Empty pantry → nothing to prompt about.
    if not pantry_names:
        return {"samples": [], "total": 0, "type": f"recipe_level{level}"}
    # Build prompt variants: one per expiring item as the "anchor" ingredient,
    # plus one general pantry prompt. Cap at limit.
    samples = []
    seen_anchors: set[str] = set()
    for item in (expiring[:limit - 1] if expiring else []):
        anchor = item.get("product_name", "")
        if not anchor or anchor in seen_anchors:
            continue
        seen_anchors.add(anchor)
        # Put this item first in the list for the prompt
        ordered_expiring = [anchor] + [n for n in expiring_names if n != anchor]
        prompt = _build_recipe_prompt(pantry_names, ordered_expiring, constraints, allergies, level)
        samples.append({
            "id": item.get("id", 0),
            "anchor_item": anchor,
            "expiring_count": len(expiring_names),
            "pantry_count": len(pantry_names),
            "system_prompt": "",
            "input_text": prompt,
            "output_text": "",
        })
    # One general prompt using all expiring as priority
    if len(samples) < limit:
        prompt = _build_recipe_prompt(pantry_names, expiring_names, constraints, allergies, level)
        samples.append({
            "id": 0,
            "anchor_item": "full pantry",
            "expiring_count": len(expiring_names),
            "pantry_count": len(pantry_names),
            "system_prompt": "",
            "input_text": prompt,
            "output_text": "",
        })
    return {"samples": samples, "total": len(samples), "type": f"recipe_level{level}"}

View file

@ -3,7 +3,6 @@
from __future__ import annotations from __future__ import annotations
import asyncio import asyncio
import logging
import uuid import uuid
from pathlib import Path from pathlib import Path
from typing import Any, Dict, List, Optional from typing import Any, Dict, List, Optional
@ -12,73 +11,25 @@ import aiofiles
from fastapi import APIRouter, Depends, File, Form, HTTPException, UploadFile, status from fastapi import APIRouter, Depends, File, Form, HTTPException, UploadFile, status
from pydantic import BaseModel from pydantic import BaseModel
from app.cloud_session import CloudUser, _auth_label, get_session from app.cloud_session import CloudUser, get_session
log = logging.getLogger(__name__)
from app.db.session import get_store from app.db.session import get_store
from app.services.expiration_predictor import ExpirationPredictor
_predictor = ExpirationPredictor()
from app.db.store import Store from app.db.store import Store
from app.models.schemas.inventory import ( from app.models.schemas.inventory import (
BarcodeScanResponse, BarcodeScanResponse,
BulkAddByNameRequest,
BulkAddByNameResponse,
BulkAddItemResult,
DiscardRequest,
InventoryItemCreate, InventoryItemCreate,
InventoryItemResponse, InventoryItemResponse,
InventoryItemUpdate, InventoryItemUpdate,
InventoryStats, InventoryStats,
PartialConsumeRequest,
ProductCreate, ProductCreate,
ProductResponse, ProductResponse,
ProductUpdate, ProductUpdate,
TagCreate, TagCreate,
TagResponse, TagResponse,
) )
from app.models.schemas.label_capture import LabelConfirmRequest
router = APIRouter() router = APIRouter()
# ── Helpers ───────────────────────────────────────────────────────────────────
def _user_constraints(store) -> list[str]:
"""Load active dietary constraints from user settings (comma-separated string)."""
raw = store.get_setting("dietary_constraints") or ""
return [c.strip() for c in raw.split(",") if c.strip()]
def _enrich_item(item: dict, user_constraints: list[str] | None = None) -> dict:
"""Attach computed fields: opened_expiry_date, secondary_state/uses/warning/discard_signs."""
from datetime import date, timedelta
opened = item.get("opened_date")
if opened:
days = _predictor.days_after_opening(item.get("category"))
if days is not None:
try:
opened_expiry = date.fromisoformat(opened) + timedelta(days=days)
item = {**item, "opened_expiry_date": str(opened_expiry)}
except ValueError:
pass
if "opened_expiry_date" not in item:
item = {**item, "opened_expiry_date": None}
# Secondary use window — check sell-by date (not opened expiry).
# Apply dietary constraint filter (e.g. wine suppressed for halal/alcohol-free).
sec = _predictor.secondary_state(item.get("category"), item.get("expiration_date"))
sec = _predictor.filter_secondary_by_constraints(sec, user_constraints or [])
item = {
**item,
"secondary_state": sec["label"] if sec else None,
"secondary_uses": sec["uses"] if sec else None,
"secondary_warning": sec["warning"] if sec else None,
"secondary_discard_signs": sec["discard_signs"] if sec else None,
}
return item
# ── Products ────────────────────────────────────────────────────────────────── # ── Products ──────────────────────────────────────────────────────────────────
@router.post("/products", response_model=ProductResponse, status_code=status.HTTP_201_CREATED) @router.post("/products", response_model=ProductResponse, status_code=status.HTTP_201_CREATED)
@ -163,12 +114,7 @@ async def delete_product(product_id: int, store: Store = Depends(get_store)):
# ── Inventory items ─────────────────────────────────────────────────────────── # ── Inventory items ───────────────────────────────────────────────────────────
@router.post("/items", response_model=InventoryItemResponse, status_code=status.HTTP_201_CREATED) @router.post("/items", response_model=InventoryItemResponse, status_code=status.HTTP_201_CREATED)
async def create_inventory_item( async def create_inventory_item(body: InventoryItemCreate, store: Store = Depends(get_store)):
body: InventoryItemCreate,
store: Store = Depends(get_store),
session: CloudUser = Depends(get_session),
):
log.info("add_item auth=%s tier=%s product_id=%s", _auth_label(session.user_id), session.tier, body.product_id)
item = await asyncio.to_thread( item = await asyncio.to_thread(
store.add_inventory_item, store.add_inventory_item,
body.product_id, body.product_id,
@ -181,38 +127,7 @@ async def create_inventory_item(
notes=body.notes, notes=body.notes,
source=body.source, source=body.source,
) )
# RETURNING * omits joined columns (product_name, barcode, category). return InventoryItemResponse.model_validate(item)
# Re-fetch with the products JOIN so the response is fully populated (#99).
full_item = await asyncio.to_thread(store.get_inventory_item, item["id"])
return InventoryItemResponse.model_validate(full_item)
@router.post("/items/bulk-add-by-name", response_model=BulkAddByNameResponse)
async def bulk_add_items_by_name(body: BulkAddByNameRequest, store: Store = Depends(get_store)):
"""Create pantry items from a list of ingredient names (no barcode required).
Uses get_or_create_product so re-adding an existing product is idempotent.
"""
results: list[BulkAddItemResult] = []
for entry in body.items:
try:
product, _ = await asyncio.to_thread(
store.get_or_create_product, entry.name, None, source="manual"
)
item = await asyncio.to_thread(
store.add_inventory_item,
product["id"],
entry.location,
quantity=entry.quantity,
unit=entry.unit,
source="manual",
)
results.append(BulkAddItemResult(name=entry.name, ok=True, item_id=item["id"]))
except Exception as exc:
results.append(BulkAddItemResult(name=entry.name, ok=False, error=str(exc)))
added = sum(1 for r in results if r.ok)
return BulkAddByNameResponse(added=added, failed=len(results) - added, results=results)
@router.get("/items", response_model=List[InventoryItemResponse]) @router.get("/items", response_model=List[InventoryItemResponse])
@ -222,15 +137,13 @@ async def list_inventory_items(
store: Store = Depends(get_store), store: Store = Depends(get_store),
): ):
items = await asyncio.to_thread(store.list_inventory, location, item_status) items = await asyncio.to_thread(store.list_inventory, location, item_status)
constraints = await asyncio.to_thread(_user_constraints, store) return [InventoryItemResponse.model_validate(i) for i in items]
return [InventoryItemResponse.model_validate(_enrich_item(i, constraints)) for i in items]
@router.get("/items/expiring", response_model=List[InventoryItemResponse]) @router.get("/items/expiring", response_model=List[InventoryItemResponse])
async def get_expiring_items(days: int = 7, store: Store = Depends(get_store)): async def get_expiring_items(days: int = 7, store: Store = Depends(get_store)):
items = await asyncio.to_thread(store.expiring_soon, days) items = await asyncio.to_thread(store.expiring_soon, days)
constraints = await asyncio.to_thread(_user_constraints, store) return [InventoryItemResponse.model_validate(i) for i in items]
return [InventoryItemResponse.model_validate(_enrich_item(i, constraints)) for i in items]
@router.get("/items/{item_id}", response_model=InventoryItemResponse) @router.get("/items/{item_id}", response_model=InventoryItemResponse)
@ -238,8 +151,7 @@ async def get_inventory_item(item_id: int, store: Store = Depends(get_store)):
item = await asyncio.to_thread(store.get_inventory_item, item_id) item = await asyncio.to_thread(store.get_inventory_item, item_id)
if not item: if not item:
raise HTTPException(status_code=404, detail="Inventory item not found") raise HTTPException(status_code=404, detail="Inventory item not found")
constraints = await asyncio.to_thread(_user_constraints, store) return InventoryItemResponse.model_validate(item)
return InventoryItemResponse.model_validate(_enrich_item(item, constraints))
@router.patch("/items/{item_id}", response_model=InventoryItemResponse) @router.patch("/items/{item_id}", response_model=InventoryItemResponse)
@ -251,83 +163,24 @@ async def update_inventory_item(
updates["purchase_date"] = str(updates["purchase_date"]) updates["purchase_date"] = str(updates["purchase_date"])
if "expiration_date" in updates and updates["expiration_date"]: if "expiration_date" in updates and updates["expiration_date"]:
updates["expiration_date"] = str(updates["expiration_date"]) updates["expiration_date"] = str(updates["expiration_date"])
if "opened_date" in updates and updates["opened_date"]:
updates["opened_date"] = str(updates["opened_date"])
item = await asyncio.to_thread(store.update_inventory_item, item_id, **updates) item = await asyncio.to_thread(store.update_inventory_item, item_id, **updates)
if not item: if not item:
raise HTTPException(status_code=404, detail="Inventory item not found") raise HTTPException(status_code=404, detail="Inventory item not found")
constraints = await asyncio.to_thread(_user_constraints, store) return InventoryItemResponse.model_validate(item)
return InventoryItemResponse.model_validate(_enrich_item(item, constraints))
@router.post("/items/{item_id}/open", response_model=InventoryItemResponse)
async def mark_item_opened(item_id: int, store: Store = Depends(get_store)):
"""Record that this item was opened today, triggering secondary shelf-life tracking."""
from datetime import date
item = await asyncio.to_thread(
store.update_inventory_item,
item_id,
opened_date=str(date.today()),
)
if not item:
raise HTTPException(status_code=404, detail="Inventory item not found")
constraints = await asyncio.to_thread(_user_constraints, store)
return InventoryItemResponse.model_validate(_enrich_item(item, constraints))
@router.post("/items/{item_id}/consume", response_model=InventoryItemResponse) @router.post("/items/{item_id}/consume", response_model=InventoryItemResponse)
async def consume_item( async def consume_item(item_id: int, store: Store = Depends(get_store)):
item_id: int,
body: Optional[PartialConsumeRequest] = None,
store: Store = Depends(get_store),
):
"""Consume an inventory item fully or partially.
When body.quantity is provided, decrements by that amount and only marks
status=consumed when quantity reaches zero. Omit body to consume all.
"""
from datetime import datetime, timezone from datetime import datetime, timezone
now = datetime.now(timezone.utc).isoformat()
if body is not None:
item = await asyncio.to_thread(
store.partial_consume_item, item_id, body.quantity, now
)
else:
item = await asyncio.to_thread( item = await asyncio.to_thread(
store.update_inventory_item, store.update_inventory_item,
item_id, item_id,
status="consumed", status="consumed",
consumed_at=now,
)
if not item:
raise HTTPException(status_code=404, detail="Inventory item not found")
constraints = await asyncio.to_thread(_user_constraints, store)
return InventoryItemResponse.model_validate(_enrich_item(item, constraints))
@router.post("/items/{item_id}/discard", response_model=InventoryItemResponse)
async def discard_item(
item_id: int,
body: DiscardRequest = DiscardRequest(),
store: Store = Depends(get_store),
):
"""Mark an item as discarded (not used, spoiled, etc).
Optional reason field accepts free text or a preset label
('not used', 'spoiled', 'excess', 'other').
"""
from datetime import datetime, timezone
item = await asyncio.to_thread(
store.update_inventory_item,
item_id,
status="discarded",
consumed_at=datetime.now(timezone.utc).isoformat(), consumed_at=datetime.now(timezone.utc).isoformat(),
disposal_reason=body.reason,
) )
if not item: if not item:
raise HTTPException(status_code=404, detail="Inventory item not found") raise HTTPException(status_code=404, detail="Inventory item not found")
constraints = await asyncio.to_thread(_user_constraints, store) return InventoryItemResponse.model_validate(item)
return InventoryItemResponse.model_validate(_enrich_item(item, constraints))
@router.delete("/items/{item_id}", status_code=status.HTTP_204_NO_CONTENT) @router.delete("/items/{item_id}", status_code=status.HTTP_204_NO_CONTENT)
@ -350,31 +203,6 @@ class BarcodeScanTextRequest(BaseModel):
auto_add_to_inventory: bool = True auto_add_to_inventory: bool = True
def _captured_to_product_info(row: dict) -> dict:
"""Convert a captured_products row to the product_info dict shape used by
the barcode scan flow (mirrors what OpenFoodFactsService returns)."""
macros: dict = {}
for field in ("calories", "fat_g", "saturated_fat_g", "carbs_g", "sugar_g",
"fiber_g", "protein_g", "sodium_mg", "serving_size_g"):
if row.get(field) is not None:
macros[field] = row[field]
return {
"name": row.get("product_name") or row.get("barcode", "Unknown Product"),
"brand": row.get("brand"),
"category": None,
"nutrition_data": macros,
"ingredient_names": row.get("ingredient_names") or [],
"allergens": row.get("allergens") or [],
"source": "visual_capture",
}
def _gap_message(tier: str, has_visual_capture: bool) -> str:
if has_visual_capture:
return "We couldn't find this product. Photograph the nutrition label to add it."
return "Not found in any product database — add manually"
@router.post("/scan/text", response_model=BarcodeScanResponse) @router.post("/scan/text", response_model=BarcodeScanResponse)
async def scan_barcode_text( async def scan_barcode_text(
body: BarcodeScanTextRequest, body: BarcodeScanTextRequest,
@ -382,24 +210,12 @@ async def scan_barcode_text(
session: CloudUser = Depends(get_session), session: CloudUser = Depends(get_session),
): ):
"""Scan a barcode from a text string (e.g. from a hardware scanner or manual entry).""" """Scan a barcode from a text string (e.g. from a hardware scanner or manual entry)."""
log.info("scan auth=%s tier=%s barcode=%r", _auth_label(session.user_id), session.tier, body.barcode)
from app.services.openfoodfacts import OpenFoodFactsService from app.services.openfoodfacts import OpenFoodFactsService
from app.services.expiration_predictor import ExpirationPredictor from app.services.expiration_predictor import ExpirationPredictor
from app.tiers import can_use
predictor = ExpirationPredictor()
has_visual_capture = can_use("visual_label_capture", session.tier, session.has_byok)
# 1. Check local captured-products cache before hitting FDC/OFF
cached = await asyncio.to_thread(store.get_captured_product, body.barcode)
if cached and cached.get("confirmed_by_user"):
product_info: dict | None = _captured_to_product_info(cached)
product_source = "visual_capture"
else:
off = OpenFoodFactsService() off = OpenFoodFactsService()
predictor = ExpirationPredictor()
product_info = await off.lookup_product(body.barcode) product_info = await off.lookup_product(body.barcode)
product_source = "openfoodfacts"
inventory_item = None inventory_item = None
if product_info and body.auto_add_to_inventory: if product_info and body.auto_add_to_inventory:
@ -410,7 +226,7 @@ async def scan_barcode_text(
brand=product_info.get("brand"), brand=product_info.get("brand"),
category=product_info.get("category"), category=product_info.get("category"),
nutrition_data=product_info.get("nutrition_data", {}), nutrition_data=product_info.get("nutrition_data", {}),
source=product_source, source="openfoodfacts",
source_data=product_info, source_data=product_info,
) )
exp = predictor.predict_expiration( exp = predictor.predict_expiration(
@ -420,14 +236,10 @@ async def scan_barcode_text(
tier=session.tier, tier=session.tier,
has_byok=session.has_byok, has_byok=session.has_byok,
) )
# Use OFFs pack size when detected; caller-supplied quantity is a fallback
resolved_qty = product_info.get("pack_quantity") or body.quantity
resolved_unit = product_info.get("pack_unit") or "count"
inventory_item = await asyncio.to_thread( inventory_item = await asyncio.to_thread(
store.add_inventory_item, store.add_inventory_item,
product["id"], body.location, product["id"], body.location,
quantity=resolved_qty, quantity=body.quantity,
unit=resolved_unit,
expiration_date=str(exp) if exp else None, expiration_date=str(exp) if exp else None,
source="barcode_scan", source="barcode_scan",
) )
@ -435,8 +247,6 @@ async def scan_barcode_text(
else: else:
result_product = None result_product = None
product_found = product_info is not None
needs_capture = not product_found and has_visual_capture
return BarcodeScanResponse( return BarcodeScanResponse(
success=True, success=True,
barcodes_found=1, barcodes_found=1,
@ -446,9 +256,7 @@ async def scan_barcode_text(
"product": result_product, "product": result_product,
"inventory_item": InventoryItemResponse.model_validate(inventory_item) if inventory_item else None, "inventory_item": InventoryItemResponse.model_validate(inventory_item) if inventory_item else None,
"added_to_inventory": inventory_item is not None, "added_to_inventory": inventory_item is not None,
"needs_manual_entry": not product_found and not needs_capture, "message": "Added to inventory" if inventory_item else "Product not found in database",
"needs_visual_capture": needs_capture,
"message": "Added to inventory" if inventory_item else _gap_message(session.tier, needs_capture),
}], }],
message="Barcode processed", message="Barcode processed",
) )
@ -464,10 +272,6 @@ async def scan_barcode_image(
session: CloudUser = Depends(get_session), session: CloudUser = Depends(get_session),
): ):
"""Scan a barcode from an uploaded image. Requires Phase 2 scanner integration.""" """Scan a barcode from an uploaded image. Requires Phase 2 scanner integration."""
log.info("scan_image auth=%s tier=%s", _auth_label(session.user_id), session.tier)
from app.tiers import can_use
has_visual_capture = can_use("visual_label_capture", session.tier, session.has_byok)
temp_dir = Path("/tmp/kiwi_barcode_scans") temp_dir = Path("/tmp/kiwi_barcode_scans")
temp_dir.mkdir(parents=True, exist_ok=True) temp_dir.mkdir(parents=True, exist_ok=True)
temp_file = temp_dir / f"{uuid.uuid4()}_{file.filename}" temp_file = temp_dir / f"{uuid.uuid4()}_{file.filename}"
@ -490,16 +294,7 @@ async def scan_barcode_image(
results = [] results = []
for bc in barcodes: for bc in barcodes:
code = bc["data"] code = bc["data"]
# Check local visual-capture cache before hitting FDC/OFF
cached = await asyncio.to_thread(store.get_captured_product, code)
if cached and cached.get("confirmed_by_user"):
product_info: dict | None = _captured_to_product_info(cached)
product_source = "visual_capture"
else:
product_info = await off.lookup_product(code) product_info = await off.lookup_product(code)
product_source = "openfoodfacts"
inventory_item = None inventory_item = None
if product_info and auto_add_to_inventory: if product_info and auto_add_to_inventory:
product, _ = await asyncio.to_thread( product, _ = await asyncio.to_thread(
@ -509,7 +304,7 @@ async def scan_barcode_image(
brand=product_info.get("brand"), brand=product_info.get("brand"),
category=product_info.get("category"), category=product_info.get("category"),
nutrition_data=product_info.get("nutrition_data", {}), nutrition_data=product_info.get("nutrition_data", {}),
source=product_source, source="openfoodfacts",
source_data=product_info, source_data=product_info,
) )
exp = predictor.predict_expiration( exp = predictor.predict_expiration(
@ -519,27 +314,20 @@ async def scan_barcode_image(
tier=session.tier, tier=session.tier,
has_byok=session.has_byok, has_byok=session.has_byok,
) )
resolved_qty = product_info.get("pack_quantity") or quantity
resolved_unit = product_info.get("pack_unit") or "count"
inventory_item = await asyncio.to_thread( inventory_item = await asyncio.to_thread(
store.add_inventory_item, store.add_inventory_item,
product["id"], location, product["id"], location,
quantity=resolved_qty, quantity=quantity,
unit=resolved_unit,
expiration_date=str(exp) if exp else None, expiration_date=str(exp) if exp else None,
source="barcode_scan", source="barcode_scan",
) )
product_found = product_info is not None
needs_capture = not product_found and has_visual_capture
results.append({ results.append({
"barcode": code, "barcode": code,
"barcode_type": bc.get("type", "unknown"), "barcode_type": bc.get("type", "unknown"),
"product": ProductResponse.model_validate(product_info) if product_info else None, "product": ProductResponse.model_validate(product) if product_info else None,
"inventory_item": InventoryItemResponse.model_validate(inventory_item) if inventory_item else None, "inventory_item": InventoryItemResponse.model_validate(inventory_item) if inventory_item else None,
"added_to_inventory": inventory_item is not None, "added_to_inventory": inventory_item is not None,
"needs_manual_entry": not product_found and not needs_capture, "message": "Added to inventory" if inventory_item else "Barcode scanned",
"needs_visual_capture": needs_capture,
"message": "Added to inventory" if inventory_item else _gap_message(session.tier, needs_capture),
}) })
return BarcodeScanResponse( return BarcodeScanResponse(
success=True, barcodes_found=len(barcodes), results=results, success=True, barcodes_found=len(barcodes), results=results,
@ -550,143 +338,6 @@ async def scan_barcode_image(
temp_file.unlink() temp_file.unlink()
# ── Visual label capture (kiwi#79) ────────────────────────────────────────────
@router.post("/scan/label-capture")
async def capture_nutrition_label(
    file: UploadFile = File(...),
    barcode: str = Form(...),
    store: Store = Depends(get_store),
    session: CloudUser = Depends(get_session),
):
    """Photograph a nutrition label for an unenriched product (paid tier).

    Sends the image to the vision model and returns structured nutrition data
    for user review. Fields extracted with confidence < 0.7 should be
    highlighted in amber in the UI.
    """
    # Imported lazily so the label-capture/vision stack only loads when the
    # endpoint is actually hit.
    from app.tiers import can_use
    from app.models.schemas.label_capture import LabelCaptureResponse
    from app.services.label_capture import extract_label, needs_review as _needs_review

    # Gate on tier *before* reading the upload — capture is a paid feature.
    if not can_use("visual_label_capture", session.tier, session.has_byok):
        raise HTTPException(status_code=403, detail="Visual label capture requires a Paid tier or higher.")
    log.info("label_capture tier=%s barcode=%r", session.tier, barcode)
    image_bytes = await file.read()
    # extract_label is synchronous (model call); run it off the event loop.
    extraction = await asyncio.to_thread(extract_label, image_bytes)
    # All fields are optional — missing extractions come back as None and the
    # UI decides what to ask the user to fill in.
    return LabelCaptureResponse(
        barcode=barcode,
        product_name=extraction.get("product_name"),
        brand=extraction.get("brand"),
        serving_size_g=extraction.get("serving_size_g"),
        calories=extraction.get("calories"),
        fat_g=extraction.get("fat_g"),
        saturated_fat_g=extraction.get("saturated_fat_g"),
        carbs_g=extraction.get("carbs_g"),
        sugar_g=extraction.get("sugar_g"),
        fiber_g=extraction.get("fiber_g"),
        protein_g=extraction.get("protein_g"),
        sodium_mg=extraction.get("sodium_mg"),
        ingredient_names=extraction.get("ingredient_names") or [],
        allergens=extraction.get("allergens") or [],
        confidence=extraction.get("confidence", 0.0),
        needs_review=_needs_review(extraction),
    )
@router.post("/scan/label-confirm")
async def confirm_nutrition_label(
    body: LabelConfirmRequest,
    store: Store = Depends(get_store),
    session: CloudUser = Depends(get_session),
):
    """Confirm and save a user-reviewed label extraction.

    Saves the product to the local cache so future scans of the same barcode
    resolve instantly without another capture. Optionally adds the item to
    the user's inventory.
    """
    from app.tiers import can_use
    from app.models.schemas.label_capture import LabelConfirmResponse
    from app.services.expiration_predictor import ExpirationPredictor

    # Same gate as /scan/label-capture — confirmation is also paid-only.
    if not can_use("visual_label_capture", session.tier, session.has_byok):
        raise HTTPException(status_code=403, detail="Visual label capture requires a Paid tier or higher.")
    log.info("label_confirm tier=%s barcode=%r", session.tier, body.barcode)
    # Persist to local visual-capture cache, flagged user-confirmed so the
    # scan endpoints will trust it on future lookups of this barcode.
    await asyncio.to_thread(
        store.save_captured_product,
        body.barcode,
        product_name=body.product_name,
        brand=body.brand,
        serving_size_g=body.serving_size_g,
        calories=body.calories,
        fat_g=body.fat_g,
        saturated_fat_g=body.saturated_fat_g,
        carbs_g=body.carbs_g,
        sugar_g=body.sugar_g,
        fiber_g=body.fiber_g,
        protein_g=body.protein_g,
        sodium_mg=body.sodium_mg,
        ingredient_names=body.ingredient_names,
        allergens=body.allergens,
        confidence=body.confidence,
        confirmed_by_user=True,
    )
    product_id: int | None = None
    inventory_item_id: int | None = None
    if body.auto_add:
        predictor = ExpirationPredictor()
        # Collect only the nutrition fields the user actually provided.
        nutrition = {}
        for field in ("calories", "fat_g", "saturated_fat_g", "carbs_g", "sugar_g",
                      "fiber_g", "protein_g", "sodium_mg", "serving_size_g"):
            val = getattr(body, field, None)
            if val is not None:
                nutrition[field] = val
        # Idempotent product creation keyed on name/barcode.
        product, _ = await asyncio.to_thread(
            store.get_or_create_product,
            body.product_name or body.barcode,
            body.barcode,
            brand=body.brand,
            category=None,
            nutrition_data=nutrition,
            source="visual_capture",
            source_data={},
        )
        product_id = product["id"]
        # No category hint available from a label capture — pass "" and let
        # the predictor work from the product name and storage location.
        exp = predictor.predict_expiration(
            "",
            body.location,
            product_name=body.product_name or body.barcode,
            tier=session.tier,
            has_byok=session.has_byok,
        )
        inv_item = await asyncio.to_thread(
            store.add_inventory_item,
            product_id, body.location,
            quantity=body.quantity,
            unit="count",
            expiration_date=str(exp) if exp else None,
            source="visual_capture",
        )
        inventory_item_id = inv_item["id"]
    return LabelConfirmResponse(
        ok=True,
        barcode=body.barcode,
        product_id=product_id,
        inventory_item_id=inventory_item_id,
        message="Product saved" + (" and added to inventory" if body.auto_add else ""),
    )
# ── Tags ────────────────────────────────────────────────────────────────────── # ── Tags ──────────────────────────────────────────────────────────────────────
@router.post("/tags", response_model=TagResponse, status_code=status.HTTP_201_CREATED) @router.post("/tags", response_model=TagResponse, status_code=status.HTTP_201_CREATED)
@ -718,23 +369,6 @@ async def list_tags(
# ── Stats ───────────────────────────────────────────────────────────────────── # ── Stats ─────────────────────────────────────────────────────────────────────
@router.post("/recalculate-expiry")
async def recalculate_expiry(
    session: CloudUser = Depends(get_session),
    store: Store = Depends(get_store),
) -> dict:
    """Re-run the expiration predictor over all available inventory items.

    Uses each item's stored purchase_date and current location. Safe to call
    multiple times — idempotent per session.

    Returns ``{"updated": n, "skipped": m}`` counts from the store.
    """
    def _run(s: Store) -> tuple[int, int]:
        # Store is synchronous (SQLite-backed); keep the whole pass in one
        # worker-thread call.
        return s.recalculate_expiry(tier=session.tier, has_byok=session.has_byok)

    updated, skipped = await asyncio.to_thread(_run, store)
    return {"updated": updated, "skipped": skipped}
@router.get("/stats", response_model=InventoryStats) @router.get("/stats", response_model=InventoryStats)
async def get_inventory_stats(store: Store = Depends(get_store)): async def get_inventory_stats(store: Store = Depends(get_store)):
def _stats(): def _stats():

View file

@ -1,325 +0,0 @@
# app/api/endpoints/meal_plans.py
"""Meal plan CRUD, shopping list, and prep session endpoints."""
from __future__ import annotations
import asyncio
import json
from datetime import date
from fastapi import APIRouter, Depends, HTTPException
from app.cloud_session import CloudUser, get_session
from app.db.session import get_store
from app.db.store import Store
from app.models.schemas.meal_plan import (
CreatePlanRequest,
GapItem,
PlanSummary,
PrepSessionSummary,
PrepTaskSummary,
ShoppingListResponse,
SlotSummary,
UpdatePlanRequest,
UpdatePrepTaskRequest,
UpsertSlotRequest,
VALID_MEAL_TYPES,
)
from app.services.meal_plan.affiliates import get_retailer_links
from app.services.meal_plan.prep_scheduler import build_prep_tasks
from app.services.meal_plan.shopping_list import compute_shopping_list
from app.tiers import can_use
router = APIRouter()
# ── helpers ───────────────────────────────────────────────────────────────────
def _slot_summary(row: dict) -> SlotSummary:
    """Shape a meal-plan slot row into the API's SlotSummary schema."""
    # Optional columns may be absent from the row entirely; .get() keeps them None.
    return SlotSummary(
        id=row["id"],
        plan_id=row["plan_id"],
        day_of_week=row["day_of_week"],
        meal_type=row["meal_type"],
        servings=row["servings"],
        recipe_id=row.get("recipe_id"),
        recipe_title=row.get("recipe_title"),
        custom_label=row.get("custom_label"),
    )
def _plan_summary(plan: dict, slots: list[dict]) -> PlanSummary:
    """Build a PlanSummary from a meal_plans row plus its slot rows.

    ``meal_types`` may be stored either as a JSON-encoded string (SQLite) or
    as an already-decoded list; both are normalised here.

    Fix: the dinner-only default is now applied *after* JSON decoding as
    well — previously a stored ``"[]"`` string decoded to an empty list and
    bypassed the ``or ["dinner"]`` fallback.
    """
    meal_types = plan.get("meal_types") or ["dinner"]
    if isinstance(meal_types, str):
        meal_types = json.loads(meal_types) or ["dinner"]
    return PlanSummary(
        id=plan["id"],
        week_start=plan["week_start"],
        meal_types=meal_types,
        slots=[_slot_summary(s) for s in slots],
        created_at=plan["created_at"],
    )
def _prep_task_summary(row: dict) -> PrepTaskSummary:
    """Shape a prep_tasks row into the API's PrepTaskSummary schema."""
    return PrepTaskSummary(
        id=row["id"],
        recipe_id=row.get("recipe_id"),
        task_label=row["task_label"],
        duration_minutes=row.get("duration_minutes"),
        sequence_order=row["sequence_order"],
        equipment=row.get("equipment"),
        # SQLite stores booleans as 0/1 — normalise to real bools.
        is_parallel=bool(row.get("is_parallel") or False),
        notes=row.get("notes"),
        user_edited=bool(row.get("user_edited") or False),
    )
# ── plan CRUD ─────────────────────────────────────────────────────────────────
@router.post("/", response_model=PlanSummary)
async def create_plan(
    req: CreatePlanRequest,
    session: CloudUser = Depends(get_session),
    store: Store = Depends(get_store),
) -> PlanSummary:
    """Create a meal plan for a week.

    Free tier is locked to dinner-only; paid+ may configure meal types.
    Returns 409 when a plan for the requested week already exists.
    """
    import sqlite3  # local import: only needed to map the unique-week violation

    # Free tier is locked to dinner-only; paid+ may configure meal types
    if can_use("meal_plan_config", session.tier):
        # Drop unknown types; an empty survivor list falls back to dinner.
        meal_types = [t for t in req.meal_types if t in VALID_MEAL_TYPES] or ["dinner"]
    else:
        meal_types = ["dinner"]
    try:
        plan = await asyncio.to_thread(store.create_meal_plan, str(req.week_start), meal_types)
    except sqlite3.IntegrityError:
        # Presumably a UNIQUE constraint on week_start — one plan per week.
        raise HTTPException(
            status_code=409,
            detail=f"A meal plan for the week of {req.week_start} already exists.",
        )
    slots = await asyncio.to_thread(store.get_plan_slots, plan["id"])
    return _plan_summary(plan, slots)
@router.get("/", response_model=list[PlanSummary])
async def list_plans(
    session: CloudUser = Depends(get_session),
    store: Store = Depends(get_store),
) -> list[PlanSummary]:
    """Return every meal plan, each populated with its slots."""
    summaries: list[PlanSummary] = []
    for plan in await asyncio.to_thread(store.list_meal_plans):
        plan_slots = await asyncio.to_thread(store.get_plan_slots, plan["id"])
        summaries.append(_plan_summary(plan, plan_slots))
    return summaries
@router.patch("/{plan_id}", response_model=PlanSummary)
async def update_plan(
    plan_id: int,
    req: UpdatePlanRequest,
    session: CloudUser = Depends(get_session),
    store: Store = Depends(get_store),
) -> PlanSummary:
    """Update a plan's configured meal types.

    404 when the plan does not exist. Free tier stays dinner-only.
    """
    plan = await asyncio.to_thread(store.get_meal_plan, plan_id)
    if plan is None:
        raise HTTPException(status_code=404, detail="Plan not found.")
    # Free tier stays dinner-only; paid+ may add meal types
    if can_use("meal_plan_config", session.tier):
        # Drop unknown types; an empty survivor list falls back to dinner.
        meal_types = [t for t in req.meal_types if t in VALID_MEAL_TYPES] or ["dinner"]
    else:
        meal_types = ["dinner"]
    updated = await asyncio.to_thread(store.update_meal_plan_types, plan_id, meal_types)
    if updated is None:
        # Plan vanished between the existence check and the update.
        raise HTTPException(status_code=404, detail="Plan not found.")
    slots = await asyncio.to_thread(store.get_plan_slots, plan_id)
    return _plan_summary(updated, slots)
@router.get("/{plan_id}", response_model=PlanSummary)
async def get_plan(
    plan_id: int,
    session: CloudUser = Depends(get_session),
    store: Store = Depends(get_store),
) -> PlanSummary:
    """Fetch a single plan with its slots; 404 if it does not exist."""
    plan = await asyncio.to_thread(store.get_meal_plan, plan_id)
    if plan is None:
        raise HTTPException(status_code=404, detail="Plan not found.")
    plan_slots = await asyncio.to_thread(store.get_plan_slots, plan_id)
    return _plan_summary(plan, plan_slots)
# ── slots ─────────────────────────────────────────────────────────────────────
@router.put("/{plan_id}/slots/{day_of_week}/{meal_type}", response_model=SlotSummary)
async def upsert_slot(
    plan_id: int,
    day_of_week: int,
    meal_type: str,
    req: UpsertSlotRequest,
    session: CloudUser = Depends(get_session),
    store: Store = Depends(get_store),
) -> SlotSummary:
    """Create or replace the slot at (day_of_week, meal_type) in a plan.

    422 for an out-of-range day or unknown meal type; 404 for a missing plan.
    """
    # Validate path parameters before touching the database.
    if day_of_week < 0 or day_of_week > 6:
        raise HTTPException(status_code=422, detail="day_of_week must be 0-6.")
    if meal_type not in VALID_MEAL_TYPES:
        raise HTTPException(status_code=422, detail=f"Invalid meal_type '{meal_type}'.")
    plan = await asyncio.to_thread(store.get_meal_plan, plan_id)
    if plan is None:
        raise HTTPException(status_code=404, detail="Plan not found.")
    row = await asyncio.to_thread(
        store.upsert_slot,
        plan_id, day_of_week, meal_type,
        req.recipe_id, req.servings, req.custom_label,
    )
    return _slot_summary(row)
@router.delete("/{plan_id}/slots/{slot_id}", status_code=204)
async def delete_slot(
    plan_id: int,
    slot_id: int,
    session: CloudUser = Depends(get_session),
    store: Store = Depends(get_store),
) -> None:
    """Remove a slot from a plan; 404 when the plan itself is missing."""
    if await asyncio.to_thread(store.get_meal_plan, plan_id) is None:
        raise HTTPException(status_code=404, detail="Plan not found.")
    await asyncio.to_thread(store.delete_slot, slot_id)
# ── shopping list ─────────────────────────────────────────────────────────────
@router.get("/{plan_id}/shopping-list", response_model=ShoppingListResponse)
async def get_shopping_list(
    plan_id: int,
    session: CloudUser = Depends(get_session),
    store: Store = Depends(get_store),
) -> ShoppingListResponse:
    """Compute the shopping list for a plan: recipe needs minus inventory.

    Gap items (not covered by inventory) are enriched with retailer links;
    covered items are not. 404 when the plan does not exist.
    """
    plan = await asyncio.to_thread(store.get_meal_plan, plan_id)
    if plan is None:
        raise HTTPException(status_code=404, detail="Plan not found.")
    recipes = await asyncio.to_thread(store.get_plan_recipes, plan_id)
    inventory = await asyncio.to_thread(store.list_inventory)
    gaps, covered = compute_shopping_list(recipes, inventory)

    # Enrich gap items with retailer links
    def _to_schema(item, enrich: bool) -> GapItem:
        links = get_retailer_links(item.ingredient_name) if enrich else []
        return GapItem(
            ingredient_name=item.ingredient_name,
            needed_raw=item.needed_raw,
            have_quantity=item.have_quantity,
            have_unit=item.have_unit,
            covered=item.covered,
            retailer_links=links,
        )

    gap_items = [_to_schema(g, enrich=True) for g in gaps]
    covered_items = [_to_schema(c, enrich=False) for c in covered]
    # Affiliate disclosure only appears when there are links to disclose.
    disclosure = (
        "Some links may be affiliate links. Purchases through them support Kiwi development."
        if gap_items else None
    )
    return ShoppingListResponse(
        plan_id=plan_id,
        gap_items=gap_items,
        covered_items=covered_items,
        disclosure=disclosure,
    )
# ── prep session ──────────────────────────────────────────────────────────────
@router.get("/{plan_id}/prep-session", response_model=PrepSessionSummary)
async def get_prep_session(
    plan_id: int,
    session: CloudUser = Depends(get_session),
    store: Store = Depends(get_store),
) -> PrepSessionSummary:
    """Fetch the prep session (and its tasks) for a plan.

    404 when the plan does not exist, and separately 404 when the plan has
    no prep session yet (create one via POST).
    """
    plan = await asyncio.to_thread(store.get_meal_plan, plan_id)
    if plan is None:
        raise HTTPException(status_code=404, detail="Plan not found.")
    prep_session = await asyncio.to_thread(store.get_prep_session_for_plan, plan_id)
    if prep_session is None:
        raise HTTPException(status_code=404, detail="No prep session for this plan.")
    raw_tasks = await asyncio.to_thread(store.get_prep_tasks, prep_session["id"])
    return PrepSessionSummary(
        id=prep_session["id"],
        plan_id=plan_id,
        scheduled_date=prep_session["scheduled_date"],
        status=prep_session["status"],
        tasks=[_prep_task_summary(t) for t in raw_tasks],
    )
@router.post("/{plan_id}/prep-session", response_model=PrepSessionSummary)
async def create_prep_session(
    plan_id: int,
    session: CloudUser = Depends(get_session),
    store: Store = Depends(get_store),
) -> PrepSessionSummary:
    """Create a prep session for a plan and generate its task list.

    Tasks are derived from the plan's slots and recipes by build_prep_tasks,
    then bulk-inserted. The session is scheduled for today. 404 when the
    plan does not exist.
    """
    plan = await asyncio.to_thread(store.get_meal_plan, plan_id)
    if plan is None:
        raise HTTPException(status_code=404, detail="Plan not found.")
    slots = await asyncio.to_thread(store.get_plan_slots, plan_id)
    recipes = await asyncio.to_thread(store.get_plan_recipes, plan_id)
    prep_tasks = build_prep_tasks(slots=slots, recipes=recipes)
    scheduled_date = date.today().isoformat()
    prep_session = await asyncio.to_thread(
        store.create_prep_session, plan_id, scheduled_date
    )
    session_id = prep_session["id"]
    # Flatten the task objects into plain dicts for the bulk insert.
    task_dicts = [
        {
            "recipe_id": t.recipe_id,
            "slot_id": t.slot_id,
            "task_label": t.task_label,
            "duration_minutes": t.duration_minutes,
            "sequence_order": t.sequence_order,
            "equipment": t.equipment,
            "is_parallel": t.is_parallel,
            "notes": t.notes,
        }
        for t in prep_tasks
    ]
    inserted = await asyncio.to_thread(store.bulk_insert_prep_tasks, session_id, task_dicts)
    # Echo back the inserted rows (with their DB-assigned ids).
    return PrepSessionSummary(
        id=prep_session["id"],
        plan_id=prep_session["plan_id"],
        scheduled_date=prep_session["scheduled_date"],
        status=prep_session["status"],
        tasks=[_prep_task_summary(r) for r in inserted],
    )
@router.patch(
    "/{plan_id}/prep-session/tasks/{task_id}",
    response_model=PrepTaskSummary,
)
async def update_prep_task(
    plan_id: int,
    task_id: int,
    req: UpdatePrepTaskRequest,
    session: CloudUser = Depends(get_session),
    store: Store = Depends(get_store),
) -> PrepTaskSummary:
    """Patch a prep task's editable fields; 404 when the task is unknown."""
    row = await asyncio.to_thread(
        store.update_prep_task,
        task_id,
        duration_minutes=req.duration_minutes,
        sequence_order=req.sequence_order,
        notes=req.notes,
        equipment=req.equipment,
    )
    if row is None:
        raise HTTPException(status_code=404, detail="Task not found.")
    return _prep_task_summary(row)

View file

@ -219,7 +219,7 @@ def _commit_items(
receipt_id=receipt_id, receipt_id=receipt_id,
purchase_date=str(purchase_date) if purchase_date else None, purchase_date=str(purchase_date) if purchase_date else None,
expiration_date=str(exp) if exp else None, expiration_date=str(exp) if exp else None,
source="receipt", source="receipt_ocr",
) )
created.append(ApprovedInventoryItem( created.append(ApprovedInventoryItem(

View file

@ -1,27 +0,0 @@
"""Proxy endpoint: exposes cf-orch call budget to the Kiwi frontend.
Only lifetime/founders users have a license_key — subscription and free
users receive null (no budget UI shown).
"""
from __future__ import annotations
from fastapi import APIRouter, Depends
from app.cloud_session import CloudUser, get_session
from app.services.heimdall_orch import get_orch_usage
router = APIRouter()
@router.get("")
async def orch_usage_endpoint(
    session: CloudUser = Depends(get_session),
) -> dict | None:
    """Return the current period's orch usage for the authenticated user.

    Yields null when the user holds no lifetime/founders license key — no
    budget cap applies to subscription or free plans, so the frontend shows
    no budget UI for them.
    """
    key = session.license_key
    return None if key is None else get_orch_usage(key, "kiwi")

View file

@ -42,11 +42,9 @@ async def upload_receipt(
) )
# Only queue OCR if the feature is enabled server-side AND the user's tier allows it. # Only queue OCR if the feature is enabled server-side AND the user's tier allows it.
# Check tier here, not inside the background task — once dispatched it can't be cancelled. # Check tier here, not inside the background task — once dispatched it can't be cancelled.
# Pass session.db (a Path) rather than store — the store dependency closes before
# background tasks run, so the task opens its own store from the DB path.
ocr_allowed = settings.ENABLE_OCR and can_use("receipt_ocr", session.tier, session.has_byok) ocr_allowed = settings.ENABLE_OCR and can_use("receipt_ocr", session.tier, session.has_byok)
if ocr_allowed: if ocr_allowed:
background_tasks.add_task(_process_receipt_ocr, receipt["id"], saved, session.db) background_tasks.add_task(_process_receipt_ocr, receipt["id"], saved, store)
return ReceiptResponse.model_validate(receipt) return ReceiptResponse.model_validate(receipt)
@ -66,7 +64,7 @@ async def upload_receipts_batch(
store.create_receipt, file.filename, str(saved) store.create_receipt, file.filename, str(saved)
) )
if ocr_allowed: if ocr_allowed:
background_tasks.add_task(_process_receipt_ocr, receipt["id"], saved, session.db) background_tasks.add_task(_process_receipt_ocr, receipt["id"], saved, store)
results.append(ReceiptResponse.model_validate(receipt)) results.append(ReceiptResponse.model_validate(receipt))
return results return results
@ -99,13 +97,8 @@ async def get_receipt_quality(receipt_id: int, store: Store = Depends(get_store)
return QualityAssessment.model_validate(qa) return QualityAssessment.model_validate(qa)
async def _process_receipt_ocr(receipt_id: int, image_path: Path, db_path: Path) -> None: async def _process_receipt_ocr(receipt_id: int, image_path: Path, store: Store) -> None:
"""Background task: run OCR pipeline on an uploaded receipt. """Background task: run OCR pipeline on an uploaded receipt."""
Accepts db_path (not a Store instance) because FastAPI closes the request-scoped
store before background tasks execute. This task owns its store lifecycle.
"""
store = Store(db_path)
try: try:
await asyncio.to_thread(store.update_receipt_status, receipt_id, "processing") await asyncio.to_thread(store.update_receipt_status, receipt_id, "processing")
from app.services.receipt_service import ReceiptService from app.services.receipt_service import ReceiptService
@ -115,5 +108,3 @@ async def _process_receipt_ocr(receipt_id: int, image_path: Path, db_path: Path)
await asyncio.to_thread( await asyncio.to_thread(
store.update_receipt_status, receipt_id, "error", str(exc) store.update_receipt_status, receipt_id, "error", str(exc)
) )
finally:
store.close()

View file

@ -1,166 +0,0 @@
# app/api/endpoints/recipe_tags.py
"""Community subcategory tagging for corpus recipes.
Users can tag a recipe they're viewing with a domain/category/subcategory
from the browse taxonomy. Tags require a community pseudonym and reach
public visibility once two independent users have tagged the same recipe
to the same location (upvotes >= 2).
All tiers may submit and upvote tags — community contribution is free.
"""
from __future__ import annotations
import logging
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel
from app.api.endpoints.community import _get_community_store
from app.api.endpoints.session import get_session
from app.cloud_session import CloudUser
from app.services.recipe.browser_domains import DOMAINS
logger = logging.getLogger(__name__)
router = APIRouter()
ACCEPT_THRESHOLD = 2
# ── Request / response models ──────────────────────────────────────────────────
class TagSubmitBody(BaseModel):
    """Request body for tagging a corpus recipe with a taxonomy location."""
    # Corpus recipe being tagged.
    recipe_id: int
    # Target location in the browse taxonomy (validated against DOMAINS).
    domain: str
    category: str
    subcategory: str | None = None
    # Community pseudonym of the submitting user.
    pseudonym: str
class TagResponse(BaseModel):
    """Public shape of a community tag on a corpus recipe."""
    id: int
    recipe_id: int
    domain: str
    category: str
    subcategory: str | None
    pseudonym: str
    # Number of independent upvotes received so far.
    upvotes: int
    # True once upvotes reach the acceptance threshold (publicly visible).
    accepted: bool
def _to_response(row: dict) -> TagResponse:
    """Convert a community-store tag row into the public TagResponse schema."""
    votes = row["upvotes"]
    return TagResponse(
        id=row["id"],
        recipe_id=int(row["recipe_ref"]),
        domain=row["domain"],
        category=row["category"],
        subcategory=row.get("subcategory"),
        pseudonym=row["pseudonym"],
        upvotes=votes,
        # A tag becomes publicly visible once enough independent users agree.
        accepted=votes >= ACCEPT_THRESHOLD,
    )
def _validate_location(domain: str, category: str, subcategory: str | None) -> None:
    """Raise 422 unless (domain, category, subcategory) exists in the taxonomy."""
    if domain not in DOMAINS:
        raise HTTPException(status_code=422, detail=f"Unknown domain '{domain}'.")
    categories = DOMAINS[domain].get("categories", {})
    if category not in categories:
        raise HTTPException(
            status_code=422,
            detail=f"Unknown category '{category}' in domain '{domain}'.",
        )
    if subcategory is None:
        return
    # Subcategory is optional; only validated when supplied.
    if subcategory not in categories[category].get("subcategories", {}):
        raise HTTPException(
            status_code=422,
            detail=f"Unknown subcategory '{subcategory}' in '{domain}/{category}'.",
        )
# ── Endpoints ──────────────────────────────────────────────────────────────────
@router.get("/recipes/community-tags/{recipe_id}", response_model=list[TagResponse])
async def list_recipe_tags(
    recipe_id: int,
    session: CloudUser = Depends(get_session),
) -> list[TagResponse]:
    """Return all community tags for a corpus recipe, accepted ones first."""
    store = _get_community_store()
    # No community backend configured on this instance — present no tags.
    if store is None:
        return []
    return [_to_response(row) for row in store.list_tags_for_recipe(recipe_id)]
@router.post("/recipes/community-tags", response_model=TagResponse, status_code=201)
async def submit_recipe_tag(
    body: TagSubmitBody,
    session: CloudUser = Depends(get_session),
) -> TagResponse:
    """Tag a corpus recipe with a browse taxonomy location.

    Requires the user to have a community pseudonym set. Returns 409 if this
    user has already tagged this recipe to this exact location, 422 for an
    unknown taxonomy location, and 503 when community features are disabled.
    """
    store = _get_community_store()
    if store is None:
        raise HTTPException(
            status_code=503,
            detail="Community features are not available on this instance.",
        )
    _validate_location(body.domain, body.category, body.subcategory)
    try:
        row = store.submit_recipe_tag(
            recipe_id=body.recipe_id,
            domain=body.domain,
            category=body.category,
            subcategory=body.subcategory,
            pseudonym=body.pseudonym,
        )
        return _to_response(row)
    except Exception as exc:
        # Driver-agnostic duplicate detection: match on the message / exception
        # class name rather than importing psycopg2. (The original did a dead
        # `import psycopg2.errors` inside this try; on installs without
        # psycopg2 the ImportError itself was swallowed here and surfaced as
        # a misleading 500.)
        if "unique" in str(exc).lower() or "UniqueViolation" in type(exc).__name__:
            raise HTTPException(
                status_code=409,
                detail="You have already tagged this recipe to this location.",
            )
        logger.error("submit_recipe_tag failed: %s", exc)
        raise HTTPException(status_code=500, detail="Failed to submit tag.")
@router.post("/recipes/community-tags/{tag_id}/upvote", response_model=TagResponse)
async def upvote_recipe_tag(
    tag_id: int,
    pseudonym: str,
    session: CloudUser = Depends(get_session),
) -> TagResponse:
    """Upvote an existing community tag.

    Returns 409 if this pseudonym has already voted on this tag.
    Returns 404 if the tag doesn't exist.
    """
    store = _get_community_store()
    if store is None:
        raise HTTPException(status_code=503, detail="Community features unavailable.")
    tag_row = store.get_recipe_tag_by_id(tag_id)
    if tag_row is None:
        raise HTTPException(status_code=404, detail=f"Tag {tag_id} not found.")
    try:
        new_upvotes = store.upvote_recipe_tag(tag_id, pseudonym)
    except ValueError:
        # Tag deleted between the fetch above and the vote.
        raise HTTPException(status_code=404, detail=f"Tag {tag_id} not found.")
    except Exception as exc:
        # Duplicate-vote unique violation, detected by message/class name so
        # we stay agnostic to the underlying DB driver.
        if "unique" in str(exc).lower() or "UniqueViolation" in type(exc).__name__:
            raise HTTPException(status_code=409, detail="You have already voted on this tag.")
        logger.error("upvote_recipe_tag failed: %s", exc)
        raise HTTPException(status_code=500, detail="Failed to upvote tag.")
    # Reflect the fresh count in the response without a second DB round-trip.
    tag_row["upvotes"] = new_upvotes
    return _to_response(tag_row)

View file

@ -1,610 +0,0 @@
"""Recipe suggestion and browser endpoints."""
from __future__ import annotations
import asyncio
import logging
from pathlib import Path
from typing import Annotated
from fastapi import APIRouter, Depends, HTTPException, Query
from app.cloud_session import CloudUser, _auth_label, get_session
log = logging.getLogger(__name__)
from app.db.session import get_store
from app.db.store import Store
from app.models.schemas.recipe import (
AssemblyTemplateOut,
BuildRequest,
RecipeJobStatus,
RecipeRequest,
RecipeResult,
RecipeSuggestion,
RoleCandidatesResponse,
StreamTokenRequest,
StreamTokenResponse,
)
from app.services.coordinator_proxy import CoordinatorError, coordinator_authorize
from app.api.endpoints.imitate import _build_recipe_prompt
from app.services.recipe.assembly_recipes import (
build_from_selection,
get_role_candidates,
get_templates_for_api,
)
from app.services.recipe.browser_domains import (
DOMAINS,
category_has_subcategories,
get_category_names,
get_domain_labels,
get_keywords_for_category,
get_keywords_for_subcategory,
get_subcategory_names,
)
from app.services.recipe.recipe_engine import RecipeEngine
from app.services.recipe.time_effort import parse_time_effort
from app.services.recipe.sensory import build_sensory_exclude
from app.services.heimdall_orch import check_orch_budget
from app.tiers import can_use
router = APIRouter()
def _suggest_in_thread(db_path: Path, req: RecipeRequest) -> RecipeResult:
    """Run recipe suggestion on a worker thread with a thread-local Store.

    sqlite3 connections are bound to the thread that created them, so the
    Store (and its connection) is opened and closed entirely inside this
    function rather than being shared from the request thread.
    """
    worker_store = Store(db_path)
    try:
        engine = RecipeEngine(worker_store)
        return engine.suggest(req)
    finally:
        worker_store.close()
def _build_stream_prompt(db_path: Path, level: int) -> str:
    """Assemble the LLM recipe prompt from pantry contents and user settings.

    Called via asyncio.to_thread, so it is free to use the synchronous Store.
    Items expiring within 3 days are highlighted separately in the prompt.
    """
    import datetime

    store = Store(db_path)
    try:
        inventory = store.list_inventory(status="available")
        pantry_names = [row["product_name"] for row in inventory if row.get("product_name")]

        today = datetime.date.today()

        def _expires_soon(row: dict) -> bool:
            if not (row.get("product_name") and row.get("expiry_date")):
                return False
            return (datetime.date.fromisoformat(row["expiry_date"]) - today).days <= 3

        expiring_names = [row["product_name"] for row in inventory if _expires_soon(row)]

        settings: dict = {}
        try:
            for record in store.conn.execute("SELECT key, value FROM user_settings").fetchall():
                settings[record["key"]] = record["value"]
        except Exception:
            pass  # best-effort: absent settings table just means no stored preferences

        def _split_csv(key: str) -> list[str]:
            raw = settings.get(key, "")
            if not raw:
                return []
            return [part.strip() for part in raw.split(",") if part.strip()]

        constraints = _split_csv("dietary_constraints")
        allergies = _split_csv("allergies")
        return _build_recipe_prompt(pantry_names, expiring_names, constraints, allergies, level)
    finally:
        store.close()
async def _enqueue_recipe_job(session: CloudUser, req: RecipeRequest):
    """Queue an async recipe_llm job and return 202 with job_id.

    Falls back to synchronous generation in CLOUD_MODE (scheduler polls only
    the shared settings DB, not per-user DBs — see snipe#45 / kiwi backlog).

    Returns:
        RecipeResult when CLOUD_MODE forces the synchronous fallback, else a
        JSONResponse 202 with {"job_id", "status": "queued"}.
    """
    # Local imports keep these modules off the import path of this module.
    import json
    import uuid
    from fastapi.responses import JSONResponse
    from app.cloud_session import CLOUD_MODE
    from app.tasks.runner import insert_task
    if CLOUD_MODE:
        log.warning("recipe_llm async jobs not supported in CLOUD_MODE — falling back to sync")
        result = await asyncio.to_thread(_suggest_in_thread, session.db, req)
        return result
    job_id = f"rec_{uuid.uuid4().hex}"
    def _create(db_path: Path) -> int:
        # Runs on a worker thread: Store owns a thread-bound sqlite connection.
        # The full request is serialized as the job payload.
        store = Store(db_path)
        try:
            row = store.create_recipe_job(job_id, session.user_id, req.model_dump_json())
            return row["id"]
        finally:
            store.close()
    int_id = await asyncio.to_thread(_create, session.db)
    params_json = json.dumps({"job_id": job_id})
    task_id, is_new = insert_task(session.db, "recipe_llm", int_id, params=params_json)
    if is_new:
        # Only freshly-inserted tasks get enqueued; an existing task is left as-is.
        from app.tasks.scheduler import get_scheduler
        get_scheduler(session.db).enqueue(task_id, "recipe_llm", int_id, params_json)
    return JSONResponse(content={"job_id": job_id, "status": "queued"}, status_code=202)
@router.post("/suggest")
async def suggest_recipes(
req: RecipeRequest,
async_mode: bool = Query(default=False, alias="async"),
session: CloudUser = Depends(get_session),
store: Store = Depends(get_store),
):
log.info("recipes auth=%s tier=%s level=%s", _auth_label(session.user_id), session.tier, req.level)
# Inject session-authoritative tier/byok immediately — client-supplied values are ignored.
# Also read stored unit_system preference; default to metric if not set.
unit_system = store.get_setting("unit_system") or "metric"
req = req.model_copy(update={"tier": session.tier, "has_byok": session.has_byok, "unit_system": unit_system})
if req.level == 4 and not req.wildcard_confirmed:
raise HTTPException(
status_code=400,
detail="Level 4 (Wildcard) requires wildcard_confirmed=true.",
)
if req.level in (3, 4) and not can_use("recipe_suggestions", req.tier, req.has_byok):
raise HTTPException(
status_code=403,
detail="LLM recipe levels require Paid tier or a configured LLM backend.",
)
if req.style_id and not can_use("style_picker", req.tier):
raise HTTPException(status_code=403, detail="Style picker requires Paid tier.")
# Orch budget check for lifetime/founders keys — downgrade to L2 (local) if exhausted.
# Subscription and local/BYOK users skip this check entirely.
orch_fallback = False
if (
req.level in (3, 4)
and session.license_key is not None
and not session.has_byok
and session.tier != "local"
):
budget = check_orch_budget(session.license_key, "kiwi")
if not budget.get("allowed", True):
req = req.model_copy(update={"level": 2})
orch_fallback = True
if req.level in (3, 4) and async_mode:
return await _enqueue_recipe_job(session, req)
result = await asyncio.to_thread(_suggest_in_thread, session.db, req)
if orch_fallback:
result = result.model_copy(update={"orch_fallback": True})
return result
@router.post("/stream-token", response_model=StreamTokenResponse)
async def get_stream_token(
req: StreamTokenRequest,
session: CloudUser = Depends(get_session),
) -> StreamTokenResponse:
"""Issue a one-time stream token for LLM recipe generation.
Tier-gated (Paid or BYOK). Builds the prompt from pantry + user settings,
then calls the cf-orch coordinator to obtain a stream URL. Returns
immediately the frontend opens EventSource to the stream URL directly.
"""
if not can_use("recipe_suggestions", session.tier, session.has_byok):
raise HTTPException(
status_code=403,
detail="Streaming recipe generation requires Paid tier or a configured LLM backend.",
)
if req.level == 4 and not req.wildcard_confirmed:
raise HTTPException(
status_code=400,
detail="Level 4 (Wildcard) streaming requires wildcard_confirmed=true.",
)
prompt = await asyncio.to_thread(_build_stream_prompt, session.db, req.level)
try:
result = await coordinator_authorize(prompt=prompt, caller="kiwi-recipe", ttl_s=300)
except CoordinatorError as exc:
raise HTTPException(status_code=exc.status_code, detail=str(exc))
return StreamTokenResponse(
stream_url=result.stream_url,
token=result.token,
expires_in_s=result.expires_in_s,
)
@router.get("/jobs/{job_id}", response_model=RecipeJobStatus)
async def get_recipe_job_status(
job_id: str,
session: CloudUser = Depends(get_session),
) -> RecipeJobStatus:
"""Poll the status of an async recipe generation job.
Returns 404 when job_id is unknown or belongs to a different user.
On status='done' with suggestions=[], the LLM returned empty client
should show a 'no recipe generated, try again' message.
"""
def _get(db_path: Path) -> dict | None:
store = Store(db_path)
try:
return store.get_recipe_job(job_id, session.user_id)
finally:
store.close()
row = await asyncio.to_thread(_get, session.db)
if row is None:
raise HTTPException(status_code=404, detail="Job not found.")
result = None
if row["status"] == "done" and row["result"]:
result = RecipeResult.model_validate_json(row["result"])
return RecipeJobStatus(
job_id=row["job_id"],
status=row["status"],
result=result,
error=row["error"],
)
@router.get("/browse/domains")
async def list_browse_domains(
session: CloudUser = Depends(get_session),
) -> list[dict]:
"""Return available domain schemas for the recipe browser."""
return get_domain_labels()
@router.get("/browse/{domain}")
async def list_browse_categories(
domain: str,
session: CloudUser = Depends(get_session),
) -> list[dict]:
"""Return categories with recipe counts for a given domain."""
if domain not in DOMAINS:
raise HTTPException(status_code=404, detail=f"Unknown domain '{domain}'.")
cat_names = get_category_names(domain)
keywords_by_category = {cat: get_keywords_for_category(domain, cat) for cat in cat_names}
has_subs = {cat: category_has_subcategories(domain, cat) for cat in cat_names}
def _get(db_path: Path) -> list[dict]:
store = Store(db_path)
try:
return store.get_browser_categories(domain, keywords_by_category, has_subs)
finally:
store.close()
return await asyncio.to_thread(_get, session.db)
@router.get("/browse/{domain}/{category}/subcategories")
async def list_browse_subcategories(
domain: str,
category: str,
session: CloudUser = Depends(get_session),
) -> list[dict]:
"""Return [{subcategory, recipe_count}] for a category that supports subcategories."""
if domain not in DOMAINS:
raise HTTPException(status_code=404, detail=f"Unknown domain '{domain}'.")
if not category_has_subcategories(domain, category):
return []
subcat_names = get_subcategory_names(domain, category)
keywords_by_subcat = {
sub: get_keywords_for_subcategory(domain, category, sub)
for sub in subcat_names
}
def _get(db_path: Path) -> list[dict]:
store = Store(db_path)
try:
return store.get_browser_subcategories(domain, keywords_by_subcat)
finally:
store.close()
return await asyncio.to_thread(_get, session.db)
@router.get("/browse/{domain}/{category}")
async def browse_recipes(
domain: str,
category: str,
page: Annotated[int, Query(ge=1)] = 1,
page_size: Annotated[int, Query(ge=1, le=100)] = 20,
pantry_items: Annotated[str | None, Query()] = None,
subcategory: Annotated[str | None, Query()] = None,
q: Annotated[str | None, Query(max_length=200)] = None,
sort: Annotated[str, Query(pattern="^(default|alpha|alpha_desc|match)$")] = "default",
session: CloudUser = Depends(get_session),
) -> dict:
"""Return a paginated list of recipes for a domain/category.
Pass pantry_items as a comma-separated string to receive match_pct badges.
Pass subcategory to narrow within a category that has subcategories.
Pass q to filter by title substring. Pass sort for ordering (default/alpha/alpha_desc/match).
sort=match orders by pantry coverage DESC; falls back to default when no pantry_items.
"""
if domain not in DOMAINS:
raise HTTPException(status_code=404, detail=f"Unknown domain '{domain}'.")
if category == "_all":
keywords = None # unfiltered browse
elif subcategory:
keywords = get_keywords_for_subcategory(domain, category, subcategory)
if not keywords:
raise HTTPException(
status_code=404,
detail=f"Unknown subcategory '{subcategory}' in '{category}'.",
)
else:
keywords = get_keywords_for_category(domain, category)
if not keywords:
raise HTTPException(
status_code=404,
detail=f"Unknown category '{category}' in domain '{domain}'.",
)
pantry_list = (
[p.strip() for p in pantry_items.split(",") if p.strip()]
if pantry_items
else None
)
def _browse(db_path: Path) -> dict:
store = Store(db_path)
try:
# Load sensory preferences
sensory_prefs_json = store.get_setting("sensory_preferences")
sensory_exclude = build_sensory_exclude(sensory_prefs_json)
result = store.browse_recipes(
keywords=keywords,
page=page,
page_size=page_size,
pantry_items=pantry_list,
q=q or None,
sort=sort,
sensory_exclude=sensory_exclude,
)
# ── Attach time/effort signals to each browse result ────────────────
import json as _json
for recipe_row in result.get("recipes", []):
directions_raw = recipe_row.get("directions") or []
if isinstance(directions_raw, str):
try:
directions_raw = _json.loads(directions_raw)
except Exception:
directions_raw = []
if directions_raw:
_profile = parse_time_effort(directions_raw)
recipe_row["active_min"] = _profile.active_min
recipe_row["passive_min"] = _profile.passive_min
else:
recipe_row["active_min"] = None
recipe_row["passive_min"] = None
# Remove directions from browse payload — not needed by the card UI
recipe_row.pop("directions", None)
# Community tag fallback: if FTS returned nothing for a subcategory,
# check whether accepted community tags exist for this location and
# fetch those corpus recipes directly by ID.
if result["total"] == 0 and subcategory and keywords:
try:
from app.api.endpoints.community import _get_community_store
cs = _get_community_store()
if cs is not None:
community_ids = cs.get_accepted_recipe_ids_for_subcategory(
domain=domain,
category=category,
subcategory=subcategory,
)
if community_ids:
offset = (page - 1) * page_size
paged_ids = community_ids[offset: offset + page_size]
recipes = store.fetch_recipes_by_ids(paged_ids, pantry_list)
import json as _json_c
for recipe_row in recipes:
directions_raw = recipe_row.get("directions") or []
if isinstance(directions_raw, str):
try:
directions_raw = _json_c.loads(directions_raw)
except Exception:
directions_raw = []
if directions_raw:
_profile = parse_time_effort(directions_raw)
recipe_row["active_min"] = _profile.active_min
recipe_row["passive_min"] = _profile.passive_min
else:
recipe_row["active_min"] = None
recipe_row["passive_min"] = None
recipe_row.pop("directions", None)
result = {
"recipes": recipes,
"total": len(community_ids),
"page": page,
"community_tagged": True,
}
except Exception as exc:
logger.warning("community tag fallback failed: %s", exc)
store.log_browser_telemetry(
domain=domain,
category=category,
page=page,
result_count=result["total"],
)
return result
finally:
store.close()
return await asyncio.to_thread(_browse, session.db)
@router.get("/templates", response_model=list[AssemblyTemplateOut])
async def list_assembly_templates() -> list[dict]:
"""Return all 13 assembly templates with ordered role sequences.
Cache-friendly: static data, no per-user state.
"""
return get_templates_for_api()
@router.get("/template-candidates", response_model=RoleCandidatesResponse)
async def get_template_role_candidates(
template_id: str = Query(..., description="Template slug, e.g. 'burrito_taco'"),
role: str = Query(..., description="Role display name, e.g. 'protein'"),
prior_picks: str = Query(default="", description="Comma-separated prior selections"),
session: CloudUser = Depends(get_session),
) -> dict:
"""Return pantry-matched candidates for one wizard step."""
def _get(db_path: Path) -> dict:
store = Store(db_path)
try:
items = store.list_inventory(status="available")
pantry_set = {
item["product_name"]
for item in items
if item.get("product_name")
}
pantry_list = list(pantry_set)
prior = [p.strip() for p in prior_picks.split(",") if p.strip()]
profile_index = store.get_element_profiles(pantry_list + prior)
return get_role_candidates(
template_slug=template_id,
role_display=role,
pantry_set=pantry_set,
prior_picks=prior,
profile_index=profile_index,
)
finally:
store.close()
return await asyncio.to_thread(_get, session.db)
@router.post("/build", response_model=RecipeSuggestion)
async def build_recipe(
req: BuildRequest,
session: CloudUser = Depends(get_session),
) -> RecipeSuggestion:
"""Build a recipe from explicit role selections."""
def _build(db_path: Path) -> RecipeSuggestion | None:
store = Store(db_path)
try:
items = store.list_inventory(status="available")
pantry_set = {
item["product_name"]
for item in items
if item.get("product_name")
}
suggestion = build_from_selection(
template_slug=req.template_id,
role_overrides=req.role_overrides,
pantry_set=pantry_set,
)
if suggestion is None:
return None
# Persist to recipes table so the result can be saved/bookmarked.
# external_id encodes template + selections for stable dedup.
import hashlib as _hl, json as _js
sel_hash = _hl.md5(
_js.dumps(req.role_overrides, sort_keys=True).encode()
).hexdigest()[:8]
external_id = f"assembly:{req.template_id}:{sel_hash}"
real_id = store.upsert_built_recipe(
external_id=external_id,
title=suggestion.title,
ingredients=suggestion.matched_ingredients,
directions=suggestion.directions,
)
return suggestion.model_copy(update={"id": real_id})
finally:
store.close()
result = await asyncio.to_thread(_build, session.db)
if result is None:
raise HTTPException(
status_code=404,
detail="Template not found or required ingredient missing.",
)
return result
@router.get("/{recipe_id}")
async def get_recipe(recipe_id: int, session: CloudUser = Depends(get_session)) -> dict:
def _get(db_path: Path, rid: int) -> dict | None:
store = Store(db_path)
try:
return store.get_recipe(rid)
finally:
store.close()
recipe = await asyncio.to_thread(_get, session.db, recipe_id)
if not recipe:
raise HTTPException(status_code=404, detail="Recipe not found.")
# Normalize corpus record into RecipeSuggestion shape so RecipeDetailPanel
# can render it without knowing it came from a direct DB lookup.
ingredient_names = recipe.get("ingredient_names") or []
if isinstance(ingredient_names, str):
import json as _json
try:
ingredient_names = _json.loads(ingredient_names)
except Exception:
ingredient_names = []
_directions_for_te = recipe.get("directions") or []
if isinstance(_directions_for_te, str):
import json as _json2
try:
_directions_for_te = _json2.loads(_directions_for_te)
except Exception:
_directions_for_te = []
if _directions_for_te:
_te = parse_time_effort(_directions_for_te)
_time_effort_out: dict | None = {
"active_min": _te.active_min,
"passive_min": _te.passive_min,
"total_min": _te.total_min,
"effort_label": _te.effort_label,
"equipment": _te.equipment,
"step_analyses": [
{"is_passive": sa.is_passive, "detected_minutes": sa.detected_minutes}
for sa in _te.step_analyses
],
}
else:
_time_effort_out = None
return {
"id": recipe.get("id"),
"title": recipe.get("title", ""),
"match_count": 0,
"matched_ingredients": ingredient_names,
"missing_ingredients": [],
"directions": recipe.get("directions") or [],
"prep_notes": [],
"swap_candidates": [],
"element_coverage": {},
"notes": recipe.get("notes") or "",
"level": 1,
"is_wildcard": False,
"nutrition": None,
"source_url": recipe.get("source_url") or None,
"complexity": None,
"estimated_time_min": None,
"time_effort": _time_effort_out,
}

View file

@ -1,188 +0,0 @@
"""Saved recipe bookmark endpoints."""
from __future__ import annotations
import asyncio
from pathlib import Path
from fastapi import APIRouter, Depends, HTTPException
from app.cloud_session import CloudUser, get_session
from app.db.store import Store
from app.models.schemas.saved_recipe import (
CollectionMemberRequest,
CollectionRequest,
CollectionSummary,
SavedRecipeSummary,
SaveRecipeRequest,
UpdateSavedRecipeRequest,
)
from app.tiers import can_use
router = APIRouter()
def _in_thread(db_path: Path, fn):
    """Run a Store operation in a worker thread with its own connection.

    Opens a fresh Store, applies *fn* to it, and guarantees the connection
    is closed even when fn raises.
    """
    thread_store = Store(db_path)
    try:
        return fn(thread_store)
    finally:
        thread_store.close()
def _to_summary(row: dict, store: Store) -> SavedRecipeSummary:
    """Convert a saved-recipe DB row into its API summary model.

    Collection memberships are looked up per row via the store.
    """
    memberships = store.get_saved_recipe_collection_ids(row["id"])
    return SavedRecipeSummary(
        id=row["id"],
        recipe_id=row["recipe_id"],
        title=row.get("title", ""),
        saved_at=row["saved_at"],
        notes=row.get("notes"),
        rating=row.get("rating"),
        style_tags=row.get("style_tags") or [],
        collection_ids=memberships,
    )
# ── save / unsave ─────────────────────────────────────────────────────────────
@router.post("", response_model=SavedRecipeSummary)
async def save_recipe(
req: SaveRecipeRequest,
session: CloudUser = Depends(get_session),
) -> SavedRecipeSummary:
def _run(store: Store) -> SavedRecipeSummary:
row = store.save_recipe(req.recipe_id, req.notes, req.rating)
return _to_summary(row, store)
return await asyncio.to_thread(_in_thread, session.db, _run)
@router.delete("/{recipe_id}", status_code=204)
async def unsave_recipe(
recipe_id: int,
session: CloudUser = Depends(get_session),
) -> None:
await asyncio.to_thread(
_in_thread, session.db, lambda s: s.unsave_recipe(recipe_id)
)
@router.patch("/{recipe_id}", response_model=SavedRecipeSummary)
async def update_saved_recipe(
recipe_id: int,
req: UpdateSavedRecipeRequest,
session: CloudUser = Depends(get_session),
) -> SavedRecipeSummary:
def _run(store: Store) -> SavedRecipeSummary:
if not store.is_recipe_saved(recipe_id):
raise HTTPException(status_code=404, detail="Recipe not saved.")
row = store.update_saved_recipe(
recipe_id, req.notes, req.rating, req.style_tags
)
return _to_summary(row, store)
return await asyncio.to_thread(_in_thread, session.db, _run)
@router.get("", response_model=list[SavedRecipeSummary])
async def list_saved_recipes(
sort_by: str = "saved_at",
collection_id: int | None = None,
session: CloudUser = Depends(get_session),
) -> list[SavedRecipeSummary]:
def _run(store: Store) -> list[SavedRecipeSummary]:
rows = store.get_saved_recipes(sort_by=sort_by, collection_id=collection_id)
return [_to_summary(r, store) for r in rows]
return await asyncio.to_thread(_in_thread, session.db, _run)
# ── collections (Paid) ────────────────────────────────────────────────────────
@router.get("/collections", response_model=list[CollectionSummary])
async def list_collections(
session: CloudUser = Depends(get_session),
) -> list[CollectionSummary]:
if not can_use("recipe_collections", session.tier):
raise HTTPException(status_code=403, detail="Collections require Paid tier.")
rows = await asyncio.to_thread(
_in_thread, session.db, lambda s: s.get_collections()
)
return [CollectionSummary(**r) for r in rows]
@router.post("/collections", response_model=CollectionSummary)
async def create_collection(
req: CollectionRequest,
session: CloudUser = Depends(get_session),
) -> CollectionSummary:
if not can_use("recipe_collections", session.tier):
raise HTTPException(
status_code=403,
detail="Collections require Paid tier.",
)
row = await asyncio.to_thread(
_in_thread, session.db,
lambda s: s.create_collection(req.name, req.description),
)
return CollectionSummary(**row)
@router.delete("/collections/{collection_id}", status_code=204)
async def delete_collection(
collection_id: int,
session: CloudUser = Depends(get_session),
) -> None:
if not can_use("recipe_collections", session.tier):
raise HTTPException(status_code=403, detail="Collections require Paid tier.")
await asyncio.to_thread(
_in_thread, session.db, lambda s: s.delete_collection(collection_id)
)
@router.patch("/collections/{collection_id}", response_model=CollectionSummary)
async def rename_collection(
collection_id: int,
req: CollectionRequest,
session: CloudUser = Depends(get_session),
) -> CollectionSummary:
if not can_use("recipe_collections", session.tier):
raise HTTPException(status_code=403, detail="Collections require Paid tier.")
row = await asyncio.to_thread(
_in_thread, session.db,
lambda s: s.rename_collection(collection_id, req.name, req.description),
)
if not row:
raise HTTPException(status_code=404, detail="Collection not found.")
return CollectionSummary(**row)
@router.post("/collections/{collection_id}/members", status_code=204)
async def add_to_collection(
collection_id: int,
req: CollectionMemberRequest,
session: CloudUser = Depends(get_session),
) -> None:
if not can_use("recipe_collections", session.tier):
raise HTTPException(status_code=403, detail="Collections require Paid tier.")
await asyncio.to_thread(
_in_thread, session.db,
lambda s: s.add_to_collection(collection_id, req.saved_recipe_id),
)
@router.delete(
"/collections/{collection_id}/members/{saved_recipe_id}", status_code=204
)
async def remove_from_collection(
collection_id: int,
saved_recipe_id: int,
session: CloudUser = Depends(get_session),
) -> None:
if not can_use("recipe_collections", session.tier):
raise HTTPException(status_code=403, detail="Collections require Paid tier.")
await asyncio.to_thread(
_in_thread, session.db,
lambda s: s.remove_from_collection(collection_id, saved_recipe_id),
)

View file

@ -1,37 +0,0 @@
"""Session bootstrap endpoint — called once per app load by the frontend.
Logs auth= + tier= for log-based analytics without client-side tracking.
See Circuit-Forge/kiwi#86.
"""
from __future__ import annotations
import logging
from fastapi import APIRouter, Depends
from app.cloud_session import CloudUser, _auth_label, get_session
from app.core.config import settings
router = APIRouter()
log = logging.getLogger(__name__)
@router.get("/bootstrap")
def session_bootstrap(session: CloudUser = Depends(get_session)) -> dict:
"""Record auth type and tier for log-based analytics.
Expected log output:
INFO:app.api.endpoints.session: session auth=authed tier=paid
INFO:app.api.endpoints.session: session auth=anon tier=free
E2E test sessions (E2E_TEST_USER_ID) are logged at DEBUG so they don't
pollute analytics counts while still being visible when DEBUG=true.
"""
is_test = bool(settings.E2E_TEST_USER_ID and session.user_id == settings.E2E_TEST_USER_ID)
logger = log.debug if is_test else log.info
logger("session auth=%s tier=%s%s", _auth_label(session.user_id), session.tier, " e2e=true" if is_test else "")
return {
"auth": _auth_label(session.user_id),
"tier": session.tier,
"has_byok": session.has_byok,
}

View file

@ -1,46 +0,0 @@
"""User settings endpoints."""
from __future__ import annotations
from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel
from app.cloud_session import CloudUser, get_session
from app.db.session import get_store
from app.db.store import Store
router = APIRouter()
_ALLOWED_KEYS = frozenset({"cooking_equipment", "unit_system", "shopping_locale", "sensory_preferences", "time_first_layout"})
class SettingBody(BaseModel):
    """Request body for PUT /{key}: the raw string value to store."""
    value: str
@router.get("/{key}")
async def get_setting(
key: str,
session: CloudUser = Depends(get_session),
store: Store = Depends(get_store),
) -> dict:
"""Return the stored value for a settings key."""
if key not in _ALLOWED_KEYS:
raise HTTPException(status_code=422, detail=f"Unknown settings key: '{key}'.")
value = store.get_setting(key)
if value is None:
raise HTTPException(status_code=404, detail=f"Setting '{key}' not found.")
return {"key": key, "value": value}
@router.put("/{key}")
async def set_setting(
key: str,
body: SettingBody,
session: CloudUser = Depends(get_session),
store: Store = Depends(get_store),
) -> dict:
"""Upsert a settings key-value pair."""
if key not in _ALLOWED_KEYS:
raise HTTPException(status_code=422, detail=f"Unknown settings key: '{key}'.")
store.set_setting(key, body.value)
return {"key": key, "value": body.value}

View file

@ -1,233 +0,0 @@
"""Shopping list endpoints.
Free tier for all users (anonymous guests included shopping list is the
primary affiliate revenue surface). Confirm-purchase action is also Free:
it moves a checked item into pantry inventory without a tier gate so the
flow works for anyone who signs up or browses without an account.
Routes:
GET /shopping list items (with affiliate links)
POST /shopping add item manually
PATCH /shopping/{id} update (check/uncheck, rename, qty)
DELETE /shopping/{id} remove single item
DELETE /shopping/checked clear all checked items
DELETE /shopping/all clear entire list
POST /shopping/from-recipe bulk add gaps from a recipe
POST /shopping/{id}/confirm confirm purchase add to pantry inventory
"""
from __future__ import annotations
import asyncio
import logging
from fastapi import APIRouter, Depends, HTTPException, status
from app.cloud_session import CloudUser, get_session
from app.db.session import get_store
from app.db.store import Store
from app.models.schemas.shopping import (
BulkAddFromRecipeRequest,
ConfirmPurchaseRequest,
ShoppingItemCreate,
ShoppingItemResponse,
ShoppingItemUpdate,
)
from app.services.recipe.grocery_links import GroceryLinkBuilder
log = logging.getLogger(__name__)
router = APIRouter()
def _enrich(item: dict, builder: GroceryLinkBuilder) -> ShoppingItemResponse:
    """Attach live affiliate links to a raw store row.

    The sqlite 0/1 `checked` flag is coerced to a real bool on the way out.
    """
    links = builder.build_links(item["name"])
    link_dicts = [
        {"ingredient": link.ingredient, "retailer": link.retailer, "url": link.url}
        for link in links
    ]
    payload = {**item, "checked": bool(item.get("checked", 0))}
    return ShoppingItemResponse(**payload, grocery_links=link_dicts)
def _in_thread(db_path, fn):
    """Open a thread-local Store, apply *fn*, and always close the connection."""
    local_store = Store(db_path)
    try:
        return fn(local_store)
    finally:
        local_store.close()
# ── List ──────────────────────────────────────────────────────────────────────
def _locale_from_store(store: Store) -> str:
    """Stored shopping locale, falling back to 'us' when unset or empty."""
    locale = store.get_setting("shopping_locale")
    return locale if locale else "us"
@router.get("", response_model=list[ShoppingItemResponse])
async def list_shopping_items(
include_checked: bool = True,
session: CloudUser = Depends(get_session),
store: Store = Depends(get_store),
):
locale = await asyncio.to_thread(_in_thread, session.db, _locale_from_store)
builder = GroceryLinkBuilder(tier=session.tier, has_byok=session.has_byok, locale=locale)
items = await asyncio.to_thread(
_in_thread, session.db, lambda s: s.list_shopping_items(include_checked)
)
return [_enrich(i, builder) for i in items]
# ── Add manually ──────────────────────────────────────────────────────────────
@router.post("", response_model=ShoppingItemResponse, status_code=status.HTTP_201_CREATED)
async def add_shopping_item(
body: ShoppingItemCreate,
session: CloudUser = Depends(get_session),
store: Store = Depends(get_store),
):
builder = GroceryLinkBuilder(tier=session.tier, has_byok=session.has_byok, locale=_locale_from_store(store))
item = await asyncio.to_thread(
_in_thread,
session.db,
lambda s: s.add_shopping_item(
name=body.name,
quantity=body.quantity,
unit=body.unit,
category=body.category,
notes=body.notes,
source=body.source,
recipe_id=body.recipe_id,
sort_order=body.sort_order,
),
)
return _enrich(item, builder)
# ── Bulk add from recipe ───────────────────────────────────────────────────────
@router.post("/from-recipe", response_model=list[ShoppingItemResponse], status_code=status.HTTP_201_CREATED)
async def add_from_recipe(
body: BulkAddFromRecipeRequest,
session: CloudUser = Depends(get_session),
store: Store = Depends(get_store),
):
"""Add missing ingredients from a recipe to the shopping list.
Runs pantry gap analysis and adds only the items the user doesn't have
(unless include_covered=True). Skips duplicates already on the list.
"""
from app.services.meal_plan.shopping_list import compute_shopping_list
def _run(store: Store):
recipe = store.get_recipe(body.recipe_id)
if not recipe:
raise HTTPException(status_code=404, detail="Recipe not found")
inventory = store.list_inventory()
gaps, covered = compute_shopping_list([recipe], inventory)
targets = (gaps + covered) if body.include_covered else gaps
# Avoid duplicates already on the list
existing = {i["name"].lower() for i in store.list_shopping_items()}
added = []
for gap in targets:
if gap.ingredient_name.lower() in existing:
continue
item = store.add_shopping_item(
name=gap.ingredient_name,
quantity=None,
unit=gap.have_unit,
source="recipe",
recipe_id=body.recipe_id,
)
added.append(item)
return added
builder = GroceryLinkBuilder(tier=session.tier, has_byok=session.has_byok, locale=_locale_from_store(store))
items = await asyncio.to_thread(_in_thread, session.db, _run)
return [_enrich(i, builder) for i in items]
# ── Update ────────────────────────────────────────────────────────────────────
@router.patch("/{item_id}", response_model=ShoppingItemResponse)
async def update_shopping_item(
item_id: int,
body: ShoppingItemUpdate,
session: CloudUser = Depends(get_session),
store: Store = Depends(get_store),
):
builder = GroceryLinkBuilder(tier=session.tier, has_byok=session.has_byok, locale=_locale_from_store(store))
item = await asyncio.to_thread(
_in_thread,
session.db,
lambda s: s.update_shopping_item(item_id, **body.model_dump(exclude_none=True)),
)
if not item:
raise HTTPException(status_code=404, detail="Shopping item not found")
return _enrich(item, builder)
# ── Confirm purchase → pantry ─────────────────────────────────────────────────
@router.post("/{item_id}/confirm", status_code=status.HTTP_201_CREATED)
async def confirm_purchase(
item_id: int,
body: ConfirmPurchaseRequest,
session: CloudUser = Depends(get_session),
):
"""Confirm a checked item was purchased and add it to pantry inventory.
Human approval step: the user explicitly confirms what they actually bought
before it lands in their pantry. Returns the new inventory item.
"""
def _run(store: Store):
shopping_item = store.get_shopping_item(item_id)
if not shopping_item:
raise HTTPException(status_code=404, detail="Shopping item not found")
qty = body.quantity if body.quantity is not None else (shopping_item.get("quantity") or 1.0)
unit = body.unit or shopping_item.get("unit") or "count"
category = shopping_item.get("category")
product = store.get_or_create_product(
name=shopping_item["name"],
category=category,
)
inv_item = store.add_inventory_item(
product_id=product["id"],
location=body.location,
quantity=qty,
unit=unit,
source="manual",
)
# Mark the shopping item checked and leave it for the user to clear
store.update_shopping_item(item_id, checked=True)
return inv_item
return await asyncio.to_thread(_in_thread, session.db, _run)
# ── Delete ────────────────────────────────────────────────────────────────────
@router.delete("/{item_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_shopping_item(
item_id: int,
session: CloudUser = Depends(get_session),
):
deleted = await asyncio.to_thread(
_in_thread, session.db, lambda s: s.delete_shopping_item(item_id)
)
if not deleted:
raise HTTPException(status_code=404, detail="Shopping item not found")
@router.delete("/checked", status_code=status.HTTP_204_NO_CONTENT)
async def clear_checked(session: CloudUser = Depends(get_session)):
await asyncio.to_thread(
_in_thread, session.db, lambda s: s.clear_checked_shopping_items()
)
@router.delete("/all", status_code=status.HTTP_204_NO_CONTENT)
async def clear_all(session: CloudUser = Depends(get_session)):
await asyncio.to_thread(
_in_thread, session.db, lambda s: s.clear_all_shopping_items()
)

View file

@ -1,42 +0,0 @@
"""Staple library endpoints."""
from __future__ import annotations
from fastapi import APIRouter, HTTPException
from app.services.recipe.staple_library import StapleLibrary
router = APIRouter()
_lib = StapleLibrary()
@router.get("/")
async def list_staples(dietary: str | None = None) -> list[dict]:
staples = _lib.filter_by_dietary(dietary) if dietary else _lib.list_all()
return [
{
"slug": s.slug,
"name": s.name,
"description": s.description,
"dietary_labels": s.dietary_labels,
"yield_formats": list(s.yield_formats.keys()),
}
for s in staples
]
@router.get("/{slug}")
async def get_staple(slug: str) -> dict:
staple = _lib.get(slug)
if not staple:
raise HTTPException(status_code=404, detail=f"Staple '{slug}' not found.")
return {
"slug": staple.slug,
"name": staple.name,
"description": staple.description,
"dietary_labels": staple.dietary_labels,
"base_ingredients": staple.base_ingredients,
"base_method": staple.base_method,
"base_time_minutes": staple.base_time_minutes,
"yield_formats": staple.yield_formats,
"compatible_styles": staple.compatible_styles,
}

View file

@ -1,26 +1,10 @@
from fastapi import APIRouter from fastapi import APIRouter
from app.api.endpoints import health, receipts, export, inventory, ocr, recipes, settings, staples, feedback, feedback_attach, household, saved_recipes, imitate, meal_plans, orch_usage, session, shopping from app.api.endpoints import health, receipts, export, inventory, ocr
from app.api.endpoints.community import router as community_router
from app.api.endpoints.recipe_tags import router as recipe_tags_router
api_router = APIRouter() api_router = APIRouter()
api_router.include_router(session.router, prefix="/session", tags=["session"])
api_router.include_router(health.router, prefix="/health", tags=["health"]) api_router.include_router(health.router, prefix="/health", tags=["health"])
api_router.include_router(receipts.router, prefix="/receipts", tags=["receipts"]) api_router.include_router(receipts.router, prefix="/receipts", tags=["receipts"])
api_router.include_router(ocr.router, prefix="/receipts", tags=["ocr"]) api_router.include_router(ocr.router, prefix="/receipts", tags=["ocr"]) # OCR endpoints under /receipts
api_router.include_router(export.router, tags=["export"]) api_router.include_router(export.router, tags=["export"]) # No prefix, uses /export in the router
api_router.include_router(inventory.router, prefix="/inventory", tags=["inventory"]) api_router.include_router(inventory.router, prefix="/inventory", tags=["inventory"])
api_router.include_router(saved_recipes.router, prefix="/recipes/saved", tags=["saved-recipes"])
api_router.include_router(recipes.router, prefix="/recipes", tags=["recipes"])
api_router.include_router(settings.router, prefix="/settings", tags=["settings"])
api_router.include_router(staples.router, prefix="/staples", tags=["staples"])
api_router.include_router(feedback.router, prefix="/feedback", tags=["feedback"])
api_router.include_router(feedback_attach.router, prefix="/feedback", tags=["feedback"])
api_router.include_router(household.router, prefix="/household", tags=["household"])
api_router.include_router(imitate.router, prefix="/imitate", tags=["imitate"])
api_router.include_router(meal_plans.router, prefix="/meal-plans", tags=["meal-plans"])
api_router.include_router(orch_usage.router, prefix="/orch-usage", tags=["orch-usage"])
api_router.include_router(shopping.router, prefix="/shopping", tags=["shopping"])
api_router.include_router(community_router)
api_router.include_router(recipe_tags_router)

View file

@ -22,12 +22,10 @@ import time
from dataclasses import dataclass from dataclasses import dataclass
from pathlib import Path from pathlib import Path
import uuid
import jwt as pyjwt import jwt as pyjwt
import requests import requests
import yaml import yaml
from fastapi import Depends, HTTPException, Request, Response from fastapi import Depends, HTTPException, Request
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
@ -39,60 +37,14 @@ DIRECTUS_JWT_SECRET: str = os.environ.get("DIRECTUS_JWT_SECRET", "")
HEIMDALL_URL: str = os.environ.get("HEIMDALL_URL", "https://license.circuitforge.tech") HEIMDALL_URL: str = os.environ.get("HEIMDALL_URL", "https://license.circuitforge.tech")
HEIMDALL_ADMIN_TOKEN: str = os.environ.get("HEIMDALL_ADMIN_TOKEN", "") HEIMDALL_ADMIN_TOKEN: str = os.environ.get("HEIMDALL_ADMIN_TOKEN", "")
# Dev bypass: comma-separated IPs or CIDR ranges that skip JWT auth.
# NEVER set this in production. Intended only for LAN developer testing when
# the request doesn't pass through Caddy (which normally injects X-CF-Session).
# Example: CLOUD_AUTH_BYPASS_IPS=10.1.10.0/24,127.0.0.1
import ipaddress as _ipaddress
_BYPASS_RAW: list[str] = [
e.strip()
for e in os.environ.get("CLOUD_AUTH_BYPASS_IPS", "").split(",")
if e.strip()
]
_BYPASS_NETS: list[_ipaddress.IPv4Network | _ipaddress.IPv6Network] = []
_BYPASS_IPS: frozenset[str] = frozenset()
if _BYPASS_RAW:
_nets, _ips = [], set()
for entry in _BYPASS_RAW:
try:
_nets.append(_ipaddress.ip_network(entry, strict=False))
except ValueError:
_ips.add(entry) # treat non-parseable entries as bare IPs
_BYPASS_NETS = _nets
_BYPASS_IPS = frozenset(_ips)
def _is_bypass_ip(ip: str) -> bool:
if not ip:
return False
if ip in _BYPASS_IPS:
return True
try:
addr = _ipaddress.ip_address(ip)
return any(addr in net for net in _BYPASS_NETS)
except ValueError:
return False
_LOCAL_KIWI_DB: Path = Path(os.environ.get("KIWI_DB", "data/kiwi.db")) _LOCAL_KIWI_DB: Path = Path(os.environ.get("KIWI_DB", "data/kiwi.db"))
_TIER_CACHE: dict[str, tuple[dict, float]] = {} _TIER_CACHE: dict[str, tuple[str, float]] = {}
_TIER_CACHE_TTL = 300 # 5 minutes _TIER_CACHE_TTL = 300 # 5 minutes
TIERS = ["free", "paid", "premium", "ultra"] TIERS = ["free", "paid", "premium", "ultra"]
def _auth_label(user_id: str) -> str:
"""Classify a user_id into a short tag for structured log lines. No PII emitted."""
if user_id in ("local", "local-dev"):
return "local"
if user_id.startswith("anon-"):
return "anon"
return "authed"
# ── Domain ──────────────────────────────────────────────────────────────────── # ── Domain ────────────────────────────────────────────────────────────────────
@dataclass(frozen=True) @dataclass(frozen=True)
@ -101,9 +53,6 @@ class CloudUser:
tier: str # free | paid | premium | ultra | local tier: str # free | paid | premium | ultra | local
db: Path # per-user SQLite DB path db: Path # per-user SQLite DB path
has_byok: bool # True if a configured LLM backend is present in llm.yaml has_byok: bool # True if a configured LLM backend is present in llm.yaml
household_id: str | None = None
is_household_owner: bool = False
license_key: str | None = None # key_display for lifetime/founders keys; None for subscription/free
# ── JWT validation ───────────────────────────────────────────────────────────── # ── JWT validation ─────────────────────────────────────────────────────────────
@ -144,16 +93,14 @@ def _ensure_provisioned(user_id: str) -> None:
log.warning("Heimdall provision failed for user %s: %s", user_id, exc) log.warning("Heimdall provision failed for user %s: %s", user_id, exc)
def _fetch_cloud_tier(user_id: str) -> tuple[str, str | None, bool, str | None]: def _fetch_cloud_tier(user_id: str) -> str:
"""Returns (tier, household_id | None, is_household_owner, license_key | None)."""
now = time.monotonic() now = time.monotonic()
cached = _TIER_CACHE.get(user_id) cached = _TIER_CACHE.get(user_id)
if cached and (now - cached[1]) < _TIER_CACHE_TTL: if cached and (now - cached[1]) < _TIER_CACHE_TTL:
entry = cached[0] return cached[0]
return entry["tier"], entry.get("household_id"), entry.get("is_household_owner", False), entry.get("license_key")
if not HEIMDALL_ADMIN_TOKEN: if not HEIMDALL_ADMIN_TOKEN:
return "free", None, False, None return "free"
try: try:
resp = requests.post( resp = requests.post(
f"{HEIMDALL_URL}/admin/cloud/resolve", f"{HEIMDALL_URL}/admin/cloud/resolve",
@ -161,39 +108,21 @@ def _fetch_cloud_tier(user_id: str) -> tuple[str, str | None, bool, str | None]:
headers={"Authorization": f"Bearer {HEIMDALL_ADMIN_TOKEN}"}, headers={"Authorization": f"Bearer {HEIMDALL_ADMIN_TOKEN}"},
timeout=5, timeout=5,
) )
data = resp.json() if resp.ok else {} tier = resp.json().get("tier", "free") if resp.ok else "free"
tier = data.get("tier", "free")
household_id = data.get("household_id")
is_owner = data.get("is_household_owner", False)
license_key = data.get("key_display")
except Exception as exc: except Exception as exc:
log.warning("Heimdall tier resolve failed for user %s: %s", user_id, exc) log.warning("Heimdall tier resolve failed for user %s: %s", user_id, exc)
tier, household_id, is_owner, license_key = "free", None, False, None tier = "free"
_TIER_CACHE[user_id] = ({"tier": tier, "household_id": household_id, "is_household_owner": is_owner, "license_key": license_key}, now) _TIER_CACHE[user_id] = (tier, now)
return tier, household_id, is_owner, license_key return tier
def _user_db_path(user_id: str, household_id: str | None = None) -> Path: def _user_db_path(user_id: str) -> Path:
if household_id:
path = CLOUD_DATA_ROOT / f"household_{household_id}" / "kiwi.db"
else:
path = CLOUD_DATA_ROOT / user_id / "kiwi.db" path = CLOUD_DATA_ROOT / user_id / "kiwi.db"
path.parent.mkdir(parents=True, exist_ok=True) path.parent.mkdir(parents=True, exist_ok=True)
return path return path
def _anon_guest_db_path(guest_id: str) -> Path:
"""Per-session DB for unauthenticated guest visitors.
Each anonymous visitor gets an isolated SQLite DB keyed by their guest UUID
cookie, so shopping lists and affiliate interactions never bleed across sessions.
"""
path = CLOUD_DATA_ROOT / f"anon-{guest_id}" / "kiwi.db"
path.parent.mkdir(parents=True, exist_ok=True)
return path
# ── BYOK detection ──────────────────────────────────────────────────────────── # ── BYOK detection ────────────────────────────────────────────────────────────
_LLM_CONFIG_PATH = Path.home() / ".config" / "circuitforge" / "llm.yaml" _LLM_CONFIG_PATH = Path.home() / ".config" / "circuitforge" / "llm.yaml"
@ -219,89 +148,32 @@ def _detect_byok(config_path: Path = _LLM_CONFIG_PATH) -> bool:
# ── FastAPI dependency ──────────────────────────────────────────────────────── # ── FastAPI dependency ────────────────────────────────────────────────────────
_GUEST_COOKIE = "kiwi_guest_id" def get_session(request: Request) -> CloudUser:
_GUEST_COOKIE_MAX_AGE = 60 * 60 * 24 * 90 # 90 days
def _resolve_guest_session(request: Request, response: Response, has_byok: bool) -> CloudUser:
"""Return a per-session anonymous CloudUser, creating a guest UUID cookie if needed."""
guest_id = request.cookies.get(_GUEST_COOKIE, "").strip()
is_new = not guest_id
if is_new:
guest_id = str(uuid.uuid4())
log.debug("New guest session assigned: anon-%s", guest_id[:8])
# Secure flag only when the request actually arrived over HTTPS
# (Caddy sets X-Forwarded-Proto=https in cloud; absent on direct port access).
# Avoids losing the session cookie on HTTP direct-port testing of the cloud stack.
is_https = request.headers.get("x-forwarded-proto", "http").lower() == "https"
response.set_cookie(
key=_GUEST_COOKIE,
value=guest_id,
max_age=_GUEST_COOKIE_MAX_AGE,
httponly=True,
samesite="lax",
secure=is_https,
)
return CloudUser(
user_id=f"anon-{guest_id}",
tier="free",
db=_anon_guest_db_path(guest_id),
has_byok=has_byok,
)
def get_session(request: Request, response: Response) -> CloudUser:
"""FastAPI dependency — resolves the current user from the request. """FastAPI dependency — resolves the current user from the request.
Local mode: fully-privileged "local" user pointing at local DB. Local mode: fully-privileged "local" user pointing at local DB.
Cloud mode: validates X-CF-Session JWT, provisions license, resolves tier. Cloud mode: validates X-CF-Session JWT, provisions license, resolves tier.
Dev bypass: if CLOUD_AUTH_BYPASS_IPS is set and the client IP matches,
returns a "local" session without JWT validation (dev/LAN use only).
Anonymous: per-session UUID cookie isolates each guest visitor's data.
""" """
has_byok = _detect_byok() has_byok = _detect_byok()
if not CLOUD_MODE: if not CLOUD_MODE:
return CloudUser(user_id="local", tier="local", db=_LOCAL_KIWI_DB, has_byok=has_byok) return CloudUser(user_id="local", tier="local", db=_LOCAL_KIWI_DB, has_byok=has_byok)
# Prefer X-Real-IP (set by Caddy from the actual client address) over the raw_header = (
# TCP peer address (which is nginx's container IP when behind the proxy). request.headers.get("x-cf-session", "")
client_ip = ( or request.headers.get("cookie", "")
request.headers.get("x-real-ip", "")
or (request.client.host if request.client else "")
) )
if (_BYPASS_IPS or _BYPASS_NETS) and _is_bypass_ip(client_ip): if not raw_header:
log.debug("CLOUD_AUTH_BYPASS_IPS match for %s — returning local session", client_ip) raise HTTPException(status_code=401, detail="Not authenticated")
# Use a dev DB under CLOUD_DATA_ROOT so the container has a writable path.
dev_db = _user_db_path("local-dev")
return CloudUser(user_id="local-dev", tier="local", db=dev_db, has_byok=has_byok)
# Resolve cf_session JWT: prefer the explicit header injected by Caddy, then token = _extract_session_token(raw_header)
# fall back to the cf_session cookie value. Other cookies (e.g. kiwi_guest_id)
# must never be treated as auth tokens.
raw_session = request.headers.get("x-cf-session", "").strip()
if not raw_session:
raw_session = request.cookies.get("cf_session", "").strip()
if not raw_session:
return _resolve_guest_session(request, response, has_byok)
token = _extract_session_token(raw_session) # gitleaks:allow — function name, not a secret
if not token: if not token:
return _resolve_guest_session(request, response, has_byok) raise HTTPException(status_code=401, detail="Not authenticated")
user_id = validate_session_jwt(token) user_id = validate_session_jwt(token)
_ensure_provisioned(user_id) _ensure_provisioned(user_id)
tier, household_id, is_household_owner, license_key = _fetch_cloud_tier(user_id) tier = _fetch_cloud_tier(user_id)
return CloudUser( return CloudUser(user_id=user_id, tier=tier, db=_user_db_path(user_id), has_byok=has_byok)
user_id=user_id,
tier=tier,
db=_user_db_path(user_id, household_id=household_id),
has_byok=has_byok,
household_id=household_id,
is_household_owner=is_household_owner,
license_key=license_key,
)
def require_tier(min_tier: str): def require_tier(min_tier: str):

View file

@ -35,24 +35,6 @@ class Settings:
# Database # Database
DB_PATH: Path = Path(os.environ.get("DB_PATH", str(DATA_DIR / "kiwi.db"))) DB_PATH: Path = Path(os.environ.get("DB_PATH", str(DATA_DIR / "kiwi.db")))
# Pre-computed browse counts cache (small SQLite, separate from corpus).
# Written by the nightly refresh task and by infer_recipe_tags.py.
# Set BROWSE_COUNTS_PATH to a bind-mounted path if you want the host
# pipeline to share counts with the container without re-running FTS.
BROWSE_COUNTS_PATH: Path = Path(
os.environ.get("BROWSE_COUNTS_PATH", str(DATA_DIR / "browse_counts.db"))
)
# Community feature settings
COMMUNITY_DB_URL: str | None = os.environ.get("COMMUNITY_DB_URL") or None
COMMUNITY_PSEUDONYM_SALT: str = os.environ.get(
"COMMUNITY_PSEUDONYM_SALT", "kiwi-default-salt-change-in-prod"
)
COMMUNITY_CLOUD_FEED_URL: str = os.environ.get(
"COMMUNITY_CLOUD_FEED_URL",
"https://menagerie.circuitforge.tech/kiwi/api/v1/community/posts",
)
# Processing # Processing
MAX_CONCURRENT_JOBS: int = int(os.environ.get("MAX_CONCURRENT_JOBS", "4")) MAX_CONCURRENT_JOBS: int = int(os.environ.get("MAX_CONCURRENT_JOBS", "4"))
USE_GPU: bool = os.environ.get("USE_GPU", "true").lower() in ("1", "true", "yes") USE_GPU: bool = os.environ.get("USE_GPU", "true").lower() in ("1", "true", "yes")
@ -61,26 +43,8 @@ class Settings:
# Quality # Quality
MIN_QUALITY_SCORE: float = float(os.environ.get("MIN_QUALITY_SCORE", "50.0")) MIN_QUALITY_SCORE: float = float(os.environ.get("MIN_QUALITY_SCORE", "50.0"))
# CF-core resource coordinator (VRAM lease management)
COORDINATOR_URL: str = os.environ.get("COORDINATOR_URL", "http://localhost:7700")
# Hosted cf-orch coordinator — bearer token for managed cloud GPU inference (Paid+)
# CFOrchClient reads CF_LICENSE_KEY automatically; exposed here for startup validation.
CF_LICENSE_KEY: str | None = os.environ.get("CF_LICENSE_KEY")
# E2E test account — analytics logging is suppressed for this user_id so test
# runs don't pollute session counts. Set to the Directus UUID of the test user.
E2E_TEST_USER_ID: str | None = os.environ.get("E2E_TEST_USER_ID") or None
# Feature flags # Feature flags
ENABLE_OCR: bool = os.environ.get("ENABLE_OCR", "false").lower() in ("1", "true", "yes") ENABLE_OCR: bool = os.environ.get("ENABLE_OCR", "false").lower() in ("1", "true", "yes")
# Use OrchestratedScheduler (coordinator-aware, multi-GPU fan-out) instead of
# LocalScheduler. Defaults to true in CLOUD_MODE; can be set independently
# for multi-GPU local rigs that don't need full cloud auth.
USE_ORCH_SCHEDULER: bool | None = (
None if os.environ.get("USE_ORCH_SCHEDULER") is None
else os.environ.get("USE_ORCH_SCHEDULER", "").lower() in ("1", "true", "yes")
)
# Runtime # Runtime
DEBUG: bool = os.environ.get("DEBUG", "false").lower() in ("1", "true", "yes") DEBUG: bool = os.environ.get("DEBUG", "false").lower() in ("1", "true", "yes")

View file

@ -9,7 +9,6 @@ CREATE TABLE receipts_new (
id INTEGER PRIMARY KEY AUTOINCREMENT, id INTEGER PRIMARY KEY AUTOINCREMENT,
filename TEXT NOT NULL, filename TEXT NOT NULL,
original_path TEXT NOT NULL, original_path TEXT NOT NULL,
processed_path TEXT,
status TEXT NOT NULL DEFAULT 'uploaded' status TEXT NOT NULL DEFAULT 'uploaded'
CHECK (status IN ( CHECK (status IN (
'uploaded', 'uploaded',

View file

@ -1,48 +0,0 @@
-- Migration 006: Ingredient element profiles + FlavorGraph molecule index.
CREATE TABLE ingredient_profiles (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL,
    name_variants TEXT NOT NULL DEFAULT '[]', -- JSON array of aliases/alternate spellings
    elements TEXT NOT NULL DEFAULT '[]', -- JSON array: ["Richness","Depth"]
    -- Functional submetadata (from USDA FDC)
    fat_pct REAL DEFAULT 0.0,
    fat_saturated_pct REAL DEFAULT 0.0,
    moisture_pct REAL DEFAULT 0.0,
    protein_pct REAL DEFAULT 0.0,
    starch_pct REAL DEFAULT 0.0,
    binding_score INTEGER DEFAULT 0 CHECK (binding_score BETWEEN 0 AND 3),
    glutamate_mg REAL DEFAULT 0.0,
    ph_estimate REAL,
    sodium_mg_per_100g REAL DEFAULT 0.0,
    smoke_point_c REAL,
    is_fermented INTEGER NOT NULL DEFAULT 0,
    is_emulsifier INTEGER NOT NULL DEFAULT 0,
    -- Aroma submetadata
    flavor_molecule_ids TEXT NOT NULL DEFAULT '[]', -- JSON array of FlavorGraph compound IDs
    heat_stable INTEGER NOT NULL DEFAULT 1,
    add_timing TEXT NOT NULL DEFAULT 'any'
        CHECK (add_timing IN ('early','finish','any')),
    -- Brightness submetadata
    -- FIX: the original constraint was CHECK (acid_type IN (...,NULL)).
    -- A NULL operand inside an IN list makes every non-matching comparison
    -- evaluate to NULL (unknown), and SQLite treats a NULL CHECK result as
    -- satisfied — so the constraint never rejected anything. Splitting the
    -- NULL case out enforces the closed set while still allowing NULL.
    acid_type TEXT CHECK (acid_type IS NULL OR acid_type IN ('citric','acetic','lactic')),
    -- Texture submetadata
    texture_profile TEXT NOT NULL DEFAULT 'neutral',
    water_activity REAL,
    -- Source
    usda_fdc_id TEXT,
    source TEXT NOT NULL DEFAULT 'usda',
    created_at TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE UNIQUE INDEX idx_ingredient_profiles_name ON ingredient_profiles (name);
-- NOTE(review): `elements` holds a JSON array as TEXT; a B-tree index on the
-- raw text only helps exact-string matches, not per-element lookups.
CREATE INDEX idx_ingredient_profiles_elements ON ingredient_profiles (elements);
CREATE TABLE flavor_molecules (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    compound_id TEXT NOT NULL UNIQUE, -- FlavorGraph node ID
    compound_name TEXT NOT NULL,
    ingredient_names TEXT NOT NULL DEFAULT '[]', -- JSON array of ingredient names
    created_at TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE INDEX idx_flavor_molecules_compound_id ON flavor_molecules (compound_id);

View file

@ -1,24 +0,0 @@
-- Migration 007: Recipe corpus index (food.com dataset).
CREATE TABLE recipes (
id INTEGER PRIMARY KEY AUTOINCREMENT,
-- Source dataset row id; nullable, and unique when present (a SQLite
-- unique index permits any number of NULL entries).
external_id TEXT,
title TEXT NOT NULL,
ingredients TEXT NOT NULL DEFAULT '[]', -- JSON array of raw ingredient strings
ingredient_names TEXT NOT NULL DEFAULT '[]', -- JSON array of normalized names
directions TEXT NOT NULL DEFAULT '[]', -- JSON array of step strings
category TEXT,
keywords TEXT NOT NULL DEFAULT '[]', -- JSON array
-- Per-recipe nutrition; units follow the column suffixes (_g grams, _mg milligrams).
calories REAL,
fat_g REAL,
protein_g REAL,
sodium_mg REAL,
-- Element coverage scores computed at import time
element_coverage TEXT NOT NULL DEFAULT '{}', -- JSON {element: 0.0-1.0}
source TEXT NOT NULL DEFAULT 'foodcom',
created_at TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE INDEX idx_recipes_title ON recipes (title);
CREATE INDEX idx_recipes_category ON recipes (category);
CREATE UNIQUE INDEX idx_recipes_external_id ON recipes (external_id);

View file

@ -1,22 +0,0 @@
-- Migration 008: Derived substitution pairs.
-- Source: diff of lishuyang/recipepairs (GPL-3.0 derivation — raw data not shipped).
CREATE TABLE substitution_pairs (
id INTEGER PRIMARY KEY AUTOINCREMENT,
original_name TEXT NOT NULL,
substitute_name TEXT NOT NULL,
constraint_label TEXT NOT NULL, -- 'vegan'|'vegetarian'|'dairy_free'|'gluten_free'|'low_fat'|'low_sodium'
-- Nutrient deltas between the pair. NOTE(review): the sign convention
-- (substitute minus original?) is not established in this file — confirm
-- against the import pipeline before relying on it.
fat_delta REAL DEFAULT 0.0,
moisture_delta REAL DEFAULT 0.0,
glutamate_delta REAL DEFAULT 0.0,
protein_delta REAL DEFAULT 0.0,
occurrence_count INTEGER DEFAULT 1, -- presumably how often the pair appears in the source data
compensation_hints TEXT NOT NULL DEFAULT '[]', -- JSON [{ingredient, reason, element}]
source TEXT NOT NULL DEFAULT 'derived',
created_at TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE INDEX idx_substitution_pairs_original ON substitution_pairs (original_name);
CREATE INDEX idx_substitution_pairs_constraint ON substitution_pairs (constraint_label);
-- A given (original, substitute, constraint) triple may appear only once.
CREATE UNIQUE INDEX idx_substitution_pairs_pair
ON substitution_pairs (original_name, substitute_name, constraint_label);

View file

@ -1,27 +0,0 @@
-- Migration 009: Staple library (bulk-preparable base components).
-- `staples` is the catalogue of staple definitions; `user_staples` records
-- the user's prepared batches of those staples.
CREATE TABLE staples (
id INTEGER PRIMARY KEY AUTOINCREMENT,
slug TEXT NOT NULL UNIQUE,
name TEXT NOT NULL,
description TEXT,
base_ingredients TEXT NOT NULL DEFAULT '[]', -- JSON array of ingredient strings
base_method TEXT,
base_time_minutes INTEGER,
yield_formats TEXT NOT NULL DEFAULT '{}', -- JSON {format_name: {elements, shelf_days, methods, texture}}
dietary_labels TEXT NOT NULL DEFAULT '[]', -- JSON ['vegan','high-protein']
compatible_styles TEXT NOT NULL DEFAULT '[]', -- JSON [style_id]
created_at TEXT NOT NULL DEFAULT (datetime('now'))
);
-- NOTE: REFERENCES is only enforced when the connection runs with
-- PRAGMA foreign_keys=ON (off by default in SQLite).
CREATE TABLE user_staples (
id INTEGER PRIMARY KEY AUTOINCREMENT,
staple_slug TEXT NOT NULL REFERENCES staples(slug) ON DELETE CASCADE,
active_format TEXT NOT NULL, -- presumably a key of staples.yield_formats — confirm in app code
quantity_g REAL,
prepared_at TEXT,
notes TEXT,
created_at TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE INDEX idx_user_staples_slug ON user_staples (staple_slug);

View file

@ -1,15 +0,0 @@
-- Migration 010: User substitution approval log (opt-in dataset moat).
CREATE TABLE substitution_feedback (
id INTEGER PRIMARY KEY AUTOINCREMENT,
original_name TEXT NOT NULL,
substitute_name TEXT NOT NULL,
constraint_label TEXT,
compensation_used TEXT NOT NULL DEFAULT '[]', -- JSON array of compensation ingredient names
approved INTEGER NOT NULL DEFAULT 0, -- boolean 0/1 flag
opted_in INTEGER NOT NULL DEFAULT 0, -- user consented to anonymized sharing
created_at TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE INDEX idx_substitution_feedback_original ON substitution_feedback (original_name);
-- Supports selecting only the rows users agreed to share.
CREATE INDEX idx_substitution_feedback_opted_in ON substitution_feedback (opted_in);

View file

@ -1,11 +0,0 @@
-- Migration 011: Daily rate limits (leftover mode: 5/day free tier).
CREATE TABLE rate_limits (
id INTEGER PRIMARY KEY AUTOINCREMENT,
feature TEXT NOT NULL,
window_date TEXT NOT NULL, -- YYYY-MM-DD
count INTEGER NOT NULL DEFAULT 0,
UNIQUE (feature, window_date)
);
-- NOTE(review): the UNIQUE (feature, window_date) constraint above already
-- creates an implicit index on exactly these columns, so this explicit index
-- is redundant and only adds write overhead.
CREATE INDEX idx_rate_limits_feature_date ON rate_limits (feature, window_date);

View file

@ -1,6 +0,0 @@
-- Migration 012: User settings key-value store.
-- Values are opaque TEXT; the serialization format is not constrained here.
CREATE TABLE IF NOT EXISTS user_settings (
key TEXT PRIMARY KEY,
value TEXT NOT NULL
);

View file

@ -1,18 +0,0 @@
-- Migration 014: Add macro nutrition columns to recipes and ingredient_profiles.
--
-- recipes: sugar, carbs, fiber, servings, and an estimated flag.
-- ingredient_profiles: carbs, fiber, calories, sugar per 100g (for estimation fallback).
-- SQLite permits ADD COLUMN with NOT NULL only when a default is supplied —
-- the flag below satisfies that; the remaining new columns are nullable.
ALTER TABLE recipes ADD COLUMN sugar_g REAL;
ALTER TABLE recipes ADD COLUMN carbs_g REAL;
ALTER TABLE recipes ADD COLUMN fiber_g REAL;
ALTER TABLE recipes ADD COLUMN servings REAL;
ALTER TABLE recipes ADD COLUMN nutrition_estimated INTEGER NOT NULL DEFAULT 0; -- boolean 0/1
ALTER TABLE ingredient_profiles ADD COLUMN carbs_g_per_100g REAL DEFAULT 0.0;
ALTER TABLE ingredient_profiles ADD COLUMN fiber_g_per_100g REAL DEFAULT 0.0;
ALTER TABLE ingredient_profiles ADD COLUMN calories_per_100g REAL DEFAULT 0.0;
ALTER TABLE ingredient_profiles ADD COLUMN sugar_g_per_100g REAL DEFAULT 0.0;
-- Presumably to support range filtering on the new macro columns.
CREATE INDEX idx_recipes_sugar_g ON recipes (sugar_g);
CREATE INDEX idx_recipes_carbs_g ON recipes (carbs_g);

View file

@ -1,38 +0,0 @@
-- Migration 015: FTS5 inverted index for recipe ingredient lookup.
--
-- Content table backed by `recipes` — stores only the inverted index, no text duplication.
-- MATCH queries replace O(N) LIKE scans with O(log N) token lookups.
--
-- One-time rebuild cost on 3.2M rows: ~15-30 seconds at startup.
-- Subsequent startups skip this migration entirely.
CREATE VIRTUAL TABLE IF NOT EXISTS recipes_fts USING fts5(
ingredient_names,
content=recipes,
content_rowid=id,
tokenize="unicode61"
);
-- FTS5 'rebuild' command: repopulate the whole index from the content table.
INSERT INTO recipes_fts(recipes_fts) VALUES('rebuild');
-- Triggers to keep the FTS index in sync with the recipes table.
-- Without these, rows inserted after the initial rebuild are invisible to FTS queries.
CREATE TRIGGER IF NOT EXISTS recipes_fts_ai
AFTER INSERT ON recipes BEGIN
INSERT INTO recipes_fts(rowid, ingredient_names)
VALUES (new.id, new.ingredient_names);
END;
-- External-content FTS5 tables cannot be updated with plain DELETE/UPDATE;
-- removals are expressed by inserting the special 'delete' command row.
CREATE TRIGGER IF NOT EXISTS recipes_fts_ad
AFTER DELETE ON recipes BEGIN
INSERT INTO recipes_fts(recipes_fts, rowid, ingredient_names)
VALUES ('delete', old.id, old.ingredient_names);
END;
CREATE TRIGGER IF NOT EXISTS recipes_fts_au
AFTER UPDATE ON recipes BEGIN
INSERT INTO recipes_fts(recipes_fts, rowid, ingredient_names)
VALUES ('delete', old.id, old.ingredient_names);
INSERT INTO recipes_fts(rowid, ingredient_names)
VALUES (new.id, new.ingredient_names);
END;

View file

@ -1,27 +0,0 @@
-- Migration 016: Add FTS5 sync triggers for the recipes_fts content table.
--
-- Migration 015 created recipes_fts and did a one-time rebuild, but omitted
-- triggers. Without them, INSERT/UPDATE/DELETE on recipes does not update the
-- FTS index, so new rows are invisible to MATCH queries.
--
-- CREATE TRIGGER IF NOT EXISTS is idempotent — safe to re-run.
-- NOTE(review): migration 015 as present in this tree already defines
-- identical triggers; the IF NOT EXISTS guards make this migration a no-op
-- on databases where 015 created them.
CREATE TRIGGER IF NOT EXISTS recipes_fts_ai
AFTER INSERT ON recipes BEGIN
INSERT INTO recipes_fts(rowid, ingredient_names)
VALUES (new.id, new.ingredient_names);
END;
-- External-content FTS5 delete: insert the special 'delete' command row.
CREATE TRIGGER IF NOT EXISTS recipes_fts_ad
AFTER DELETE ON recipes BEGIN
INSERT INTO recipes_fts(recipes_fts, rowid, ingredient_names)
VALUES ('delete', old.id, old.ingredient_names);
END;
CREATE TRIGGER IF NOT EXISTS recipes_fts_au
AFTER UPDATE ON recipes BEGIN
INSERT INTO recipes_fts(recipes_fts, rowid, ingredient_names)
VALUES ('delete', old.id, old.ingredient_names);
INSERT INTO recipes_fts(rowid, ingredient_names)
VALUES (new.id, new.ingredient_names);
END;

View file

@ -1,10 +0,0 @@
-- 017_household_invites.sql
-- Expiring invite tokens for joining a household; used_at/used_by record
-- redemption (presumably single-use — confirm in the redeem handler).
CREATE TABLE IF NOT EXISTS household_invites (
token TEXT PRIMARY KEY, -- opaque invite token shared with the invitee
household_id TEXT NOT NULL,
created_by TEXT NOT NULL, -- user id of the inviter
created_at TEXT NOT NULL DEFAULT (datetime('now')),
expires_at TEXT NOT NULL,
used_at TEXT, -- NULL until the invite is redeemed
used_by TEXT
);
-- NOTE(review): listing invites per household will scan this table; add an
-- index on household_id if that query becomes common.

View file

@ -1,14 +0,0 @@
-- Migration 018: saved recipes bookmarks.
CREATE TABLE saved_recipes (
id INTEGER PRIMARY KEY AUTOINCREMENT,
recipe_id INTEGER NOT NULL REFERENCES recipes(id) ON DELETE CASCADE,
saved_at TEXT NOT NULL DEFAULT (datetime('now')),
notes TEXT,
rating INTEGER CHECK (rating IS NULL OR (rating >= 0 AND rating <= 5)), -- 0-5; NULL = unrated
style_tags TEXT NOT NULL DEFAULT '[]', -- JSON array
-- At most one bookmark per recipe.
UNIQUE (recipe_id)
);
-- Newest-first listing and rating filters.
CREATE INDEX idx_saved_recipes_saved_at ON saved_recipes (saved_at DESC);
CREATE INDEX idx_saved_recipes_rating ON saved_recipes (rating);

View file

@ -1,16 +0,0 @@
-- Migration 019: recipe collections (Paid tier organisation).
CREATE TABLE recipe_collections (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT NOT NULL,
description TEXT,
created_at TEXT NOT NULL DEFAULT (datetime('now')),
updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);
-- Many-to-many join between collections and saved recipes; the composite
-- primary key prevents duplicate membership rows.
CREATE TABLE recipe_collection_members (
collection_id INTEGER NOT NULL REFERENCES recipe_collections(id) ON DELETE CASCADE,
saved_recipe_id INTEGER NOT NULL REFERENCES saved_recipes(id) ON DELETE CASCADE,
added_at TEXT NOT NULL DEFAULT (datetime('now')),
PRIMARY KEY (collection_id, saved_recipe_id)
);

View file

@ -1,13 +0,0 @@
-- Migration 020: recipe browser navigation telemetry.
-- Used to determine whether category nesting depth needs increasing.
-- Review: if any category has page > 5 and result_count > 100 consistently,
-- consider adding a third nesting level for that category.
CREATE TABLE browser_telemetry (
id INTEGER PRIMARY KEY AUTOINCREMENT,
domain TEXT NOT NULL,
category TEXT NOT NULL,
page INTEGER NOT NULL, -- page number reached — presumably 1-based; confirm in the endpoint
result_count INTEGER NOT NULL,
recorded_at TEXT NOT NULL DEFAULT (datetime('now'))
);

View file

@ -1,43 +0,0 @@
-- Migration 021: FTS5 inverted index for the recipe browser (category + keywords).
--
-- The browser domain queries were using LIKE '%keyword%' against category and
-- keywords columns — a leading wildcard prevents any B-tree index use, so every
-- query was a full sequential scan of 3.1M rows. This FTS5 index replaces those
-- scans with O(log N) token lookups.
--
-- Content-table backed: stores only the inverted index, no text duplication.
-- The keywords column is a JSON array; FTS5 tokenises it as plain text, stripping
-- the punctuation, which gives correct per-word matching.
--
-- One-time rebuild cost on 3.1M rows: ~20-40 seconds at first startup.
-- Subsequent startups skip this migration (IF NOT EXISTS guard).
CREATE VIRTUAL TABLE IF NOT EXISTS recipe_browser_fts USING fts5(
    category,
    keywords,
    content=recipes,        -- external-content: text stays in recipes, only the index is stored
    content_rowid=id,
    tokenize="unicode61"
);
-- Build the index from the current recipes table state.
INSERT INTO recipe_browser_fts(recipe_browser_fts) VALUES('rebuild');
-- External-content FTS5 is NOT kept in sync automatically: these triggers
-- mirror every insert/delete/update. Updates use the documented
-- delete-then-insert pattern ('delete' special command with the OLD values).
CREATE TRIGGER IF NOT EXISTS recipe_browser_fts_ai
AFTER INSERT ON recipes BEGIN
    INSERT INTO recipe_browser_fts(rowid, category, keywords)
    VALUES (new.id, new.category, new.keywords);
END;
CREATE TRIGGER IF NOT EXISTS recipe_browser_fts_ad
AFTER DELETE ON recipes BEGIN
    INSERT INTO recipe_browser_fts(recipe_browser_fts, rowid, category, keywords)
    VALUES ('delete', old.id, old.category, old.keywords);
END;
CREATE TRIGGER IF NOT EXISTS recipe_browser_fts_au
AFTER UPDATE ON recipes BEGIN
    INSERT INTO recipe_browser_fts(recipe_browser_fts, rowid, category, keywords)
    VALUES ('delete', old.id, old.category, old.keywords);
    INSERT INTO recipe_browser_fts(rowid, category, keywords)
    VALUES (new.id, new.category, new.keywords);
END;

View file

@ -1,8 +0,0 @@
-- 022_meal_plans.sql
CREATE TABLE meal_plans (
id INTEGER PRIMARY KEY,
week_start TEXT NOT NULL,
meal_types TEXT NOT NULL DEFAULT '["dinner"]',
created_at TEXT NOT NULL DEFAULT (datetime('now')),
updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);

View file

@ -1,5 +0,0 @@
-- Migration 022: Add is_generic flag to recipes
-- Generic recipes are catch-all/dump recipes with loose ingredient lists
-- that should not appear in Level 1 (deterministic "use what I have") results.
-- Admins can mark recipes via the recipe editor or a bulk backfill script.
-- NOT NULL DEFAULT 0 backfills every existing row as non-generic.
ALTER TABLE recipes ADD COLUMN is_generic INTEGER NOT NULL DEFAULT 0;

View file

@ -1,11 +0,0 @@
-- 023_meal_plan_slots.sql
CREATE TABLE meal_plan_slots (
id INTEGER PRIMARY KEY,
plan_id INTEGER NOT NULL REFERENCES meal_plans(id) ON DELETE CASCADE,
day_of_week INTEGER NOT NULL CHECK(day_of_week BETWEEN 0 AND 6),
meal_type TEXT NOT NULL,
recipe_id INTEGER REFERENCES recipes(id),
servings REAL NOT NULL DEFAULT 2.0,
custom_label TEXT,
UNIQUE(plan_id, day_of_week, meal_type)
);

View file

@ -1,10 +0,0 @@
-- 024_prep_sessions.sql
CREATE TABLE prep_sessions (
id INTEGER PRIMARY KEY,
plan_id INTEGER NOT NULL REFERENCES meal_plans(id) ON DELETE CASCADE,
scheduled_date TEXT NOT NULL,
status TEXT NOT NULL DEFAULT 'draft'
CHECK(status IN ('draft','reviewed','done')),
created_at TEXT NOT NULL DEFAULT (datetime('now')),
updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);

View file

@ -1,15 +0,0 @@
-- 025_prep_tasks.sql
CREATE TABLE prep_tasks (
id INTEGER PRIMARY KEY,
session_id INTEGER NOT NULL REFERENCES prep_sessions(id) ON DELETE CASCADE,
recipe_id INTEGER REFERENCES recipes(id),
slot_id INTEGER REFERENCES meal_plan_slots(id),
task_label TEXT NOT NULL,
duration_minutes INTEGER,
sequence_order INTEGER NOT NULL,
equipment TEXT,
is_parallel INTEGER NOT NULL DEFAULT 0,
notes TEXT,
user_edited INTEGER NOT NULL DEFAULT 0,
created_at TEXT NOT NULL DEFAULT (datetime('now'))
);

View file

@ -1,21 +0,0 @@
-- 028_community_pseudonyms.sql
-- Per-user pseudonym store: maps the user's chosen community display name
-- to their Directus user ID. This table lives in per-user kiwi.db only.
-- It is NEVER replicated to the community PostgreSQL — pseudonym isolation is by design.
--
-- A user may have one active pseudonym. Old pseudonyms are retained for reference
-- (posts published under them keep their pseudonym attribution) but only one is
-- flagged as current (is_current = 1).
CREATE TABLE IF NOT EXISTS community_pseudonyms (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    pseudonym TEXT NOT NULL,                             -- the display name itself
    directus_user_id TEXT NOT NULL,                      -- owning Directus user
    is_current INTEGER NOT NULL DEFAULT 1 CHECK (is_current IN (0, 1)),
    created_at TEXT NOT NULL DEFAULT (datetime('now'))
);
-- Only one pseudonym can be current at a time per user: a partial unique
-- index over the is_current=1 rows enforces this while keeping history rows.
CREATE UNIQUE INDEX IF NOT EXISTS idx_community_pseudonyms_current
    ON community_pseudonyms (directus_user_id)
    WHERE is_current = 1;

View file

@ -1,49 +0,0 @@
-- Migration 029: Add inferred_tags column and update FTS index to include it.
--
-- inferred_tags holds a JSON array of normalized tag strings derived by
-- scripts/pipeline/infer_recipe_tags.py (e.g. ["cuisine:Italian",
-- "dietary:Low-Carb", "flavor:Umami", "can_be:Gluten-Free"]).
--
-- The FTS5 browser table is rebuilt to index inferred_tags alongside
-- category and keywords so browse domain queries match against all signals.
-- 1. Add inferred_tags column (empty array default; populated by pipeline run)
-- 1. Add inferred_tags column (empty array default; populated by pipeline run)
ALTER TABLE recipes ADD COLUMN inferred_tags TEXT NOT NULL DEFAULT '[]';
-- 2. Drop the old FTS table and ALL prior sync triggers.
--    Migration 021 created its triggers under the recipe_browser_fts_* names;
--    dropping only the recipes_* names would leave those stale triggers in
--    place, still firing against the rebuilt external-content FTS table
--    (double inserts on INSERT, bogus 'delete' rows on UPDATE/DELETE).
DROP TRIGGER IF EXISTS recipe_browser_fts_ai;
DROP TRIGGER IF EXISTS recipe_browser_fts_ad;
DROP TRIGGER IF EXISTS recipe_browser_fts_au;
DROP TRIGGER IF EXISTS recipes_ai;
DROP TRIGGER IF EXISTS recipes_ad;
DROP TRIGGER IF EXISTS recipes_au;
DROP TABLE IF EXISTS recipe_browser_fts;
-- 3. Recreate FTS5 table: now indexes category, keywords, AND inferred_tags
CREATE VIRTUAL TABLE recipe_browser_fts USING fts5(
    category,
    keywords,
    inferred_tags,
    content=recipes,
    content_rowid=id
);
-- 4. Triggers to keep FTS in sync with recipes table changes.
--    External-content FTS5 requires manual mirroring; updates use the
--    documented delete-then-insert pattern with the OLD column values.
CREATE TRIGGER recipes_ai AFTER INSERT ON recipes BEGIN
    INSERT INTO recipe_browser_fts(rowid, category, keywords, inferred_tags)
    VALUES (new.id, new.category, new.keywords, new.inferred_tags);
END;
CREATE TRIGGER recipes_ad AFTER DELETE ON recipes BEGIN
    INSERT INTO recipe_browser_fts(recipe_browser_fts, rowid, category, keywords, inferred_tags)
    VALUES ('delete', old.id, old.category, old.keywords, old.inferred_tags);
END;
CREATE TRIGGER recipes_au AFTER UPDATE ON recipes BEGIN
    INSERT INTO recipe_browser_fts(recipe_browser_fts, rowid, category, keywords, inferred_tags)
    VALUES ('delete', old.id, old.category, old.keywords, old.inferred_tags);
    INSERT INTO recipe_browser_fts(rowid, category, keywords, inferred_tags)
    VALUES (new.id, new.category, new.keywords, new.inferred_tags);
END;
-- 5. Populate FTS from current table state
-- (inferred_tags is '[]' for all rows at this point; run infer_recipe_tags.py
-- to populate, then the FTS will be rebuilt as part of that script.)
INSERT INTO recipe_browser_fts(recipe_browser_fts) VALUES('rebuild');

View file

@ -1,5 +0,0 @@
-- Migration 030: open-package tracking
-- Adds opened_date to track when a multi-use item was first opened,
-- enabling secondary shelf-life windows (e.g. salsa: 1 year sealed → 2 weeks opened).
-- Nullable by design: NULL means the package has never been opened.
ALTER TABLE inventory_items ADD COLUMN opened_date TEXT;

View file

@ -1,4 +0,0 @@
-- Migration 031: add disposal_reason for waste logging (#60)
-- status='discarded' already exists in the CHECK constraint from migration 002.
-- This column stores free-text reason (optional) and calm-framing presets.
-- NULL until an item is actually discarded with a reason.
ALTER TABLE inventory_items ADD COLUMN disposal_reason TEXT;

View file

@ -1,4 +0,0 @@
-- 032_meal_plan_unique_week.sql
-- Prevent duplicate plans for the same week.
-- Existing duplicates must be resolved before applying (keep MIN(id) per week_start):
-- CREATE UNIQUE INDEX fails outright if duplicate week_start values exist,
-- which aborts the migration safely rather than corrupting data.
CREATE UNIQUE INDEX IF NOT EXISTS idx_meal_plans_week_start ON meal_plans (week_start);

View file

@ -1,21 +0,0 @@
-- Migration 033: standalone shopping list
-- Items can be added manually, from recipe gap analysis, or from the recipe browser.
-- Affiliate links are computed at query time by the API layer (never stored).
CREATE TABLE IF NOT EXISTS shopping_list_items (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL,
    quantity REAL,                                       -- NULL = unspecified amount
    unit TEXT,
    category TEXT,                                       -- grouping bucket for display
    checked INTEGER NOT NULL DEFAULT 0,                  -- 0=want, 1=in-cart/checked off
    notes TEXT,
    source TEXT NOT NULL DEFAULT 'manual',               -- manual | recipe | meal_plan
    recipe_id INTEGER REFERENCES recipes(id) ON DELETE SET NULL,  -- provenance survives recipe deletion as NULL
    sort_order INTEGER NOT NULL DEFAULT 0,               -- user-arranged position within the list
    created_at TEXT NOT NULL DEFAULT (datetime('now')),
    updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);
-- Composite index serves the main list query: unchecked items in sort order.
CREATE INDEX IF NOT EXISTS idx_shopping_list_checked
    ON shopping_list_items (checked, sort_order);

View file

@ -1,14 +0,0 @@
-- Migration 034: async recipe generation job queue
-- One row per queued recipe-generation request. request/result/error hold
-- serialized payloads (presumably JSON — confirm against the job worker).
CREATE TABLE IF NOT EXISTS recipe_jobs (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    job_id TEXT NOT NULL UNIQUE,                         -- external handle polled by clients
    user_id TEXT NOT NULL,
    status TEXT NOT NULL DEFAULT 'queued',               -- state values enforced in code, not schema
    request TEXT NOT NULL,
    result TEXT,
    error TEXT,
    created_at TEXT NOT NULL DEFAULT (datetime('now')),
    updated_at TEXT NOT NULL DEFAULT (datetime('now'))
);
-- NOTE(review): job_id is declared UNIQUE, which already gives it an implicit
-- index in SQLite — idx_recipe_jobs_job_id is redundant (harmless; a later
-- migration could drop it).
CREATE INDEX IF NOT EXISTS idx_recipe_jobs_job_id ON recipe_jobs (job_id);
-- Per-user job history, newest first.
CREATE INDEX IF NOT EXISTS idx_recipe_jobs_user_id ON recipe_jobs (user_id, created_at DESC);

View file

@ -1,12 +0,0 @@
-- Migration 035: add sensory_tags column for sensory profile filtering
--
-- sensory_tags holds a JSON object with texture, smell, and noise signals:
--   {"textures": ["mushy", "creamy"], "smell": "pungent", "noise": "moderate"}
--
-- Empty object '{}' means untagged — these recipes pass ALL sensory filters
-- (graceful degradation when tag_sensory_profiles.py has not yet been run).
--
-- Populated offline by: python scripts/tag_sensory_profiles.py [path/to/kiwi.db]
-- No FTS rebuild needed — sensory_tags is filtered in Python after candidate fetch.
-- NOT NULL DEFAULT '{}' backfills every existing row as untagged.
ALTER TABLE recipes ADD COLUMN sensory_tags TEXT NOT NULL DEFAULT '{}';

View file

@ -1,26 +0,0 @@
-- Migration 036: captured_products local cache
-- Products captured via visual label scanning (kiwi#79).
-- Keyed by barcode; checked before FDC/OFF on future scans so each product
-- is only captured once per device.
CREATE TABLE IF NOT EXISTS captured_products (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    barcode TEXT UNIQUE NOT NULL,                        -- lookup key; one capture per product per device
    product_name TEXT,
    brand TEXT,
    -- Nutrition facts as extracted from the label photo; all nullable since
    -- extraction may miss fields. Units per column-name suffix (_g grams, _mg milligrams).
    serving_size_g REAL,
    calories REAL,
    fat_g REAL,
    saturated_fat_g REAL,
    carbs_g REAL,
    sugar_g REAL,
    fiber_g REAL,
    protein_g REAL,
    sodium_mg REAL,
    ingredient_names TEXT NOT NULL DEFAULT '[]',         -- JSON array
    allergens TEXT NOT NULL DEFAULT '[]',                -- JSON array
    confidence REAL,                                     -- extraction confidence score
    source TEXT NOT NULL DEFAULT 'visual_capture',
    captured_at TEXT NOT NULL DEFAULT (datetime('now')),
    confirmed_by_user INTEGER NOT NULL DEFAULT 0         -- 1 once the user reviewed the extraction
);

File diff suppressed because it is too large Load diff

View file

@ -1,9 +1,7 @@
#!/usr/bin/env python #!/usr/bin/env python
# app/main.py # app/main.py
import asyncio
import logging import logging
import os
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
from fastapi import FastAPI from fastapi import FastAPI
@ -11,70 +9,20 @@ from fastapi.middleware.cors import CORSMiddleware
from app.api.routes import api_router from app.api.routes import api_router
from app.core.config import settings from app.core.config import settings
from app.services.meal_plan.affiliates import register_kiwi_programs
# Structured key=value log lines — grep/awk-friendly for log-based analytics.
# Without basicConfig, app-level INFO logs are silently dropped.
logging.basicConfig(level=logging.INFO, format="%(levelname)s:%(name)s: %(message)s")
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# Refresh cadence for the browse-count cache, in hours.
_BROWSE_REFRESH_INTERVAL_H = 24

async def _browse_counts_refresh_loop(corpus_path: str) -> None:
    """Refresh browse counts every 24 h while the container is running.

    Sleeps first, so the initial refresh is handled separately at startup.
    The heavy `refresh` runs in a worker thread (asyncio.to_thread) to keep
    the event loop responsive; failures are logged and the loop keeps going.
    Runs forever — cancelled implicitly when the app's event loop shuts down.
    """
    # Imported lazily to avoid a startup import cycle — presumably; confirm.
    from app.db.store import _COUNT_CACHE
    from app.services.recipe.browse_counts_cache import load_into_memory, refresh
    while True:
        await asyncio.sleep(_BROWSE_REFRESH_INTERVAL_H * 3600)
        try:
            logger.info("browse_counts: starting scheduled refresh...")
            computed = await asyncio.to_thread(
                refresh, corpus_path, settings.BROWSE_COUNTS_PATH
            )
            # Swap the freshly computed counts into the in-process cache.
            load_into_memory(settings.BROWSE_COUNTS_PATH, _COUNT_CACHE, corpus_path)
            logger.info("browse_counts: scheduled refresh complete (%d sets)", computed)
        except Exception as exc:
            # Best-effort: a failed refresh leaves the previous counts in place.
            logger.warning("browse_counts: scheduled refresh failed: %s", exc)
@asynccontextmanager @asynccontextmanager
async def lifespan(app: FastAPI): async def lifespan(app: FastAPI):
logger.info("Starting Kiwi API...") logger.info("Starting Kiwi API...")
settings.ensure_dirs() settings.ensure_dirs()
register_kiwi_programs()
# Start LLM background task scheduler # Start LLM background task scheduler
from app.tasks.scheduler import get_scheduler from app.tasks.scheduler import get_scheduler
get_scheduler(settings.DB_PATH) get_scheduler(settings.DB_PATH)
logger.info("Task scheduler started.") logger.info("Task scheduler started.")
# Initialize community store (no-op if COMMUNITY_DB_URL is not set)
from app.api.endpoints.community import init_community_store
init_community_store(settings.COMMUNITY_DB_URL)
# Browse counts cache — warm in-memory cache from disk, refresh if stale.
# Uses the corpus path the store will attach to at request time.
corpus_path = os.environ.get("RECIPE_DB_PATH", str(settings.DB_PATH))
try:
from app.db.store import _COUNT_CACHE
from app.services.recipe.browse_counts_cache import (
is_stale, load_into_memory, refresh,
)
if is_stale(settings.BROWSE_COUNTS_PATH):
logger.info("browse_counts: cache stale — refreshing in background...")
asyncio.create_task(
asyncio.to_thread(refresh, corpus_path, settings.BROWSE_COUNTS_PATH)
)
else:
load_into_memory(settings.BROWSE_COUNTS_PATH, _COUNT_CACHE, corpus_path)
except Exception as exc:
logger.warning("browse_counts: startup init failed (live FTS fallback active): %s", exc)
# Nightly background refresh loop
asyncio.create_task(_browse_counts_refresh_loop(corpus_path))
yield yield
# Graceful scheduler shutdown # Graceful scheduler shutdown
@ -87,7 +35,7 @@ async def lifespan(app: FastAPI):
app = FastAPI( app = FastAPI(
title=settings.PROJECT_NAME, title=settings.PROJECT_NAME,
description="Pantry tracking + leftover recipe suggestions", description="Pantry tracking + leftover recipe suggestions",
version="0.2.0", version="0.1.0",
lifespan=lifespan, lifespan=lifespan,
) )

View file

@ -1,47 +0,0 @@
"""Pydantic schemas for household management endpoints."""
from __future__ import annotations
from pydantic import BaseModel, Field
class HouseholdCreateResponse(BaseModel):
    """Returned after creating a household."""
    household_id: str  # opaque id of the new household
    message: str       # human-readable confirmation

class HouseholdMember(BaseModel):
    """One member row inside a household status listing."""
    user_id: str
    joined_at: str  # timestamp string (format set by the store layer)
    is_owner: bool

class HouseholdStatusResponse(BaseModel):
    """The current user's household membership snapshot."""
    in_household: bool
    household_id: str | None = None   # None when not in a household
    is_owner: bool = False
    members: list[HouseholdMember] = Field(default_factory=list)
    max_seats: int = 4                # seat cap surfaced to the UI

class HouseholdInviteResponse(BaseModel):
    """A freshly minted invite."""
    invite_url: str  # shareable URL (embeds the token)
    token: str       # bare token
    expires_at: str  # when the invite stops working

class HouseholdAcceptRequest(BaseModel):
    """Body for redeeming an invite."""
    household_id: str
    token: str

class HouseholdAcceptResponse(BaseModel):
    """Result of a successful invite redemption."""
    message: str
    household_id: str

class HouseholdRemoveMemberRequest(BaseModel):
    """Body for removing a member from the household."""
    user_id: str

class MessageResponse(BaseModel):
    """Generic message-only response."""
    message: str

View file

@ -89,20 +89,9 @@ class InventoryItemUpdate(BaseModel):
unit: Optional[str] = None unit: Optional[str] = None
location: Optional[str] = None location: Optional[str] = None
sublocation: Optional[str] = None sublocation: Optional[str] = None
purchase_date: Optional[date] = None
expiration_date: Optional[date] = None expiration_date: Optional[date] = None
opened_date: Optional[date] = None
status: Optional[str] = None status: Optional[str] = None
notes: Optional[str] = None notes: Optional[str] = None
disposal_reason: Optional[str] = None
class PartialConsumeRequest(BaseModel):
    """Consume part of an inventory item's quantity (must be positive)."""
    quantity: float = Field(..., gt=0, description="Amount to consume from this item")

class DiscardRequest(BaseModel):
    """Discard an item, optionally recording a short waste-log reason."""
    reason: Optional[str] = Field(None, max_length=200)
class InventoryItemResponse(BaseModel): class InventoryItemResponse(BaseModel):
@ -117,15 +106,8 @@ class InventoryItemResponse(BaseModel):
sublocation: Optional[str] sublocation: Optional[str]
purchase_date: Optional[str] purchase_date: Optional[str]
expiration_date: Optional[str] expiration_date: Optional[str]
opened_date: Optional[str] = None
opened_expiry_date: Optional[str] = None
secondary_state: Optional[str] = None
secondary_uses: Optional[List[str]] = None
secondary_warning: Optional[str] = None
secondary_discard_signs: Optional[str] = None
status: str status: str
notes: Optional[str] notes: Optional[str]
disposal_reason: Optional[str] = None
source: str source: str
created_at: str created_at: str
updated_at: str updated_at: str
@ -141,8 +123,6 @@ class BarcodeScanResult(BaseModel):
product: Optional[ProductResponse] product: Optional[ProductResponse]
inventory_item: Optional[InventoryItemResponse] inventory_item: Optional[InventoryItemResponse]
added_to_inventory: bool added_to_inventory: bool
needs_manual_entry: bool = False
needs_visual_capture: bool = False # Paid tier offer when no product data found
message: str message: str
@ -153,32 +133,6 @@ class BarcodeScanResponse(BaseModel):
message: str message: str
# ── Bulk add by name ─────────────────────────────────────────────────────────
class BulkAddItem(BaseModel):
    """One item in a bulk add-by-name request."""
    name: str = Field(..., min_length=1, max_length=200)
    quantity: float = Field(default=1.0, gt=0)
    unit: str = "count"
    location: str = "pantry"

class BulkAddByNameRequest(BaseModel):
    """Add several items at once by free-text name (at least one)."""
    items: List[BulkAddItem] = Field(..., min_length=1)

class BulkAddItemResult(BaseModel):
    """Per-item outcome of a bulk add; failures carry an error string."""
    name: str
    ok: bool
    item_id: Optional[int] = None  # set when ok
    error: Optional[str] = None    # set when not ok

class BulkAddByNameResponse(BaseModel):
    """Aggregate counts plus the per-item results."""
    added: int
    failed: int
    results: List[BulkAddItemResult]
# ── Stats ───────────────────────────────────────────────────────────────────── # ── Stats ─────────────────────────────────────────────────────────────────────
class InventoryStats(BaseModel): class InventoryStats(BaseModel):

View file

@ -1,59 +0,0 @@
"""Pydantic schemas for visual label capture (kiwi#79)."""
from __future__ import annotations
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
class LabelCaptureResponse(BaseModel):
    """Extraction result returned after the user photographs a nutrition label.

    All nutrition fields are optional — extraction may miss any of them.
    Units follow the field-name suffix (_g grams, _mg milligrams).
    """
    barcode: str
    product_name: Optional[str] = None
    brand: Optional[str] = None
    serving_size_g: Optional[float] = None
    calories: Optional[float] = None
    fat_g: Optional[float] = None
    saturated_fat_g: Optional[float] = None
    carbs_g: Optional[float] = None
    sugar_g: Optional[float] = None
    fiber_g: Optional[float] = None
    protein_g: Optional[float] = None
    sodium_mg: Optional[float] = None
    ingredient_names: List[str] = Field(default_factory=list)
    allergens: List[str] = Field(default_factory=list)
    confidence: float = 0.0
    needs_review: bool = True  # True when confidence < REVIEW_THRESHOLD

class LabelConfirmRequest(BaseModel):
    """User-confirmed extraction to save to the local product cache.

    Mirrors LabelCaptureResponse's nutrition fields, plus inventory options
    used when auto_add is True.
    """
    barcode: str
    product_name: Optional[str] = None
    brand: Optional[str] = None
    serving_size_g: Optional[float] = None
    calories: Optional[float] = None
    fat_g: Optional[float] = None
    saturated_fat_g: Optional[float] = None
    carbs_g: Optional[float] = None
    sugar_g: Optional[float] = None
    fiber_g: Optional[float] = None
    protein_g: Optional[float] = None
    sodium_mg: Optional[float] = None
    ingredient_names: List[str] = Field(default_factory=list)
    allergens: List[str] = Field(default_factory=list)
    confidence: float = 0.0
    # When True the confirmed product is also added to inventory
    location: str = "pantry"
    quantity: float = 1.0
    auto_add: bool = True

class LabelConfirmResponse(BaseModel):
    """Result of confirming a captured product."""
    ok: bool
    barcode: str
    product_id: Optional[int] = None         # cache row id, when saved
    inventory_item_id: Optional[int] = None  # set only when auto-added to inventory
    message: str

View file

@ -1,100 +0,0 @@
# app/models/schemas/meal_plan.py
"""Pydantic schemas for meal planning endpoints."""
from __future__ import annotations
from datetime import date as _date
from pydantic import BaseModel, Field, field_validator
VALID_MEAL_TYPES = {"breakfast", "lunch", "dinner", "snack"}
class CreatePlanRequest(BaseModel):
    """Body for creating a weekly meal plan.

    week_start must be a Monday, and every entry in meal_types must be one
    of VALID_MEAL_TYPES (breakfast/lunch/dinner/snack).
    """

    week_start: _date
    meal_types: list[str] = Field(default_factory=lambda: ["dinner"])

    @field_validator("week_start")
    @classmethod
    def must_be_monday(cls, v: _date) -> _date:
        if v.weekday() != 0:
            raise ValueError("week_start must be a Monday (weekday 0)")
        return v

    @field_validator("meal_types")
    @classmethod
    def meal_types_must_be_valid(cls, v: list[str]) -> list[str]:
        # VALID_MEAL_TYPES was defined at module level but never enforced;
        # reject unknown types at the API boundary instead of failing later.
        bad = [m for m in v if m not in VALID_MEAL_TYPES]
        if bad:
            raise ValueError(
                f"invalid meal types: {bad}; allowed: {sorted(VALID_MEAL_TYPES)}"
            )
        return v
class UpdatePlanRequest(BaseModel):
    """Replace a plan's active meal types."""
    meal_types: list[str]

class UpsertSlotRequest(BaseModel):
    """Create or update one (day, meal) slot: a recipe or a free-form label."""
    recipe_id: int | None = None
    servings: float = Field(2.0, gt=0)
    custom_label: str | None = None

class SlotSummary(BaseModel):
    """One slot as returned to the client."""
    id: int
    plan_id: int
    day_of_week: int        # 0-6 (schema CHECK); 0 = Monday, matching week_start
    meal_type: str
    recipe_id: int | None
    recipe_title: str | None  # joined from recipes when recipe_id is set
    servings: float
    custom_label: str | None

class PlanSummary(BaseModel):
    """A weekly plan with all of its slots."""
    id: int
    week_start: str
    meal_types: list[str]
    slots: list[SlotSummary]
    created_at: str
class RetailerLink(BaseModel):
    """One affiliate/retailer link for a shopping-gap ingredient."""
    retailer: str
    label: str
    url: str

class GapItem(BaseModel):
    """One ingredient from the plan's recipes, with pantry coverage info."""
    ingredient_name: str
    needed_raw: str | None      # e.g. "2 cups" from recipe text
    have_quantity: float | None # from pantry
    have_unit: str | None
    covered: bool               # True = pantry has it
    retailer_links: list[RetailerLink] = Field(default_factory=list)

class ShoppingListResponse(BaseModel):
    """Gap analysis for a plan: what's missing vs. already covered."""
    plan_id: int
    gap_items: list[GapItem]
    covered_items: list[GapItem]
    disclosure: str | None = None  # affiliate disclosure text when links present
class PrepTaskSummary(BaseModel):
    """One task inside a prep session, in sequence order."""
    id: int
    recipe_id: int | None
    task_label: str
    duration_minutes: int | None
    sequence_order: int
    equipment: str | None
    is_parallel: bool   # can run alongside the previous task
    notes: str | None
    user_edited: bool   # user changed it; protected from regeneration

class PrepSessionSummary(BaseModel):
    """A prep session with its full task list."""
    id: int
    plan_id: int
    scheduled_date: str
    status: str  # draft | reviewed | done (schema CHECK)
    tasks: list[PrepTaskSummary]

class UpdatePrepTaskRequest(BaseModel):
    """Partial update of a prep task; None fields are left unchanged."""
    duration_minutes: int | None = None
    sequence_order: int | None = None
    notes: str | None = None
    equipment: str | None = None

View file

@ -1,176 +0,0 @@
"""Pydantic schemas for the recipe engine API."""
from __future__ import annotations
from pydantic import BaseModel, Field
class SwapCandidate(BaseModel):
    """A suggested ingredient substitution for a constrained recipe."""
    original_name: str
    substitute_name: str
    constraint_label: str  # the dietary/allergy constraint motivating the swap
    explanation: str
    # Free-form hint dicts (shape set by the swap engine — confirm upstream).
    compensation_hints: list[dict] = Field(default_factory=list)

class NutritionPanel(BaseModel):
    """Per-recipe macro summary. All values are per-serving when servings is known,
    otherwise for the full recipe. None means data is unavailable."""
    calories: float | None = None
    fat_g: float | None = None
    protein_g: float | None = None
    carbs_g: float | None = None
    fiber_g: float | None = None
    sugar_g: float | None = None
    sodium_mg: float | None = None
    servings: float | None = None
    estimated: bool = False  # True when nutrition was inferred from ingredient profiles
class RecipeSuggestion(BaseModel):
    """One suggested recipe with pantry-match detail and optional extras."""
    id: int
    title: str
    match_count: int  # how many pantry items this recipe uses
    element_coverage: dict[str, float] = Field(default_factory=dict)
    swap_candidates: list[SwapCandidate] = Field(default_factory=list)
    matched_ingredients: list[str] = Field(default_factory=list)
    missing_ingredients: list[str] = Field(default_factory=list)
    directions: list[str] = Field(default_factory=list)
    prep_notes: list[str] = Field(default_factory=list)
    notes: str = ""
    level: int = 1            # 1-4 suggestion level (see RecipeRequest.level)
    is_wildcard: bool = False
    nutrition: NutritionPanel | None = None
    source_url: str | None = None
    complexity: str | None = None  # 'easy' | 'moderate' | 'involved'
    estimated_time_min: int | None = None  # derived from step count + method signals
    rerank_score: float | None = None  # cross-encoder relevance score (paid+ only, None for free tier)
class GroceryLink(BaseModel):
    """Retailer link for one missing ingredient."""
    ingredient: str
    retailer: str
    url: str

class RecipeResult(BaseModel):
    """Full response for a recipe suggestion request."""
    suggestions: list[RecipeSuggestion]
    element_gaps: list[str]
    grocery_list: list[str] = Field(default_factory=list)
    grocery_links: list[GroceryLink] = Field(default_factory=list)
    rate_limited: bool = False
    rate_limit_count: int = 0
    orch_fallback: bool = False  # True when orch budget exhausted; fell back to local LLM

class RecipeJobQueued(BaseModel):
    """Ack for an async generation request; poll with job_id."""
    job_id: str
    status: str = "queued"

class RecipeJobStatus(BaseModel):
    """Polled job state; result/error populated on completion/failure."""
    job_id: str
    status: str
    result: RecipeResult | None = None
    error: str | None = None
class NutritionFilters(BaseModel):
    """Optional per-serving upper bounds for macro filtering. None = no filter."""
    max_calories: float | None = None
    max_sugar_g: float | None = None
    max_carbs_g: float | None = None
    max_sodium_mg: float | None = None

class RecipeRequest(BaseModel):
    """Input to the recipe suggestion engine."""
    pantry_items: list[str]
    # Maps product name → secondary state label for items past nominal expiry
    # but still within their secondary use window (e.g. {"Bread": "stale"}).
    # Used by the recipe engine to boost recipes suited to those specific states.
    secondary_pantry_items: dict[str, str] = Field(default_factory=dict)
    level: int = Field(default=1, ge=1, le=4)  # 1=deterministic … 4=wildcard
    constraints: list[str] = Field(default_factory=list)  # dietary constraint labels
    expiry_first: bool = False       # prioritize soon-to-expire items
    hard_day_mode: bool = False
    max_missing: int | None = None   # cap on missing-ingredient count
    style_id: str | None = None
    category: str | None = None
    tier: str = "free"
    has_byok: bool = False           # user brings their own LLM key
    wildcard_confirmed: bool = False # must be True before level-4 runs
    allergies: list[str] = Field(default_factory=list)
    nutrition_filters: NutritionFilters = Field(default_factory=NutritionFilters)
    excluded_ids: list[int] = Field(default_factory=list)       # recipes to skip (e.g. already shown)
    exclude_ingredients: list[str] = Field(default_factory=list)
    shopping_mode: bool = False
    pantry_match_only: bool = False  # when True, only return recipes with zero missing ingredients
    complexity_filter: str | None = None  # 'easy' | 'moderate' | 'involved' — None = any
    max_time_min: int | None = None  # filter by estimated cooking time ceiling
    max_total_min: int | None = None  # filter by parsed total time from recipe directions
    unit_system: str = "metric"  # "metric" | "imperial"
# ── Build Your Own schemas ──────────────────────────────────────────────────
class AssemblyRoleOut(BaseModel):
    """One role slot in a template, as returned by GET /api/recipes/templates."""
    display: str          # user-facing role name
    required: bool
    keywords: list[str]   # ingredient-matching keywords for this role
    hint: str = ""

class AssemblyTemplateOut(BaseModel):
    """One assembly template, as returned by GET /api/recipes/templates."""
    id: str  # slug, e.g. "burrito_taco"
    title: str
    icon: str
    descriptor: str
    role_sequence: list[AssemblyRoleOut]  # wizard steps, in order

class RoleCandidateItem(BaseModel):
    """One candidate ingredient for a wizard picker step."""
    name: str
    in_pantry: bool  # True when the user already has it
    tags: list[str] = Field(default_factory=list)

class RoleCandidatesResponse(BaseModel):
    """Response from GET /api/recipes/template-candidates."""
    compatible: list[RoleCandidateItem] = Field(default_factory=list)  # fits the role
    other: list[RoleCandidateItem] = Field(default_factory=list)       # everything else
    available_tags: list[str] = Field(default_factory=list)
class BuildRequest(BaseModel):
    """Request body for POST /api/recipes/build."""
    template_id: str
    # Maps role id → chosen ingredient name, overriding the default pick.
    role_overrides: dict[str, str] = Field(default_factory=dict)

class StreamTokenRequest(BaseModel):
    """Request body for POST /recipes/stream-token.

    Pantry items and dietary constraints are fetched from the DB at request
    time; the client does not supply them here.
    """
    level: int = Field(4, ge=3, le=4, description="Recipe level: 3=styled, 4=wildcard")
    wildcard_confirmed: bool = Field(False, description="Required true for level 4")

class StreamTokenResponse(BaseModel):
    """Response from POST /recipes/stream-token.

    The frontend opens EventSource at stream_url?token=<token> to receive
    SSE chunks directly from the coordinator.
    """
    stream_url: str
    token: str
    expires_in_s: int  # token lifetime in seconds

View file

@ -1,44 +0,0 @@
"""Pydantic schemas for saved recipes and collections."""
from __future__ import annotations
from pydantic import BaseModel, Field
class SaveRecipeRequest(BaseModel):
    """Bookmark a recipe, optionally with notes and a 0-5 rating."""
    recipe_id: int
    notes: str | None = None
    rating: int | None = Field(None, ge=0, le=5)

class UpdateSavedRecipeRequest(BaseModel):
    """Update a saved recipe's notes, rating, and style tags."""
    notes: str | None = None
    rating: int | None = Field(None, ge=0, le=5)
    style_tags: list[str] = Field(default_factory=list)

class SavedRecipeSummary(BaseModel):
    """A saved recipe as listed to the client."""
    id: int          # saved_recipes row id (not the recipe id)
    recipe_id: int
    title: str
    saved_at: str
    notes: str | None
    rating: int | None
    style_tags: list[str]
    collection_ids: list[int] = Field(default_factory=list)  # collections containing it

class CollectionSummary(BaseModel):
    """A collection with its member count."""
    id: int
    name: str
    description: str | None
    member_count: int
    created_at: str

class CollectionRequest(BaseModel):
    """Create or rename a collection."""
    name: str
    description: str | None = None

class CollectionMemberRequest(BaseModel):
    """Add/remove one saved recipe to/from a collection."""
    saved_recipe_id: int

View file

@ -1,60 +0,0 @@
"""Pydantic schemas for the shopping list endpoints."""
from __future__ import annotations
from typing import Optional
from pydantic import BaseModel, Field
class ShoppingItemCreate(BaseModel):
    """Create one shopping-list item (manual, recipe, or meal-plan sourced)."""
    name: str = Field(..., min_length=1, max_length=200)
    quantity: Optional[float] = None
    unit: Optional[str] = None
    category: Optional[str] = None
    notes: Optional[str] = None
    source: str = "manual"            # manual | recipe | meal_plan (matches schema default)
    recipe_id: Optional[int] = None   # provenance when source is a recipe
    sort_order: int = 0

class ShoppingItemUpdate(BaseModel):
    """Partial update; None fields are left unchanged."""
    name: Optional[str] = Field(None, min_length=1, max_length=200)
    quantity: Optional[float] = None
    unit: Optional[str] = None
    category: Optional[str] = None
    checked: Optional[bool] = None    # True = in-cart/checked off
    notes: Optional[str] = None
    sort_order: Optional[int] = None

class GroceryLinkOut(BaseModel):
    """Retailer link for one list item (computed at query time, never stored)."""
    ingredient: str
    retailer: str
    url: str

class ShoppingItemResponse(BaseModel):
    """One shopping-list row as returned to the client."""
    id: int
    name: str
    quantity: Optional[float]
    unit: Optional[str]
    category: Optional[str]
    checked: bool
    notes: Optional[str]
    source: str
    recipe_id: Optional[int]
    sort_order: int
    created_at: str
    updated_at: str
    # default_factory for consistency with the other models in this package
    # (a bare `= []` mutable default was the odd one out).
    grocery_links: list[GroceryLinkOut] = Field(default_factory=list)

class BulkAddFromRecipeRequest(BaseModel):
    """Add a recipe's missing ingredients to the list in one call."""
    recipe_id: int
    include_covered: bool = False  # if True, add pantry-covered items too

class ConfirmPurchaseRequest(BaseModel):
    """Move a checked item into pantry inventory."""
    location: str = "pantry"
    quantity: Optional[float] = None  # override the list quantity
    unit: Optional[str] = None

View file

@ -3,11 +3,6 @@
Business logic services for Kiwi. Business logic services for Kiwi.
""" """
from app.services.receipt_service import ReceiptService
__all__ = ["ReceiptService"] __all__ = ["ReceiptService"]
def __getattr__(name: str):
    """Lazily resolve ReceiptService on first attribute access (PEP 562).

    Deferring the import keeps `import app.services` cheap and avoids pulling
    in the receipt-service dependency chain until something actually uses it.
    Raises AttributeError for any other name, matching normal module behavior.
    """
    if name == "ReceiptService":
        from app.services.receipt_service import ReceiptService
        return ReceiptService
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

View file

@ -5,8 +5,6 @@ This module provides functionality to detect and decode barcodes
from images (UPC, EAN, QR codes, etc.). from images (UPC, EAN, QR codes, etc.).
""" """
import io
import cv2 import cv2
import numpy as np import numpy as np
from pyzbar import pyzbar from pyzbar import pyzbar
@ -14,12 +12,6 @@ from pathlib import Path
from typing import List, Dict, Any, Optional from typing import List, Dict, Any, Optional
import logging import logging
try:
from PIL import Image as _PILImage
_HAS_PIL = True
except ImportError:
_HAS_PIL = False
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -84,7 +76,9 @@ class BarcodeScanner:
# 4. Try rotations if still no barcodes found (handles tilted/rotated barcodes) # 4. Try rotations if still no barcodes found (handles tilted/rotated barcodes)
if not barcodes: if not barcodes:
logger.info("No barcodes found in standard orientation, trying rotations...") logger.info("No barcodes found in standard orientation, trying rotations...")
for angle in [90, 180, 270, 45, 135]: # Try incremental angles: 30°, 60°, 90° (covers 0-90° range)
# 0° already tried, 180° is functionally same as 0°, 90°/270° are same axis
for angle in [30, 60, 90]:
rotated_gray = self._rotate_image(gray, angle) rotated_gray = self._rotate_image(gray, angle)
rotated_color = self._rotate_image(image, angle) rotated_color = self._rotate_image(image, angle)
detected = self._detect_barcodes(rotated_gray, rotated_color) detected = self._detect_barcodes(rotated_gray, rotated_color)
@ -270,26 +264,6 @@ class BarcodeScanner:
return list(seen.values()) return list(seen.values())
def _fix_exif_orientation(self, image_bytes: bytes) -> bytes:
    """Apply EXIF orientation correction so cv2 sees an upright image.

    Phone cameras embed rotation in EXIF; cv2.imdecode ignores it,
    so a photo taken in portrait may arrive physically sideways in memory.

    Returns re-encoded JPEG bytes with the rotation baked in, or the
    original bytes unchanged when PIL is unavailable or decoding fails
    (best-effort — scanning proceeds on the uncorrected image).
    """
    if not _HAS_PIL:
        return image_bytes
    try:
        import PIL.ImageOps

        pil = _PILImage.open(io.BytesIO(image_bytes))
        # exif_transpose must run on the ORIGINAL opened image. The previous
        # code round-tripped through numpy first (fromarray(np.array(pil))),
        # which strips the EXIF tags and turned the transpose into a no-op.
        pil = PIL.ImageOps.exif_transpose(pil)
        # JPEG cannot encode alpha/palette modes; convert before saving.
        if pil.mode not in ("RGB", "L"):
            pil = pil.convert("RGB")
        buf = io.BytesIO()
        pil.save(buf, format="JPEG")
        return buf.getvalue()
    except Exception:
        return image_bytes
def scan_from_bytes(self, image_bytes: bytes) -> List[Dict[str, Any]]: def scan_from_bytes(self, image_bytes: bytes) -> List[Dict[str, Any]]:
""" """
Scan barcodes from image bytes (uploaded file). Scan barcodes from image bytes (uploaded file).
@ -301,10 +275,6 @@ class BarcodeScanner:
List of detected barcodes List of detected barcodes
""" """
try: try:
# Apply EXIF orientation correction first (phone cameras embed rotation in EXIF;
# cv2.imdecode ignores it, causing sideways barcodes to appear rotated in memory).
image_bytes = self._fix_exif_orientation(image_bytes)
# Convert bytes to numpy array # Convert bytes to numpy array
nparr = np.frombuffer(image_bytes, np.uint8) nparr = np.frombuffer(image_bytes, np.uint8)
image = cv2.imdecode(nparr, cv2.IMREAD_COLOR) image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
@ -330,12 +300,11 @@ class BarcodeScanner:
) )
barcodes.extend(self._detect_barcodes(thresh, image)) barcodes.extend(self._detect_barcodes(thresh, image))
# 3. Try all 90° rotations + common tilt angles # 3. Try rotations if still no barcodes found
# 90/270 catches truly sideways barcodes; 180 catches upside-down;
# 45/135 catches tilted barcodes on flat surfaces.
if not barcodes: if not barcodes:
logger.info("No barcodes found in uploaded image, trying rotations...") logger.info("No barcodes found in uploaded image, trying rotations...")
for angle in [90, 180, 270, 45, 135]: # Try incremental angles: 30°, 60°, 90° (covers 0-90° range)
for angle in [30, 60, 90]:
rotated_gray = self._rotate_image(gray, angle) rotated_gray = self._rotate_image(gray, angle)
rotated_color = self._rotate_image(image, angle) rotated_color = self._rotate_image(image, angle)
detected = self._detect_barcodes(rotated_gray, rotated_color) detected = self._detect_barcodes(rotated_gray, rotated_color)

View file

@ -1,44 +0,0 @@
# app/services/community/ap_compat.py
# MIT License — AP scaffold only (no actor, inbox, outbox)
from __future__ import annotations
from datetime import datetime, timezone
def post_to_ap_json_ld(post: dict, base_url: str) -> dict:
    """Serialize a community post dict to an ActivityPub-compatible JSON-LD Note.

    This is a read-only scaffold. No AP actor, inbox, or outbox.
    The slug URI is stable so a future full AP implementation can reuse posts
    without a DB migration.
    """
    published = post.get("published")
    # Normalise a datetime to a UTC ISO-8601 "Z" timestamp; anything else
    # (already-formatted string, None) is passed through via str().
    if isinstance(published, datetime):
        when = published.astimezone(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    else:
        when = str(published)

    # "#kiwi" is always first; dietary tags are compacted into hashtag form.
    hashtags = [{"type": "Hashtag", "name": "#kiwi"}]
    for raw_tag in post.get("dietary_tags") or []:
        compact = raw_tag.replace("-", "").replace(" ", "")
        hashtags.append({"type": "Hashtag", "name": f"#{compact}"})

    return {
        "@context": "https://www.w3.org/ns/activitystreams",
        "type": "Note",
        "id": f"{base_url}/api/v1/community/posts/{post['slug']}",
        "attributedTo": post.get("pseudonym", "anonymous"),
        "content": _build_content(post),
        "published": when,
        "tag": hashtags,
    }


def _build_content(post: dict) -> str:
    """Concatenate title and description; title alone when no description."""
    heading = post.get("title") or "Untitled"
    body = post.get("description")
    return f"{heading}{body}" if body else heading

View file

@ -1,90 +0,0 @@
# app/services/community/community_store.py
# MIT License
from __future__ import annotations
import logging
from circuitforge_core.community import CommunityPost, SharedStore
logger = logging.getLogger(__name__)
class KiwiCommunityStore(SharedStore):
    """Kiwi-specific community store: adds kiwi-domain query methods on top of SharedStore."""

    def list_meal_plans(
        self,
        limit: int = 20,
        offset: int = 0,
        dietary_tags: list[str] | None = None,
        allergen_exclude: list[str] | None = None,
    ) -> list[CommunityPost]:
        """Return kiwi meal-plan posts, delegating filtering to SharedStore.list_posts."""
        # Thin wrapper that pins post_type and source_product for this product.
        return self.list_posts(
            limit=limit,
            offset=offset,
            post_type="plan",
            dietary_tags=dietary_tags,
            allergen_exclude=allergen_exclude,
            source_product="kiwi",
        )

    def list_outcomes(
        self,
        limit: int = 20,
        offset: int = 0,
        post_type: str | None = None,
    ) -> list[CommunityPost]:
        """Return recipe outcome posts (successes, bloopers, or both merged)."""
        # A concrete outcome type was requested: one filtered, paginated query.
        if post_type in ("recipe_success", "recipe_blooper"):
            return self.list_posts(
                limit=limit,
                offset=offset,
                post_type=post_type,
                source_product="kiwi",
            )
        # No type given: fetch both kinds and interleave newest-first.
        # NOTE(review): offset is intentionally not applied to the merged
        # view — matches prior behaviour; merged pagination would need a
        # combined query.
        combined: list[CommunityPost] = []
        for kind in ("recipe_success", "recipe_blooper"):
            combined.extend(
                self.list_posts(
                    limit=limit,
                    offset=0,
                    post_type=kind,
                    source_product="kiwi",
                )
            )
        combined.sort(key=lambda p: p.published, reverse=True)
        return combined[:limit]
def get_or_create_pseudonym(
store,
directus_user_id: str,
requested_name: str | None,
) -> str:
"""Return the user's current pseudonym, creating it if it doesn't exist.
If the user has an existing pseudonym, return it (ignore requested_name).
If not, create using requested_name (must be provided for first-time setup).
Raises ValueError if no existing pseudonym and requested_name is None or blank.
"""
existing = store.get_current_pseudonym(directus_user_id)
if existing:
return existing
if not requested_name or not requested_name.strip():
raise ValueError(
"A pseudonym is required for first publish. "
"Pass requested_name with the user's chosen display name."
)
name = requested_name.strip()
if "@" in name:
raise ValueError(
"Pseudonym must not contain '@' — use a display name, not an email address."
)
store.set_pseudonym(directus_user_id, name)
return name

View file

@ -1,138 +0,0 @@
# app/services/community/element_snapshot.py
# MIT License
from __future__ import annotations
from dataclasses import dataclass
# Ingredient name substrings → allergen flag
_ALLERGEN_MAP: dict[str, str] = {
"milk": "dairy", "cream": "dairy", "cheese": "dairy", "butter": "dairy",
"yogurt": "dairy", "whey": "dairy",
"egg": "eggs",
"wheat": "gluten", "pasta": "gluten", "flour": "gluten", "bread": "gluten",
"barley": "gluten", "rye": "gluten",
"peanut": "nuts", "almond": "nuts", "cashew": "nuts", "walnut": "nuts",
"pecan": "nuts", "hazelnut": "nuts", "pistachio": "nuts", "macadamia": "nuts",
"soy": "soy", "tofu": "soy", "edamame": "soy", "miso": "soy", "tempeh": "soy",
"shrimp": "shellfish", "crab": "shellfish", "lobster": "shellfish",
"clam": "shellfish", "mussel": "shellfish", "scallop": "shellfish",
"fish": "fish", "salmon": "fish", "tuna": "fish", "cod": "fish",
"tilapia": "fish", "halibut": "fish",
"sesame": "sesame",
}
_MEAT_KEYWORDS = frozenset([
"chicken", "beef", "pork", "lamb", "turkey", "bacon", "ham", "sausage",
"salami", "prosciutto", "guanciale", "pancetta", "steak", "ground meat",
"mince", "veal", "duck", "venison", "bison", "lard",
])
_SEAFOOD_KEYWORDS = frozenset([
"fish", "shrimp", "crab", "lobster", "tuna", "salmon", "clam", "mussel",
"scallop", "anchovy", "sardine", "cod", "tilapia",
])
_ANIMAL_PRODUCT_KEYWORDS = frozenset([
"milk", "cream", "cheese", "butter", "egg", "honey", "yogurt", "whey",
])
def _detect_allergens(ingredient_names: list[str]) -> list[str]:
found: set[str] = set()
lowered = [n.lower() for n in ingredient_names]
for ingredient in lowered:
for keyword, flag in _ALLERGEN_MAP.items():
if keyword in ingredient:
found.add(flag)
return sorted(found)
def _detect_dietary_tags(ingredient_names: list[str]) -> list[str]:
lowered = [n.lower() for n in ingredient_names]
all_text = " ".join(lowered)
has_meat = any(k in all_text for k in _MEAT_KEYWORDS)
has_seafood = any(k in all_text for k in _SEAFOOD_KEYWORDS)
has_animal_products = any(k in all_text for k in _ANIMAL_PRODUCT_KEYWORDS)
tags: list[str] = []
if not has_meat and not has_seafood:
tags.append("vegetarian")
if not has_meat and not has_seafood and not has_animal_products:
tags.append("vegan")
return tags
@dataclass(frozen=True)
class ElementSnapshot:
seasoning_score: float
richness_score: float
brightness_score: float
depth_score: float
aroma_score: float
structure_score: float
texture_profile: str
dietary_tags: tuple
allergen_flags: tuple
flavor_molecules: tuple
fat_pct: float | None
protein_pct: float | None
moisture_pct: float | None
def compute_snapshot(recipe_ids: list[int], store) -> ElementSnapshot:
"""Compute an element snapshot from a list of recipe IDs.
Pulls SFAH scores, ingredient lists, and USDA FDC macros from the corpus.
Averages numeric scores across all recipes. Unions allergen flags and dietary tags.
Call at publish time only snapshot is stored denormalized in community_posts.
"""
if not recipe_ids:
return ElementSnapshot(
seasoning_score=0.0, richness_score=0.0, brightness_score=0.0,
depth_score=0.0, aroma_score=0.0, structure_score=0.0,
texture_profile="", dietary_tags=(), allergen_flags=(),
flavor_molecules=(), fat_pct=None, protein_pct=None, moisture_pct=None,
)
rows = store.get_recipes_by_ids(recipe_ids)
if not rows:
return ElementSnapshot(
seasoning_score=0.0, richness_score=0.0, brightness_score=0.0,
depth_score=0.0, aroma_score=0.0, structure_score=0.0,
texture_profile="", dietary_tags=(), allergen_flags=(),
flavor_molecules=(), fat_pct=None, protein_pct=None, moisture_pct=None,
)
def _avg(field: str) -> float:
vals = [r.get(field) or 0.0 for r in rows]
return sum(vals) / len(vals)
all_ingredients: list[str] = []
for r in rows:
names = r.get("ingredient_names") or []
all_ingredients.extend(names if isinstance(names, list) else [])
allergens = _detect_allergens(all_ingredients)
dietary = _detect_dietary_tags(all_ingredients)
texture = rows[0].get("texture_profile") or ""
fat_vals = [r.get("fat") for r in rows if r.get("fat") is not None]
prot_vals = [r.get("protein") for r in rows if r.get("protein") is not None]
moist_vals = [r.get("moisture") for r in rows if r.get("moisture") is not None]
return ElementSnapshot(
seasoning_score=_avg("seasoning_score"),
richness_score=_avg("richness_score"),
brightness_score=_avg("brightness_score"),
depth_score=_avg("depth_score"),
aroma_score=_avg("aroma_score"),
structure_score=_avg("structure_score"),
texture_profile=texture,
dietary_tags=tuple(dietary),
allergen_flags=tuple(allergens),
flavor_molecules=(),
fat_pct=(sum(fat_vals) / len(fat_vals)) if fat_vals else None,
protein_pct=(sum(prot_vals) / len(prot_vals)) if prot_vals else None,
moisture_pct=(sum(moist_vals) / len(moist_vals)) if moist_vals else None,
)

View file

@ -1,43 +0,0 @@
# app/services/community/feed.py
# MIT License
from __future__ import annotations
from datetime import datetime, timezone
from email.utils import format_datetime
from xml.etree.ElementTree import Element, SubElement, tostring
def posts_to_rss(posts: list[dict], base_url: str) -> str:
    """Generate an RSS 2.0 feed from a list of community post dicts.

    base_url: the root URL of this Kiwi instance (no trailing slash).
    Returns UTF-8 XML string.
    """
    root = Element("rss", version="2.0")
    channel = SubElement(root, "channel")

    # Channel header, in RSS-conventional order.
    for tag, text in (
        ("title", "Kiwi Community Feed"),
        ("link", f"{base_url}/community"),
        ("description", "Meal plans and recipe outcomes from the Kiwi community"),
        ("language", "en"),
        ("lastBuildDate", format_datetime(datetime.now(timezone.utc))),
    ):
        _sub(channel, tag, text)

    for post in posts:
        entry = SubElement(channel, "item")
        permalink = f"{base_url}/api/v1/community/posts/{post['slug']}"
        _sub(entry, "title", post.get("title") or "Untitled")
        _sub(entry, "link", permalink)
        _sub(entry, "guid", permalink)
        description = post.get("description")
        if description:
            _sub(entry, "description", description)
        published = post.get("published")
        # pubDate is emitted only when we hold a real datetime to format.
        if isinstance(published, datetime):
            _sub(entry, "pubDate", format_datetime(published))

    return '<?xml version="1.0" encoding="UTF-8"?>\n' + tostring(root, encoding="unicode")


def _sub(parent: Element, tag: str, text: str) -> Element:
    """Append a child element with the given tag and text; return it."""
    child = SubElement(parent, tag)
    child.text = text
    return child

View file

@ -1,72 +0,0 @@
# app/services/community/mdns.py
# MIT License
from __future__ import annotations
import logging
import socket
logger = logging.getLogger(__name__)
# Import deferred to avoid hard failure when zeroconf is not installed
try:
from zeroconf import ServiceInfo, Zeroconf
_ZEROCONF_AVAILABLE = True
except ImportError:
_ZEROCONF_AVAILABLE = False
class KiwiMDNS:
    """Advertise this Kiwi instance on the LAN via mDNS (_kiwi._tcp.local).

    Defaults to disabled (enabled=False). User must explicitly opt in via the
    Settings page. This matches the CF a11y requirement: no surprise broadcasting.

    Usage:
        mdns = KiwiMDNS(enabled=settings.MDNS_ENABLED, port=settings.PORT,
                        feed_url=f"http://{hostname}:{settings.PORT}/api/v1/community/local-feed")
        mdns.start()  # in lifespan startup
        mdns.stop()   # in lifespan shutdown
    """

    SERVICE_TYPE = "_kiwi._tcp.local."

    def __init__(self, enabled: bool, port: int, feed_url: str) -> None:
        self._enabled = enabled
        self._port = port
        self._feed_url = feed_url
        self._zc: "Zeroconf | None" = None
        self._info: "ServiceInfo | None" = None

    @staticmethod
    def _lan_address() -> bytes:
        """Best-effort packed IPv4 address of this host's outbound interface.

        Connecting a UDP socket (no packets are actually sent) asks the OS
        which local interface it would route through; falls back to loopback
        when the host has no route at all (e.g. fully offline).
        """
        probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            probe.connect(("192.0.2.1", 9))  # TEST-NET-1: never actually contacted
            return socket.inet_aton(probe.getsockname()[0])
        except OSError:
            return socket.inet_aton("127.0.0.1")
        finally:
            probe.close()

    def start(self) -> None:
        """Register the mDNS service; no-op unless enabled and zeroconf is installed."""
        if not self._enabled:
            logger.debug("mDNS advertisement disabled (user has not opted in)")
            return
        if not _ZEROCONF_AVAILABLE:
            logger.warning("zeroconf package not installed — mDNS advertisement unavailable")
            return
        hostname = socket.gethostname()
        service_name = f"kiwi-{hostname}.{self.SERVICE_TYPE}"
        self._info = ServiceInfo(
            type_=self.SERVICE_TYPE,
            name=service_name,
            port=self._port,
            properties={
                b"feed_url": self._feed_url.encode(),
                b"version": b"1",
            },
            # Fix: previously advertised 127.0.0.1, which LAN peers cannot
            # reach — advertise the outbound-interface address instead.
            addresses=[self._lan_address()],
        )
        self._zc = Zeroconf()
        self._zc.register_service(self._info)
        logger.info("mDNS: advertising %s on port %d", service_name, self._port)

    def stop(self) -> None:
        """Unregister and shut down; safe to call when start() never ran."""
        if self._zc is None or self._info is None:
            return
        self._zc.unregister_service(self._info)
        self._zc.close()
        self._zc = None
        self._info = None
        logger.info("mDNS: advertisement stopped")

View file

@ -1,94 +0,0 @@
"""cf-orch coordinator proxy client.
Calls the coordinator's /proxy/authorize endpoint to obtain a one-time
stream URL + token for LLM streaming. Always raises CoordinatorError on
failure — callers decide how to handle it (stream-token endpoint returns
503 or 403 as appropriate).
"""
from __future__ import annotations
import logging
import os
from dataclasses import dataclass
import httpx
log = logging.getLogger(__name__)
class CoordinatorError(Exception):
    """Raised when the coordinator returns an error or is unreachable.

    status_code mirrors the HTTP status the API layer should surface
    (defaults to 503 for "coordinator down / over capacity").
    """

    def __init__(self, message: str, status_code: int = 503):
        super().__init__(message)
        # Kept on the instance so API handlers can map the failure
        # straight onto an HTTP response.
        self.status_code = status_code
@dataclass(frozen=True)
class StreamTokenResult:
    """One-time streaming grant returned by the coordinator's /proxy/authorize."""
    stream_url: str  # URL the client connects to for the LLM token stream
    token: str  # one-time token authorizing that stream
    expires_in_s: int  # validity window of the token, in seconds
def _coordinator_url() -> str:
    """Base URL of the cf-orch coordinator; COORDINATOR_URL env var overrides."""
    # NOTE(review): the default points at a hard-coded lab host — deployments
    # should set COORDINATOR_URL explicitly (see .env example).
    return os.getenv("COORDINATOR_URL", "http://10.1.10.71:7700")


def _product_key() -> str:
    """Product key used to authenticate kiwi against the coordinator ("" when unset)."""
    return os.getenv("COORDINATOR_KIWI_KEY", "")
async def coordinator_authorize(
    prompt: str,
    caller: str = "kiwi-recipe",
    ttl_s: int = 300,
) -> StreamTokenResult:
    """Call POST /proxy/authorize on the coordinator.

    Returns a StreamTokenResult with the stream URL and one-time token.
    Raises CoordinatorError on any failure (network, auth, capacity).
    """
    key = _product_key()
    if not key:
        raise CoordinatorError(
            "COORDINATOR_KIWI_KEY env var is not set — streaming unavailable",
            status_code=503,
        )

    endpoint = f"{_coordinator_url()}/proxy/authorize"
    request_body = {
        "product": "kiwi",
        "product_key": key,
        "caller": caller,
        "prompt": prompt,
        "params": {},
        "ttl_s": ttl_s,
    }

    try:
        async with httpx.AsyncClient(timeout=10.0) as client:
            resp = await client.post(endpoint, json=request_body)
    except httpx.RequestError as exc:
        log.warning("coordinator_authorize network error: %s", exc)
        raise CoordinatorError(f"Coordinator unreachable: {exc}", status_code=503)

    # Well-known coordinator failure modes, mapped onto CoordinatorError
    # with the same HTTP status the coordinator reported.
    known_failures = {
        401: "Invalid product key",
        429: "Too many concurrent streams",
        503: "No GPU available for streaming",
    }
    if resp.status_code in known_failures:
        raise CoordinatorError(
            known_failures[resp.status_code], status_code=resp.status_code
        )
    if not resp.is_success:
        raise CoordinatorError(
            f"Coordinator error {resp.status_code}: {resp.text[:200]}",
            status_code=503,
        )

    payload = resp.json()
    # Prefer public_stream_url when the coordinator provides it (cloud mode),
    # otherwise fall back to the direct stream_url.
    return StreamTokenResult(
        stream_url=payload.get("public_stream_url") or payload["stream_url"],
        token=payload["token"],
        expires_in_s=payload["expires_in_s"],
    )

View file

@ -21,29 +21,6 @@ logger = logging.getLogger(__name__)
class ExpirationPredictor: class ExpirationPredictor:
"""Predict expiration dates based on product category and storage location.""" """Predict expiration dates based on product category and storage location."""
# Canonical location names and their aliases.
# All location strings are normalised through this before table lookup.
LOCATION_ALIASES: dict[str, str] = {
'garage_freezer': 'freezer',
'chest_freezer': 'freezer',
'deep_freezer': 'freezer',
'upright_freezer': 'freezer',
'refrigerator': 'fridge',
'frig': 'fridge',
'cupboard': 'cabinet',
'shelf': 'pantry',
'counter': 'pantry',
}
# When a category has no entry for the requested location, try these
# alternatives in order — prioritising same-temperature storage first.
LOCATION_FALLBACK: dict[str, tuple[str, ...]] = {
'freezer': ('freezer', 'fridge', 'pantry', 'cabinet'),
'fridge': ('fridge', 'pantry', 'cabinet', 'freezer'),
'pantry': ('pantry', 'cabinet', 'fridge', 'freezer'),
'cabinet': ('cabinet', 'pantry', 'fridge', 'freezer'),
}
# Default shelf life in days by category and location # Default shelf life in days by category and location
# Sources: USDA FoodKeeper app, FDA guidelines # Sources: USDA FoodKeeper app, FDA guidelines
SHELF_LIFE = { SHELF_LIFE = {
@ -62,8 +39,6 @@ class ExpirationPredictor:
'poultry': {'fridge': 2, 'freezer': 270}, 'poultry': {'fridge': 2, 'freezer': 270},
'chicken': {'fridge': 2, 'freezer': 270}, 'chicken': {'fridge': 2, 'freezer': 270},
'turkey': {'fridge': 2, 'freezer': 270}, 'turkey': {'fridge': 2, 'freezer': 270},
'tempeh': {'fridge': 10, 'freezer': 365},
'tofu': {'fridge': 5, 'freezer': 180},
'ground_meat': {'fridge': 2, 'freezer': 120}, 'ground_meat': {'fridge': 2, 'freezer': 120},
# Seafood # Seafood
'fish': {'fridge': 2, 'freezer': 180}, 'fish': {'fridge': 2, 'freezer': 180},
@ -84,9 +59,9 @@ class ExpirationPredictor:
'bread': {'pantry': 5, 'freezer': 90}, 'bread': {'pantry': 5, 'freezer': 90},
'bakery': {'pantry': 3, 'fridge': 7, 'freezer': 90}, 'bakery': {'pantry': 3, 'fridge': 7, 'freezer': 90},
# Frozen # Frozen
'frozen_foods': {'freezer': 180, 'fridge': 3}, 'frozen_foods': {'freezer': 180},
'frozen_vegetables': {'freezer': 270, 'fridge': 4}, 'frozen_vegetables': {'freezer': 270},
'frozen_fruit': {'freezer': 365, 'fridge': 4}, 'frozen_fruit': {'freezer': 365},
'ice_cream': {'freezer': 60}, 'ice_cream': {'freezer': 60},
# Pantry Staples # Pantry Staples
'canned_goods': {'pantry': 730, 'cabinet': 730}, 'canned_goods': {'pantry': 730, 'cabinet': 730},
@ -116,391 +91,44 @@ class ExpirationPredictor:
'prepared_foods': {'fridge': 4, 'freezer': 90}, 'prepared_foods': {'fridge': 4, 'freezer': 90},
} }
# Secondary shelf life in days after a package is opened.
# Sources: USDA FoodKeeper app, FDA consumer guides.
# Only categories where opening significantly shortens shelf life are listed.
# Items not listed default to None (no secondary window tracked).
SHELF_LIFE_AFTER_OPENING: dict[str, int] = {
# Dairy — once opened, clock ticks fast
'dairy': 5,
'milk': 5,
'cream': 3,
'yogurt': 7,
'cheese': 14,
'butter': 30,
# Condiments — refrigerated after opening
'condiments': 30,
'ketchup': 30,
'mustard': 30,
'mayo': 14,
'salad_dressing': 30,
'soy_sauce': 90,
# Canned goods — once opened, very short
'canned_goods': 4,
# Beverages
'juice': 7,
'soda': 4,
# Bread / Bakery
'bread': 5,
'bakery': 3,
# Produce
'leafy_greens': 3,
'berries': 3,
# Pantry staples (open bag)
'chips': 14,
'cookies': 14,
'cereal': 30,
'flour': 90,
}
# Post-expiry secondary use window.
# These are NOT spoilage extensions — they describe a qualitative state
# change where the ingredient is specifically suited for certain preparations.
# Sources: USDA FoodKeeper, food science, culinary tradition.
#
# Fields:
# window_days — days past nominal expiry still usable in secondary state
# label — short UI label for the state
# uses — recipe contexts suited to this state (shown in UI)
# warning — safety note, calm tone, None if none needed
# discard_signs — qualitative signs the item has gone past the secondary window
# constraints_exclude — dietary constraint labels that suppress this entry entirely
# (e.g. alcohol-containing items suppressed for halal/alcohol-free)
SECONDARY_WINDOW: dict[str, dict] = {
'bread': {
'window_days': 5,
'label': 'stale',
'uses': ['croutons', 'stuffing', 'bread pudding', 'French toast', 'panzanella'],
'warning': 'Check for mold before use — discard if any is visible.',
'discard_signs': 'Visible mold (any colour), or unpleasant smell beyond dry/yeasty.',
'constraints_exclude': [],
},
'bakery': {
'window_days': 3,
'label': 'day-old',
'uses': ['French toast', 'bread pudding', 'crumbles', 'trifle base', 'cake pops', 'streusel topping', 'bread crumbs'],
'warning': 'Check for mold before use — discard if any is visible.',
'discard_signs': 'Visible mold, sliminess, or strong sour smell.',
'constraints_exclude': [],
},
'bananas': {
'window_days': 5,
'label': 'overripe',
'uses': ['banana bread', 'smoothies', 'pancakes', 'muffins'],
'warning': None,
'discard_signs': 'Leaking liquid, fermented smell, or mold on skin.',
'constraints_exclude': [],
},
'milk': {
'window_days': 3,
'label': 'sour',
'uses': ['pancakes', 'scones', 'waffles', 'muffins', 'quick breads', 'béchamel', 'baked mac and cheese'],
'warning': 'Use only in cooked recipes — do not drink.',
'discard_signs': 'Chunky texture, strong unpleasant smell beyond tangy, or visible separation with grey colour.',
'constraints_exclude': [],
},
'dairy': {
'window_days': 2,
'label': 'sour',
'uses': ['pancakes', 'scones', 'quick breads', 'muffins', 'waffles'],
'warning': 'Use only in cooked recipes — do not drink.',
'discard_signs': 'Strong unpleasant smell, unusual colour, or chunky texture.',
'constraints_exclude': [],
},
'cheese': {
'window_days': 14,
'label': 'rind-ready',
'uses': ['parmesan broth', 'minestrone', 'ribollita', 'risotto', 'polenta', 'bean soups', 'gratins'],
'warning': None,
'discard_signs': 'Soft or wet texture on hard cheese, pink or black mold (white/green surface mold on hard cheese can be cut off with 1cm margin).',
'constraints_exclude': [],
},
'rice': {
'window_days': 2,
'label': 'day-old',
'uses': ['fried rice', 'onigiri', 'rice porridge', 'congee', 'arancini', 'stuffed peppers', 'rice fritters'],
'warning': 'Refrigerate immediately after cooking — do not leave at room temp.',
'discard_signs': 'Slimy texture, unusual smell, or more than 4 days since cooking.',
'constraints_exclude': [],
},
'tortillas': {
'window_days': 5,
'label': 'stale',
'uses': ['chilaquiles', 'migas', 'tortilla soup', 'casserole'],
'warning': 'Check for mold, especially if stored in a sealed bag — discard if any is visible.',
'discard_signs': 'Visible mold (check seams and edges), or strong sour smell.',
'constraints_exclude': [],
},
# ── New entries ──────────────────────────────────────────────────────
'apples': {
'window_days': 7,
'label': 'soft',
'uses': ['applesauce', 'apple butter', 'baked apples', 'apple crisp', 'smoothies', 'chutney'],
'warning': None,
'discard_signs': 'Large bruised areas with fermented smell, visible mold, or liquid leaking from skin.',
'constraints_exclude': [],
},
'leafy_greens': {
'window_days': 2,
'label': 'wilting',
'uses': ['sautéed greens', 'soups', 'smoothies', 'frittata', 'pasta add-in', 'stir fry'],
'warning': None,
'discard_signs': 'Slimy texture, strong unpleasant smell, or yellowed and mushy leaves.',
'constraints_exclude': [],
},
'tomatoes': {
'window_days': 4,
'label': 'soft',
'uses': ['roasted tomatoes', 'tomato sauce', 'shakshuka', 'bruschetta', 'soup', 'salsa'],
'warning': None,
'discard_signs': 'Broken skin with liquid pooling, mold, or fermented smell.',
'constraints_exclude': [],
},
'cooked_pasta': {
'window_days': 3,
'label': 'day-old',
'uses': ['pasta frittata', 'pasta salad', 'baked pasta', 'soup add-in', 'fried pasta cakes'],
'warning': 'Refrigerate within 2 hours of cooking.',
'discard_signs': 'Slimy texture, off smell, or more than 4 days since cooking.',
'constraints_exclude': [],
},
'cooked_potatoes': {
'window_days': 3,
'label': 'day-old',
'uses': ['potato pancakes', 'hash browns', 'potato soup', 'gnocchi', 'twice-baked potatoes', 'croquettes'],
'warning': 'Refrigerate within 2 hours of cooking.',
'discard_signs': 'Slimy texture, off smell, or more than 4 days since cooking.',
'constraints_exclude': [],
},
'yogurt': {
'window_days': 7,
'label': 'tangy',
'uses': ['marinades', 'flatbreads', 'smoothies', 'tzatziki', 'baked goods', 'salad dressings'],
'warning': None,
'discard_signs': 'Pink or orange discolouration, visible mold, or strongly unpleasant smell (not just tangy).',
'constraints_exclude': [],
},
'cream': {
'window_days': 2,
'label': 'sour',
'uses': ['soups', 'sauces', 'scones', 'quick breads', 'mashed potatoes'],
'warning': 'Use in cooked recipes only. Discard if the smell is strongly unpleasant rather than tangy.',
'discard_signs': 'Strong unpleasant smell beyond tangy, unusual colour, or chunky texture.',
'constraints_exclude': [],
},
'wine': {
'window_days': 4,
'label': 'open',
'uses': ['pan sauces', 'braises', 'risotto', 'marinades', 'poaching liquid', 'wine reduction'],
'warning': None,
'discard_signs': 'Strong vinegar smell (still usable in braises/marinades), or visible cloudiness with off-smell.',
'constraints_exclude': ['halal', 'alcohol-free'],
},
'cooked_beans': {
'window_days': 3,
'label': 'day-old',
'uses': ['refried beans', 'bean soup', 'bean fritters', 'hummus', 'bean dip', 'grain bowls'],
'warning': 'Refrigerate within 2 hours of cooking.',
'discard_signs': 'Slimy texture, off smell, or more than 4 days since cooking.',
'constraints_exclude': [],
},
'cooked_meat': {
'window_days': 2,
'label': 'leftover',
'uses': ['grain bowls', 'tacos', 'soups', 'fried rice', 'sandwiches', 'hash', 'pasta add-in'],
'warning': 'Refrigerate within 2 hours of cooking.',
'discard_signs': 'Off smell, slimy texture, or more than 34 days since cooking.',
'constraints_exclude': [],
},
}
def days_after_opening(self, category: str | None) -> int | None:
"""Return days of shelf life remaining once a package is opened.
Returns None if the category is unknown or not tracked after opening
(e.g. frozen items, raw meat category check irrelevant once opened).
"""
if not category:
return None
return self.SHELF_LIFE_AFTER_OPENING.get(category.lower())
def secondary_state(
self, category: str | None, expiry_date: str | None
) -> dict | None:
"""Return secondary use info if the item is in its post-expiry secondary window.
Returns a dict with label, uses, warning, discard_signs, constraints_exclude,
days_past, and window_days when the item is past its nominal expiry date but
still within the secondary use window.
Returns None in all other cases (unknown category, no window defined, not yet
expired, or past the secondary window).
Callers should apply constraints_exclude against user dietary constraints
and suppress the result entirely if any excluded constraint is active.
See filter_secondary_by_constraints().
"""
if not category or not expiry_date:
return None
entry = self.SECONDARY_WINDOW.get(category.lower())
if not entry:
return None
try:
from datetime import date
today = date.today()
exp = date.fromisoformat(expiry_date)
days_past = (today - exp).days
if 0 <= days_past <= entry['window_days']:
return {
'label': entry['label'],
'uses': list(entry['uses']),
'warning': entry['warning'],
'discard_signs': entry.get('discard_signs'),
'constraints_exclude': list(entry.get('constraints_exclude') or []),
'days_past': days_past,
'window_days': entry['window_days'],
}
except ValueError:
pass
return None
@staticmethod
def filter_secondary_by_constraints(
sec: dict | None,
user_constraints: list[str],
) -> dict | None:
"""Suppress secondary state entirely if any excluded constraint is active.
Call after secondary_state() when user dietary constraints are available.
Returns sec unchanged when no constraints match, or None when suppressed.
"""
if sec is None:
return None
excluded = sec.get('constraints_exclude') or []
if any(c.lower() in [e.lower() for e in excluded] for c in user_constraints):
return None
return sec
# Keyword lists are checked in declaration order — most specific first.
# Rules:
# - canned/processed goods BEFORE raw-meat terms (canned chicken != raw chicken)
# - frozen prepared foods BEFORE generic protein terms
# - multi-word phrases before single words where ambiguity exists
CATEGORY_KEYWORDS = { CATEGORY_KEYWORDS = {
# ── Frozen prepared foods ─────────────────────────────────────────────
# Before raw protein entries so plant-based frozen products don't
# inherit 2–3 day raw-meat shelf lives.
'ice_cream': ['ice cream', 'gelato', 'frozen yogurt', 'sorbet', 'sherbet'],
'frozen_fruit': [
'frozen berries', 'frozen mango', 'frozen strawberries',
'frozen blueberries', 'frozen raspberries', 'frozen peaches',
'frozen fruit', 'frozen cherries',
],
'frozen_vegetables': [
'frozen veg', 'frozen corn', 'frozen peas', 'frozen broccoli',
'frozen spinach', 'frozen edamame', 'frozen green beans',
'frozen mixed vegetables', 'frozen carrots',
'peas & carrots', 'peas and carrots', 'mixed vegetables',
'spring rolls', 'vegetable spring rolls',
],
'frozen_foods': [
'plant-based', 'plant based', 'meatless', 'impossible',
"chik'n", 'chikn', 'veggie burger', 'veggie patty',
'nugget', 'tater tot', 'waffle fries', 'hash brown',
'onion ring', 'fish stick', 'fish fillet', 'potsticker',
'dumpling', 'egg roll', 'empanada', 'tamale', 'falafel',
'mac & cheese bite', 'cauliflower wing', 'ranchero potato',
],
# ── Canned / shelf-stable processed goods ─────────────────────────────
# Before raw protein keywords so "canned chicken", "cream of chicken",
# and "lentil soup" resolve here rather than to raw chicken/cream.
'canned_goods': [
'canned', 'can of', 'tin of', 'tinned',
'cream of ', 'condensed soup', 'condensed cream',
'baked beans', 'refried beans',
'canned beans', 'canned tomatoes', 'canned corn', 'canned peas',
'canned soup', 'canned tuna', 'canned salmon', 'canned chicken',
'canned fruit', 'canned peaches', 'canned pears',
'enchilada sauce', 'tomato sauce', 'tomato paste',
'lentil soup', 'bean soup', 'chicken noodle soup',
],
# ── Condiments & brined items ─────────────────────────────────────────
# Before produce/protein terms so brined olives, jarred peppers, etc.
# don't inherit raw vegetable shelf lives.
'ketchup': ['ketchup', 'catsup'],
'mustard': ['mustard', 'dijon', 'dijion', 'stoneground mustard'],
'mayo': ['mayo', 'mayonnaise', 'miracle whip'],
'soy_sauce': ['soy sauce', 'tamari', 'shoyu'],
'salad_dressing': ['salad dressing', 'ranch', 'italian dressing', 'vinaigrette'],
'condiments': [
# brined / jarred items
'dill chips', 'hamburger chips', 'gherkin',
'olive', 'capers', 'jalapeño', 'jalapeno', 'pepperoncini',
'pimiento', 'banana pepper', 'cornichon',
# sauces
'hot sauce', 'hot pepper sauce', 'sriracha', 'cholula',
'worcestershire', 'barbecue sauce', 'bbq sauce',
'chipotle sauce', 'chipotle mayo', 'chipotle creamy',
'salsa', 'chutney', 'relish',
'teriyaki', 'hoisin', 'oyster sauce', 'fish sauce',
'miso', 'ssamjang', 'gochujang', 'doenjang',
'soybean paste', 'fermented soybean',
# nut butters / spreads
'peanut butter', 'almond butter', 'tahini', 'hummus',
# seasoning mixes
'seasoning', 'spice blend', 'borracho',
# other shelf-stable sauces
'yuzu', 'ponzu', 'lizano',
],
# ── Soy / fermented proteins ──────────────────────────────────────────
'tempeh': ['tempeh'],
'tofu': ['tofu', 'bean curd'],
# ── Dairy ─────────────────────────────────────────────────────────────
'milk': ['milk', 'whole milk', '2% milk', 'skim milk', 'almond milk', 'oat milk', 'soy milk'], 'milk': ['milk', 'whole milk', '2% milk', 'skim milk', 'almond milk', 'oat milk', 'soy milk'],
'cheese': ['cheese', 'cheddar', 'mozzarella', 'swiss', 'parmesan', 'feta', 'gouda', 'velveeta'], 'cheese': ['cheese', 'cheddar', 'mozzarella', 'swiss', 'parmesan', 'feta', 'gouda'],
'yogurt': ['yogurt', 'greek yogurt', 'yoghurt'], 'yogurt': ['yogurt', 'greek yogurt', 'yoghurt'],
'butter': ['butter', 'margarine'], 'butter': ['butter', 'margarine'],
# Bare 'cream' removed — "cream of X" is canned_goods (matched above). 'cream': ['cream', 'heavy cream', 'whipping cream', 'sour cream'],
'cream': ['heavy cream', 'whipping cream', 'sour cream', 'crème fraîche',
'cream cheese', 'whipped topping', 'whipped cream'],
'eggs': ['eggs', 'egg'], 'eggs': ['eggs', 'egg'],
# ── Raw proteins ────────────────────────────────────────────────────── 'beef': ['beef', 'steak', 'roast', 'brisket', 'ribeye', 'sirloin'],
# After canned/frozen so "canned chicken" is already resolved above. 'pork': ['pork', 'bacon', 'ham', 'sausage', 'pork chop'],
'chicken': ['chicken', 'chicken breast', 'chicken thigh', 'chicken wings'],
'turkey': ['turkey', 'turkey breast', 'ground turkey'],
'ground_meat': ['ground beef', 'ground pork', 'ground chicken', 'hamburger'],
'fish': ['fish', 'cod', 'tilapia', 'halibut'],
'salmon': ['salmon'], 'salmon': ['salmon'],
'shrimp': ['shrimp', 'prawns'], 'shrimp': ['shrimp', 'prawns'],
'fish': ['fish', 'cod', 'tilapia', 'halibut', 'pollock'], 'leafy_greens': ['lettuce', 'spinach', 'kale', 'arugula', 'mixed greens', 'salad'],
# Specific chicken cuts only — bare 'chicken' handled in generic fallback
'chicken': ['chicken breast', 'chicken thigh', 'chicken wings', 'chicken leg',
'whole chicken', 'rotisserie chicken', 'raw chicken'],
'turkey': ['turkey breast', 'whole turkey'],
'ground_meat': ['ground beef', 'ground pork', 'ground chicken', 'ground turkey',
'ground lamb', 'ground bison'],
'pork': ['pork', 'bacon', 'ham', 'pork chop', 'pork loin'],
'beef': ['beef', 'steak', 'brisket', 'ribeye', 'sirloin', 'roast beef'],
'deli_meat': ['deli', 'sliced turkey', 'sliced ham', 'lunch meat', 'cold cuts',
'prosciutto', 'salami', 'pepperoni'],
# ── Produce ───────────────────────────────────────────────────────────
'leafy_greens': ['lettuce', 'spinach', 'kale', 'arugula', 'mixed greens'],
'berries': ['strawberries', 'blueberries', 'raspberries', 'blackberries'], 'berries': ['strawberries', 'blueberries', 'raspberries', 'blackberries'],
'apples': ['apple', 'apples'], 'apples': ['apple', 'apples'],
'bananas': ['banana', 'bananas'], 'bananas': ['banana', 'bananas'],
'citrus': ['orange', 'lemon', 'lime', 'grapefruit', 'tangerine'], 'citrus': ['orange', 'lemon', 'lime', 'grapefruit', 'tangerine'],
# ── Bakery ──────────────────────────────────────────────────────────── 'bread': ['bread', 'loaf', 'baguette', 'roll', 'bagel', 'bun'],
'bakery': [ 'bakery': ['muffin', 'croissant', 'donut', 'danish', 'pastry'],
'muffin', 'croissant', 'donut', 'danish', 'puff pastry', 'pastry puff', 'deli_meat': ['deli', 'sliced turkey', 'sliced ham', 'lunch meat', 'cold cuts'],
'cinnamon roll', 'dinner roll', 'parkerhouse roll', 'scone', 'frozen_vegetables': ['frozen veg', 'frozen corn', 'frozen peas', 'frozen broccoli'],
], 'frozen_fruit': ['frozen berries', 'frozen mango', 'frozen strawberries'],
'bread': ['bread', 'loaf', 'baguette', 'bagel', 'bun', 'pita', 'naan', 'ice_cream': ['ice cream', 'gelato', 'frozen yogurt'],
'english muffin', 'sourdough'], 'pasta': ['pasta', 'spaghetti', 'penne', 'macaroni', 'noodles'],
# ── Dry pantry staples ──────────────────────────────────────────────── 'rice': ['rice', 'brown rice', 'white rice', 'jasmine'],
'pasta': ['pasta', 'spaghetti', 'penne', 'macaroni', 'noodles', 'couscous', 'orzo'],
'rice': ['rice', 'brown rice', 'white rice', 'jasmine rice', 'basmati',
'spanish rice', 'rice mix'],
'cereal': ['cereal', 'granola', 'oatmeal'], 'cereal': ['cereal', 'granola', 'oatmeal'],
'chips': ['chips', 'crisps', 'tortilla chips', 'pretzel', 'popcorn'], 'chips': ['chips', 'crisps', 'tortilla chips'],
'cookies': ['cookies', 'biscuits', 'crackers', 'graham cracker', 'wafer'], 'cookies': ['cookies', 'biscuits', 'crackers'],
# ── Beverages ───────────────────────────────────────────────────────── 'ketchup': ['ketchup', 'catsup'],
'juice': ['juice', 'orange juice', 'apple juice', 'lemonade'], 'mustard': ['mustard'],
'soda': ['soda', 'cola', 'sprite', 'pepsi', 'coke', 'carbonated soft drink'], 'mayo': ['mayo', 'mayonnaise', 'miracle whip'],
'salad_dressing': ['salad dressing', 'ranch', 'italian dressing', 'vinaigrette'],
'soy_sauce': ['soy sauce', 'tamari'],
'juice': ['juice', 'orange juice', 'apple juice'],
'soda': ['soda', 'pop', 'cola', 'sprite', 'pepsi', 'coke'],
} }
def __init__(self) -> None: def __init__(self) -> None:
@ -548,13 +176,8 @@ class ExpirationPredictor:
product_name: str, product_name: str,
product_category: Optional[str] = None, product_category: Optional[str] = None,
tags: Optional[List[str]] = None, tags: Optional[List[str]] = None,
location: Optional[str] = None,
) -> Optional[str]: ) -> Optional[str]:
"""Determine category from product name, existing category, and tags. """Determine category from product name, existing category, and tags."""
location is used as a last-resort hint: unknown items in the freezer
default to frozen_foods rather than dry_goods.
"""
if product_category: if product_category:
cat = product_category.lower().strip() cat = product_category.lower().strip()
if cat in self.SHELF_LIFE: if cat in self.SHELF_LIFE:
@ -574,36 +197,21 @@ class ExpirationPredictor:
if any(kw in name for kw in keywords): if any(kw in name for kw in keywords):
return category return category
# Generic single-word fallbacks — checked after the keyword dict so
# multi-word phrases (e.g. "canned chicken") already matched above.
for words, fallback in [ for words, fallback in [
(['frozen'], 'frozen_foods'), (['meat', 'beef', 'pork', 'chicken'], 'meat'),
(['canned', 'tinned'], 'canned_goods'),
# bare 'chicken' / 'sausage' / 'ham' kept here so raw-meat names
# that don't appear in the specific keyword lists still resolve.
(['chicken', 'turkey'], 'poultry'),
(['sausage', 'ham', 'bacon'], 'pork'),
(['beef', 'steak'], 'beef'),
(['meat', 'pork'], 'meat'),
(['vegetable', 'veggie', 'produce'], 'vegetables'), (['vegetable', 'veggie', 'produce'], 'vegetables'),
(['fruit'], 'fruits'), (['fruit'], 'fruits'),
(['dairy'], 'dairy'), (['dairy'], 'dairy'),
(['frozen'], 'frozen_foods'),
]: ]:
if any(w in name for w in words): if any(w in name for w in words):
return fallback return fallback
# Location-aware final fallback: unknown item in a freezer → frozen_foods.
# This handles unlabelled frozen products (e.g. "Birthday Littles",
# "Pulled BBQ Crumbles") without requiring every brand name to be listed.
canon_loc = self._normalize_location(location or '')
if canon_loc == 'freezer':
return 'frozen_foods'
return 'dry_goods' return 'dry_goods'
def get_shelf_life_info(self, category: str, location: str) -> Optional[int]: def get_shelf_life_info(self, category: str, location: str) -> Optional[int]:
"""Shelf life in days for a given category + location, or None.""" """Shelf life in days for a given category + location, or None."""
return self._lookup_days(category, location) return self.SHELF_LIFE.get(category.lower().strip(), {}).get(location)
def list_categories(self) -> List[str]: def list_categories(self) -> List[str]:
return list(self.SHELF_LIFE.keys()) return list(self.SHELF_LIFE.keys())
@ -616,18 +224,8 @@ class ExpirationPredictor:
# ── Private helpers ─────────────────────────────────────────────────────── # ── Private helpers ───────────────────────────────────────────────────────
def _normalize_location(self, location: str) -> str:
"""Resolve location aliases to canonical names."""
loc = location.lower().strip()
return self.LOCATION_ALIASES.get(loc, loc)
def _lookup_days(self, category: Optional[str], location: str) -> Optional[int]: def _lookup_days(self, category: Optional[str], location: str) -> Optional[int]:
"""Pure deterministic lookup — no I/O. """Pure deterministic lookup — no I/O."""
Normalises location aliases (e.g. garage_freezer freezer) and uses
a context-aware fallback order so pantry items don't accidentally get
fridge shelf-life and vice versa.
"""
if not category: if not category:
return None return None
cat = category.lower().strip() cat = category.lower().strip()
@ -639,19 +237,13 @@ class ExpirationPredictor:
else: else:
return None return None
canon_loc = self._normalize_location(location) days = self.SHELF_LIFE[cat].get(location)
shelf = self.SHELF_LIFE[cat] if days is None:
for loc in ('fridge', 'pantry', 'freezer', 'cabinet'):
# Try the canonical location first, then work through the days = self.SHELF_LIFE[cat].get(loc)
# context-aware fallback chain for that location type.
fallback_order = self.LOCATION_FALLBACK.get(
canon_loc, (canon_loc, 'pantry', 'fridge', 'cabinet', 'freezer')
)
for loc in fallback_order:
days = shelf.get(loc)
if days is not None: if days is not None:
break
return days return days
return None
def _llm_predict_days( def _llm_predict_days(
self, self,

View file

@ -1,80 +0,0 @@
"""Heimdall cf-orch budget client.
Calls Heimdall's /orch/* endpoints to gate and record cf-orch usage for
lifetime/founders license holders. Always fails open on network errors
a Heimdall outage should never block the user.
"""
from __future__ import annotations
import logging
import os
import requests
log = logging.getLogger(__name__)
# Heimdall licensing server base URL; override via env for staging/self-hosted.
HEIMDALL_URL: str = os.environ.get("HEIMDALL_URL", "https://license.circuitforge.tech")
# Admin bearer token for /orch endpoints; requests go unauthenticated when unset.
HEIMDALL_ADMIN_TOKEN: str = os.environ.get("HEIMDALL_ADMIN_TOKEN", "")
def _headers() -> dict[str, str]:
    """Auth headers for Heimdall requests; empty when no admin token is set."""
    token = HEIMDALL_ADMIN_TOKEN
    return {"Authorization": f"Bearer {token}"} if token else {}
def check_orch_budget(key_display: str, product: str) -> dict:
    """Call POST /orch/check and return the response dict.

    On any error (network, auth, etc.) returns a permissive dict so the
    caller can proceed without blocking the user.
    """
    payload = {"key_display": key_display, "product": product}
    try:
        response = requests.post(
            f"{HEIMDALL_URL}/orch/check",
            json=payload,
            headers=_headers(),
            timeout=5,
        )
        if response.ok:
            return response.json()
        log.warning("Heimdall orch/check returned %s for key %s", response.status_code, key_display[:12])
    except Exception as exc:
        log.warning("Heimdall orch/check failed (fail-open): %s", exc)

    # Fail open — Heimdall outage must never block the user
    return {
        "allowed": True,
        "calls_used": 0,
        "calls_total": 0,
        "topup_calls": 0,
        "period_start": "",
        "resets_on": "",
    }
def get_orch_usage(key_display: str, product: str) -> dict:
    """Call GET /orch/usage and return the response dict.

    Returns zeros on error (non-blocking).
    """
    try:
        response = requests.get(
            f"{HEIMDALL_URL}/orch/usage",
            params={"key_display": key_display, "product": product},
            headers=_headers(),
            timeout=5,
        )
        if response.ok:
            return response.json()
        log.warning("Heimdall orch/usage returned %s", response.status_code)
    except Exception as exc:
        log.warning("Heimdall orch/usage failed: %s", exc)

    # Zeroed usage — callers treat this as "no data", never as a hard error.
    return {
        "calls_used": 0,
        "topup_calls": 0,
        "calls_total": 0,
        "period_start": "",
        "resets_on": "",
    }

View file

@ -1,140 +0,0 @@
"""Visual label capture service for unenriched products (kiwi#79).
Wraps the cf-core VisionRouter to extract structured nutrition data from a
photographed nutrition facts panel. When the VisionRouter is not yet wired
(NotImplementedError) the service falls back to a mock extraction so the
barcode scan flow can be exercised end-to-end in development.
JSON contract returned by the vision model (and mock):
{
"product_name": str | null,
"brand": str | null,
"serving_size_g": number | null,
"calories": number | null,
"fat_g": number | null,
"saturated_fat_g": number | null,
"carbs_g": number | null,
"sugar_g": number | null,
"fiber_g": number | null,
"protein_g": number | null,
"sodium_mg": number | null,
"ingredient_names": [str],
"allergens": [str],
"confidence": number (0.01.0)
}
"""
from __future__ import annotations
import json
import logging
import os
from typing import Any
log = logging.getLogger(__name__)
# Confidence below this threshold surfaces amber highlights in the UI.
REVIEW_THRESHOLD = 0.7

# Zero-confidence fallback returned whenever extraction fails or mock mode is
# active. Keys mirror the nutrition JSON contract in the module docstring.
_MOCK_EXTRACTION: dict[str, Any] = {
    "product_name": "Unknown Product",
    "brand": None,
    "serving_size_g": None,
    "calories": None,
    "fat_g": None,
    "saturated_fat_g": None,
    "carbs_g": None,
    "sugar_g": None,
    "fiber_g": None,
    "protein_g": None,
    "sodium_mg": None,
    "ingredient_names": [],
    "allergens": [],
    "confidence": 0.0,
}

# Vision-model prompt: demands strict JSON so _parse_extraction can json.loads
# the reply (fences are stripped there as a fallback).
_EXTRACTION_PROMPT = """You are reading a nutrition facts label photograph.
Extract the following fields as a JSON object with no extra text:
{
"product_name": <product name or null>,
"brand": <brand name or null>,
"serving_size_g": <serving size in grams as a number or null>,
"calories": <calories per serving as a number or null>,
"fat_g": <total fat grams or null>,
"saturated_fat_g": <saturated fat grams or null>,
"carbs_g": <total carbohydrates grams or null>,
"sugar_g": <sugars grams or null>,
"fiber_g": <dietary fiber grams or null>,
"protein_g": <protein grams or null>,
"sodium_mg": <sodium milligrams or null>,
"ingredient_names": [list of individual ingredients as strings],
"allergens": [list of allergens explicitly stated on label],
"confidence": <your confidence this extraction is correct, 0.0 to 1.0>
}
Use null for any field you cannot read clearly. Do not guess values.
Respond with JSON only."""
def extract_label(image_bytes: bytes) -> dict[str, Any]:
    """Run vision model extraction on raw label image bytes.

    Returns a dict matching the nutrition JSON contract above.
    Falls back to a zero-confidence mock if the VisionRouter is not yet
    implemented (stub) or if the model returns unparseable output.
    """
    # Allow unit tests to bypass the vision model entirely.
    if os.environ.get("KIWI_LABEL_CAPTURE_MOCK") == "1":
        log.debug("label_capture: mock mode active")
        return dict(_MOCK_EXTRACTION)

    try:
        from circuitforge_core.vision import caption as vision_caption

        vision_result = vision_caption(image_bytes, prompt=_EXTRACTION_PROMPT)
        return _parse_extraction(vision_result.caption or "")
    except Exception as exc:
        log.warning("label_capture: extraction failed (%s) — returning mock extraction", exc)
        return dict(_MOCK_EXTRACTION)
def _parse_extraction(raw: str) -> dict[str, Any]:
    """Parse the JSON string returned by the vision model.

    Strips markdown code fences if present and validates the basic shape.
    Contract keys the model omitted are filled from the mock defaults so
    downstream code can rely on every field being present.
    Returns the mock on any parse error.

    Args:
        raw: Raw text emitted by the vision model (possibly fenced JSON).

    Returns:
        A dict matching the nutrition JSON contract, with list fields
        guaranteed to be lists and ``confidence`` clamped to [0.0, 1.0].
    """
    text = raw.strip()
    if text.startswith("```"):
        # Strip ```json ... ``` fences
        lines = text.splitlines()
        text = "\n".join(lines[1:-1] if lines[-1].strip() == "```" else lines[1:])

    try:
        data = json.loads(text)
    except json.JSONDecodeError as exc:
        log.warning("label_capture: could not parse vision response: %s", exc)
        return dict(_MOCK_EXTRACTION)

    if not isinstance(data, dict):
        log.warning("label_capture: vision response is not a dict")
        return dict(_MOCK_EXTRACTION)

    # Fill any contract keys the model omitted so callers never KeyError;
    # parsed values win over the mock defaults.
    data = {**_MOCK_EXTRACTION, **data}

    # Normalise list fields — model may return None instead of []
    for list_key in ("ingredient_names", "allergens"):
        if not isinstance(data.get(list_key), list):
            data[list_key] = []

    # Clamp confidence to [0, 1]
    confidence = data.get("confidence")
    if not isinstance(confidence, (int, float)):
        confidence = 0.0
    data["confidence"] = max(0.0, min(1.0, float(confidence)))
    return data
def needs_review(extraction: dict[str, Any]) -> bool:
    """Return True when the extraction confidence is below REVIEW_THRESHOLD."""
    confidence = float(extraction.get("confidence", 0.0))
    return confidence < REVIEW_THRESHOLD

View file

@ -1 +0,0 @@
"""Meal planning service layer — no FastAPI imports (extraction-ready for cf-core)."""

View file

@ -1,108 +0,0 @@
# app/services/meal_plan/affiliates.py
"""Register Kiwi-specific affiliate programs and provide search URL builders.
Called once at API startup. Programs not yet in core.affiliates are registered
here. The actual affiliate IDs are read from environment variables at call
time, so the process can start before accounts are approved (plain URLs
returned when env vars are absent).
"""
from __future__ import annotations
from urllib.parse import quote_plus
from circuitforge_core.affiliates import AffiliateProgram, register_program, wrap_url
# ── URL builders ──────────────────────────────────────────────────────────────
def _walmart_search(url: str, affiliate_id: str) -> str:
sep = "&" if "?" in url else "?"
return f"{url}{sep}affil=apa&affiliateId={affiliate_id}"
def _target_search(url: str, affiliate_id: str) -> str:
sep = "&" if "?" in url else "?"
return f"{url}{sep}afid={affiliate_id}"
def _thrive_search(url: str, affiliate_id: str) -> str:
sep = "&" if "?" in url else "?"
return f"{url}{sep}raf={affiliate_id}"
def _misfits_search(url: str, affiliate_id: str) -> str:
sep = "&" if "?" in url else "?"
return f"{url}{sep}ref={affiliate_id}"
# ── Registration ──────────────────────────────────────────────────────────────
def register_kiwi_programs() -> None:
    """Register Kiwi retailer programs. Safe to call multiple times (idempotent)."""
    # (display name, retailer key, env var holding the affiliate id, URL builder)
    specs = (
        ("Walmart", "walmart", "WALMART_AFFILIATE_ID", _walmart_search),
        ("Target", "target", "TARGET_AFFILIATE_ID", _target_search),
        ("Thrive Market", "thrive", "THRIVE_AFFILIATE_ID", _thrive_search),
        ("Misfits Market", "misfits", "MISFITS_AFFILIATE_ID", _misfits_search),
    )
    for display_name, retailer_key, env_var, builder in specs:
        register_program(AffiliateProgram(
            name=display_name,
            retailer_key=retailer_key,
            env_var=env_var,
            build_url=builder,
        ))
# ── Search URL helpers ─────────────────────────────────────────────────────────
# Plain per-retailer search URL templates; {q} is the url-encoded ingredient.
_SEARCH_TEMPLATES: dict[str, str] = {
    "amazon": "https://www.amazon.com/s?k={q}",
    "instacart": "https://www.instacart.com/store/search_v3/term?term={q}",
    "walmart": "https://www.walmart.com/search?q={q}",
    "target": "https://www.target.com/s?searchTerm={q}",
    "thrive": "https://thrivemarket.com/search?q={q}",
    "misfits": "https://www.misfitsmarket.com/shop?search={q}",
}

# Retailer keys in display order, derived from the template table above.
KIWI_RETAILERS = list(_SEARCH_TEMPLATES.keys())
def get_retailer_links(ingredient_name: str) -> list[dict]:
    """Return affiliate-wrapped search links for *ingredient_name*.

    Returns a list of dicts: {"retailer": str, "label": str, "url": str}.
    Falls back to the plain search URL when no affiliate ID is configured.
    """
    encoded = quote_plus(ingredient_name)
    results: list[dict] = []
    for retailer_key, template in _SEARCH_TEMPLATES.items():
        search_url = template.format(q=encoded)
        try:
            final_url = wrap_url(search_url, retailer=retailer_key)
        except Exception:
            # No program / no affiliate id configured — plain URL is fine.
            final_url = search_url
        results.append({
            "retailer": retailer_key,
            "label": _label(retailer_key),
            "url": final_url,
        })
    return results
def _label(key: str) -> str:
return {
"amazon": "Amazon",
"instacart": "Instacart",
"walmart": "Walmart",
"target": "Target",
"thrive": "Thrive Market",
"misfits": "Misfits Market",
}.get(key, key.title())

View file

@ -1,91 +0,0 @@
# app/services/meal_plan/llm_planner.py
# BSL 1.1 — LLM feature
"""LLM-assisted full-week meal plan generation.
Returns suggestions for human review never writes to the DB directly.
The API endpoint presents the suggestions and waits for user approval
before calling store.upsert_slot().
Routing: pass a router from get_meal_plan_router() in llm_router.py.
Cloud: cf-text via cf-orch (3B-7B GGUF, ~2GB VRAM).
Local: LLMRouter (ollama / vllm / openai-compat per llm.yaml).
"""
from __future__ import annotations
import json
import logging
from dataclasses import dataclass
logger = logging.getLogger(__name__)
_PLAN_SYSTEM = """\
You are a practical meal planning assistant. Given a pantry inventory and
dietary preferences, suggest a week of dinners (or other configured meals).
Prioritise ingredients that are expiring soon. Prefer variety across the week.
Respect all dietary restrictions.
Respond with a JSON array only — no prose, no markdown fences.
Each item: {"day": 0-6, "meal_type": "dinner", "recipe_id": <int or null>, "suggestion": "<recipe name>"}
day 0 = Monday, day 6 = Sunday.
If you cannot match a known recipe_id, set recipe_id to null and provide a suggestion name.
"""


@dataclass(frozen=True)
class PlanSuggestion:
    """One suggested meal slot, pending user approval."""
    day: int                # 0 = Monday … 6 = Sunday
    meal_type: str          # e.g. "dinner"
    recipe_id: int | None   # corpus recipe id, or None when unmatched
    suggestion: str         # human-readable name


def generate_plan(
    pantry_items: list[str],
    meal_types: list[str],
    dietary_notes: str,
    router,
) -> list[PlanSuggestion]:
    """Return a list of PlanSuggestion for user review.

    Never writes to DB — caller must upsert slots after user approves.
    Returns an empty list if router is None or the response is unparseable.

    Args:
        pantry_items: Current pantry item names (only the first 50 are sent).
        meal_types: Meal types to plan for, e.g. ["dinner"].
        dietary_notes: Free-text dietary restrictions; may be empty.
        router: Object exposing .complete(system=, user=, max_tokens=, temperature=).
    """
    if router is None:
        return []

    pantry_text = "\n".join(f"- {item}" for item in pantry_items[:50])
    meal_text = ", ".join(meal_types)
    user_msg = (
        f"Meal types: {meal_text}\n"
        f"Dietary notes: {dietary_notes or 'none'}\n\n"
        f"Pantry (partial):\n{pantry_text}"
    )

    try:
        response = router.complete(
            system=_PLAN_SYSTEM,
            user=user_msg,
            max_tokens=512,
            temperature=0.7,
        )
        text = response.strip()
        if text.startswith("```"):
            # Models sometimes wrap output in ``` fences despite the prompt;
            # strip them so json.loads still succeeds (same approach as the
            # label-capture parser).
            lines = text.splitlines()
            text = "\n".join(lines[1:-1] if lines[-1].strip() == "```" else lines[1:])
        items = json.loads(text)
        suggestions = []
        for item in items:
            if not isinstance(item, dict):
                continue
            day = item.get("day")
            meal_type = item.get("meal_type", "dinner")
            if not isinstance(day, int) or day < 0 or day > 6:
                continue
            suggestions.append(PlanSuggestion(
                day=day,
                meal_type=meal_type,
                recipe_id=item.get("recipe_id"),
                suggestion=str(item.get("suggestion", "")),
            ))
        return suggestions
    except Exception as exc:
        logger.debug("LLM plan generation failed: %s", exc)
        return []

View file

@ -1,96 +0,0 @@
# app/services/meal_plan/llm_router.py
# BSL 1.1 — LLM feature
"""Provide a router-compatible LLM client for meal plan generation tasks.
Cloud (CF_ORCH_URL set):
Allocates a cf-text service via cf-orch (3B-7B GGUF, ~2GB VRAM).
Returns an _OrchTextRouter that wraps the cf-text HTTP endpoint
with a .complete(system, user, **kwargs) interface.
Local / self-hosted (no CF_ORCH_URL):
Returns an LLMRouter instance which tries ollama, vllm, or any
backend configured in ~/.config/circuitforge/llm.yaml.
Both paths expose the same interface so llm_timing.py and llm_planner.py
need no knowledge of the backend.
"""
from __future__ import annotations
import logging
import os
from contextlib import nullcontext
logger = logging.getLogger(__name__)
# cf-orch service name and VRAM budget for meal plan LLM tasks.
# These are lighter than recipe_llm (4.0 GB) — cf-text handles them.
_SERVICE_TYPE = "cf-text"   # cf-orch service name for meal plan LLM tasks
_TTL_S = 120.0              # allocation lease time-to-live, seconds
_CALLER = "kiwi-meal-plan"  # caller id reported to cf-orch for accounting
class _OrchTextRouter:
"""Thin adapter that makes a cf-text HTTP endpoint look like LLMRouter."""
def __init__(self, base_url: str) -> None:
self._base_url = base_url.rstrip("/")
def complete(
self,
system: str = "",
user: str = "",
max_tokens: int = 512,
temperature: float = 0.7,
**_kwargs,
) -> str:
from openai import OpenAI
client = OpenAI(base_url=self._base_url + "/v1", api_key="any")
messages = []
if system:
messages.append({"role": "system", "content": system})
messages.append({"role": "user", "content": user})
try:
model = client.models.list().data[0].id
except Exception:
model = "local"
resp = client.chat.completions.create(
model=model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
)
return resp.choices[0].message.content or ""
def get_meal_plan_router():
    """Return an (llm_client, allocation_context) pair for meal plan tasks.

    Tries cf-orch cf-text allocation first (cloud); falls back to LLMRouter
    (local ollama/vllm). The first element is None when no backend is
    available; the second is the allocation context the caller must exit
    when done (a pre-entered nullcontext in the fallback cases).
    """
    cf_orch_url = os.environ.get("CF_ORCH_URL")
    if cf_orch_url:
        try:
            from circuitforge_orch.client import CFOrchClient

            client = CFOrchClient(cf_orch_url)
            ctx = client.allocate(
                service=_SERVICE_TYPE,
                ttl_s=_TTL_S,
                caller=_CALLER,
            )
            alloc = ctx.__enter__()
            if alloc is not None:
                return _OrchTextRouter(alloc.url), ctx
            # Allocation came back empty — release the entered context before
            # falling back, otherwise the lease would leak.
            ctx.__exit__(None, None, None)
        except Exception as exc:
            logger.debug("cf-orch cf-text allocation failed, falling back to LLMRouter: %s", exc)

    # Local fallback: LLMRouter (ollama / vllm / openai-compat)
    try:
        from circuitforge_core.llm.router import LLMRouter

        return LLMRouter(), nullcontext(None)
    except FileNotFoundError:
        logger.debug("LLMRouter: no llm.yaml and no LLM env vars — meal plan LLM disabled")
        return None, nullcontext(None)
    except Exception as exc:
        logger.debug("LLMRouter init failed: %s", exc)
        return None, nullcontext(None)

View file

@ -1,65 +0,0 @@
# app/services/meal_plan/llm_timing.py
# BSL 1.1 — LLM feature
"""Estimate cook times for recipes missing corpus prep/cook time fields.
Used only when tier allows `meal_plan_llm_timing`. Falls back gracefully
when no LLM backend is available.
Routing: pass a router from get_meal_plan_router() in llm_router.py.
Cloud: cf-text via cf-orch (3B GGUF, ~2GB VRAM).
Local: LLMRouter (ollama / vllm / openai-compat per llm.yaml).
"""
from __future__ import annotations
import logging
logger = logging.getLogger(__name__)
# System prompt: demands a strict two-line integer reply that the parser
# below can split without regexes. "0 / 0" is the model's can't-estimate signal.
_TIMING_PROMPT = """\
You are a practical cook. Given a recipe name and its ingredients, estimate:
1. prep_time: minutes of active prep work (chopping, mixing, etc.)
2. cook_time: minutes of cooking (oven, stovetop, etc.)
Respond with ONLY two integers on separate lines:
prep_time
cook_time
If you cannot estimate, respond with:
0
0
"""


def estimate_timing(recipe_name: str, ingredients: list[str], router) -> tuple[int | None, int | None]:
    """Return (prep_minutes, cook_minutes) for a recipe using LLMRouter.

    Returns (None, None) if the router is unavailable or the response is
    unparseable. Never raises.

    Args:
        recipe_name: Name of the recipe.
        ingredients: List of raw ingredient strings from the corpus.
        router: An LLMRouter instance (from circuitforge_core.llm).
    """
    if router is None:
        return None, None

    listed = "\n".join(f"- {ing}" for ing in (ingredients or [])[:15])
    user_prompt = f"Recipe: {recipe_name}\n\nIngredients:\n{listed}"

    try:
        raw = router.complete(
            system=_TIMING_PROMPT,
            user=user_prompt,
            max_tokens=16,
            temperature=0.0,
        )
        parts = raw.strip().splitlines()
        prep = int(parts[0].strip()) if parts else 0
        cook = int(parts[1].strip()) if len(parts) > 1 else 0
    except Exception as exc:
        logger.debug("LLM timing estimation failed for %r: %s", recipe_name, exc)
        return None, None

    # "0 / 0" is the model's explicit can't-estimate signal.
    if prep == 0 and cook == 0:
        return None, None
    return prep or None, cook or None

View file

@ -1,26 +0,0 @@
# app/services/meal_plan/planner.py
"""Plan and slot orchestration — thin layer over Store.
No FastAPI imports. Provides helpers used by the API endpoint.
"""
from __future__ import annotations
from app.db.store import Store
from app.models.schemas.meal_plan import VALID_MEAL_TYPES
def create_plan(store: Store, week_start: str, meal_types: list[str]) -> dict:
    """Create a plan, filtering meal_types to valid values only."""
    filtered = [meal for meal in meal_types if meal in VALID_MEAL_TYPES]
    # Empty/invalid input falls back to the single default meal type.
    return store.create_meal_plan(week_start, filtered or ["dinner"])
def get_plan_with_slots(store: Store, plan_id: int) -> dict | None:
    """Return a plan row with its slots list attached, or None."""
    plan = store.get_meal_plan(plan_id)
    if plan is None:
        return None
    return dict(plan, slots=store.get_plan_slots(plan_id))

View file

@ -1,91 +0,0 @@
# app/services/meal_plan/prep_scheduler.py
"""Sequence prep tasks for a batch cooking session.
Pure function no DB or network calls. Sorts tasks by equipment priority
(oven first to maximise oven utilisation) then assigns sequence_order.
"""
from __future__ import annotations
from dataclasses import dataclass
# Lower value = earlier in the cook sequence; oven tasks sort first to
# maximise oven utilisation across the batch session.
_EQUIPMENT_PRIORITY = {"oven": 0, "stovetop": 1, "cold": 2, "no-heat": 3}
_DEFAULT_PRIORITY = 4  # unknown equipment sorts after all known kinds


@dataclass(frozen=True)
class PrepTask:
    """One sequenced task in a batch cooking session."""
    recipe_id: int | None         # corpus recipe id driving this task
    slot_id: int | None           # meal plan slot the task belongs to
    task_label: str               # human-readable label (usually recipe name)
    duration_minutes: int | None  # prep + cook minutes, None when unknown
    sequence_order: int           # 1-based position in the cook sequence
    equipment: str | None         # e.g. "oven" / "stovetop" / "cold" / "no-heat"
    is_parallel: bool = False     # defaults False; not set by build_prep_tasks
    notes: str | None = None
    user_edited: bool = False     # defaults False; not set by build_prep_tasks
def _total_minutes(recipe: dict) -> int | None:
prep = recipe.get("prep_time")
cook = recipe.get("cook_time")
if prep is None and cook is None:
return None
return (prep or 0) + (cook or 0)
def _equipment(recipe: dict) -> str | None:
# Corpus recipes don't have an explicit equipment field; use test helper
# field if present, otherwise infer from cook_time (long = oven heuristic).
if "_equipment" in recipe:
return recipe["_equipment"]
minutes = _total_minutes(recipe)
if minutes and minutes >= 45:
return "oven"
return "stovetop"
def build_prep_tasks(slots: list[dict], recipes: list[dict]) -> list[PrepTask]:
    """Return a sequenced list of PrepTask objects from plan slots + recipe rows.

    Algorithm:
      1. Build a recipe_id → recipe dict lookup.
      2. Create one task per slot that has a recipe assigned.
      3. Sort by equipment priority (oven first; stable sort keeps slot order).
      4. Assign contiguous sequence_order starting at 1.
    """
    if not slots or not recipes:
        return []

    by_id: dict[int, dict] = {row["id"]: row for row in recipes}

    pending: list[tuple[int, dict]] = []  # (priority, PrepTask kwargs)
    for slot in slots:
        rid = slot.get("recipe_id")
        if not rid:
            continue
        recipe = by_id.get(rid)
        if not recipe:
            continue
        equipment = _equipment(recipe)
        priority = _EQUIPMENT_PRIORITY.get(equipment or "", _DEFAULT_PRIORITY)
        pending.append((priority, {
            "recipe_id": rid,
            "slot_id": slot.get("id"),
            "task_label": recipe.get("name", f"Recipe {rid}"),
            "duration_minutes": _total_minutes(recipe),
            "equipment": equipment,
        }))

    pending.sort(key=lambda entry: entry[0])
    return [
        PrepTask(sequence_order=order, **kwargs)
        for order, (_, kwargs) in enumerate(pending, 1)
    ]

View file

@ -1,88 +0,0 @@
# app/services/meal_plan/shopping_list.py
"""Compute a shopping list from a meal plan and current pantry inventory.
Pure function no DB or network calls. Takes plain dicts from the Store
and returns GapItem dataclasses.
"""
from __future__ import annotations
import re
from dataclasses import dataclass, field
@dataclass(frozen=True)
class GapItem:
    """One shopping-list line: an ingredient either missing from or covered by the pantry."""
    ingredient_name: str         # normalised (lowercased, de-pluralised) name
    needed_raw: str | None  # first quantity token from recipe text, e.g. "300g"
    have_quantity: float | None  # pantry quantity when partial match
    have_unit: str | None        # pantry unit, only set when covered
    covered: bool                # True when the pantry already has this item
    retailer_links: list = field(default_factory=list)  # filled by API layer
_QUANTITY_RE = re.compile(r"^(\d+[\d./]*\s*(?:g|kg|ml|l|oz|lb|cup|cups|tsp|tbsp|tbsps|tsps)?)\b", re.I)
def _extract_quantity(ingredient_text: str) -> str | None:
"""Pull the leading quantity string from a raw ingredient line."""
m = _QUANTITY_RE.match(ingredient_text.strip())
return m.group(1).strip() if m else None
def _normalise(name: str) -> str:
"""Lowercase, strip possessives and plural -s for fuzzy matching."""
return name.lower().strip().rstrip("s")
def compute_shopping_list(
    recipes: list[dict],
    inventory: list[dict],
) -> tuple[list[GapItem], list[GapItem]]:
    """Return (gap_items, covered_items) for a list of recipe dicts + inventory dicts.

    Deduplicates by normalised ingredient name — the first recipe's quantity
    string wins when the same ingredient appears in multiple recipes.
    """
    if not recipes:
        return [], []

    # Pantry lookup: normalised name → inventory row
    pantry = {_normalise(row["name"]): row for row in inventory}

    # Unique ingredients in first-seen order, each with its first quantity token.
    needed: dict[str, str | None] = {}
    for recipe in recipes:
        names = recipe.get("ingredient_names") or []
        raw_lines = recipe.get("ingredients") or []
        for idx, ing_name in enumerate(names):
            key = _normalise(ing_name)
            if key in needed:
                continue
            raw = raw_lines[idx] if idx < len(raw_lines) else ""
            needed[key] = _extract_quantity(raw)

    gaps: list[GapItem] = []
    covered: list[GapItem] = []
    for norm_name, needed_raw in needed.items():
        pantry_row = pantry.get(norm_name)
        if pantry_row:
            covered.append(GapItem(
                ingredient_name=norm_name,
                needed_raw=needed_raw,
                have_quantity=pantry_row.get("quantity"),
                have_unit=pantry_row.get("unit"),
                covered=True,
            ))
        else:
            gaps.append(GapItem(
                ingredient_name=norm_name,
                needed_raw=needed_raw,
                have_quantity=None,
                have_unit=None,
                covered=False,
            ))
    return gaps, covered

View file

@ -1,60 +0,0 @@
"""Thin HTTP client for the cf-docuvision document vision service."""
from __future__ import annotations
import base64
from dataclasses import dataclass
from pathlib import Path
import httpx
@dataclass
class DocuvisionResult:
    """Structured result of a docuvision text-extraction call."""
    text: str                       # extracted text, "" when none returned
    confidence: float | None = None  # service-reported confidence, when present
    raw: dict | None = None          # full JSON payload for debugging
class DocuvisionClient:
    """Thin client for the cf-docuvision service."""

    def __init__(self, base_url: str) -> None:
        self._base_url = base_url.rstrip("/")

    @staticmethod
    def _encode(image_path: str | Path) -> str:
        """Read *image_path* and return its base64-encoded contents."""
        return base64.b64encode(Path(image_path).read_bytes()).decode()

    @staticmethod
    def _to_result(data: dict) -> DocuvisionResult:
        """Map a raw docuvision JSON payload onto a DocuvisionResult."""
        return DocuvisionResult(
            text=data.get("text", ""),
            confidence=data.get("confidence"),
            raw=data,
        )

    def extract_text(self, image_path: str | Path) -> DocuvisionResult:
        """Send an image to docuvision and return extracted text."""
        payload = {"image": self._encode(image_path)}
        with httpx.Client(timeout=30.0) as client:
            resp = client.post(f"{self._base_url}/extract", json=payload)
            resp.raise_for_status()
            return self._to_result(resp.json())

    async def extract_text_async(self, image_path: str | Path) -> DocuvisionResult:
        """Async version of extract_text."""
        payload = {"image": self._encode(image_path)}
        async with httpx.AsyncClient(timeout=30.0) as client:
            resp = await client.post(f"{self._base_url}/extract", json=payload)
            resp.raise_for_status()
            return self._to_result(resp.json())

View file

@ -8,7 +8,6 @@ OCR with understanding of receipt structure to extract structured JSON data.
import json import json
import logging import logging
import os
import re import re
from pathlib import Path from pathlib import Path
from typing import Dict, Any, Optional, List from typing import Dict, Any, Optional, List
@ -27,32 +26,6 @@ from app.core.config import settings
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def _try_docuvision(image_path: str | Path) -> str | None:
"""Try to extract text via cf-docuvision. Returns None if unavailable."""
cf_orch_url = os.environ.get("CF_ORCH_URL")
if not cf_orch_url:
return None
try:
from circuitforge_orch.client import CFOrchClient
from app.services.ocr.docuvision_client import DocuvisionClient
client = CFOrchClient(cf_orch_url)
with client.allocate(
service="cf-docuvision",
model_candidates=["cf-docuvision"],
ttl_s=60.0,
caller="kiwi-ocr",
) as alloc:
if alloc is None:
return None
doc_client = DocuvisionClient(alloc.url)
result = doc_client.extract_text(image_path)
return result.text if result.text else None
except Exception as exc:
logger.debug("cf-docuvision fast-path failed, falling back: %s", exc)
return None
class VisionLanguageOCR: class VisionLanguageOCR:
"""Vision-Language Model for receipt OCR and structured extraction.""" """Vision-Language Model for receipt OCR and structured extraction."""
@ -67,7 +40,7 @@ class VisionLanguageOCR:
self.processor = None self.processor = None
self.device = "cuda" if torch.cuda.is_available() and settings.USE_GPU else "cpu" self.device = "cuda" if torch.cuda.is_available() and settings.USE_GPU else "cpu"
self.use_quantization = use_quantization self.use_quantization = use_quantization
self.model_name = "Qwen/Qwen2.5-VL-7B-Instruct" self.model_name = "Qwen/Qwen2-VL-2B-Instruct"
logger.info(f"Initializing VisionLanguageOCR with device: {self.device}") logger.info(f"Initializing VisionLanguageOCR with device: {self.device}")
@ -139,18 +112,6 @@ class VisionLanguageOCR:
"warnings": [...] "warnings": [...]
} }
""" """
# Try docuvision fast path first (skips heavy local VLM if available)
docuvision_text = _try_docuvision(image_path)
if docuvision_text is not None:
parsed = self._parse_json_from_text(docuvision_text)
# Only accept the docuvision result if it yielded meaningful content;
# an empty-skeleton dict (no items, no merchant) means the text was
# garbled and we should fall through to the local VLM instead.
if parsed.get("items") or parsed.get("merchant"):
parsed["raw_text"] = docuvision_text
return self._validate_result(parsed)
# Parsed result has no meaningful content — fall through to local VLM
self._load_model() self._load_model()
try: try:

View file

@ -15,72 +15,63 @@ logger = logging.getLogger(__name__)
class OpenFoodFactsService: class OpenFoodFactsService:
""" """
Service for interacting with the Open*Facts family of databases. Service for interacting with the OpenFoodFacts API.
Primary: OpenFoodFacts (food products). OpenFoodFacts is a free, open database of food products with
Fallback chain: Open Beauty Facts (personal care) Open Products Facts (household). ingredients, allergens, and nutrition facts.
All three databases share the same API path and JSON format.
""" """
BASE_URL = "https://world.openfoodfacts.org/api/v2" BASE_URL = "https://world.openfoodfacts.org/api/v2"
USER_AGENT = "Kiwi/0.1.0 (https://circuitforge.tech)" USER_AGENT = "Kiwi/0.1.0 (https://circuitforge.tech)"
# Fallback databases tried in order when OFFs returns no match.
# Same API format as OFFs — only the host differs.
_FALLBACK_DATABASES = [
"https://world.openbeautyfacts.org/api/v2",
"https://world.openproductsfacts.org/api/v2",
]
async def _lookup_in_database(
    self, barcode: str, base_url: str, client: httpx.AsyncClient
) -> Optional[Dict[str, Any]]:
    """Query a single Open*Facts database for *barcode* using an existing client.

    Returns the parsed product dict on a hit, or None when the product is
    missing or any HTTP/parsing problem occurs; failures are logged at
    debug level and swallowed so callers can try the next database.
    """
    try:
        resp = await client.get(
            f"{base_url}/product/{barcode}.json",
            headers={"User-Agent": self.USER_AGENT},
            timeout=10.0,
        )
        if resp.status_code == 404:
            return None
        resp.raise_for_status()
        payload = resp.json()
        if payload.get("status") == 1:
            return self._parse_product_data(payload, barcode)
        return None
    except httpx.HTTPError as e:
        logger.debug("HTTP error for %s at %s: %s", barcode, base_url, e)
        return None
    except Exception as e:
        logger.debug("Lookup failed for %s at %s: %s", barcode, base_url, e)
        return None
async def lookup_product(self, barcode: str) -> Optional[Dict[str, Any]]: async def lookup_product(self, barcode: str) -> Optional[Dict[str, Any]]:
""" """
Look up a product by barcode, trying OFFs then fallback databases. Look up a product by barcode in the OpenFoodFacts database.
A single httpx.AsyncClient is created for the whole lookup chain so that
connection pooling and TLS session reuse apply across all database attempts.
Args: Args:
barcode: UPC/EAN barcode (8-13 digits) barcode: UPC/EAN barcode (8-13 digits)
Returns: Returns:
Dictionary with product information, or None if not found in any database. Dictionary with product information, or None if not found
Example response:
{
"name": "Organic Milk",
"brand": "Horizon",
"categories": ["Dairy", "Milk"],
"image_url": "https://...",
"nutrition_data": {...},
"raw_data": {...} # Full API response
}
""" """
try:
async with httpx.AsyncClient() as client: async with httpx.AsyncClient() as client:
result = await self._lookup_in_database(barcode, self.BASE_URL, client) url = f"{self.BASE_URL}/product/{barcode}.json"
if result:
return result
for db_url in self._FALLBACK_DATABASES: response = await client.get(
result = await self._lookup_in_database(barcode, db_url, client) url,
if result: headers={"User-Agent": self.USER_AGENT},
logger.info("Barcode %s found in fallback database: %s", barcode, db_url) timeout=10.0,
return result )
logger.info("Barcode %s not found in any Open*Facts database", barcode) if response.status_code == 404:
logger.info(f"Product not found in OpenFoodFacts: {barcode}")
return None
response.raise_for_status()
data = response.json()
if data.get("status") != 1:
logger.info(f"Product not found in OpenFoodFacts: {barcode}")
return None
return self._parse_product_data(data, barcode)
except httpx.HTTPError as e:
logger.error(f"HTTP error looking up barcode {barcode}: {e}")
return None
except Exception as e:
logger.error(f"Error looking up barcode {barcode}: {e}")
return None return None
def _parse_product_data(self, data: Dict[str, Any], barcode: str) -> Dict[str, Any]: def _parse_product_data(self, data: Dict[str, Any], barcode: str) -> Dict[str, Any]:
@ -123,9 +114,6 @@ class OpenFoodFactsService:
allergens = product.get("allergens_tags", []) allergens = product.get("allergens_tags", [])
labels = product.get("labels_tags", []) labels = product.get("labels_tags", [])
# Pack size detection: prefer explicit unit_count, fall back to serving count
pack_quantity, pack_unit = self._extract_pack_size(product)
return { return {
"name": name, "name": name,
"brand": brand, "brand": brand,
@ -136,47 +124,9 @@ class OpenFoodFactsService:
"nutrition_data": nutrition_data, "nutrition_data": nutrition_data,
"allergens": allergens, "allergens": allergens,
"labels": labels, "labels": labels,
"pack_quantity": pack_quantity,
"pack_unit": pack_unit,
"raw_data": product, # Store full response for debugging "raw_data": product, # Store full response for debugging
} }
def _extract_pack_size(self, product: Dict[str, Any]) -> tuple[float | None, str | None]:
"""Return (quantity, unit) for multi-pack products, or (None, None).
OFFs fields tried in order:
1. `number_of_units` (explicit count, highest confidence)
2. `serving_quantity` + `product_quantity_unit` (e.g. 6 x 150g yoghurt)
3. Parse `quantity` string like "4 x 113 g" or "6 pack"
Returns None, None when data is absent, ambiguous, or single-unit.
"""
import re
# Field 1: explicit unit count
unit_count = product.get("number_of_units")
if unit_count:
try:
n = float(unit_count)
if n > 1:
return n, product.get("serving_size_unit") or "unit"
except (ValueError, TypeError):
pass
# Field 2: parse quantity string for "N x ..." pattern
qty_str = product.get("quantity", "")
if qty_str:
m = re.match(r"^(\d+(?:\.\d+)?)\s*[xX×]\s*", qty_str.strip())
if m:
n = float(m.group(1))
if n > 1:
# Try to get a sensible sub-unit label from the rest
rest = qty_str[m.end():].strip()
unit_label = re.sub(r"[\d.,\s]+", "", rest).strip()[:20] or "unit"
return n, unit_label
return None, None
def _extract_nutrition_data(self, product: Dict[str, Any]) -> Dict[str, Any]: def _extract_nutrition_data(self, product: Dict[str, Any]) -> Dict[str, Any]:
""" """
Extract nutrition facts from product data. Extract nutrition facts from product data.

File diff suppressed because it is too large Load diff

Some files were not shown because too many files have changed in this diff Show more