circuitforge-core/pyproject.toml
pyr0ball 8c1daf3b6c
Some checks are pending
CI / test (push) Waiting to run
Mirror / mirror (push) Waiting to run
feat: cf-vision managed service (#43)
SigLIP so400m-patch14-384 as default backend (classify + embed, ~1.4 GB VRAM).
VLM backend (moondream2, LLaVA, Qwen-VL, etc.) as callable alternative for
caption generation and VQA.  Follows the same factory/Protocol/mock pattern
as cf-stt and cf-tts.

New module: circuitforge_core.vision
- backends/base.py  — VisionBackend Protocol, VisionResult, make_vision_backend()
- backends/mock.py  — MockVisionBackend (no GPU, deterministic)
- backends/siglip.py — SigLIPBackend: sigmoid zero-shot classify + L2 embed
- backends/vlm.py   — VLMBackend: AutoModelForVision2Seq caption + prompt classify
- __init__.py       — process singleton; classify(), embed(), caption(), make_backend()
- app.py            — FastAPI service (port 8006): /health /classify /embed /caption

Backend selection: CF_VISION_BACKEND=siglip|vlm, auto-detected from model path.
VLM backend: supports_embed=False, caption()/classify() only.
SigLIP backend: supports_caption=False, classify()/embed() only.

52 new tests, 385 total passing. Closes #43.
2026-04-09 06:53:43 -07:00

88 lines
1.7 KiB
TOML

[build-system]
# setuptools>=68 gives stable PEP 621 metadata support.
requires = ["setuptools>=68"]
build-backend = "setuptools.build_meta"

[project]
name = "circuitforge-core"
version = "0.9.0"
# NOTE(review): "(MIT)" is embedded in the description; consider a
# dedicated `license` field instead — confirm the intended license.
description = "Shared scaffold for CircuitForge products (MIT)"
requires-python = ">=3.11"
# Core runtime dependencies only; heavyweight ML stacks live in the
# optional extras below. Sorted alphabetically (PEP 508 strings).
dependencies = [
    "openai>=1.0",
    "pyyaml>=6.0",
    "requests>=2.31",
]

[project.optional-dependencies]
# Feature extras. Service extras build on their backend extra through a
# self-referential requirement ("circuitforge-core[...]"); each array
# keeps that base requirement first, then the rest alphabetically.
manage = [
    "platformdirs>=4.0",
    "typer[all]>=0.12",
]
text-llamacpp = [
    "llama-cpp-python>=0.2.0",
]
text-transformers = [
    "accelerate>=0.27",
    "torch>=2.0",
    "transformers>=4.40",
]
text-transformers-4bit = [
    "circuitforge-core[text-transformers]",
    "bitsandbytes>=0.43",
]
stt-faster-whisper = [
    "faster-whisper>=1.0",
]
stt-service = [
    "circuitforge-core[stt-faster-whisper]",
    "fastapi>=0.110",
    "python-multipart>=0.0.9",
    "uvicorn[standard]>=0.29",
]
tts-chatterbox = [
    "chatterbox-tts>=0.1",
    "torchaudio>=2.0",
]
tts-service = [
    "circuitforge-core[tts-chatterbox]",
    "fastapi>=0.110",
    "python-multipart>=0.0.9",
    "uvicorn[standard]>=0.29",
]
vision-siglip = [
    "Pillow>=10.0",
    "torch>=2.0",
    "transformers>=4.40",
]
vision-vlm = [
    "accelerate>=0.27",
    "Pillow>=10.0",
    "torch>=2.0",
    "transformers>=4.40",
]
vision-service = [
    "circuitforge-core[vision-siglip]",
    "fastapi>=0.110",
    "python-multipart>=0.0.9",
    "uvicorn[standard]>=0.29",
]
dev = [
    "circuitforge-core[manage]",
    "fastapi>=0.110",
    "httpx>=0.27",
    "mypy>=1.10",
    "pytest>=8.0",
    "pytest-asyncio>=0.23",
    "ruff>=0.4",
]

[project.scripts]
# Console entry point: `cf-manage` invokes the Typer app object.
cf-manage = "circuitforge_core.manage.cli:app"

[tool.setuptools.packages.find]
# Discover only the circuitforge_core package tree (and subpackages);
# keeps tests/ and any stray top-level dirs out of the wheel.
where = ["."]
include = ["circuitforge_core*"]

[tool.pytest.ini_options]
testpaths = ["tests"]
# pytest-asyncio: collect and run `async def` tests automatically,
# without a per-test @pytest.mark.asyncio marker.
asyncio_mode = "auto"