feat(mcp): Snipe MCP server for Claude Code integration (#27)
Three tools: snipe_search (GPU-scored trust-ranked), snipe_enrich (deep BTF scraping), snipe_save (persist search to Snipe UI). GPU inference scoring uses VRAM + arch tier weighted composite. LLM-condensed output trims verbose listing dicts to trust/price/GPU/url. Configure via ~/.claude.json with SNIPE_API_URL env var pointing at local or cloud API.
This commit is contained in:
parent
fb81422c54
commit
c93466c037
4 changed files with 515 additions and 0 deletions
0
app/mcp/__init__.py
Normal file
0
app/mcp/__init__.py
Normal file
110
app/mcp/formatters.py
Normal file
110
app/mcp/formatters.py
Normal file
|
|
@ -0,0 +1,110 @@
|
|||
"""Condense Snipe API search results into LLM-friendly format.
|
||||
|
||||
Raw Snipe responses are verbose — full listing dicts, nested seller objects,
|
||||
redundant fields. This module trims to what an LLM needs for reasoning:
|
||||
title, price, market delta, trust summary, GPU inference score, url.
|
||||
|
||||
Results are sorted by a composite key: trust × gpu_inference_score / price.
|
||||
This surfaces high-trust, VRAM-rich, underpriced boards at the top.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import Any
|
||||
|
||||
from app.mcp.gpu_scoring import parse_gpu, score_gpu
|
||||
|
||||
|
||||
def format_results(
    response: dict[str, Any],
    vram_weight: float = 0.6,
    arch_weight: float = 0.4,
    top_n: int = 20,
) -> dict[str, Any]:
    """Return a condensed, LLM-ready summary of a Snipe search response.

    Each listing is trimmed to the fields an LLM needs for reasoning, and rows
    are ranked best-value-first by trust × GPU inference score / price.

    Args:
        response: Raw /api/search payload; keys read are ``listings``,
            ``trust_scores``, ``sellers``, ``market_price``, ``adapter_used``.
        vram_weight: Passed through to GPU scoring (VRAM axis weight).
        arch_weight: Passed through to GPU scoring (architecture axis weight).
        top_n: Maximum rows returned; ``total_found`` still reports all matches.
    """
    listings: list[dict] = response.get("listings", [])
    trust_map: dict = response.get("trust_scores", {})
    seller_map: dict = response.get("sellers", {})
    market_price: float | None = response.get("market_price")

    # Build (sort_key, row) pairs; keeping the key outside the row dict means
    # it can never leak into the tool output.
    ranked: list[tuple[float, dict]] = []
    for listing in listings:
        lid = listing.get("platform_listing_id", "")
        title = listing.get("title", "")
        price = float(listing.get("price") or 0)
        trust = trust_map.get(lid, {})
        seller_id = listing.get("seller_platform_id", "")
        seller = seller_map.get(seller_id, {})

        gpu_info = _gpu_info(title, vram_weight, arch_weight)
        trust_score = trust.get("composite_score", 0) or 0
        inference_score = gpu_info["inference_score"] if gpu_info else 0.0

        row = {
            "id": lid,
            "title": title,
            "price": price,
            "vs_market": _vs_market(price, market_price),
            "trust_score": trust_score,
            "trust_partial": bool(trust.get("score_is_partial")),
            "red_flags": _parse_flags(trust.get("red_flags_json", "[]")),
            # snipe_enrich requires the seller platform ID; previously only
            # derived seller stats were emitted, so enrichment could not be
            # driven from a search result.
            "seller_id": seller_id,
            "seller_age_days": seller.get("account_age_days"),
            "seller_feedback": seller.get("feedback_count"),
            "gpu": gpu_info,
            "url": listing.get("url", ""),
        }
        ranked.append((_composite_key(trust_score, inference_score, price), row))

    # Stable sort on the key alone preserves input order among ties.
    ranked.sort(key=lambda pair: pair[0], reverse=True)
    condensed = [row for _, row in ranked]

    return {
        "total_found": len(listings),
        "showing": min(top_n, len(condensed)),
        "market_price": market_price,
        "adapter": response.get("adapter_used"),
        # Counted over ALL matches, not just the top_n shown.
        "no_gpu_detected": sum(1 for row in condensed if row["gpu"] is None),
        "results": condensed[:top_n],
    }
|
||||
|
||||
|
||||
def _gpu_info(title: str, vram_weight: float, arch_weight: float) -> dict | None:
    """Parse a GPU model from *title* and score it; None when none recognized."""
    spec = parse_gpu(title)
    if spec is None:
        return None
    scored = score_gpu(spec, vram_weight, arch_weight)
    return {
        "model": spec.model,
        "vram_gb": spec.vram_gb,
        "arch": spec.arch_name,
        "vendor": spec.vendor,
        "vram_score": scored.vram_score,
        "arch_score": scored.arch_score,
        "inference_score": scored.inference_score,
    }
|
||||
|
||||
|
||||
def _vs_market(price: float, market_price: float | None) -> str | None:
|
||||
if not market_price or price <= 0:
|
||||
return None
|
||||
delta_pct = ((market_price - price) / market_price) * 100
|
||||
if delta_pct >= 0:
|
||||
return f"{delta_pct:.0f}% below market (${market_price:.0f} median)"
|
||||
return f"{abs(delta_pct):.0f}% above market (${market_price:.0f} median)"
|
||||
|
||||
|
||||
def _composite_key(trust_score: float, inference_score: float, price: float) -> float:
    """Ranking key: higher = better value; non-positive price/trust pins to 0."""
    if trust_score <= 0 or price <= 0:
        return 0.0
    # Listings with no detected GPU get a neutral 50 so they are ranked on
    # trust and price rather than buried at the bottom.
    gpu_factor = inference_score if inference_score else 50.0
    return trust_score * gpu_factor / price
|
||||
|
||||
|
||||
def _parse_flags(flags_json: str) -> list[str]:
    """Decode a JSON red-flags payload, tolerating malformed or non-list input.

    Trust rows store red flags as a JSON array string. Anything that fails to
    parse — or parses to something other than a list (e.g. '"oops"' or '{}',
    which the old ``or []`` fallback passed through unchanged) — yields an
    empty list so callers can iterate unconditionally.
    """
    try:
        flags = json.loads(flags_json)
    except (ValueError, TypeError):
        # ValueError covers json.JSONDecodeError; TypeError covers None/non-str.
        return []
    return flags if isinstance(flags, list) else []
|
||||
143
app/mcp/gpu_scoring.py
Normal file
143
app/mcp/gpu_scoring.py
Normal file
|
|
@ -0,0 +1,143 @@
|
|||
"""GPU architecture and VRAM scoring for laptop mainboard inference-value ranking.
|
||||
|
||||
Parses GPU model names from eBay listing titles and scores them on two axes:
|
||||
- vram_score: linear 0–100, anchored at 24 GB = 100
|
||||
- arch_score: linear 0–100, architecture tier 1–5 (5 = newest)
|
||||
|
||||
inference_score = (vram_score × vram_weight + arch_score × arch_weight)
|
||||
/ (vram_weight + arch_weight)
|
||||
|
||||
Patterns are matched longest-first to prevent "RTX 3070" matching before "RTX 3070 Ti".
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class GpuSpec:
    """Static description of one known laptop GPU model (entries live in _GPU_DB)."""

    model: str  # canonical name, e.g. "RTX 3070 Ti"; also the regex source
    vram_gb: int  # VRAM of the common laptop variant, in GB
    arch_tier: int  # 1–5; 5 = newest generation
    arch_name: str  # human-readable, e.g. "Ampere"
    vendor: str  # "nvidia" | "amd" | "intel"
|
||||
|
||||
|
||||
@dataclass
class GpuMatch:
    """A matched GPU plus its 0–100 component scores and weighted composite."""

    spec: GpuSpec  # the database entry that matched
    vram_score: float  # 0–100, linear in VRAM (24 GB anchors 100)
    arch_score: float  # 0–100, linear in arch tier (tier 5 = 100)
    inference_score: float  # weighted blend of vram_score and arch_score
|
||||
|
||||
|
||||
# ── GPU database ──────────────────────────────────────────────────────────────
# Laptop VRAM often differs from desktop; using common laptop variants.
# Listed longest-name-first within each family to guide sort order.
# NOTE(review): _build_patterns() re-sorts by name length anyway, so in-file
# order only breaks ties between equal-length model names.

_GPU_DB: list[GpuSpec] = [
    # NVIDIA Ada Lovelace — tier 5
    GpuSpec("RTX 4090", 16, 5, "Ada Lovelace", "nvidia"),
    GpuSpec("RTX 4080", 12, 5, "Ada Lovelace", "nvidia"),
    GpuSpec("RTX 4070 Ti", 12, 5, "Ada Lovelace", "nvidia"),
    GpuSpec("RTX 4070", 8, 5, "Ada Lovelace", "nvidia"),
    GpuSpec("RTX 4060 Ti", 8, 5, "Ada Lovelace", "nvidia"),
    GpuSpec("RTX 4060", 8, 5, "Ada Lovelace", "nvidia"),
    GpuSpec("RTX 4050", 6, 5, "Ada Lovelace", "nvidia"),
    # NVIDIA Ampere — tier 4
    GpuSpec("RTX 3090", 24, 4, "Ampere", "nvidia"),  # rare laptop variant
    GpuSpec("RTX 3080 Ti", 16, 4, "Ampere", "nvidia"),
    GpuSpec("RTX 3080", 8, 4, "Ampere", "nvidia"),  # most laptop 3080s = 8 GB
    GpuSpec("RTX 3070 Ti", 8, 4, "Ampere", "nvidia"),
    GpuSpec("RTX 3070", 8, 4, "Ampere", "nvidia"),
    GpuSpec("RTX 3060", 6, 4, "Ampere", "nvidia"),
    GpuSpec("RTX 3050 Ti", 4, 4, "Ampere", "nvidia"),
    GpuSpec("RTX 3050", 4, 4, "Ampere", "nvidia"),
    # NVIDIA Turing — tier 3
    GpuSpec("RTX 2080", 8, 3, "Turing", "nvidia"),
    GpuSpec("RTX 2070", 8, 3, "Turing", "nvidia"),
    GpuSpec("RTX 2060", 6, 3, "Turing", "nvidia"),
    GpuSpec("GTX 1660 Ti", 6, 3, "Turing", "nvidia"),
    GpuSpec("GTX 1660", 6, 3, "Turing", "nvidia"),
    GpuSpec("GTX 1650 Ti", 4, 3, "Turing", "nvidia"),
    GpuSpec("GTX 1650", 4, 3, "Turing", "nvidia"),
    # NVIDIA Pascal — tier 2
    GpuSpec("GTX 1080", 8, 2, "Pascal", "nvidia"),
    GpuSpec("GTX 1070", 8, 2, "Pascal", "nvidia"),
    GpuSpec("GTX 1060", 6, 2, "Pascal", "nvidia"),
    GpuSpec("GTX 1050 Ti", 4, 2, "Pascal", "nvidia"),
    GpuSpec("GTX 1050", 4, 2, "Pascal", "nvidia"),
    # AMD RDNA3 — tier 5
    GpuSpec("RX 7900M", 16, 5, "RDNA3", "amd"),
    GpuSpec("RX 7700S", 8, 5, "RDNA3", "amd"),
    GpuSpec("RX 7600M XT", 8, 5, "RDNA3", "amd"),
    GpuSpec("RX 7600S", 8, 5, "RDNA3", "amd"),
    GpuSpec("RX 7600M", 8, 5, "RDNA3", "amd"),
    # AMD RDNA2 — tier 4
    GpuSpec("RX 6850M XT", 12, 4, "RDNA2", "amd"),
    GpuSpec("RX 6800S", 12, 4, "RDNA2", "amd"),
    GpuSpec("RX 6800M", 12, 4, "RDNA2", "amd"),
    GpuSpec("RX 6700S", 10, 4, "RDNA2", "amd"),
    GpuSpec("RX 6700M", 10, 4, "RDNA2", "amd"),
    GpuSpec("RX 6650M", 8, 4, "RDNA2", "amd"),
    GpuSpec("RX 6600S", 8, 4, "RDNA2", "amd"),
    GpuSpec("RX 6600M", 8, 4, "RDNA2", "amd"),
    GpuSpec("RX 6500M", 4, 4, "RDNA2", "amd"),
    # AMD RDNA1 — tier 3
    GpuSpec("RX 5700M", 8, 3, "RDNA1", "amd"),
    GpuSpec("RX 5600M", 6, 3, "RDNA1", "amd"),
    GpuSpec("RX 5500M", 4, 3, "RDNA1", "amd"),
    # Intel Arc Alchemist — tier 4 (improving ROCm/IPEX-LLM support)
    GpuSpec("Arc A770M", 16, 4, "Alchemist", "intel"),
    GpuSpec("Arc A550M", 8, 4, "Alchemist", "intel"),
    GpuSpec("Arc A370M", 4, 4, "Alchemist", "intel"),
    GpuSpec("Arc A350M", 4, 4, "Alchemist", "intel"),
]
|
||||
|
||||
|
||||
def _build_patterns() -> list[tuple[re.Pattern[str], GpuSpec]]:
    """Compile one case-insensitive regex per DB entry, longest model name first.

    Longest-first ordering prevents prefix shadowing ("RTX 3070" must not win
    over "RTX 3070 Ti"). Spaces in model names become an optional space/hyphen
    so "RTX3070" and "RTX-3070" also match.
    """
    by_length = sorted(_GPU_DB, key=lambda spec: len(spec.model), reverse=True)
    compiled = []
    for spec in by_length:
        pattern = re.escape(spec.model).replace(r"\ ", r"[\s\-]?")
        compiled.append((re.compile(pattern, re.IGNORECASE), spec))
    return compiled
|
||||
|
||||
|
||||
# Compiled once at import time; parse_gpu() scans these in order.
_PATTERNS: list[tuple[re.Pattern[str], GpuSpec]] = _build_patterns()
|
||||
|
||||
|
||||
def parse_gpu(title: str) -> GpuSpec | None:
    """Return the spec of the first GPU model found in *title*, or None.

    Patterns are pre-sorted longest-first, so the most specific model wins.
    """
    return next(
        (spec for pattern, spec in _PATTERNS if pattern.search(title)),
        None,
    )
|
||||
|
||||
|
||||
def score_gpu(spec: GpuSpec, vram_weight: float, arch_weight: float) -> GpuMatch:
    """Compute normalized 0–100 inference-value scores for *spec*.

    vram_score is linear with 24 GB anchoring 100 (capped); arch_score maps
    tiers 1..5 onto 0..100. inference_score is the weight-normalized blend of
    the two, or 0.0 when the weights sum to a non-positive value (guards
    against division by zero).
    """
    vram_score = min((spec.vram_gb / 24.0) * 100.0, 100.0)
    arch_score = ((spec.arch_tier - 1) / 4.0) * 100.0

    total = vram_weight + arch_weight
    blended = 0.0
    if total > 0:
        blended = (vram_score * vram_weight + arch_score * arch_weight) / total

    return GpuMatch(
        spec=spec,
        vram_score=round(vram_score, 1),
        arch_score=round(arch_score, 1),
        inference_score=round(blended, 1),
    )
|
||||
262
app/mcp/server.py
Normal file
262
app/mcp/server.py
Normal file
|
|
@ -0,0 +1,262 @@
|
|||
"""Snipe MCP Server — eBay search with trust scoring and GPU inference-value ranking.
|
||||
|
||||
Exposes three tools to Claude:
|
||||
snipe_search — search eBay via Snipe, GPU-scored and trust-ranked
|
||||
snipe_enrich — deep seller/listing enrichment for a specific result
|
||||
snipe_save — persist a productive search for ongoing monitoring
|
||||
|
||||
Run with:
|
||||
python -m app.mcp.server
|
||||
(from /Library/Development/CircuitForge/snipe with cf conda env active)
|
||||
|
||||
Configure in Claude Code ~/.claude.json:
|
||||
"snipe": {
|
||||
"command": "/devl/miniconda3/envs/cf/bin/python",
|
||||
"args": ["-m", "app.mcp.server"],
|
||||
"cwd": "/Library/Development/CircuitForge/snipe",
|
||||
"env": { "SNIPE_API_URL": "http://localhost:8510" }
|
||||
}
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
|
||||
import httpx
|
||||
from mcp.server import Server
|
||||
from mcp.server.stdio import stdio_server
|
||||
from mcp.types import TextContent, Tool
|
||||
|
||||
# Base URL of the Snipe HTTP API; override via SNIPE_API_URL for cloud deploys.
_SNIPE_API = os.environ.get("SNIPE_API_URL", "http://localhost:8510")
# Request timeout in seconds — generous because enrichment scrapes take ~20 s+.
_TIMEOUT = 120.0

# MCP server instance; the tool handlers below register against it.
server = Server("snipe")
|
||||
|
||||
|
||||
@server.list_tools()
async def list_tools() -> list[Tool]:
    """Advertise the three Snipe tools with their JSON-schema inputs."""
    return [
        # ── snipe_search: GPU-scored, trust-ranked eBay search ────────────────
        Tool(
            name="snipe_search",
            description=(
                "Search eBay listings via Snipe. Returns results condensed for LLM reasoning, "
                "sorted by composite value: trust_score × gpu_inference_score / price. "
                "GPU inference_score weights VRAM and architecture tier — tune with vram_weight/arch_weight. "
                "Use must_include_mode='groups' with pipe-separated OR alternatives for broad GPU coverage "
                "(e.g. 'rtx 3060|rtx 3070|rtx 3080'). "
                "Laptop Motherboard category ID: 177946."
            ),
            inputSchema={
                "type": "object",
                "required": ["query"],
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "Base eBay search keywords, e.g. 'laptop motherboard'",
                    },
                    "must_include": {
                        "type": "string",
                        "description": (
                            "Comma-separated AND groups; use | for OR within a group. "
                            "E.g. 'rtx 3060|rtx 3070|rx 6700m, 8gb|12gb|16gb'"
                        ),
                    },
                    "must_include_mode": {
                        "type": "string",
                        "enum": ["all", "any", "groups"],
                        "default": "groups",
                        "description": "groups: pipe=OR comma=AND. Recommended for multi-GPU searches.",
                    },
                    "must_exclude": {
                        "type": "string",
                        "description": (
                            "Comma-separated terms to exclude. "
                            "Suggested: 'broken,cracked,no post,for parts,parts only,untested,"
                            "lcd,screen,chassis,housing,bios locked'"
                        ),
                    },
                    # Zero means "unset" — _search drops zero/empty params entirely.
                    "max_price": {
                        "type": "number",
                        "default": 0,
                        "description": "Max price USD (0 = no limit)",
                    },
                    "min_price": {
                        "type": "number",
                        "default": 0,
                        "description": "Min price USD (0 = no limit)",
                    },
                    "pages": {
                        "type": "integer",
                        "default": 2,
                        "description": "Pages of eBay results to fetch (1 page ≈ 50 listings)",
                    },
                    "category_id": {
                        "type": "string",
                        "default": "",
                        "description": (
                            "eBay category ID. "
                            "177946 = Laptop Motherboards & System Boards. "
                            "27386 = Graphics Cards (PCIe, for price comparison). "
                            "Leave empty to search all categories."
                        ),
                    },
                    # vram_weight/arch_weight flow through to GPU scoring in formatters.
                    "vram_weight": {
                        "type": "number",
                        "default": 0.6,
                        "description": (
                            "0–1. Weight of VRAM in GPU inference score. "
                            "Higher = VRAM is primary ranking factor. "
                            "Use 1.0 to rank purely by VRAM (ignores arch generation)."
                        ),
                    },
                    "arch_weight": {
                        "type": "number",
                        "default": 0.4,
                        "description": (
                            "0–1. Weight of architecture generation in GPU inference score. "
                            "Higher = prefer newer GPU arch (Ada > Ampere > Turing etc.). "
                            "Use 0.0 to ignore arch and rank purely by VRAM."
                        ),
                    },
                    "top_n": {
                        "type": "integer",
                        "default": 20,
                        "description": "Max results to return after sorting",
                    },
                },
            },
        ),
        # ── snipe_enrich: slow deep-dive on one seller + listing ─────────────
        Tool(
            name="snipe_enrich",
            description=(
                "Deep-dive enrichment for a specific seller + listing. "
                "Runs BTF scraping and category history to fill partial trust scores (~20s). "
                "Use when snipe_search returns trust_partial=true on a promising listing."
            ),
            inputSchema={
                "type": "object",
                "required": ["seller_id", "listing_id"],
                "properties": {
                    "seller_id": {
                        "type": "string",
                        "description": "eBay seller platform ID (from snipe_search result seller_id field)",
                    },
                    "listing_id": {
                        "type": "string",
                        "description": "eBay listing platform ID (from snipe_search result id field)",
                    },
                    "query": {
                        "type": "string",
                        "default": "",
                        "description": "Original search query — provides market comp context for re-scoring",
                    },
                },
            },
        ),
        # ── snipe_save: persist a search to the Snipe UI ─────────────────────
        Tool(
            name="snipe_save",
            description="Persist a productive search for ongoing monitoring in the Snipe UI.",
            inputSchema={
                "type": "object",
                "required": ["name", "query"],
                "properties": {
                    "name": {
                        "type": "string",
                        "description": "Human-readable label, e.g. 'RTX 3070+ laptop boards under $250'",
                    },
                    "query": {
                        "type": "string",
                        "description": "The eBay search query string",
                    },
                    "filters_json": {
                        "type": "string",
                        "default": "{}",
                        "description": "JSON string of filter params to preserve (max_price, must_include, etc.)",
                    },
                },
            },
        ),
    ]
|
||||
|
||||
|
||||
@server.call_tool()
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
    """Dispatch an MCP tool invocation to its async handler."""
    handlers = {
        "snipe_search": _search,
        "snipe_enrich": _enrich,
        "snipe_save": _save,
    }
    handler = handlers.get(name)
    if handler is None:
        return [TextContent(type="text", text=f"Unknown tool: {name}")]
    return await handler(arguments)
|
||||
|
||||
|
||||
async def _search(args: dict) -> list[TextContent]:
    """Run a Snipe search and return condensed, ranked results as JSON text."""
    from app.mcp.formatters import format_results

    candidates = {
        "q": args.get("query", ""),
        "must_include": args.get("must_include", ""),
        "must_include_mode": args.get("must_include_mode", "groups"),
        "must_exclude": args.get("must_exclude", ""),
        "max_price": args.get("max_price", 0),
        "min_price": args.get("min_price", 0),
        "pages": args.get("pages", 2),
        "category_id": args.get("category_id", ""),
    }
    # Drop unset filters (empty string / zero) so the API applies its own
    # defaults; the query itself is always sent, even when empty.
    params = {}
    for key, value in candidates.items():
        if key == "q" or (value != "" and value != 0):
            params[key] = value

    async with httpx.AsyncClient(timeout=_TIMEOUT) as client:
        resp = await client.get(f"{_SNIPE_API}/api/search", params=params)
        resp.raise_for_status()

    formatted = format_results(
        resp.json(),
        vram_weight=float(args.get("vram_weight", 0.6)),
        arch_weight=float(args.get("arch_weight", 0.4)),
        top_n=int(args.get("top_n", 20)),
    )
    return [TextContent(type="text", text=json.dumps(formatted, indent=2))]
|
||||
|
||||
|
||||
async def _enrich(args: dict) -> list[TextContent]:
    """Trigger deep seller/listing enrichment and return the raw API payload."""
    query_params = {
        "seller": args["seller_id"],
        "listing_id": args["listing_id"],
        "query": args.get("query", ""),
    }
    async with httpx.AsyncClient(timeout=_TIMEOUT) as client:
        resp = await client.post(f"{_SNIPE_API}/api/enrich", params=query_params)
        resp.raise_for_status()
    return [TextContent(type="text", text=json.dumps(resp.json(), indent=2))]
|
||||
|
||||
|
||||
async def _save(args: dict) -> list[TextContent]:
    """Persist a search to the Snipe UI's saved-searches list."""
    payload = {
        "name": args["name"],
        "query": args["query"],
        "filters_json": args.get("filters_json", "{}"),
    }
    async with httpx.AsyncClient(timeout=_TIMEOUT) as client:
        resp = await client.post(f"{_SNIPE_API}/api/saved-searches", json=payload)
        resp.raise_for_status()
        data = resp.json()
    return [TextContent(type="text", text=f"Saved (id={data.get('id')}): {args['name']}")]
|
||||
|
||||
|
||||
async def _main() -> None:
    """Serve MCP requests over stdio until the client disconnects."""
    async with stdio_server() as (read_stream, write_stream):
        await server.run(
            read_stream,
            write_stream,
            server.create_initialization_options(),
        )
|
||||
|
||||
|
||||
# Entry point: run as `python -m app.mcp.server` (stdio transport for Claude Code).
if __name__ == "__main__":
    asyncio.run(_main())
|
||||
Loading…
Reference in a new issue