feat: LLM reply draft, tiers BYOK gate, and messaging API endpoints (#74)

This commit is contained in:
pyr0ball 2026-04-20 12:36:16 -07:00
parent 091834f1ae
commit 715a8aa33e
3 changed files with 211 additions and 0 deletions

View file

@@ -49,6 +49,7 @@ FEATURES: dict[str, str] = {
"company_research": "paid",
"interview_prep": "paid",
"survey_assistant": "paid",
"llm_reply_draft": "paid",
# Orchestration / infrastructure — stays gated
"email_classifier": "paid",
@@ -81,6 +82,7 @@ BYOK_UNLOCKABLE: frozenset[str] = frozenset({
"company_research",
"interview_prep",
"survey_assistant",
"llm_reply_draft",
})
# Demo mode flag — read from environment at module load time.

View file

@@ -4178,3 +4178,170 @@ def wizard_complete():
return {"ok": True}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
# ── Messaging models ──────────────────────────────────────────────────────────
class MessageCreateBody(BaseModel):
    """Payload for POST /api/messages (create one message row).

    Fields mirror the keyword arguments of scripts.messaging.create_message;
    the endpoint forwards ``model_dump()`` verbatim.
    """

    job_id: Optional[int] = None           # id of the related job, if any
    job_contact_id: Optional[int] = None   # id of the related job_contacts row, if any
    type: str = "email"                    # message kind, e.g. "email" or "draft"
    direction: Optional[str] = None        # e.g. "outbound" (see draft_reply) — presumably also "inbound"; confirm
    subject: Optional[str] = None          # subject line
    body: Optional[str] = None             # message body text
    from_addr: Optional[str] = None        # sender address
    to_addr: Optional[str] = None          # recipient address
    template_id: Optional[int] = None      # template this message was built from, if any
class TemplateCreateBody(BaseModel):
    """Payload for POST /api/message-templates (create a user template)."""

    title: str                               # required display title
    category: str = "custom"                 # grouping bucket; user-created templates default to "custom"
    subject_template: Optional[str] = None   # optional subject-line template
    body_template: str                       # required body template text
class TemplateUpdateBody(BaseModel):
    """Payload for PUT /api/message-templates/{id} — a partial update.

    Every field is optional; only fields explicitly set are applied (the
    endpoint forwards ``model_dump(exclude_none=True)``).
    """

    title: Optional[str] = None              # new display title
    category: Optional[str] = None           # new grouping bucket
    subject_template: Optional[str] = None   # new subject-line template
    body_template: Optional[str] = None      # new body template text
# ── Messaging (MIT) ───────────────────────────────────────────────────────────
@app.get("/api/messages")
def get_messages(
    job_id: Optional[int] = None,
    type: Optional[str] = None,
    direction: Optional[str] = None,
    limit: int = 100,
):
    """Return stored messages, optionally filtered by job, type, or direction."""
    from scripts.messaging import list_messages

    db_path = Path(_request_db.get() or DB_PATH)
    return list_messages(db_path, job_id=job_id, type=type, direction=direction, limit=limit)
@app.post("/api/messages")
def post_message(body: MessageCreateBody):
    """Create one message row from the request payload."""
    from scripts.messaging import create_message

    fields = body.model_dump()
    return create_message(Path(_request_db.get() or DB_PATH), **fields)
@app.delete("/api/messages/{message_id}")
def del_message(message_id: int):
    """Delete a message by id; 404 when no such message exists."""
    from scripts.messaging import delete_message

    db_path = Path(_request_db.get() or DB_PATH)
    try:
        delete_message(db_path, message_id)
    except KeyError:
        raise HTTPException(404, "message not found")
    return {"ok": True}
@app.get("/api/message-templates")
def get_templates():
    """Return every stored message template."""
    from scripts.messaging import list_templates

    db_path = Path(_request_db.get() or DB_PATH)
    return list_templates(db_path)
@app.post("/api/message-templates")
def post_template(body: TemplateCreateBody):
    """Create a user message template from the request payload."""
    from scripts.messaging import create_template

    fields = body.model_dump()
    return create_template(Path(_request_db.get() or DB_PATH), **fields)
@app.put("/api/message-templates/{template_id}")
def put_template(template_id: int, body: TemplateUpdateBody):
    """Apply a partial update to a template.

    403 for built-in (read-only) templates, 404 when the id is unknown.
    """
    from scripts.messaging import update_template

    changes = body.model_dump(exclude_none=True)
    db_path = Path(_request_db.get() or DB_PATH)
    try:
        return update_template(db_path, template_id, **changes)
    except PermissionError:
        raise HTTPException(403, "cannot modify built-in templates")
    except KeyError:
        raise HTTPException(404, "template not found")
@app.delete("/api/message-templates/{template_id}")
def del_template(template_id: int):
    """Delete a template by id.

    403 for built-in (read-only) templates, 404 when the id is unknown.
    """
    from scripts.messaging import delete_template

    db_path = Path(_request_db.get() or DB_PATH)
    try:
        delete_template(db_path, template_id)
    except PermissionError:
        raise HTTPException(403, "cannot delete built-in templates")
    except KeyError:
        raise HTTPException(404, "template not found")
    return {"ok": True}
# ── LLM Reply Draft (BSL 1.1) ─────────────────────────────────────────────────
def _get_effective_tier(request: Request) -> str:
    """Resolve the caller's tier: an X-CF-Tier header wins, else the environment."""
    if override := request.headers.get("X-CF-Tier"):
        return override
    from app.wizard.tiers import effective_tier
    return effective_tier()
@app.post("/api/contacts/{contact_id}/draft-reply")
def draft_reply(contact_id: int, request: Request):
    """Generate an LLM draft reply for an inbound job_contacts row. Tier-gated.

    Gated on the "llm_reply_draft" feature (unlockable via BYOK when an LLM
    is configured). The generated draft is persisted as an outbound message
    of type "draft" and its id returned.

    Raises:
        HTTPException 402: caller's tier cannot use the feature.
        HTTPException 404: no job_contacts row with this id.
    """
    from app.wizard.tiers import can_use, has_configured_llm
    from scripts.messaging import create_message
    from scripts.llm_reply_draft import generate_draft_reply

    db_path = Path(_request_db.get() or DB_PATH)
    tier = _get_effective_tier(request)
    if not can_use(tier, "llm_reply_draft", has_byok=has_configured_llm()):
        raise HTTPException(402, detail={"error": "tier_required", "min_tier": "free+byok"})

    # Fetch the contact row; close the connection even when the query raises
    # (a bare execute/close pair would leak the handle on error).
    con = _get_db()
    try:
        row = con.execute(
            "SELECT * FROM job_contacts WHERE id=?", (contact_id,)
        ).fetchone()
    finally:
        con.close()
    if not row:
        raise HTTPException(404, "contact not found")

    # Personalize the draft with whatever profile fields are present.
    profile = _imitate_load_profile()
    user_name = getattr(profile, "name", "") or ""
    target_role = getattr(profile, "target_role", "") or ""

    # Optional per-install LLM config lives next to the database.
    cfg_path = db_path.parent / "config" / "llm.yaml"
    draft_body = generate_draft_reply(
        subject=row["subject"] or "",
        from_addr=row["from_addr"] or "",
        body=row["body"] or "",
        user_name=user_name,
        target_role=target_role,
        config_path=cfg_path if cfg_path.exists() else None,
    )

    # Persist the draft as an outbound message linked to the job and contact.
    msg = create_message(
        db_path,
        job_id=row["job_id"],
        job_contact_id=contact_id,
        type="draft",
        direction="outbound",
        subject=f"Re: {row['subject'] or ''}".strip(),
        body=draft_body,
        to_addr=row["from_addr"],
        template_id=None,
        from_addr=None,
    )
    return {"message_id": msg["id"]}
@app.post("/api/messages/{message_id}/approve")
def approve_message_endpoint(message_id: int):
    """Set approved_at=now(). Returns approved body for copy-to-clipboard."""
    from scripts.messaging import approve_message

    db_path = Path(_request_db.get() or DB_PATH)
    try:
        msg = approve_message(db_path, message_id)
    except KeyError:
        raise HTTPException(404, "message not found")
    return {"body": msg["body"], "approved_at": msg["approved_at"]}

View file

@@ -0,0 +1,42 @@
# BSL 1.1 — see LICENSE-BSL
"""LLM-assisted reply draft generation for inbound job contacts (BSL 1.1)."""
from __future__ import annotations
from pathlib import Path
from typing import Optional
# System prompt for the reply-draft completion. Constrains the model to a
# body-only reply with no fabricated facts; uncertain details become
# "[TODO: fill in]" placeholders for the user to complete before sending.
_SYSTEM = (
    "You are drafting a professional email reply on behalf of a job seeker. "
    "Be concise and professional. Do not fabricate facts. If you are uncertain "
    "about a detail, leave a [TODO: fill in] placeholder. "
    "Output the reply body only — no subject line, no salutation preamble."
)
def _build_prompt(subject: str, from_addr: str, body: str, user_name: str, target_role: str) -> str:
return (
f"ORIGINAL EMAIL:\n"
f"Subject: {subject}\n"
f"From: {from_addr}\n"
f"Body:\n{body}\n\n"
f"USER PROFILE CONTEXT:\n"
f"Name: {user_name}\n"
f"Target role: {target_role}\n\n"
"Write a concise, professional reply to this email."
)
def generate_draft_reply(
    subject: str,
    from_addr: str,
    body: str,
    user_name: str,
    target_role: str,
    config_path: Optional[Path] = None,
) -> str:
    """Return a draft reply body string.

    Builds the prompt from the inbound email plus profile context and routes
    it through the project's LLMRouter; ``config_path``, when given, is an
    optional configuration file handed to the router.
    """
    from scripts.llm_router import LLMRouter

    user_prompt = _build_prompt(subject, from_addr, body, user_name, target_role)
    completion = LLMRouter(config_path=config_path).complete(system=_SYSTEM, user=user_prompt)
    return completion.strip()