feat: cover letter iterative refinement — feedback UI + backend params

- generate() accepts previous_result + feedback; appends both to LLM prompt
- task_runner cover_letter handler parses params JSON, passes fields through
- Apply Workspace: "Refine with Feedback" expander with text area + Regenerate
  button; only shown when a draft exists; clears feedback after submitting
- 8 new tests (TestGenerateRefinement + TestTaskRunnerCoverLetterParams)
This commit is contained in:
pyr0ball 2026-02-25 14:44:20 -08:00
parent 1d61683c5b
commit 97bb0819b4
4 changed files with 186 additions and 1 deletions

View file

@@ -255,6 +255,32 @@ with col_tools:
label_visibility="collapsed",
)
# ── Iterative refinement ──────────────────────
if cl_text and not _cl_running:
with st.expander("✏️ Refine with Feedback"):
st.caption("Describe what to change. The current draft is passed to the LLM as context.")
_fb_key = f"fb_{selected_id}"
feedback_text = st.text_area(
"Feedback",
placeholder="e.g. Shorten the second paragraph and add a line about cross-functional leadership.",
height=80,
key=_fb_key,
label_visibility="collapsed",
)
if st.button("✨ Regenerate with Feedback", use_container_width=True,
disabled=not (feedback_text or "").strip(),
key=f"cl_refine_{selected_id}"):
import json as _json
submit_task(
DEFAULT_DB, "cover_letter", selected_id,
params=_json.dumps({
"previous_result": cl_text,
"feedback": feedback_text.strip(),
}),
)
st.session_state.pop(_fb_key, None)
st.rerun()
# Copy + Save row
c1, c2 = st.columns(2)
with c1:

View file

@@ -169,9 +169,20 @@ def build_prompt(
return "\n".join(parts)
def generate(title: str, company: str, description: str = "", _router=None) -> str:
def generate(
title: str,
company: str,
description: str = "",
previous_result: str = "",
feedback: str = "",
_router=None,
) -> str:
"""Generate a cover letter and return it as a string.
Pass previous_result + feedback for iterative refinement — the prior draft
and requested changes are appended to the prompt so the LLM revises rather
than starting from scratch.
_router is an optional pre-built LLMRouter (used in tests to avoid real LLM calls).
"""
corpus = load_corpus()
@@ -181,6 +192,11 @@ def generate(title: str, company: str, description: str = "", _router=None) -> s
print(f"[cover-letter] Mission alignment detected for {company}", file=sys.stderr)
prompt = build_prompt(title, company, description, examples, mission_hint=mission_hint)
if previous_result:
prompt += f"\n\n---\nPrevious draft:\n{previous_result}"
if feedback:
prompt += f"\n\nUser feedback / requested changes:\n{feedback}\n\nPlease revise accordingly."
if _router is None:
sys.path.insert(0, str(Path(__file__).parent.parent))
from scripts.llm_router import LLMRouter
@@ -188,6 +204,8 @@ def generate(title: str, company: str, description: str = "", _router=None) -> s
print(f"[cover-letter] Generating for: {title} @ {company}", file=sys.stderr)
print(f"[cover-letter] Style examples: {[e['company'] for e in examples]}", file=sys.stderr)
if feedback:
print("[cover-letter] Refinement mode: feedback provided", file=sys.stderr)
result = _router.complete(prompt)
return result.strip()

View file

@@ -150,11 +150,15 @@ def _run_task(db_path: Path, task_id: int, task_type: str, job_id: int,
return
elif task_type == "cover_letter":
import json as _json
p = _json.loads(params or "{}")
from scripts.generate_cover_letter import generate
result = generate(
job.get("title", ""),
job.get("company", ""),
job.get("description", ""),
previous_result=p.get("previous_result", ""),
feedback=p.get("feedback", ""),
)
update_cover_letter(db_path, job_id, result)

View file

@@ -0,0 +1,137 @@
# tests/test_cover_letter_refinement.py
"""
TDD tests for cover letter iterative refinement:
- generate() accepts previous_result + feedback params
- task_runner cover_letter handler passes params through
"""
import json
import sys
from pathlib import Path
from unittest.mock import MagicMock, patch
sys.path.insert(0, str(Path(__file__).parent.parent))
# ── generate() refinement params ──────────────────────────────────────────────
class TestGenerateRefinement:
    """generate() appends previous_result and feedback to the LLM prompt."""

    def _call_generate(self, previous_result="", feedback=""):
        """Call generate() with a mock router and return the captured prompt."""
        seen_prompts = []
        router = MagicMock()

        def _fake_complete(prompt):
            # Record what generate() sends to the router so the tests can
            # inspect the final prompt text.
            seen_prompts.append(prompt)
            return "result"

        router.complete.side_effect = _fake_complete
        with patch("scripts.generate_cover_letter.load_corpus", return_value=[]), \
             patch("scripts.generate_cover_letter.find_similar_letters", return_value=[]):
            from scripts.generate_cover_letter import generate
            generate(
                "Software Engineer", "Acme",
                previous_result=previous_result,
                feedback=feedback,
                _router=router,
            )
        return seen_prompts[-1]

    def test_no_refinement_prompt_unchanged(self):
        """When no previous_result or feedback, prompt has no refinement section."""
        prompt = self._call_generate()
        assert "Previous draft" not in prompt
        assert "User feedback" not in prompt

    def test_previous_result_appended(self):
        """previous_result is appended to the prompt."""
        prompt = self._call_generate(previous_result="Old letter text here.")
        assert "Previous draft" in prompt
        assert "Old letter text here." in prompt

    def test_feedback_appended(self):
        """feedback is appended with revision instruction."""
        prompt = self._call_generate(feedback="Make it shorter and punchier.")
        assert "User feedback" in prompt
        assert "Make it shorter and punchier." in prompt
        assert "revise" in prompt.lower()

    def test_both_fields_appended(self):
        """Both previous_result and feedback appear when both supplied."""
        prompt = self._call_generate(
            previous_result="Draft v1 text.",
            feedback="Add more about leadership.",
        )
        assert "Previous draft" in prompt
        assert "Draft v1 text." in prompt
        assert "User feedback" in prompt
        assert "Add more about leadership." in prompt

    def test_empty_strings_ignored(self):
        """Empty string values produce no refinement section."""
        prompt = self._call_generate(previous_result="", feedback="")
        assert "Previous draft" not in prompt
        assert "User feedback" not in prompt
# ── task_runner cover_letter params passthrough ───────────────────────────────
class TestTaskRunnerCoverLetterParams:
    """task_runner passes previous_result and feedback from params JSON to generate()."""

    def _run_cover_letter_task(self, params_json: str | None, job: dict):
        """Invoke _run_task for cover_letter and return captured generate() kwargs.

        DB helpers and sqlite3.connect are patched out so no real database is
        touched; mock_generate records the arguments _run_task passes through.
        """
        captured = {}

        def mock_generate(title, company, description="", previous_result="", feedback="", _router=None):
            captured.update({
                "title": title, "company": company,
                "previous_result": previous_result, "feedback": feedback,
            })
            return "Generated letter"

        # NOTE(review): the original version also built an unused mock row,
        # configured mock_conn.return_value and then immediately discarded that
        # setup by reassigning return_value, and patched a nonexistent
        # `generate_cover_letter_fn` attribute with create=True. All of that
        # dead scaffolding is removed here.
        with patch("scripts.task_runner.insert_task", return_value=(1, True)), \
             patch("scripts.task_runner.update_task_status"), \
             patch("scripts.task_runner.update_cover_letter"), \
             patch("sqlite3.connect") as mock_conn:
            # Fake connection: any SELECT returns the supplied job dict; it also
            # acts as its own context manager in case _run_task uses
            # `with sqlite3.connect(...) as conn`.
            conn_mock = MagicMock()
            conn_mock.row_factory = None
            conn_mock.execute.return_value.fetchone.return_value = job
            conn_mock.__enter__ = MagicMock(return_value=conn_mock)
            mock_conn.return_value = conn_mock

            from scripts.task_runner import _run_task
            # task_runner imports generate from scripts.generate_cover_letter
            # at call time, so patch it at its definition site.
            with patch("scripts.generate_cover_letter.generate", mock_generate):
                _run_task(Path(":memory:"), 1, "cover_letter", job["id"], params_json)
        return captured

    def test_no_params_uses_empty_refinement(self):
        """When params is None, generate() receives empty previous_result and feedback."""
        job = {"id": 1, "title": "Dev", "company": "Corp", "description": "desc"}
        captured = self._run_cover_letter_task(None, job)
        assert captured.get("previous_result", "") == ""
        assert captured.get("feedback", "") == ""

    def test_params_with_feedback_passed_through(self):
        """previous_result and feedback from params JSON are passed to generate()."""
        job = {"id": 1, "title": "Dev", "company": "Corp", "description": "desc"}
        params = json.dumps({
            "previous_result": "Old draft text.",
            "feedback": "Make it more concise.",
        })
        captured = self._run_cover_letter_task(params, job)
        assert captured.get("previous_result") == "Old draft text."
        assert captured.get("feedback") == "Make it more concise."

    def test_empty_params_json_uses_empty_refinement(self):
        """Empty JSON object produces no refinement."""
        job = {"id": 1, "title": "Dev", "company": "Corp", "description": "desc"}
        captured = self._run_cover_letter_task("{}", job)
        assert captured.get("previous_result", "") == ""
        assert captured.get("feedback", "") == ""