From 33d3994fb8073e6913d6019919d7b58c202206ab Mon Sep 17 00:00:00 2001
From: pyr0ball
Date: Tue, 24 Feb 2026 19:10:54 -0800
Subject: [PATCH] feat: auto-generate llm.yaml base_url values from user profile services config

---
 app/pages/2_Settings.py             | 11 ++++++-
 scripts/generate_llm_config.py      | 18 +++++++++++
 tests/test_llm_config_generation.py | 47 +++++++++++++++++++++++++++++
 3 files changed, 75 insertions(+), 1 deletion(-)
 create mode 100644 scripts/generate_llm_config.py
 create mode 100644 tests/test_llm_config_generation.py

diff --git a/app/pages/2_Settings.py b/app/pages/2_Settings.py
index 16ebbc2..0275932 100644
--- a/app/pages/2_Settings.py
+++ b/app/pages/2_Settings.py
@@ -364,12 +364,21 @@ with tab_llm:
         for n in new_order
     ))
 
-    if st.button("💾 Save LLM settings", type="primary"):
+    col_save_llm, col_sync_llm = st.columns(2)
+    if col_save_llm.button("💾 Save LLM settings", type="primary"):
         save_yaml(LLM_CFG, {**cfg, "backends": updated_backends, "fallback_order": new_order})
         st.session_state.pop("_llm_order", None)
         st.session_state.pop("_llm_order_cfg_key", None)
         st.success("LLM settings saved!")
 
+    if col_sync_llm.button("🔄 Sync URLs from Profile", help="Regenerate backend base_url values from your service host/port settings in user.yaml"):
+        if _profile is not None:
+            from scripts.generate_llm_config import apply_service_urls as _apply_urls
+            _apply_urls(_profile, LLM_CFG)
+            st.success("Service URLs updated from your profile settings.")
+        else:
+            st.warning("No user profile found — configure it in the My Profile tab first.")
+
 # ── Notion tab ────────────────────────────────────────────────────────────────
 with tab_notion:
     cfg = load_yaml(NOTION_CFG) if NOTION_CFG.exists() else {}
diff --git a/scripts/generate_llm_config.py b/scripts/generate_llm_config.py
new file mode 100644
index 0000000..3a2916a
--- /dev/null
+++ b/scripts/generate_llm_config.py
@@ -0,0 +1,18 @@
+"""Update config/llm.yaml base_url values from the user profile's services block."""
+from pathlib import Path
+import yaml
+from scripts.user_profile import UserProfile
+
+
+def apply_service_urls(profile: UserProfile, llm_yaml_path: Path) -> None:
+    """Rewrite base_url for ollama, ollama_research, and vllm backends in llm.yaml."""
+    if not llm_yaml_path.exists():
+        return
+    cfg = yaml.safe_load(llm_yaml_path.read_text()) or {}
+    urls = profile.generate_llm_urls()
+    backends = cfg.get("backends", {})
+    for backend_name, url in urls.items():
+        if backend_name in backends:
+            backends[backend_name]["base_url"] = url
+    cfg["backends"] = backends
+    llm_yaml_path.write_text(yaml.dump(cfg, default_flow_style=False, allow_unicode=True))
diff --git a/tests/test_llm_config_generation.py b/tests/test_llm_config_generation.py
new file mode 100644
index 0000000..ba778df
--- /dev/null
+++ b/tests/test_llm_config_generation.py
@@ -0,0 +1,47 @@
+from pathlib import Path
+import yaml
+from scripts.user_profile import UserProfile
+from scripts.generate_llm_config import apply_service_urls
+
+
+def test_urls_applied_to_llm_yaml(tmp_path):
+    user_yaml = tmp_path / "user.yaml"
+    user_yaml.write_text(yaml.dump({
+        "name": "Test",
+        "services": {
+            "ollama_host": "myserver", "ollama_port": 11434, "ollama_ssl": False,
+            "ollama_ssl_verify": True,
+            "vllm_host": "localhost", "vllm_port": 8000, "vllm_ssl": False,
+            "vllm_ssl_verify": True,
+            "searxng_host": "localhost", "searxng_port": 8888,
+            "searxng_ssl": False, "searxng_ssl_verify": True,
+        }
+    }))
+    llm_yaml = tmp_path / "llm.yaml"
+    llm_yaml.write_text(yaml.dump({"backends": {
+        "ollama": {"base_url": "http://old:11434/v1", "type": "openai_compat"},
+        "vllm": {"base_url": "http://old:8000/v1", "type": "openai_compat"},
+    }}))
+
+    profile = UserProfile(user_yaml)
+    apply_service_urls(profile, llm_yaml)
+
+    result = yaml.safe_load(llm_yaml.read_text())
+    assert result["backends"]["ollama"]["base_url"] == "http://myserver:11434/v1"
+    assert result["backends"]["vllm"]["base_url"] == "http://localhost:8000/v1"
+
+
+def test_missing_llm_yaml_is_noop(tmp_path):
+    """apply_service_urls should not crash if llm.yaml doesn't exist."""
+    user_yaml = tmp_path / "user.yaml"
+    user_yaml.write_text(yaml.dump({"name": "Test", "services": {
+        "ollama_host": "localhost", "ollama_port": 11434, "ollama_ssl": False,
+        "ollama_ssl_verify": True,
+        "vllm_host": "localhost", "vllm_port": 8000, "vllm_ssl": False,
+        "vllm_ssl_verify": True,
+        "searxng_host": "localhost", "searxng_port": 8888,
+        "searxng_ssl": False, "searxng_ssl_verify": True,
+    }}))
+    profile = UserProfile(user_yaml)
+    # Should not raise
+    apply_service_urls(profile, tmp_path / "nonexistent.yaml")