test: add ollama_research URL assertion to llm config generation test

This commit is contained in:
pyr0ball 2026-02-24 19:14:33 -08:00
parent dc679dedc3
commit 6df5a7aeaf

View file

@@ -20,6 +20,7 @@ def test_urls_applied_to_llm_yaml(tmp_path):
llm_yaml = tmp_path / "llm.yaml"
llm_yaml.write_text(yaml.dump({"backends": {
"ollama": {"base_url": "http://old:11434/v1", "type": "openai_compat"},
"ollama_research": {"base_url": "http://old:11434/v1", "type": "openai_compat"},
"vllm": {"base_url": "http://old:8000/v1", "type": "openai_compat"},
}}))
@@ -28,6 +29,7 @@ def test_urls_applied_to_llm_yaml(tmp_path):
result = yaml.safe_load(llm_yaml.read_text())
assert result["backends"]["ollama"]["base_url"] == "http://myserver:11434/v1"
assert result["backends"]["ollama_research"]["base_url"] == "http://myserver:11434/v1"
assert result["backends"]["vllm"]["base_url"] == "http://localhost:8000/v1"