test: add ollama_research URL assertion to llm config generation test
This commit is contained in:
parent
33d3994fb8
commit
306c90c9da
1 changed file with 4 additions and 2 deletions
|
|
@@ -19,8 +19,9 @@ def test_urls_applied_to_llm_yaml(tmp_path):
|
||||||
}))
|
}))
|
||||||
llm_yaml = tmp_path / "llm.yaml"
|
llm_yaml = tmp_path / "llm.yaml"
|
||||||
llm_yaml.write_text(yaml.dump({"backends": {
|
llm_yaml.write_text(yaml.dump({"backends": {
|
||||||
"ollama": {"base_url": "http://old:11434/v1", "type": "openai_compat"},
|
"ollama": {"base_url": "http://old:11434/v1", "type": "openai_compat"},
|
||||||
"vllm": {"base_url": "http://old:8000/v1", "type": "openai_compat"},
|
"ollama_research": {"base_url": "http://old:11434/v1", "type": "openai_compat"},
|
||||||
|
"vllm": {"base_url": "http://old:8000/v1", "type": "openai_compat"},
|
||||||
}}))
|
}}))
|
||||||
|
|
||||||
profile = UserProfile(user_yaml)
|
profile = UserProfile(user_yaml)
|
||||||
|
|
@@ -28,6 +29,7 @@ def test_urls_applied_to_llm_yaml(tmp_path):
|
||||||
|
|
||||||
result = yaml.safe_load(llm_yaml.read_text())
|
result = yaml.safe_load(llm_yaml.read_text())
|
||||||
assert result["backends"]["ollama"]["base_url"] == "http://myserver:11434/v1"
|
assert result["backends"]["ollama"]["base_url"] == "http://myserver:11434/v1"
|
||||||
|
assert result["backends"]["ollama_research"]["base_url"] == "http://myserver:11434/v1"
|
||||||
assert result["backends"]["vllm"]["base_url"] == "http://localhost:8000/v1"
|
assert result["backends"]["vllm"]["base_url"] == "http://localhost:8000/v1"
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Reference in a new issue