Commit 9d21bf8 · Parent: 05ffeb4
fix: update references to Europe PMC in examples and documentation
Replaced mentions of bioRxiv with Europe PMC in the full stack and hypothesis demo scripts to reflect the current data sources. Updated pyproject.toml to exclude specific directories from mypy checks using anchored regex patterns for clarity.
- examples/full_stack_demo/run_full.py +2 -2
- examples/hypothesis_demo/run_hypothesis.py +3 -3
- examples/orchestrator_demo/run_magentic.py +5 -3
- pyproject.toml +2 -2
- src/orchestrator_magentic.py +3 -5
- src/utils/config.py +24 -0
- src/utils/llm_factory.py +106 -0
- tests/unit/utils/test_config.py +30 -19
examples/full_stack_demo/run_full.py
CHANGED

@@ -3,7 +3,7 @@
 Demo: Full Stack DeepCritical Agent (Phases 1-8).
 
 This script demonstrates the COMPLETE REAL drug repurposing research pipeline:
-- Phase 2: REAL Search (PubMed + ClinicalTrials + bioRxiv)
+- Phase 2: REAL Search (PubMed + ClinicalTrials + Europe PMC)
 - Phase 6: REAL Embeddings (sentence-transformers + ChromaDB)
 - Phase 7: REAL Hypothesis (LLM mechanistic reasoning)
 - Phase 3: REAL Judge (LLM evidence assessment)

@@ -225,7 +225,7 @@ Examples:
     print(" DeepCritical Full Stack Demo Complete!")
     print(" ")
     print(" Everything you just saw was REAL:")
-    print(" - Real PubMed + ClinicalTrials + bioRxiv searches")
+    print(" - Real PubMed + ClinicalTrials + Europe PMC searches")
     print(" - Real embedding computations")
     print(" - Real LLM reasoning")
     print(" - Real scientific report")
examples/hypothesis_demo/run_hypothesis.py
CHANGED

@@ -3,7 +3,7 @@
 Demo: Hypothesis Generation (Phase 7).
 
 This script demonstrates the REAL hypothesis generation pipeline:
-1. REAL search: PubMed + ClinicalTrials + bioRxiv (actual API calls)
+1. REAL search: PubMed + ClinicalTrials + Europe PMC (actual API calls)
 2. REAL embeddings: Semantic deduplication
 3. REAL LLM: Mechanistic hypothesis generation
 

@@ -37,7 +37,7 @@ async def run_hypothesis_demo(query: str) -> None:
     print(f"{'=' * 60}\n")
 
     # Step 1: REAL Search
-    print("[Step 1] Searching PubMed + ClinicalTrials + bioRxiv...")
+    print("[Step 1] Searching PubMed + ClinicalTrials + Europe PMC...")
     search_handler = SearchHandler(
         tools=[PubMedTool(), ClinicalTrialsTool(), EuropePMCTool()], timeout=30.0
     )

@@ -132,7 +132,7 @@ Examples:
 
     print("\n" + "=" * 60)
     print("Demo complete! This was a REAL pipeline:")
-    print(" 1. REAL search: PubMed + ClinicalTrials + bioRxiv APIs")
+    print(" 1. REAL search: PubMed + ClinicalTrials + Europe PMC APIs")
     print(" 2. REAL embeddings: Actual sentence-transformers")
     print(" 3. REAL LLM: Actual hypothesis generation")
     print("=" * 60 + "\n")
examples/orchestrator_demo/run_magentic.py
CHANGED

@@ -32,9 +32,11 @@ async def main() -> None:
     parser.add_argument("--iterations", type=int, default=10, help="Max rounds")
     args = parser.parse_args()
 
-    # Check for
-
-
+    # Check for OpenAI key specifically - Magentic requires function calling
+    # which is only supported by OpenAI's API (not Anthropic or HF Inference)
+    if not os.getenv("OPENAI_API_KEY"):
+        print("Error: OPENAI_API_KEY required. Magentic uses function calling")
+        print(" which requires OpenAI's API. For other providers, use mode='simple'.")
         sys.exit(1)
 
     print(f"\n{'=' * 60}")
pyproject.toml
CHANGED

@@ -106,8 +106,8 @@ disallow_untyped_defs = true
 warn_return_any = true
 warn_unused_ignores = false
 exclude = [
-    "reference_repos",
-    "examples",
+    "^reference_repos/",
+    "^examples/",
 ]
 
 # ============== PYTEST CONFIG ==============
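A quick illustration of why the anchored patterns are the clearer choice: mypy treats each exclude entry as a regular expression matched against relative paths, so an unanchored name can accidentally exclude look-alike paths deeper in the tree. This is a minimal sketch using Python's re module; the example paths are made up for illustration and are not from the repository.

    import re

    patterns_old = ["reference_repos", "examples"]
    patterns_new = [r"^reference_repos/", r"^examples/"]

    paths = [
        "examples/full_stack_demo/run_full.py",  # intended to be excluded
        "src/utils/examples_helper.py",          # hypothetical path that should stay checked
    ]

    for path in paths:
        old_hit = any(re.search(p, path) for p in patterns_old)
        new_hit = any(re.search(p, path) for p in patterns_new)
        # Unanchored "examples" also matches "examples_helper"; "^examples/" does not.
        print(f"{path}: old={old_hit} new={new_hit}")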
src/orchestrator_magentic.py
CHANGED

@@ -22,7 +22,7 @@ from src.agents.magentic_agents import (
 )
 from src.agents.state import init_magentic_state
 from src.utils.config import settings
-from src.utils.
+from src.utils.llm_factory import check_magentic_requirements
 from src.utils.models import AgentEvent
 
 if TYPE_CHECKING:

@@ -50,10 +50,8 @@ class MagenticOrchestrator:
             max_rounds: Maximum coordination rounds
             chat_client: Optional shared chat client for agents
         """
-
-
-            "Magentic mode requires OPENAI_API_KEY. Set the key or use mode='simple'."
-        )
+        # Validate requirements via centralized factory
+        check_magentic_requirements()
 
         self._max_rounds = max_rounds
         self._chat_client = chat_client
src/utils/config.py
CHANGED

@@ -78,6 +78,30 @@ class Settings(BaseSettings):
 
         raise ConfigurationError(f"Unknown LLM provider: {self.llm_provider}")
 
+    def get_openai_api_key(self) -> str:
+        """Get OpenAI API key (required for Magentic function calling)."""
+        if not self.openai_api_key:
+            raise ConfigurationError(
+                "OPENAI_API_KEY not set. Magentic mode requires OpenAI for function calling. "
+                "Use mode='simple' for other providers."
+            )
+        return self.openai_api_key
+
+    @property
+    def has_openai_key(self) -> bool:
+        """Check if OpenAI API key is available."""
+        return bool(self.openai_api_key)
+
+    @property
+    def has_anthropic_key(self) -> bool:
+        """Check if Anthropic API key is available."""
+        return bool(self.anthropic_api_key)
+
+    @property
+    def has_any_llm_key(self) -> bool:
+        """Check if any LLM API key is available."""
+        return self.has_openai_key or self.has_anthropic_key
+
 
 def get_settings() -> Settings:
     """Factory function to get settings (allows mocking in tests)."""
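How calling code might use the new Settings helpers, as a minimal sketch (the fallback flow shown here is illustrative, not part of the commit; only the settings import and the helper methods come from this diff):

    from src.utils.config import settings
    from src.utils.exceptions import ConfigurationError

    if settings.has_any_llm_key:
        print("At least one LLM provider is configured.")

    try:
        key = settings.get_openai_api_key()  # raises ConfigurationError if OPENAI_API_KEY is unset
        print("Magentic mode is available.")
    except ConfigurationError as err:
        print(f"Falling back to simple mode: {err}")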
src/utils/llm_factory.py
ADDED

@@ -0,0 +1,106 @@
+"""Centralized LLM client factory.
+
+This module provides factory functions for creating LLM clients,
+ensuring consistent configuration and clear error messages.
+
+Why Magentic requires OpenAI:
+- Magentic agents use the @ai_function decorator for tool calling
+- This requires structured function calling protocol (tools, tool_choice)
+- OpenAI's API supports this natively
+- Anthropic/HuggingFace Inference APIs are text-in/text-out only
+"""
+
+from typing import TYPE_CHECKING, Any
+
+from src.utils.config import settings
+from src.utils.exceptions import ConfigurationError
+
+if TYPE_CHECKING:
+    from agent_framework.openai import OpenAIChatClient
+
+
+def get_magentic_client() -> "OpenAIChatClient":
+    """
+    Get the OpenAI client for Magentic agents.
+
+    Magentic requires OpenAI because it uses function calling protocol:
+    - @ai_function decorators define callable tools
+    - LLM returns structured tool calls (not just text)
+    - Requires OpenAI's tools/function_call API support
+
+    Raises:
+        ConfigurationError: If OPENAI_API_KEY is not set
+
+    Returns:
+        Configured OpenAIChatClient for Magentic agents
+    """
+    # Import here to avoid requiring agent-framework for simple mode
+    from agent_framework.openai import OpenAIChatClient
+
+    api_key = settings.get_openai_api_key()
+
+    return OpenAIChatClient(
+        model_id=settings.openai_model,
+        api_key=api_key,
+    )
+
+
+def get_pydantic_ai_model() -> Any:
+    """
+    Get the appropriate model for pydantic-ai based on configuration.
+
+    Uses the configured LLM_PROVIDER to select between OpenAI and Anthropic.
+    This is used by simple mode components (JudgeHandler, etc.)
+
+    Returns:
+        Configured pydantic-ai model
+    """
+    from pydantic_ai.models.anthropic import AnthropicModel
+    from pydantic_ai.models.openai import OpenAIModel
+    from pydantic_ai.providers.anthropic import AnthropicProvider
+    from pydantic_ai.providers.openai import OpenAIProvider
+
+    if settings.llm_provider == "openai":
+        if not settings.openai_api_key:
+            raise ConfigurationError("OPENAI_API_KEY not set for pydantic-ai")
+        provider = OpenAIProvider(api_key=settings.openai_api_key)
+        return OpenAIModel(settings.openai_model, provider=provider)
+
+    if settings.llm_provider == "anthropic":
+        if not settings.anthropic_api_key:
+            raise ConfigurationError("ANTHROPIC_API_KEY not set for pydantic-ai")
+        anthropic_provider = AnthropicProvider(api_key=settings.anthropic_api_key)
+        return AnthropicModel(settings.anthropic_model, provider=anthropic_provider)
+
+    raise ConfigurationError(f"Unknown LLM provider: {settings.llm_provider}")
+
+
+def check_magentic_requirements() -> None:
+    """
+    Check if Magentic mode requirements are met.
+
+    Raises:
+        ConfigurationError: If requirements not met
+    """
+    if not settings.has_openai_key:
+        raise ConfigurationError(
+            "Magentic mode requires OPENAI_API_KEY for function calling support. "
+            "Anthropic and HuggingFace Inference do not support the structured "
+            "function calling protocol that Magentic agents require. "
+            "Use mode='simple' for other LLM providers."
+        )
+
+
+def check_simple_mode_requirements() -> None:
+    """
+    Check if simple mode requirements are met.
+
+    Simple mode supports both OpenAI and Anthropic.
+
+    Raises:
+        ConfigurationError: If no LLM API key is configured
+    """
+    if not settings.has_any_llm_key:
+        raise ConfigurationError(
+            "No LLM API key configured. Set OPENAI_API_KEY or ANTHROPIC_API_KEY."
+        )
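How the factory functions compose, as a sketch: the `mode` flag and the wrapper function below are hypothetical and only the four llm_factory functions come from this commit. The simple-mode branch follows the docstrings above (pydantic-ai model for either provider), while the Magentic branch validates first and then builds the OpenAI client.

    from src.utils.llm_factory import (
        check_magentic_requirements,
        check_simple_mode_requirements,
        get_magentic_client,
        get_pydantic_ai_model,
    )


    def build_llm(mode: str = "simple"):
        """Return the right client/model for the requested orchestration mode."""
        if mode == "magentic":
            check_magentic_requirements()  # raises ConfigurationError without OPENAI_API_KEY
            return get_magentic_client()   # OpenAIChatClient for function-calling agents
        check_simple_mode_requirements()   # any configured provider key is enough
        return get_pydantic_ai_model()     # pydantic-ai OpenAI or Anthropic model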
tests/unit/utils/test_config.py
CHANGED

@@ -11,50 +11,61 @@ from src.utils.exceptions import ConfigurationError
 
 
 class TestSettings:
-    """Tests for Settings class.
+    """Tests for Settings class.
 
-    def test_default_max_iterations(self):
+    Note: We use _env_file=None to disable .env file reading in tests.
+    This ensures tests are isolated from the developer's local .env file.
+    pydantic-settings reads from both os.environ AND .env file by default.
+    """
+
+    def test_default_max_iterations(self) -> None:
         """Settings should have default max_iterations of 10."""
         with patch.dict(os.environ, {}, clear=True):
-            settings = Settings()
+            settings = Settings(_env_file=None)
             assert settings.max_iterations == 10  # noqa: PLR2004
 
-    def test_max_iterations_from_env(self):
+    def test_max_iterations_from_env(self) -> None:
         """Settings should read MAX_ITERATIONS from env."""
-        with patch.dict(os.environ, {"MAX_ITERATIONS": "25"}):
-            settings = Settings()
+        with patch.dict(os.environ, {"MAX_ITERATIONS": "25"}, clear=True):
+            settings = Settings(_env_file=None)
             assert settings.max_iterations == 25  # noqa: PLR2004
 
-    def test_invalid_max_iterations_raises(self):
+    def test_invalid_max_iterations_raises(self) -> None:
         """Settings should reject invalid max_iterations."""
-        with patch.dict(os.environ, {"MAX_ITERATIONS": "100"}):
+        with patch.dict(os.environ, {"MAX_ITERATIONS": "100"}, clear=True):
             with pytest.raises(ValidationError):
-                Settings()  # 100 > 50 (max)
+                Settings(_env_file=None)  # 100 > 50 (max)
 
-    def test_get_api_key_openai(self):
+    def test_get_api_key_openai(self) -> None:
         """get_api_key should return OpenAI key when provider is openai."""
-        with patch.dict(
-
+        with patch.dict(
+            os.environ,
+            {"LLM_PROVIDER": "openai", "OPENAI_API_KEY": "sk-test-key"},
+            clear=True,
+        ):
+            settings = Settings(_env_file=None)
             assert settings.get_api_key() == "sk-test-key"
 
-    def test_get_api_key_openai_missing_raises(self):
+    def test_get_api_key_openai_missing_raises(self) -> None:
         """get_api_key should raise ConfigurationError when OpenAI key is not set."""
         with patch.dict(os.environ, {"LLM_PROVIDER": "openai"}, clear=True):
-            settings = Settings()
+            settings = Settings(_env_file=None)
             with pytest.raises(ConfigurationError, match="OPENAI_API_KEY not set"):
                 settings.get_api_key()
 
-    def test_get_api_key_anthropic(self):
+    def test_get_api_key_anthropic(self) -> None:
         """get_api_key should return Anthropic key when provider is anthropic."""
         with patch.dict(
-            os.environ,
-
+            os.environ,
+            {"LLM_PROVIDER": "anthropic", "ANTHROPIC_API_KEY": "sk-ant-test-key"},
+            clear=True,
         ):
-            settings = Settings()
+            settings = Settings(_env_file=None)
             assert settings.get_api_key() == "sk-ant-test-key"
 
-    def test_get_api_key_anthropic_missing_raises(self):
+    def test_get_api_key_anthropic_missing_raises(self) -> None:
         """get_api_key should raise ConfigurationError when Anthropic key is not set."""
         with patch.dict(os.environ, {"LLM_PROVIDER": "anthropic"}, clear=True):
-            settings = Settings()
+            settings = Settings(_env_file=None)
            with pytest.raises(ConfigurationError, match="ANTHROPIC_API_KEY not set"):
                 settings.get_api_key()
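The same isolation pattern applied to the new Settings properties, as a sketch (this test is illustrative and not part of the commit): clear os.environ AND pass _env_file=None so a developer's local .env cannot leak a real key into the assertion.

    import os
    from unittest.mock import patch

    from src.utils.config import Settings


    def test_has_openai_key_is_false_without_env_or_dotenv() -> None:
        # With the environment cleared and .env reading disabled, no key is visible.
        with patch.dict(os.environ, {}, clear=True):
            settings = Settings(_env_file=None)
            assert settings.has_openai_key is False
            assert settings.has_any_llm_key is False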