src/tdoc-ai/tdoc_ai/config.py +4 −0

```diff
@@ -101,6 +101,7 @@ class AiConfig(BaseConfigModel):
         description="LLM model in <provider>/<model_name> format",
     )
     llm_api_base: str | None = Field(None, description="Override LLM API base URL")
+    llm_api_key: str | None = Field(None, description="Override LLM API key (takes precedence over provider-specific env vars)")
     abstract_min_words: int = Field(150, ge=1, description="Minimum abstract word count")
     abstract_max_words: int = Field(250, ge=1, description="Maximum abstract word count")
@@ -136,6 +137,9 @@ class AiConfig(BaseConfigModel):
         if llm_api_base := os.getenv("TDC_AI_LLM_API_BASE"):
             data["llm_api_base"] = llm_api_base
 
+        # Check for TDC_AI_LLM_API_KEY - takes precedence over provider-specific keys
+        data["llm_api_key"] = os.environ.get("TDC_AI_LLM_API_KEY")
+
         max_chunk_size = _env_int("TDC_AI_MAX_CHUNK_SIZE")
         if max_chunk_size is not None:
             data["max_chunk_size"] = max_chunk_size
```
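For context, the precedence this adds can be sketched in isolation. The sketch below mimics the new `from_env` override with a plain pydantic model; `BaseConfigModel`'s actual scaffolding is not shown in the diff, so the class name and everything other than the `llm_api_base`/`llm_api_key` handling are assumptions.

```python
import os

from pydantic import BaseModel, Field


class AiConfigSketch(BaseModel):
    """Illustrative stand-in for AiConfig; BaseConfigModel internals are assumed."""

    llm_api_base: str | None = Field(None, description="Override LLM API base URL")
    llm_api_key: str | None = Field(
        None, description="Override LLM API key (takes precedence over provider-specific env vars)"
    )

    @classmethod
    def from_env(cls) -> "AiConfigSketch":
        data: dict = {}
        # The base URL is only written when the env var is set and non-empty.
        if llm_api_base := os.getenv("TDC_AI_LLM_API_BASE"):
            data["llm_api_base"] = llm_api_base
        # The key is always written; None means "let LiteLLM fall back to
        # provider-specific env vars" (e.g. OPENAI_API_KEY).
        data["llm_api_key"] = os.environ.get("TDC_AI_LLM_API_KEY")
        return cls(**data)


if __name__ == "__main__":
    os.environ["TDC_AI_LLM_API_KEY"] = "sk-example"  # hypothetical value
    cfg = AiConfigSketch.from_env()
    assert cfg.llm_api_key == "sk-example"
    assert cfg.llm_api_base is None
```

One reviewer note on the asymmetry: `llm_api_base` is only written when the env var is non-empty (walrus plus truthiness check), while `llm_api_key` is always written, possibly as `None`. Since `None` matches the field default, behavior is equivalent for an unset variable, but an empty-string `TDC_AI_LLM_API_KEY` would be stored and passed through as an (empty) override rather than being ignored.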
src/tdoc-ai/tdoc_ai/operations/summarize.py +8 −11

```diff
@@ -5,7 +5,6 @@ from __future__ import annotations
 import hashlib
 import json
 import logging
-import os
 import re
 
 import litellm
@@ -19,11 +18,6 @@ from tdoc_crawler.tdocs.sources.whatthespec import resolve_via_whatthespec
 from tdoc_crawler.utils.misc import utc_now
 
 logger = logging.getLogger(__name__)
-_ = None
-
-# Summary settings
-_ = 150
-_ = 250
 
 # Input size limits for LLM prompts
 ABSTRACT_INPUT_LIMIT = 5000
@@ -124,18 +118,21 @@ class LiteLLMClient:
         Returns:
             Generated text.
         """
-        try:
-            # Check for TDC_AI_LLM_API_KEY - takes precedence over provider-specific keys
-            api_key = os.environ.get("TDC_AI_LLM_API_KEY")
+        cfg = AiConfig.from_env()
+        if cfg.llm_api_base is not None and cfg.llm_api_key is None:
+            msg = f"LLM API base URL is set ({cfg.llm_api_base}) but API key is missing. Please set TDC_AI_LLM_API_KEY environment variable."
+            logger.warning(msg)
+
+        try:
             response = litellm.completion(
-                model=model or AiConfig.from_env().llm_model,
+                model=model or cfg.llm_model,
                 messages=[
                     {"role": "system", "content": system_prompt},
                     {"role": "user", "content": prompt},
                 ],
                 max_tokens=max_tokens,
-                api_key=api_key,  # Pass TDC_AI_LLM_API_KEY if set, otherwise None (LiteLLM uses provider-specific env vars)
+                api_key=cfg.llm_api_key,
+                base_url=cfg.llm_api_base,
             )
             return response.choices[0].message.content
         except Exception as e:
```
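To see the new call path end to end, here is a hedged usage sketch. The function name and prompt strings are illustrative, and the `tdoc_ai.config` import path is an assumption from the file layout; the config read, the missing-key warning, and the `litellm.completion` arguments mirror the diff.

```python
import logging

import litellm

from tdoc_ai.config import AiConfig  # assumed import path for the config module above

logger = logging.getLogger(__name__)


def generate_sketch(
    system_prompt: str,
    prompt: str,
    max_tokens: int = 512,
    model: str | None = None,
) -> str:
    """Illustrative wrapper mirroring LiteLLMClient's new generate path."""
    # Read the config once instead of calling AiConfig.from_env() inline.
    cfg = AiConfig.from_env()
    if cfg.llm_api_base is not None and cfg.llm_api_key is None:
        # A custom/proxy base URL usually needs an explicit key; warn but proceed.
        msg = (
            f"LLM API base URL is set ({cfg.llm_api_base}) but API key is missing. "
            "Please set TDC_AI_LLM_API_KEY environment variable."
        )
        logger.warning(msg)

    response = litellm.completion(
        model=model or cfg.llm_model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ],
        max_tokens=max_tokens,
        # None lets LiteLLM fall back to provider-specific env vars.
        api_key=cfg.llm_api_key,
        base_url=cfg.llm_api_base,
    )
    return response.choices[0].message.content
```

With neither env var set, `api_key` and `base_url` are both `None` and LiteLLM resolves credentials from provider-specific variables such as `OPENAI_API_KEY`. With only `TDC_AI_LLM_API_BASE` set, the call still proceeds after the warning rather than failing hard, which keeps misconfigured proxy setups debuggable.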