Spaces:
Build error
Build error
File size: 4,355 Bytes
365d87f 349fbec c413127 365d87f 1603571 349fbec c413127 1603571 742cb9c 349fbec 365d87f 1603571 c413127 349fbec 3705b3d 349fbec c413127 365d87f 3705b3d 349fbec c413127 365d87f 349fbec 742cb9c bf58afb 3705b3d bf58afb 3705b3d bf58afb 742cb9c 1603571 365d87f 1603571 365d87f | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 | import logging
import os
from enum import Enum
from typing import Any, Optional
from dotenv import load_dotenv
from langchain_core.embeddings import Embeddings
from langchain_core.language_models.base import BaseLanguageModel
from langchain_openai import ChatOpenAI
from langchain_openai.embeddings import OpenAIEmbeddings
from src.testing_utils.echo_llm import EchoLLM
load_dotenv()
logger = logging.getLogger(__name__)
class Provider(str, Enum):
    """Supported LLM/embedding backends.

    Inherits from ``str`` so members compare equal to their string
    values and serialize transparently (e.g. in configs and logs).
    """

    VSEGPT = "vsegpt"
    OPENAI = "openai"
class ModelConfig:
    """
    Configuration class for loading chat language models.

    Provides one loader method per supported provider plus a
    provider-dispatching :meth:`get_llm` entry point.
    """

    def load_vsegpt(
        self,
        model: str = "vis-openai/gpt-4o-mini",
        temperature: float = 0.2,
        *args,
        **kwargs,
    ) -> ChatOpenAI:
        """Load a VSEGPT OpenAI-compatible model.

        Args:
            model: Model identifier from vsegpt.ru/Docs/Models.
            temperature: Sampling temperature (0.0 = deterministic).
            *args: Extra positional arguments forwarded to ChatOpenAI.
            **kwargs: Extra keyword arguments forwarded to ChatOpenAI.

        Returns:
            Configured language model instance.

        Raises:
            KeyError: If VSEGPT_API_BASE or VSEGPT_API_KEY is not set
                in the environment.
        """
        api_base = os.environ["VSEGPT_API_BASE"]
        api_key = os.environ["VSEGPT_API_KEY"]
        return ChatOpenAI(
            *args,
            base_url=api_base,
            model=model,
            api_key=api_key,
            temperature=temperature,
            **kwargs,
        )

    def load_openai(
        self, model: str = "gpt-4o-mini", temperature: float = 0.2
    ) -> ChatOpenAI:
        """Load an OpenAI model.

        Args:
            model: OpenAI model identifier (e.g. "gpt-4o-mini").
            temperature: Sampling temperature (0.0 = deterministic).

        Returns:
            Configured language model instance.

        Raises:
            KeyError: If OPENAI_API_KEY is not set in the environment.
        """
        api_key = os.environ["OPENAI_API_KEY"]
        return ChatOpenAI(model=model, api_key=api_key, temperature=temperature)

    def load_echo_llm(self) -> EchoLLM:
        """Load the test double LLM that echoes its input (for testing)."""
        return EchoLLM()

    def get_llm(
        self,
        provider: Provider,
        model_name: Optional[str] = None,
        temperature: float = 0.2,
    ) -> BaseLanguageModel:
        """Get an LLM for the given provider.

        Args:
            provider: Which backend to use (``Provider.VSEGPT`` or
                ``Provider.OPENAI``).
            model_name: Optional model name; falls back to a
                provider-specific default when None.
            temperature: Sampling temperature (0.0 = deterministic).

        Returns:
            Configured LLM instance.

        Raises:
            ValueError: If ``provider`` is not a known provider.
        """
        if provider == Provider.VSEGPT:
            model_name = model_name or "openai/gpt-4o-mini"
            logger.info("Using VSEGPT model: %s", model_name)
            return self.load_vsegpt(model=model_name, temperature=temperature)
        elif provider == Provider.OPENAI:
            model_name = model_name or "gpt-4o-mini"
            logger.info("Using OpenAI model: %s", model_name)
            return self.load_openai(model=model_name, temperature=temperature)
        else:
            # Bug fix: the original referenced the undefined name
            # `model_type` here, raising NameError instead of the
            # intended ValueError.
            raise ValueError(f"Unknown provider: {provider}")
class EmbeddingConfig:
    """
    Configuration class for loading embedding models.

    Provides one loader method per supported provider plus a
    provider-dispatching :meth:`get_embeddings` entry point.
    """

    def load_openai(self, model: str = "text-embedding-3-small") -> Embeddings:
        """Load an OpenAI embeddings model.

        Raises:
            KeyError: If OPENAI_API_KEY is not set in the environment.
        """
        api_key = os.environ["OPENAI_API_KEY"]
        return OpenAIEmbeddings(model=model, api_key=api_key)

    def load_vsegpt(self, model: str = "text-embedding-3-small") -> Embeddings:
        """Load a VSEGPT OpenAI-compatible embeddings model.

        Raises:
            KeyError: If VSEGPT_API_BASE or VSEGPT_API_KEY is not set
                in the environment.
        """
        api_base = os.environ["VSEGPT_API_BASE"]
        api_key = os.environ["VSEGPT_API_KEY"]
        return OpenAIEmbeddings(model=model, api_key=api_key, base_url=api_base)

    def get_embeddings(
        self,
        provider: Provider,
        model_name: str = "text-embedding-3-small",
        temperature: float = 0.2,  # unused; kept for interface compatibility
    ) -> Embeddings:
        """Get an embeddings model for the given provider.

        Args:
            provider: Which backend to use (``Provider.VSEGPT`` or
                ``Provider.OPENAI``).
            model_name: Embedding model name.
            temperature: Ignored — embeddings do not sample. Retained
                so existing callers passing it keep working.

        Returns:
            Configured embeddings instance.

        Raises:
            ValueError: If ``provider`` is not a known provider.
        """
        if provider == Provider.VSEGPT:
            logger.info("Using VSEGPT embedding model: %s", model_name)
            return self.load_vsegpt(model=model_name)
        elif provider == Provider.OPENAI:
            logger.info("Using OpenAI embedding model: %s", model_name)
            return self.load_openai(model=model_name)
        else:
            # Bug fix: the original referenced the undefined name
            # `model_type` here, raising NameError instead of the
            # intended ValueError.
            raise ValueError(f"Unknown provider: {provider}")
|