Katya Beresneva committed on
Commit ·
b75609c
1
Parent(s): 364795f
fix
Browse files- requirements.txt +2 -1
- utils.py +28 -0
requirements.txt
CHANGED
|
@@ -5,4 +5,5 @@ langchain-core>=0.1.0
|
|
| 5 |
langchain-community>=0.0.1
|
| 6 |
pydantic==2.0
|
| 7 |
python-dotenv
|
| 8 |
-
smolagents
|
|
|
|
|
|
| 5 |
langchain-community>=0.0.1
|
| 6 |
pydantic==2.0
|
| 7 |
python-dotenv
|
| 8 |
+
smolagents
|
| 9 |
+
langchain-google-genai
|
utils.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional
|
| 2 |
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 3 |
+
|
def get_llm(
    llm_provider_api_key: str,
    model_name: Optional[str] = None,
    temperature: float = 0.7,
    max_tokens: Optional[int] = None
) -> ChatGoogleGenerativeAI:
    """
    Initialize and return a Google Generative AI language model.

    Args:
        llm_provider_api_key: Google API key used to authenticate requests.
        model_name: Name of the Gemini model to use. When None, falls back
            to "gemini-pro" so the function is callable with only an API
            key (default: None).
        temperature: Sampling temperature (default: 0.7).
        max_tokens: Maximum number of tokens to generate; None defers to
            the provider's own limit (default: None).

    Returns:
        ChatGoogleGenerativeAI: Initialized language model.
    """
    # ChatGoogleGenerativeAI requires a concrete model id; passing None
    # (the previous behavior with the default) fails validation at
    # construction time, so substitute a sensible default model here.
    return ChatGoogleGenerativeAI(
        google_api_key=llm_provider_api_key,
        model=model_name or "gemini-pro",
        temperature=temperature,
        max_output_tokens=max_tokens,
        # Gemini models historically rejected system messages; converting
        # them to human turns keeps generic LangChain prompts working.
        convert_system_message_to_human=True
    )
|