Update TextGen/router.py
Browse files — TextGen/router.py (+2 −2)
TextGen/router.py
CHANGED
|
@@ -130,7 +130,7 @@ def generate_text(messages: List[str], npc:str):
|
|
| 130 |
print(new_messages)
|
| 131 |
# Initialize the LLM
|
| 132 |
llm = ChatGoogleGenerativeAI(
|
| 133 |
-
model="gemini-2.5-pro
|
| 134 |
max_output_tokens=100,
|
| 135 |
temperature=1,
|
| 136 |
safety_settings={
|
|
@@ -152,7 +152,7 @@ def inference_model(system_messsage, prompt):
|
|
| 152 |
|
| 153 |
new_messages=[{"role": "user", "content": system_messsage},{"role": "user", "content": prompt}]
|
| 154 |
llm = ChatGoogleGenerativeAI(
|
| 155 |
-
model="gemini-2.5-pro
|
| 156 |
max_output_tokens=100,
|
| 157 |
temperature=1,
|
| 158 |
safety_settings={
|
|
|
|
| 130 |
print(new_messages)
|
| 131 |
# Initialize the LLM
|
| 132 |
llm = ChatGoogleGenerativeAI(
|
| 133 |
+
model="gemini-2.5-pro",
|
| 134 |
max_output_tokens=100,
|
| 135 |
temperature=1,
|
| 136 |
safety_settings={
|
|
|
|
| 152 |
|
| 153 |
new_messages=[{"role": "user", "content": system_messsage},{"role": "user", "content": prompt}]
|
| 154 |
llm = ChatGoogleGenerativeAI(
|
| 155 |
+
model="gemini-2.5-pro",
|
| 156 |
max_output_tokens=100,
|
| 157 |
temperature=1,
|
| 158 |
safety_settings={
|