from langchain_ollama import ChatOllama
from langchain_core.messages import SystemMessage
def llm(
    model_name: str,
    messages: list[dict | SystemMessage | str],
    temperature: float = 0.1,
    max_tokens: int = 1024,
    **kwargs,
) -> str:
"""
Calls the Ollama Chat model and returns the generated response content.
"""
    try:
        print("[NODE] ----- Calling Ollama Chat -----")
        # Construct the prompt with explicit separation for each message,
        # handling role dicts, SystemMessage objects, and plain strings.
        prompt_parts = []
        for message in messages:
            if isinstance(message, dict):
                prompt_parts.append(f"{message['role'].capitalize()}: {message['content']}")
            elif isinstance(message, SystemMessage):
                prompt_parts.append(f"System: {message.content}")
            else:
                prompt_parts.append(message)
        prompt = "\n\n".join(prompt_parts)  # Blank line between parts for clarity
        print(f"Constructed Prompt:\n{prompt}")
        chat = ChatOllama(
            model=model_name,
            temperature=temperature,
            num_predict=max_tokens,  # ChatOllama exposes the token limit as num_predict, not max_tokens
            **kwargs,
        )
        response = chat.invoke(prompt)
        # Guard before touching response.content so a missing response
        # cannot raise an AttributeError.
        if not response or not response.content:
            print("No content returned from the Ollama Chat model.")
            return "No content generated."
        print("----- Ollama Chat response -----")
        print(response.content)
        return response.content
    except Exception as e:
        print(f"An error occurred while calling the Ollama Chat model: {e}")
        raise
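

# --- Usage sketch (illustrative) ---
# A minimal example of calling llm(). The model name "llama3.1" and the
# example messages are assumptions; running this requires a local Ollama
# server with that model already pulled.
if __name__ == "__main__":
    example_messages = [
        SystemMessage(content="You are a concise assistant."),
        {"role": "user", "content": "Summarize what LangChain is in one sentence."},
    ]
    print(llm("llama3.1", example_messages))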