from langchain_ollama import ChatOllama
from langchain_core.messages import SystemMessage


def llm(
    model_name: str,
    messages: list[dict | SystemMessage | str],
    temperature: float = 0.1,
    max_tokens: int = 1024,
    **kwargs,
) -> str:
    """
    Calls the Ollama chat model and returns the generated response content.
    """
    try:
        print("[NODE] ----- Calling Ollama Chat -----")
        # Flatten the message list into a single prompt string, handling
        # role dicts, SystemMessage objects, and plain strings.
        prompt_parts = []
        for message in messages:
            if isinstance(message, dict):
                prompt_parts.append(f"{message['role'].capitalize()}: {message['content']}")
            elif isinstance(message, SystemMessage):
                prompt_parts.append(f"System: {message.content}")
            else:
                prompt_parts.append(message)
        prompt = "\n\n".join(prompt_parts)  # Blank line between messages for clarity
        print(f"Constructed Prompt:\n{prompt}")
        chat = ChatOllama(
            model=model_name,
            temperature=temperature,
            num_predict=max_tokens,  # Ollama's parameter name for the max-token limit
            **kwargs,
        )
        response = chat.invoke(prompt)
        # Guard before printing so a missing response can't raise AttributeError.
        if not response or not response.content:
            print("No content returned from the Ollama Chat model.")
            return "No content generated."
        print("----- Ollama Chat response -----")
        print(response.content)
        return response.content
    except Exception as e:
        print(f"An error occurred while calling the Ollama Chat model: {e}")
        raise
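

# Minimal usage sketch for the helper above. Assumptions (not in the original):
# a local Ollama server is running, and the "llama3" model has been pulled
# (`ollama pull llama3`) -- substitute whatever model name you have available.
if __name__ == "__main__":
    reply = llm(
        model_name="llama3",  # hypothetical model choice; any pulled Ollama model works
        messages=[
            SystemMessage(content="You are a concise technical assistant."),
            {"role": "user", "content": "Explain what temperature does in LLM sampling."},
        ],
    )
    print(reply)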