Spaces:
Sleeping
Sleeping
File size: 1,592 Bytes
4327cda |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 |
import os

from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
class ChatModel:
    """Chat client for a drying-items assistant, served through OpenRouter.

    Wraps a LangChain ``ChatOpenAI`` client pointed at the OpenRouter API
    and prepends a fixed system prompt that scopes the assistant to
    drying-related requests.
    """

    def __init__(self):
        # Pull OPENROUTER_API_KEY (and any other settings) from a .env file.
        load_dotenv()
        self.model = ChatOpenAI(
            model_name="google/gemini-2.0-flash-lite-preview-02-05:free",
            # Route requests through OpenRouter instead of api.openai.com.
            openai_api_base="https://openrouter.ai/api/v1",
            openai_api_key=os.getenv("OPENROUTER_API_KEY"),
            temperature=0.7,
        )
        self.system_prompt = """You are a helpful assistant focused on drying items.
Your role is to help users understand how to dry different items and show them
what items would look like when dry. You should:
1. Ask for item descriptions if not provided
2. Only process drying-related requests
3. Maintain conversation context
4. Be helpful and informative about drying processes
"""

    def get_response(self, message: str, chat_history: list = None) -> str:
        """
        Get a response from the chat model.

        Args:
            message: User's message.
            chat_history: Optional list of previous turns. Each entry is
                either a bare string (treated as a user message, the
                original behavior) or a ``(role, content)`` pair where a
                role of ``"assistant"`` is replayed as an AI message and
                anything else as a user message. Without role information
                assistant turns would be replayed as user turns, corrupting
                the conversation context.

        Returns:
            The model's response text.
        """
        messages = [SystemMessage(content=self.system_prompt)]
        for entry in chat_history or []:
            if isinstance(entry, (tuple, list)) and len(entry) == 2:
                role, content = entry
                if role == "assistant":
                    messages.append(AIMessage(content=content))
                else:
                    messages.append(HumanMessage(content=content))
            else:
                # Bare string: backward-compatible path, same as before.
                messages.append(HumanMessage(content=entry))
        messages.append(HumanMessage(content=message))
        response = self.model(messages)
        return response.content