# LLM factory helpers: build chat-model clients for OpenAI, Groq, Cohere,
# Together AI, and DeepInfra, configured from environment variables.
import os

from dotenv import load_dotenv
from langchain_cohere.llms import Cohere
from langchain_community.chat_models import ChatDeepInfra
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI
from langchain_together import ChatTogether

# Populate os.environ from a local .env file (API keys, temperature, top_p).
load_dotenv()
def chat_openai_llm():
    """Build a ChatOpenAI client for ``gpt-4o``.

    Reads ``temperature``, ``OPENAI_API_KEY`` and ``OPENAI_ORGANIZATION_ID``
    from the environment.

    Raises:
        KeyError: if any required environment variable is missing.
    """
    return ChatOpenAI(
        model_name="gpt-4o",
        # os.environ values are always str; the API expects a numeric temperature.
        temperature=float(os.environ["temperature"]),
        openai_api_key=os.environ["OPENAI_API_KEY"],
        openai_organization=os.environ["OPENAI_ORGANIZATION_ID"],
    )
def groq_chat(model: str):
    """Build a ChatGroq client for the given *model* name.

    Reads ``temperature`` and ``GROQ_API_KEY`` from the environment; replies
    are capped at 2000 tokens.

    Raises:
        KeyError: if a required environment variable is missing.
    """
    return ChatGroq(
        model_name=model,
        # Convert the env-var string to the numeric type the API expects.
        temperature=float(os.environ["temperature"]),
        groq_api_key=os.environ["GROQ_API_KEY"],
        max_tokens=2000,
    )
def cohere_llm():
    """Build a Cohere completion client for ``command-r-plus``.

    Reads ``temperature`` from the environment; replies are capped at
    2048 tokens. The API key is resolved by the Cohere client itself
    (``COHERE_API_KEY``).

    Raises:
        KeyError: if the ``temperature`` environment variable is missing.
    """
    return Cohere(
        model="command-r-plus",
        max_tokens=2048,
        # Environment variables are strings; temperature must be numeric.
        temperature=float(os.environ["temperature"]),
    )
def together_ai_chat(model, temperature):
    """Build a ChatTogether client.

    Args:
        model: Together AI model identifier.
        temperature: sampling temperature (numeric).

    Reads ``TOGETHER_AI_API_KEY`` and ``top_p`` from the environment;
    replies are capped at 4096 tokens and stop at the ``%%%%`` marker.

    Raises:
        KeyError: if a required environment variable is missing.
    """
    return ChatTogether(
        model_name=model,
        together_api_key=os.environ["TOGETHER_AI_API_KEY"],
        temperature=temperature,
        # top_p comes from the environment as a string; the API expects a float.
        top_p=float(os.environ["top_p"]),
        max_tokens=4096,
        model_kwargs={"stop": ["%%%%"]},
    )
def deepinfra_chat(model, temperature):
    """Build a ChatDeepInfra client.

    Args:
        model: DeepInfra model identifier.
        temperature: sampling temperature (numeric).

    Reads ``DEEPINFRA_API_KEY`` and ``top_p`` from the environment;
    replies are capped at 4096 tokens and stop at the ``%%%%`` marker.

    Raises:
        KeyError: if a required environment variable is missing.
    """
    return ChatDeepInfra(
        model=model,
        deepinfra_api_token=os.environ["DEEPINFRA_API_KEY"],
        temperature=temperature,
        # top_p comes from the environment as a string; the API expects a float.
        top_p=float(os.environ["top_p"]),
        max_tokens=4096,
        model_kwargs={"stop": ["%%%%"]},
    )