# trygithubactions / src/genai/utils/models_loader.py
# Source: subashpoudel — commit 6f57d05 ("Refined chatbot")
import os
import numpy as np
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_anthropic import ChatAnthropic
from langchain_openai import OpenAIEmbeddings
from huggingface_hub import login
from dotenv import load_dotenv
import tiktoken
# Pull provider API keys (OpenAI / Anthropic / Google / Groq / Hugging Face)
# from a local .env file into the process environment.
load_dotenv()

# Authenticate with the Hugging Face Hub only when a token is actually set.
# The original unconditionally did `os.environ[...] = os.getenv(...)`, which
# raises `TypeError: str expected, not NoneType` when the variable is absent
# (and would then call login(None)); guard first so a missing token is a
# clean no-op instead of an import-time crash.
_hf_token = os.getenv('HUGGINGFACEHUB_ACCESS_TOKEN')
if _hf_token:
    os.environ['HUGGINGFACEHUB_ACCESS_TOKEN'] = _hf_token
    login(_hf_token)

# Embedding model used for vector-store / retrieval work.
embedding_model = OpenAIEmbeddings(model="text-embedding-3-small", dimensions=1536)

# One chat-model handle per provider.
llm_anthropic = ChatAnthropic(model='claude-3-7-sonnet-latest', temperature=1)
llm_gemini = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
llm_groq_openai = ChatGroq(model="openai/gpt-oss-120b", temperature=0.7)
llm_groq = ChatGroq(model="llama-3.3-70b-versatile", temperature=0)

# OpenAI "tiers". NOTE(review): small/default are currently the identical
# model and temperature — distinct names are kept so call sites can be
# retargeted independently later; confirm this duplication is intentional.
llm_gpt_small = ChatOpenAI(model="gpt-4o-mini", temperature=0.3)
llm_gpt = ChatOpenAI(model="gpt-4o-mini", temperature=0.3)
llm_gpt_high = ChatOpenAI(model="gpt-4o-mini", temperature=0.5)

# Tokenizer matching the default OpenAI model, for token counting.
encoding_model = tiktoken.encoding_for_model('gpt-4o-mini')

# Plain string identifiers (not LangChain handles) consumed elsewhere.
captioning_model = "meta-llama/llama-4-scout-17b-16e-instruct"
image_generation_model = "black-forest-labs/FLUX.1-schnell"

# Role -> model assignments for the multi-agent pipeline; all currently
# point at the small/cheap model.
ideator_llm = llm_gpt_small
moderator_llm = llm_gpt_small
critic_llm = llm_gpt_small
simplifier_llm = llm_gpt_small
normalizer_llm = llm_gpt_small
validator_llm = llm_gpt_small
judge1_llm = llm_gpt_small
judge2_llm = llm_gpt_small