|
|
import os |
|
|
import numpy as np |
|
|
from langchain_groq import ChatGroq |
|
|
from langchain_openai import ChatOpenAI |
|
|
from langchain_google_genai import ChatGoogleGenerativeAI |
|
|
from langchain_anthropic import ChatAnthropic |
|
|
from langchain_openai import OpenAIEmbeddings |
|
|
from huggingface_hub import login |
|
|
from dotenv import load_dotenv |
|
|
import tiktoken |
|
|
|
|
|
# Load environment variables (API keys, tokens) from a local .env file.
load_dotenv()

# Fetch the Hugging Face token explicitly. os.getenv returns None when the
# variable is missing, and the original `os.environ[...] = os.getenv(...)`
# assignment would then raise an opaque TypeError (environ values must be
# strings). Fail fast with an actionable message instead.
_hf_token = os.getenv('HUGGINGFACEHUB_ACCESS_TOKEN')
if not _hf_token:
    raise EnvironmentError(
        "HUGGINGFACEHUB_ACCESS_TOKEN is not set; add it to your environment "
        "or .env file before running."
    )
os.environ['HUGGINGFACEHUB_ACCESS_TOKEN'] = _hf_token

# Authenticate with the Hugging Face Hub (needed for gated models/uploads).
login(_hf_token)
|
|
|
|
|
|
|
|
# --- Shared embedding model -------------------------------------------------
# 1536-dimension embeddings; the dimension is pinned explicitly so any
# downstream vector store can rely on a fixed width.
embedding_model = OpenAIEmbeddings(
    model="text-embedding-3-small",
    dimensions=1536,
)

# --- Chat model clients -----------------------------------------------------
llm_anthropic = ChatAnthropic(
    model='claude-3-7-sonnet-latest',
    temperature=1,
)

llm_gemini = ChatGoogleGenerativeAI(
    model="gemini-1.5-flash",
)

# Groq-hosted chat models.
llm_groq_openai = ChatGroq(
    model="openai/gpt-oss-120b",
    temperature=0.7,
)
llm_groq = ChatGroq(
    model="llama-3.3-70b-versatile",
    temperature=0,
)

# OpenAI chat tiers.
# NOTE(review): llm_gpt_small and llm_gpt are configured identically —
# confirm whether llm_gpt_small was meant to point at a cheaper model.
llm_gpt_small = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.3)
llm_gpt = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.3)
llm_gpt_high = ChatOpenAI(model="gpt-5-nano", temperature=0.5)
|
|
|
|
|
# Name of the tiktoken encoding used for token counting.
# Fix: the original placeholder value 'encoding_model' is not a valid
# tiktoken encoding name — tiktoken.get_encoding('encoding_model') raises
# ValueError. 'cl100k_base' is the encoding used by gpt-3.5-turbo and the
# text-embedding-3-* family configured in this file.
encoding_model = 'cl100k_base'
|
|
|
|
|
|
|
|
# Model id used for image captioning (presumably served via Groq — the id
# matches Groq's provider/model format; verify against the calling code).
captioning_model = "meta-llama/llama-4-scout-17b-16e-instruct"


# Hugging Face model id used for image generation (FLUX.1 "schnell" variant).
image_generation_model = "black-forest-labs/FLUX.1-schnell"
|
|
|
|
|
# --- Agent-role → LLM wiring -------------------------------------------------
# Each pipeline role is bound to one of the chat clients defined above.
# Swap an assignment here to retune an individual role's cost/quality.
ideator_llm = llm_gpt_high  # highest-tier client of the three gpt variants


moderator_llm = llm_gpt


critic_llm = llm_gpt


simplifier_llm = llm_gpt


normalizer_llm = llm_gpt


validator_llm = llm_gpt_small  # small-tier client (currently same config as llm_gpt)


judge1_llm = llm_gpt


judge2_llm = llm_gpt
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|