File size: 1,466 Bytes
6c655a3
 
be3a5c4
3f2f8aa
6874dac
6c655a3
 
be3a5c4
 
6f57d05
5c271a3
be3a5c4
 
 
5c271a3
be3a5c4
6c655a3
11bd168
fbc17f4
3002e1b
8ce97f0
3002e1b
a6a0614
 
 
6b61df1
 
6f57d05
6874dac
5c271a3
 
eb40d68
a6a0614
 
 
 
 
8ce97f0
a6a0614
 
5c271a3
 
 
 
 
 
 
 
 
eb40d68
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import os
import numpy as np
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_anthropic import ChatAnthropic
from langchain_openai import OpenAIEmbeddings
from huggingface_hub import login
from dotenv import load_dotenv
import tiktoken

# Load environment variables (API keys, tokens) from a local .env file.
load_dotenv()

# Authenticate with the Hugging Face Hub only when a token is actually
# configured. The previous unconditional `os.environ[...] = os.getenv(...)`
# raised TypeError ("str expected, not NoneType") whenever the variable was
# missing from both the process environment and the .env file, and then
# passed None to `login()`. Guarding keeps the module importable without
# the token while preserving the original behavior when it is set.
_hf_token = os.getenv('HUGGINGFACEHUB_ACCESS_TOKEN')
if _hf_token:
    os.environ['HUGGINGFACEHUB_ACCESS_TOKEN'] = _hf_token
    login(_hf_token)


# --- Model instances shared by the rest of the application ---------------
# Embedding model used for vector search / similarity (1536-dim vectors).
embedding_model = OpenAIEmbeddings(model="text-embedding-3-small", dimensions=1536)
# Anthropic chat model at maximum creativity (temperature=1).
llm_anthropic = ChatAnthropic(model='claude-3-7-sonnet-latest', temperature=1)
# Google Gemini chat model with default sampling settings.
llm_gemini = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
# OpenAI open-weight model served via Groq, moderately creative.
llm_groq_openai = ChatGroq(model="openai/gpt-oss-120b",temperature=0.7)
# Llama 3.3 via Groq, deterministic (temperature=0).
llm_groq = ChatGroq(model="llama-3.3-70b-versatile",temperature=0)

# NOTE(review): llm_gpt_small and llm_gpt are currently identical
# (same model, same temperature) — presumably llm_gpt_small was meant to
# point at a cheaper/smaller model; confirm intent before consolidating.
llm_gpt_small = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
llm_gpt = ChatOpenAI(model="gpt-3.5-turbo",temperature=0.3)
llm_gpt_high = ChatOpenAI(model="gpt-5-nano",temperature=0.5)
# encoding_model = tiktoken.encoding_for_model('gpt-4o-mini')
# NOTE(review): placeholder string, not a tiktoken Encoding — the real
# encoder above is commented out. Any token-counting code that consumes
# encoding_model will fail or misbehave until this is restored; verify
# whether downstream code special-cases the string before changing it.
encoding_model = 'encoding_model'


# Model identifier used for image captioning (Groq-hosted Llama 4 Scout).
captioning_model = "meta-llama/llama-4-scout-17b-16e-instruct"
# Model identifier used for text-to-image generation (FLUX.1 schnell).
image_generation_model = "black-forest-labs/FLUX.1-schnell"

# --- Pipeline role assignments -------------------------------------------
# Each agent role in the pipeline is an alias to one of the LLM instances
# above; swapping a backend for a role means changing one line here.
ideator_llm = llm_gpt_high      # idea generation: highest-capability model
moderator_llm = llm_gpt
critic_llm = llm_gpt
simplifier_llm = llm_gpt
normalizer_llm = llm_gpt
validator_llm = llm_gpt_small   # validation: cheaper model is sufficient
judge1_llm = llm_gpt
judge2_llm = llm_gpt