# Source: trygithubactions/my_agent/utils/models_loader.py
# Author: subashpoudel
# Commit be3a5c4 — "Implemented the workflow and integrated in fast api" (613 bytes)
"""Model loader: initializes shared embedding and chat-model singletons.

At import time this module:
  1. loads environment variables from a local .env file (python-dotenv),
  2. logs in to the Hugging Face Hub using HUGGINGFACEHUB_ACCESS_TOKEN,
  3. exposes ``ST`` (a SentenceTransformer embedder) and ``llm`` (a Groq chat model).

Required environment variables (via .env or the process environment):
  HUGGINGFACEHUB_ACCESS_TOKEN -- Hugging Face Hub token passed to ``login()``.
  GROQ_API_KEY                -- API key read by ``ChatGroq`` from the environment.
"""
import os

from dotenv import load_dotenv
from huggingface_hub import login
from langchain_groq import ChatGroq
from langchain_huggingface import HuggingFaceEndpoint  # noqa: F401 -- unused here; kept in case importers rely on it
from sentence_transformers import SentenceTransformer

# Populate os.environ from a local .env file before any token is read
# (no-op if the file is absent).
load_dotenv()


def _require_env(name: str) -> str:
    """Return the value of environment variable *name*, failing fast if unset.

    The original ``os.environ[name] = os.getenv(name)`` raises an opaque
    ``TypeError: str expected, not NoneType`` when the variable is missing;
    this raises a clear, actionable error instead.
    """
    value = os.getenv(name)
    if not value:
        raise EnvironmentError(f"Required environment variable {name} is not set")
    # Keep the explicit re-export into os.environ for downstream consumers,
    # matching the original behavior.
    os.environ[name] = value
    return value


# Authenticate with the Hugging Face Hub (module-level side effect, as before).
login(_require_env('HUGGINGFACEHUB_ACCESS_TOKEN'))

# ChatGroq reads GROQ_API_KEY from the environment; validate it is present.
_require_env('GROQ_API_KEY')

# Shared embedding model (downloads weights on first use if not cached).
ST = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")

# Deterministic (temperature=0) Groq-hosted Llama 3 8B chat model with
# unbounded output length, no client-side timeout, and up to 2 retries.
llm = ChatGroq(
    model="llama3-8b-8192",
    temperature=0,
    max_tokens=None,
    timeout=None,
    max_retries=2,
)