# GenAI-FASTAPI / utils/models_loader.py
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI
# from sentence_transformers import SentenceTransformer
# from huggingface_hub import InferenceClient
from huggingface_hub import login
from dotenv import load_dotenv
import os
import requests
import numpy as np
# from langchain_huggingface import HuggingFaceEndpoint

# Load variables from .env, then authenticate with the Hugging Face Hub and Groq.
load_dotenv()
os.environ['HUGGINGFACEHUB_ACCESS_TOKEN'] = os.getenv('HUGGINGFACEHUB_ACCESS_TOKEN')
login(os.environ['HUGGINGFACEHUB_ACCESS_TOKEN'])
os.environ['GROQ_API_KEY'] = os.getenv('GROQ_API_KEY')
# Primary chat model, served via Groq.
llm = ChatGroq(
    model="llama3-8b-8192",
    temperature=0.3,
)
# llm = ChatOpenAI(
# model="gpt-4o-mini",
# temperature=0.3,
# )
class HFEmbeddingAPI:
    """Minimal client for the Hugging Face Inference API feature-extraction endpoint.

    Acts as a drop-in replacement for SentenceTransformer.encode.
    """

    def __init__(self, api_url, token):
        self.api_url = api_url
        # Authenticate with the token passed in, falling back to the environment variable.
        self.headers = {
            "Authorization": f"Bearer {token or os.environ.get('HUGGINGFACEHUB_ACCESS_TOKEN')}",
        }

    def encode(self, texts):
        # Accept a single string or a list of strings, mirroring SentenceTransformer.encode.
        if isinstance(texts, str):
            texts = [texts]
        response = requests.post(
            self.api_url,
            headers=self.headers,
            json={"inputs": texts},
        )
        response.raise_for_status()
        embeddings = response.json()
        # Return a 1-D array for a single input, otherwise a 2-D array of embeddings.
        return np.array(embeddings[0]) if len(embeddings) == 1 else np.array(embeddings)
# Instantiate your API-backed "SentenceTransformer"
ST = HFEmbeddingAPI(
    api_url="https://router.huggingface.co/hf-inference/models/mixedbread-ai/mxbai-embed-large-v1/pipeline/feature-extraction",
    token=os.environ.get('HUGGINGFACEHUB_ACCESS_TOKEN'),
)
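# Illustrative usage of the API-backed encoder (a sketch only; the texts and the
# cosine-similarity scoring below are examples, not part of the application code):
#     query_vec = ST.encode("what is retrieval augmented generation?")   # 1-D array
#     doc_vecs = ST.encode(["first document", "second document"])        # 2-D array
#     scores = doc_vecs @ query_vec / (
#         np.linalg.norm(doc_vecs, axis=1) * np.linalg.norm(query_vec)
#     )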
# Initialize the models for the ideation pipeline
# ideator_llm = ChatGroq(
# model="llama-3.1-8b-instant",
# temperature=0.7,
# max_tokens=500,
# )
# critic_llm = ChatGroq(
# model="llama-3.3-70b-versatile",
# temperature=0.7,
# max_tokens=500,
# )
# improver_llm = ChatOpenAI(
# model="gpt-4o-mini",
# temperature=0.7,
# max_tokens=500,
# )
# A single Groq model currently backs all of the ideation roles.
improver_llm = ChatGroq(
    model="llama3-8b-8192",
    temperature=0.7,
    max_tokens=500,
)
ideator_llm = improver_llm
critic_llm = improver_llm
validator_llm = improver_llm
# validator_llm = ChatGroq(
# model="llama-3.3-70b-versatile",
# temperature=0.7,
# max_tokens=500,
# )
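# Minimal smoke test for the chat models (a sketch, not part of the FastAPI app).
# Assumes GROQ_API_KEY is set; the prompt below is illustrative only.
if __name__ == "__main__":
    reply = ideator_llm.invoke("Suggest one project idea in a single sentence.")
    print("Ideator reply:", reply.content)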