import os

import numpy as np
import requests
from dotenv import load_dotenv
from huggingface_hub import login
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI
# from sentence_transformers import SentenceTransformer
# from huggingface_hub import InferenceClient
# from langchain_huggingface import HuggingFaceEndpoint

load_dotenv()
# Read API credentials from the environment (populated by load_dotenv above)
os.environ['HUGGINGFACEHUB_ACCESS_TOKEN'] = os.getenv('HUGGINGFACEHUB_ACCESS_TOKEN')
login(os.environ['HUGGINGFACEHUB_ACCESS_TOKEN'])
os.environ['GROQ_API_KEY'] = os.getenv('GROQ_API_KEY')
llm = ChatGroq(
    model="llama3-8b-8192",
    temperature=0.3,
)
# llm = ChatOpenAI(
#     model="gpt-4o-mini",
#     temperature=0.3,
# )
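# Quick sanity check (hypothetical usage, not invoked by the app): ChatGroq
# implements the standard LangChain chat-model interface, so a smoke test
# could look like:
#   reply = llm.invoke("Reply with the single word: pong")
#   print(reply.content)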
class HFEmbeddingAPI:
    """Thin wrapper around the HF Inference API feature-extraction endpoint,
    exposing a SentenceTransformer-style encode() method."""

    def __init__(self, api_url, token):
        self.api_url = api_url
        # Use the token passed in (the original re-read the env var and
        # silently ignored this parameter)
        self.headers = {
            "Authorization": f"Bearer {token}",
        }

    def encode(self, texts):
        # Accept a single string or a list of strings, like SentenceTransformer.encode
        if isinstance(texts, str):
            texts = [texts]
        response = requests.post(
            self.api_url,
            headers=self.headers,
            json={"inputs": texts},
        )
        response.raise_for_status()
        embeddings = response.json()
        # Return a 1-D vector for a single input, a 2-D array otherwise
        return np.array(embeddings[0]) if len(embeddings) == 1 else np.array(embeddings)
# Instantiate the API-backed "SentenceTransformer"
ST = HFEmbeddingAPI(
    api_url="https://router.huggingface.co/hf-inference/models/mixedbread-ai/mxbai-embed-large-v1/pipeline/feature-extraction",
    token=os.environ.get('HUGGINGFACEHUB_ACCESS_TOKEN'),
)
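# Hypothetical usage of the embedding wrapper (illustrative texts only):
# encode two strings and compare them with cosine similarity via numpy.
#   vec_a = ST.encode("machine learning")
#   vec_b = ST.encode("deep learning")
#   cos_sim = np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))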
# Initialize the models for ideation
# ideator_llm = ChatGroq(
#     model="llama-3.1-8b-instant",
#     temperature=0.7,
#     max_tokens=500,
# )
# critic_llm = ChatGroq(
#     model="llama-3.3-70b-versatile",
#     temperature=0.7,
#     max_tokens=500,
# )
improver_llm = ChatOpenAI(
    model="gpt-4o-mini",
    temperature=0.7,
    max_tokens=500,
)
# improver_llm = ChatGroq(
#     model="llama3-8b-8192",
#     temperature=0.7,
#     max_tokens=500,
# )
# All four ideation roles currently share the same underlying model
ideator_llm = improver_llm
critic_llm = improver_llm
validator_llm = improver_llm
# validator_llm = ChatGroq(
#     model="llama-3.3-70b-versatile",
#     temperature=0.7,
#     max_tokens=500,
# )
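# Sketch of how the four roles might be chained (an assumption about the
# intended flow, not code from the original app):
#   idea = ideator_llm.invoke("Propose one product idea.").content
#   critique = critic_llm.invoke(f"Critique this idea:\n{idea}").content
#   improved = improver_llm.invoke(f"Rewrite the idea to address the critique:\n{idea}\n\n{critique}").content
#   verdict = validator_llm.invoke(f"Assess whether this idea is viable:\n{improved}").content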