# Standard library
import os

# Third-party
import numpy as np
import requests
from dotenv import load_dotenv
from huggingface_hub import InferenceClient, login
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI
from sentence_transformers import SentenceTransformer

# from langchain_huggingface import HuggingFaceEndpoint

# Pull secrets from a local .env file into the process environment before
# any token lookups below.
load_dotenv()

# Fail fast with an explicit message: the original
# `os.environ[...] = os.getenv(...)` raised a cryptic
# `TypeError: str expected, not NoneType` whenever the variable was unset.
_hf_token = os.getenv('HUGGINGFACEHUB_ACCESS_TOKEN')
if _hf_token is None:
    raise RuntimeError(
        "HUGGINGFACEHUB_ACCESS_TOKEN is not set (check your .env file)"
    )
os.environ['HUGGINGFACEHUB_ACCESS_TOKEN'] = _hf_token
# Authenticate this process against the Hugging Face Hub.
login(_hf_token)

_groq_key = os.getenv('GROQ_API_KEY')
if _groq_key is None:
    raise RuntimeError("GROQ_API_KEY is not set (check your .env file)")
os.environ['GROQ_API_KEY'] = _groq_key


# Groq-hosted Llama 3.3 70B: low temperature for mostly-deterministic
# output, completions capped at 500 tokens.
_groq_llm_config = {
    "model": "llama-3.3-70b-versatile",
    "temperature": 0.3,
    "max_tokens": 500,
}
llm = ChatGroq(**_groq_llm_config)

# llm = ChatOpenAI(
#     model="gpt-4o-mini",
#     temperature=0,
#     max_tokens=500,
# )


class HFEmbeddingAPI:
    """Minimal stand-in for ``SentenceTransformer.encode`` that delegates to
    the Hugging Face Inference API (feature-extraction pipeline) over HTTP
    instead of running the model locally.
    """

    def __init__(self, api_url, token=None):
        """
        Args:
            api_url: Full URL of the feature-extraction endpoint.
            token: Hugging Face access token. Falls back to the
                ``HUGGINGFACEHUB_ACCESS_TOKEN`` environment variable when
                omitted.
        """
        self.api_url = api_url
        # Bug fix: the original accepted `token` but ignored it and always
        # read the environment variable. Honor the argument, env as fallback.
        token = token or os.environ.get('HUGGINGFACEHUB_ACCESS_TOKEN')
        self.headers = {
            "Authorization": f"Bearer {token}",
        }

    def encode(self, texts):
        """Embed one string or a list of strings.

        Returns:
            ``np.ndarray``: 1-D for a single input (or one-element list),
            2-D otherwise — matching the original return convention.

        Raises:
            requests.HTTPError: if the endpoint responds with a non-2xx
                status.
        """
        if isinstance(texts, str):
            texts = [texts]
        response = requests.post(
            self.api_url,
            headers=self.headers,
            json={"inputs": texts},
            timeout=60,  # don't hang forever on a stalled endpoint
        )
        response.raise_for_status()
        embeddings = response.json()
        return np.array(embeddings[0]) if len(embeddings) == 1 else np.array(embeddings)

# HF Inference API endpoint serving mxbai-embed-large-v1 feature extraction.
_EMBED_ENDPOINT = (
    "https://router.huggingface.co/hf-inference/models/"
    "mixedbread-ai/mxbai-embed-large-v1/pipeline/feature-extraction"
)

# API-backed stand-in for a local SentenceTransformer instance.
ST = HFEmbeddingAPI(
    api_url=_EMBED_ENDPOINT,
    token=os.environ.get('HUGGINGFACEHUB_ACCESS_TOKEN'),
)