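# Fetch topic names ("temas") from the Applearnify API, embed them with the
# Hugging Face Inference API feature-extraction pipeline, and semantically
# search a user-supplied question against precomputed embeddings from the
# ricitos2001/ecope-dataset dataset.
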
import os
import torch
import pandas
import dotenv
import requests
from datasets import load_dataset
from sentence_transformers.util import semantic_search

dotenv.load_dotenv(dotenv.find_dotenv())
HF_TOKEN = os.environ['YOUR_TOKEN']
YOUR_API_KEY = os.environ['YOUR_API_KEY']
REQUEST_SUCCESSFUL = 200

def obtener_temas():
    """Return the friendly names of every 'temario' file across all subjects and topics."""
    headers = {"subdomain-X": "demotest", "X-Subdomain": "demotest", "Authorization": f"Bearer {YOUR_API_KEY}"}
    respuesta = requests.get("https://api.applearnify.es/api/subjects-with-topics", headers=headers)
    temas = []
    if respuesta.status_code == REQUEST_SUCCESSFUL:
        for subject in respuesta.json():
            for topic in subject["topics"]:
                for file in topic.get("files", []):
                    if file["type"] == "temario":
                        temas.append(file.get("friendly_name"))
    return temas
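
# Shape assumed for the subjects-with-topics response (inferred from the parsing
# above, not taken from API documentation):
# [
#   {"topics": [
#     {"files": [
#       {"type": "temario", "friendly_name": "..."}
#     ]}
#   ]}
# ]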

def query(api_url, headers, texts):
    """Request embeddings for `texts` from the Hugging Face Inference API feature-extraction pipeline."""
    response = requests.post(api_url, headers=headers, json={"inputs": texts, "options": {"wait_for_model": True}})
    if response.status_code == REQUEST_SUCCESSFUL:
        return response.json()
    raise RuntimeError(response.json())
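
# Illustrative usage (assumes the chosen model returns one pooled embedding per
# input string, which is what the rest of this script expects):
#   vectors = query(api_url, headers, ["first topic", "second topic"])
#   # vectors -> [[0.1, ...], [0.3, ...]]  (one embedding vector per text)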

def main():
    model_id = ""  # Hugging Face model id used for feature extraction; must be filled in
    api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_id}"
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}

    # Embed every topic name and keep a CSV copy of the embeddings.
    texts = obtener_temas()
    output = query(api_url, headers, texts)
    embeddings = pandas.DataFrame(output)
    embeddings.to_csv("embeddings.csv", index=False)

    # Load precomputed embeddings from the Hugging Face dataset as the search corpus.
    faqs_embeddings = load_dataset("ricitos2001/ecope-dataset")
    dataset_embeddings = torch.from_numpy(faqs_embeddings["train"].to_pandas().to_numpy()).to(torch.float)

    # Embed the user's question and print the top-5 most similar topics.
    question = [input("type something: ")]
    output = query(api_url, headers, question)
    query_embeddings = torch.FloatTensor(output)
    hits = semantic_search(query_embeddings, dataset_embeddings, top_k=5)
    print([texts[hit["corpus_id"]] for hit in hits[0]])

if __name__ == "__main__":
    main()
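
# Running this script requires a .env file (or environment) providing YOUR_TOKEN
# (a Hugging Face access token) and YOUR_API_KEY (the Applearnify API key), plus
# a non-empty model_id in main().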