"""Embed a Medicare FAQ corpus via the Hugging Face Inference API and answer
one question by semantic (cosine) search over precomputed embeddings."""

import os

import dotenv
import numpy
import pandas
import requests
import torch
from datasets import load_dataset
from sentence_transformers.util import semantic_search

dotenv.load_dotenv(dotenv.find_dotenv())
# NOTE(review): the environment variable is literally named "YOUR_TOKEN" --
# presumably a tutorial placeholder for the real HF token variable; confirm.
HF_TOKEN = os.environ['YOUR_TOKEN']


def query(api_url, headers, texts):
    """POST *texts* to the feature-extraction endpoint and return parsed JSON.

    Parameters
    ----------
    api_url : str
        Full Inference API pipeline URL for the embedding model.
    headers : dict
        Request headers, including the Bearer authorization token.
    texts : list[str]
        Sentences to embed; ``wait_for_model`` blocks until the model loads.

    Raises
    ------
    requests.HTTPError
        On a non-2xx response. (Fix: previously an API error payload was
        returned as a dict and crashed much later inside pandas/torch.)
    """
    response = requests.post(
        api_url,
        headers=headers,
        json={"inputs": texts, "options": {"wait_for_model": True}},
    )
    response.raise_for_status()
    return response.json()


def main():
    """Embed the FAQ texts, persist them to CSV, then run one semantic query."""
    model_id = "sentence-transformers/all-MiniLM-L6-v2"
    api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_id}"
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}

    texts = [
        "How do I get a replacement Medicare card?",
        "What is the monthly premium for Medicare Part B?",
        "How do I terminate my Medicare Part B (medical insurance)?",
        "How do I sign up for Medicare?",
        "Can I sign up for Medicare Part B if I am working and have health insurance through an employer?",
        "How do I sign up for Medicare Part B if I already have Part A?",
        "What are Medicare late enrollment penalties?",
        "What is Medicare and who can get it?",
        "How can I get help with my Medicare Part A and Part B premiums?",
        "What are the different parts of Medicare?",
        "Will my Medicare premiums be higher because of my higher income?",
        "What is TRICARE ?",
        "Should I sign up for Medicare Part B if I have Veterans' Benefits?",
    ]

    # Embed the corpus and persist it for reuse.
    output = query(api_url, headers, texts)
    embeddings = pandas.DataFrame(output)
    embeddings.to_csv("embeddings.csv", index=False)

    # NOTE(review): the search below uses embeddings loaded from the HF hub
    # dataset, not the ones just computed above -- presumably the same
    # vectors uploaded earlier; verify the dataset matches `texts` order.
    faqs_embeddings = load_dataset('ricitos2001/OMEGAI')
    dataset_embeddings = torch.from_numpy(
        faqs_embeddings["train"].to_pandas().to_numpy()
    ).to(torch.float)

    # Embed the question and retrieve the 5 closest FAQ entries.
    question = ["How can Medicare help me?"]
    output = query(api_url, headers, question)
    query_embeddings = torch.FloatTensor(output)

    hits = semantic_search(query_embeddings, dataset_embeddings, top_k=5)
    # `corpus_id` indexes into `texts`; map each hit back to its sentence.
    print([texts[hit['corpus_id']] for hit in hits[0]])


if __name__ == "__main__":
    main()