Upload inference.py
inference.py (ADDED)
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import HuggingFacePipeline  # needed for HuggingFacePipeline below
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from transformers import pipeline
import torch

# Load the fine-tuned model as a text-generation pipeline (GPU if available, else CPU)
model_name = "PranavKeshav/upf-finetuned-model"
hf_pipeline = pipeline(
    "text-generation",
    model=model_name,
    device=0 if torch.cuda.is_available() else -1,
)
llm = HuggingFacePipeline(pipeline=hf_pipeline)

# Load the FAISS vector store and wrap it in a RetrievalQA chain
vectorstore = FAISS.load_local("faiss_store", HuggingFaceEmbeddings())
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vectorstore.as_retriever(),
)

def handler(input_data):
    # Expects a dict like {"query": "..."} and returns the chain's answer
    query = input_data.get("query", "")
    return qa_chain.run(query)
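The script expects a FAISS index to already exist in a `faiss_store` folder; it is loaded, not built, here. A minimal sketch of how such an index could be created and saved with the same embeddings is shown below; the document texts are placeholders, not taken from this repository.

from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

# Placeholder documents; in practice these would be the project's own source texts
texts = ["example document text one", "example document text two"]

embeddings = HuggingFaceEmbeddings()
store = FAISS.from_texts(texts, embeddings)
store.save_local("faiss_store")  # produces the folder that inference.py loads

For a quick local check, the handler can be called directly with a dict payload. This is only an illustrative invocation; the query string is made up and not part of the upload.

if __name__ == "__main__":
    # Smoke test: pass a query through the RetrievalQA chain and print the answer
    print(handler({"query": "example question"}))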