import os
import random
import shutil
import string

import gradio as gr
from dotenv import load_dotenv
from langchain import HuggingFaceHub
from langchain.embeddings import HuggingFaceEmbeddings
from llama_index import (
    LangchainEmbedding,
    LLMPredictor,
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)

# Pull configuration (API token, model ids) from a local .env file.
load_dotenv()

HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
repo_id = os.getenv("repo_id")        # Hugging Face Hub model used for generation
model_name = os.getenv("model_name")  # sentence-embedding model used for retrieval
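# A minimal example .env for reference; the concrete values below are
# illustrative assumptions, not taken from this repository:
#
#   HUGGINGFACEHUB_API_TOKEN=hf_...
#   repo_id=HuggingFaceH4/starchat-beta   # would match the eos_token_id=49155 used below
#   model_name=sentence-transformers/all-mpnet-base-v2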

def generate_random_string(length):
    """Return a random lowercase string, used as a scratch directory name."""
    letters = string.ascii_lowercase
    return ''.join(random.choice(letters) for _ in range(length))

def process_documents_and_query(pdf_files, question):
    # Stage the uploaded PDFs in a fresh scratch directory so that
    # SimpleDirectoryReader can load them all in one pass.
    directory_path = generate_random_string(20)
    os.makedirs(directory_path)

    for pdf_file in pdf_files:
        # Gradio's File component hands back temp files; .name is their path on disk.
        shutil.copy(pdf_file.name, os.path.join(directory_path, os.path.basename(pdf_file.name)))

    # Parse the staged PDFs into llama_index Document objects.
    documents = SimpleDirectoryReader(directory_path).load_data()

    # Wrap the LangChain sentence-transformer embeddings for llama_index.
    embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name=model_name))

    # Hosted LLM on the Hugging Face Hub; a low temperature keeps answers close
    # to the retrieved context, and eos_token_id=49155 is model-specific.
    llm = HuggingFaceHub(repo_id=repo_id,
                         huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
                         model_kwargs={"min_length": 512, "max_new_tokens": 5632,
                                       "do_sample": True, "temperature": 0.1,
                                       "top_k": 50, "top_p": 0.95, "eos_token_id": 49155})

    llm_predictor = LLMPredictor(llm=llm)

    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, embed_model=embed_model)

    # Embed and index the uploaded documents.
    new_index = VectorStoreIndex.from_documents(documents, service_context=service_context)

    if question:
        # Persist the index to the scratch directory, reload it, and answer
        # the question through a query engine over the loaded index.
        new_index.storage_context.persist(persist_dir=directory_path)
        storage_context = StorageContext.from_defaults(persist_dir=directory_path)
        loaded_index = load_index_from_storage(storage_context=storage_context, service_context=service_context)
        query_engine = loaded_index.as_query_engine()
        initial_response = query_engine.query(question)
        return str(initial_response)

    return "Please enter a question."

gr_interface = gr.Interface(
    fn=process_documents_and_query,
    inputs=[
        gr.File(label="Upload PDF files", file_count="multiple", file_types=[".pdf"]),
        gr.Textbox(label="Enter your query here:"),
    ],
    outputs=gr.Textbox(label="AI Response"),
    title="AI Doc-Chat",
    description="Upload PDF files and ask questions!",
    allow_flagging="never",
)

gr_interface.launch()
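# To try the app locally (a sketch; the package pins are assumptions matching
# the pre-0.9 llama_index / legacy langchain imports used above):
#
#   pip install gradio "llama-index<0.9" langchain python-dotenv \
#       huggingface_hub sentence-transformers pypdf
#   python app.py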