import os
import ast
from datetime import datetime
from typing import Type

import gradio as gr
import pandas as pd
from torch import float16, float32

from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM

from tools.faiss_store import FAISS
from tools.ingest import embed_faiss_save_to_zip, load_embeddings_model, get_faiss_store
from tools.helper_functions import get_connection_params, reveal_feedback_buttons, wipe_logs
from tools.aws_functions import upload_file_to_s3
from tools.auth import authenticate_user
from tools.config import (
    FEEDBACK_LOGS_FOLDER, ACCESS_LOGS_FOLDER, USAGE_LOGS_FOLDER, HOST_NAME,
    COGNITO_AUTH, INPUT_FOLDER, OUTPUT_FOLDER, MAX_QUEUE_SIZE,
    DEFAULT_CONCURRENCY_LIMIT, MAX_FILE_SIZE, GRADIO_SERVER_PORT, ROOT_PATH,
    DEFAULT_EMBEDDINGS_LOCATION, EMBEDDINGS_MODEL_NAME, DEFAULT_DATA_SOURCE,
    HF_TOKEN, LARGE_MODEL_REPO_ID, LARGE_MODEL_GGUF_FILE, LARGE_MODEL_NAME,
    SMALL_MODEL_NAME, SMALL_MODEL_REPO_ID, DEFAULT_DATA_SOURCE_NAME,
    DEFAULT_EXAMPLES, DEFAULT_MODEL_CHOICES, RUN_GEMINI_MODELS,
    LOAD_LARGE_MODEL, GEMINI_API_KEY,
)
from tools.model_load import torch_device, gpu_config, cpu_config, context_length
import tools.chatfuncs as chatf
import tools.ingest as ing

PandasDataFrame = Type[pd.DataFrame]

today_rev = datetime.now().strftime("%Y%m%d")

host_name = HOST_NAME
access_logs_data_folder = ACCESS_LOGS_FOLDER
feedback_data_folder = FEEDBACK_LOGS_FOLDER
usage_data_folder = USAGE_LOGS_FOLDER

# Config values may arrive as strings (e.g. from environment variables).
# Parse them with ast.literal_eval rather than eval, which would execute
# arbitrary code from the config.
if isinstance(DEFAULT_EXAMPLES, str):
    default_examples_set = ast.literal_eval(DEFAULT_EXAMPLES)
else:
    default_examples_set = DEFAULT_EXAMPLES

if isinstance(DEFAULT_MODEL_CHOICES, str):
    default_model_choices = ast.literal_eval(DEFAULT_MODEL_CHOICES)
else:
    default_model_choices = DEFAULT_MODEL_CHOICES
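
# Load the sentence-embeddings model once at startup and share it with the
# chat functions. The FAISS vector store starts empty and is populated when a
# data source is ingested, or when the saved default store is loaded.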
embeddings_model = load_embeddings_model(EMBEDDINGS_MODEL_NAME)
vectorstore = None

chatf.embeddings = embeddings_model


def docs_to_faiss_save(docs_out: PandasDataFrame, embeddings_model=embeddings_model):
    """Embed the split documents into a FAISS vector store and register it
    with the chat functions."""
    print(f"> Total split documents: {len(docs_out)}")
    print(docs_out)

    vectorstore_func = FAISS.from_documents(documents=docs_out, embedding=embeddings_model)
    chatf.vectorstore = vectorstore_func

    out_message = "Document processing complete"

    return out_message, vectorstore_func
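

# Hugging Face model loading. "flan" checkpoints are seq2seq models and need
# AutoModelForSeq2SeqLM; everything else is loaded as a causal LM. The HF
# token is passed only when one is configured, so public models still load
# without credentials.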
def create_hf_model(model_name: str, hf_token=HF_TOKEN):
    """Load a Hugging Face model and tokenizer, using the GPU when available."""
    model_kwargs = {"device_map": "auto"} if torch_device == "cuda" else {}

    if "flan" in model_name:
        # Flan checkpoints are seq2seq models and load without a token.
        model = AutoModelForSeq2SeqLM.from_pretrained(model_name, **model_kwargs)
    else:
        if hf_token:
            model_kwargs["token"] = hf_token
        model = AutoModelForCausalLM.from_pretrained(model_name, **model_kwargs)

    tokenizer_kwargs = {"token": hf_token} if hf_token else {}
    tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=context_length, **tokenizer_kwargs)

    return model, tokenizer
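

# Dispatch between the large llama.cpp GGUF model, the small Hugging Face
# model, and remote API models (selected by name), and register whichever is
# loaded with the chat functions.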
def load_model(model_type: str, gpu_layers: int, gpu_config: dict = gpu_config, cpu_config: dict = cpu_config, torch_device: str = torch_device):
    print("Loading model")

    if model_type == LARGE_MODEL_NAME:
        if torch_device == "cuda":
            gpu_config.update_gpu(gpu_layers)
            print("Loading with", gpu_config.n_gpu_layers, "model layers sent to GPU.")
        else:
            gpu_config.update_gpu(gpu_layers)
            cpu_config.update_gpu(gpu_layers)
            print("Loading with", cpu_config.n_gpu_layers, "model layers sent to GPU.")

        # Import outside the try block so the CPU fallback below can still
        # reference Llama if the GPU load fails.
        from llama_cpp import Llama

        try:
            model = Llama(
                model_path=hf_hub_download(
                    repo_id=LARGE_MODEL_REPO_ID,
                    filename=LARGE_MODEL_GGUF_FILE,
                ),
                **vars(gpu_config),
            )
        except Exception as e:
            print("GPU load failed", e, "loading CPU version instead")
            model = Llama(
                model_path=hf_hub_download(
                    repo_id=LARGE_MODEL_REPO_ID,
                    filename=LARGE_MODEL_GGUF_FILE,
                ),
                **vars(cpu_config),
            )

        tokenizer = []

    # elif rather than a separate if: a second if would send a freshly loaded
    # large model into the else branch below and overwrite it with the bare
    # model_type string.
    elif model_type == SMALL_MODEL_NAME:
        hf_checkpoint = SMALL_MODEL_REPO_ID
        model, tokenizer = create_hf_model(model_name=hf_checkpoint)
    else:
        # Remote API models (e.g. Gemini) are called by name, so the model
        # object is just the name string and no local tokenizer is needed.
        model = model_type
        tokenizer = ""

    chatf.model_object = model
    chatf.tokenizer = tokenizer
    chatf.model_type = model_type

    load_confirmation = "Finished loading model: " + model_type
    print(load_confirmation)

    return model_type, load_confirmation, model_type
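

# Build the Gradio interface: hidden state components first, then the
# Chatbot, data-source and advanced-settings tabs, followed by the event
# wiring that connects them.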
app = gr.Blocks(fill_width=True)

with app:
    model_type = SMALL_MODEL_NAME
    load_model(model_type, 0, gpu_config, cpu_config, torch_device)
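
    # Hidden components used to pass state between event handlers.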
    ingest_text = gr.State()
    ingest_metadata = gr.State()
    ingest_docs = gr.State()

    model_type_state = gr.State(model_type)
    gpu_config_state = gr.State(gpu_config)
    cpu_config_state = gr.State(cpu_config)
    torch_device_state = gr.State(torch_device)

    embeddings_model_object_state = gr.State(embeddings_model)
    vectorstore_state = gr.State(vectorstore)
    default_embeddings_store_text = gr.Textbox(value=DEFAULT_EMBEDDINGS_LOCATION, visible=False)

    relevant_query_state = gr.Checkbox(value=True, visible=False)

    model_state = gr.State()
    tokenizer_state = gr.State()

    chat_history_state = gr.State()
    instruction_prompt_out = gr.State()

    session_hash_state = gr.State()
    output_folder_textbox = gr.Textbox(value=OUTPUT_FOLDER, visible=False)
    input_folder_textbox = gr.Textbox(value=INPUT_FOLDER, visible=False)

    session_hash_textbox = gr.Textbox(value="", visible=False)
    s3_logs_output_textbox = gr.Textbox(label="S3 logs", visible=False)
    latest_user_rating_data_path = gr.Textbox(label="output_ratings_textbox", visible=False)

    access_logs_state = gr.State(access_logs_data_folder + 'dataset1.csv')
    access_s3_logs_loc_state = gr.State(access_logs_data_folder)
    usage_logs_state = gr.State(usage_data_folder + 'dataset1.csv')
    usage_s3_logs_loc_state = gr.State(usage_data_folder)
    feedback_logs_state = gr.State(feedback_data_folder + 'dataset1.csv')
    feedback_s3_logs_loc_state = gr.State(feedback_data_folder)

    gr.Markdown("<h1><center>Lightweight PDF / web page QA bot</center></h1>")

    gr.Markdown(f"""Chat with PDFs, web pages or data files (.csv / .xlsx). The default is a small model ({SMALL_MODEL_NAME}) that can only answer specific questions directly addressed in the text; it cannot give overall impressions of, or summarise, the document. Go to Advanced settings to change the model, e.g. to one of the Gemini models available on [their very generous free tier](https://ai.google.dev/gemini-api/docs/pricing) (needs an API key), or to AWS Bedrock / larger local models if activated.\n\nBy default, '[{DEFAULT_DATA_SOURCE_NAME}]({DEFAULT_DATA_SOURCE})' is loaded as a data source. If you want to query another data source, please upload it on the 'Change data source' tab. If switching topic, please click the 'Clear chat' button. 'Stop generating' will halt the language model during its response.\n\n**Caution: On Hugging Face this is a public app. Please ensure that the document you upload is not sensitive in any way, as other users may see it!** Also, please note that AI chatbots may give incomplete or incorrect information, so use this app with care and verify any outputs before further use.""")

    with gr.Row():
        current_source = gr.Textbox(label="Current data source(s)", value=DEFAULT_DATA_SOURCE, scale=10)
        current_model = gr.Textbox(label="Current model", value=model_type, scale=3)

    with gr.Tab("Chatbot"):
        with gr.Row():
            chatbot = gr.Chatbot(value=None, avatar_images=('user.jfif', 'bot.jpg'), scale=1, resizable=True, buttons=['copy', 'copy_all', 'share'], max_height=500)
            with gr.Accordion("Source paragraphs with the most relevant text will appear here", open=True):
                sources = gr.HTML(value="No relevant source paragraphs currently loaded", max_height=500)

        gr.Markdown("Make sure that your questions are as specific as possible to allow the search engine to find the most relevant text for your query.")
        with gr.Row():
            message = gr.Textbox(label="Enter your question here", lines=1)
        with gr.Row():
            submit = gr.Button(value="Send message", variant="primary", scale=4)
            clear = gr.Button(value="Clear chat", variant="secondary", scale=1)
            stop = gr.Button(value="Stop generating", variant="stop", scale=1)

        examples_set = gr.Radio(label="Example questions", choices=default_examples_set)

        current_topic = gr.Textbox(label="Feature currently disabled - Keywords related to current conversation topic.", placeholder="Keywords related to the conversation topic will appear here", visible=False)

    with gr.Tab("Change data source"):
        with gr.Accordion("PDF file", open=False):
            in_pdf = gr.File(label="Upload pdf", file_count="multiple", file_types=['.pdf'])
            load_pdf = gr.Button(value="Load in file", variant="secondary", scale=0)

        with gr.Accordion("Web page", open=False):
            with gr.Row():
                in_web = gr.Textbox(label="Enter web page url")
                in_div = gr.Textbox(label="(Advanced) Web page div for text extraction", value="p", placeholder="p")
            load_web = gr.Button(value="Load in webpage", variant="secondary", scale=0)

        with gr.Accordion("CSV/Excel file", open=False):
            in_csv = gr.File(label="Upload CSV/Excel file", file_count="multiple", file_types=['.csv', '.xlsx'])
            in_text_column = gr.Textbox(label="Enter column name where text is stored")
            load_csv = gr.Button(value="Load in CSV/Excel file", variant="secondary", scale=0)

        with gr.Row():
            ingest_embed_out = gr.Textbox(label="File/web page preparation progress")
            file_out_box = gr.File(file_count='single', file_types=['.zip'])

    with gr.Tab("Advanced settings - change model/model options"):
        out_passages = gr.Slider(minimum=1, value=2, maximum=10, step=1, label="Choose number of passages to retrieve from the document. Numbers greater than 2 may lead to increased hallucinations or input text being truncated.")
        temp_slide = gr.Slider(minimum=0.1, value=0.5, maximum=1, step=0.1, label="Choose temperature setting for response generation.")
        with gr.Row():
            with gr.Column(scale=3):
                model_choice = gr.Radio(label="Choose a chat model", value=SMALL_MODEL_NAME, choices=default_model_choices)
                # Only show the API key box when Gemini models are enabled.
                in_api_key = gr.Textbox(value=GEMINI_API_KEY, label="Enter Gemini API key (only if using Google API models)", lines=1, type="password", interactive=True, visible=(RUN_GEMINI_MODELS == "1"))
            with gr.Column(scale=1):
                change_model_button = gr.Button(value="Load model")

        # Only show the GPU-layers control when the large local model is enabled.
        show_gpu_layers = (LOAD_LARGE_MODEL == "1")
        with gr.Accordion("Choose number of model layers to send to GPU (WARNING: please don't modify unless you are sure you have a GPU).", open=False, visible=show_gpu_layers):
            gpu_layer_choice = gr.Slider(label="Choose number of model layers to send to GPU.", value=0, minimum=0, maximum=100, step=1, visible=show_gpu_layers)

        load_text = gr.Text(label="Load status")

    gr.HTML(
        "<center>This app is powered by Gradio and Transformers.</center>"
    )
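
    # Event wiring. Selecting an example question copies it into the message
    # box; Send or Enter builds the full prompt from the retrieved passages,
    # locks the input while the answer streams, then highlights the matched
    # source text and restores interactivity.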
    examples_set.change(fn=chatf.update_message, inputs=[examples_set], outputs=[message])

    response_click = submit.click(chatf.create_full_prompt, inputs=[message, chat_history_state, current_topic, vectorstore_state, embeddings_model_object_state, model_type_state, out_passages, in_api_key], outputs=[chat_history_state, sources, instruction_prompt_out, relevant_query_state], queue=False, api_name="retrieval").\
        success(chatf.turn_off_interactivity, inputs=None, outputs=[message, submit], queue=False).\
        success(chatf.produce_streaming_answer_chatbot, inputs=[chatbot, instruction_prompt_out, model_type_state, temp_slide, relevant_query_state, chat_history_state, in_api_key], outputs=chatbot)
    response_click.success(chatf.highlight_found_text, [chatbot, sources], [sources]).\
        success(chatf.add_inputs_answer_to_history, [message, chatbot, current_topic], [chat_history_state, current_topic]).\
        success(lambda: chatf.restore_interactivity(), None, [message, submit], queue=False)

    response_enter = message.submit(chatf.create_full_prompt, inputs=[message, chat_history_state, current_topic, vectorstore_state, embeddings_model_object_state, model_type_state, out_passages, in_api_key], outputs=[chat_history_state, sources, instruction_prompt_out, relevant_query_state], queue=False).\
        success(chatf.turn_off_interactivity, inputs=None, outputs=[message, submit], queue=False).\
        success(chatf.produce_streaming_answer_chatbot, [chatbot, instruction_prompt_out, model_type_state, temp_slide, relevant_query_state, chat_history_state, in_api_key], chatbot)
    response_enter.success(chatf.highlight_found_text, [chatbot, sources], [sources]).\
        success(chatf.add_inputs_answer_to_history, [message, chatbot, current_topic], [chat_history_state, current_topic]).\
        success(lambda: chatf.restore_interactivity(), None, [message, submit], queue=False)

    # Cancel any in-flight generation from either trigger.
    stop.click(fn=None, inputs=None, outputs=None, cancels=[response_click, response_enter])

    clear.click(chatf.clear_chat, inputs=[chat_history_state, sources, message, current_topic], outputs=[chat_history_state, sources, message, current_topic])
    clear.click(lambda: None, None, chatbot, queue=False)

    # Thumbs up/down ratings are logged and the rating file is pushed to S3.
    chatbot.like(chatf.vote, [chat_history_state, instruction_prompt_out, model_type_state], [latest_user_rating_data_path]).\
        success(fn=upload_file_to_s3, inputs=[latest_user_rating_data_path, latest_user_rating_data_path], outputs=[s3_logs_output_textbox])
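
    # Data-source loading: each loader parses its input, splits it into
    # documents, embeds them into a FAISS store saved as a zip, then hides the
    # example questions (which no longer apply once the data source changes).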
    load_pdf_click = load_pdf.click(ing.parse_file, inputs=[in_pdf], outputs=[ingest_text, current_source]).\
        success(ing.text_to_docs, inputs=[ingest_text], outputs=[ingest_docs]).\
        success(embed_faiss_save_to_zip, inputs=[ingest_docs, output_folder_textbox, embeddings_model_object_state], outputs=[ingest_embed_out, vectorstore_state, file_out_box]).\
        success(chatf.hide_block, outputs=[examples_set])

    load_web_click = load_web.click(ing.parse_html, inputs=[in_web, in_div], outputs=[ingest_text, ingest_metadata, current_source]).\
        success(ing.html_text_to_docs, inputs=[ingest_text, ingest_metadata], outputs=[ingest_docs]).\
        success(embed_faiss_save_to_zip, inputs=[ingest_docs, output_folder_textbox, embeddings_model_object_state], outputs=[ingest_embed_out, vectorstore_state, file_out_box]).\
        success(chatf.hide_block, outputs=[examples_set])

    load_csv_click = load_csv.click(ing.parse_csv_or_excel, inputs=[in_csv, in_text_column], outputs=[ingest_text, current_source]).\
        success(ing.csv_excel_text_to_docs, inputs=[ingest_text, in_text_column], outputs=[ingest_docs]).\
        success(embed_faiss_save_to_zip, inputs=[ingest_docs, output_folder_textbox, embeddings_model_object_state], outputs=[ingest_embed_out, vectorstore_state, file_out_box]).\
        success(chatf.hide_block, outputs=[examples_set])
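
    # Switching model: lock the UI, load the chosen model, restore
    # interactivity, then clear the chat so history doesn't carry across models.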
    change_model_button.click(fn=chatf.turn_off_interactivity, inputs=None, outputs=[message, submit], queue=False).\
        success(fn=load_model, inputs=[model_choice, gpu_layer_choice], outputs=[model_type_state, load_text, current_model]).\
        success(lambda: chatf.restore_interactivity(), None, [message, submit], queue=False).\
        success(chatf.clear_chat, inputs=[chat_history_state, sources, message, current_topic], outputs=[chat_history_state, sources, message, current_topic]).\
        success(lambda: None, None, chatbot, queue=False)
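
    # On page load: pick up session/connection details, load the default model
    # and restore the default FAISS store from disk.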
    app.load(get_connection_params, inputs=None, outputs=[session_hash_state, output_folder_textbox, session_hash_textbox, input_folder_textbox]).\
        success(load_model, inputs=[model_type_state, gpu_layer_choice, gpu_config_state, cpu_config_state, torch_device_state], outputs=[model_type_state, load_text, current_model]).\
        success(get_faiss_store, inputs=[default_embeddings_store_text, embeddings_model_object_state], outputs=[vectorstore_state])
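
    # Access logging: flag each new session hash to a local CSV and mirror the
    # log to S3.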
    access_callback = gr.CSVLogger()
    access_callback.setup([session_hash_textbox], access_logs_data_folder)

    session_hash_textbox.change(lambda *args: access_callback.flag(list(args)), [session_hash_textbox], None, preprocess=False).\
        success(fn=upload_file_to_s3, inputs=[access_logs_state, access_s3_logs_loc_state], outputs=[s3_logs_output_textbox])
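

# Queue settings and launch. When COGNITO_AUTH is enabled, the app requires a
# login via authenticate_user.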
if __name__ == "__main__":
    queued_app = app.queue(max_size=int(MAX_QUEUE_SIZE), default_concurrency_limit=int(DEFAULT_CONCURRENCY_LIMIT))
    launch_kwargs = dict(
        show_error=True,
        inbrowser=True,
        max_file_size=MAX_FILE_SIZE,
        server_port=GRADIO_SERVER_PORT,
        root_path=ROOT_PATH,
        theme=gr.themes.Default(primary_hue="blue"),
    )

    if COGNITO_AUTH == "1":
        queued_app.launch(auth=authenticate_user, **launch_kwargs)
    else:
        queued_app.launch(**launch_kwargs)