| | import streamlit as st |
| | import pandas as pd |
| | import sqlite3 |
| | from llama_index.core import StorageContext, load_index_from_storage |
| | from llama_index.llms.ollama import Ollama |
| | from llama_index.embeddings.huggingface import HuggingFaceEmbedding |
| | from llama_index.core import PromptTemplate |
| | import os |
| |
|
| |
|
# Version tag recorded with every saved Q&A row.
version = 2.3

# Open (or create) the on-disk history database next to the script.
# The handle is module-level so it survives Streamlit's script reruns.
conn = sqlite3.connect('qa.db')
c = conn.cursor()

# Ensure the history table exists; `with conn:` commits on success
# (and rolls back if the DDL fails), replacing an explicit commit().
with conn:
    c.execute('CREATE TABLE IF NOT EXISTS qa (question TEXT, answer TEXT, version REAL)')
| |
|
| | |
def read_description_from_file(file_path):
    """Return the full text content of *file_path*.

    Used for both the intro text (tab1_intro.txt) and the QA prompt
    template (tab2_pe.txt).

    Args:
        file_path: Path to a UTF-8 text file.

    Returns:
        The file's contents as a single string.

    Raises:
        OSError: If the file cannot be opened or read.
    """
    # Explicit encoding: the default is platform-dependent, which would
    # silently mangle non-ASCII characters in the prompt/intro files.
    with open(file_path, 'r', encoding='utf-8') as file:
        return file.read()
| |
|
| | |
# Location of the persisted llama-index store, built offline before the
# app is launched.
INDEX_OUTPUT_PATH = "./output_index"

# Fail fast at startup when the index has not been generated yet, rather
# than erroring later on the first query.
index_missing = not os.path.exists(INDEX_OUTPUT_PATH)
if index_missing:
    raise ValueError(f"Index directory {INDEX_OUTPUT_PATH} does not exist")
| |
|
| | |
# Local LLM served by Ollama; generous timeout because local generation
# can be slow on first load.
llm = Ollama(model="llama3", request_timeout=120.0)
# Embedding model must match the one used when the index was built.
# NOTE(review): trust_remote_code=True executes model-repo code — assumed
# acceptable here since the model is pinned; confirm this is intended.
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-large-en-v1.5", trust_remote_code=True)

# Reload the pre-built vector index from disk (path validated above).
storage_context = StorageContext.from_defaults(persist_dir=INDEX_OUTPUT_PATH)
loaded_index = load_index_from_storage(embed_model=embed_model, storage_context=storage_context)

# Query engine combines retrieval over the loaded index with the LLM.
query_engine = loaded_index.as_query_engine(llm=llm, embed_model=embed_model)

# The QA prompt text is kept in an external file so it can be tuned
# without touching the code.
qa_prompt_tmpl_str = read_description_from_file("tab2_pe.txt")
qa_prompt_tmpl = PromptTemplate(qa_prompt_tmpl_str)

# Swap the default text-QA prompt of the response synthesizer for ours.
query_engine.update_prompts(
    {"response_synthesizer:text_qa_template": qa_prompt_tmpl}
)
| |
|
| | |
def save_to_db(question, answer, version):
    """Persist one Q&A exchange, tagged with the app version, to SQLite.

    Uses the module-level cursor/connection; parameters are bound via
    placeholders, never interpolated into the SQL string.
    """
    row = (question, answer, version)
    c.execute('INSERT INTO qa (question, answer, version) VALUES (?, ?, ?)', row)
    conn.commit()
| |
|
| | |
def fetch_from_db():
    """Return every stored (question, answer, version) row as a list of tuples."""
    # cursor.execute returns the cursor itself, so the calls chain.
    return c.execute('SELECT * FROM qa').fetchall()
| |
|
def main():
    """Render the three-tab Streamlit UI: model intro, live Q&A, and history.

    Tab 1 shows a static description read from tab1_intro.txt.
    Tab 2 sends a user question through the module-level query_engine and
    saves the (question, answer, version) triple to SQLite.
    Tab 3 lists saved history and offers CSV downloads.
    """
    st.title("How Much AI Can Assist The Email Replying System Of Our Council?")

    tab1, tab2, tab3 = st.tabs(["LLM Model Description", "Ask a Question", "View Q&A History"])

    with tab1:
        st.subheader("LLM Model Description")
        description = read_description_from_file("tab1_intro.txt")
        st.write(description)

    with tab2:
        st.subheader("Ask a Question (Please try to focus on council tax)")
        question = st.text_input("Enter your question:")
        if st.button("Get Answer"):
            # Guard clause instead of nested if/else.
            if not question:
                st.warning("Please enter a question")
            else:
                try:
                    response = query_engine.query(question)
                    # The original wrapped this in try/except AttributeError,
                    # but hasattr() already guards the attribute access, so
                    # that handler was unreachable and has been removed.
                    # llama_index response objects also stringify to their
                    # answer text, so str() is a safe fallback.
                    if hasattr(response, 'text'):
                        answer = response.text
                    else:
                        answer = str(response)
                    st.write(f"**Answer:** {answer}")
                    save_to_db(question, answer, version)
                except Exception as e:
                    # Top-level UI boundary: surface query/DB failures to the
                    # user instead of crashing the app.
                    st.error(f"An error occurred: {e}")

    with tab3:
        st.subheader("View Q&A History")
        qa_data = fetch_from_db()
        if qa_data:
            df = pd.DataFrame(qa_data, columns=["Question", "Answer", "Version"])
            st.dataframe(df)

            st.write("Due to a current bug, the app may not respond as expected. Below is an extract of the test results from previous interactions:")
            csv = df.to_csv(index=False).encode('utf-8')
            st.download_button(
                label="Download Q&A history as CSV",
                data=csv,
                file_name='qa_output.csv',
                mime='text/csv',
            )
        else:
            st.write("No data available")

        # Separately offer a pre-existing export file, if one was shipped
        # alongside the app.
        if os.path.exists('qa_output.csv'):
            st.write("Additionally, you can download the pre-existing CSV file with historical data:")
            with open('qa_output.csv', 'rb') as f:
                st.download_button(
                    label="Download existing Q&A history CSV",
                    data=f,
                    file_name='qa_output.csv',
                    mime='text/csv',
                )


if __name__ == "__main__":
    main()
| |
|