iShare committed on
Commit
1fdb479
·
1 Parent(s): 99173de

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +118 -0
app.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Doc-chat app dependencies.
# NOTE(review): the llama_index / langchain imports below use the legacy
# (pre-0.10) API surface (LLMPredictor, ServiceContext, LangchainEmbedding,
# top-level HuggingFaceHub) — pin matching versions or migrate on upgrade.

# stdlib
import os
import random
import string
from pathlib import Path
from time import sleep

# third-party
import streamlit as st
from dotenv import load_dotenv
from langchain import HuggingFaceHub
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import (
    LangchainEmbedding,
    LLMPredictor,
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)
from streamlit.components.v1 import html

# Pull environment variables (e.g. HUGGINGFACEHUB_API_TOKEN) from a .env file.
load_dotenv()
18
# Page-level configuration; st.set_page_config must be the first Streamlit
# call executed in the script.
st.set_page_config(page_title="Open AI Doc-Chat Assistant", layout="wide")
st.subheader("Open AI Doc-Chat Assistant: Life Enhancing with AI!")

# Inject the app's custom stylesheet.
# NOTE(review): raises FileNotFoundError if main.css is missing from the
# working directory — confirm the file ships with the app.
css_file = "main.css"
with open(css_file) as f:
    st.markdown("<style>{}</style>".format(f.read()), unsafe_allow_html=True)

# Hugging Face Hub token; HuggingFaceHub also reads it from the environment.
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Parsed documents; stays empty until a PDF upload succeeds below.
documents = []
29
def generate_random_string(length):
    """Return a random string of *length* lowercase ASCII letters."""
    return ''.join(random.choices(string.ascii_lowercase, k=length))
32
# Per-session upload directory: a random 20-char name so concurrent users of
# the app do not write PDFs into each other's folders.
random_string = generate_random_string(20)
directory_path = random_string

# Sidebar contact QR-code image (must ship alongside the app).
wechat_image = "WeChatCode.jpg"
37
# Sidebar CSS for the ".blue-underline" spans used in the contact links.
# NOTE(review): "text-decoration: bold" is not a valid CSS value (bold is a
# font-weight); browsers ignore it, so only the blue color takes effect —
# confirm intent before changing.
st.sidebar.markdown(
    """
    <style>
    .blue-underline {
        text-decoration: bold;
        color: blue;
    }
    </style>
    """,
    unsafe_allow_html=True
)
49
# Center images rendered inside the sidebar (the WeChat QR code) and shrink
# them to half the sidebar width.
st.markdown(
    """
    <style>
    [data-testid=stSidebar] [data-testid=stImage]{
        text-align: center;
        display: block;
        margin-left: auto;
        margin-right: auto;
        width: 50%;
    }
    </style>
    """, unsafe_allow_html=True
)
63
# Main flow: sidebar upload + disclaimer, then (once PDFs are uploaded) save
# them, build a vector index, and answer one query per script run.
# NOTE(review): indentation reconstructed from a flattened diff scrape —
# confirm block nesting against the original file.
with st.sidebar:
    pdf_files = st.file_uploader(
        "Upload file and start AI Doc-Chat.",
        type=['pdf'],
        accept_multiple_files=True,
    )
    st.write("Disclaimer: This app is for information purpose only. NO liability could be claimed against whoever associated with this app in any manner. User should consult a qualified legal professional for legal advice.")
    st.sidebar.markdown("Contact: [aichat101@foxmail.com](mailto:aichat101@foxmail.com)")
    st.sidebar.markdown('WeChat: <span class="blue-underline">pat2win</span>, or scan the code below.', unsafe_allow_html=True)
    st.image(wechat_image)
    st.sidebar.markdown('<span class="blue-underline">Life Enhancing with AI.</span>', unsafe_allow_html=True)

st.subheader("Enjoy chatting!")

if pdf_files:
    # exist_ok=True: Streamlit reruns this whole script on every widget
    # interaction, so the directory already exists on the second pass
    # (plain os.makedirs raised FileExistsError on rerun).
    os.makedirs(directory_path, exist_ok=True)
    for pdf_file in pdf_files:
        file_path = os.path.join(directory_path, pdf_file.name)
        with open(file_path, 'wb') as f:
            f.write(pdf_file.read())
        st.success(f"File '{pdf_file.name}' saved successfully.")

    try:
        documents = SimpleDirectoryReader(directory_path).load_data()
    except Exception:
        # Best-effort: directory may not be populated yet on this rerun.
        print("waiting for path creation.")

    # Retrieval embeddings (MiniLM) + starchat-beta via the HF Inference API.
    embed_model = LangchainEmbedding(
        HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')
    )
    llm_predictor = LLMPredictor(
        HuggingFaceHub(
            repo_id="HuggingFaceH4/starchat-beta",
            model_kwargs={
                "min_length": 100,
                "max_new_tokens": 1024,
                "do_sample": True,
                "temperature": 0.1,
                "top_k": 50,
                "top_p": 0.95,
                "eos_token_id": 49155,
            },
        )
    )
    service_context = ServiceContext.from_defaults(
        llm_predictor=llm_predictor, embed_model=embed_model
    )

    new_index = VectorStoreIndex.from_documents(
        documents,
        service_context=service_context,
    )

    # Persist and reload under THIS session's directory. The original passed
    # the literal string "directory_path", so every session shared one index
    # folder — defeating the per-session random directory created above.
    new_index.storage_context.persist(directory_path)
    storage_context = StorageContext.from_defaults(persist_dir=directory_path)
    loadedindex = load_index_from_storage(
        storage_context=storage_context, service_context=service_context
    )
    query_engine = loadedindex.as_query_engine()

    # Single pass: Streamlit reruns the script on each interaction, so a loop
    # is unnecessary. The original `while True:` re-created the identical
    # text_input widget on its second iteration, raising DuplicateWidgetID,
    # which the broad except turned into st.stop() — i.e. it already behaved
    # as one query per run, just via exception-driven control flow.
    try:
        question = st.text_input("Enter your query here:")
        print("Your query:\n" + question)
        # Blank/whitespace input and the "exit" sentinel produce no answer,
        # matching the original's break branches.
        if question.strip() and question != "exit":
            with st.spinner("AI Thinking...Please wait a while to Cheers!"):
                initial_response = query_engine.query(question)
                temp_ai_response = str(initial_response)
                # starchat emits an <|end|> marker; keep only the text before it.
                final_ai_response = temp_ai_response.partition('<|end|>')[0]
                st.write("AI Response:\n\n" + final_ai_response)
    except Exception:
        st.stop()