Create app.py
app.py
ADDED
@@ -0,0 +1,154 @@
import os
import gradio as gr
import openai
import pinecone  # needed below for pinecone.init / create_index / Index

from langdetect import detect
from gtts import gTTS
from pdfminer.high_level import extract_text


# langchain part
from langchain.llms import OpenAI
from langchain.text_splitter import SpacyTextSplitter
from langchain.document_loaders import TextLoader
from langchain.document_loaders import DirectoryLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain.docstore.document import Document


openai.api_key = os.environ['OPENAI_API_KEY']
pinecone_key = os.environ['PINECONE_API_KEY']
pinecone_environment = 'us-west1-gcp'


user_db = {os.environ['username1']: os.environ['password1']}
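# username1/password1 are read from the Space's repository secrets; the gradio
# auth callback at launch checks a login attempt against this one-entry dict.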

messages = [{"role": "system", "content": 'You are a helpful assistant.'}]


def init_pinecone(index_name):
    # initialize connection to Pinecone vector DB (app.pinecone.io for API key)
    pinecone.init(
        api_key=pinecone_key,
        environment=pinecone_environment
    )
    # using openai embeddings, hence dim = 1536
    # only create the index if it does not already exist; create_index errors otherwise
    if index_name not in pinecone.list_indexes():
        pinecone.create_index(index_name, dimension=1536, metric="euclidean")
    index = pinecone.Index(index_name)
    return index


def process_file(index_name, file_obj):
    init_pinecone(index_name)
    embeddings = OpenAIEmbeddings()
    # pipeline='zh_core_web_sm'
    splitter = SpacyTextSplitter(chunk_size=1000, chunk_overlap=200)
    # the Gradio "file" input hands over an uploaded PDF; extract its text and
    # wrap it in a Document so the splitter can chunk it
    docs = [Document(page_content=extract_text(file_obj.name))]
    split_docs = splitter.split_documents(docs)

    # embed each chunk with OpenAI and upsert it into the Pinecone index
    for document in split_docs:
        Pinecone.from_documents([document], embeddings, index_name=index_name)

    return list_pinecone(index_name)


def list_pinecone(index_name):
    index = pinecone.Index(index_name)
    stats = index.describe_index_stats()
    return stats
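

# Not part of the original app, just a sketch: assuming the same index name and
# OpenAI embeddings, the chunks stored by process_file could be queried back
# with LangChain like this (the helper name query_pinecone is hypothetical).
def query_pinecone(index_name, query, k=3):
    embeddings = OpenAIEmbeddings()
    vectorstore = Pinecone.from_existing_index(index_name, embeddings)
    # return the k stored chunks most similar to the query text
    return vectorstore.similarity_search(query, k=k)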


def roleChoice(role):
    global messages
    messages = [{"role": "system", "content": role}]
    return "role:" + role


def textGPT(text):
    global messages

    messages.append({"role": "user", "content": text})

    response = openai.ChatCompletion.create(model="gpt-4", messages=messages)

    system_message = response["choices"][0]["message"]
    messages.append(system_message)

    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"

    return chats
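
# After one exchange, `messages` holds the running conversation in the OpenAI
# chat format, e.g.:
#   [{"role": "system", "content": "You are a helpful assistant."},
#    {"role": "user", "content": "..."},
#    {"role": "assistant", "content": "..."}]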


def fileGPT(prompt, file_obj):
    global messages

    file_text = extract_text(file_obj.name)
    text = prompt + "\n\n" + file_text

    messages.append({"role": "user", "content": text})

    response = openai.ChatCompletion.create(model="gpt-4", messages=messages)

    system_message = response["choices"][0]["message"]
    messages.append(system_message)

    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"

    return chats
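
# Note: fileGPT sends the full extracted PDF text in a single chat message and
# keeps it in `messages`, so very long PDFs can exceed the gpt-4 context window.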


def clear():
    global messages
    messages = [{"role": "system", "content": 'You are a helpful technology assistant.'}]
    return


def show():
    global messages
    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"

    return chats


with gr.Blocks() as chatHistory:
    gr.Markdown("Click the Clear button below to remove all the chat history.")
    clear_btn = gr.Button("Clear")
    clear_btn.click(fn=clear, inputs=None, outputs=None, queue=False)

    gr.Markdown("Click the Display button below to show all the chat history.")
    show_out = gr.Textbox()
    show_btn = gr.Button("Display")
    show_btn.click(fn=show, inputs=None, outputs=show_out, queue=False)


role = gr.Interface(fn=roleChoice, inputs="text", outputs="text", description="Choose your GPT role, e.g. You are a helpful technology assistant. You are an IT architect. You are a developer relations consultant. You are a machine learning engineer. You are a senior C++ developer.")
text = gr.Interface(fn=textGPT, inputs="text", outputs="text")

# named pinecone_tab so it does not shadow the pinecone module used above
pinecone_tab = gr.Interface(fn=process_file, inputs=["text", "file"], outputs="text")

#audio = gr.Interface(fn=audioGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
#siri = gr.Interface(fn=siriGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="audio")
file = gr.Interface(fn=fileGPT, inputs=["text", "file"], outputs="text", description="Enter a prompt and your PDF, e.g. Let's think step by step, summarize the following text:")
demo = gr.TabbedInterface([role, text, file, chatHistory, pinecone_tab], ["roleChoice", "chatGPT", "fileGPT", "ChatHistory", "Pinecone"])

if __name__ == "__main__":
    demo.launch(enable_queue=False, auth=lambda u, p: user_db.get(u) == p,
                auth_message="This is not designed to be used publicly, as it is linked to a personal OpenAI API key. However, you can copy the code and create your own multi-functional ChatGPT with your own ID and password by using the 'Repository secrets' feature on Hugging Face.")
    #demo.launch()