louisedrumm committed on
Commit
091be36
·
1 Parent(s): b344c41

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -49
app.py CHANGED
@@ -1,58 +1,46 @@
1
- import openai
2
- import os
 
3
  import gradio as gr
 
 
 
4
 
5
- openai.api_key = os.environ.get("OPENAI_API_KEY")
6
-
7
- class Conversation:
8
- def __init__(self, prompt, num_of_round):
9
- self.prompt = prompt
10
- self.num_of_round = num_of_round
11
- self.messages = []
12
- self.messages.append({"role": "system", "content": self.prompt})
13
-
14
- def ask(self, question):
15
- try:
16
- self.messages.append( {"role": "user", "content": question})
17
- response = openai.ChatCompletion.create(
18
- model="gpt-3.5-turbo",
19
- messages=self.messages,
20
- temperature=0.5,
21
- max_tokens=2048,
22
- top_p=1,
23
- )
24
- except Exception as e:
25
- print(e)
26
- return e
27
-
28
- message = response["choices"][0]["message"]["content"]
29
- self.messages.append({"role": "assistant", "content": message})
30
-
31
- if len(self.messages) > self.num_of_round*2 + 1:
32
- del self.messages[1:3]
33
- return message
34
-
35
 
36
- prompt = """你是一个中国厨师,用中文回答做菜的问题。你的回答需要满足以下要求:
37
- 1. 你的回答必须是中文
38
- 2. 回答限制在100个字以内"""
 
 
39
 
40
- conv = Conversation(prompt, 5)
41
 
42
- def predict(input, history=[]):
43
- history.append(input)
44
- response = conv.ask(input)
45
- history.append(response)
46
- responses = [(u,b) for u,b in zip(history[::2], history[1::2])]
47
- return responses, history
 
 
 
 
48
 
49
- with gr.Blocks(css="#chatbot{height:350px} .overflow-y-auto{height:500px}") as demo:
50
- chatbot = gr.Chatbot(elem_id="chatbot")
51
- state = gr.State([])
52
 
53
- with gr.Row():
54
- txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)
 
 
55
 
56
- txt.submit(predict, [txt, state], [chatbot, state])
 
 
 
57
 
58
- demo.launch()
 
 
1
from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
# from langchain.schema import BaseLanguageModel
from langchain.chat_models import ChatOpenAI
import gradio as gr
import sys
import os
import openai

# SECURITY: never hard-code an API key in source control — the previous
# revision committed a live `sk-...` secret, which must be treated as
# compromised and revoked. The key is now read from the environment;
# gpt_index/langchain/openai all pick up OPENAI_API_KEY from os.environ.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError(
        "OPENAI_API_KEY environment variable is not set. "
        "Set it before launching the app (do not hard-code keys in app.py)."
    )

# Default chat seed messages. NOTE(review): nothing in this file reads
# `messages` — it looks like a leftover from an earlier chat-loop design.
messages = [
    {"role": "system", "content": "You try to be secretive but are terrible at it."},
]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
def construct_index(directory_path,
                    max_input_size=4096,
                    num_outputs=512,
                    max_chunk_overlap=20,
                    chunk_size_limit=600):
    """Build a GPTSimpleVectorIndex over every document in *directory_path*.

    The index is persisted to ``index.json`` in the working directory and
    also returned, so callers can use it immediately.

    Args:
        directory_path: Folder whose files are loaded via SimpleDirectoryReader.
        max_input_size: Maximum prompt size fed to the LLM (tokens).
        num_outputs: Maximum tokens the LLM may generate per call.
        max_chunk_overlap: Token overlap between adjacent document chunks.
        chunk_size_limit: Maximum tokens per document chunk.

    Returns:
        The constructed GPTSimpleVectorIndex.
    """
    # PromptHelper splits/overlaps documents so each chunk fits the model window.
    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap,
                                 chunk_size_limit=chunk_size_limit)

    # gpt-3.5-turbo via langchain; temperature 0.7 keeps answers mildly creative.
    llm_predictor = LLMPredictor(
        llm=ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo",
                       max_tokens=num_outputs))

    documents = SimpleDirectoryReader(directory_path).load_data()

    # NOTE(review): positional-documents constructor matches the old gpt_index
    # API; newer llama-index releases renamed this class and its signature.
    index = GPTSimpleVectorIndex(documents, llm_predictor=llm_predictor,
                                 prompt_helper=prompt_helper)

    index.save_to_disk('index.json')

    return index
 
 
 
 
34
 
35
def chatbot(input_text):
    """Answer *input_text* by querying the vector index saved in index.json.

    Returns the plain-text response string for display in the Gradio UI.
    """
    # NOTE(review): the index is re-loaded from disk on every single query;
    # caching it at module level would avoid the repeated deserialization.
    vector_index = GPTSimpleVectorIndex.load_from_disk('index.json')
    answer = vector_index.query(input_text, response_mode="compact")
    return answer.response
 
40
# Gradio front-end: one multi-line textbox in, plain text out.
question_box = gr.components.Textbox(
    lines=7,
    label="Hello, I'm a wiG, what would you like to know?",
)
iface = gr.Interface(
    fn=chatbot,
    inputs=question_box,
    outputs="text",
    title="wiGs explained custom-trained AI Chatbot",
)

# Build (and persist) the index from ./docs, then start the app.
# share=True asks Gradio to create a public shareable link.
index = construct_index("docs")
iface.launch(share=True)