deykalion445 commited on
Commit
3202e6f
·
1 Parent(s): 41dacf7

Add the app

Browse files
README.md CHANGED
@@ -1,13 +1,9 @@
1
  ---
2
- title: FotiouTonTrwei
3
- emoji: 🐨
4
- colorFrom: gray
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 4.19.1
8
  app_file: app.py
9
- pinned: false
10
- license: wtfpl
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
1
  ---
2
+ title: chatBGP
 
 
 
 
 
3
  app_file: app.py
4
+ sdk: gradio
5
+ sdk_version: 3.46.0
6
  ---
7
 
8
+
9
+
app.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ import json
4
+ import time
5
+ from llama_index import (
6
+ StorageContext,
7
+ ServiceContext,
8
+ ChatPromptTemplate,
9
+ load_index_from_storage,
10
+ )
11
+ from llama_index.llms import OpenAI
12
+ from llama_index.llms import ChatMessage, MessageRole
13
+
14
+
15
# Load runtime configuration (model name, temperature, data/storage dirs)
# from the sibling config.json file.
with open('config.json') as f:
    config = json.load(f)

MODEL = config['llm']['model']              # OpenAI chat model name
TEMPERATURE = config['llm']['temperature']  # sampling temperature for the LLM
DATA_DIR = config['data']                   # source-documents directory
STORAGE_DIR = config['storage']             # persisted-index directory

# The OpenAI key must be provided via the environment.
# BUG FIX: the original line `os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY")`
# was a no-op when the variable was set, and raised an opaque TypeError
# (None is not a valid environ value) when it was missing. Fail fast with
# a clear error message instead.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError(
        "OPENAI_API_KEY environment variable is not set; "
        "export it before launching the app."
    )
27
+
28
+
29
def promt():
    """Print a fresh '>> ' input prompt on a new line, without a trailing newline."""
    # NOTE(review): the misspelled name 'promt' is kept — renaming would
    # change the function's public interface.
    print('\n>>', end=' ')
31
+
32
+
33
def user(user_message, history) -> tuple:
    """Record the user's turn in the chat history.

    Returns a pair: an empty string (clears the input textbox) and a new
    history list with ``[user_message, None]`` appended — the ``None``
    placeholder is filled in later by the streaming response handler.
    The input ``history`` list is not mutated.
    """
    updated = list(history)
    updated.append([user_message, None])
    return "", updated
35
+
36
+
37
def generate_response(history):
    """Stream the assistant's answer into the last chat-history entry.

    Queries the module-level ``query_engine`` with the most recent user
    message (``history[-1][0]``), then yields the updated ``history`` as
    the answer grows, giving the Gradio Chatbot a typewriter effect.

    NOTE(review): original indentation was lost in the diff rendering; the
    yield is placed inside the per-character loop, which matches the
    "print char by char" intent — confirm against the deployed app.
    """
    streaming_response = query_engine.query(history[-1][0])
    history[-1][1] = ""  # replace the None placeholder set by user()
    # print char by char
    for text in streaming_response.response_gen:
        for character in text:
            history[-1][1] += character
            # time.sleep(0.05)  # optional: slow the typewriter effect
            yield history
46
+
47
+
48
# Load the previously persisted vector index from STORAGE_DIR so the app
# does not have to re-embed the source documents on every startup.
storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
index = load_index_from_storage(storage_context)
51
+
52
# TODO(review): move the prompt text into a separate file/config?
# Text QA prompt: a system message that forbids answering outside the
# retrieved context, plus a user message template. The {context_str} and
# {query_str} placeholders are filled in by the query engine at run time.
chat_text_qa_msgs = [
    ChatMessage(
        role=MessageRole.SYSTEM,
        content=(
            "If the context isn't helpful, do not answer the question."
        ),
    ),
    ChatMessage(
        role=MessageRole.USER,
        content=(
            "We have provided context information below. \n"
            "---------------------\n"
            "{context_str}"
            "\n---------------------\n"
            "Do not give me an answer if it is not mentioned in the context as a fact. \n"
            "You can also reply to greetings.\n"
            "Given this information, please provide me with an answer to the following:\n{query_str}\n"
        ),
    ),
]
text_qa_template = ChatPromptTemplate(chat_text_qa_msgs)
75
+
76
# Configure the LLM and build a streaming query engine over the index.
# FIX(review): the system prompt was garbled ("You are a genious that want
# to help everyone the asks you questions"); corrected the grammar so the
# model receives a coherent instruction.
llm = OpenAI(temperature=TEMPERATURE, model=MODEL)
service_context = ServiceContext.from_defaults(
    llm=llm,
    system_prompt="You are a genius who wants to help everyone that asks you questions.",
)
query_engine = index.as_query_engine(
    service_context=service_context,
    streaming=True,  # required: generate_response() consumes response_gen
    text_qa_template=text_qa_template,
)
83
+
84
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:
    chatbot = gr.components.Chatbot(label='OFotiouTonPairnei', height=500)
    msg = gr.components.Textbox(label='')
    submit = gr.components.Button(value='Submit')
    clear = gr.components.ClearButton()

    # Pressing Enter in the textbox or clicking Submit first records the
    # user's turn (user), then streams the model's answer into the chat
    # (generate_response). queue=False lets the first step run immediately.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        generate_response, chatbot, chatbot
    )
    submit.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        generate_response, chatbot, chatbot
    )
    # Clear wipes the chat history.
    clear.click(lambda: None, None, chatbot, queue=False)

# queue() is needed so the generator-based streaming handler works.
demo.queue()
demo.launch()
config.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "llm": {
3
+ "temperature": 0.3,
4
+ "model": "gpt-3.5-turbo-1106"
5
+ },
6
+ "data": "data",
7
+ "storage": "storage"
8
+ }
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ openai==1.3.7
2
+ llama-index==0.9.12
storage/default__vector_store.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5901df7bebb19721a4c16b020a283213db645a0eafeb23fcf9b750552fa09f8
3
+ size 19105111
storage/docstore.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e063b2bab0d44516347a70302494ba092d634f23146de05b064789292352921d
3
+ size 3476634
storage/graph_store.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e0a77744010862225c69da83c585f4f8a42fd551b044ce530dbb1eb6e16742c
3
+ size 18
storage/index_store.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69423d42f7c415a012b392ef11659b413ad5b967dfa97fbffd6bb4a6cd39452d
3
+ size 45691