sim-bdg committed on
Commit
62893b8
·
1 Parent(s): 45aa866

Upload 16 files

Browse files
app/.chainlit/config.toml ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ # Whether to enable telemetry (default: true). No personal data is collected.
3
+ enable_telemetry = true
4
+
5
+ # List of environment variables to be provided by each user to use the app.
6
+ user_env = []
7
+
8
+ # Duration (in seconds) during which the session is saved when the connection is lost
9
+ session_timeout = 3600
10
+
11
+ # Enable third-party caching (e.g. LangChain cache)
12
+ cache = false
13
+
14
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
15
+ # follow_symlink = false
16
+
17
+ [features]
18
+ # Show the prompt playground
19
+ prompt_playground = true
20
+
21
+ # Authorize users to upload files with messages
22
+ multi_modal = true
23
+
24
+ # Allows user to use speech to text
25
+ [features.speech_to_text]
26
+ enabled = false
27
+ # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
28
+ # language = "en-US"
29
+
30
+ [UI]
31
+ # Name of the app and chatbot.
32
+ name = "Chatbot"
33
+
34
+ # Show the readme while the conversation is empty.
35
+ show_readme_as_default = true
36
+
37
+ # Description of the app and chatbot. This is used for HTML tags.
38
+ # description = ""
39
+
40
+ # Large size content are by default collapsed for a cleaner ui
41
+ default_collapse_content = true
42
+
43
+ # The default value for the expand messages settings.
44
+ default_expand_messages = false
45
+
46
+ # Hide the chain of thought details from the user in the UI.
47
+ hide_cot = false
48
+
49
+ # Link to your github repo. This will add a github button in the UI's header.
50
+ # github = ""
51
+
52
+ # Specify a CSS file that can be used to customize the user interface.
53
+ # The CSS file can be served from the public directory or via an external link.
54
+ # custom_css = "/public/test.css"
55
+
56
+ # Override default MUI light theme. (Check theme.ts)
57
+ [UI.theme.light]
58
+ #background = "#FAFAFA"
59
+ #paper = "#FFFFFF"
60
+
61
+ [UI.theme.light.primary]
62
+ #main = "#F80061"
63
+ #dark = "#980039"
64
+ #light = "#FFE7EB"
65
+
66
+ # Override default MUI dark theme. (Check theme.ts)
67
+ [UI.theme.dark]
68
+ #background = "#FAFAFA"
69
+ #paper = "#FFFFFF"
70
+
71
+ [UI.theme.dark.primary]
72
+ #main = "#F80061"
73
+ #dark = "#980039"
74
+ #light = "#FFE7EB"
75
+
76
+
77
+ [meta]
78
+ generated_by = "0.7.400"
app/__pycache__/app.cpython-310.pyc ADDED
Binary file (1.37 kB). View file
 
app/__pycache__/prompts.cpython-310.pyc CHANGED
Binary files a/app/__pycache__/prompts.cpython-310.pyc and b/app/__pycache__/prompts.cpython-310.pyc differ
 
app/__pycache__/prompts.cpython-311.pyc CHANGED
Binary files a/app/__pycache__/prompts.cpython-311.pyc and b/app/__pycache__/prompts.cpython-311.pyc differ
 
app/__pycache__/prompts.cpython-312.pyc ADDED
Binary file (3.72 kB). View file
 
app/__pycache__/prompts.cpython-38.pyc ADDED
Binary file (3.71 kB). View file
 
app/__pycache__/simchat.cpython-310.pyc ADDED
Binary file (2.84 kB). View file
 
app/__pycache__/simchat.cpython-311.pyc ADDED
Binary file (5.37 kB). View file
 
app/__pycache__/simchat.cpython-39.pyc ADDED
Binary file (2.68 kB). View file
 
app/__pycache__/spark.cpython-311.pyc CHANGED
Binary files a/app/__pycache__/spark.cpython-311.pyc and b/app/__pycache__/spark.cpython-311.pyc differ
 
app/requirements.txt CHANGED
@@ -4,5 +4,4 @@ openai
4
  pinecone-client
5
  cohere
6
  tiktoken
7
- context-python
8
  promptwatch
 
4
  pinecone-client
5
  cohere
6
  tiktoken
 
7
  promptwatch
app/simchat.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library
import os
import time

# Third-party
import pinecone
import chainlit as cl
# NOTE(review): these two names are shadowed by the decorated handler
# functions defined below; the import is kept for backward compatibility.
from chainlit import on_message, on_chat_start
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.cohere import CohereEmbeddings
from langchain.memory import ConversationTokenBufferMemory
from langchain.prompts import (
    ChatPromptTemplate,
    PromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.vectorstores import Pinecone
from promptwatch import PromptWatch
# from langchain.callbacks import ContextCallbackHandler

# Local
from prompts import load_query_gen_prompt, load_simchat_prompt
27
+
28
+
29
+
30
# Name of the existing Pinecone index used as the retrieval knowledge base.
index_name = "chat-index-v1"

# Prompt templates loaded from the local prompts module.
simchat = load_simchat_prompt()
query_gen_prompt = load_query_gen_prompt()
# Prompt that condenses chat history + follow-up into a standalone question.
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(query_gen_prompt)
# Initialize the Pinecone client at import time (module-level side effect);
# requires PINECONE_API_KEY in the environment.
pinecone.init(
    api_key=os.environ.get("PINECONE_API_KEY"),
    environment='gcp-starter',
)
39
@cl.on_chat_start
async def on_chat_start():
    """Build the conversational retrieval chain for a new chat session.

    Wires together the chat model, token-buffer memory, Cohere embeddings
    and the Pinecone retriever, then stores the assembled chain and an
    initial message history in the Chainlit user session.
    """
    # token = os.environ["CONTEXT_TOKEN"]
    # context_callback = ContextCallbackHandler(token)

    # Streaming chat model; requires OPENAI_API_KEY in the environment.
    chat_model = ChatOpenAI(
        temperature=0.0,
        verbose=False,
        openai_api_key=os.environ.get("OPENAI_API_KEY"),
        streaming=True,
        model_name='gpt-3.5-turbo',
        # callbacks=[context_callback]
    )

    # Keep at most ~1000 tokens of prior turns under the "chat_history" key.
    token_memory = ConversationTokenBufferMemory(
        llm=chat_model,
        memory_key="chat_history",
        return_messages=True,
        input_key='question',
        max_token_limit=1000,
    )

    # Cohere embeddings over the pre-populated Pinecone index.
    embedder = CohereEmbeddings(
        model='embed-english-light-v2.0',
        cohere_api_key=os.environ.get("COHERE_API_KEY"),
    )
    vector_store = Pinecone.from_existing_index(
        index_name=index_name, embedding=embedder
    )
    doc_retriever = vector_store.as_retriever(search_kwargs={"k": 4})

    # System persona + the user's question form the answering prompt.
    answer_prompt = ChatPromptTemplate.from_messages([
        SystemMessagePromptTemplate.from_template(simchat),
        HumanMessagePromptTemplate.from_template("{question}"),
    ])

    # Chain that rewrites a follow-up into a standalone search query.
    condense_chain = LLMChain(
        llm=chat_model, prompt=CONDENSE_QUESTION_PROMPT, verbose=False
    )
    # "stuff" chain that answers from the retrieved documents.
    answer_chain = load_qa_with_sources_chain(
        chat_model, chain_type="stuff", verbose=False, prompt=answer_prompt
    )

    qa_chain = ConversationalRetrievalChain(
        retriever=doc_retriever,
        question_generator=condense_chain,
        combine_docs_chain=answer_chain,
        verbose=False,
        memory=token_memory,
        rephrase_question=False,
        # callbacks=[context_callback]
    )
    cl.user_session.set("conversation_chain", qa_chain)

    cl.user_session.set(
        "message_history",
        [{"role": "system", "content": "You are a helpful assistant."}],
    )
    # Welcome message (Indonesian: "Welcome! I am Nada, your virtual assistant."):
    # await cl.Message(content="Selamat datang! saya adalah Nada, asisten virtual anda.").send()
79
+
80
+
81
@cl.on_message
async def on_message(message: cl.Message):
    """Answer one user message via the session's retrieval chain.

    Appends the user turn to the session message history, runs the
    conversational retrieval chain on the question, records the answer
    in the history, and sends it back to the UI.
    """
    message_history = cl.user_session.get("message_history")
    message_history.append({"role": "user", "content": message.content})

    chain = cl.user_session.get("conversation_chain")

    # The chain's only expected input key is "question"; chat history is
    # injected by its ConversationTokenBufferMemory. (The previous
    # 'stream': True entry in this dict was a no-op — streaming is driven
    # by the LLM's callbacks, not by an input key.)
    res = await chain.arun({"question": message.content})

    msg = cl.Message(content=res)
    message_history.append({"role": "assistant", "content": msg.content})
    # Send once; the former `await msg.update()` right after send() was
    # redundant since the content never changed. Debug prints removed.
    await msg.send()
100
+
101
+
102
+