Commit 55ea56c · Parent(s): f74f4fd
Update app.py
app.py CHANGED
@@ -14,6 +14,8 @@ tokenizer=T5Tokenizer.from_pretrained("Logeswaransr/T5_MineAI_Prototype")
 
 pipe=pipeline('text2text-generation', model=model, tokenizer=tokenizer)
 
+greetings=["Hello! My name is MineAI, A specially trained LLM here to assist you on your Mining Related Queries.","How may I help you?"]
+
 st.set_page_config(page_title='Sample Chatbot', layout='wide')
 
 if 'messages' not in st.session_state:
@@ -27,18 +29,29 @@ for message in st.session_state.messages:
 
 ## messages element format: {'role':'user', 'content':'<user prompt>'}
 
-
+for gr in greetings:
+    with st.chat_message("assistant"):
+        st.markdown(gr)
+
+    st.session_state.messages.append({
+        'role':'assistant',
+        'content': gr})
+
+if prompt:=st.chat_input("Enter your query"):
     with st.chat_message("user"):
         st.markdown(prompt)
-
+
     st.session_state.messages.append({
         'role':'user',
         'content': prompt})
 
-
-
+    if prompt in responses:
+        response=responses[prompt]
+    else:
+        out=pipe(prompt)
+        response=out[0]['generated_text']
 
-    response = f"Analysis: {response}"
+    # response = f"Analysis: {response}"
 
     with st.chat_message("assistant"):
         st.markdown(response)
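
For context, the hunk header shows the tokenizer already being loaded from the Logeswaransr/T5_MineAI_Prototype checkpoint, and the unchanged pipe= line wires model and tokenizer into a text2text-generation pipeline. A minimal sketch of the setup this implies at the top of app.py; only the tokenizer line is visible in the diff, so the imports and the T5ForConditionalGeneration model class are assumptions:

    import streamlit as st
    from transformers import T5Tokenizer, T5ForConditionalGeneration, pipeline

    # Only the tokenizer line appears in the diff context; loading the model as
    # T5ForConditionalGeneration is an assumption for a T5 seq2seq checkpoint.
    model = T5ForConditionalGeneration.from_pretrained("Logeswaransr/T5_MineAI_Prototype")
    tokenizer = T5Tokenizer.from_pretrained("Logeswaransr/T5_MineAI_Prototype")
    pipe = pipeline('text2text-generation', model=model, tokenizer=tokenizer)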
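
The new "if prompt in responses:" branch answers known prompts from a lookup table before falling back to the model, but responses itself is not defined anywhere in this diff. Its assumed shape, with purely hypothetical entries:

    # Hypothetical canned-answer table; the real one must be defined elsewhere
    # in app.py and is not shown in this diff.
    responses = {
        "hello": "Hello! How can I help you with your mining-related queries?",
        "who are you": "I am MineAI, a prototype assistant for mining questions.",
    }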
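
The fallback branch relies on the standard output shape of a text2text-generation pipeline: a list with one dict per generated sequence, each holding a 'generated_text' key, which is why the code indexes out[0]['generated_text']. Roughly:

    out = pipe("What is longwall mining?")  # hypothetical query
    # out has the form [{'generated_text': '...'}]
    response = out[0]['generated_text']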
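
One behavioral note on the greeting block: Streamlit reruns the whole script on every interaction, so the unguarded for-loop appends the greetings to st.session_state.messages again on each rerun and they accumulate in the replayed history. A common guard, shown only as a suggested pattern and not as part of this commit:

    # Suggested pattern, not code from the commit: store the greetings once per
    # session instead of once per rerun.
    if 'greeted' not in st.session_state:
        st.session_state.greeted = True
        for gr in greetings:
            st.session_state.messages.append({'role': 'assistant', 'content': gr})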