Build error
Create app.py
app.py
ADDED
@@ -0,0 +1,75 @@
+import streamlit as st
+import os
+from streamlit_chat import message
+from langchain.prompts import PromptTemplate
+from langchain.chains import LLMChain
+from langchain_community.llms.huggingface_hub import HuggingFaceHub
+
+# Instruction-tuned T5 model served through the Hugging Face Inference API.
+llm = HuggingFaceHub(repo_id="suriya7/MaxMini-Instruct-248M",
+                     task='text2text-generation',
+                     huggingfacehub_api_token=os.getenv('HF_TOKEN'),
+                     model_kwargs={
+                         "do_sample": True,
+                         "max_new_tokens": 250
+                     })
+
+template = """
+Please answer the question:
+previous chat: {previous_history}
+Human: {question}
+chatbot:
+"""
+
+prompt = PromptTemplate(template=template, input_variables=['question', 'previous_history'])
+
+llm_chain = LLMChain(
+    llm=llm,
+    prompt=prompt,
+    verbose=True,
+)
+
+def conversational_chat(user_query):
+    # Flatten the stored (question, answer) pairs into the prompt's history slot.
+    previous_response = "\n".join(f"Human: {q}\nChatbot: {a}" for q, a in st.session_state['history'])
+    result = llm_chain.predict(
+        question=user_query,
+        previous_history=previous_response
+    )
+    st.session_state['history'].append((user_query, result))
+    return result
+
+st.title('MaxMini')
+st.info("MaxMini-Instruct-248M is a T5 (Text-To-Text Transfer Transformer) model fine-tuned on a variety of tasks. This model is designed to perform a range of instructional tasks, enabling users to generate instructions for various inputs.")
+
+# Initialise session state once; Streamlit reruns this script on every
+# interaction, so unconditional assignment would wipe the conversation each turn.
+if 'history' not in st.session_state:
+    st.session_state['history'] = []
+
+if 'message' not in st.session_state:
+    st.session_state['message'] = ['Hey There! How Can I Assist You?']
+
+if 'past' not in st.session_state:
+    st.session_state['past'] = []
+
+# Create containers for chat history and user input
+response_container = st.container()
+container = st.container()
+
+# User input form
+user_input = st.chat_input("Ask Your Questions 👉..")
+with container:
+    if user_input:
+        output = conversational_chat(user_input)
+        st.session_state['past'].append(user_input)
+        st.session_state['message'].append(output)
+
+# Display chat history: message[0] is the greeting, so each later reply
+# message[i] pairs with the user turn past[i - 1].
+if st.session_state['message']:
+    with response_container:
+        for i in range(len(st.session_state['message'])):
+            if i != 0:
+                message(st.session_state["past"][i - 1], is_user=True, key=str(i) + '_user', avatar_style="adventurer")
+            message(st.session_state["message"][i], key=str(i), avatar_style="bottts")
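
The Space's "Build error" status commonly comes from missing dependencies rather than from app.py itself. Given the imports above, a requirements.txt sketch along these lines would likely be needed (package names are assumptions inferred from the imports, not part of this commit):

streamlit
streamlit-chat
langchain
langchain-community
huggingface_hub

With the Streamlit SDK selected, the Space installs these packages at build time and then launches the app, equivalent to running streamlit run app.py locally; HF_TOKEN must also be set as a Space secret for the Inference API call to authenticate.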