Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -19,24 +19,21 @@ with open("strict_mom_phrases.txt", "r", encoding="utf-8") as file:
|
|
| 19 |
# Read the entire contents of the file and store it in a variable
|
| 20 |
strict_mom_text = file.read()
|
| 21 |
|
| 22 |
-
def on_button_click():
|
| 23 |
-
if cool_button
|
| 24 |
-
|
| 25 |
with gr.Blocks() as demo:
|
| 26 |
cool_button = gr.Button(value="Cool Mom", variant="primary")
|
| 27 |
-
cool_button.click(fn=
|
| 28 |
|
| 29 |
demo.launch()
|
| 30 |
|
| 31 |
with gr.Blocks() as demo:
|
| 32 |
tutor_button = gr.Button(value="Tutor Mom", variant="primary")
|
| 33 |
-
tutor_button.click(fn=
|
| 34 |
|
| 35 |
demo.launch()
|
| 36 |
|
| 37 |
with gr.Blocks() as demo:
|
| 38 |
strict_button = gr.Button(value="Strict Mom", variant="primary")
|
| 39 |
-
strict_button.click(fn=
|
| 40 |
|
| 41 |
demo.launch()
|
| 42 |
|
|
@@ -138,17 +135,60 @@ def get_top_chunks(query, chunk_embeddings, text_chunks):
|
|
| 138 |
|
| 139 |
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
|
| 140 |
|
| 141 |
-
def
|
| 142 |
top_cool_results = get_top_chunks(message, cool_chunk_embeddings, cleaned_cool_chunks) # Complete this line
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 143 |
top_tutor_results = get_top_chunks(message, tutor_chunk_embeddings, cleaned_tutor_chunks)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 144 |
top_strict_results = get_top_chunks(message, strict_chunk_embeddings, cleaned_strict_chunks)
|
| 145 |
-
|
| 146 |
-
str_chunks = "\n".join(best_chunks)
|
| 147 |
|
| 148 |
-
messages = [{"role": "system", "content": f"You are chatbot that plays the role of the user's mom. Base your response on the provided context: {
|
| 149 |
{"role": "user",
|
| 150 |
"content": (
|
| 151 |
-
f"Context:\n{
|
| 152 |
f"Question{message}"
|
| 153 |
)}]
|
| 154 |
|
|
@@ -164,5 +204,7 @@ def respond(message, history):
|
|
| 164 |
)
|
| 165 |
return response['choices'][0]['message']['content'].strip()
|
| 166 |
|
|
|
|
|
|
|
| 167 |
chatbot = gr.ChatInterface(respond, type = 'messages')
|
| 168 |
chatbot.launch()
|
|
|
|
| 19 |
# Read the entire contents of the file and store it in a variable
|
| 20 |
strict_mom_text = file.read()
|
| 21 |
|
|
|
|
|
|
|
|
|
|
| 22 |
# Gradio UI: a single "Cool Mom" button wired to the cool-mom responder.
with gr.Blocks() as demo:
    cool_button = gr.Button(value="Cool Mom", variant="primary")
    # NOTE(review): respond_cool takes (message, history) but inputs=[]
    # supplies no arguments, so a click will raise a TypeError at runtime.
    # Wire real input/output components (or use gr.ChatInterface) — TODO confirm
    # the intended wiring.
    cool_button.click(fn=respond_cool, inputs=[], outputs=[])

demo.launch()
|
| 27 |
|
| 28 |
# Gradio UI: a single "Tutor Mom" button wired to the tutor-mom responder.
# NOTE(review): this rebinds `demo` and calls demo.launch() again; only the
# first launch() normally blocks/serves — verify whether three separate demos
# are really intended rather than one app with three buttons.
with gr.Blocks() as demo:
    tutor_button = gr.Button(value="Tutor Mom", variant="primary")
    # NOTE(review): respond_tutor takes (message, history) but inputs=[]
    # supplies no arguments, so a click will raise a TypeError at runtime.
    tutor_button.click(fn=respond_tutor, inputs=[], outputs=[])

demo.launch()
|
| 33 |
|
| 34 |
# Gradio UI: a single "Strict Mom" button wired to the strict-mom responder.
with gr.Blocks() as demo:
    strict_button = gr.Button(value="Strict Mom", variant="primary")
    # NOTE(review): respond_strict takes (message, history) but inputs=[]
    # supplies no arguments, so a click will raise a TypeError at runtime.
    # Wire real input/output components — TODO confirm the intended wiring.
    strict_button.click(fn=respond_strict, inputs=[], outputs=[])

demo.launch()
|
| 39 |
|
|
|
|
| 135 |
|
| 136 |
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
|
| 137 |
|
| 138 |
+
def respond_cool(message, history):
    """Chat handler for the "cool mom" persona.

    Retrieves the context chunks most similar to *message*, builds a
    persona system prompt around them, replays *history*, and returns the
    model's reply text.

    Parameters:
        message: the user's latest chat message (string).
        history: prior chat turns; extended into the message list when
            non-empty. Assumed to be a list of role/content dicts — TODO
            confirm against the caller.

    Returns:
        The stripped content of the model's first completion choice.
    """
    # Retrieve the most relevant "cool mom" chunks for this query.
    top_cool_results = get_top_chunks(message, cool_chunk_embeddings, cleaned_cool_chunks)  # Complete this line
    #str_chunks = "\n".join(best_chunks)

    # NOTE(review): top_cool_results is interpolated directly into the
    # f-strings below; if get_top_chunks returns a list, its repr is embedded
    # verbatim. The commented join above suggests a "\n".join was intended —
    # verify.
    messages = [{"role": "system", "content": f"You are chatbot that plays the role of the user's cool and super chill mom. Base your response on the provided context: {top_cool_results}"},
                {"role": "user",
                 "content": (
                     f"Context:\n{top_cool_results}\n\n"
                     f"Question{message}"
                 )}]

    # Replay any previous conversation turns after the seeded prompt.
    if history:
        messages.extend(history)

    # NOTE(review): the user's message is already in the initial list above,
    # so it is sent to the model twice — confirm this duplication is intended.
    messages.append({"role": "user", "content": message})

    response = client.chat_completion(
        messages,
        max_tokens = 100,
        temperature = 0.2
    )
    return response['choices'][0]['message']['content'].strip()
|
| 160 |
+
|
| 161 |
+
def respond_tutor(message, history):
    """Chat handler for the "tutor mom" persona.

    Retrieves the context chunks most similar to *message*, builds a
    persona system prompt around them, replays *history*, and returns the
    model's reply text.

    Parameters:
        message: the user's latest chat message (string).
        history: prior chat turns; extended into the message list when
            non-empty. Assumed to be a list of role/content dicts — TODO
            confirm against the caller.

    Returns:
        The stripped content of the model's first completion choice.
    """
    # Retrieve the most relevant "tutor mom" chunks for this query.
    top_tutor_results = get_top_chunks(message, tutor_chunk_embeddings, cleaned_tutor_chunks)
    #str_chunks = "\n".join(best_chunks)

    # NOTE(review): top_tutor_results is interpolated directly into the
    # f-strings below; if get_top_chunks returns a list, its repr is embedded
    # verbatim. The commented join above suggests a "\n".join was intended —
    # verify.
    messages = [{"role": "system", "content": f"You are chatbot that plays the role of the user's extremely studious, tutor-like mom. Base your response on the provided context: {top_tutor_results}"},
                {"role": "user",
                 "content": (
                     f"Context:\n{top_tutor_results}\n\n"
                     f"Question{message}"
                 )}]

    # Replay any previous conversation turns after the seeded prompt.
    if history:
        messages.extend(history)

    # NOTE(review): the user's message is already in the initial list above,
    # so it is sent to the model twice — confirm this duplication is intended.
    messages.append({"role": "user", "content": message})

    response = client.chat_completion(
        messages,
        max_tokens = 100,
        temperature = 0.2
    )
    return response['choices'][0]['message']['content'].strip()
|
| 183 |
+
|
| 184 |
+
def respond_strict(message, history):
|
| 185 |
top_strict_results = get_top_chunks(message, strict_chunk_embeddings, cleaned_strict_chunks)
|
| 186 |
+
#str_chunks = "\n".join(best_chunks)
|
|
|
|
| 187 |
|
| 188 |
+
messages = [{"role": "system", "content": f"You are chatbot that plays the role of the user's extremely strict mom. Base your response on the provided context: {top_strict_results}"},
|
| 189 |
{"role": "user",
|
| 190 |
"content": (
|
| 191 |
+
f"Context:\n{top_strict_results}\n\n"
|
| 192 |
f"Question{message}"
|
| 193 |
)}]
|
| 194 |
|
|
|
|
| 204 |
)
|
| 205 |
return response['choices'][0]['message']['content'].strip()
|
| 206 |
|
| 207 |
+
|
| 208 |
+
|
| 209 |
# Top-level chat UI: wraps the `respond` handler (defined above) in a
# ChatInterface using OpenAI-style message dicts, then serves it.
chatbot = gr.ChatInterface(respond, type = 'messages')
chatbot.launch()
|