# app.py — Hugging Face Space "SEO Assistant"
# (Space status: Running; last change: "updated with additional inputs")
# ---------------------------------------------------------------------------
# NOTE(review): this section was an extraction-garbled unified diff of the
# PREVIOUS revision of app.py (the stock gr.ChatInterface streaming template:
# a single "System message" textbox plus the three sampling sliders, with no
# customer-profile inputs).  It is fully superseded by the definitions below
# and is intentionally not reproduced as executable code — doing so would
# shadow the current `respond`/`demo` and its `demo.launch()` guard would
# block before the current definitions ever ran.
#
# Useful references carried over from that revision:
#   - huggingface_hub Inference API guide:
#     https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
#   - Customizing gr.ChatInterface:
#     https://www.gradio.app/docs/chatinterface
# ---------------------------------------------------------------------------
import gradio as gr
from huggingface_hub import InferenceClient

# Model served through the Hugging Face Inference API.
MODEL_ID = "HuggingFaceH4/zephyr-7b-beta"

# Shared Inference client used for every chat request.
client = InferenceClient(MODEL_ID)
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    customer_profile,
    customer_goals,
    main_topic,
    ask_for_topic_suggestions,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for the SEO assistant.

    Parameters
    ----------
    message : str
        The user's current chat message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turn pairs supplied by gr.ChatInterface.
    system_message : str
        Base instructions for the bot (first additional input).
    customer_profile, customer_goals, main_topic : str
        Extra context folded into the system prompt.
    ask_for_topic_suggestions : bool
        When True, additionally asks the model to propose topics.
    max_tokens, temperature, top_p :
        Sampling parameters forwarded to the Inference API.

    Yields
    ------
    str
        The accumulated response text, growing as tokens stream in.
    """
    # Construct the system message with the additional inputs.
    enhanced_system_message = (
        f"{system_message}\n\n"
        f"Customer Profile: {customer_profile}\n"
        f"Customer Goals, Pain Points, and Preferences: {customer_goals}\n"
        f"Main Topic: {main_topic}\n"
    )

    # If the user wants topic suggestions, extend the prompt.
    if ask_for_topic_suggestions:
        enhanced_system_message += "The user is also asking for topic suggestions to address their customer's needs."

    messages = [{"role": "system", "content": enhanced_system_message}]

    # Replay the conversation history; skip empty halves of a turn.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    # Add the current user message.
    messages.append({"role": "user", "content": message})

    # Stream the completion.  The loop variable is named `chunk` rather than
    # `message` (as in the original) so it no longer shadows the parameter.
    # NOTE(review): the `stream=True` / `temperature=` kwargs were lost in
    # extraction between `max_tokens=` and `top_p=`; reconstructed from the
    # stock gr.ChatInterface template — confirm against the deployed Space.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Streaming deltas may carry content=None (e.g. role-only chunks);
        # guard so `response += token` cannot raise TypeError.
        if token:
            response += token
        yield response
# Define the Gradio interface.  Additional inputs are passed positionally to
# `respond` after (message, history), so their order here must match the
# parameter order of `respond`.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(
            value="You are a friendly Chatbot, a digital marketing expert and a talented copywriter. You are trying to help a user write a creative post to improve their SEO based on their input.",
            label="Instructions to Bot",
        ),
        gr.Textbox(label="Customer Profile", placeholder="Describe your customer profile (e.g., age, interests, profession)"),
        gr.Textbox(
            label="Customer Goals, Pain Points, and Preferences",
            placeholder="Describe your customer's goals, pain points, concerns, obstacles, wishes, and preferences",
        ),
        gr.Textbox(label="Main Topic", placeholder="Enter the main topic of the post"),
        gr.Checkbox(label="Ask for Topic Suggestions", value=False),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        # NOTE(review): this slider's arguments (lines 76-80 of the new file)
        # were lost in extraction; reconstructed from the stock
        # gr.ChatInterface template — confirm against the deployed Space.
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    title="SEO Assistant",
    description="This app provides customized content that resonates with your customers to improve your SEO. Based on your input. Powered by Hugging Face Inference, Design Thinking, and domain expertise. Developed by wn. Disclaimer: AI can make mistakes. Use with caution and at your own risk!",
)
if __name__ == "__main__":
    # Launch the Gradio server only when executed as a script,
    # not when imported as a module.
    demo.launch()