Spaces:
Sleeping
Sleeping
File size: 3,817 Bytes
c881f0d 722b6b6 c881f0d 722b6b6 080c680 c881f0d a12b3cb 722b6b6 d575221 89b1338 35b3b61 89b1338 35b3b61 722b6b6 1e72350 722b6b6 a12b3cb 722b6b6 c881f0d 200ad0d 722b6b6 89b1338 731e9db 722b6b6 731e9db 89b1338 722b6b6 1e72350 722b6b6 89b1338 1e72350 89b1338 722b6b6 1e72350 722b6b6 080c680 722b6b6 731e9db 200ad0d 722b6b6 d575221 c881f0d 1106d57 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 |
import gradio as gr
from huggingface_hub import InferenceClient
# Hugging Face Inference API client bound to the hosted Zephyr-7B-beta chat model.
# All generation requests in respond() go through this single shared client.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
# Function to handle chatbot responses, streamed token by token.
def respond(message, history: list[tuple[str, str]], system_message):
    """Stream a chat reply from the Zephyr model.

    Args:
        message: The latest user message.
        history: Prior (user, assistant) message pairs from the chat.
        system_message: System prompt prepended to the conversation.

    Yields:
        The accumulated response text after each streamed chunk, so the
        UI can render the reply progressively.
    """
    # Fixed generation settings (the corresponding sliders are hidden in the UI).
    max_tokens = 512
    temperature = 0.7
    top_p = 0.95

    # Build the OpenAI-style message list: system prompt, then alternating turns.
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # Use a distinct loop variable: the original shadowed the `message` parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        # delta.content can be None on some streamed chunks (e.g. role-only or
        # final chunks); guard so `response += token` cannot raise TypeError.
        token = chunk.choices[0].delta.content
        if token:
            response += token
        yield response
# Gradio Interface: a header banner above the chat panel.
with gr.Blocks(theme="soft") as demo:
    # Static HTML banner; styling is inline plus the fadeIn keyframes in demo.css.
    gr.HTML(
        """
        <div style="background: rgba(0, 0, 0, 0);
        padding: 20px; border-radius: 10px; text-align: center;
        box-shadow: 0px 4px 10px rgba(0, 0, 0, 0.2); animation: fadeIn 2s;">
        <h1 style="color: white; font-size: 36px;">FlashMind AI</h1>
        <p style="font-size: 18px; color: white;">
        Your intelligent assistant, powered by Hugging Face's Zephyr-7B model.
        Designed by <strong>Shamil Shahbaz</strong>.
        </p>
        </div>
        """
    )
    with gr.Row():
        with gr.Column(scale=6):  # Larger scale makes the chat interface bigger
            chat = gr.ChatInterface(
                respond,
                additional_inputs=[
                    # Fixed system prompt: hidden and non-interactive so users
                    # cannot see or change it, but it is still passed to respond().
                    gr.Textbox(
                        value="You are a friendly Chatbot.",
                        label="System message",
                        lines=2,
                        max_lines=3,
                        visible=False,
                        interactive=False,
                    ),
                ],
            )
# Styling: page background image, hidden sliders, button colors, and a
# transparent chat area with white text.
# NOTE(review): assigning `demo.css` after construction worked on older Gradio
# releases; newer versions expect `gr.Blocks(css=...)` at construction time --
# confirm against the pinned gradio version for this Space.
demo.css = """
@keyframes fadeIn {
0% { opacity: 0; }
100% { opacity: 1; }
}
body {
background: url('https://wallpapers.com/images/featured/unique-laptop-bnw8292wzppmnfmr.jpg') no-repeat center center fixed;
background-size: cover;
font-family: Arial, sans-serif;
}
.gr-textbox {
border: 1px solid #e1e4e8;
border-radius: 5px;
box-shadow: 0px 2px 5px rgba(0, 0, 0, 0.1);
transition: all 0.3s ease;
}
.gr-textbox:hover {
transform: scale(1.02);
border-color: #6fc3df;
}
.gr-slider {
display: none; /* Hides sliders from the interface */
}
.gr-button {
background-color: #4a90e2;
color: white;
border-radius: 5px;
}
/* Make the chat messages transparent and set text to white */
.gr-chatbox {
background: rgba(0, 0, 0, 0.4); /* Transparent black background */
color: white !important; /* Set text color to white */
}
.gr-chatbox .gr-message {
color: white !important; /* Ensures messages are white */
}
.gr-chatbox .gr-message-user {
color: white !important; /* User messages are white */
}
.gr-chatbox .gr-message-assistant {
color: white !important; /* Assistant messages are white */
}
"""
# Script entry point: launch the Gradio app only when run directly,
# not when this module is imported.
if __name__ == "__main__":
    demo.launch()
|