import gradio as gr
from huggingface_hub import InferenceClient
from typing import List, Dict


def respond(
    message: str,
    history: List[Dict[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    hf_token: gr.OAuthToken,
):
"""
Para mais informações sobre o Inference API:
https://huggingface.co/docs/huggingface_hub/guides/inference
"""
    # Initialize the inference client with the user's OAuth token
    client = InferenceClient(
        token=hf_token.token,
        model="apple/FastVLM-7B",
    )
    # Build the message list: system prompt, prior turns, then the new message
    messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
    messages.append({"role": "user", "content": message})
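
    # Illustrative shape of the payload (hypothetical values): because the
    # ChatInterface below uses type="messages", history already holds
    # openai-style dicts, so the final list looks like
    #   [{"role": "system", "content": "You are a friendly Chatbot."},
    #    {"role": "user", "content": "Hi!"},
    #    {"role": "assistant", "content": "Hello! How can I help?"},
    #    {"role": "user", "content": "..."}]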

    response = ""
    # Stream tokens, yielding the accumulated response so the UI
    # updates the assistant message incrementally
    for chunk in client.chat_completion(
        messages=messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        choices = chunk.choices
        token = ""
        if choices and choices[0].delta and choices[0].delta.content:
            token = choices[0].delta.content
        response += token
        yield response
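

# A non-streaming variant, for reference. `respond_once` is a hypothetical
# helper (not wired into the UI) showing the same chat_completion call
# without stream=True:
def respond_once(message: str, hf_token: gr.OAuthToken, max_tokens: int = 512) -> str:
    client = InferenceClient(token=hf_token.token, model="apple/FastVLM-7B")
    completion = client.chat_completion(
        messages=[{"role": "user", "content": message}],
        max_tokens=max_tokens,
    )
    # Without streaming, the full reply arrives in a single choice
    return completion.choices[0].message.content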


# Chatbot interface
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
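
# Note: additional_inputs are passed positionally to respond after
# (message, history), i.e. system_message, max_tokens, temperature, top_p.
# The hf_token parameter is typed gr.OAuthToken, so Gradio injects it from
# the user's login session rather than treating it as another UI input.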

# Assemble the layout: a sidebar with the login button alongside the chat
with gr.Blocks() as demo:
    with gr.Sidebar():
        gr.LoginButton()
    chatbot.render()

if __name__ == "__main__":
    demo.launch()
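
# To try this locally (assumes `pip install gradio huggingface_hub`; note that
# gr.LoginButton/gr.OAuthToken target Hugging Face Spaces with OAuth enabled,
# so sign-in behavior may differ outside a Space):
#   python app.py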