TaylorKaua committed on
Commit
b12c25f
·
verified ·
1 Parent(s): 10dee9f

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -0
app.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ from huggingface_hub import InferenceClient
4
+
5
+ MODEL_ID = "mradermacher/sk2decompile-struct-6.7b-GGUF"
6
+
7
+ def get_client(hf_token):
8
+ token = getattr(hf_token, "token", None) or os.getenv("HF_TOKEN")
9
+ return InferenceClient(model=MODEL_ID, token=token)
10
+
11
+ def respond(message, history, system_message, max_tokens, temperature, top_p, hf_token: gr.OAuthToken):
12
+ try:
13
+ client = get_client(hf_token)
14
+ messages = [{"role": "system", "content": system_message}]
15
+ messages += history
16
+ messages.append({"role": "user", "content": message})
17
+ response = ""
18
+ for chunk in client.chat_completion(messages=messages, max_tokens=max_tokens, stream=True,
19
+ temperature=temperature, top_p=top_p):
20
+ token = getattr(chunk.choices[0].delta, "content", "") if hasattr(chunk.choices[0], "delta") else ""
21
+ response += token
22
+ yield response
23
+ except Exception as e:
24
+ yield f"❌ Erro: {e}"
25
+
26
+ chatbot = gr.ChatInterface(
27
+ respond,
28
+ type="messages",
29
+ additional_inputs=[
30
+ gr.Textbox(value="You are a friendly assistant.", label="System message"),
31
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
32
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
33
+ gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
34
+ gr.LoginButton(),
35
+ ],
36
+ )
37
+
38
+ demo = gr.Blocks()
39
+ with demo:
40
+ with gr.Sidebar():
41
+ gr.Markdown("Login com Hugging Face para usar o modelo protegido.")
42
+ chatbot.render()
43
+
44
+ if __name__ == "__main__":
45
+ demo.launch()