jfand committed on
Commit a3c4528 · verified · 1 Parent(s): a8ce414

Update app.py

Files changed (1)
  1. app.py +52 -67
app.py CHANGED
@@ -1,70 +1,55 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
-
- def respond(
-     message,
-     history: list[dict[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     hf_token: gr.OAuthToken,
- ):
-     """
-     For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-     """
-     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-     messages = [{"role": "system", "content": system_message}]
-
-     messages.extend(history)
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         choices = message.choices
-         token = ""
-         if len(choices) and choices[0].delta.content:
-             token = choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- chatbot = gr.ChatInterface(
-     respond,
-     type="messages",
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
  )
-
  with gr.Blocks() as demo:
-     with gr.Sidebar():
-         gr.LoginButton()
-     chatbot.render()
-
-
- if __name__ == "__main__":
-     demo.launch()
  import gradio as gr
+ from unsloth import FastLanguageModel
+ import torch
+
+ # =========================
+ # Load the BuccAI model from Hugging Face
+ # =========================
+ print("⏳ Loading the BuccAI model...")
+ model, tokenizer = FastLanguageModel.from_pretrained(
+     model_name="jfand/BuccAI-4bit",  # ⚡ your model repo
+     max_seq_length=2048,
+     load_in_4bit=True,  # because this is the quantized version
  )
+ print("✅ Model loaded successfully!")
+
+ # =========================
+ # Generation function
+ # =========================
+ def generate_response(prompt, max_tokens=400):
+     try:
+         inputs = tokenizer(prompt, return_tensors="pt").to("cuda" if torch.cuda.is_available() else "cpu")
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=max_tokens,
+             temperature=0.7,
+             top_p=0.9,
+             do_sample=True,
+             repetition_penalty=1.15,
+         )
+         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+         return response.strip()
+     except Exception as e:
+         return f"⚠️ Error: {str(e)}"
+
+ # =========================
+ # Gradio interface
+ # =========================
  with gr.Blocks() as demo:
+     gr.Markdown("# 🦷 BuccAI - Dental Assistant (Makandal Technologies)")
+
+     with gr.Row():
+         with gr.Column(scale=3):
+             user_input = gr.Textbox(
+                 label="💬 Ask your dental question",
+                 placeholder="E.g.: What are the symptoms of gingivitis?",
+                 lines=3
+             )
+             max_tokens = gr.Slider(100, 1000, value=400, step=50, label="Max tokens")
+             submit = gr.Button("Generate response")
+         with gr.Column(scale=4):
+             output = gr.Textbox(label="🤖 BuccAI response", lines=15)
+
+     submit.click(fn=generate_response, inputs=[user_input, max_tokens], outputs=output)
+
+ demo.launch()
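
One note on the new generate_response: for causal language models, model.generate returns the prompt tokens followed by the completion, so decoding outputs[0] in full makes the displayed answer start by echoing the user's question. Below is a minimal sketch of one way to return only the newly generated text, assuming the same model, tokenizer, and sampling settings as in the committed app.py; the function name is hypothetical and not part of the commit.

import torch

def generate_response_no_echo(prompt, max_tokens=400):
    # Hypothetical variant of generate_response: decode only the tokens
    # produced after the prompt, so the answer is not prefixed by the question.
    inputs = tokenizer(prompt, return_tensors="pt").to(
        "cuda" if torch.cuda.is_available() else "cpu"
    )
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
        repetition_penalty=1.15,
    )
    prompt_length = inputs["input_ids"].shape[1]  # number of prompt tokens
    new_tokens = outputs[0][prompt_length:]       # keep only the completion
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

Separately, since the rewritten app imports unsloth and torch at module level, the Space presumably also needs those packages declared (for example in a requirements.txt) and GPU hardware for the 4-bit load to work.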