MaxLSB committed on
Commit afd7003 · verified · 1 Parent(s): 7a40ff9

Update app.py

Files changed (1): app.py (+69 -64)
app.py CHANGED
@@ -1,70 +1,75 @@
+import spaces
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-
-def respond(
-    message,
-    history: list[dict[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-    hf_token: gr.OAuthToken,
-):
-    """
-    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-    """
-    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-    messages = [{"role": "system", "content": system_message}]
-
-    messages.extend(history)
-
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+from threading import Thread
+
+@spaces.GPU
+def predict(message, history):
+    torch.set_default_device("cuda")
+
+    # Load model and tokenizer
+    model_id = "kurakurai/Luth-LFM2-1.2B"
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    model = AutoModelForCausalLM.from_pretrained(
+        model_id,
+        device_map="auto",
+        torch_dtype=torch.bfloat16,
+        trust_remote_code=True,
+        load_in_4bit=True,  # Keeping 4-bit quantization for efficiency
+        # attn_implementation="flash_attention_2"  # Uncomment on compatible GPU
+    )
+
+    # Format conversation history for chat template
+    messages = [{"role": "user" if i % 2 == 0 else "assistant", "content": msg}
+                for conv in history for i, msg in enumerate(conv) if msg]
     messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
+
+    # Apply chat template
+    input_ids = tokenizer.apply_chat_template(
         messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        choices = message.choices
-        token = ""
-        if len(choices) and choices[0].delta.content:
-            token = choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-chatbot = gr.ChatInterface(
-    respond,
-    type="messages",
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+        add_generation_prompt=True,
+        return_tensors="pt",
+        tokenize=True
+    ).to('cuda')
+
+    # Setup streamer for real-time output
+    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
+
+    # Generation parameters
+    generate_kwargs = dict(
+        input_ids=input_ids,
+        streamer=streamer,
+        max_new_tokens=256,
+        do_sample=True,
+        temperature=0.3,
+        min_p=0.15,
+        repetition_penalty=1.05,
+        pad_token_id=tokenizer.eos_token_id
+    )
+
+    # Start generation in separate thread
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
+
+    # Stream tokens
+    partial_message = ""
+    for new_token in streamer:
+        partial_message += new_token
+        yield partial_message
+
+# Setup Gradio interface
+gr.ChatInterface(
+    predict,
+    description="""
+    <center><h2>Kurakura AI Luth-LFM2-1.2B Chat</h2></center>
+
+    Chat with [Luth-LFM2-1.2B](https://huggingface.co/kurakurai/Luth-LFM2-1.2B), a French-tuned version of LFM2-1.2B.
+    """,
+    examples=[
+        "Peux-tu résoudre l'équation 3x - 7 = 11 pour x ?",
+        "Explique la photosynthèse en termes simples.",
+        "Écris un petit poème sur l'intelligence artificielle."
     ],
-)
-
-with gr.Blocks() as demo:
-    with gr.Sidebar():
-        gr.LoginButton()
-    chatbot.render()
-
-
-if __name__ == "__main__":
-    demo.launch()
+    theme=gr.themes.Soft(primary_hue="blue"),
+).launch()
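
The history comprehension in the new `predict` assumes Gradio's tuple-style history, a list of `(user_message, assistant_reply)` pairs, which is what `ChatInterface` passes when it is not constructed with `type="messages"` (as the removed version was). A small sketch with hypothetical values showing what the flattening produces:

```python
# Hypothetical one-turn history in Gradio's tuple format.
history = [("Bonjour !", "Bonjour, comment puis-je vous aider ?")]

# Same comprehension as in predict(): even positions within each pair
# become "user", odd positions "assistant"; empty entries are skipped.
messages = [
    {"role": "user" if i % 2 == 0 else "assistant", "content": msg}
    for conv in history
    for i, msg in enumerate(conv)
    if msg
]
# messages == [
#     {"role": "user", "content": "Bonjour !"},
#     {"role": "assistant", "content": "Bonjour, comment puis-je vous aider ?"},
# ]
```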
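Passing `load_in_4bit=True` straight to `from_pretrained` relies on the `bitsandbytes` package, and newer `transformers` releases prefer the same option spelled out as a `BitsAndBytesConfig`. A minimal sketch of that equivalent load, assuming `bitsandbytes` is installed:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Explicit form of the 4-bit load used above (assumes bitsandbytes is installed).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # match the bf16 dtype used in app.py
)
model = AutoModelForCausalLM.from_pretrained(
    "kurakurai/Luth-LFM2-1.2B",
    device_map="auto",
    quantization_config=bnb_config,
    trust_remote_code=True,
)
```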
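The streaming itself is the standard `transformers` pattern: `model.generate` blocks until generation finishes, so it runs in a worker thread while `TextIteratorStreamer` hands decoded text chunks to the consuming loop as they arrive. A self-contained sketch of just that pattern, using `gpt2` purely as a placeholder model:

```python
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Placeholder model; any causal LM works the same way.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello, my name is", return_tensors="pt")
# skip_prompt=True keeps the echoed prompt out of the stream;
# skip_special_tokens=True drops tokens like EOS from the decoded text.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks, so it runs in a background thread...
thread = Thread(
    target=model.generate,
    kwargs=dict(**inputs, streamer=streamer, max_new_tokens=50),
)
thread.start()

# ...while this thread consumes the streamer, which yields decoded
# text fragments as soon as they are available.
text = ""
for chunk in streamer:
    text += chunk
thread.join()
print(text)
```

Because `ChatInterface` re-renders the reply on every `yield`, `predict` accumulates `partial_message` and yields the growing string rather than individual tokens.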