Amossofer committed on
Commit
a79f15b
·
1 Parent(s): a4dc283
Files changed (2)
  1. app.py +54 -53
  2. requirements.txt +3 -1
app.py CHANGED
@@ -1,64 +1,65 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
- 
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("arnir0/Tiny-LLM")
- 
- 
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
- 
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
- 
-     messages.append({"role": "user", "content": message})
- 
-     response = ""
- 
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
- 
-         response += token
-         yield response
- 
- 
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
      ],
  )
  
- 
  if __name__ == "__main__":
-     demo.launch()
 
  import gradio as gr
+ import torch
+ import torch.nn.functional as F
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ 
+ # Model selection
+ MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
+ 
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
+ model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.float16, device_map="auto")
+ 
+ @torch.no_grad()  # inference only; do not build autograd graphs across steps
+ def chat_blend(systemA, systemB, wA, wB, user_message, max_new_tokens=50, temperature=1.0, top_p=0.95):
+     # Build one plain-text prompt per system persona
+     promptA = systemA + "\nUser: " + user_message + "\nAssistant:"
+     promptB = systemB + "\nUser: " + user_message + "\nAssistant:"
+     input_ids_A = tokenizer(promptA, return_tensors="pt").input_ids.to(model.device)
+     input_ids_B = tokenizer(promptB, return_tensors="pt").input_ids.to(model.device)
+     output_ids = input_ids_A  # the reply is decoded against prompt A's history
+ 
+     for _ in range(max_new_tokens):
+         # Next-token logits from each history
+         logitsA = model(input_ids=input_ids_A).logits[:, -1, :]
+         logitsB = model(input_ids=input_ids_B).logits[:, -1, :]
+         probsA = F.softmax(logitsA / temperature, dim=-1)
+         probsB = F.softmax(logitsB / temperature, dim=-1)
+         # Blend the two distributions; clamp so that negative weights
+         # cannot produce negative probabilities, then renormalize
+         blended = torch.clamp(wA * probsA + wB * probsB, min=0)
+         blended /= blended.sum(dim=-1, keepdim=True)
+         # Top-p (nucleus) truncation: drop a token only if the cumulative
+         # mass *before* it exceeds top_p, so the top token always survives
+         sorted_probs, sorted_idx = torch.sort(blended, descending=True)
+         cum = torch.cumsum(sorted_probs, dim=-1)
+         mask = (cum - sorted_probs) > top_p
+         sorted_probs[mask] = 0
+         sorted_probs /= sorted_probs.sum(dim=-1, keepdim=True)
+         # Sample in sorted space, then map back to vocabulary ids
+         new_id = sorted_idx.gather(-1, torch.multinomial(sorted_probs, num_samples=1))
+         output_ids = torch.cat([output_ids, new_id], dim=-1)
+         # Append the sampled token to both histories
+         input_ids_A = torch.cat([input_ids_A, new_id], dim=-1)
+         input_ids_B = torch.cat([input_ids_B, new_id], dim=-1)
+         # Stop on EOS
+         if new_id.item() == tokenizer.eos_token_id:
+             break
+ 
+     decoded = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+     # Strip the system and user prompt; keep only the assistant reply
+     return decoded.split("Assistant:")[-1].strip()
+ 
+ iface = gr.Interface(
+     fn=chat_blend,
+     inputs=[
+         gr.Textbox(label="System Prompt A", value="You are assistant A."),
+         gr.Textbox(label="System Prompt B", value="You are assistant B."),
+         gr.Slider(label="wA", minimum=-2.0, maximum=2.0, step=0.1, value=1.0),
+         gr.Slider(label="wB", minimum=-2.0, maximum=2.0, step=0.1, value=1.0),
+         gr.Textbox(label="User message"),
+         gr.Slider(label="Max new tokens", minimum=1, maximum=200, step=1, value=50),
+         gr.Slider(label="Temperature", minimum=0.1, maximum=2.0, step=0.1, value=1.0),
+         gr.Slider(label="Top-p", minimum=0.1, maximum=1.0, step=0.05, value=0.95),
      ],
+     outputs="text",
+     title="Blended-LLM Chat (TinyLlama)",
+     description="Uses two system prompts and blends their token distributions using wA*p1 + wB*p2."
  )
  
  if __name__ == "__main__":
+     iface.launch()
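
The description above names the blending rule wA*p1 + wB*p2. A minimal standalone sketch of that single sampling step, with made-up toy distributions standing in for the model's outputs (every value below is illustrative, not from the commit):

    import torch

    # Toy next-token distributions over a 4-token vocabulary
    pA = torch.tensor([[0.70, 0.20, 0.05, 0.05]])  # persona A
    pB = torch.tensor([[0.10, 0.60, 0.20, 0.10]])  # persona B
    wA, wB, top_p = 1.0, 0.5, 0.90

    # Blend, clamp (negative weights are allowed by the sliders), renormalize
    blended = torch.clamp(wA * pA + wB * pB, min=0)
    blended /= blended.sum(dim=-1, keepdim=True)

    # Nucleus truncation, then sample and map back to vocabulary ids
    sorted_probs, sorted_idx = torch.sort(blended, descending=True)
    cum = torch.cumsum(sorted_probs, dim=-1)
    sorted_probs[(cum - sorted_probs) > top_p] = 0
    sorted_probs /= sorted_probs.sum(dim=-1, keepdim=True)
    token = sorted_idx.gather(-1, torch.multinomial(sorted_probs, 1))
    print(blended, token)

Because the wA and wB sliders go down to -2.0, the clamp-and-renormalize step is what keeps the blend a valid probability distribution when one persona is weighted negatively.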
requirements.txt CHANGED
@@ -1 +1,3 @@
- huggingface_hub==0.25.2
+ transformers>=4.31
+ torch
+ gradio
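
Assuming the dependencies above install cleanly, a quick smoke test of chat_blend outside the Gradio UI might look like this (a hypothetical invocation; the prompts, weights, and token budget are illustrative):

    # Importing app is safe: iface.launch() sits under the __main__ guard,
    # though the import will download and load the model.
    from app import chat_blend

    reply = chat_blend(
        systemA="You are a terse assistant.",
        systemB="You are a verbose assistant.",
        wA=1.2,
        wB=0.8,
        user_message="What is nucleus sampling?",
        max_new_tokens=60,
    )
    print(reply)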