boyuia committed
Commit 6c475d9 · verified · 1 Parent(s): 789b23e

Update app.py

Files changed (1):
  app.py +146 -48
app.py CHANGED
@@ -1,64 +1,162 @@
  import gradio as gr
  from huggingface_hub import InferenceClient

- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
-
  """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
      ],
  )

-
- if __name__ == "__main__":
-     demo.launch()
 
  import gradio as gr
+ import torch
+ import torch.nn as nn
+ from torch.nn import functional as F
+ import json
  from huggingface_hub import InferenceClient

+ # --- Model Definition (same as before) ---
+ # NOTE: The model classes MUST be defined in your app.py file so that the
+ # checkpoint's state_dict can be loaded into an identical architecture.
+ # This code is a copy of the model classes from the training script.
+ batch_size = 32
+ block_size = 8
+ n_embd = 32
+ n_head = 4
+ n_layer = 4
+ dropout = 0.0
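+ # block_size, n_embd, n_head and n_layer determine the parameter shapes,
+ # so they must match the values used when 'model.pt' was trained; otherwise
+ # load_state_dict() below raises size-mismatch errors.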

+ class Head(nn.Module):
+     def __init__(self, head_size):
+         super().__init__()
+         self.key = nn.Linear(n_embd, head_size, bias=False)
+         self.query = nn.Linear(n_embd, head_size, bias=False)
+         self.value = nn.Linear(n_embd, head_size, bias=False)
+         # Lower-triangular mask: position t may only attend to positions <= t.
+         self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))
+         self.dropout = nn.Dropout(dropout)
+     def forward(self, x):
+         B, T, C = x.shape
+         k = self.key(x)
+         q = self.query(x)
+         wei = q @ k.transpose(-2, -1) * C**-0.5  # scaled attention scores (B, T, T)
+         wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf'))
+         wei = F.softmax(wei, dim=-1)
+         wei = self.dropout(wei)
+         v = self.value(x)
+         out = wei @ v
+         return out
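+ # With block_size = 8, the mask limits each position to attending over at
+ # most the 8 most recent characters, so the model cannot use longer context.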

+ class MultiHeadAttention(nn.Module):
+     def __init__(self, num_heads, head_size):
+         super().__init__()
+         self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
+         self.proj = nn.Linear(num_heads * head_size, n_embd)
+         self.dropout = nn.Dropout(dropout)
+     def forward(self, x):
+         out = torch.cat([h(x) for h in self.heads], dim=-1)
+         out = self.dropout(self.proj(out))
+         return out

+ class FeedForward(nn.Module):
+     def __init__(self, n_embd):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Linear(n_embd, 4 * n_embd),
+             nn.ReLU(),
+             nn.Linear(4 * n_embd, n_embd),
+             nn.Dropout(dropout),
+         )
+     def forward(self, x):
+         return self.net(x)

+ class TransformerBlock(nn.Module):
+     def __init__(self, n_embd, n_head):
+         super().__init__()
+         head_size = n_embd // n_head
+         self.sa = MultiHeadAttention(n_head, head_size)
+         self.ffwd = FeedForward(n_embd)
+         self.ln1 = nn.LayerNorm(n_embd)
+         self.ln2 = nn.LayerNorm(n_embd)
+     def forward(self, x):
+         # Pre-norm residual connections around attention and feed-forward.
+         x = x + self.sa(self.ln1(x))
+         x = x + self.ffwd(self.ln2(x))
+         return x

+ class LanguageModel(nn.Module):
+     def __init__(self, vocab_size, block_size, n_embd, n_head, n_layer, dropout):
+         super().__init__()
+         self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
+         self.position_embedding_table = nn.Embedding(block_size, n_embd)
+         self.blocks = nn.Sequential(*[TransformerBlock(n_embd, n_head) for _ in range(n_layer)])
+         self.ln_f = nn.LayerNorm(n_embd)
+         self.lm_head = nn.Linear(n_embd, vocab_size)
+         self.block_size = block_size
+         self.vocab_size = vocab_size

+     def forward(self, idx, targets=None):
+         B, T = idx.shape
+         tok_emb = self.token_embedding_table(idx)
+         pos_emb = self.position_embedding_table(torch.arange(T, device=idx.device))
+         x = tok_emb + pos_emb
+         x = self.blocks(x)
+         x = self.ln_f(x)
+         logits = self.lm_head(x)
+         loss = None
+         if targets is not None:
+             B, T, C = logits.shape
+             logits = logits.view(B * T, C)
+             targets = targets.view(B * T)
+             loss = F.cross_entropy(logits, targets)
+         return logits, loss

+     def generate(self, idx, max_new_tokens):
+         for _ in range(max_new_tokens):
+             # Crop the running context to the last block_size tokens.
+             idx_cond = idx[:, -self.block_size:]
+             logits, _ = self(idx_cond)
+             # Keep only the logits for the final position and sample from them.
+             logits = logits[:, -1, :]
+             probs = F.softmax(logits, dim=-1)
+             idx_next = torch.multinomial(probs, num_samples=1)
+             idx = torch.cat((idx, idx_next), dim=1)
+         return idx
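+ # Generation uses plain multinomial sampling at temperature 1.0; swapping
+ # torch.multinomial for probs.argmax(dim=-1, keepdim=True) would give
+ # greedy decoding instead.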

+ # --- Utility functions and model loading ---
+ # We need to re-create the tokenizer and vocabulary from the training data.
+ # In a real app, you would save and load these as well.
+ # For simplicity, we are hardcoding a version of the dataset to get the vocab.
+ jsonl_data = """
+ {"text": "This is a much longer text that will serve as a simple dataset for our tiny language model. The model will learn to predict the next character based on the previous characters in the sequence."}
+ {"text": "This demonstrates the core idea behind training an autoregressive language model. The quick brown fox jumps over the lazy dog."}
+ {"text": "A journey of a thousand miles begins with a single step. The early bird catches the worm. All that glitters is not gold. A stitch in time saves nine."}
+ {"text": "Where there's a will, there's a way. Look before you leap. You can't make an omelette without breaking a few eggs. Practice makes perfect. Don't count your chickens before they hatch."}
  """
+ corpus = ""
+ for line in jsonl_data.strip().split('\n'):
+     data_point = json.loads(line)
+     corpus += data_point['text']
+
+ chars = sorted(list(set(corpus)))
+ vocab_size = len(chars)
+ stoi = {ch: i for i, ch in enumerate(chars)}
+ itos = {i: ch for i, ch in enumerate(chars)}
+ encode = lambda s: [stoi[c] for c in s]
+ decode = lambda l: ''.join([itos[i] for i in l])
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
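+ # encode/decode invert each other for characters present in the corpus;
+ # a prompt containing any other character raises KeyError inside encode.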

+ # Load the trained model.
+ model = LanguageModel(vocab_size, block_size, n_embd, n_head, n_layer, dropout)
+ model.load_state_dict(torch.load('model.pt', map_location=device))
+ model.eval()  # Set the model to evaluation mode for inference.
+ model.to(device)
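+ # Assumes 'model.pt' (the state_dict saved by the training script) is
+ # present in the app's working directory, e.g. the root of the Space repo.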


+ # --- Gradio UI & Inference function ---
+ def generate_text(prompt, max_new_tokens):
+     # Encode the prompt text into tokens.
+     context = torch.tensor(encode(prompt), dtype=torch.long, device=device).unsqueeze(0)
+     # Generate new tokens (slider values arrive as floats; range() needs an int).
+     generated_text_indices = model.generate(context, max_new_tokens=int(max_new_tokens))
+     # Decode the tokens back into text.
+     return decode(generated_text_indices[0].tolist())

+ demo = gr.Interface(
+     fn=generate_text,
+     inputs=[
+         gr.Textbox(label="Prompt", placeholder="Enter your text prompt here..."),
+         gr.Slider(1, 100, value=20, step=1, label="Number of new tokens to generate"),
      ],
+     outputs="text",
+     title="Leaf Chat",
+     description="A demo application running on the Leaf model."
  )

+ demo.launch()