mrpoons-studio committed
Commit 1441501 · verified · 1 Parent(s): 502c3ee

Upload app.py

Files changed (1)
  1. app.py +102 -117
app.py CHANGED
@@ -10,145 +10,130 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 2048
-total_count = 0
+total_count=0
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "128000"))
 
 DESCRIPTION = """\
 # DeepSeek-R1-Chat
 
-This space demonstrates model [DeepSeek-Coder](https://huggingface.co/deepseek-ai/deepseek-coder-r1) by DeepSeek, a code model with 6XXB parameters fine-tuned for chat instructions.
+This space demonstrates model [DeepSeek-R1](https://huggingface.co/deepseek-ai/deepseek-r1) by DeepSeek, a code model with 6XXB parameters fine-tuned for chat instructions.
 
 **You can also try our R1 model in [official homepage](https://r1.deepseek.com/chat).**
 """
 
 if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
 
 if torch.cuda.is_available():
-    model_id = "deepseek-ai/deepseek-r1"
-    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype = torch.bfloat16, device_map = "auto")
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
-    tokenizer.use_default_system_prompt = False
-
+    model_id = "deepseek-ai/deepseek-r1"
+    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    tokenizer.use_default_system_prompt = False
+
 
 
 @spaces.GPU
 def generate(
-    message: str,
-    chat_history: list[tuple[str, str]],
-    system_prompt: str,
-    max_new_tokens: int = 2048,
-    temperature: float = 0,
-    top_p: float = 0,
-    top_k: int = 50,
-    repetition_penalty: float = 2,
+    message: str,
+    chat_history: list[tuple[str, str]],
+    system_prompt: str,
+    max_new_tokens: int = 2048,
+    temperature: float = 0,
+    top_p: float = 0,
+    top_k: int = 50,
+    repetition_penalty: float = 2,
 ) -> Iterator[str]:
-    global total_count
-    total_count += 1
-    print(total_count)
-    os.system("nvidia-smi")
-    conversation = []
-    if system_prompt:
-        conversation.append({
-            "role": "system", "content": system_prompt
-        })
-    for user, assistant in chat_history:
-        conversation.extend([{
-            "role": "user", "content": user
-        }, {
-            "role": "assistant", "content": assistant
-        }])
-    conversation.append({
-        "role": "user", "content": message
-    })
-
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors = "pt")
-    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
-        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
-        gr.Warning((
-            f"Trimmed input from conversation as it was longer than "
-            f" {
-                MAX_INPUT_TOKEN_LENGTH
-            } tokens."
-        ))
-    input_ids = input_ids.to(model.device)
-
-    streamer = TextIteratorStreamer(tokenizer, timeout = 10.0, skip_prompt = True, skip_special_tokens = True)
-    generate_kwargs = dict(
-        {
-            "input_ids": input_ids
-        },
-        streamer = streamer,
-        max_new_tokens = max_new_tokens,
-        do_sample = False,
-        top_p = top_p,
-        top_k = top_k,
-        num_beams = 1,
-        # temperature=temperature,
-        repetition_penalty = repetition_penalty,
-        eos_token_id = 32021
-    )
-    t = Thread(target = model.generate, kwargs = generate_kwargs)
-    t.start()
-
-    outputs = []
-    for text in streamer:
-        outputs.append(text)
-        yield "".join(outputs).replace("<|EOT|>","")
+    global total_count
+    total_count += 1
+    print(total_count)
+    os.system("nvidia-smi")
+    conversation = []
+    if system_prompt:
+        conversation.append({"role": "system", "content": system_prompt})
+    for user, assistant in chat_history:
+        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
+    conversation.append({"role": "user", "content": message})
+
+    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
+    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
+    input_ids = input_ids.to(model.device)
+
+    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(
+        {"input_ids": input_ids},
+        streamer=streamer,
+        max_new_tokens=max_new_tokens,
+        do_sample=False,
+        top_p=top_p,
+        top_k=top_k,
+        num_beams=1,
+        # temperature=temperature,
+        repetition_penalty=repetition_penalty,
+        eos_token_id=32021
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
+
+    outputs = []
+    for text in streamer:
+        outputs.append(text)
+        yield "".join(outputs).replace("<|EOT|>","")
 
 
 chat_interface = gr.ChatInterface(
-    fn = generate,
-    additional_inputs = [
-        gr.Textbox(label = "System prompt", lines = 6),
-        gr.Slider(
-            label = "Max new tokens",
-            minimum = 1,
-            maximum = MAX_MAX_NEW_TOKENS,
-            step = 1,
-            value = DEFAULT_MAX_NEW_TOKENS,
-        ),
-        gr.Slider(
-            label = "Temperature",
-            minimum = 0,
-            maximum = 4.0,
-            step = 0.01,
-            value = 0,
-        ),
-        gr.Slider(
-            label = "Top-p (nucleus sampling)",
-            minimum = 0,
-            maximum = 4.0,
-            step = 0.01,
-            value = 0,
-        ),
-        gr.Slider(
-            label = "Top-k",
-            minimum = 1,
-            maximum = 1000,
-            step = 0.01,
-            value = 50,
-        ),
-        gr.Slider(
-            label = "Repetition penalty",
-            minimum = 0,
-            maximum = 2.0,
-            step = 0.01,
-            value = 2,
-        ),
-    ],
-    stop_btn = gr.Button("Stop"),
-    examples = [
-        ["implement snake game using pygame"],
-        ["Can you explain briefly to me what is the Python programming language?"],
-        ["write a program to find the factorial of a number"],
-    ],
+    fn=generate,
+    additional_inputs=[
+        gr.Textbox(label="System prompt", lines=6),
+        gr.Slider(
+            label="Max new tokens",
+            minimum=0,
+            maximum=MAX_MAX_NEW_TOKENS,
+            step=0.01,
+            value=DEFAULT_MAX_NEW_TOKENS,
+        ),
+        # gr.Slider(
+        #     label="Temperature",
+        #     minimum=0,
+        #     maximum=4.0,
+        #     step=0.01,
+        #     value=0,
+        # ),
+        gr.Slider(
+            label="Top-p (nucleus sampling)",
+            minimum=0,
+            maximum=1.0,
+            step=0.01,
+            value=0,
+        ),
+        gr.Slider(
+            label="Top-k",
+            minimum=1,
+            maximum=1000,
+            step=0.01,
+            value=50,
+        ),
+        gr.Slider(
+            label="Repetition penalty",
+            minimum=1.0,
+            maximum=2.0,
+            step=0.01,
+            value=2,
+        ),
+    ],
+    stop_btn=gr.Button("Stop"),
+    examples=[
+        ["implement snake game using pygame"],
+        ["Can you explain briefly to me what is the Python programming language?"],
+        ["write a program to find the factorial of a number"],
+    ],
 )
 
-with gr.Blocks(css = "style.css") as demo:
-    gr.Markdown(DESCRIPTION)
-    chat_interface.render()
+with gr.Blocks(css="style.css") as demo:
+    gr.Markdown(DESCRIPTION)
+    chat_interface.render()
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch()
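
For readers skimming the diff: the heart of `generate` is the stock `transformers` streaming recipe — `model.generate` blocks until generation finishes, so it is launched on a background `Thread` while the Gradio callback iterates a `TextIteratorStreamer` and yields progressively longer strings. Below is a minimal sketch of that pattern; the model id `gpt2` and the variable names are stand-ins chosen so the snippet runs on CPU, and are not part of this Space.

```python
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Stand-in model so the sketch runs without a GPU; the Space itself
# loads "deepseek-ai/deepseek-r1" instead.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompt = "def factorial(n):"
input_ids = tokenizer(prompt, return_tensors="pt").input_ids

# skip_prompt drops the echoed input; skip_special_tokens strips EOS markers.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks until done, so run it on a worker thread and let the
# main thread consume partial text as it is produced.
worker = Thread(
    target=model.generate,
    kwargs={"input_ids": input_ids, "max_new_tokens": 64, "streamer": streamer},
)
worker.start()

chunks = []
for text in streamer:          # yields decoded text incrementally
    chunks.append(text)
    print("".join(chunks))     # app.py yields this string to Gradio instead
worker.join()
```

Running the blocking call off-thread is what lets the callback be a generator Gradio can stream: each `yield` repaints the chat bubble with the text accumulated so far.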