mrpoons-studio committed on
Commit 3071f4d · verified · 1 Parent(s): db823c9

Upload 2 files

Files changed (2)
  1. app.py +117 -105
  2. requirements.txt +3 -3
app.py CHANGED

```diff
@@ -8,132 +8,144 @@ import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
-MAX_MAX_NEW_TOKENS = 128000
-DEFAULT_MAX_NEW_TOKENS = 4096
-total_count=0
-MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
+MAX_MAX_NEW_TOKENS = 2048
+DEFAULT_MAX_NEW_TOKENS = 2048
+total_count = 0
+MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "128000"))
 
 DESCRIPTION = """\
 # DeepSeek-R1-Chat
 
-This space demonstrates model [DeepSeek-R1](https://huggingface.co/deepseek-ai/deepseek-r1) by DeepSeek, a reasoning model with 6xxB parameters fine-tuned for chat instructions.
+This space demonstrates model [DeepSeek-Coder](https://huggingface.co/deepseek-ai/deepseek-coder-r1) by DeepSeek, a code model with 6XXB parameters fine-tuned for chat instructions.
 
 **You can also try our R1 model in [official homepage](https://r1.deepseek.com/chat).**
 """
 
 if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
 
 if torch.cuda.is_available():
-    model_id = "deepseek-ai/deepseek-r1"
-    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
-    tokenizer.use_default_system_prompt = False
-
+    model_id = "deepseek-ai/deepseek-r1"
+    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype = torch.bfloat16, device_map = "auto")
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    tokenizer.use_default_system_prompt = False
+
 
 
 @spaces.GPU
 def generate(
-    message: str,
-    chat_history: list[tuple[str, str]],
-    system_prompt: str,
-    max_new_tokens: int = 4096,
-    temperature: float = 0,
-    top_p: float = 0,
-    top_k: int = 0,
-    repetition_penalty: float = 4,
+    message: str,
+    chat_history: list[tuple[str, str]],
+    system_prompt: str,
+    max_new_tokens: int = 2048,
+    temperature: float = 0,
+    top_p: float = 0,
+    top_k: int = 50,
+    repetition_penalty: float = 2,
 ) -> Iterator[str]:
-    global total_count
-    total_count += 1
-    print(total_count)
-    os.system("nvidia-smi")
-    conversation = []
-    if system_prompt:
-        conversation.append({"role": "system", "content": system_prompt})
-    for user, assistant in chat_history:
-        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
-    conversation.append({"role": "user", "content": message})
-
-    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
-    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
-        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
-        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
-    input_ids = input_ids.to(model.device)
-
-    streamer = TextIteratorStreamer(tokenizer, timeout=400.0, skip_prompt=True, skip_special_tokens=True)
-    generate_kwargs = dict(
-        {"input_ids": input_ids},
-        streamer=streamer,
-        max_new_tokens=max_new_tokens,
-        do_sample=False,
-        top_p=top_p,
-        top_k=top_k,
-        num_beams=1,
-        # temperature=temperature,
-        repetition_penalty=repetition_penalty,
-        eos_token_id=32021
-    )
-    t = Thread(target=model.generate, kwargs=generate_kwargs)
-    t.start()
-
-    outputs = []
-    for text in streamer:
-        outputs.append(text)
-    yield "".join(outputs).replace("<|EOT|>","")
+    global total_count
+    total_count += 1
+    print(total_count)
+    os.system("nvidia-smi")
+    conversation = []
+    if system_prompt:
+        conversation.append({
+            "role": "system", "content": system_prompt
+        })
+    for user, assistant in chat_history:
+        conversation.extend([{
+            "role": "user", "content": user
+        }, {
+            "role": "assistant", "content": assistant
+        }])
+    conversation.append({
+        "role": "user", "content": message
+    })
+
+    input_ids = tokenizer.apply_chat_template(conversation, return_tensors = "pt")
+    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
+        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
+        gr.Warning(f"Trimmed input from conversation as it was longer than {
+            MAX_INPUT_TOKEN_LENGTH
+        } tokens.")
+    input_ids = input_ids.to(model.device)
+
+    streamer = TextIteratorStreamer(tokenizer, timeout = 10.0, skip_prompt = True, skip_special_tokens = True)
+    generate_kwargs = dict(
+        {
+            "input_ids": input_ids
+        },
+        streamer = streamer,
+        max_new_tokens = max_new_tokens,
+        do_sample = False,
+        top_p = top_p,
+        top_k = top_k,
+        num_beams = 1,
+        # temperature=temperature,
+        repetition_penalty = repetition_penalty,
+        eos_token_id = 32021
+    )
+    t = Thread(target = model.generate, kwargs = generate_kwargs)
+    t.start()
+
+    outputs = []
+    for text in streamer:
+        outputs.append(text)
+    yield "".join(outputs).replace("<|EOT|>","")
 
 
 chat_interface = gr.ChatInterface(
-    fn=generate,
-    additional_inputs=[
-        gr.Textbox(label="System prompt", lines=6),
-        gr.Slider(
-            label="Max new tokens",
-            minimum=1,
-            maximum=MAX_MAX_NEW_TOKENS,
-            step=1,
-            value=DEFAULT_MAX_NEW_TOKENS,
-        ),
-        # gr.Slider(
-        #     label="Temperature",
-        #     minimum=0,
-        #     maximum=4.0,
-        #     step=0.1,
-        #     value=0,
-        # ),
-        gr.Slider(
-            label="Top-p (nucleus sampling)",
-            minimum=0,
-            maximum=1.0,
-            step=0.001,
-            value=0,
-        ),
-        gr.Slider(
-            label="Top-k",
-            minimum=0,
-            maximum=400,
-            step=1,
-            value=0,
-        ),
-        gr.Slider(
-            label="Repetition penalty",
-            minimum=1.0,
-            maximum=4.0,
-            step=0.001,
-            value=4,
-        ),
-    ],
-    stop_btn=gr.Button("Stop"),
-    examples=[
-        ["implement snake game using pygame"],
-        ["Can you explain briefly to me what is the Python programming language?"],
-        ["write a program to find the factorial of a number"],
-    ],
+    fn = generate,
+    additional_inputs = [
+        gr.Textbox(label = "System prompt", lines = 6),
+        gr.Slider(
+            label = "Max new tokens",
+            minimum = 1,
+            maximum = MAX_MAX_NEW_TOKENS,
+            step = 1,
+            value = DEFAULT_MAX_NEW_TOKENS,
+        ),
+        gr.Slider(
+            label="Temperature",
+            minimum=0,
+            maximum=4.0,
+            step=0.01,
+            value=0,
+        ),
+        gr.Slider(
+            label = "Top-p (nucleus sampling)",
+            minimum = 0,
+            maximum = 4.0,
+            step = 0.01,
+            value = 0,
+        ),
+        gr.Slider(
+            label = "Top-k",
+            minimum = 1,
+            maximum = 1000,
+            step = 0.01,
+            value = 50,
+        ),
+        gr.Slider(
+            label = "Repetition penalty",
+            minimum = 0,
+            maximum = 2.0,
+            step = 0.01,
+            value = 2,
+        ),
+    ],
+    stop_btn = gr.Button("Stop"),
+    examples = [
+        ["implement snake game using pygame"],
+        ["Can you explain briefly to me what is the Python programming language?"],
+        ["write a program to find the factorial of a number"],
+    ],
 )
 
-with gr.Blocks(css="style.css") as demo:
-    gr.Markdown(DESCRIPTION)
-    chat_interface.render()
+with gr.Blocks(css = "style.css") as demo:
+    gr.Markdown(DESCRIPTION)
+    chat_interface.render()
 
 if __name__ == "__main__":
-    demo.queue(max_size=16384).launch()
+    demo.queue(max_size = 20).launch()
```
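Both the old and new `generate` rely on the same streaming pattern: the chat history is flattened with `tokenizer.apply_chat_template`, `model.generate` runs on a background `Thread`, and a `TextIteratorStreamer` hands decoded fragments back to the `yield` loop that drives the Gradio UI. Below is a minimal, self-contained sketch of that pattern; `sshleifer/tiny-gpt2` is purely a stand-in checkpoint for local experimentation (an assumption, not what the Space loads).

```python
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Stand-in checkpoint; the Space itself loads deepseek-ai/deepseek-r1.
model_id = "sshleifer/tiny-gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

input_ids = tokenizer("Hello", return_tensors="pt").input_ids
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks until completion, so it runs off the main thread;
# the streamer is the queue that carries decoded fragments back.
thread = Thread(
    target=model.generate,
    kwargs=dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=32,
        pad_token_id=tokenizer.eos_token_id,
    ),
)
thread.start()

pieces = []
for text in streamer:  # each iteration blocks until the next fragment arrives
    pieces.append(text)
    print("".join(pieces))  # in the Space this is a `yield`, streaming to the UI
thread.join()
```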
requirements.txt CHANGED

```diff
@@ -1,9 +1,9 @@
 https://github.com/huggingface/accelerate/archive/main.tar.gz
 https://github.com/bitsandbytes-foundation/bitsandbytes/archive/main.tar.gz
 https://github.com/gradio-app/gradio/archive/main.tar.gz
-https://files.pythonhosted.org/packages/f7/d1/e0a911544ca9993e0f17ce6d3cc0932752356c1b0a834397f28e63479344/protobuf-5.29.3.tar.gz
-https://files.pythonhosted.org/packages/76/c6/8eb0654ba0c7d0bb1bf67bf8fbace101a8e4f250f7722371105e8b6f68fc/scipy-1.15.1.tar.gz
+protobuf
+https://github.com/scipy/scipy/archive/main.tar.gz
 git+https://github.com/google/sentencepiece@master#sub_directory=python
 https://github.com/huggingface/huggingface_hub/archive/main.tar.gz
+https://github.com/pytorch/pytorch
 https://github.com/huggingface/transformers/archive/main.tar.gz
```
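One caveat on the new PyTorch line: pip cannot install from a bare repository page such as `https://github.com/pytorch/pytorch`; it needs either a source-archive URL, as every other entry in this file uses, or a `git+` VCS requirement, as the sentencepiece line does. A sketch of the two forms presumably intended:

```
# a source archive of the default branch (this file's existing convention)
https://github.com/pytorch/pytorch/archive/main.tar.gz
# or a pip VCS requirement
git+https://github.com/pytorch/pytorch@main
```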