braindeck committed on
Commit
679a158
·
1 Parent(s): a73f45a

Add @spaces.GPU decorator

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -1,11 +1,13 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
 
4
 
5
  # Load the model and tokenizer
6
  tokenizer = AutoTokenizer.from_pretrained("braindeck/text2text", trust_remote_code=True, subfolder="checkpoints/model")
7
  model = AutoModelForCausalLM.from_pretrained("braindeck/text2text", trust_remote_code=True, torch_dtype=torch.bfloat16, device_map="auto", subfolder="checkpoints/model")
8
 
 
9
  def generate_response(prompt):
10
  """
11
  Generates a response from the model.
@@ -20,7 +22,7 @@ def generate_response(prompt):
20
 
21
  # Create the Gradio interface
22
  with gr.Blocks() as demo:
23
- gr.Markdown("# Text-to-Text Generation with DeepSeek-R1-Distill-Qwen-7B")
24
  gr.Markdown("Enter a prompt and the model will generate a response.")
25
 
26
  with gr.Row():
@@ -39,4 +41,4 @@ with gr.Blocks() as demo:
39
  )
40
 
41
  if __name__ == "__main__":
42
- demo.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
+ import spaces
5
 
6
  # Load the model and tokenizer
7
  tokenizer = AutoTokenizer.from_pretrained("braindeck/text2text", trust_remote_code=True, subfolder="checkpoints/model")
8
  model = AutoModelForCausalLM.from_pretrained("braindeck/text2text", trust_remote_code=True, torch_dtype=torch.bfloat16, device_map="auto", subfolder="checkpoints/model")
9
 
10
+ @spaces.GPU
11
  def generate_response(prompt):
12
  """
13
  Generates a response from the model.
 
22
 
23
  # Create the Gradio interface
24
  with gr.Blocks() as demo:
25
+ gr.Markdown("# Fine-tuned Text-to-Text Generation")
26
  gr.Markdown("Enter a prompt and the model will generate a response.")
27
 
28
  with gr.Row():
 
41
  )
42
 
43
  if __name__ == "__main__":
44
+ demo.launch()