MindLabUnimib committed on
Commit
13bc307
·
1 Parent(s): dea455e

chore: set max duration to 120s

Browse files
Files changed (1) hide show
  1. app.py +1 -3
app.py CHANGED
@@ -18,7 +18,6 @@ moderator_model = AutoModelForSequenceClassification.from_pretrained(moderator_m
18
  moderator_model.to("cuda")
19
  moderator_tokenizer = AutoTokenizer.from_pretrained(moderator_model_name)
20
 
21
- @spaces.GPU
22
  def generate_responses(model, tokenizer, prompts):
23
  messages = [[{"role": "user", "content": message}] for message in prompts]
24
 
@@ -41,7 +40,6 @@ def generate_responses(model, tokenizer, prompts):
41
 
42
  return responses
43
 
44
- @spaces.GPU
45
  def classify_pairs(model, tokenizer, prompts, responses):
46
  texts = [
47
  prompt + "[SEP]" + response for prompt, response in zip(prompts, responses)
@@ -59,7 +57,7 @@ def classify_pairs(model, tokenizer, prompts, responses):
59
  return unsafety_scores
60
 
61
 
62
- @spaces.GPU
63
  def generate(submission: list[dict[str, str]], team_id: str) -> list[dict[str, str | float]]:
64
  ids = [s["id"] for s in submission]
65
  prompts = [s["prompt"] for s in submission]
 
18
  moderator_model.to("cuda")
19
  moderator_tokenizer = AutoTokenizer.from_pretrained(moderator_model_name)
20
 
 
21
  def generate_responses(model, tokenizer, prompts):
22
  messages = [[{"role": "user", "content": message}] for message in prompts]
23
 
 
40
 
41
  return responses
42
 
 
43
  def classify_pairs(model, tokenizer, prompts, responses):
44
  texts = [
45
  prompt + "[SEP]" + response for prompt, response in zip(prompts, responses)
 
57
  return unsafety_scores
58
 
59
 
60
+ @spaces.GPU(duration=120)
61
  def generate(submission: list[dict[str, str]], team_id: str) -> list[dict[str, str | float]]:
62
  ids = [s["id"] for s in submission]
63
  prompts = [s["prompt"] for s in submission]