ActiveYixiao committed (verified)
Commit c38da00 · Parent(s): 77ff4b8

Update app.py

Files changed (1):
  1. app.py +5 -11
app.py CHANGED
@@ -7,7 +7,6 @@ import outlines
 import pandas as pd
 import spaces
 import torch
-from outlines import generate, models, samplers
 from peft import PeftConfig, PeftModel
 from pydantic import BaseModel, ConfigDict
 from transformers import (
@@ -92,11 +91,7 @@ def get_model_and_tokenizer(model_id: str, device_map: str = "auto", quantizatio
     )
 
     # Convert to outlines model
-    outlines_model = models.transformers(
-        model,
-        tokenizer=tokenizer,
-        device_map=device_map,
-    )
+    outlines_model = outlines.models.Transformers(model, tokenizer=tokenizer)
     result = (outlines_model, tokenizer, "generation")
 
     _model_cache[model_id] = result
@@ -126,9 +121,8 @@ def label_single_response_with_model(model_id, story, question, criteria, respon
             predicted_class = torch.argmax(logits, dim=1).item()
             return str(predicted_class)
         else:
-            # For generative models
-            sampler = samplers.greedy()
-            generator = generate.json(model, ResponseModel, sampler=sampler)
+            # For generative models - using the new Outlines API
+            generator = outlines.generate.json(model, ResponseModel)
             result = generator(prompt)
             return result.score
     except Exception as e:
@@ -152,8 +146,8 @@ def label_multi_responses_with_model(model_id, story, question, criteria, respon
             predicted_classes = torch.argmax(logits, dim=1).tolist()
             scores = [str(cls) for cls in predicted_classes]
         else:
-            sampler = samplers.greedy()
-            generator = generate.json(model, ResponseModel, sampler=sampler)
+            # For generative models - using the new Outlines API
+            generator = outlines.generate.json(model, ResponseModel)
             results = generator(prompts)
             scores = [r.score for r in results]
 
153