ActiveYixiao committed
Commit 77ff4b8 · verified · 1 Parent(s): 14d2dfa

Update app.py

Files changed (1)
  1. app.py +90 -96
app.py CHANGED
@@ -7,7 +7,7 @@ import outlines
 import pandas as pd
 import spaces
 import torch
-from outlines import Generator
+from outlines import generate, models, samplers
 from peft import PeftConfig, PeftModel
 from pydantic import BaseModel, ConfigDict
 from transformers import (
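Note: the import swap above moves from the v1-style `Generator` entry point to the outlines 0.x module layout (`models`, `generate`, `samplers`). A minimal sketch of that 0.x pattern, with a placeholder model id rather than one of the Space's configured models:

```python
from typing import Literal

from outlines import generate, models, samplers
from pydantic import BaseModel


class ResponseModel(BaseModel):
    score: Literal["0", "1"]


# Placeholder model id, for illustration only.
model = models.transformers("HuggingFaceTB/SmolLM2-135M-Instruct")

# greedy() mirrors the app's temperature=0 intent; generate.json constrains
# decoding so the output always parses into the Pydantic schema.
generator = generate.json(model, ResponseModel, sampler=samplers.greedy())
result = generator("Story: ...\nQuestion: ...\nAnswer: ...\nScore:")
print(result.score)  # "0" or "1"
```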
@@ -20,7 +20,6 @@ from transformers import (
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
-MODEL_ID = "rshwndsz/ft-longformer-base-4096"
 DEVICE_MAP = "auto"
 QUANTIZATION_BITS = None
 TEMPERATURE = 0.0
@@ -39,32 +38,12 @@ AVAILABLE_MODELS = [
 ]
 DEFAULT_MODEL_ID = AVAILABLE_MODELS[0]
 
-
-SYSTEM_PROMPT = textwrap.dedent("""
-You are an assistant tasked with grading answers to a mind reading ability test. You will be provided with the following information:
-1. A story that was presented to participants as context
-2. The question that participants were asked to answer
-3. A grading scheme to evaluate the answers (Correct Responses:1, incorrect response:0, Incomplete response:0, Irrelevant:0)
-4. Grading examples
-5. A participant answer
-Your task is to grade each answer according to the grading scheme. For each answer, you should:
-1. Carefully read and understand the answer and compare it to the grading criteria
-2. Assigning an score 1 or 0 for each answer.
-""").strip()
-
+# Use a simpler prompt format that might be closer to your training data
 PROMPT_TEMPLATE = textwrap.dedent("""
-<Story>
-{story}
-</Story>
-<Question>
-{question}
-</Question>
-<GradingScheme>
-{grading_scheme}
-</GradingScheme>
-<Answer>
-{answer}
-</Answer>
+Story: {story}
+Question: {question}
+Grading Scheme: {grading_scheme}
+Answer: {answer}
 Score:""").strip()
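Since the removed SYSTEM_PROMPT is not replaced, the flat template above is now the entire input the grader sees. Rendered with hypothetical values:

```python
import textwrap

PROMPT_TEMPLATE = textwrap.dedent("""
Story: {story}
Question: {question}
Grading Scheme: {grading_scheme}
Answer: {answer}
Score:""").strip()

# Hypothetical inputs, purely to illustrate what the model receives.
print(PROMPT_TEMPLATE.format(
    story="Sam puts the ball in the basket and leaves the room.",
    question="Where will Sam look for the ball?",
    grading_scheme="Correct response (1): the basket. Anything else: 0.",
    answer="In the basket.",
))
# Story: Sam puts the ball in the basket and leaves the room.
# Question: Where will Sam look for the ball?
# Grading Scheme: Correct response (1): the basket. Anything else: 0.
# Answer: In the basket.
# Score:
```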
 
@@ -73,9 +52,14 @@ class ResponseModel(BaseModel):
     score: Literal["0", "1"]
 
 
-def get_outlines_model(
-    model_id: str, device_map: str = "auto", quantization_bits: Optional[int] = 4
-):
+# Cache models to avoid reloading on every request
+_model_cache = {}
+
+
+def get_model_and_tokenizer(model_id: str, device_map: str = "auto", quantization_bits: Optional[int] = None):
+    if model_id in _model_cache:
+        return _model_cache[model_id]
+
     if quantization_bits == 4:
         quantization_config = BitsAndBytesConfig(
             load_in_4bit=True,
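The module-level `_model_cache` dict added above keeps loaded weights alive across requests within one process. A sketch of an alternative with `functools.lru_cache` (hypothetical refactor, not what the commit does); note that `lru_cache` keys on every argument, while the dict keys on `model_id` alone:

```python
from functools import lru_cache
from typing import Optional


@lru_cache(maxsize=2)  # hold at most two loaded models in memory
def get_model_and_tokenizer(model_id: str, device_map: str = "auto", quantization_bits: Optional[int] = None):
    ...  # same loading logic as in the next hunk; elided in this sketch
```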
@@ -89,82 +73,95 @@ def get_outlines_model(
     quantization_config = None
 
     if "longformer" in model_id:
-        hf_model = AutoModelForSequenceClassification.from_pretrained(model_id)
-        hf_tokenizer = AutoTokenizer.from_pretrained(model_id)
-        return hf_model, hf_tokenizer
-
-    peft_config = PeftConfig.from_pretrained(model_id)
-    base_model_id = peft_config.base_model_name_or_path
-
-    base_model = AutoModelForCausalLM.from_pretrained(
-        base_model_id,
-        device_map=device_map,
-        quantization_config=quantization_config,
-    )
-    hf_model = PeftModel.from_pretrained(base_model, model_id)
-    hf_tokenizer = AutoTokenizer.from_pretrained(
-        base_model_id, use_fast=True, clean_up_tokenization_spaces=True
-    )
-
-    model = outlines.from_transformers(hf_model, hf_tokenizer)
-    return model
+        model = AutoModelForSequenceClassification.from_pretrained(model_id)
+        tokenizer = AutoTokenizer.from_pretrained(model_id)
+        result = (model, tokenizer, "classification")
+    else:
+        # For other models, use the same approach as your original script
+        peft_config = PeftConfig.from_pretrained(model_id)
+        base_model_id = peft_config.base_model_name_or_path
+
+        model = AutoModelForCausalLM.from_pretrained(
+            base_model_id,
+            device_map=device_map,
+            quantization_config=quantization_config,
+        )
+        model = PeftModel.from_pretrained(model, model_id)
+        tokenizer = AutoTokenizer.from_pretrained(
+            base_model_id, use_fast=True, clean_up_tokenization_spaces=True
+        )
+
+        # Convert to outlines model
+        outlines_model = models.transformers(
+            model,
+            tokenizer=tokenizer,
+            device_map=device_map,
+        )
+        result = (outlines_model, tokenizer, "generation")
+
+    _model_cache[model_id] = result
+    return result
 
 
 def format_prompt(story: str, question: str, grading_scheme: str, answer: str) -> str:
-    prompt = PROMPT_TEMPLATE.format(
+    return PROMPT_TEMPLATE.format(
         story=story.strip(),
         question=question.strip(),
        grading_scheme=grading_scheme.strip(),
         answer=answer.strip(),
     )
-    full_prompt = SYSTEM_PROMPT + "\n\n" + prompt
-    return full_prompt
 
 
 @spaces.GPU
 def label_single_response_with_model(model_id, story, question, criteria, response):
-    prompt = format_prompt(story, question, criteria, response)
-
-    if "longformer" in model_id:
-        model, tokenizer = get_outlines_model(model_id, DEVICE_MAP, QUANTIZATION_BITS)
-        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
-        with torch.no_grad():
-            logits = model(**inputs).logits
-        predicted_class = torch.argmax(logits, dim=1).item()
-        return str(predicted_class)
-    else:
-        # Use structured JSON generation like in the original script
-        model = get_outlines_model(model_id, DEVICE_MAP, QUANTIZATION_BITS)
-        sampler = outlines.samplers.greedy()  # Match original temperature=0 behavior
-        generator = outlines.generate.json(model, ResponseModel, sampler=sampler)
-        result = generator(prompt)
-        return result.score
+    try:
+        prompt = format_prompt(story, question, criteria, response)
+        model, tokenizer, model_type = get_model_and_tokenizer(model_id, DEVICE_MAP, QUANTIZATION_BITS)
+
+        if model_type == "classification":
+            # For Longformer models
+            inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
+            with torch.no_grad():
+                logits = model(**inputs).logits
+            predicted_class = torch.argmax(logits, dim=1).item()
+            return str(predicted_class)
+        else:
+            # For generative models
+            sampler = samplers.greedy()
+            generator = generate.json(model, ResponseModel, sampler=sampler)
+            result = generator(prompt)
+            return result.score
+    except Exception as e:
+        logger.error(f"Error in label_single_response_with_model: {str(e)}")
+        return "Error: " + str(e)
 
 
 @spaces.GPU
 def label_multi_responses_with_model(model_id, story, question, criteria, response_file):
-    df = pd.read_csv(response_file.name)
-    assert "response" in df.columns, "CSV must contain a 'response' column."
-    prompts = [
-        format_prompt(story, question, criteria, resp) for resp in df["response"]
-    ]
-
-    if "longformer" in model_id:
-        model, tokenizer = get_outlines_model(model_id, DEVICE_MAP, QUANTIZATION_BITS)
-        inputs = tokenizer(prompts, return_tensors="pt", truncation=True, padding=True)
-        with torch.no_grad():
-            logits = model(**inputs).logits
-        predicted_classes = torch.argmax(logits, dim=1).tolist()
-        scores = [str(cls) for cls in predicted_classes]
-    else:
-        # Use structured JSON generation for batch processing
-        model = get_outlines_model(model_id, DEVICE_MAP, QUANTIZATION_BITS)
-        sampler = outlines.samplers.greedy()
-        generator = outlines.generate.json(model, ResponseModel, sampler=sampler)
-        results = generator(prompts)
-        scores = [r.score for r in results]
-
-    df["score"] = scores
-    return df
+    try:
+        df = pd.read_csv(response_file.name)
+        assert "response" in df.columns, "CSV must contain a 'response' column."
+
+        model, tokenizer, model_type = get_model_and_tokenizer(model_id, DEVICE_MAP, QUANTIZATION_BITS)
+        prompts = [format_prompt(story, question, criteria, resp) for resp in df["response"]]
+
+        if model_type == "classification":
+            inputs = tokenizer(prompts, return_tensors="pt", truncation=True, padding=True)
+            with torch.no_grad():
+                logits = model(**inputs).logits
+            predicted_classes = torch.argmax(logits, dim=1).tolist()
+            scores = [str(cls) for cls in predicted_classes]
+        else:
+            sampler = samplers.greedy()
+            generator = generate.json(model, ResponseModel, sampler=sampler)
+            results = generator(prompts)
+            scores = [r.score for r in results]
+
+        df["score"] = scores
+        return df
+    except Exception as e:
+        logger.error(f"Error in label_multi_responses_with_model: {str(e)}")
+        return f"Error: {str(e)}"
 
 
 def single_response_ui(model_id):
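For the batch tab rewritten above, the upload only has to satisfy the `response`-column assert. A hypothetical CSV for a quick smoke test:

```python
import pandas as pd

# Hypothetical responses.csv for the multi-response tab;
# the only required column is "response", one answer per row.
pd.DataFrame(
    {"response": ["In the basket.", "Under the bed.", "I don't know."]}
).to_csv("responses.csv", index=False)
```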
@@ -185,10 +182,7 @@ def single_response_ui(model_id):
 
 def multi_response_ui(model_id):
     return gr.Interface(
-        fn=lambda story,
-        question,
-        criteria,
-        response_file: label_multi_responses_with_model(
+        fn=lambda story, question, criteria, response_file: label_multi_responses_with_model(
             model_id.value, story, question, criteria, response_file
         ),
         inputs=[
@@ -208,7 +202,7 @@ with gr.Blocks(title="Zero-Shot Evaluation Grader") as iface:
     model_selector = gr.Dropdown(
         label="Select Model",
         choices=AVAILABLE_MODELS,
-        value=AVAILABLE_MODELS[0],
+        value=DEFAULT_MODEL_ID,
     )
     selected_model_id = gr.State(value=DEFAULT_MODEL_ID)
 
@@ -227,4 +221,4 @@
 
 
 if __name__ == "__main__":
-    iface.launch(share=True)
+    iface.launch(share=True)