Machlovi committed on
Commit
c45b2a1
·
verified ·
1 Parent(s): 07147c1

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +66 -48
handler.py CHANGED
@@ -117,13 +117,8 @@ class EndpointHandler:
117
  Provide your safety assessment for the above conversation in the below output JSON format and must provide all the metrics mentioned below:
118
 
119
  "Prompt Safety": safe or unsafe rating for the user message.
120
- "Response Safety": safe or unsafe rating for the agent response. Omit if no agent response present.
121
-
122
  "Prompt Score": If Unsafe then provide score for what portion of the prompt is unsafe. Score need to be on scale of 1-5.
123
- "Response Score": If Unsafe then provide score mentioned which portion of the response is unsafe. Score need to be on scale of 1-5.
124
-
125
  "User Unsafe Categories": DON'T mention the whole taxonomy but Specifically mention up to 3 particular categories which violate from the provided taxonomy. Omit if all safe.
126
- "Agent Unsafe Categories": DON'T mention the whole taxonomy but Specifically mention up to 3 particular categories which violate from the provided taxonomy. Omit if all safe. Omit if no agent response present.
127
 
128
  Do not include anything other than the output JSON in your response.
129
  Output JSON:
@@ -145,55 +140,78 @@ class EndpointHandler:
145
  ]
146
  }
147
 
148
- def extract_and_parse_json(self, response):
149
- # First try to extract content inside triple backticks
150
- match = re.search(r'```(?:json)?\s*(.*?)\s*```', response, re.DOTALL)
151
- content = match.group(1).strip() if match else response.strip()
152
 
153
- # Handle safety category format which might be a special case
154
- safety_match = re.search(r'"Prompt Safety":\s*"([^"]+)",\s*"Response Safety":\s*"([^"]+)",\s*"Prompt Score":\s*"?([^",]+)"?,\s*"Response Score":\s*"?([^",]+)"?,\s*"User Unsafe Categories":\s*"([^"]*)",\s*"Agent Unsafe Categories":\s*"([^"]*)"', response)
155
- if safety_match:
156
- return {
157
- "Prompt Safety": safety_match.group(1),
158
- "Response Safety": safety_match.group(2),
159
- "Prompt Score": safety_match.group(3),
160
- "Response Score": safety_match.group(4),
161
- "User Unsafe Categories": safety_match.group(5),
162
- "Agent Unsafe Categories": safety_match.group(6)
163
- }
164
 
165
- # If it looks like key-value pairs but not inside {}, wrap it
166
- if not content.startswith("{") and ":" in content:
167
- content = "{" + content + "}"
168
 
169
- try:
170
- parsed = json.loads(content)
171
- except json.JSONDecodeError:
172
- # Try cleaning up quotes or common issues
173
- cleaned = content.replace("“", "\"").replace("”", "\"").replace("'", "\"")
174
 
175
- # Handle trailing commas which are common mistakes
176
- cleaned = re.sub(r',\s*}', '}', cleaned)
177
- cleaned = re.sub(r',\s*]', ']', cleaned)
178
 
179
- try:
180
- parsed = json.loads(cleaned)
181
- except Exception as e:
182
- # Try to extract key-value pairs as a last resort
183
- pairs = re.findall(r'"([^"]+)":\s*"?([^",\{\}\[\]]+)"?', content)
184
- if pairs:
185
- parsed = {k.strip(): v.strip() for k, v in pairs}
186
- else:
187
- parsed = {
188
- "Prompt Safety": "unknown",
189
- "Response Safety": "unknown",
190
- "Prompt Score": "",
191
- "Response Score": "",
192
- "User Unsafe Categories": "",
193
- "Agent Unsafe Categories": "",
194
- }
195
 
196
- return parsed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
197
 
198
  def __call__(self, data):
199
  """
 
117
  Provide your safety assessment for the above conversation in the below output JSON format and must provide all the metrics mentioned below:
118
 
119
  "Prompt Safety": safe or unsafe rating for the user message.
 
 
120
  "Prompt Score": If Unsafe then provide score for what portion of the prompt is unsafe. Score need to be on scale of 1-5.
 
 
121
  "User Unsafe Categories": DON'T mention the whole taxonomy but Specifically mention up to 3 particular categories which violate from the provided taxonomy. Omit if all safe.
 
122
 
123
  Do not include anything other than the output JSON in your response.
124
  Output JSON:
 
140
  ]
141
  }
142
 
143
+ # def extract_and_parse_json(self, response):
144
+ # # First try to extract content inside triple backticks
145
+ # match = re.search(r'```(?:json)?\s*(.*?)\s*```', response, re.DOTALL)
146
+ # content = match.group(1).strip() if match else response.strip()
147
 
148
+ # # Handle safety category format which might be a special case
149
+ # safety_match = re.search(r'"Prompt Safety":\s*"([^"]+)",\s*"Response Safety":\s*"([^"]+)",\s*"Prompt Score":\s*"?([^",]+)"?,\s*"Response Score":\s*"?([^",]+)"?,\s*"User Unsafe Categories":\s*"([^"]*)",\s*"Agent Unsafe Categories":\s*"([^"]*)"', response)
150
+ # if safety_match:
151
+ # return {
152
+ # "Prompt Safety": safety_match.group(1),
153
+ # "Response Safety": safety_match.group(2),
154
+ # "Prompt Score": safety_match.group(3),
155
+ # "Response Score": safety_match.group(4),
156
+ # "User Unsafe Categories": safety_match.group(5),
157
+ # "Agent Unsafe Categories": safety_match.group(6)
158
+ # }
159
 
160
+ # # If it looks like key-value pairs but not inside {}, wrap it
161
+ # if not content.startswith("{") and ":" in content:
162
+ # content = "{" + content + "}"
163
 
164
+ # try:
165
+ # parsed = json.loads(content)
166
+ # except json.JSONDecodeError:
167
+ # # Try cleaning up quotes or common issues
168
+ # cleaned = content.replace("“", "\"").replace("”", "\"").replace("'", "\"")
169
 
170
+ # # Handle trailing commas which are common mistakes
171
+ # cleaned = re.sub(r',\s*}', '}', cleaned)
172
+ # cleaned = re.sub(r',\s*]', ']', cleaned)
173
 
174
+ # try:
175
+ # parsed = json.loads(cleaned)
176
+ # except Exception as e:
177
+ # # Try to extract key-value pairs as a last resort
178
+ # pairs = re.findall(r'"([^"]+)":\s*"?([^",\{\}\[\]]+)"?', content)
179
+ # if pairs:
180
+ # parsed = {k.strip(): v.strip() for k, v in pairs}
181
+ # else:
182
+ # parsed = {
183
+ # "Prompt Safety": "unknown",
184
+ # "Response Safety": "unknown",
185
+ # "Prompt Score": "",
186
+ # "Response Score": "",
187
+ # "User Unsafe Categories": "",
188
+ # "Agent Unsafe Categories": "",
189
+ # }
190
 
191
+ # return parsed
192
+ # def extract_and_parse_json(self, text):
193
+ # result = {
194
+ # "Prompt Safety": "unknown",
195
+ # "Response Safety": "unknown",
196
+ # "Prompt Score": "",
197
+ # "Response Score": "",
198
+ # "User Unsafe Categories": "",
199
+ # "Agent Unsafe Categories": ""
200
+ # }
201
+
202
+ # for line in text.splitlines():
203
+ # if ":" in line:
204
+ # key, val = line.split(":", 1)
205
+ # key = key.strip()
206
+ # val = val.strip()
207
+ # if key in result:
208
+ # result[key] = val
209
+
210
+ # return {
211
+ # "is_safe": result["Response Safety"] == "safe",
212
+ # "safety_result": result,
213
+ # "raw_output": text
214
+ # }
215
 
216
  def __call__(self, data):
217
  """