Rename handler.py to handler_3.py

handler.py → handler_3.py  (RENAMED, +56 -8)
@@ -143,13 +143,13 @@ class EndpointHandler:
 Agent Unsafe Categories:

 <End of examples>
-Provide your safety assessment for the above conversation in the below output JSON format and must provide all the metrics mentioned below:
+Provide your safety assessment for the above conversation in the below output JSON format, and must provide all the metrics mentioned below:

 "Prompt Safety": safe or unsafe rating for the user message.
 "Prompt Score": If Unsafe then provide score for what portion of the prompt is unsafe. Score need to be on scale of 1-5.
-"Prompt Unsafe Categories": DON'T mention the whole taxonomy but
+"Prompt Unsafe Categories": DON'T mention the whole taxonomy, but specifically mention up to 3 particular categories that violate the provided taxonomy. Omit if all safe.

-"Agent Safety": safe or unsafe rating for the agent response, skip not
+"Agent Safety": safe or unsafe rating for the agent response, skip not provided
 "Agent Score": If Unsafe, then provide a score for what portion of the prompt is unsafe. Score need to be on scale of 1-5. Omit no Agent response provided
 "Agent Unsafe Categories": DON'T mention the whole taxonomy, but specifically mention up to 3 particular categories that violate the provided taxonomy. Omit if all safe or no Agent response provided.

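For orientation, a model reply that follows the output format requested above might look like the sketch below. This is illustrative only and not part of the diff: the scores and the category label are hypothetical, and the actual taxonomy is defined elsewhere in the policy prompt.

# Illustrative example of the requested JSON assessment, using the metric
# names from the policy prompt above. Values and category labels are made up.
example_assessment = {
    "Prompt Safety": "unsafe",
    "Prompt Score": 4,
    "Prompt Unsafe Categories": ["Violent Crimes"],  # hypothetical category, up to 3 allowed
    "Agent Safety": "safe",
    # "Agent Score" and "Agent Unsafe Categories" are omitted when the agent
    # response is safe or no agent response was provided.
}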
@@ -208,15 +208,63 @@ class EndpointHandler:

         return parsed

-    def _format_conversations(self, prompt):
+    # def _format_conversations(self, prompt):
+    #     if self.chat_template == "gemma-3":
+    #         return {
+    #             "conversations": [
+    #                 {"role": "system", "content": [{"type": "text", "text": self.policy_prompt}]},
+    #                 {"role": "user", "content": [{"type": "text", "text": prompt}]},
+    #             ]
+    #         }
+
+
+
+    #     return { "conversations": [
+    #         {
+    #             "role": "system",
+    #             "content": [{"type": "text", "text": self.policy_prompt}]
+    #         },
+    #         {
+    #             "role": "user",
+    #             "content": [
+    #                 {"type": "image", "url": user},
+    #                 {"type": "text", "text": "Please assess the content"}
+    #             ]
+    #         }
+    #     ]
+    #     }
+
+
+    #     else: # chatml and others
+    #         return {
+    #             "conversations": [
+    #                 {"role": "system", "content": self.policy_prompt},
+    #                 {"role": "user", "content": prompt},
+    #             ]
+    #         }
+
+    def _format_conversations(self, prompt=None, image_url=None):
         if self.chat_template == "gemma-3":
+            user_content = []
+
+            if image_url:
+                user_content.append({"type": "image", "url": image_url})
+            if prompt:
+                user_content.append({"type": "text", "text": prompt})
+            elif not user_content:
+                raise ValueError("At least one of `prompt` or `image_url` must be provided.")
+            elif image_url and not prompt:
+                # default text prompt for image-only queries
+                user_content.append({"type": "text", "text": "Please analyze the image."})
+
             return {
                 "conversations": [
                     {"role": "system", "content": [{"type": "text", "text": self.policy_prompt}]},
-                    {"role": "user", "content":
+                    {"role": "user", "content": user_content},
                 ]
             }
-
+
+        else:
             return {
                 "conversations": [
                     {"role": "system", "content": self.policy_prompt},
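Illustrative usage sketch, not part of the diff: assuming an already constructed EndpointHandler using the gemma-3 chat template, the reworked helper accepts text, an image URL, or both, and builds the user turn accordingly. The URL and strings below are hypothetical.

# Hypothetical calls against the reworked helper (gemma-3 template assumed).
conv = handler._format_conversations(
    prompt="Is this meme harmful?",
    image_url="https://example.com/meme.png",
)
# conv["conversations"][1]["content"] ==
# [{"type": "image", "url": "https://example.com/meme.png"},
#  {"type": "text", "text": "Is this meme harmful?"}]

conv = handler._format_conversations(image_url="https://example.com/meme.png")
# Image-only call: the helper appends its default text prompt, so the user
# content holds the image item plus {"type": "text", "text": "Please analyze the image."}

handler._format_conversations()  # neither argument provided -> raises ValueError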
@@ -291,7 +339,7 @@ class EndpointHandler:
         # Decode the output
         decoded_output = self.tokenizer.decode(output[0], skip_special_tokens=True)

-        Extract the generated part (after the prompt)
+        ##Extract the generated part (after the prompt)
         response_text = decoded_output[len(prompt):].strip()
         print(response_text)

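A minimal sketch of the slicing step above, under the assumption the slice relies on: the decoded output begins with the exact prompt text. The strings are illustrative only.

# Illustrative values; in the handler these come from the tokenizer.
prompt = "Assess the conversation:"
decoded_output = 'Assess the conversation: {"Prompt Safety": "safe"}'
response_text = decoded_output[len(prompt):].strip()
# response_text == '{"Prompt Safety": "safe"}'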
@@ -308,7 +356,7 @@ class EndpointHandler:
             "safety_result": safety_result
         }

-        return
+        return safety_result

 # For local testing
 if __name__ == "__main__":
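With the bare return replaced by return safety_result, a local run can now inspect the assessment directly. The sketch below is hypothetical: it assumes the changed return sits in the handler's __call__ and that the constructor arguments and payload keys follow the usual inference-endpoint shape, neither of which is shown in this diff.

# Hypothetical local test; constructor arguments and payload keys are assumptions.
if __name__ == "__main__":
    handler = EndpointHandler(path=".")
    safety_result = handler({"inputs": "Example user message to assess"})
    print(safety_result)  # no longer None now that the method returns safety_result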