Upload generate_responses.py with huggingface_hub
Browse files · generate_responses.py (+19 −2)
generate_responses.py
CHANGED
|
@@ -62,6 +62,23 @@ UV_SCRIPT_FILENAME = "generate_responses.py"
|
|
| 62 |
UV_SCRIPT_URL = f"https://huggingface.co/datasets/{UV_SCRIPT_REPO_ID}/resolve/main/{UV_SCRIPT_FILENAME}"
|
| 63 |
|
| 64 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 65 |
def check_gpu_availability() -> int:
|
| 66 |
"""Check if CUDA is available and return the number of GPUs."""
|
| 67 |
if not cuda.is_available():
|
|
@@ -321,10 +338,10 @@ def main(
|
|
| 321 |
|
| 322 |
# Extract generated text and create full response list
|
| 323 |
logger.info("Extracting generated responses...")
|
| 324 |
-
responses = [""
|
| 325 |
|
| 326 |
for idx, output in enumerate(outputs):
|
| 327 |
-
responses[idx] = output
|
| 328 |
|
| 329 |
# Add responses to dataset
|
| 330 |
logger.info("Adding responses to dataset...")
|
|
|
|
| 62 |
UV_SCRIPT_URL = f"https://huggingface.co/datasets/{UV_SCRIPT_REPO_ID}/resolve/main/{UV_SCRIPT_FILENAME}"
|
| 63 |
|
| 64 |
|
def extract_output_payload(output) -> dict[str, Optional[str]]:
    """Convert a vLLM chat result into a dataset-serializable dict.

    Returns a dict with two keys:
      - "text": the generated text of the first completion ("" when the
        result carries no completions).
      - "reasoning_content": the model's reasoning trace when the backend
        exposes one, otherwise None.
    """
    candidates = getattr(output, "outputs", None)
    if not candidates:
        # No completions at all (missing/empty `outputs`) -> empty payload.
        return {"text": "", "reasoning_content": None}

    first = candidates[0]

    # Some backends name the field `reasoning_content`, others `reasoning`;
    # try the first and fall back to the second.
    reasoning = getattr(first, "reasoning_content", None)
    if reasoning is None:
        reasoning = getattr(first, "reasoning", None)

    return {
        "text": getattr(first, "text", ""),
        "reasoning_content": reasoning,
    }
|
| 82 |
def check_gpu_availability() -> int:
|
| 83 |
"""Check if CUDA is available and return the number of GPUs."""
|
| 84 |
if not cuda.is_available():
|
|
|
|
| 338 |
|
| 339 |
# Extract generated text and create full response list
|
| 340 |
logger.info("Extracting generated responses...")
|
| 341 |
+
responses = [{"text": "", "reasoning_content": None} for _ in range(total_examples)]
|
| 342 |
|
| 343 |
for idx, output in enumerate(outputs):
|
| 344 |
+
responses[idx] = extract_output_payload(output)
|
| 345 |
|
| 346 |
# Add responses to dataset
|
| 347 |
logger.info("Adding responses to dataset...")
|