# uploads/synthgen/summary.py
import os
import json
import math
import asyncio
import random
from typing import Dict, Any, List, Tuple, Optional
from openai import AsyncOpenAI, BadRequestError
from tqdm import tqdm
from itertools import count
from prompts_resumo import CATEGORIES_SYSTEM_PROMPTS
# ---------------------------------------------------------------------
# vLLM OpenAI-compatible endpoint config
# ---------------------------------------------------------------------
VLLM_BASE_URL = os.environ.get("VLLM_BASE_URL", "http://10.100.0.111:8022/v1")
VLLM_API_KEY = os.environ.get("VLLM_API_KEY", "no-key-needed")
CATEGORIES_SYS_PROMPTS = CATEGORIES_SYSTEM_PROMPTS
# Decoding (global defaults)
GEN_TEMPERATURE = float(os.environ.get("GEN_TEMPERATURE", "0.7"))
GEN_TOP_P = float(os.environ.get("GEN_TOP_P", "1.0"))
GEN_MAX_NEW_TOK = int(os.environ.get("GEN_MAX_NEW_TOK", "8192"))
STOP_STRINGS = ["<|im_end|>", "<|end_of_text|>"]
STOP_TOKEN_IDS = None
# Separate decoding for answers vs. question generation
# Answer temperature defaults to GEN_TEMPERATURE, except 0.0 falls back to 0.3
RESP_TEMPERATURE = float(os.environ.get("RESP_TEMPERATURE", str(GEN_TEMPERATURE if GEN_TEMPERATURE else 0.3)))
RESP_TOP_P = float(os.environ.get("RESP_TOP_P", str(min(GEN_TOP_P, 0.9))))
RESP_MAX_TOKENS = int(os.environ.get("RESP_MAX_TOKENS", "8192"))
Q_TEMPERATURE = float(os.environ.get("Q_TEMPERATURE", "0.7"))
Q_TOP_P = float(os.environ.get("Q_TOP_P", "0.95"))
Q_MAX_TOKENS = int(os.environ.get("Q_MAX_TOKENS", "8192"))
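# Example (hypothetical values): any knob above can be overridden per run, e.g.
#   RESP_TEMPERATURE=0.2 Q_TEMPERATURE=0.9 GEN_MAX_NEW_TOK=4096 python summary.py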
# ---------------------------------------------------------------------
# Execution control (single-turn only)
# ---------------------------------------------------------------------
NUM_ROWS = int(os.environ.get("NUM_ROWS", "300000"))
BATCH_SIZE = int(os.environ.get("BATCH_SIZE", "16"))
OUTPUT_FILE = os.environ.get("OUTPUT_FILE", "cemig_summary.jsonl")
INCLUDE_SYSTEM = True  # (the system prompt is used but not saved in the output)
LOGITS_PROCESSORS: List[str] = []
# Concurrency
MAX_ASYNC_TOTAL = int(os.environ.get("MAX_ASYNC", "32"))
CHAT_SHARE = float(os.environ.get("CHAT_SHARE", "0.8")) # 80% chat / 20% qgen
MAX_ASYNC_CHAT = max(1, int(MAX_ASYNC_TOTAL * CHAT_SHARE))
MAX_ASYNC_QGEN = max(1, MAX_ASYNC_TOTAL - MAX_ASYNC_CHAT)
QUEUE_MAXSIZE = int(os.environ.get("QUEUE_MAXSIZE", str(MAX_ASYNC_CHAT * 4)))
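# With the defaults (MAX_ASYNC=32, CHAT_SHARE=0.8): 25 chat workers, 7 question
# generators, and a queue bounded at 25 * 4 = 100 pending conversations.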
# ---------------------------------------------------------------------
# Streaming dataset (default: cemig-ceia/energy_dataset_v1)
# ---------------------------------------------------------------------
WIKI_DATASET_ID = os.environ.get("WIKI_DATASET_ID", "cemig-ceia/energy_dataset_v1")
WIKI_SUBSET = os.environ.get("WIKI_SUBSET", "default")
WIKI_TEXT_FIELD = os.environ.get("WIKI_TEXT_FIELD", "text")
WIKI_MAX_CHARS = int(os.environ.get("WIKI_MAX_CHARS", "1500000"))
WIKI_MIN_CHARS = int(os.environ.get("WIKI_MIN_CHARS", "100"))
# ---------------------------------------------------------------------
# QUESTION styles (retained)
# ---------------------------------------------------------------------
HARD_PROMPT_PAIR_SHARE = float(os.environ.get("HARD_PROMPT_PAIR_SHARE", "0.05"))
USER_PROMPTS = [
"Me ajude a responder essa questão, me dê uma resposta que explica o passo a passo do raciocinio.",
"Quero ajuda com essa questão mas sou ruim nessa materia, me explique ela mas também falando brevemente dos fundamentos dela.",
"Responda a questão de forma breve"
]
QUESTION_STYLE_PROMPTS = {
"summary": (
"Siga em pt-BR. Gere UMA pergunta solicitando resumo ou síntese de algo já mencionado "
"na conversa. A saída deve ser uma PERGUNTA. Evite sim/não e perguntas vagas."
"Máx. 1-2 sentenças; termine com '?'."
),
"rewrite_simplify": (
"Siga em pt-BR. Gere UMA pergunta solicitando reescrever o texto de forma que simplifique o jargão e vocabulário jurídico"
"A saída deve ser uma PERGUNTA. Evite sim/não e perguntas vagas."
"Máx. 1-2 sentenças; termine com '?'."
),
"rewrite_detailed": (
"Siga em pt-BR. Gere UMA pergunta solicitando reescrever de forma mais detalhada "
"algo já mencionado na conversa. A saída deve ser uma PERGUNTA. Evite sim/não e perguntas vagas."
"Máx. 1-2 sentença; termine com '?'."
),
"rewrite_focus": (
"Siga em pt-BR. Gere UMA pergunta solicitando reescrever algo já discutido, focando em um aspecto ou parte específica da resposta anterior. "
"A saída deve ser uma PERGUNTA. Evite sim/não e perguntas vagas. Máx. 1 sentença; termine com '?'."
),
"rewrite_formal": (
"Siga em pt-BR. Gere UMA pergunta solicitando reescrever algo já discutido na conversa de forma mais formal. "
"A saída deve ser uma PERGUNTA. Evite sim/não e perguntas vagas."
),
}
_DEFAULT_STYLE_PROBS = {
"summary": 0.100,
"rewrite_simplify": 0.00,
"rewrite_detailed": 0.00,
"rewrite_focus": 0.00,
"rewrite_formal": 0.00,
}
QUESTION_SAMPLES_PER_ROW = int(os.environ.get("QUESTION_SAMPLES_PER_ROW", "2"))
try:
QUESTION_STYLE_PROBS = json.loads(os.environ.get("QUESTION_STYLE_PROBS", "")) or _DEFAULT_STYLE_PROBS
except Exception:
QUESTION_STYLE_PROBS = _DEFAULT_STYLE_PROBS
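# Weights are normalized in _sample_question_style, so the defaults above make
# "summary" the only style ever sampled. A hypothetical rebalancing override:
#   QUESTION_STYLE_PROBS='{"summary": 0.5, "rewrite_formal": 0.5}'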
# ---------------------------------------------------------------------
# Client
# ---------------------------------------------------------------------
client = AsyncOpenAI(base_url=VLLM_BASE_URL, api_key=VLLM_API_KEY)
# ---------------------------------------------------------------------
# Utilities (regex-free)
# ---------------------------------------------------------------------
def _normalize_spaces(s: str) -> str:
return " ".join((s or "").split())
def pick_two_prompts(pool):  # pick 2 of the hardcoded prompts
pool = [p.strip() for p in pool if p and p.strip()]
if not pool:
return None
if len(pool) == 1:
return pool[0], pool[0]
return tuple(random.sample(pool, 2))
def _truncate_context(txt: str) -> str:
if not isinstance(txt, str):
return ""
t = _normalize_spaces(txt)
if len(t) <= WIKI_MAX_CHARS:
return t
cut = t.rfind(".", 0, WIKI_MAX_CHARS)
if cut == -1 or cut < WIKI_MIN_CHARS:
return t[:WIKI_MAX_CHARS]
return t[:cut+1]
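# e.g. with hypothetical WIKI_MAX_CHARS=20 and WIKI_MIN_CHARS=5, the text
# "Foo bar. Baz qux quux corge." is cut at the last '.' before the limit,
# yielding "Foo bar."; if no usable '.' exists, a hard character cut applies.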
def sample_system_prompt_key(prompts_with_probs: Dict[str, Tuple[str, float, str]]) -> Tuple[str, str]:
keys = list(prompts_with_probs.keys())
probs = [prompts_with_probs[k][1] for k in keys]
s = sum(probs)
probs = [1.0/len(keys)]*len(keys) if s <= 0 else [p/s for p in probs]
selected_key = random.choices(keys, weights=probs, k=1)[0]
context_type = prompts_with_probs[selected_key][2]
return selected_key, context_type
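# Per the indexing above, CATEGORIES_SYSTEM_PROMPTS is expected to map
# key -> (system_prompt_text, probability, context_type); it comes from
# prompts_resumo, which is not shown here.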
async def get_model_id() -> str:
models = await client.models.list()
if not models.data:
raise RuntimeError("Nenhum modelo disponível no endpoint vLLM.")
return models.data[0].id
async def chat_call(
messages: List[Dict[str, str]],
model_id: str,
*,
extra_body_override: Optional[dict] = None,
) -> str:
final_messages = messages
temperature = RESP_TEMPERATURE
top_p = RESP_TOP_P
max_tokens = RESP_MAX_TOKENS
extra_body = {
"chat_template_kwargs": {"enable_thinking": False},
"stop": STOP_STRINGS,
"stop_token_ids": STOP_TOKEN_IDS,
"logits_processors": LOGITS_PROCESSORS,
}
    if extra_body_override:
        for k, v in extra_body_override.items():
            extra_body[k] = v
    # Keys set to None are omitted so callers can remove a default field entirely
    extra_body = {k: v for k, v in extra_body.items() if v is not None}
resp = await client.chat.completions.create(
model=model_id,
messages=final_messages,
temperature=temperature,
top_p=top_p,
max_tokens=max_tokens,
extra_body=extra_body,
)
return resp.choices[0].message.content or ""
async def safe_chat_call(*args, **kwargs) -> Optional[str]:
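    # Fallback ladder for BadRequestError (e.g. a chat template rejecting a field):
    #   1) the normal call (enable_thinking=False),
    #   2) retry with enable_thinking=True,
    #   3) retry with chat_template_kwargs removed from the request.
    # Returns None once every attempt has failed.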
try:
return await chat_call(*args, **kwargs)
    except BadRequestError:
        # Pop the caller's override once so both retries can reuse it
        eb = kwargs.pop("extra_body_override", {}) or {}
        try:
            # First retry: force thinking ON so the retry actually differs
            eb2 = {**eb, "chat_template_kwargs": {"enable_thinking": True}}
            return await chat_call(*args, extra_body_override=eb2, **kwargs)
        except Exception:
            pass
        try:
            # Last resort: drop chat_template_kwargs from the request entirely
            eb2 = dict(eb)
            eb2["chat_template_kwargs"] = None  # None values are stripped in chat_call
            return await chat_call(*args, extra_body_override=eb2, **kwargs)
        except Exception:
            return None
    except Exception:
        return None
# ---------------------------------------------------------------------
# QUESTION generation: optimized (n=2 / JSON pair / parallel fallback)
# ---------------------------------------------------------------------
def _sample_question_style(k=2) -> List[str]:
    keys = list(QUESTION_STYLE_PROMPTS.keys())
    # distinct loop name so the sample-count parameter k is not shadowed
    weights = [QUESTION_STYLE_PROBS.get(key, 0.0) for key in keys]
    s = sum(weights)
    weights = [1.0/len(keys)]*len(keys) if s <= 0 else [w/s for w in weights]
    return random.choices(keys, weights=weights, k=k)
async def generate_user_questions_pair(model_id: str, context_text: str, styles: List[str]) -> List[str]:
s0, s1 = styles[0], styles[1]
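    # Strategy 1: identical styles, so a single request with n=2 samples suffices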
if s0 == s1:
try:
sys = (
"Você é um GERADOR de PERGUNTAS. Use APENAS o CONTEXTO. "
"Saída: UMA pergunta em PT-BR, sem comentários."
)
msg = [{"role": "system", "content": sys + f"\n\nINSTRUÇÃO DE ESTILO: {QUESTION_STYLE_PROMPTS[s0]}\n\nCONTEXTO:\n{context_text}"}]
resp = await client.chat.completions.create(
model=model_id,
messages=msg,
temperature=Q_TEMPERATURE,
top_p=Q_TOP_P,
max_tokens=Q_MAX_TOKENS,
n=2,
)
outs = [(c.message.content or "").strip() for c in resp.choices]
if len(outs) == 2 and all(outs):
return outs
except Exception:
pass
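    # Strategy 2: one JSON request that yields both questions at once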
try:
sys = (
"Você é um GERADOR de PERGUNTAS. Use APENAS o CONTEXTO. "
"Produza JSON exatamente no formato: {\"q1\": \"...\", \"q2\": \"...\"}."
)
style_block = (
f"q1_style: {QUESTION_STYLE_PROMPTS[s0]}\n"
f"q2_style: {QUESTION_STYLE_PROMPTS[s1]}"
)
msg = [{"role": "system", "content": f"{sys}\n\n{style_block}\n\nCONTEXTO:\n{context_text}"}]
resp = await client.chat.completions.create(
model=model_id,
messages=msg,
temperature=Q_TEMPERATURE,
top_p=Q_TOP_P,
max_tokens=Q_MAX_TOKENS * 2,
)
txt = (resp.choices[0].message.content or "").strip()
q1 = q2 = None
try:
obj = json.loads(txt)
q1 = (obj.get("q1") or "").strip()
q2 = (obj.get("q2") or "").strip()
except Exception:
parts = [p.strip() for p in txt.split("\n") if p.strip()]
if len(parts) >= 2:
q1, q2 = parts[0], parts[1]
if q1 and q2:
return [q1, q2]
except Exception:
pass
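    # Strategy 3 (fallback): two independent single-question requests in parallel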
async def _once(sty: str):
sys = (
"Você é um GERADOR de PERGUNTAS. Use APENAS o CONTEXTO. "
"Saída: apenas UMA pergunta em PT-BR, sem comentários."
)
msg = [{"role": "system", "content": sys + f"\n\nINSTRUÇÃO DE ESTILO: {QUESTION_STYLE_PROMPTS[sty]}\n\nCONTEXTO:\n{context_text}"}]
r = await client.chat.completions.create(
model=model_id,
messages=msg,
temperature=Q_TEMPERATURE,
top_p=Q_TOP_P,
max_tokens=Q_MAX_TOKENS,
)
return (r.choices[0].message.content or "").strip()
q1, q2 = await asyncio.gather(_once(s0), _once(s1))
return [q1, q2]
# ---------------------------------------------------------------------
# Conversation (single-turn: only user/assistant turns in the output)
# ---------------------------------------------------------------------
async def generate_one_conversation(
model_id: str,
system_prompt_key: str,
system_prompt_text: str,
context_text: str,
initial_user: str,
question_style: str,
context_type: str,
) -> Dict[str, Any]:
conversation: List[Dict[str, str]] = []
base_msgs: List[Dict[str, str]] = []
if INCLUDE_SYSTEM and system_prompt_text:
base_msgs.append({"role": "system", "content": system_prompt_text})
if context_text and initial_user:
user_content = f"{context_text}\n\n{initial_user}"
elif context_text:
user_content = context_text
else:
user_content = initial_user or ""
conversation.append({"role": "user", "content": user_content})
first_answer = await safe_chat_call(base_msgs + [{"role": "user", "content": user_content}], model_id)
conversation.append({"role": "assistant", "content": first_answer})
return {"conversation": conversation}
# ---------------------------------------------------------------------
# Dataset streaming + helpers
# ---------------------------------------------------------------------
def _wiki_stream_iter():
from datasets import load_dataset
return load_dataset(WIKI_DATASET_ID, WIKI_SUBSET, split="train", streaming=True)
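# The split is consumed lazily (streaming=True), so resuming by skipping already
# used records (see _skip_accepted_contexts below) does not re-download the dataset.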
def _extract_context(record: Dict[str, Any]) -> Optional[Dict[str, Any]]:
txt = record.get(WIKI_TEXT_FIELD, "")
if not isinstance(txt, str):
return None
context_text = _truncate_context(txt)
if len(context_text) < WIKI_MIN_CHARS:
return None
title = record.get("title", "")
return {"context_text": context_text, "title": title}
# Note: not called by run() below; kept as a general-purpose helper.
async def _execute_with_concurrency(coros: List[Any], metas: List[dict], limit: int):
sem = asyncio.Semaphore(limit)
async def _runner(idx: int, coro):
async with sem:
res = await coro
return idx, res
tasks = [asyncio.create_task(_runner(i, c)) for i, c in enumerate(coros)]
try:
for done in asyncio.as_completed(tasks):
i, res = await done
yield res, metas[i]
finally:
for t in tasks:
if not t.done():
t.cancel()
def _read_resume_state(path: str) -> Tuple[int, int]:
if not os.path.exists(path) or os.path.getsize(path) == 0:
return 0, -1
next_seq_id = 0
last_ctx_id = -1
with open(path, "r", encoding="utf-8") as fin:
for line in fin:
try:
obj = json.loads(line)
except Exception:
continue
if "seq_id" in obj:
next_seq_id = max(next_seq_id, int(obj["seq_id"]) + 1)
if "context_id" in obj:
last_ctx_id = max(last_ctx_id, int(obj["context_id"]))
return next_seq_id, last_ctx_id
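# Example: if the newest valid line contains {"seq_id": 41, "context_id": 17, ...},
# this returns (42, 17): writing resumes at seq_id 42 and the runner skips the
# first 18 accepted contexts of the stream.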
async def _skip_accepted_contexts(ds_iter, n_to_skip: int) -> int:
skipped = 0
if n_to_skip <= 0:
return 0
for rec in ds_iter:
if _extract_context(rec):
skipped += 1
if skipped >= n_to_skip:
break
return skipped
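# The skip path applies the same _extract_context acceptance filter used during
# generation, so context ids stay aligned after a resume (assuming the stream
# order and length thresholds are unchanged between runs). Iteration here is
# synchronous, which is acceptable as a one-off at startup.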
# ---------------------------------------------------------------------
# Runner with producer → queue → consumer pipeline (80/20 chat split)
# ---------------------------------------------------------------------
async def run():
os.makedirs(os.path.dirname(OUTPUT_FILE) or ".", exist_ok=True)
model_id = await get_model_id()
# === RESUME ===
next_seq_id, last_ctx_id = _read_resume_state(OUTPUT_FILE)
seq_id = next_seq_id
ds_iter = _wiki_stream_iter()
_ = await _skip_accepted_contexts(ds_iter, last_ctx_id + 1)
start_ctx = (last_ctx_id + 1) if last_ctx_id >= 0 else 0
global_id = count(start_ctx)
total_remaining = max(0, NUM_ROWS - seq_id)
total_batches = math.ceil(total_remaining / BATCH_SIZE) if BATCH_SIZE > 0 else 0
    # Open in append mode so resumed runs extend the existing file
with open(OUTPUT_FILE, "a", encoding="utf-8") as fout:
        pbar = tqdm(total=total_batches, desc="Generating conversations (Magpie + dataset streaming)")
write_lock = asyncio.Lock()
while seq_id < NUM_ROWS:
            # Cap contexts this batch (2 conversations per context)
convs_restantes = NUM_ROWS - seq_id
max_contexts_este_batch = min(BATCH_SIZE, max(1, math.ceil(convs_restantes / 2)))
            # Collect accepted contexts
batch_contexts: List[dict] = []
for rec in ds_iter:
context_dict = _extract_context(rec)
if context_dict:
ctx_id = next(global_id)
batch_contexts.append({**context_dict, "ctx_sample_id": ctx_id})
if len(batch_contexts) >= max_contexts_este_batch:
break
if not batch_contexts:
                break  # end of the stream
            # ANSWER style for this batch (one system prompt per batch)
key, context_type = sample_system_prompt_key(CATEGORIES_SYS_PROMPTS)
sys_prompt_text = CATEGORIES_SYS_PROMPTS[key][0]
# ===== Pipeline =====
conv_queue: asyncio.Queue = asyncio.Queue(maxsize=QUEUE_MAXSIZE)
            # PRODUCERS (question generation)
qgen_sem = asyncio.Semaphore(MAX_ASYNC_QGEN)
async def qgen_runner(cinfo: dict):
async with qgen_sem:
ctx_text = cinfo["context_text"]
ctx_id = cinfo["ctx_sample_id"]
use_hard_pair = (random.random() < HARD_PROMPT_PAIR_SHARE) and USER_PROMPTS
if use_hard_pair:
chosen = pick_two_prompts(USER_PROMPTS)
if chosen:
q1, q2 = chosen
styles = ["hardcoded", "hardcoded"]
                        else:  # fallback if prompt selection fails
styles = _sample_question_style(k=2)
q1, q2 = await generate_user_questions_pair(model_id, ctx_text, styles)
else:
styles = _sample_question_style(k=2)
q1, q2 = await generate_user_questions_pair(model_id, ctx_text, styles)
await conv_queue.put((ctx_id, ctx_text, q1, styles[0]))
await conv_queue.put((ctx_id, ctx_text, q2, styles[1]))
producers = [asyncio.create_task(qgen_runner(cinfo)) for cinfo in batch_contexts]
            # CONSUMERS (chat answering)
async def chat_worker(worker_id: int):
nonlocal seq_id
while True:
item = await conv_queue.get()
if item is None:
conv_queue.task_done()
break
ctx_id, ctx_text, question, q_style = item
                try:
                    async with write_lock:
                        if seq_id >= NUM_ROWS:
                            # Drain instead of returning: an early return would
                            # leave queued items (and one sentinel) without
                            # task_done(), deadlocking conv_queue.join() below.
                            continue
r = await generate_one_conversation(
model_id=model_id,
system_prompt_key=key,
system_prompt_text=sys_prompt_text,
context_text=ctx_text,
initial_user=question,
question_style=q_style,
context_type=context_type,
)
async with write_lock:
if seq_id < NUM_ROWS:
record = {
"seq_id": seq_id,
"conversation": r["conversation"],
"question_style": q_style,
"context_id": ctx_id,
}
try:
fout.write(json.dumps(record, ensure_ascii=False) + "\n")
except Exception:
pass
seq_id += 1
except Exception:
pass
finally:
conv_queue.task_done()
consumers = [asyncio.create_task(chat_worker(i)) for i in range(MAX_ASYNC_CHAT)]
            # Wait for producers to finish, then enqueue one sentinel per consumer
await asyncio.gather(*producers)
for _ in range(MAX_ASYNC_CHAT):
await conv_queue.put(None)
            # Wait for the queue to drain and the consumers to exit
await conv_queue.join()
await asyncio.gather(*consumers, return_exceptions=True)
pbar.update(1)
pbar.close()
if __name__ == "__main__":
asyncio.run(run())
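
# Example invocation (hypothetical values), overriding the env defaults above:
#   VLLM_BASE_URL=http://localhost:8000/v1 NUM_ROWS=1000 BATCH_SIZE=8 \
#   OUTPUT_FILE=out/summary.jsonl python summary.py
# Re-running with the same OUTPUT_FILE resumes from the last written seq_id.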