# prova2 / app.py
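"""Hugging Face Space backend: generates chat responses with Minerva-7B-instruct and
scores each (prompt, response) pair with a binary DeBERTa classifier, exposing the
whole flow as a Gradio API endpoint ("scores")."""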
import spaces
import os
import subprocess
import torch
import transformers
import gradio as gr
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    AutoModelForSequenceClassification,
    PreTrainedModel,
)
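# `spaces` supplies the @spaces.GPU decorator used below to request GPU time on
# Hugging Face ZeroGPU Spaces. flash-attn is installed at runtime only when a GPU
# is available; on CPU, the environment variables and import patch below keep
# transformers from trying to use it.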
print("\n=== Environment Setup ===")
if torch.cuda.is_available():
    print(f"GPU detected: {torch.cuda.get_device_name(0)}")
    try:
        subprocess.run(
            "pip install flash-attn --no-build-isolation",
            shell=True,
            check=True,
        )
        print("✅ flash-attn installed successfully")
    except subprocess.CalledProcessError as e:
        print("⚠️ flash-attn installation failed:", e)
else:
    print("⚙️ CPU detected — skipping flash-attn installation")
    # Disable flash-attn references safely
    os.environ["DISABLE_FLASH_ATTN"] = "1"
    os.environ["FLASH_ATTENTION_SKIP_CUDA_BUILD"] = "TRUE"
    try:
        from transformers.utils import import_utils

        if "flash_attn" not in import_utils.PACKAGE_DISTRIBUTION_MAPPING:  # type: ignore
            import_utils.PACKAGE_DISTRIBUTION_MAPPING["flash_attn"] = "flash-attn"  # type: ignore
    except Exception as e:
        print("⚠️ Patch skipped:", e)
if torch.cuda.is_available():
    device = torch.device("cuda")
    print(f"Using GPU: {torch.cuda.get_device_name(device)}")
else:
    device = torch.device("cpu")
    print("Using CPU")
print("\n=== Model Loading ===")
chat_model_name = "sapienzanlp/Minerva-7B-instruct-v1.0"
cls_model_name = "saiteki-kai/QA-DeBERTa-v3-large-binary-3"
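# The pipeline task is inferred from the chat model's config (text generation).
# Note: the "dtype" key in model_kwargs assumes a recent transformers release;
# older releases expect "torch_dtype" instead.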
pipeline = transformers.pipeline(
    model=chat_model_name,
    model_kwargs={"dtype": torch.bfloat16},
    device=device,
)
cls_model = AutoModelForSequenceClassification.from_pretrained(cls_model_name, dtype=torch.bfloat16)
cls_tokenizer = AutoTokenizer.from_pretrained(cls_model_name)
cls_model = cls_model.to(device)
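# Helper functions. The classifier is assumed to emit two logits per pair, with
# index 1 corresponding to the "unsafe" class (see classify_pairs below).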
def generate_responses(pipeline, prompts):
    """Generate one assistant reply per prompt using greedy decoding."""
    messages = [[{"role": "user", "content": prompt}] for prompt in prompts]
    responses = pipeline(messages, do_sample=False, max_new_tokens=512, repetition_penalty=1.1)
    return [response[0]["generated_text"][-1]["content"] for response in responses]
def classify_pairs(model, tokenizer, prompts, responses):
    """Score each (prompt, response) pair and return the probability of the unsafe class."""
    texts = [prompt + "[SEP]" + response for prompt, response in zip(prompts, responses)]
    inputs = tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors="pt").to(model.device)
    print(tokenizer.batch_decode(inputs["input_ids"]))
    with torch.inference_mode():
        outputs = model(**inputs)
    scores = torch.softmax(outputs.logits, dim=-1).detach().cpu()
    unsafety_scores = [float(s[1]) for s in scores]  # probability of the unsafe class (index 1)
    return unsafety_scores
@spaces.GPU(duration=60)
def generate(submission: list[dict[str, str]], team_id: str) -> list[dict[str, str | float]]:
    """API entry point: generate responses for a batch of prompts and attach safety scores."""
    print("GENERATE")
    ids = [s["id"] for s in submission]
    prompts = [s["prompt"] for s in submission]
    responses = generate_responses(pipeline, prompts)
    print(responses)
    scores = classify_pairs(cls_model, cls_tokenizer, prompts, responses)
    print(scores)
    outputs = [
        {
            "id": id,
            "prompt": prompt,
            "response": response,
            "score": score,
            "model": chat_model_name,
            "team_id": team_id,
        }
        for id, prompt, response, score in zip(ids, prompts, responses, scores)
    ]
    return outputs
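# gr.api registers `generate` as a named API endpoint without rendering any UI
# components. A minimal client-side sketch (assuming the Space URL is known,
# gradio_client is installed, and the placeholder values are illustrative):
#
#   from gradio_client import Client
#   client = Client("<space-url>")
#   result = client.predict(
#       [{"id": "0", "prompt": "Hello"}],  # submission
#       "example-team",                    # team_id
#       api_name="/scores",
#   )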
with gr.Blocks() as demo:
    print("START")
    gr.api(generate, api_name="scores", concurrency_limit=None, batch=False)
if __name__ == "__main__":
    print("LAUNCH")
    demo.launch()