import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_name = "sapienzanlp/Minerva-7B-instruct-v1.0"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Decoder-only models must be left-padded for batched generation; otherwise the
# completions start after the pad tokens and the prompt-stripping below breaks.
tokenizer.padding_side = "left"
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

classifier = pipeline("text-classification", model="saiteki-kai/QA-DeBERTa-v3-large")
@spaces.GPU  # `spaces` was imported but unused: ZeroGPU Spaces need this decorator on GPU work
def generate(prompts: list[str]) -> tuple[list[str], list[dict]]:
    messages = [[{"role": "user", "content": prompt}] for prompt in prompts]
    texts = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
    model_inputs = tokenizer(texts, padding=True, return_tensors="pt").to(model.device)
    generated_ids = model.generate(
        **model_inputs,
        do_sample=False,  # greedy decoding; `temperature=0` was dropped since it is ignored (and warned about) without sampling
        repetition_penalty=1.0,
        max_new_tokens=512,
    )
    # Keep only the newly generated tokens: with left padding, the prompt occupies
    # exactly the first len(input_ids) positions of every output row.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    responses = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # Score each question/answer pair. The raw prompts are used rather than the
    # templated texts so chat-template special tokens do not leak into the classifier input.
    scores = classifier([prompt + "[SEP]" + response for prompt, response in zip(prompts, responses)])
    return responses, scores
with gr.Blocks() as demo:
    gr.Markdown("Welcome")
    gr.api(generate, api_name="predict", batch=True)

demo.launch()
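
# --- Client-side usage (separate script; shown commented so app.py stays runnable) ---
# A minimal sketch of calling the endpoint above with gradio_client, assuming the
# Space is published under "your-username/your-space" (hypothetical id). Although
# the server function is batched (batch=True), each client call still sends a
# single prompt; Gradio groups concurrent requests into server-side batches.
#
#   from gradio_client import Client
#
#   client = Client("your-username/your-space")  # hypothetical Space id
#   response, score = client.predict("Chi era Dante Alighieri?", api_name="/predict")
#   print(response)
#   print(score)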