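# Hugging Face Space: batched text generation with Minerva-7B-instruct, with
# each (prompt, response) pair scored by a question-answering quality classifier.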
import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch

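# Load Minerva-7B in bfloat16, letting Accelerate place layers on available GPUs.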
model_name = "sapienzanlp/Minerva-7B-instruct-v1.0"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Decoder-only models need left padding for batched generation; fall back to the
# EOS token as pad token if none is defined (an assumption about this tokenizer).
tokenizer.padding_side = "left"
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

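# Classifier used to score each question/answer pair.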
classifier = pipeline("text-classification", model="saiteki-kai/QA-DeBERTa-v3-large")

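# Request a GPU for the duration of each call (required on ZeroGPU Spaces).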
@spaces.GPU()
def generate(prompts: list[str]) -> tuple[list[str], list[dict[str, float]]]:
    messages = [[{"role": "user", "content": prompt}] for prompt in prompts]

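    # Render each single-turn conversation into a prompt string using the
    # model's chat template.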
    texts = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer(texts, padding=True, return_tensors="pt").to(model.device)
    generated_ids = model.generate(
        **model_inputs,
        do_sample=False,  # greedy decoding; sampling flags such as temperature are unused
        repetition_penalty=1.0,
        max_new_tokens=512,
    )
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    responses = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)

    # Score each question/answer pair. Use the raw prompts rather than the
    # chat-templated texts so the classifier does not see special tokens.
    scores = classifier([prompt + "[SEP]" + response for prompt, response in zip(prompts, responses)])
    return responses, scores

with gr.Blocks() as demo:
    gr.Markdown("Welcome")
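    # Expose `generate` as an API endpoint; with batch=True, Gradio groups
    # queued requests and passes them to the function as lists.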
    gr.api(generate, api_name="predict", batch=True)

demo.launch()
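
# A minimal client-side sketch, assuming the Space is deployed and the
# gradio_client package is installed ("<user>/<space>" is a placeholder id):
#
#   from gradio_client import Client
#
#   client = Client("<user>/<space>")
#   # Each request carries one prompt; the server batches concurrent requests.
#   response, scores = client.predict("Qual è la capitale d'Italia?", api_name="/predict")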