Spaces:
Sleeping
Sleeping
Commit
·
bd093f8
1
Parent(s):
a2bc680
test api
Browse files
app.py
CHANGED
|
@@ -3,6 +3,7 @@ import spaces
|
|
| 3 |
from transformers import pipeline
|
| 4 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 5 |
import torch
|
|
|
|
| 6 |
|
| 7 |
model_name = "sapienzanlp/Minerva-7B-instruct-v1.0"
|
| 8 |
model = AutoModelForCausalLM.from_pretrained(
|
|
@@ -38,8 +39,14 @@ def generate(prompts: list[str]) -> tuple[list[str], list[dict[str, float]]]:
|
|
| 38 |
|
| 39 |
return responses, classifier([text + "[SEP]" + response for text, response in zip(texts, responses)])
|
| 40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
with gr.Blocks() as demo:
|
| 42 |
gr.Markdown("Welcome")
|
| 43 |
-
gr.api(
|
| 44 |
|
|
|
|
| 45 |
demo.launch()
|
|
|
|
| 3 |
from transformers import pipeline
|
| 4 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 5 |
import torch
|
| 6 |
+
import json
|
| 7 |
|
| 8 |
model_name = "sapienzanlp/Minerva-7B-instruct-v1.0"
|
| 9 |
model = AutoModelForCausalLM.from_pretrained(
|
|
|
|
| 39 |
|
| 40 |
return responses, classifier([text + "[SEP]" + response for text, response in zip(texts, responses)])
|
| 41 |
|
| 42 |
+
def scores(body: str) -> tuple[list[str], list[dict[str, float]]]:
    """Decode a JSON request body and return the parsed payload as-is.

    NOTE(review): the annotated return type does not match what is actually
    returned (whatever ``json.loads`` produces for *body*) — this looks like
    a test stub for the API endpoint; confirm the intended contract before
    tightening the annotation, since gradio derives the API schema from it.
    """
    payload = json.loads(body)
    return payload
|
| 45 |
+
|
| 46 |
+
|
| 47 |
# Minimal Gradio Blocks app: a welcome banner plus `scores` exposed as an
# API endpoint named "score".
with gr.Blocks() as demo:
    gr.Markdown("Welcome")
    # Register `scores` as an API endpoint with request batching enabled.
    # NOTE(review): with batch=True gradio is expected to hand the function a
    # *list* of up to max_batch_size inputs, but `scores` is annotated to take
    # a single str and calls json.loads on it directly — confirm the batched
    # call shape against the gradio version in use.
    gr.api(scores, api_name="score", batch=True, max_batch_size=25)

# Enable the event queue before launching — presumably required for the
# batched API registration above to take effect; verify against gradio docs.
demo.queue()
demo.launch()
|