index
int64 0
0
| repo_id
stringclasses 351
values | file_path
stringlengths 26
186
| content
stringlengths 1
990k
|
|---|---|---|---|
0
|
hf_public_repos/api-inference-community
|
hf_public_repos/api-inference-community/tests/test_audio.py
|
import os
from unittest import TestCase
import numpy as np
from api_inference_community.validation import ffmpeg_convert, normalize_payload_audio
class ValidationTestCase(TestCase):
def read(self, filename: str) -> bytes:
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "samples", filename)
with open(filename, "rb") as f:
bpayload = f.read()
return bpayload
def test_original_audiofile(self):
bpayload = self.read("sample1.flac")
payload, params = normalize_payload_audio(bpayload, 16000)
self.assertEqual(params, {})
self.assertEqual(type(payload), np.ndarray)
self.assertEqual(payload.shape, (219040,))
def test_original_audiofile_differnt_sampling_rate(self):
bpayload = self.read("sample1.flac")
payload, params = normalize_payload_audio(bpayload, 48000)
self.assertEqual(params, {})
self.assertEqual(type(payload), np.ndarray)
self.assertEqual(payload.shape, (3 * 219040,))
def test_malformed_audio(self):
bpayload = self.read("malformed.flac")
with self.assertRaises(ValueError):
normalize_payload_audio(bpayload, 16000)
def test_dual_channel(self):
bpayload = self.read("sample1_dual.ogg")
payload, params = normalize_payload_audio(bpayload, 16000)
self.assertEqual(payload.shape, (219520,))
def test_original_webm(self):
bpayload = self.read("sample1.webm")
payload, params = normalize_payload_audio(bpayload, 16000)
def test_ffmpeg_convert(self):
bpayload = self.read("sample1.flac")
sampling_rate = 16000
self.assertEqual(len(bpayload), 282378)
waveform, params = normalize_payload_audio(bpayload, sampling_rate)
self.assertEqual(waveform.shape, (219040,))
out = ffmpeg_convert(waveform, sampling_rate, "flac")
self.assertEqual(len(out), 280204)
waveform2, params = normalize_payload_audio(out, sampling_rate)
self.assertEqual(waveform2.shape, (219040,))
def test_ffmpeg_convert_wav(self):
bpayload = self.read("sample1.flac")
sampling_rate = 16000
self.assertEqual(len(bpayload), 282378)
waveform, params = normalize_payload_audio(bpayload, sampling_rate)
self.assertEqual(waveform.shape, (219040,))
out = ffmpeg_convert(waveform, sampling_rate, "wav")
self.assertEqual(len(out), 438158)
waveform2, params = normalize_payload_audio(out, sampling_rate)
self.assertEqual(waveform2.shape, (219040,))
def test_ffmpeg_convert_8k_sampling(self):
bpayload = self.read("sample1.flac")
sampling_rate = 8000
self.assertEqual(len(bpayload), 282378)
waveform, params = normalize_payload_audio(bpayload, sampling_rate)
self.assertEqual(waveform.shape, (109520,))
out = ffmpeg_convert(waveform, sampling_rate, "flac")
self.assertEqual(len(out), 258996)
waveform2, params = normalize_payload_audio(out, sampling_rate)
self.assertEqual(waveform2.shape, (109520,))
|
0
|
hf_public_repos/api-inference-community
|
hf_public_repos/api-inference-community/tests/test_normalizers.py
|
from unittest import TestCase
import torch
from api_inference_community.normalizers import speaker_diarization_normalize
class NormalizersTestCase(TestCase):
def test_speaker_diarization_dummy(self):
tensor = torch.zeros((10, 2))
outputs = speaker_diarization_normalize(
tensor, 16000, ["SPEAKER_0", "SPEAKER_1"]
)
self.assertEqual(outputs, [])
def test_speaker_diarization(self):
tensor = torch.zeros((10, 2))
tensor[1:4, 0] = 1
tensor[3:8, 1] = 1
tensor[8:10, 0] = 1
outputs = speaker_diarization_normalize(
tensor, 16000, ["SPEAKER_0", "SPEAKER_1"]
)
self.assertEqual(
outputs,
[
{"class": "SPEAKER_0", "start": 1 / 16000, "end": 4 / 16000},
{"class": "SPEAKER_1", "start": 3 / 16000, "end": 8 / 16000},
{"class": "SPEAKER_0", "start": 8 / 16000, "end": 10 / 16000},
],
)
def test_speaker_diarization_3_speakers(self):
tensor = torch.zeros((10, 3))
tensor[1:4, 0] = 1
tensor[3:8, 1] = 1
tensor[8:10, 2] = 1
with self.assertRaises(ValueError):
outputs = speaker_diarization_normalize(
tensor, 16000, ["SPEAKER_0", "SPEAKER_1"]
)
outputs = speaker_diarization_normalize(
tensor, 16000, ["SPEAKER_0", "SPEAKER_1", "SPEAKER_2"]
)
self.assertEqual(
outputs,
[
{"class": "SPEAKER_0", "start": 1 / 16000, "end": 4 / 16000},
{"class": "SPEAKER_1", "start": 3 / 16000, "end": 8 / 16000},
{"class": "SPEAKER_2", "start": 8 / 16000, "end": 10 / 16000},
],
)
|
0
|
hf_public_repos/api-inference-community
|
hf_public_repos/api-inference-community/tests/test_nlp.py
|
import json
from unittest import TestCase
from api_inference_community.validation import normalize_payload_nlp
from parameterized import parameterized
from pydantic import ValidationError
class ValidationTestCase(TestCase):
def test_malformed_input(self):
bpayload = b"\xc3\x28"
with self.assertRaises(UnicodeDecodeError):
normalize_payload_nlp(bpayload, "question-answering")
def test_accept_raw_string_for_backward_compatibility(self):
query = "funny cats"
bpayload = query.encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "translation"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, query)
def test_invalid_tag(self):
query = "funny cats"
bpayload = query.encode("utf-8")
with self.assertRaises(ValueError):
normalize_payload_nlp(bpayload, "invalid-tag")
class QuestionAnsweringValidationTestCase(TestCase):
def test_valid_input(self):
inputs = {"question": "question", "context": "context"}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "question-answering"
)
self.assertEqual(processed_params, {})
self.assertEqual(inputs, normalized_inputs)
def test_missing_input(self):
inputs = {"question": "question"}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "question-answering")
class SentenceSimilarityValidationTestCase(TestCase):
def test_valid_input(self):
source_sentence = "why is the sky blue?"
sentences = ["this is", "a list of sentences"]
inputs = {"source_sentence": source_sentence, "sentences": sentences}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "sentence-similarity"
)
self.assertEqual(processed_params, {})
self.assertEqual(inputs, normalized_inputs)
def test_missing_input(self):
source_sentence = "why is the sky blue?"
inputs = {"source_sentence": source_sentence}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "sentence-similarity")
class ConversationalValidationTestCase(TestCase):
def test_valid_inputs(self):
past_user_inputs = ["Which movie is the best ?"]
generated_responses = ["It's Die Hard for sure."]
text = "Can you explain why ?"
inputs = {
"past_user_inputs": past_user_inputs,
"generated_responses": generated_responses,
"text": text,
}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "conversational"
)
self.assertEqual(processed_params, {})
self.assertEqual(inputs, normalized_inputs)
class TableQuestionAnsweringValidationTestCase(TestCase):
def test_valid_input(self):
query = "How many stars does the transformers repository have?"
table = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
}
inputs = {"query": query, "table": table}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "table-question-answering"
)
self.assertEqual(processed_params, {})
self.assertEqual(inputs, normalized_inputs)
def test_invalid_table_input(self):
query = "How many stars does the transformers repository have?"
table = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512"],
}
inputs = {"query": query, "table": table}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "table-question-answering")
def test_invalid_question(self):
query = "How many stars does the transformers repository have?"
table = "Invalid table"
inputs = {"query": query, "table": table}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "table-question-answering")
def test_invalid_query(self):
query = {"not a": "query"}
table = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
}
inputs = {"query": query, "table": table}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "table-question-answering")
def test_no_table(self):
query = "How many stars does the transformers repository have?"
inputs = {
"query": query,
}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "table-question-answering")
def test_no_query(self):
table = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
}
inputs = {"table": table}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "table-question-answering")
class TabularDataValidationTestCase(TestCase):
def test_valid_input(self):
data = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512", "3934"],
}
inputs = {"data": data}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "tabular-classification"
)
self.assertEqual(processed_params, {})
self.assertEqual(inputs, normalized_inputs)
def test_invalid_data_lengths(self):
data = {
"Repository": ["Transformers", "Datasets", "Tokenizers"],
"Stars": ["36542", "4512"],
}
inputs = {"data": data}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "tabular-classification")
def test_invalid_data_type(self):
inputs = {"data": "Invalid data"}
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "tabular-classification")
class SummarizationValidationTestCase(TestCase):
def test_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_valid_min_length(self):
params = {"min_length": 10}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
def test_invalid_negative_min_length(self):
params = {"min_length": -1}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
def test_invalid_large_min_length(self):
params = {"min_length": 1000}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
def test_invalid_type_min_length(self):
params = {"min_length": "invalid"}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
def test_valid_max_length(self):
params = {"max_length": 10}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
def test_invalid_negative_max_length(self):
params = {"max_length": -1}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
def test_invalid_large_max_length(self):
params = {"max_length": 1000}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
def test_invalid_type_max_length(self):
params = {"max_length": "invalid"}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
def test_invalid_min_length_larger_than_max_length(self):
params = {"min_length": 20, "max_length": 10}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "summarization"
)
class ZeroShotValidationTestCase(TestCase):
def test_single_label(self):
params = {"candidate_labels": "happy"}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "zero-shot-classification"
)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
def test_list_labels(self):
params = {"candidate_labels": ["happy", "sad"]}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "zero-shot-classification"
)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
def test_empty_list(self):
params = {"candidate_labels": []}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "zero-shot-classification")
def test_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "zero-shot-classification")
def test_multi_label(self):
params = {"candidate_labels": "happy", "multi_label": True}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "zero-shot-classification"
)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
def test_multi_label_wrong_type(self):
params = {"candidate_labels": "happy", "multi_label": "wrong type"}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "zero-shot-classification")
class FillMaskValidationTestCase(TestCase):
def test_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "fill-mask"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_top_k(self):
params = {"top_k": 10}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "fill-mask"
)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
def test_top_k_invalid_value(self):
params = {"top_k": 0}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "fill-mask")
def test_top_k_wrong_type(self):
params = {"top_k": "wrong type"}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, "fill-mask")
def make_text_generation_test_case(tag):
def valid_params():
return [
("max_new_tokens", 10),
("top_k", 5),
("top_p", 0.5),
("max_time", 20.0),
("repetition_penalty", 50.0),
("temperature", 10.0),
("return_full_text", True),
("num_return_sequences", 2),
]
def invalid_params():
return [
("min_length", 1000),
("min_length", 0),
("min_length", "invalid"),
("max_length", 1000),
("max_length", 0),
("max_length", "invalid"),
("top_k", 0),
("top_k", "invalid"),
("top_p", -0.1),
("top_p", 2.1),
("top_p", "invalid"),
("max_time", -0.1),
("max_time", 120.1),
("max_time", "invalid"),
("repetition_penalty", -0.1),
("repetition_penalty", 200.1),
("repetition_penalty", "invalid"),
("temperature", -0.1),
("temperature", 200.1),
("temperature", "invalid"),
("return_full_text", "invalid"),
("num_return_sequences", -1),
("num_return_sequences", 100),
]
class TextGenerationTestCase(TestCase):
def test_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(bpayload, tag)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
@parameterized.expand(valid_params())
def test_valid_params(self, param_name, param_value):
params = {param_name: param_value}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
normalized_inputs, processed_params = normalize_payload_nlp(bpayload, tag)
self.assertEqual(processed_params, params)
self.assertEqual(normalized_inputs, "whatever")
@parameterized.expand(invalid_params())
def test_invalid_params(self, param_name, param_value):
params = {param_name: param_value}
bpayload = json.dumps({"inputs": "whatever", "parameters": params}).encode(
"utf-8"
)
with self.assertRaises(ValidationError):
normalize_payload_nlp(bpayload, tag)
return TextGenerationTestCase
class Text2TextGenerationTestCase(
make_text_generation_test_case("text2text-generation")
):
pass
class TextGenerationTestCase(make_text_generation_test_case("text-generation")):
pass
class FeatureExtractionTestCase(TestCase):
def test_valid_string(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "feature-extraction"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_valid_list_of_strings(self):
inputs = ["hugging", "face"]
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "feature-extraction"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, inputs)
def test_invalid_list_with_other_type(self):
inputs = ["hugging", [1, 2, 3]]
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValueError):
normalize_payload_nlp(bpayload, "feature-extraction")
def test_invalid_empty_list(self):
inputs = []
bpayload = json.dumps({"inputs": inputs}).encode("utf-8")
with self.assertRaises(ValueError):
normalize_payload_nlp(bpayload, "feature-extraction")
class TasksWithOnlyInputStringTestCase(TestCase):
def test_fill_mask_accept_string_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "fill-mask"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_text_classification_accept_string_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "text-classification"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_token_classification_accept_string_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "token-classification"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_translation_accept_string_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "translation"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
def test_text_to_image_accept_string_no_params(self):
bpayload = json.dumps({"inputs": "whatever"}).encode("utf-8")
normalized_inputs, processed_params = normalize_payload_nlp(
bpayload, "text-to-image"
)
self.assertEqual(processed_params, {})
self.assertEqual(normalized_inputs, "whatever")
|
0
|
hf_public_repos/api-inference-community
|
hf_public_repos/api-inference-community/tests/test_image.py
|
import os
from unittest import TestCase
import PIL
from api_inference_community.validation import normalize_payload_image
class ValidationTestCase(TestCase):
def test_original_imagefile(self):
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "samples", "plane.jpg")
with open(filename, "rb") as f:
bpayload = f.read()
payload, params = normalize_payload_image(bpayload)
self.assertEqual(params, {})
self.assertTrue(isinstance(payload, PIL.Image.Image))
self.assertEqual(payload.size, (300, 200))
def test_secondary_file(self):
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "samples", "plane2.jpg")
with open(filename, "rb") as f:
bpayload = f.read()
payload, params = normalize_payload_image(bpayload)
self.assertEqual(params, {})
self.assertTrue(isinstance(payload, PIL.Image.Image))
self.assertEqual(payload.size, (2560, 1440))
|
0
|
hf_public_repos/api-inference-community
|
hf_public_repos/api-inference-community/tests/test_routes.py
|
import io
import json
import logging
import os
from base64 import b64encode
from unittest import TestCase
import numpy as np
from api_inference_community.routes import pipeline_route, status_ok
from PIL import Image
from starlette.applications import Starlette
from starlette.routing import Route
from starlette.testclient import TestClient
class ValidationTestCase(TestCase):
def read(self, filename: str) -> bytes:
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "samples", filename)
with open(filename, "rb") as f:
bpayload = f.read()
return bpayload
def test_pipeline(self):
os.environ["TASK"] = "text-classification"
class Pipeline:
def __init__(self):
pass
def __call__(self, input_: str):
return {"some": "json serializable"}
def get_pipeline():
return Pipeline()
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
app = Starlette(routes=routes)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
with TestClient(app) as client:
response = client.post("/", data=b"Some")
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["x-compute-characters"], "4")
self.assertEqual(response.content, b'{"some":"json serializable"}')
def test_invalid_task(self):
os.environ["TASK"] = "invalid"
class Pipeline:
def __init__(self):
pass
def __call__(self, input_: str):
return {"some": "json serializable"}
def get_pipeline():
return Pipeline()
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
app = Starlette(routes=routes)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
with TestClient(app) as client:
response = client.post("/", data=b"")
self.assertEqual(
response.status_code,
400,
)
self.assertEqual(
response.content,
b'{"error":"The task `invalid` is not recognized by api-inference-community"}',
)
def test_invalid_pipeline(self):
os.environ["TASK"] = "text-generation"
def get_pipeline():
raise Exception("We cannot load the pipeline")
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
app = Starlette(routes=routes)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
with TestClient(app) as client:
response = client.post("/", data=b"")
self.assertEqual(
response.status_code,
500,
)
self.assertEqual(response.content, b'{"error":"We cannot load the pipeline"}')
def test_tts_pipeline(self):
os.environ["TASK"] = "text-to-speech"
class Pipeline:
def __init__(self):
pass
def __call__(self, input_: str):
return np.array([0, 0, 0]), 16000
def get_pipeline():
return Pipeline()
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
app = Starlette(routes=routes)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
with TestClient(app) as client:
response = client.post("/", data=b"2222")
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "audio/flac")
def test_tts_pipeline_wav(self):
os.environ["TASK"] = "text-to-speech"
class Pipeline:
def __init__(self):
pass
def __call__(self, input_: str):
return np.array([0, 0, 0]), 16000
def get_pipeline():
return Pipeline()
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
app = Starlette(routes=routes)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
logger.handlers = [handler]
app.get_pipeline = get_pipeline
with TestClient(app) as client:
response = client.post("/", data=b"2222", headers={"accept": "audio/wav"})
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "audio/wav")
def test_tts_pipeline_ogg(self):
os.environ["TASK"] = "text-to-speech"
class Pipeline:
def __init__(self):
pass
def __call__(self, input_: str):
return np.array([0, 0, 0]), 16000
def get_pipeline():
return Pipeline()
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
app = Starlette(routes=routes)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
logger.handlers = [handler]
app.get_pipeline = get_pipeline
with TestClient(app) as client:
response = client.post("/", data=b"2222", headers={"accept": "audio/ogg"})
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "audio/ogg")
def test_audio_to_audio_pipeline(self):
os.environ["TASK"] = "audio-to-audio"
class Pipeline:
def __init__(self):
self.sampling_rate = 16000
def __call__(self, input_: str):
return np.array([[0, 0, 0]]), 16000, ["label_0"]
def get_pipeline():
return Pipeline()
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
app = Starlette(routes=routes)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
bpayload = self.read("sample1.flac")
with TestClient(app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "application/json")
self.assertEqual(response.headers["x-compute-audio-length"], "13.69")
data = json.loads(response.content)
self.assertEqual(len(data), 1)
self.assertEqual(set(data[0].keys()), {"blob", "label", "content-type"})
self.assertEqual(data[0]["content-type"], "audio/flac")
self.assertEqual(data[0]["label"], "label_0")
def test_audio_to_audio_pipeline_wav(self):
os.environ["TASK"] = "audio-to-audio"
class Pipeline:
def __init__(self):
self.sampling_rate = 16000
def __call__(self, input_: str):
return np.array([[0, 0, 0]]), 16000, ["label_0"]
def get_pipeline():
return Pipeline()
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
app = Starlette(routes=routes)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
logger.handlers = [handler]
app.get_pipeline = get_pipeline
bpayload = self.read("sample1.flac")
with TestClient(app) as client:
response = client.post("/", data=bpayload, headers={"accept": "audio/wav"})
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "application/json")
self.assertEqual(response.headers["x-compute-audio-length"], "13.69")
data = json.loads(response.content)
self.assertEqual(len(data), 1)
self.assertEqual(set(data[0].keys()), {"blob", "label", "content-type"})
self.assertEqual(data[0]["content-type"], "audio/wav")
self.assertEqual(data[0]["label"], "label_0")
def test_text_to_image_pipeline(self):
os.environ["TASK"] = "text-to-image"
class Pipeline:
def __init__(self):
pass
def __call__(self, input_: str):
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "samples", "plane.jpg")
returned_image = Image.open(filename)
return returned_image
def get_pipeline():
return Pipeline()
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
app = Starlette(routes=routes)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
)
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
with TestClient(app) as client:
response = client.post("/", data=b"")
buf = io.BytesIO(response.content)
image = Image.open(buf)
self.assertEqual(
response.status_code,
200,
)
self.assertTrue(isinstance(image, Image.Image))
self.assertEqual(response.headers["content-type"], "image/jpeg")
def test_text_to_image_pipeline_png(self):
    """text-to-image task: an `Accept: image/png` header must switch the
    serialized output format to PNG."""
    os.environ["TASK"] = "text-to-image"

    class Pipeline:
        # Minimal stub pipeline: ignores the prompt and returns a fixed
        # sample image from disk.
        def __init__(self):
            pass

        def __call__(self, input_: str):
            dirname = os.path.dirname(os.path.abspath(__file__))
            filename = os.path.join(dirname, "samples", "plane.jpg")
            returned_image = Image.open(filename)
            return returned_image

    def get_pipeline():
        return Pipeline()

    routes = [
        Route("/{whatever:path}", status_ok),
        Route("/{whatever:path}", pipeline_route, methods=["POST"]),
    ]
    app = Starlette(routes=routes)

    @app.on_event("startup")
    async def startup_event():
        logger = logging.getLogger("uvicorn.access")
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        )
        logger.handlers = [handler]

    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    with TestClient(app) as client:
        response = client.post("/", data=b"", headers={"accept": "image/png"})
        buf = io.BytesIO(response.content)
        image = Image.open(buf)
        self.assertEqual(
            response.status_code,
            200,
        )
        self.assertTrue(isinstance(image, Image.Image))
        self.assertEqual(response.headers["content-type"], "image/png")
def test_pipeline_zero_shot(self):
    """text-classification with extra `parameters`: the route must forward
    `candidate_labels` to the pipeline and echo its JSON result, reporting
    the input length in `x-compute-characters`."""
    os.environ["TASK"] = "text-classification"

    class Pipeline:
        # Stub pipeline that simply echoes the candidate labels it received.
        def __init__(self):
            pass

        def __call__(self, input_: str, candidate_labels=None):
            return {
                "some": "json serializable",
                "candidate_labels": candidate_labels,
            }

    app = Starlette(
        routes=[
            Route("/{whatever:path}", status_ok),
            Route("/{whatever:path}", pipeline_route, methods=["POST"]),
        ]
    )

    @app.on_event("startup")
    async def startup_event():
        access_logger = logging.getLogger("uvicorn.access")
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(
            logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        )
        access_logger.handlers = [stream_handler]

    # Link between `api-inference-community` and framework code.
    app.get_pipeline = lambda: Pipeline()

    with TestClient(app) as client:
        payload = {"inputs": "Some", "parameters": {"candidate_labels": ["a", "b"]}}
        response = client.post("/", json=payload)
        self.assertEqual(response.status_code, 200)
        # "Some" is 4 characters long.
        self.assertEqual(response.headers["x-compute-characters"], "4")
        self.assertEqual(
            response.content,
            b'{"some":"json serializable","candidate_labels":["a","b"]}',
        )
def test_pipeline_zero_shot_image(self):
    """zero-shot-image-classification: a base64-encoded image in `inputs`
    plus `candidate_labels` in `parameters` must reach the pipeline."""
    os.environ["TASK"] = "zero-shot-image-classification"

    class Pipeline:
        # Stub pipeline that simply echoes the candidate labels it received.
        def __init__(self):
            pass

        def __call__(self, input_: Image, candidate_labels=None):
            return {
                "some": "json serializable",
                "candidate_labels": candidate_labels,
            }

    def get_pipeline():
        return Pipeline()

    routes = [
        Route("/{whatever:path}", status_ok),
        Route("/{whatever:path}", pipeline_route, methods=["POST"]),
    ]
    app = Starlette(routes=routes)

    @app.on_event("startup")
    async def startup_event():
        logger = logging.getLogger("uvicorn.access")
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        )
        logger.handlers = [handler]

    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    dirname = os.path.dirname(os.path.abspath(__file__))
    filename = os.path.join(dirname, "samples", "plane.jpg")
    with open(filename, "rb") as f:
        image = f.read()
    # Images travel base64-encoded inside the JSON body for this task.
    image = b64encode(image).decode("utf-8")
    with TestClient(app) as client:
        response = client.post(
            "/",
            json={"inputs": image, "parameters": {"candidate_labels": ["a", "b"]}},
        )
        self.assertEqual(
            response.status_code,
            200,
        )
        self.assertEqual(response.headers["x-compute-images"], "1")
        self.assertEqual(
            response.content,
            b'{"some":"json serializable","candidate_labels":["a","b"]}',
        )
def test_image_classification_pipeline(self):
    """image-classification: a raw image body is decoded and the pipeline's
    JSON-serializable result is returned verbatim."""
    os.environ["TASK"] = "image-classification"

    class Pipeline:
        # Stub pipeline with a fixed, JSON-serializable answer.
        def __init__(self):
            pass

        def __call__(self, input_: Image.Image):
            return [{"label_0": 1.0}]

    app = Starlette(
        routes=[
            Route("/{whatever:path}", status_ok),
            Route("/{whatever:path}", pipeline_route, methods=["POST"]),
        ]
    )

    @app.on_event("startup")
    async def startup_event():
        access_logger = logging.getLogger("uvicorn.access")
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(
            logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        )
        access_logger.handlers = [stream_handler]

    # Link between `api-inference-community` and framework code.
    app.get_pipeline = lambda: Pipeline()

    sample_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "samples", "plane.jpg"
    )
    with open(sample_path, "rb") as fh:
        image_bytes = fh.read()

    with TestClient(app) as client:
        response = client.post("/", data=image_bytes)
        self.assertEqual(
            response.status_code,
            200,
        )
        self.assertEqual(
            response.headers["x-compute-images"],
            "1",
        )
        self.assertEqual(json.loads(response.content), [{"label_0": 1.0}])
|
0
|
hf_public_repos/api-inference-community
|
hf_public_repos/api-inference-community/tests/test_dockers.py
|
import base64
import json
import os
import subprocess
import time
import unittest
import uuid
from collections import Counter
from typing import Any, Optional
import httpx
class DockerPopen(subprocess.Popen):
    """Popen subclass whose context-manager exit stops the child process
    (terminate + bounded wait) before the regular Popen cleanup runs."""

    def __exit__(self, exc_type, exc_value, tb):
        self.terminate()
        # Give the container process up to 20s to shut down cleanly.
        self.wait(20)
        return super().__exit__(exc_type, exc_value, tb)
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so user-relative paths work too.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Restore the original directory even if the body raised.
        os.chdir(self.savedPath)
@unittest.skipIf(
    "RUN_DOCKER_TESTS" not in os.environ,
    "Docker tests are slow, set `RUN_DOCKER_TESTS=1` environment variable to run them",
)
class DockerImageTests(unittest.TestCase):
    """End-to-end smoke tests: build each framework's docker image, boot the
    container and probe its HTTP API with a battery of payloads (raw text,
    JSON, audio files, images).

    Every request must answer 200 or 400 (never 5xx), and at least one
    request per image must succeed.
    """

    def create_docker(self, name: str) -> str:
        # Build docker_images/<name> under a unique random tag so parallel
        # runs do not clobber each other's images.
        rand = str(uuid.uuid4())[:5]
        tag = f"{name}:{rand}"
        with cd(
            os.path.join(
                os.path.dirname(os.path.dirname(__file__)), "docker_images", name
            )
        ):
            proc = subprocess.run(["docker", "build", ".", "-t", tag])
            self.assertEqual(proc.returncode, 0)
        return tag

    # --- One test per supported framework.  Each exercises at least one real
    # --- Hub model id; most also check the invalid-task error path.

    def test_allennlp(self):
        self.framework_docker_test(
            "allennlp", "question-answering", "lysandre/bidaf-elmo-model-2020.03.19"
        )
        self.framework_invalid_test("allennlp")

    def test_asteroid(self):
        self.framework_docker_test(
            "asteroid",
            "audio-to-audio",
            "mhu-coder/ConvTasNet_Libri1Mix_enhsingle",
        )
        self.framework_docker_test(
            "asteroid",
            "audio-to-audio",
            "julien-c/DPRNNTasNet-ks16_WHAM_sepclean",
        )
        self.framework_invalid_test("asteroid")

    def test_bertopic(self):
        self.framework_docker_test(
            "bertopic", "text-classification", "MaartenGr/BERTopic_ArXiv"
        )
        self.framework_invalid_test("bertopic")

    def test_espnet(self):
        self.framework_docker_test(
            "espnet",
            "text-to-speech",
            "espnet/kan-bayashi_ljspeech_fastspeech2",
        )
        self.framework_invalid_test("espnet")
        self.framework_docker_test(
            "espnet",
            "automatic-speech-recognition",
            "pyf98/librispeech_100h_transformer",
        )

    def test_fairseq(self):
        self.framework_docker_test(
            "fairseq",
            "text-to-speech",
            "facebook/fastspeech2-en-ljspeech",
        )
        self.framework_docker_test(
            "fairseq",
            "audio-to-audio",
            "facebook/xm_transformer_600m-es_en-multi_domain",
        )
        self.framework_invalid_test("fairseq")

    def test_fasttext(self):
        self.framework_docker_test(
            "fasttext",
            "text-classification",
            "osanseviero/fasttext_nearest",
        )
        self.framework_docker_test(
            "fasttext",
            "feature-extraction",
            "osanseviero/fasttext_embedding",
        )
        self.framework_docker_test(
            "fasttext",
            "text-classification",
            "sheonhan/fasttext-language-identification",
        )
        self.framework_invalid_test("fasttext")

    def test_sentence_transformers(self):
        self.framework_docker_test(
            "sentence_transformers",
            "feature-extraction",
            "bert-base-uncased",
        )
        self.framework_docker_test(
            "sentence_transformers",
            "sentence-similarity",
            "ymelka/camembert-cosmetic-similarity-cp1200",
        )
        self.framework_docker_test(
            "sentence_transformers",
            "sentence-similarity",
            "sentence-transformers/paraphrase-distilroberta-base-v1",
        )
        self.framework_invalid_test("sentence_transformers")

    def test_adapter_transformers(self):
        self.framework_docker_test(
            "adapter_transformers",
            "question-answering",
            "AdapterHub/roberta-base-pf-squad",
        )
        self.framework_docker_test(
            "adapter_transformers",
            "summarization",
            "AdapterHub/facebook-bart-large_sum_xsum_pfeiffer",
        )
        self.framework_docker_test(
            "adapter_transformers",
            "text-classification",
            "AdapterHub/roberta-base-pf-sick",
        )
        self.framework_docker_test(
            "adapter_transformers",
            "text-generation",
            "AdapterHub/gpt2_lm_poem_pfeiffer",
        )
        self.framework_docker_test(
            "adapter_transformers",
            "token-classification",
            "AdapterHub/roberta-base-pf-conll2003",
        )
        self.framework_invalid_test("adapter_transformers")

    def test_flair(self):
        self.framework_docker_test(
            "flair", "token-classification", "flair/chunk-english-fast"
        )
        self.framework_invalid_test("flair")

    def test_paddlenlp(self):
        self.framework_docker_test(
            "paddlenlp", "fill-mask", "PaddleCI/tiny-random-ernie"
        )
        self.framework_docker_test(
            "paddlenlp", "conversational", "PaddleCI/tiny-random-plato-mini"
        )
        self.framework_docker_test(
            "paddlenlp", "zero-shot-classification", "PaddleCI/tiny-random-ernie"
        )
        self.framework_docker_test(
            "paddlenlp", "summarization", "PaddleCI/tiny-random-unimo-text-1.0"
        )
        self.framework_invalid_test("paddlenlp")

    def test_sklearn(self):
        # Tabular tasks need a structured payload: column name -> values.
        clf_data = {
            "data": {
                "sepal length (cm)": [6.1, 5.7, 7.7],
                "sepal width (cm)": [2.8, 3.8, 2.6],
                "petal length (cm)": [4.7, 1.7, 6.9],
                "petal width (cm)": [1.2, 0.3, 2.3],
            }
        }
        self.framework_docker_test(
            "sklearn",
            "tabular-classification",
            "skops-tests/iris-sklearn-latest-logistic_regression-with-config",
            custom_input=clf_data,
            timeout=600,
        )
        regr_data = {
            "data": {
                "age": [0.045, 0.092, 0.063],
                "sex": [-0.044, -0.044, 0.050],
                "bmi": [-0.006, 0.036, -0.004],
                "bp": [-0.015, 0.021, -0.012],
                "s1": [0.125, -0.024, 0.103],
                "s2": [0.125, -0.016, 0.048],
                "s3": [0.019, 0.000, 0.056],
                "s4": [0.034, -0.039, -0.002],
                "s5": [0.032, -0.022, 0.084],
                "s6": [-0.005, -0.021, -0.017],
            }
        }
        self.framework_docker_test(
            "sklearn",
            "tabular-regression",
            "skops-tests/tabularregression-sklearn-latest-linear_regression-with-config",
            custom_input=regr_data,
            timeout=600,
        )
        self.framework_docker_test(
            "sklearn",
            "text-classification",
            "merve/20newsgroups",
            timeout=600,
        )

    def test_k2(self):
        self.framework_docker_test(
            "k2",
            "automatic-speech-recognition",
            "csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13",
        )

    def test_spacy(self):
        self.framework_docker_test(
            "spacy",
            "token-classification",
            "spacy/en_core_web_sm",
        )
        self.framework_docker_test(
            "spacy",
            "text-classification",
            "cverluise/xx_cat_pateexx_md",
        )
        self.framework_docker_test(
            "spacy",
            "sentence-similarity",
            "spacy/en_core_web_sm",
        )
        self.framework_invalid_test("spacy")

    def test_span_marker(self):
        self.framework_docker_test(
            "span_marker",
            "token-classification",
            "tomaarsen/span-marker-bert-tiny-fewnerd-coarse-super",
        )

    def test_setfit(self):
        self.framework_docker_test(
            "setfit",
            "text-classification",
            "tomaarsen/setfit-all-MiniLM-L6-v2-sst2-32-shot",
        )

    def test_speechbrain(self):
        self.framework_docker_test(
            "speechbrain",
            "automatic-speech-recognition",
            "speechbrain/asr-crdnn-commonvoice-it",
        )
        self.framework_docker_test(
            "speechbrain",
            "automatic-speech-recognition",
            "speechbrain/asr-wav2vec2-commonvoice-fr",
        )
        self.framework_docker_test(
            "speechbrain",
            "text-to-speech",
            "speechbrain/tts-tacotron2-ljspeech",
        )
        self.framework_invalid_test("speechbrain")
        # source-separation
        self.framework_docker_test(
            "speechbrain",
            "audio-to-audio",
            "speechbrain/sepformer-wham",
        )
        # speech-enchancement
        self.framework_docker_test(
            "speechbrain",
            "audio-to-audio",
            "speechbrain/mtl-mimic-voicebank",
        )
        self.framework_docker_test(
            "speechbrain",
            "audio-classification",
            "speechbrain/urbansound8k_ecapa",
        )

    def test_stanza(self):
        self.framework_docker_test(
            "stanza", "token-classification", "stanfordnlp/stanza-en"
        )
        self.framework_docker_test(
            "stanza",
            "token-classification",
            "stanfordnlp/stanza-tr",
        )
        self.framework_invalid_test("stanza")

    def test_timm(self):
        self.framework_docker_test("timm", "image-classification", "sgugger/resnet50d")
        self.framework_invalid_test("timm")

    def test_diffusers(self):
        # Diffusion models are slow to load; raise the uvicorn worker timeout.
        self.framework_docker_test(
            "diffusers",
            "text-to-image",
            "hf-internal-testing/tiny-stable-diffusion-pipe",
            custom_environment={"UVICORN_TIMEOUT": "1200"},
        )
        self.framework_docker_test(
            "diffusers",
            "image-to-image",
            "hf-internal-testing/tiny-controlnet",
            timeout=600,
            custom_environment={"UVICORN_TIMEOUT": "1200"},
        )
        self.framework_invalid_test("diffusers")

    def test_peft(self):
        self.framework_docker_test(
            "peft",
            "text-generation",
            "ybelkada/test-st-lora",
            timeout=1000,
        )

    def test_pyannote_audio(self):
        self.framework_docker_test(
            "pyannote_audio",
            "automatic-speech-recognition",
            "pyannote/voice-activity-detection",
        )
        self.framework_invalid_test("pyannote_audio")

    def test_fastai(self):
        # Single Output Unit, RGB
        self.framework_docker_test(
            "fastai", "image-classification", "fastai/fastbook_02_bears_classifier"
        )
        self.framework_docker_test(
            "fastai",
            "image-classification",
            "Kieranm/britishmus_plate_material_classifier",
        )
        self.framework_invalid_test("fastai")

    def test_doctr(self):
        self.framework_docker_test(
            "doctr", "object-detection", "mindee/fasterrcnn_mobilenet_v3_large_fpn"
        )
        self.framework_invalid_test("doctr")

    def test_nemo(self):
        self.framework_docker_test(
            "nemo", "automatic-speech-recognition", "nvidia/stt_en_conformer_ctc_large"
        )

    def test_mindspore(self):
        self.framework_docker_test(
            "mindspore", "image-classification", "helloway/lenet"
        )
        self.framework_invalid_test("mindspore")

    def test_open_clip(self):
        self.framework_docker_test(
            "open_clip",
            "zero-shot-image-classification",
            "laion/CLIP-ViT-B-32-laion2B-s34B-b79K",
        )
        self.framework_invalid_test("open_clip")

    def framework_invalid_test(self, framework: str):
        """Boot the image with an invalid TASK/MODEL_ID and check that the
        server still answers: liveness OK, POST rejected with 400/500 JSON."""
        task = "invalid"
        model_id = "invalid"
        tag = self.create_docker(framework)
        run_docker_command = [
            "docker",
            "run",
            "-p",
            "8000:80",
            "-e",
            f"TASK={task}",
            "-e",
            f"MODEL_ID={model_id}",
            "-v",
            "/tmp:/data",
            "-t",
            tag,
        ]
        url = "http://localhost:8000"
        timeout = 60
        with DockerPopen(run_docker_command) as proc:
            # Poll liveness for up to ~400s while the container boots.
            # NOTE(review): `response` is unbound if the server never comes
            # up within the retry budget — the assert below would then raise
            # NameError instead of a clean failure; confirm intended.
            for i in range(400):
                try:
                    response = httpx.get(url, timeout=10)
                    break
                except Exception:
                    time.sleep(1)
            self.assertEqual(response.content, b'{"ok":"ok"}')
            response = httpx.post(url, data=b"This is a test", timeout=timeout)
            self.assertIn(response.status_code, {400, 500})
            self.assertEqual(response.headers["content-type"], "application/json")
            proc.terminate()
            proc.wait(20)

    def framework_docker_batch(
        self,
        framework: str,
        task: str,
        model_id: str,
        dataset_name: str,
        dataset_config: str,
        dataset_split: str,
        dataset_column: str,
    ):
        """Run the image's batch entrypoint (app/batch.py) over a dataset;
        only checks that the process terminates."""
        tag = self.create_docker(framework)
        run_docker_command = [
            "docker",
            "run",
            "-p",
            "8000:80",
            "-it",
            "-e",
            f"TASK={task}",
            "-e",
            f"MODEL_ID={model_id}",
            "-e",
            f"DATASET_NAME={dataset_name}",
            "-e",
            f"DATASET_CONFIG={dataset_config}",
            "-e",
            f"DATASET_SPLIT={dataset_split}",
            "-e",
            f"DATASET_COLUMN={dataset_column}",
            "-v",
            "/tmp:/data",
            "-t",
            tag,
            "python",
            "app/batch.py",
        ]
        with DockerPopen(run_docker_command) as proc:
            proc.wait()
        self.assertTrue(True)

    def framework_docker_test(
        self,
        framework: str,
        task: str,
        model_id: str,
        custom_input: Optional[
            Any
        ] = None,  # if given, check inference with this specific input
        timeout=60,
        custom_environment: Optional[dict] = None,
    ):
        """Build + run the image for (task, model_id) and fire every known
        payload shape at it.  Each response must be 200 or 400; `counter`
        tracks status codes and at least one 200 is required overall.  A
        second boot verifies warm start (cached model) comes up fast."""
        tag = self.create_docker(framework)
        run_docker_command = [
            "docker",
            "run",
            "-p",
            "8000:80",
            "-e",
            f"TASK={task}",
            "-e",
            f"MODEL_ID={model_id}",
        ]
        if custom_environment:
            for k, v in custom_environment.items():
                run_docker_command += ["-e", f"{k}={v}"]
        run_docker_command += [
            "-v",
            "/tmp:/data",
            "-t",
            tag,
        ]
        url = "http://localhost:8000"
        counter = Counter()
        with DockerPopen(run_docker_command) as proc:
            # Poll liveness for up to ~400s while the model downloads/loads.
            # NOTE(review): `response` is unbound if the server never comes
            # up within the retry budget — confirm intended.
            for i in range(400):
                try:
                    response = httpx.get(url, timeout=10)
                    break
                except Exception:
                    time.sleep(1)
            self.assertEqual(response.content, b'{"ok":"ok"}')
            # Raw text body.
            response = httpx.post(url, data=b"This is a test", timeout=timeout)
            self.assertIn(response.status_code, {200, 400}, response.content)
            counter[response.status_code] += 1
            response = httpx.post(
                url,
                # Include the mask for fill-mask tests.
                json={"inputs": "This is a test [MASK]"},
                timeout=timeout,
            )
            self.assertIn(response.status_code, {200, 400})
            counter[response.status_code] += 1
            response = httpx.post(
                url,
                # Conversational
                json={
                    "inputs": {
                        "text": "My name [MASK].",
                        "past_user_inputs": [],
                        "generated_responses": [],
                    }
                },
                timeout=timeout,
            )
            self.assertIn(response.status_code, {200, 400})
            counter[response.status_code] += 1
            # Question answering payload.
            response = httpx.post(
                url,
                json={
                    "inputs": {"question": "This is a test", "context": "Some context"}
                },
                timeout=timeout,
            )
            self.assertIn(response.status_code, {200, 400})
            counter[response.status_code] += 1
            # Sentence similarity payload.
            response = httpx.post(
                url,
                json={
                    "inputs": {
                        "source_sentence": "This is a test",
                        "sentences": ["Some context", "Something else"],
                    }
                },
                timeout=timeout,
            )
            self.assertIn(response.status_code, {200, 400}, response.content)
            counter[response.status_code] += 1
            # Zero-shot payload.
            response = httpx.post(
                url,
                json={
                    "inputs": "This is a test",
                    "parameters": {"candidate_labels": ["a", "b"]},
                },
                timeout=timeout,
            )
            self.assertIn(response.status_code, {200, 400}, response.content)
            counter[response.status_code] += 1
            # Tabular payload.
            response = httpx.post(
                url,
                json={
                    "inputs": {
                        "data": {
                            "1": [7.4],
                            "2": [7.5],
                            "3": [7.7],
                            "4": [7.7],
                            "5": [7.7],
                            "6": [7.7],
                            "7": [7.7],
                            "8": [7.7],
                            "9": [7.7],
                            "10": [7.7],
                            "11": [7.7],
                        }
                    }
                },
                timeout=timeout,
            )
            self.assertIn(response.status_code, {200, 400})
            counter[response.status_code] += 1
            if custom_input is not None:
                response = httpx.post(
                    url,
                    json=custom_input,
                    timeout=timeout,
                )
                self.assertIn(response.status_code, {200, 400})
                counter[response.status_code] += 1
            # Raw audio body (flac).
            with open(
                os.path.join(os.path.dirname(__file__), "samples", "sample1.flac"), "rb"
            ) as f:
                data = f.read()
            response = httpx.post(url, data=data, timeout=timeout)
            self.assertIn(response.status_code, {200, 400})
            counter[response.status_code] += 1
            if response.status_code == 200:
                # Audio tasks may answer JSON (ASR / classification /
                # segmentation / audio-to-audio) or raw flac (TTS-like).
                if response.headers["content-type"] == "application/json":
                    data = json.loads(response.content)
                    if isinstance(data, dict):
                        # ASR
                        self.assertEqual(set(data.keys()), {"text"})
                    elif isinstance(data, list):
                        if len(data) > 0:
                            keys = set(data[0].keys())
                            if keys == {"blob", "content-type", "label"}:
                                # audio-to-audio
                                self.assertEqual(
                                    keys, {"blob", "content-type", "label"}
                                )
                            else:
                                speech_segmentation_keys = {"class", "start", "end"}
                                audio_classification_keys = {"label", "score"}
                                self.assertIn(
                                    keys,
                                    [
                                        audio_classification_keys,
                                        speech_segmentation_keys,
                                    ],
                                )
                    else:
                        raise Exception("Invalid result")
                elif response.headers["content-type"] == "audio/flac":
                    pass
                else:
                    raise Exception("Unknown format")
            # Malformed audio must not crash the server.
            with open(
                os.path.join(os.path.dirname(__file__), "samples", "malformed.flac"),
                "rb",
            ) as f:
                data = f.read()
            response = httpx.post(url, data=data, timeout=timeout)
            self.assertIn(response.status_code, {200, 400}, response.content)
            counter[response.status_code] += 1
            # Raw image body.
            with open(
                os.path.join(os.path.dirname(__file__), "samples", "plane.jpg"), "rb"
            ) as f:
                data = f.read()
            response = httpx.post(url, data=data, timeout=timeout)
            self.assertIn(response.status_code, {200, 400})
            counter[response.status_code] += 1
            # Base64 image + candidate labels (zero-shot-image).
            with open(
                os.path.join(os.path.dirname(__file__), "samples", "plane.jpg"), "rb"
            ) as f:
                data = f.read()
            json_data = {
                "inputs": base64.b64encode(data).decode("utf-8"),
                "parameters": {"candidate_labels": ["A", "B"]},
            }
            response = httpx.post(url, json=json_data, timeout=timeout)
            self.assertIn(response.status_code, {200, 400})
            counter[response.status_code] += 1
            # Dual-channel audio.
            with open(
                os.path.join(os.path.dirname(__file__), "samples", "sample1_dual.ogg"),
                "rb",
            ) as f:
                data = f.read()
            response = httpx.post(url, data=data, timeout=timeout)
            self.assertIn(response.status_code, {200, 400})
            counter[response.status_code] += 1
            # Browser-recorded audio (webm).
            with open(
                os.path.join(os.path.dirname(__file__), "samples", "sample1.webm"), "rb"
            ) as f:
                data = f.read()
            response = httpx.post(url, data=data, timeout=timeout)
            self.assertIn(response.status_code, {200, 400})
            counter[response.status_code] += 1
            proc.terminate()
            proc.wait(20)
        self.assertEqual(proc.returncode, 0)
        self.assertGreater(
            counter[200],
            0,
            f"At least one request should have gone through {framework}, {task}, {model_id}",
        )
        # Follow up loading are much faster, 20s should be ok.
        with DockerPopen(run_docker_command) as proc2:
            for i in range(20):
                try:
                    response2 = httpx.get(url, timeout=10)
                    break
                except Exception:
                    time.sleep(1)
            self.assertEqual(response2.content, b'{"ok":"ok"}')
            proc2.terminate()
            proc2.wait(20)
        self.assertEqual(proc2.returncode, 0)
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/setfit/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="Tom Aarsen <tom.aarsen@huggingface.co>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
# Install Python dependencies before copying the app so the layer is cached
# across code-only changes.
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
# Model downloads go to the /data volume mounted at runtime.
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/setfit/requirements.txt
|
starlette==0.27.0
git+https://github.com/huggingface/api-inference-community.git@f06a71e72e92caeebabaeced979eacb3542bf2ca
huggingface_hub==0.20.2
setfit==1.0.3
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/setfit/prestart.sh
|
# Run by the base image before gunicorn starts: app/main.py's __main__ block
# calls get_pipeline() once, so the model is loaded/downloaded ahead of the
# first request.
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/setfit
|
hf_public_repos/api-inference-community/docker_images/setfit/app/main.py
|
import functools
import logging
import os
import pathlib
from typing import Dict, Type
from api_inference_community import hub
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline, TextClassificationPipeline
from huggingface_hub import constants
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
def get_model_id():
    """Resolve the model identifier from the environment.

    In hub-offline mode the repo id is mapped to the local snapshot path of
    the cached revision, so downstream loaders never hit the network.
    """
    model_id = os.getenv("MODEL_ID")
    # Workaround, when sentence_transformers handles properly this env variable
    # this should not be needed anymore
    if constants.HF_HUB_OFFLINE:
        snapshot_cache = pathlib.Path(constants.HF_HUB_CACHE)
        model_id = hub.cached_revision_path(
            cache_dir=snapshot_cache,
            repo_id=model_id,
            revision=os.getenv("REVISION"),
        )
    return model_id
# Resolved once at import time (may be a local snapshot path when offline).
MODEL_ID = get_model_id()

logger = logging.getLogger(__name__)

# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
    "text-classification": TextClassificationPipeline,
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Instantiate (once, thanks to lru_cache) the pipeline for the
    configured TASK, or raise EnvironmentError for unsupported tasks."""
    requested_task = os.environ["TASK"]
    pipeline_cls = ALLOWED_TASKS.get(requested_task)
    if pipeline_cls is None:
        raise EnvironmentError(
            f"{requested_task} is not a valid pipeline for model : {MODEL_ID}"
        )
    return pipeline_cls(MODEL_ID)
# Both routes match any path: GET serves the `status_ok` liveness handler,
# POST dispatches to the shared `pipeline_route`.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
# Compress responses larger than 1KB.
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware

    # Fully permissive CORS, enabled only when DEBUG=1.
    middleware.append(
        Middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_headers=["*"],
            allow_methods=["*"],
        )
    )
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Configure uvicorn access logging and eagerly build the pipeline."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]
    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        # Warm the lru_cache so the first request does not pay the load cost.
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
if __name__ == "__main__":
    # Executed by prestart.sh before gunicorn starts: pre-load the model so
    # the workers boot warm.
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0
|
hf_public_repos/api-inference-community/docker_images/setfit/app
|
hf_public_repos/api-inference-community/docker_images/setfit/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Abstract contract for task pipelines: construct from a model id, then
    act as a callable mapping task inputs to task outputs."""

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Base exception type for pipeline-level errors."""

    pass
|
0
|
hf_public_repos/api-inference-community/docker_images/setfit/app
|
hf_public_repos/api-inference-community/docker_images/setfit/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException
from app.pipelines.text_classification import TextClassificationPipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/setfit/app
|
hf_public_repos/api-inference-community/docker_images/setfit/app/pipelines/text_classification.py
|
from typing import Dict, List
from app.pipelines import Pipeline
from setfit import SetFitModel
class TextClassificationPipeline(Pipeline):
    def __init__(
        self,
        model_id: str,
    ) -> None:
        # Loads (and if necessary downloads) the SetFit checkpoint once.
        self.model = SetFitModel.from_pretrained(model_id)

    def __call__(self, inputs: str) -> List[List[Dict[str, float]]]:
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list` containing one list of dicts, e.g.
            [[{"label": "positive", "score": 0.99}, ...]], one dict per class:
            - "label": the class name; falls back to the integer class index
              when the model exposes no id2label mapping.
            - "score": A score between 0 and 1 describing how confident the
              model is for this label/class.
        """
        probs = self.model.predict_proba([inputs], as_numpy=True)
        # predict_proba on a single-element batch is expected to return shape
        # (1, num_classes); accept a 1-D vector as well.  The previous
        # implementation returned None (implicitly) whenever ndim != 2.
        scores = probs[0] if probs.ndim == 2 else probs
        id2label = getattr(self.model, "id2label", {}) or {}
        return [
            [
                {"label": id2label.get(idx, idx), "score": float(prob)}
                for idx, prob in enumerate(scores)
            ]
        ]
|
0
|
hf_public_repos/api-inference-community/docker_images/setfit
|
hf_public_repos/api-inference-community/docker_images/setfit/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so user-relative paths work too.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Restore the original directory even if the body raised.
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        # Smoke test: the framework image must at least build successfully
        # from the directory containing the Dockerfile.
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/setfit
|
hf_public_repos/api-inference-community/docker_images/setfit/tests/test_api.py
|
import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
# Keys must match the task names registered in app.main.ALLOWED_TASKS.
TESTABLE_MODELS: Dict[str, str] = {
    "text-classification": "tomaarsen/setfit-all-MiniLM-L6-v2-sst2-32-shot"
}
# Every task name the generic test-suite knows about.  Tasks NOT implemented
# by this framework must raise when selected (see test_unsupported_tasks).
# Kept sorted and deduplicated: the original literal listed
# "feature-extraction" and "sentence-similarity" twice.
ALL_TASKS = {
    "audio-classification",
    "audio-to-audio",
    "automatic-speech-recognition",
    "conversational",
    "feature-extraction",
    "fill-mask",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "speech-segmentation",
    "summarization",
    "table-question-answering",
    "tabular-classification",
    "tabular-regression",
    "text2text-generation",
    "text-classification",
    "text-to-image",
    "text-to-speech",
    "token-classification",
    "zero-shot-classification",
}
class PipelineTestCase(TestCase):
    """Framework-agnostic checks on the task registry."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Every task this framework does NOT implement must fail fast with
        # EnvironmentError when selected via the TASK env variable.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images/setfit
|
hf_public_repos/api-inference-community/docker_images/setfit/tests/test_api_text_classification.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "text-classification" not in ALLOWED_TASKS,
    "text-classification not implemented",
)
class TextClassificationTestCase(TestCase):
    """HTTP-level tests for the text-classification route."""

    def setUp(self):
        # Point the app at the test model, remembering previous env values so
        # tearDown can restore them.
        model_id = TESTABLE_MODELS["text-classification"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "text-classification"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        from app.main import get_pipeline

        # Drop any pipeline cached by a previously-run test class (which may
        # have used a different task/model).
        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env variables mutated in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        # Both {"inputs": ...} and a bare JSON string body must be accepted
        # and yield a [[{"label": ..., "score": ...}, ...]] response.
        inputs = "It is a beautiful day outside"
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual(len(content), 1)
            self.assertEqual(type(content[0]), list)
            self.assertEqual(
                set(k for el in content[0] for k in el.keys()),
                {"label", "score"},
            )
        with TestClient(self.app) as client:
            response = client.post("/", json=inputs)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual(len(content), 1)
            self.assertEqual(type(content[0]), list)
            self.assertEqual(
                set(k for el in content[0] for k in el.keys()),
                {"label", "score"},
            )

    def test_malformed_question(self):
        # Invalid UTF-8 bodies must yield a 400 with a JSON error payload.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
            self.assertEqual(
                response.status_code,
                400,
            )
            self.assertEqual(
                response.content,
                b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
            )
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/diffusers/Dockerfile
|
FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu20.04
LABEL maintainer="Nicolas Patry <nicolas@huggingface.co>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
ENV DEBIAN_FRONTEND=noninteractive
# Install prerequisites
# (build toolchain + libraries needed to compile Python from source via pyenv)
RUN apt-get update && \
    apt-get install -y build-essential libssl-dev zlib1g-dev libbz2-dev \
    libreadline-dev libsqlite3-dev wget curl llvm libncurses5-dev libncursesw5-dev \
    xz-utils tk-dev libffi-dev liblzma-dev python3-openssl git
# Install pyenv
RUN curl https://pyenv.run | bash
# Set environment variables for pyenv
ENV PYENV_ROOT=/root/.pyenv
ENV PATH=$PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH
# Install your desired Python version
ARG PYTHON_VERSION=3.9.1
RUN pyenv install $PYTHON_VERSION && \
    pyenv global $PYTHON_VERSION && \
    pyenv rehash
# Upgrade pip and install your desired packages
ARG PIP_VERSION=22.3.1
# FIXME: We temporarily need to specify the setuptools version <70 due to the following issue
# https://stackoverflow.com/questions/78604018/importerror-cannot-import-name-packaging-from-pkg-resources-when-trying-to
# https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/15863#issuecomment-2125026282
RUN pip install --no-cache-dir --upgrade pip==${PIP_VERSION} setuptools'<70' wheel && \
    pip install --no-cache-dir torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2
WORKDIR /app
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
# Both caches point at the /data volume so model weights persist across runs.
ENV HUGGINGFACE_HUB_CACHE=/data
ENV DIFFUSERS_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
COPY --from=tiangolo/uvicorn-gunicorn:python3.8 /app/ /app
COPY --from=tiangolo/uvicorn-gunicorn:python3.8 /start.sh /
COPY --from=tiangolo/uvicorn-gunicorn:python3.8 /gunicorn_conf.py /
COPY app/ /app/app
COPY ./prestart.sh /app/
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
CMD ["/start.sh"]
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/diffusers/requirements.txt
|
# Serving stack
starlette==0.27.0
api-inference-community==0.0.36
# Model libraries
# to be replaced with diffusers 0.31.0 as soon as released
git+https://github.com/huggingface/diffusers.git@0f079b932d4382ad6675593f9a140b2a74c8cfb4
transformers==4.41.2
accelerate==0.31.0
hf_transfer==0.1.3
pydantic>=2
ftfy==6.1.1
sentencepiece==0.1.97
scipy==1.10.0
# Torch versions must match the ones preinstalled in the Dockerfile
torch==2.0.1
torchvision==0.15.2
torchaudio==2.0.2
invisible-watermark>=0.2.0
# Process management / web server
uvicorn>=0.23.2
gunicorn>=21.2.0
psutil>=5.9.5
aiohttp>=3.8.5
peft==0.11.1
protobuf==5.27.1
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/diffusers/prestart.sh
|
# Pre-start hook executed by /start.sh before gunicorn boots the main app.
echo "Prestart start at " $(date)
# When METRICS_ENABLED=1, spawn the side healthchecks/metrics ASGI app
# (app.healthchecks) in the background on METRICS_PORT (default 9400).
METRICS_ENABLED=${METRICS_ENABLED:-"0"}
if [ "$METRICS_ENABLED" = "1" ];then
    echo "Spawning metrics server"
    gunicorn -k "uvicorn.workers.UvicornWorker" --bind :${METRICS_PORT:-9400} "app.healthchecks:app" &
    pid=$!
    echo "Metrics server pid: $pid"
fi
echo "Prestart done at " $(date)
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers
|
hf_public_repos/api-inference-community/docker_images/diffusers/app/idle.py
|
import asyncio
import contextlib
import logging
import os
import signal
import time
LOG = logging.getLogger(__name__)
# Epoch timestamps of the last request start/end, updated by request_witnesses().
LAST_START = None
LAST_END = None
# When truthy, live_check_loop() terminates this worker after IDLE_TIMEOUT
# seconds without any request activity.
UNLOAD_IDLE = os.getenv("UNLOAD_IDLE", "").lower() in ("1", "true")
IDLE_TIMEOUT = int(os.getenv("IDLE_TIMEOUT", 15))
async def live_check_loop():
    """Background watchdog: SIGTERM this worker once it has been idle too long.

    Polls LAST_START/LAST_END every IDLE_TIMEOUT seconds. A worker is
    considered busy when a request has started but not ended; idle when the
    last request ended more than IDLE_TIMEOUT seconds ago.
    """
    global LAST_START, LAST_END
    pid = os.getpid()
    LOG.debug("Starting live check loop")
    while True:
        await asyncio.sleep(IDLE_TIMEOUT)
        LOG.debug("Checking whether we should unload anything from gpu")
        # Snapshot the globals so the checks below see a consistent pair even
        # if a request lands mid-iteration.
        last_start = LAST_START
        last_end = LAST_END
        LOG.debug("Checking pid %d activity", pid)
        if not last_start:
            # No request observed yet: keep waiting.
            continue
        if not last_end or last_start >= last_end:
            LOG.debug("Request likely being processed for pid %d", pid)
            continue
        now = time.time()
        last_request_age = now - last_end
        LOG.debug("Pid %d, last request age %s", pid, last_request_age)
        if last_request_age < IDLE_TIMEOUT:
            LOG.debug("Model recently active")
        else:
            LOG.debug("Inactive for too long. Leaving live check loop")
            break
    LOG.debug("Aborting this worker")
    os.kill(pid, signal.SIGTERM)
@contextlib.contextmanager
def request_witnesses():
    """Timestamp request start/end for the idle watchdog (live_check_loop)."""
    global LAST_START, LAST_END
    # Simple assignment, concurrency safe, no need for any lock
    LAST_START = time.time()
    try:
        yield
    finally:
        # Always recorded, even if the request handler raised.
        LAST_END = time.time()
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers
|
hf_public_repos/api-inference-community/docker_images/diffusers/app/healthchecks.py
|
"""
This file allows users to spawn some side service helping with giving a better view on the main ASGI app status.
The issue with the status route of the main application is that it gets unresponsive as soon as all workers get busy.
Thus, you cannot really use the said route as a healthcheck to decide whether your app is healthy or not.
Instead this module allows you to distinguish between a dead service (not able to even tcp connect to app port)
and a busy one (able to connect but not to process a trivial http request in time) as both states should result in
different actions (restarting the service vs scaling it). It also exposes some data to be
consumed as custom metrics, for example to be used in autoscaling decisions.
"""
import asyncio
import functools
import logging
import os
from collections import namedtuple
from typing import Optional
import aiohttp
import psutil
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import Response
from starlette.routing import Route
logger = logging.getLogger(__name__)
# Last rendered metrics payload; refreshed in-place by single_metrics_compute().
METRICS = ""
# Values exposed through the inference_app_status gauge.
STATUS_OK = 0
STATUS_BUSY = 1
STATUS_ERROR = 2
def metrics():
    """Return the most recently computed metrics payload."""
    logging.debug("Requesting metrics")
    payload = METRICS
    return payload
async def metrics_route(_request: Request) -> Response:
    """Serve the current metrics snapshot, whatever path was requested."""
    body = metrics()
    return Response(content=body)
# Catch-all route: every path on this side app serves the metrics payload.
routes = [
    Route("/{whatever:path}", metrics_route),
]
app = Starlette(routes=routes)
def reset_logging():
    """(Re)configure root logging; DEBUG when METRICS_DEBUG is truthy, else INFO."""
    debug_enabled = os.environ.get("METRICS_DEBUG", "false").lower() in ("1", "true")
    logging.basicConfig(
        level=logging.DEBUG if debug_enabled else logging.INFO,
        format="healthchecks - %(asctime)s - %(levelname)s - %(message)s",
        force=True,
    )
@app.on_event("startup")
async def startup_event():
    """Configure logging, then start the background metrics refresh task."""
    reset_logging()
    # Link between `api-inference-community` and framework code.
    asyncio.create_task(compute_metrics_loop(), name="compute_metrics")
@functools.lru_cache()
def get_listening_port():
    """Return the main app's listening port (MAIN_APP_PORT env, default 80)."""
    logger.debug("Get listening port")
    raw_port = os.environ.get("MAIN_APP_PORT", "80")
    try:
        return int(raw_port)
    except ValueError:
        logger.warning(
            "Main app port cannot be converted to an int, skipping and defaulting to 80"
        )
        return 80
async def find_app_process(
    listening_port: int,
) -> Optional[namedtuple("addr", ["ip", "port"])]:  # noqa
    """Return the local address of the gunicorn process LISTENing on
    ``listening_port``, or None when no such process can be found."""
    connections = psutil.net_connections()
    app_laddr = None
    for c in connections:
        if c.laddr.port != listening_port:
            logger.debug("Skipping listening connection bound to excluded port %s", c)
            continue
        if c.status == psutil.CONN_LISTEN:
            logger.debug("Found LISTEN conn %s", c)
            candidate = c.pid
            try:
                p = psutil.Process(candidate)
            except psutil.NoSuchProcess:
                # Process vanished between the net_connections() snapshot and now.
                continue
            if p.name() == "gunicorn":
                logger.debug("Found gunicorn process %s", p)
                app_laddr = c.laddr
                break
    return app_laddr
def count_current_conns(app_port: int) -> str:
    """Render a prometheus-style gauge counting ESTABLISHED conns to app_port."""
    established = [
        c
        for c in psutil.net_connections()
        if c.status == psutil.CONN_ESTABLISHED and c.laddr.port == app_port
    ]
    current_conns = len(established)
    logger.info("Current count of established connections to app: %d", current_conns)
    curr_conns_str = """# HELP inference_app_established_conns Established connection count for a given app.
# TYPE inference_app_established_conns gauge
inference_app_established_conns{{port="{:d}"}} {:d}
""".format(
        app_port, current_conns
    )
    return curr_conns_str
async def status_with_timeout(
    listening_port: int, app_laddr: Optional[namedtuple("addr", ["ip", "port"])]  # noqa
) -> str:
    """Probe the main app with a 0.5s budget and render its status gauge.

    Status is OK on a 200/'{"ok":"ok"}' reply, BUSY on timeout (port reachable
    but no worker free), ERROR otherwise (including app_laddr being None).
    """
    logger.debug("Checking application status")
    status = STATUS_OK
    if not app_laddr:
        status = STATUS_ERROR
    else:
        try:
            async with aiohttp.ClientSession(
                timeout=aiohttp.ClientTimeout(total=0.5)
            ) as session:
                url = "http://{}:{:d}/".format(app_laddr.ip, app_laddr.port)
                async with session.get(url) as resp:
                    status_code = resp.status
                    status_text = await resp.text()
                    logger.debug("Status code %s and text %s", status_code, status_text)
                    if status_code != 200 or status_text != '{"ok":"ok"}':
                        status = STATUS_ERROR
        except asyncio.TimeoutError:
            logger.debug("Asgi app seems busy, unable to reach it before timeout")
            status = STATUS_BUSY
        except Exception as e:
            logger.exception(e)
            status = STATUS_ERROR
    status_str = """# HELP inference_app_status Application health status (0: ok, 1: busy, 2: error).
# TYPE inference_app_status gauge
inference_app_status{{port="{:d}"}} {:d}
""".format(
        listening_port, status
    )
    return status_str
async def single_metrics_compute():
    """Recompute the METRICS payload once and optionally persist it to disk."""
    global METRICS
    listening_port = get_listening_port()
    app_laddr = await find_app_process(listening_port)
    current_conns = count_current_conns(listening_port)
    status = await status_with_timeout(listening_port, app_laddr)
    # Assignment is atomic, we should be safe without locking
    METRICS = current_conns + status
    # Persist metrics to the local ephemeral as well
    metrics_file = os.environ.get("METRICS_FILE")
    if metrics_file:
        # Bug fix: the file was previously opened in the default read mode,
        # so f.write() raised io.UnsupportedOperation and nothing was persisted.
        with open(metrics_file, "w") as f:
            f.write(METRICS)
@functools.lru_cache()
def get_polling_sleep():
    """Seconds between metric refreshes (METRICS_POLLING_INTERVAL, default 10)."""
    logger.debug("Get polling sleep interval")
    raw_value = os.environ.get("METRICS_POLLING_INTERVAL", 10)
    try:
        return float(raw_value)
    except ValueError:
        logger.warning(
            "Unable to cast METRICS_POLLING_INTERVAL env value %s to float. Defaulting to 10.",
            raw_value,
        )
        return 10.0
@functools.lru_cache()
def get_initial_delay():
    """Warm-up delay before the first metric refresh (METRICS_INITIAL_DELAY, default 30)."""
    logger.debug("Get polling initial delay")
    raw_value = os.environ.get("METRICS_INITIAL_DELAY", 30)
    try:
        return float(raw_value)
    except ValueError:
        logger.warning(
            "Unable to cast METRICS_INITIAL_DELAY env value %s to float. "
            "Defaulting to 30.",
            raw_value,
        )
        return 30.0
async def compute_metrics_loop():
    """Refresh METRICS forever, after an initial warm-up delay."""
    initial_delay = get_initial_delay()
    await asyncio.sleep(initial_delay)
    polling_sleep = get_polling_sleep()
    while True:
        await asyncio.sleep(polling_sleep)
        try:
            await single_metrics_compute()
        except Exception as e:
            # Metrics refresh is best-effort: log and keep looping.
            logger.error("Something wrong occurred while computing metrics")
            logger.exception(e)
if __name__ == "__main__":
    # Manual smoke test: compute metrics once and print them.
    reset_logging()
    try:
        # Bug fix: single_metrics_compute is a coroutine; calling it bare only
        # created a coroutine object that never ran (and METRICS stayed empty).
        asyncio.run(single_metrics_compute())
        logger.info("Metrics %s", metrics())
    except Exception as exc:
        logging.exception(exc)
        raise
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers
|
hf_public_repos/api-inference-community/docker_images/diffusers/app/main.py
|
import asyncio
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app import idle
from app.pipelines import ImageToImagePipeline, Pipeline, TextToImagePipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
# Maps the TASK env value to the pipeline class that serves it.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
    "text-to-image": TextToImagePipeline,
    "image-to-image": ImageToImagePipeline,
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Build (and cache) the pipeline selected by the TASK/MODEL_ID env vars."""
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    pipeline_cls = ALLOWED_TASKS.get(task)
    if pipeline_cls is None:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return pipeline_cls(model_id)
# GET on any path returns the liveness payload; POST on any path runs the pipeline.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
# DEBUG=1 opens CORS wide so a local frontend can hit the API during development.
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware
    middleware.append(
        Middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_headers=["*"],
            allow_methods=["*"],
        )
    )
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Set up logging, optionally start the idle watchdog, and warm the pipeline."""
    reset_logging()
    # Link between `api-inference-community` and framework code.
    if idle.UNLOAD_IDLE:
        asyncio.create_task(idle.live_check_loop(), name="live_check_loop")
    app.get_pipeline = get_pipeline
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
def reset_logging():
    """Force DEBUG-level root logging with a uniform timestamped format."""
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(message)s",
        level=logging.DEBUG,
        force=True,
    )
if __name__ == "__main__":
    # Running the module directly just exercises pipeline construction.
    reset_logging()
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers
|
hf_public_repos/api-inference-community/docker_images/diffusers/app/lora.py
|
import logging
import torch.nn as nn
from app import offline
from safetensors.torch import load_file
logger = logging.getLogger(__name__)
class LoRAPipelineMixin(offline.OfflineBestEffortMixin):
    """Mixin that lets a diffusers pipeline (``self.ldm``) hot-swap LoRA
    adapters, including pivotal-tuning textual embeddings, between requests.

    Host class must provide: ``self.ldm``, ``self.model_id``,
    ``self.current_lora_adapter``, ``self.current_tokens_loaded`` and
    ``self.use_auth_token``.
    """

    @staticmethod
    def _get_lora_weight_name(model_data):
        # Prefer the canonical pytorch_lora_weights.* files; otherwise fall
        # back to the first *.safetensors sibling found in the repo.
        weight_name_candidate = LoRAPipelineMixin._lora_weights_candidates(model_data)
        if weight_name_candidate:
            return weight_name_candidate
        file_to_load = next(
            (
                file.rfilename
                for file in model_data.siblings
                if file.rfilename.endswith(".safetensors")
            ),
            None,
        )
        if not file_to_load and not weight_name_candidate:
            raise ValueError("No *.safetensors file found for your LoRA")
        return file_to_load

    @staticmethod
    def _is_lora(model_data):
        # A repo counts as a LoRA if it ships canonical weight files or is
        # tagged "lora" in its model card.
        return LoRAPipelineMixin._lora_weights_candidates(model_data) or (
            model_data.cardData.get("tags")
            and "lora" in model_data.cardData.get("tags", [])
        )

    @staticmethod
    def _lora_weights_candidates(model_data):
        # safetensors wins over .bin: a .bin hit keeps scanning, a safetensors
        # hit breaks immediately.
        candidate = None
        for file in model_data.siblings:
            rfilename = str(file.rfilename)
            if rfilename.endswith("pytorch_lora_weights.bin"):
                candidate = rfilename
            elif rfilename.endswith("pytorch_lora_weights.safetensors"):
                candidate = rfilename
                break
        return candidate

    @staticmethod
    def _is_safetensors_pivotal(model_data):
        # True when the repo ships its pivotal-tuning embeddings as safetensors.
        embeddings_safetensors_exists = any(
            sibling.rfilename == "embeddings.safetensors"
            for sibling in model_data.siblings
        )
        return embeddings_safetensors_exists

    @staticmethod
    def _is_pivotal_tuning_lora(model_data):
        # Pivotal-tuning LoRAs ship learned token embeddings (.safetensors or .pti).
        return LoRAPipelineMixin._is_safetensors_pivotal(model_data) or any(
            sibling.rfilename == "embeddings.pti" for sibling in model_data.siblings
        )

    def _fuse_or_raise(self):
        # Fuse the freshly loaded LoRA into the base weights; on failure roll
        # back to a clean base model before propagating the error.
        try:
            self.ldm.fuse_lora(safe_fusing=True)
        except Exception as e:
            logger.exception(e)
            logger.warning("Unable to fuse LoRA adapter")
            self.ldm.unload_lora_weights()
            self.current_lora_adapter = None
            raise

    @staticmethod
    def _reset_tokenizer_and_encoder(tokenizer, text_encoder, token_to_remove):
        """Remove one added token from the tokenizer and shrink the text
        encoder's embedding matrix back to the tokenizer's size."""
        token_id = tokenizer(token_to_remove)["input_ids"][1]
        del tokenizer._added_tokens_decoder[token_id]
        del tokenizer._added_tokens_encoder[token_to_remove]
        tokenizer._update_trie()
        tokenizer_size = len(tokenizer)
        text_embedding_dim = text_encoder.get_input_embeddings().embedding_dim
        text_embedding_weights = text_encoder.get_input_embeddings().weight[
            :tokenizer_size
        ]
        text_embeddings_filtered = nn.Embedding(tokenizer_size, text_embedding_dim)
        text_embeddings_filtered.weight.data = text_embedding_weights
        text_encoder.set_input_embeddings(text_embeddings_filtered)

    def _unload_textual_embeddings(self):
        # Drop every <s0>, <s1>, ... token previously added by a pivotal-tuning
        # LoRA from both tokenizer/encoder pairs.
        if self.current_tokens_loaded > 0:
            for i in range(self.current_tokens_loaded):
                token_to_remove = f"<s{i}>"
                self._reset_tokenizer_and_encoder(
                    self.ldm.tokenizer, self.ldm.text_encoder, token_to_remove
                )
                self._reset_tokenizer_and_encoder(
                    self.ldm.tokenizer_2, self.ldm.text_encoder_2, token_to_remove
                )
        self.current_tokens_loaded = 0

    def _load_textual_embeddings(self, adapter, model_data):
        # Load pivotal-tuning embeddings into both text encoders, registering
        # one <s{i}> token per learned embedding row. No-op for plain LoRAs.
        if self._is_pivotal_tuning_lora(model_data):
            embedding_path = self._hub_repo_file(
                repo_id=adapter,
                filename="embeddings.safetensors"
                if self._is_safetensors_pivotal(model_data)
                else "embeddings.pti",
                repo_type="model",
            )
            embeddings = load_file(embedding_path)
            state_dict_clip_l = (
                embeddings.get("text_encoders_0")
                if "text_encoders_0" in embeddings
                else embeddings.get("clip_l", None)
            )
            state_dict_clip_g = (
                embeddings.get("text_encoders_1")
                if "text_encoders_1" in embeddings
                else embeddings.get("clip_g", None)
            )
            tokens_to_add = 0 if state_dict_clip_l is None else len(state_dict_clip_l)
            tokens_to_add_2 = 0 if state_dict_clip_g is None else len(state_dict_clip_g)
            # Both encoders must get the same number of tokens, otherwise skip.
            if tokens_to_add == tokens_to_add_2 and tokens_to_add > 0:
                if state_dict_clip_l is not None and len(state_dict_clip_l) > 0:
                    token_list = [f"<s{i}>" for i in range(tokens_to_add)]
                    self.ldm.load_textual_inversion(
                        state_dict_clip_l,
                        token=token_list,
                        text_encoder=self.ldm.text_encoder,
                        tokenizer=self.ldm.tokenizer,
                    )
                if state_dict_clip_g is not None and len(state_dict_clip_g) > 0:
                    token_list = [f"<s{i}>" for i in range(tokens_to_add_2)]
                    self.ldm.load_textual_inversion(
                        state_dict_clip_g,
                        token=token_list,
                        text_encoder=self.ldm.text_encoder_2,
                        tokenizer=self.ldm.tokenizer_2,
                    )
                logger.info("Text embeddings loaded for adapter %s", adapter)
            else:
                logger.info(
                    "No text embeddings were loaded due to invalid embeddings or a mismatch of token sizes "
                    "for adapter %s",
                    adapter,
                )
            self.current_tokens_loaded = tokens_to_add

    def _load_lora_adapter(self, kwargs):
        # Pops "lora_adapter" from the request kwargs and reconciles the
        # currently fused adapter with the requested one (load/replace/unload).
        adapter = kwargs.pop("lora_adapter", None)
        if adapter is not None:
            logger.info("LoRA adapter %s requested", adapter)
            if adapter != self.current_lora_adapter:
                model_data = self._hub_model_info(adapter)
                if not self._is_lora(model_data):
                    msg = f"Requested adapter {adapter:s} is not a LoRA adapter"
                    logger.error(msg)
                    raise ValueError(msg)
                # The adapter must declare this pipeline's model as its base.
                base_model = model_data.cardData["base_model"]
                is_list = isinstance(base_model, list)
                if (is_list and (self.model_id not in base_model)) or (
                    not is_list and self.model_id != base_model
                ):
                    msg = f"Requested adapter {adapter:s} is not a LoRA adapter for base model {self.model_id:s}"
                    logger.error(msg)
                    raise ValueError(msg)
                logger.info(
                    "LoRA adapter %s needs to be replaced with compatible adapter %s",
                    self.current_lora_adapter,
                    adapter,
                )
                if self.current_lora_adapter is not None:
                    self.ldm.unfuse_lora()
                    self.ldm.unload_lora_weights()
                    self._unload_textual_embeddings()
                self.current_lora_adapter = None
                logger.info("LoRA weights unloaded, loading new weights")
                weight_name = self._get_lora_weight_name(model_data=model_data)
                self.ldm.load_lora_weights(
                    adapter, weight_name=weight_name, use_auth_token=self.use_auth_token
                )
                self.current_lora_adapter = adapter
                self._fuse_or_raise()
                logger.info("LoRA weights loaded for adapter %s", adapter)
                self._load_textual_embeddings(adapter, model_data)
            else:
                logger.info("LoRA adapter %s already loaded", adapter)
                # Needed while a LoRA is loaded w/ model
                model_data = self._hub_model_info(adapter)
                if (
                    self._is_pivotal_tuning_lora(model_data)
                    and self.current_tokens_loaded == 0
                ):
                    self._load_textual_embeddings(adapter, model_data)
        elif self.current_lora_adapter is not None:
            logger.info(
                "No LoRA adapter requested, unloading weights and using base model %s",
                self.model_id,
            )
            self.ldm.unfuse_lora()
            self.ldm.unload_lora_weights()
            self._unload_textual_embeddings()
            self.current_lora_adapter = None
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers
|
hf_public_repos/api-inference-community/docker_images/diffusers/app/timing.py
|
import logging
from functools import wraps
from time import time
logger = logging.getLogger(__name__)


def timing(f):
    """Decorator logging the wall-clock duration of every call to *f*."""

    @wraps(f)
    def inner(*args, **kwargs):
        begin = time()
        try:
            return f(*args, **kwargs)
        finally:
            # Logged even when f raises, mirroring a try/finally stopwatch.
            logger.debug("Func: %r took: %.2f sec to execute", f.__name__, time() - begin)

    return inner
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers
|
hf_public_repos/api-inference-community/docker_images/diffusers/app/offline.py
|
import json
import logging
import os
from huggingface_hub import file_download, hf_api, hf_hub_download, model_info, utils
logger = logging.getLogger(__name__)
class OfflineBestEffortMixin(object):
    """Best-effort offline access to Hub files and model metadata.

    When ``self.offline_preferred`` is truthy, the local cache is tried first
    and the Hub is only contacted as a fallback. Host classes must define
    ``self.offline_preferred`` and ``self.use_auth_token``.
    """

    def _hub_repo_file(self, repo_id, filename, repo_type="model"):
        """Fetch a single repo file, preferring the local cache when offline
        mode is preferred, otherwise (or on cache miss) downloading it."""
        if self.offline_preferred:
            try:
                config_file = hf_hub_download(
                    repo_id,
                    filename,
                    token=self.use_auth_token,
                    local_files_only=True,
                    repo_type=repo_type,
                )
            except utils.LocalEntryNotFoundError:
                logger.info("Unable to fetch model index in local cache")
            else:
                return config_file
        return hf_hub_download(
            repo_id, filename, token=self.use_auth_token, repo_type=repo_type
        )

    def _hub_model_info(self, model_id):
        """
        This method tries to fetch locally cached model_info if any.
        If none, it requests the Hub. Useful for pre cached private models when no token is available
        """
        if self.offline_preferred:
            cache_root = os.getenv(
                "DIFFUSERS_CACHE", os.getenv("HUGGINGFACE_HUB_CACHE", "")
            )
            folder_name = file_download.repo_folder_name(
                repo_id=model_id, repo_type="model"
            )
            folder_path = os.path.join(cache_root, folder_name)
            logger.debug("Cache folder path %s", folder_path)
            # hub_model_info.json is a pre-seeded snapshot of the Hub metadata.
            filename = os.path.join(folder_path, "hub_model_info.json")
            try:
                with open(filename, "r") as f:
                    model_data = json.load(f)
            except OSError:
                logger.info(
                    "No cached model info found in file %s found for model %s. Fetching on the hub",
                    filename,
                    model_id,
                )
            else:
                model_data = hf_api.ModelInfo(**model_data)
                return model_data
        model_data = model_info(model_id, token=self.use_auth_token)
        return model_data
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers
|
hf_public_repos/api-inference-community/docker_images/diffusers/app/validation.py
|
import re

# Truthy flag values: "true", "yes" or "1", optionally surrounded by whitespace.
# Bug fix: the alternation must be grouped. Without (?:...) the anchors bound
# only to the first/last alternative, so strings like "yesterday", "trueX" or
# "21" were wrongly considered truthy.
STR_TO_BOOL = re.compile(r"^\s*(?:true|yes|1)\s*$", re.IGNORECASE)


def str_to_bool(s):
    """Return a truthy value iff *s* (stringified) is an affirmative flag."""
    return STR_TO_BOOL.match(str(s))
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers/app
|
hf_public_repos/api-inference-community/docker_images/diffusers/app/pipelines/image_to_image.py
|
import json
import logging
import os
import torch
from app import idle, offline, timing, validation
from app.pipelines import Pipeline
from diffusers import (
AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AutoPipelineForImage2Image,
ControlNetModel,
DiffusionPipeline,
DPMSolverMultistepScheduler,
KandinskyImg2ImgPipeline,
KandinskyPriorPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepth2ImgPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
StableDiffusionUpscalePipeline,
StableDiffusionXLImg2ImgPipeline,
StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
)
from PIL import Image
logger = logging.getLogger(__name__)
class ImageToImagePipeline(Pipeline, offline.OfflineBestEffortMixin):
    """Image-to-image diffusers pipeline wrapper.

    Resolves the concrete diffusers pipeline class from the repo's
    config.json/model_index.json ``_class_name`` and dispatches generation
    accordingly (ControlNet, SD/AltDiffusion img2img, unCLIP, Kandinsky, ...).
    """

    def __init__(self, model_id: str):
        use_auth_token = os.getenv("HF_API_TOKEN")
        self.use_auth_token = use_auth_token
        # This should allow us to make the image work with private models when no token is provided, if the said model
        # is already in local cache
        self.offline_preferred = validation.str_to_bool(os.getenv("OFFLINE_PREFERRED"))
        model_data = self._hub_model_info(model_id)
        kwargs = (
            {"safety_checker": None}
            if model_id.startswith("hf-internal-testing/")
            else {}
        )
        env_dtype = os.getenv("TORCH_DTYPE")
        if env_dtype:
            kwargs["torch_dtype"] = getattr(torch, env_dtype)
        elif torch.cuda.is_available():
            kwargs["torch_dtype"] = torch.float16
            if model_id == "stabilityai/stable-diffusion-xl-refiner-1.0":
                kwargs["variant"] = "fp16"
        # check if is controlnet or SD/AD
        config_file_name = None
        for file_name in ("config.json", "model_index.json"):
            if any(file.rfilename == file_name for file in model_data.siblings):
                config_file_name = file_name
                break
        if config_file_name:
            config_file = self._hub_repo_file(model_id, config_file_name)
            with open(config_file, "r") as f:
                config_dict = json.load(f)
            model_type = config_dict.get("_class_name", None)
        else:
            raise ValueError("Model type not found")
        # load according to model type
        if model_type == "ControlNetModel":
            model_to_load = (
                model_data.cardData["base_model"]
                if "base_model" in model_data.cardData
                else "runwayml/stable-diffusion-v1-5"
            )
            controlnet = ControlNetModel.from_pretrained(
                model_id, use_auth_token=use_auth_token, **kwargs
            )
            self.ldm = StableDiffusionControlNetPipeline.from_pretrained(
                model_to_load,
                controlnet=controlnet,
                use_auth_token=use_auth_token,
                **kwargs,
            )
        elif model_type in ["AltDiffusionPipeline", "AltDiffusionImg2ImgPipeline"]:
            self.ldm = AltDiffusionImg2ImgPipeline.from_pretrained(
                model_id, use_auth_token=use_auth_token, **kwargs
            )
        elif model_type in [
            "StableDiffusionPipeline",
            "StableDiffusionImg2ImgPipeline",
        ]:
            self.ldm = StableDiffusionImg2ImgPipeline.from_pretrained(
                model_id, use_auth_token=use_auth_token, **kwargs
            )
        elif model_type in ["StableUnCLIPPipeline", "StableUnCLIPImg2ImgPipeline"]:
            self.ldm = StableUnCLIPImg2ImgPipeline.from_pretrained(
                model_id, use_auth_token=use_auth_token, **kwargs
            )
        elif model_type in [
            "StableDiffusionImageVariationPipeline",
            "StableDiffusionInstructPix2PixPipeline",
            "StableDiffusionUpscalePipeline",
            "StableDiffusionLatentUpscalePipeline",
            "StableDiffusionDepth2ImgPipeline",
        ]:
            self.ldm = DiffusionPipeline.from_pretrained(
                model_id, use_auth_token=use_auth_token, **kwargs
            )
        elif model_type in ["KandinskyImg2ImgPipeline", "KandinskyPipeline"]:
            # Kandinsky needs a companion prior pipeline for image embeddings.
            model_to_load = "kandinsky-community/kandinsky-2-1-prior"
            self.ldm = KandinskyImg2ImgPipeline.from_pretrained(
                model_id, use_auth_token=use_auth_token, **kwargs
            )
            self.prior = KandinskyPriorPipeline.from_pretrained(
                model_to_load, use_auth_token=use_auth_token, **kwargs
            )
        else:
            logger.debug("Falling back to generic auto pipeline loader")
            self.ldm = AutoPipelineForImage2Image.from_pretrained(
                model_id, use_auth_token=use_auth_token, **kwargs
            )
        if isinstance(
            self.ldm,
            (
                StableUnCLIPImg2ImgPipeline,
                StableUnCLIPPipeline,
                StableDiffusionPipeline,
                StableDiffusionImg2ImgPipeline,
                AltDiffusionPipeline,
                AltDiffusionImg2ImgPipeline,
                StableDiffusionControlNetPipeline,
                StableDiffusionInstructPix2PixPipeline,
                StableDiffusionImageVariationPipeline,
                StableDiffusionDepth2ImgPipeline,
            ),
        ):
            # Swap in DPM-Solver++ for these families.
            self.ldm.scheduler = DPMSolverMultistepScheduler.from_config(
                self.ldm.scheduler.config
            )
        if not idle.UNLOAD_IDLE:
            self._model_to_gpu()

    @timing.timing
    def _model_to_gpu(self):
        # Timed: moving weights can dominate latency when UNLOAD_IDLE defers it.
        if torch.cuda.is_available():
            self.ldm.to("cuda")
            if isinstance(self.ldm, (KandinskyImg2ImgPipeline)):
                self.prior.to("cuda")

    def __call__(self, image: Image.Image, prompt: str = "", **kwargs) -> "Image.Image":
        """
        Args:
            prompt (:obj:`str`):
                a string containing some text
            image (:obj:`PIL.Image.Image`):
                a condition image
            kwargs:
                forwarded to the underlying pipeline (e.g. num_inference_steps,
                guidance_scale, negative_prompt)
        Return:
            A :obj:`PIL.Image.Image` with the raw image representation as PIL.
        """
        # Bug fix: kwargs were accepted here but never forwarded, so request
        # parameters such as num_inference_steps/guidance_scale were ignored
        # even though _process_req explicitly consumes them.
        if idle.UNLOAD_IDLE:
            with idle.request_witnesses():
                self._model_to_gpu()
                resp = self._process_req(image, prompt, **kwargs)
        else:
            resp = self._process_req(image, prompt, **kwargs)
        return resp

    def _process_req(self, image, prompt, **kwargs):
        # only one image per prompt is supported
        kwargs["num_images_per_prompt"] = 1
        if isinstance(
            self.ldm,
            (
                StableDiffusionPipeline,
                StableDiffusionImg2ImgPipeline,
                AltDiffusionPipeline,
                AltDiffusionImg2ImgPipeline,
                StableDiffusionControlNetPipeline,
                StableDiffusionInstructPix2PixPipeline,
                StableDiffusionUpscalePipeline,
                StableDiffusionLatentUpscalePipeline,
                StableDiffusionDepth2ImgPipeline,
            ),
        ):
            if "num_inference_steps" not in kwargs:
                kwargs["num_inference_steps"] = int(
                    os.getenv("DEFAULT_NUM_INFERENCE_STEPS", "25")
                )
            images = self.ldm(prompt, image, **kwargs)["images"]
            return images[0]
        elif isinstance(self.ldm, StableDiffusionXLImg2ImgPipeline):
            if "num_inference_steps" not in kwargs:
                kwargs["num_inference_steps"] = int(
                    os.getenv("DEFAULT_NUM_INFERENCE_STEPS", "25")
                )
            # SDXL expects 3-channel input.
            image = image.convert("RGB")
            images = self.ldm(prompt, image=image, **kwargs)["images"]
            return images[0]
        elif isinstance(self.ldm, (StableUnCLIPImg2ImgPipeline, StableUnCLIPPipeline)):
            if "num_inference_steps" not in kwargs:
                kwargs["num_inference_steps"] = int(
                    os.getenv("DEFAULT_NUM_INFERENCE_STEPS", "25")
                )
            # image comes first
            images = self.ldm(image, prompt, **kwargs)["images"]
            return images[0]
        elif isinstance(self.ldm, StableDiffusionImageVariationPipeline):
            if "num_inference_steps" not in kwargs:
                kwargs["num_inference_steps"] = int(
                    os.getenv("DEFAULT_NUM_INFERENCE_STEPS", "25")
                )
            # only image is needed
            images = self.ldm(image, **kwargs)["images"]
            return images[0]
        elif isinstance(self.ldm, (KandinskyImg2ImgPipeline)):
            if "num_inference_steps" not in kwargs:
                kwargs["num_inference_steps"] = int(
                    os.getenv("DEFAULT_NUM_INFERENCE_STEPS", "100")
                )
            # not all args are supported by the prior
            prior_args = {
                "num_inference_steps": kwargs["num_inference_steps"],
                "num_images_per_prompt": kwargs["num_images_per_prompt"],
                "negative_prompt": kwargs.get("negative_prompt", None),
                "guidance_scale": kwargs.get("guidance_scale", 7),
            }
            if "guidance_scale" not in kwargs:
                default_guidance_scale = os.getenv("DEFAULT_GUIDANCE_SCALE")
                if default_guidance_scale is not None:
                    kwargs["guidance_scale"] = float(default_guidance_scale)
                    prior_args["guidance_scale"] = float(default_guidance_scale)
                # Else, don't specify anything, leave the default behaviour
            image_emb, zero_image_emb = self.prior(prompt, **prior_args).to_tuple()
            images = self.ldm(
                prompt,
                image=image,
                image_embeds=image_emb,
                negative_image_embeds=zero_image_emb,
                **kwargs,
            )["images"]
            return images[0]
        else:
            raise ValueError("Model type not found or pipeline not implemented")
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers/app
|
hf_public_repos/api-inference-community/docker_images/diffusers/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Abstract base class for all task pipelines.

    Concrete pipelines are constructed from a model id and invoked with
    task-specific inputs via ``__call__``.
    """

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Raised when a pipeline fails for a framework-specific reason."""
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers/app
|
hf_public_repos/api-inference-community/docker_images/diffusers/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.image_to_image import ImageToImagePipeline
from app.pipelines.text_to_image import TextToImagePipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers/app
|
hf_public_repos/api-inference-community/docker_images/diffusers/app/pipelines/text_to_image.py
|
import importlib
import json
import logging
import os
from typing import TYPE_CHECKING
import torch
from app import idle, lora, offline, timing, validation
from app.pipelines import Pipeline
from diffusers import (
AutoencoderKL,
AutoPipelineForText2Image,
DiffusionPipeline,
EulerAncestralDiscreteScheduler,
)
from diffusers.schedulers.scheduling_utils import KarrasDiffusionSchedulers
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from PIL import Image
class TextToImagePipeline(
    Pipeline, lora.LoRAPipelineMixin, offline.OfflineBestEffortMixin
):
    """Text-to-image pipeline wrapping a diffusers model from the Hub.

    Handles three repo kinds: plain diffusers pipelines (detected via
    ``model_index.json``), the SDXL base checkpoint (loaded with an fp16-safe
    VAE), and LoRA adapter repos (base model loaded here, adapter applied
    lazily per request by the LoRA mixin).
    """

    def __init__(self, model_id: str):
        # LoRA adapter currently applied to the model, if any (managed by the
        # lora mixin on a per-request basis).
        self.current_lora_adapter = None
        self.model_id = None
        self.current_tokens_loaded = 0
        self.use_auth_token = os.getenv("HF_API_TOKEN")
        # This should allow us to make the image work with private models when no token is provided, if the said model
        # is already in local cache
        self.offline_preferred = validation.str_to_bool(os.getenv("OFFLINE_PREFERRED"))
        model_data = self._hub_model_info(model_id)
        # Internal testing models are loaded without a safety checker.
        kwargs = (
            {"safety_checker": None}
            if model_id.startswith("hf-internal-testing/")
            else {}
        )
        # Dtype selection: explicit TORCH_DTYPE env var wins; otherwise use
        # fp16 when a GPU is available.
        env_dtype = os.getenv("TORCH_DTYPE")
        if env_dtype:
            kwargs["torch_dtype"] = getattr(torch, env_dtype)
        elif torch.cuda.is_available():
            kwargs["torch_dtype"] = torch.float16
        has_model_index = any(
            file.rfilename == "model_index.json" for file in model_data.siblings
        )
        # Classify the repo: LoRA adapter vs full diffusers pipeline.
        if self._is_lora(model_data):
            model_type = "LoraModel"
        elif has_model_index:
            config_file = self._hub_repo_file(model_id, "model_index.json")
            with open(config_file, "r") as f:
                config_dict = json.load(f)
            model_type = config_dict.get("_class_name", None)
        else:
            raise ValueError("Model type not found")
        if model_type == "LoraModel":
            model_to_load = model_data.cardData["base_model"]
            self.model_id = model_to_load
            if not model_to_load:
                raise ValueError(
                    "No `base_model` found. Please include a `base_model` on your README.md tags"
                )
            self._load_sd_with_sdxl_fix(model_to_load, **kwargs)
            # The lora will actually be lazily loaded on the fly per request
            self.current_lora_adapter = None
        else:
            if model_id == "stabilityai/stable-diffusion-xl-base-1.0":
                self._load_sd_with_sdxl_fix(model_id, **kwargs)
            else:
                self.ldm = AutoPipelineForText2Image.from_pretrained(
                    model_id, use_auth_token=self.use_auth_token, **kwargs
                )
            self.model_id = model_id
        # Karras-family schedulers can be swapped for the faster
        # EulerAncestral scheduler with fewer default steps (see _process_req).
        self.is_karras_compatible = (
            self.ldm.__class__.__init__.__annotations__.get("scheduler", None)
            == KarrasDiffusionSchedulers
        )
        if self.is_karras_compatible:
            self.ldm.scheduler = EulerAncestralDiscreteScheduler.from_config(
                self.ldm.scheduler.config
            )
        # Remembered so a per-request custom scheduler can be reverted.
        self.default_scheduler = self.ldm.scheduler
        if not idle.UNLOAD_IDLE:
            self._model_to_gpu()

    def _load_sd_with_sdxl_fix(self, model_id, **kwargs):
        """Load a DiffusionPipeline; for SDXL base, swap in the fp16-safe VAE."""
        if model_id == "stabilityai/stable-diffusion-xl-base-1.0":
            vae = AutoencoderKL.from_pretrained(
                "madebyollin/sdxl-vae-fp16-fix",
                torch_dtype=torch.float16,  # load fp16 fix VAE
            )
            kwargs["vae"] = vae
            kwargs["variant"] = "fp16"
        self.ldm = DiffusionPipeline.from_pretrained(
            model_id, use_auth_token=self.use_auth_token, **kwargs
        )

    @timing.timing
    def _model_to_gpu(self):
        # No-op on CPU-only hosts.
        if torch.cuda.is_available():
            self.ldm.to("cuda")

    def __call__(self, inputs: str, **kwargs) -> "Image.Image":
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`PIL.Image.Image` with the raw image representation as PIL.
        """
        # Check if users set a custom scheduler and pop if from the kwargs if so
        custom_scheduler = None
        if "scheduler" in kwargs:
            custom_scheduler = kwargs["scheduler"]
            kwargs.pop("scheduler")
        if custom_scheduler:
            compatibles = self.ldm.scheduler.compatibles
            # Check if the scheduler is compatible
            is_compatible_scheduler = [
                cls for cls in compatibles if cls.__name__ == custom_scheduler
            ]
            # In case of a compatible scheduler, swap to that for inference
            if is_compatible_scheduler:
                # Import the scheduler dynamically
                SchedulerClass = getattr(
                    importlib.import_module("diffusers.schedulers"), custom_scheduler
                )
                self.ldm.scheduler = SchedulerClass.from_config(
                    self.ldm.scheduler.config
                )
            else:
                logger.info("%s scheduler not loaded: incompatible", custom_scheduler)
                self.ldm.scheduler = self.default_scheduler
        else:
            # Revert any scheduler left over from a previous request.
            self.ldm.scheduler = self.default_scheduler
        self._load_lora_adapter(kwargs)
        if idle.UNLOAD_IDLE:
            # Record request activity so the idle loop can unload quiet workers.
            with idle.request_witnesses():
                self._model_to_gpu()
                resp = self._process_req(inputs, **kwargs)
        else:
            resp = self._process_req(inputs, **kwargs)
        return resp

    def _process_req(self, inputs, **kwargs):
        """Run the diffusion pipeline and return the first generated image."""
        # only one image per prompt is supported
        kwargs["num_images_per_prompt"] = 1
        if "num_inference_steps" not in kwargs:
            default_num_steps = os.getenv("DEFAULT_NUM_INFERENCE_STEPS")
            if default_num_steps:
                kwargs["num_inference_steps"] = int(default_num_steps)
            elif self.is_karras_compatible:
                # EulerAncestral (Karras family) produces good images in ~20 steps.
                kwargs["num_inference_steps"] = 20
            # Else, don't specify anything, leave the default behaviour
        if "guidance_scale" not in kwargs:
            default_guidance_scale = os.getenv("DEFAULT_GUIDANCE_SCALE")
            if default_guidance_scale is not None:
                kwargs["guidance_scale"] = float(default_guidance_scale)
            # Else, don't specify anything, leave the default behaviour
        if "seed" in kwargs:
            # Translate the user-facing "seed" parameter into a torch Generator.
            seed = int(kwargs["seed"])
            generator = torch.Generator().manual_seed(seed)
            kwargs["generator"] = generator
            kwargs.pop("seed")
        images = self.ldm(inputs, **kwargs)["images"]
        return images[0]
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers
|
hf_public_repos/api-inference-community/docker_images/diffusers/tests/test_api_text_to_image.py
|
import os
from io import BytesIO
from unittest import TestCase, skipIf
import PIL
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "text-to-image" not in ALLOWED_TASKS,
    "text-to-image not implemented",
)
@parameterized_class(
    [{"model_id": model_id} for model_id in TESTABLE_MODELS["text-to-image"]]
)
class TextToImageTestCase(TestCase):
    """End-to-end API tests for the text-to-image task."""

    @classmethod
    def setUpClass(cls):
        from app.main import get_pipeline

        # Drop any pipeline cached by a previously-run test class.
        get_pipeline.cache_clear()

    def setUp(self):
        # Stash the current env so tearDown can put it back.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "text-to-image"
        from app.main import app

        self.app = app

    def tearDown(self):
        # Restore (or remove) each env var overwritten in setUp.
        for key, previous in (
            ("MODEL_ID", self.old_model_id),
            ("TASK", self.old_task),
        ):
            if previous is None:
                del os.environ[key]
            else:
                os.environ[key] = previous

    def test_simple(self):
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": "soap bubble"})
        self.assertEqual(response.status_code, 200)
        image = PIL.Image.open(BytesIO(response.content))
        self.assertTrue(isinstance(image, PIL.Image.Image))

    def test_malformed_input(self):
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.content,
            b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
        )
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers
|
hf_public_repos/api-inference-community/docker_images/diffusers/tests/test_api_image_to_image.py
|
import base64
import os
from io import BytesIO
from unittest import TestCase, skipIf
import PIL
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "image-to-image" not in ALLOWED_TASKS,
    "image-to-image not implemented",
)
@parameterized_class(
    [{"model_id": model_id} for model_id in TESTABLE_MODELS["image-to-image"]]
)
class ImageToImageTestCase(TestCase):
    """End-to-end API tests for the image-to-image task."""

    @classmethod
    def setUpClass(cls):
        from app.main import get_pipeline

        # Drop any pipeline cached by a previously-run test class.
        get_pipeline.cache_clear()

    def setUp(self):
        # Stash the current env so tearDown can put it back.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "image-to-image"
        from app.main import app

        self.app = app

    def tearDown(self):
        # Restore (or remove) each env var overwritten in setUp.
        for key, previous in (
            ("MODEL_ID", self.old_model_id),
            ("TASK", self.old_task),
        ):
            if previous is None:
                del os.environ[key]
            else:
                os.environ[key] = previous

    @staticmethod
    def _jpeg_payload() -> str:
        # Build a tiny in-memory JPEG and return it base64-encoded.
        image = PIL.Image.new("RGB", (64, 64))
        buffer = BytesIO()
        image.save(buffer, format="JPEG")
        return base64.b64encode(buffer.getvalue()).decode("utf-8")

    def test_simple(self):
        with TestClient(self.app) as client:
            response = client.post(
                "/",
                json={
                    "image": self._jpeg_payload(),
                    "parameters": {"prompt": "soap bubble"},
                },
            )
        self.assertEqual(response.status_code, 200)
        image = PIL.Image.open(BytesIO(response.content))
        self.assertTrue(isinstance(image, PIL.Image.Image))

    def test_malformed_input(self):
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
        self.assertEqual(response.status_code, 400)
        self.assertTrue(
            b'{"error":"cannot identify image file <_io.BytesIO object at'
            in response.content
        )
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers
|
hf_public_repos/api-inference-community/docker_images/diffusers/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager that temporarily switches the working directory.

    On entry, chdir into ``newPath`` (with a leading ``~`` expanded); on
    exit, chdir back to wherever the process was before entering, even if
    the body raised.
    """

    def __init__(self, newPath):
        # Expand "~" so callers may pass home-relative paths.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    """Smoke-test that the docker image builds from the repo root."""

    def test_can_build_docker_image(self):
        # The Dockerfile lives one directory above tests/.
        repo_root = os.path.dirname(os.path.dirname(__file__))
        with cd(repo_root):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/diffusers
|
hf_public_repos/api-inference-community/docker_images/diffusers/tests/test_api.py
|
import os
from typing import Dict, List
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, List[str]] = {
    "text-to-image": ["hf-internal-testing/tiny-stable-diffusion-pipe-no-safety"],
    "image-to-image": [
        "hf-internal-testing/tiny-controlnet",
        "hf-internal-testing/tiny-stable-diffusion-pix2pix",
    ],
}
# Tasks known to the wider API; those NOT implemented by this image must make
# get_pipeline() raise (see test_unsupported_tasks). Fix: the literal used to
# list seven tasks twice — harmless in a set, but misleading to read; each
# task now appears exactly once.
ALL_TASKS = {
    "audio-classification",
    "audio-to-audio",
    "automatic-speech-recognition",
    "conversational",
    "feature-extraction",
    "fill-mask",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "speech-segmentation",
    "summarization",
    "table-question-answering",
    "tabular-classification",
    "tabular-regression",
    "text-classification",
    "text-to-image",
    "text-to-speech",
    "text2text-generation",
    "token-classification",
    "zero-shot-classification",
}
class PipelineTestCase(TestCase):
    """Sanity checks on the task registry of this docker image."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Every advertised-but-unimplemented task must be rejected.
        for unsupported_task in ALL_TASKS - ALLOWED_TASKS.keys():
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/mindspore/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"
# Add any system dependency here
RUN apt-get update -y && apt-get install libglib2.0-dev libsm6 libxrender1 libgl1-mesa-glx -y
COPY requirements.txt /app
RUN /usr/local/bin/python -m pip install --upgrade pip && \
pip install --no-cache-dir -r requirements.txt
COPY prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, the app will simply never finish loading:
# workers keep getting killed before they are done initializing.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/mindspore/requirements.txt
|
starlette==0.27.0
api-inference-community==0.0.25
huggingface_hub==0.11.0
tinyms>=0.3.2
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/mindspore/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/mindspore
|
hf_public_repos/api-inference-community/docker_images/mindspore/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import ImageClassificationPipeline, Pipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"image-classification": ImageClassificationPipeline
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Build (and memoize) the pipeline selected by the TASK/MODEL_ID env vars.

    Raises:
        EnvironmentError: if TASK is not one of the implemented tasks.
    """
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    pipeline_cls = ALLOWED_TASKS.get(task)
    if pipeline_cls is None:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return pipeline_cls(model_id)
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
try:
get_pipeline()
except Exception:
# We can fail so we can show exception later.
pass
if __name__ == "__main__":
try:
get_pipeline()
except Exception:
# We can fail so we can show exception later.
pass
|
0
|
hf_public_repos/api-inference-community/docker_images/mindspore/app
|
hf_public_repos/api-inference-community/docker_images/mindspore/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any, Optional
class Pipeline(ABC):
    """Abstract base class for task pipelines served by this docker image."""

    # Populated by concrete pipelines / app wiring.
    task: Optional[str] = None
    model_id: Optional[str] = None

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    """Base exception for errors raised inside a pipeline."""

    pass
|
0
|
hf_public_repos/api-inference-community/docker_images/mindspore/app
|
hf_public_repos/api-inference-community/docker_images/mindspore/app/pipelines/image_classification.py
|
import json
import os
from typing import TYPE_CHECKING, Any, Dict, List
import tinyms as ts
from app.pipelines import Pipeline
from huggingface_hub import snapshot_download
from tinyms import Tensor, model, vision
from tinyms.primitives import Softmax
if TYPE_CHECKING:
from PIL import Image
ALLOWED_MODEL = {
"LeNet5": model.lenet5,
"ResNet50": model.resnet50,
"MobileNetV2": model.mobilenetv2,
}
ALLOWED_TRANSFORM = {
"mnist": vision.mnist_transform,
"cifar10": vision.cifar10_transform,
"imagenet2012": vision.imagefolder_transform,
}
def load_tranform_func(config):
    """Return the preprocessing transform for the config's dataset.

    NOTE(review): the name keeps the historical "tranform" spelling because
    callers in this module rely on it.

    Raises:
        EnvironmentError: when `dataset_transform` names an unknown dataset.
    """
    dataset = config.get("dataset_transform")
    transform = ALLOWED_TRANSFORM.get(dataset)
    if transform is None:
        raise EnvironmentError(
            f"Currently doesn't supports dataset {dataset} transform!"
        )
    return transform
def load_config(config_json_file):
    """Read a UTF-8 JSON file and return its parsed content."""
    with open(config_json_file, "r", encoding="utf-8") as reader:
        return json.loads(reader.read())
def load_model_config_from_hf(model_id):
    """Download `model_id` from the Hub and build its TinyMS model.

    Expects the repo to contain a `config.json` (with `architecture` and
    `num_classes`) and a `mindspore_model.ckpt` checkpoint.

    Returns:
        A `(ms_model, config)` tuple: the loaded TinyMS Model and the parsed
        config dict.

    Raises:
        EnvironmentError: missing config/checkpoint file, or an architecture
            outside ALLOWED_MODEL.
    """
    repo_path = snapshot_download(model_id)
    config_json_file = os.path.join(repo_path, "config.json")
    if not os.path.exists(config_json_file):
        raise EnvironmentError(
            f"The path of the config.json file {config_json_file} doesn't exist!"
        )
    config = load_config(config_json_file)
    architecture = config.get("architecture")
    if architecture not in ALLOWED_MODEL:
        # Bug fix: the message previously interpolated the `tinyms.model`
        # module object (f"...{model}...") instead of the architecture name
        # read from the config.
        raise EnvironmentError(f"Currently doesn't supports {architecture} model!")
    net_func = ALLOWED_MODEL.get(architecture)
    class_num = config.get("num_classes")
    net = net_func(class_num=class_num, is_training=False)
    ms_model = model.Model(net)
    model_file = os.path.join(repo_path, "mindspore_model.ckpt")
    if not os.path.exists(model_file):
        raise EnvironmentError(
            f"The path of the model file {model_file} doesn't exist!"
        )
    ms_model.load_checkpoint(model_file)
    return ms_model, config
class ImageClassificationPipeline(Pipeline):
    """Image-classification pipeline backed by a TinyMS/MindSpore checkpoint."""

    def __init__(self, model_id: str):
        self.model, self.config = load_model_config_from_hf(model_id)
        # Obtain labels
        self.id2label = self.config.get("id2label")
        # Get dataset transform function
        self.tranform_func = load_tranform_func(self.config)
        # Return at most the top 5 predicted classes
        self.top_k = 5

    def __call__(self, inputs: "Image.Image") -> List[Dict[str, Any]]:
        """
        Args:
            inputs (:obj:`PIL.Image`):
                The raw image representation as PIL.
                No transformation made whatsoever
                from the input. Make all necessary transformations here.
        Return:
            A :obj:`list`:. The list contains items that are dicts should be liked {"label": "XXX", "score": 0.82}
            It is preferred if the returned list is in decreasing `score` order
        """
        # Preprocess data
        img_data = self.tranform_func(inputs)
        input_data = ts.array(img_data.tolist(), dtype=img_data.dtype.name)
        # Execute model prediction (expand_dims adds the batch dimension)
        preds = self.model.predict(ts.expand_dims(input_data, 0))
        # Postprocess data: turn logits into probabilities
        softmax = Softmax()
        pred_outputs = softmax(Tensor(preds, dtype=ts.float32)).asnumpy()
        # id2label is keyed by string class indices (JSON object keys)
        labels = [
            {"label": str(self.id2label[str(i)]), "score": float(pred_outputs[0][i])}
            for i in range(len(pred_outputs[0]))
        ]
        return sorted(labels, key=lambda tup: tup["score"], reverse=True)[: self.top_k]
|
0
|
hf_public_repos/api-inference-community/docker_images/mindspore/app
|
hf_public_repos/api-inference-community/docker_images/mindspore/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.image_classification import ImageClassificationPipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/mindspore
|
hf_public_repos/api-inference-community/docker_images/mindspore/tests/test_api_image_classification.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "image-classification" not in ALLOWED_TASKS,
    "image-classification not implemented",
)
class ImageClassificationTestCase(TestCase):
    """End-to-end API tests for the image-classification task."""

    def setUp(self):
        model_id = TESTABLE_MODELS["image-classification"]
        # Save the current env so tearDown can restore it.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "image-classification"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        from app.main import get_pipeline

        # Drop any pipeline cached by a previously-run test class.
        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env vars overwritten in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        """Load a fixture image from tests/samples as raw bytes."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def test_simple(self):
        bpayload = self.read("0.jpg")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        # Response must be a list of {"label": str, "score": float} dicts.
        self.assertEqual(type(content), list)
        self.assertEqual(set(type(el) for el in content), {dict})
        self.assertEqual(
            set((k, type(v)) for el in content for (k, v) in el.items()),
            {("label", str), ("score", float)},
        )

    def test_different_resolution(self):
        bpayload = self.read("5.jpg")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(set(type(el) for el in content), {dict})
        self.assertEqual(
            set(k for el in content for k in el.keys()), {"label", "score"}
        )
|
0
|
hf_public_repos/api-inference-community/docker_images/mindspore
|
hf_public_repos/api-inference-community/docker_images/mindspore/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager that temporarily switches the working directory.

    Enters ``newPath`` (with a leading ``~`` expanded) and always returns
    to the previous directory on exit, even when the body raised.
    """

    def __init__(self, newPath):
        # Expand "~" so callers may pass home-relative paths.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    """Smoke-test that the docker image builds from the repo root."""

    def test_can_build_docker_image(self):
        # The Dockerfile lives one directory above tests/.
        repo_root = os.path.dirname(os.path.dirname(__file__))
        with cd(repo_root):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/mindspore
|
hf_public_repos/api-inference-community/docker_images/mindspore/tests/test_api.py
|
import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {"image-classification": "mindspore-ai/LeNet"}
ALL_TASKS = {"image-classification"}
class PipelineTestCase(TestCase):
    """Sanity checks on the task registry of this docker image."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                # Bug fix: get_pipeline() takes no arguments — it reads TASK
                # and MODEL_ID from the environment. Calling it as
                # get_pipeline(task, model_id="XX") raised TypeError, never
                # exercising the EnvironmentError path this test asserts.
                # Mirror the diffusers image's test instead.
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/peft/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="Nicolas Patry <nicolas@huggingface.co>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
RUN pip install --no-cache-dir torch==2.0.1
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
# Uncomment if you want to load the model once before starting the asgi app
# COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
ENV PEFT_CACHE=/data
ENV HF_HOME=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, the app will simply never finish loading:
# workers keep getting killed before they are done initializing.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/peft/requirements.txt
|
starlette==0.27.0
api-inference-community==0.0.31
huggingface_hub==0.18.0
safetensors==0.3.1
peft==0.6.2
transformers==4.35.2
accelerate>=0.21.0
hf_transfer==0.1.3
pydantic==1.8.2
ftfy==6.1.1
sentencepiece==0.1.97
scipy==1.10.0
torch==2.0.1
pydantic<2
#Dummy.
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/peft/prestart.sh
|
echo "Prestart start at " $(date)
python app/main.py
echo "Prestart done at " $(date)
|
0
|
hf_public_repos/api-inference-community/docker_images/peft
|
hf_public_repos/api-inference-community/docker_images/peft/app/idle.py
|
import asyncio
import contextlib
import logging
import os
import signal
import time
LOG = logging.getLogger(__name__)
LAST_START = None
LAST_END = None
UNLOAD_IDLE = os.getenv("UNLOAD_IDLE", "").lower() in ("1", "true")
IDLE_TIMEOUT = int(os.getenv("IDLE_TIMEOUT", 15))
async def live_check_loop():
    """Poll every IDLE_TIMEOUT seconds and SIGTERM this worker once idle.

    A worker counts as idle when its last request finished more than
    IDLE_TIMEOUT seconds ago. Killing the worker lets the process manager
    respawn a fresh one with nothing loaded, releasing memory.
    """
    global LAST_START, LAST_END
    pid = os.getpid()
    LOG.debug("Starting live check loop")
    while True:
        await asyncio.sleep(IDLE_TIMEOUT)
        LOG.debug("Checking whether we should unload anything from gpu")
        # Snapshot both globals once so the checks below are consistent
        # even if a request updates them concurrently.
        last_start = LAST_START
        last_end = LAST_END
        LOG.debug("Checking pid %d activity", pid)
        if not last_start:
            # No request witnessed yet; nothing to unload.
            continue
        if not last_end or last_start >= last_end:
            # A request started but has not finished: worker is busy.
            LOG.debug("Request likely being processed for pid %d", pid)
            continue
        now = time.time()
        last_request_age = now - last_end
        LOG.debug("Pid %d, last request age %s", pid, last_request_age)
        if last_request_age < IDLE_TIMEOUT:
            LOG.debug("Model recently active")
        else:
            LOG.debug("Inactive for too long. Leaving live check loop")
            break
    LOG.debug("Aborting this worker")
    os.kill(pid, signal.SIGTERM)
@contextlib.contextmanager
def request_witnesses():
    """Mark the span of a request so live_check_loop can detect idleness."""
    global LAST_START, LAST_END
    # Simple assignment, concurrency safe, no need for any lock
    LAST_START = time.time()
    try:
        yield
    finally:
        # Recorded even when the request handler raised.
        LAST_END = time.time()
|
0
|
hf_public_repos/api-inference-community/docker_images/peft
|
hf_public_repos/api-inference-community/docker_images/peft/app/main.py
|
import asyncio
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app import idle
from app.pipelines import Pipeline, TextGenerationPipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"text-generation": TextGenerationPipeline,
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Build (once, cached) the pipeline selected by TASK / MODEL_ID env vars.

    Raises:
        EnvironmentError: when TASK is not an implemented task.
    """
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    if task not in ALLOWED_TASKS:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return ALLOWED_TASKS[task](model_id)
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
reset_logging()
# Link between `api-inference-community` and framework code.
if idle.UNLOAD_IDLE:
asyncio.create_task(idle.live_check_loop(), name="live_check_loop")
app.get_pipeline = get_pipeline
try:
get_pipeline()
except Exception:
# We can fail so we can show exception later.
pass
def reset_logging():
    """(Re)configure root logging: DEBUG level, timestamped message format."""
    fmt = "%(asctime)s - %(levelname)s - %(message)s"
    # force=True replaces any handlers already attached to the root logger.
    logging.basicConfig(level=logging.DEBUG, format=fmt, force=True)
if __name__ == "__main__":
reset_logging()
try:
get_pipeline()
except Exception:
# We can fail so we can show exception later.
pass
|
0
|
hf_public_repos/api-inference-community/docker_images/peft
|
hf_public_repos/api-inference-community/docker_images/peft/app/timing.py
|
import logging
from functools import wraps
from time import time
logger = logging.getLogger(__name__)
def timing(f):
    """Decorator that logs, at DEBUG level, how long each call to ``f`` took."""

    @wraps(f)
    def inner(*args, **kwargs):
        start = time()
        try:
            return f(*args, **kwargs)
        finally:
            # Logged even when f raised.
            logger.debug(
                "Func: %r took: %.2f sec to execute", f.__name__, time() - start
            )

    return inner
|
0
|
hf_public_repos/api-inference-community/docker_images/peft/app
|
hf_public_repos/api-inference-community/docker_images/peft/app/pipelines/text_generation.py
|
import logging
import os
import torch
from app import idle, timing
from app.pipelines import Pipeline
from huggingface_hub import model_info
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
logger = logging.getLogger(__name__)
class TextGenerationPipeline(Pipeline):
    """Causal text generation with a PEFT adapter loaded on top of its base model."""

    def __init__(self, model_id: str):
        use_auth_token = os.getenv("HF_API_TOKEN")
        model_data = model_info(model_id, token=use_auth_token)
        # The adapter repo's config must name the base model to load first.
        config_dict = model_data.config.get("peft")
        if config_dict:
            base_model_id = config_dict["base_model_name_or_path"]
            if base_model_id:
                self.tokenizer = AutoTokenizer.from_pretrained(base_model_id)
                model = AutoModelForCausalLM.from_pretrained(
                    base_model_id, device_map="auto"
                )
                # wrap base model with peft
                self.model = PeftModel.from_pretrained(model, model_id)
            else:
                raise ValueError("There's no base model ID in configuration file.")
        else:
            raise ValueError("Config file for this model does not exist or is invalid.")

    def __call__(self, inputs: str, **kwargs) -> "list[dict]":
        """
        Args:
            inputs (:obj:`str`):
                a string for text to be completed
        Returns:
            A single-element list: ``[{"generated_text": <completed text>}]``.
            (Annotation fixed: this method returns a list of dicts, not a str.)
        """
        if idle.UNLOAD_IDLE:
            # Record request activity so the idle loop can unload quiet workers.
            with idle.request_witnesses():
                self._model_to_gpu()
                resp = self._process_req(inputs, **kwargs)
        else:
            resp = self._process_req(inputs, **kwargs)
        return [{"generated_text": resp[0]}]

    @timing.timing
    def _model_to_gpu(self):
        # No-op on CPU-only hosts.
        if torch.cuda.is_available():
            self.model.to("cuda")

    def _process_req(self, inputs: str, **kwargs) -> "list[str]":
        """
        Args:
            inputs (:obj:`str`):
                a string for text to be completed
        Returns:
            The batch-decoded generations — a list of strings.
            (Annotation fixed: batch_decode returns a list, not a str.)
        """
        tokenized_inputs = self.tokenizer(inputs, return_tensors="pt")
        self._model_to_gpu()
        if torch.cuda.is_available():
            device = "cuda"
            tokenized_inputs = {
                "input_ids": tokenized_inputs["input_ids"].to(device),
                "attention_mask": tokenized_inputs["attention_mask"].to(device),
            }
        with torch.no_grad():
            outputs = self.model.generate(
                input_ids=tokenized_inputs["input_ids"],
                attention_mask=tokenized_inputs["attention_mask"],
                # NOTE(review): generation length and EOS id are hard-coded;
                # eos_token_id=3 presumably matches the base tokenizer — confirm.
                max_new_tokens=10,
                eos_token_id=3,
            )
        return self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
0
|
hf_public_repos/api-inference-community/docker_images/peft/app
|
hf_public_repos/api-inference-community/docker_images/peft/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Abstract base class every task pipeline in this image must implement."""

    @abstractmethod
    def __init__(self, model_id: str):
        # Subclasses load the model/processors for `model_id` here.
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        # Subclasses run inference on `inputs` and return the task output.
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Raised by pipelines to signal a task-specific failure."""

    pass
|
0
|
hf_public_repos/api-inference-community/docker_images/peft/app
|
hf_public_repos/api-inference-community/docker_images/peft/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.text_generation import TextGenerationPipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/peft
|
hf_public_repos/api-inference-community/docker_images/peft/tests/test_api_text_generation.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "text-generation" not in ALLOWED_TASKS,
    "text-generation not implemented",
)
class TextGenerationTestCase(TestCase):
    """End-to-end tests for the text-generation route."""

    def setUp(self):
        # Point the app at the test model via env vars, remembering the
        # previous values so tearDown can restore them.
        model_id = TESTABLE_MODELS["text-generation"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "text-generation"

        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the environment variables touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        inputs = "It is a beautiful day outside"

        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        # NOTE(review): this expects a bare string for the {"inputs": ...}
        # payload but a list of dicts for the raw payload below -- confirm the
        # route really differs between the two payload shapes.
        self.assertEqual(type(content), str)

        with TestClient(self.app) as client:
            response = client.post("/", json=inputs)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(type(content[0]["generated_text"]), str)

    def test_malformed_question(self):
        # Invalid UTF-8 bytes must produce a 400 with the decode error message.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(
            response.content,
            b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
        )
|
0
|
hf_public_repos/api-inference-community/docker_images/peft
|
hf_public_repos/api-inference-community/docker_images/peft/tests/test_api.py
|
import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
    "text-generation": "merve/peft-test-3",
}

# Full catalogue of hub tasks; entries absent from ALLOWED_TASKS must be
# rejected by get_pipeline(). Sorted and deduplicated (it is a set, so the
# duplicate entries in the old literal were redundant).
ALL_TASKS = {
    "audio-classification",
    "audio-to-audio",
    "automatic-speech-recognition",
    "conversational",
    "feature-extraction",
    "fill-mask",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "speech-segmentation",
    "summarization",
    "table-question-answering",
    "tabular-classification",
    "tabular-regression",
    "text-classification",
    "text-to-image",
    "text-to-speech",
    "text2text-generation",
    "token-classification",
    "zero-shot-classification",
}
class PipelineTestCase(TestCase):
    """Sanity checks on the task routing table."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Every known task this image does not implement must be rejected
        # by get_pipeline with EnvironmentError.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="Hervé Bredin <herve.bredin@irit.fr>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
RUN apt-get update -y && apt-get install ffmpeg -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV PYANNOTE_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/requirements.txt
|
starlette==0.27.0
api-inference-community==0.0.25
torch==1.13.1
torchvision==0.12.0
torchaudio==0.11.0
torchtext==0.12.0
speechbrain==0.5.12
pyannote-audio==2.0.1
huggingface_hub==0.8.1
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import AutomaticSpeechRecognitionPipeline, Pipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")  # task to serve, e.g. "automatic-speech-recognition"
MODEL_ID = os.getenv("MODEL_ID")  # hub repo id of the model to serve
logger = logging.getLogger(__name__)

# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
    "automatic-speech-recognition": AutomaticSpeechRecognitionPipeline
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Instantiate (once, via lru_cache) the pipeline selected by the
    TASK / MODEL_ID environment variables.

    Raises:
        EnvironmentError: when TASK names a task this image does not serve.
    """
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    pipeline_cls = ALLOWED_TASKS.get(task)
    if pipeline_cls is None:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return pipeline_cls(model_id)
# Same path for both routes: GET -> health check, POST -> inference.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]

middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware

    # Wide-open CORS: intended for local debugging only (DEBUG=1).
    middleware.append(
        Middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_headers=["*"],
            allow_methods=["*"],
        )
    )
app = Starlette(routes=routes, middleware=middleware)


@app.on_event("startup")
async def startup_event():
    """Configure access logging and eagerly warm the pipeline cache."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]

    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass


if __name__ == "__main__":
    # Run by prestart.sh to warm the model cache before the server starts.
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/app
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Abstract base class every task pipeline in this image must implement."""

    @abstractmethod
    def __init__(self, model_id: str):
        # Subclasses load the model/processors for `model_id` here.
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        # Subclasses run inference on `inputs` and return the task output.
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Raised by pipelines to signal a task-specific failure."""

    pass
|
0
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/app
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.automatic_speech_recognition import (
AutomaticSpeechRecognitionPipeline,
)
|
0
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/app
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/app/pipelines/automatic_speech_recognition.py
|
from typing import Dict
import numpy as np
import torch
from app.pipelines import Pipeline
from pyannote.audio import Pipeline as Pypeline
class AutomaticSpeechRecognitionPipeline(Pipeline):
    """Run a pyannote.audio pipeline (e.g. voice activity detection) on audio.

    Despite the task name, the output is not a transcription: the detected
    regions are stringified and wrapped in {"text": ...} so the result fits
    the automatic-speech-recognition response schema.
    """

    def __init__(self, model_id: str):
        # The serving layer resamples incoming audio to `self.sampling_rate`
        # before calling us (see api_inference_community's audio validation).
        self.sampling_rate = 16000
        self.model = Pypeline.from_pretrained(model_id)

    # FIX: was annotated `inputs: np.array` -- np.array is a function, not a
    # type; np.ndarray is the actual array type.
    def __call__(self, inputs: np.ndarray) -> Dict[str, str]:
        """
        Args:
            inputs (:obj:`np.ndarray`):
                The raw waveform of audio received, sampled at
                `self.sampling_rate`.
        Return:
            A :obj:`dict` like ``{"text": "XXX"}`` where ``XXX`` is the
            stringified list of detected regions, each a dict with
            ``label``/``start``/``stop`` (seconds).
        """
        # pyannote expects a (channel, time) float tensor.
        wav = torch.from_numpy(inputs).unsqueeze(0)
        output = self.model({"waveform": wav, "sample_rate": self.sampling_rate})
        regions = [
            {"label": label, "start": segment.start, "stop": segment.end}
            for segment, _, label in output.itertracks(yield_label=True)
        ]
        # The ASR schema only allows a string, so serialize the region list.
        regions = str(regions)
        return {"text": regions}
|
0
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/tests/test_api_automatic_speech_recognition.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "automatic-speech-recognition" not in ALLOWED_TASKS,
    "automatic-speech-recognition not implemented",
)
class AutomaticSpeecRecognitionTestCase(TestCase):
    """End-to-end tests for the ASR route (pyannote segmentation output)."""

    # NOTE(review): class name is missing an "h" ("Speec"); renaming would
    # change the public name, so it is only flagged here.

    def setUp(self):
        # Point the app at the test model, remembering previous env values.
        model_id = TESTABLE_MODELS["automatic-speech-recognition"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "automatic-speech-recognition"

        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the environment variables touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        """Return the raw bytes of an audio fixture from tests/samples/."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def test_simple(self):
        bpayload = self.read("sample1.flac")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})

    def test_malformed_audio(self):
        bpayload = self.read("malformed.flac")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')

    def test_dual_channel_audiofile(self):
        # Stereo input must be accepted (normalized upstream by the
        # api-inference-community audio validation layer).
        bpayload = self.read("sample1_dual.ogg")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})

    def test_webm_audiofile(self):
        bpayload = self.read("sample1.webm")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})
|
0
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so user-relative paths work too.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can go back.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Always restore the original directory, even on error.
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    """Smoke test: the docker image must at least build."""

    def test_can_build_docker_image(self):
        # Build from the image root (one level above tests/).
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio
|
hf_public_repos/api-inference-community/docker_images/pyannote_audio/tests/test_api.py
|
import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
    "automatic-speech-recognition": "pyannote/voice-activity-detection"
}

# Full catalogue of hub tasks; entries absent from ALLOWED_TASKS must be
# rejected by get_pipeline(). Sorted and deduplicated (it is a set, so the
# seven duplicate entries in the old literal were redundant).
ALL_TASKS = {
    "audio-classification",
    "audio-to-audio",
    "automatic-speech-recognition",
    "conversational",
    "feature-extraction",
    "fill-mask",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "speech-segmentation",
    "structured-data-classification",
    "summarization",
    "table-question-answering",
    "text-classification",
    "text-to-image",
    "text-to-speech",
    "text2text-generation",
    "token-classification",
    "zero-shot-classification",
}
class PipelineTestCase(TestCase):
    """Sanity checks on the task routing table."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Every known task this image does not implement must be rejected
        # by get_pipeline with EnvironmentError.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/allennlp/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
RUN pip install spacy && python -m spacy download en_core_web_sm
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV ALLENNLP_CACHE_ROOT=/data
ENV NLTK_DATA=/data
ENV HOME=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/allennlp/requirements.txt
|
starlette==0.27.0
numpy==1.22.0
allennlp>=2.5.0,<3.0.0
# Even though it is not imported, it is actually required.
allennlp_models>=2.5.0,<3.0.0
api-inference-community==0.0.23
huggingface_hub==0.5.1
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/allennlp/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/allennlp
|
hf_public_repos/api-inference-community/docker_images/allennlp/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline, QuestionAnsweringPipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")  # task to serve, e.g. "question-answering"
MODEL_ID = os.getenv("MODEL_ID")  # hub repo id of the model to serve
logger = logging.getLogger(__name__)

# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
    "question-answering": QuestionAnsweringPipeline
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Instantiate (once, via lru_cache) the pipeline selected by the
    TASK / MODEL_ID environment variables.

    Raises:
        EnvironmentError: when TASK names a task this image does not serve.
    """
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    pipeline_cls = ALLOWED_TASKS.get(task)
    if pipeline_cls is None:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return pipeline_cls(model_id)
# Same path for both routes: GET -> health check, POST -> inference.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]

middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware

    # Wide-open CORS: intended for local debugging only (DEBUG=1).
    middleware.append(
        Middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_headers=["*"],
            allow_methods=["*"],
        )
    )
app = Starlette(routes=routes, middleware=middleware)


@app.on_event("startup")
async def startup_event():
    """Configure access logging and eagerly warm the pipeline cache."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]

    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass


if __name__ == "__main__":
    # Run by prestart.sh to warm the model cache before the server starts.
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0
|
hf_public_repos/api-inference-community/docker_images/allennlp/app
|
hf_public_repos/api-inference-community/docker_images/allennlp/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any, Optional
class Pipeline(ABC):
    """Abstract base class every task pipeline in this image must implement."""

    # NOTE(review): these class attributes appear unused within this file;
    # presumably set by callers/subclasses -- confirm before removing.
    task: Optional[str] = None
    model_id: Optional[str] = None

    @abstractmethod
    def __init__(self, model_id: str):
        # Subclasses load the model/processors for `model_id` here.
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        # Subclasses run inference on `inputs` and return the task output.
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Raised by pipelines to signal a task-specific failure."""

    pass
|
0
|
hf_public_repos/api-inference-community/docker_images/allennlp/app
|
hf_public_repos/api-inference-community/docker_images/allennlp/app/pipelines/question_answering.py
|
import os
import shutil
from typing import Any, Dict
# Even though it is not imported, it is actually required, it downloads some stuff.
import allennlp_models # noqa: F401
from allennlp.predictors.predictor import Predictor
from app.pipelines import Pipeline
class QuestionAnsweringPipeline(Pipeline):
def __init__(
self,
model_id: str,
):
try:
self.predictor = Predictor.from_path("hf://" + model_id)
except (IOError, OSError):
nltk = os.getenv("NLTK_DATA")
if nltk is None:
raise
directory = os.path.join(nltk, "corpora")
shutil.rmtree(directory)
self.predictor = Predictor.from_path("hf://" + model_id)
def __call__(self, inputs: Dict[str, str]) -> Dict[str, Any]:
"""
Args:
inputs (:obj:`dict`):
a dictionary containing two keys, 'question' being the question being asked and 'context' being some text containing the answer.
Return:
A :obj:`dict`:. The object return should be like {"answer": "XXX", "start": 3, "end": 6, "score": 0.82} containing :
- "answer": the extracted answer from the `context`.
- "start": the offset within `context` leading to `answer`. context[start:stop] == answer
- "end": the ending offset within `context` leading to `answer`. context[start:stop] === answer
- "score": A score between 0 and 1 describing how confident the model is for this answer.
"""
allenlp_input = {"passage": inputs["context"], "question": inputs["question"]}
predictions = self.predictor.predict_json(allenlp_input)
start_token_idx, end_token_idx = predictions["best_span"]
start = predictions["token_offsets"][start_token_idx][0]
end = predictions["token_offsets"][end_token_idx][1]
score = (
predictions["span_end_probs"][end_token_idx]
* predictions["span_start_probs"][start_token_idx]
)
return {
"answer": predictions["best_span_str"],
"start": start,
"end": end,
"score": score,
}
|
0
|
hf_public_repos/api-inference-community/docker_images/allennlp/app
|
hf_public_repos/api-inference-community/docker_images/allennlp/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.question_answering import QuestionAnsweringPipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/allennlp
|
hf_public_repos/api-inference-community/docker_images/allennlp/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so user-relative paths work too.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can go back.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Always restore the original directory, even on error.
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    """Smoke test: the docker image must at least build."""

    def test_can_build_docker_image(self):
        # Build from the image root (one level above tests/).
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/allennlp
|
hf_public_repos/api-inference-community/docker_images/allennlp/tests/test_api.py
|
import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
    "question-answering": "lysandre/bidaf-elmo-model-2020.03.19"
}

# Tasks the wider API knows about; anything outside ALLOWED_TASKS must be
# rejected. Kept sorted for readability (it is a set, order is irrelevant).
ALL_TASKS = {
    "audio-source-separation",
    "automatic-speech-recognition",
    "image-classification",
    "question-answering",
    "text-generation",
    "text-to-speech",
}
class PipelineTestCase(TestCase):
    """Sanity checks on the task routing table."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Every known task this image does not implement must be rejected
        # by get_pipeline with EnvironmentError.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                # BUG FIX: get_pipeline() takes no arguments (it reads TASK
                # and MODEL_ID from the environment). The old call
                # get_pipeline(unsupported_task, model_id="XX") raised
                # TypeError, so assertRaises(EnvironmentError) could never
                # pass. Configure via env vars like the sibling images do.
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images/allennlp
|
hf_public_repos/api-inference-community/docker_images/allennlp/tests/test_api_question_answering.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "question-answering" not in ALLOWED_TASKS,
    "question-answering not implemented",
)
class QuestionAnsweringTestCase(TestCase):
    """End-to-end tests for the question-answering route."""

    def setUp(self):
        # Point the app at the test model, remembering previous env values.
        model_id = TESTABLE_MODELS["question-answering"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "question-answering"

        from app.main import app

        self.app = app

    def tearDown(self):
        # Restore (or remove) the environment variables touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        inputs = {"question": "Where do I live ?", "context": "I live in New-York"}

        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"answer", "start", "end", "score"})

        # The raw payload shape (no "inputs" wrapper) must work too.
        with TestClient(self.app) as client:
            response = client.post("/", json=inputs)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"answer", "start", "end", "score"})

    def test_malformed_question(self):
        # Non-JSON bytes must yield a 400 with an "error" payload.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"Where do I live ?")
        self.assertEqual(response.status_code, 400, response.content)
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"error"})
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/fairseq/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
RUN apt-get update -y && apt-get install ffmpeg espeak-ng -y
RUN pip install --no-cache-dir numpy==1.22 torch==1.11
COPY ./requirements.txt /app
RUN pip install -U pip
# This will make further requirements.txt changes faster
# Numpy is REQUIRED because pkusage requires numpy to be already installed
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/fairseq/requirements.txt
|
api-inference-community==0.0.23
g2p_en==2.1.0
g2pc==0.9.9.3
phonemizer==2.2.1
librosa==0.8.1
hanziconv==0.3.2
sentencepiece==0.1.96
# Dummy comment to trigger automatic deploy.
git+https://github.com/facebookresearch/fairseq.git@d47119871c2ac9a0a0aa2904dd8cfc1929b113d9#egg=fairseq
huggingface_hub==0.5.1
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/fairseq/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/fairseq
|
hf_public_repos/api-inference-community/docker_images/fairseq/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline, SpeechToSpeechPipeline, TextToSpeechPipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")  # task to serve, e.g. "text-to-speech"
MODEL_ID = os.getenv("MODEL_ID")  # hub repo id of the model to serve
logger = logging.getLogger(__name__)

# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
    "text-to-speech": TextToSpeechPipeline,
    "audio-to-audio": SpeechToSpeechPipeline,
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Instantiate (once, via lru_cache) the pipeline selected by the
    TASK / MODEL_ID environment variables.

    Raises:
        EnvironmentError: when TASK names a task this image does not serve.
    """
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    pipeline_cls = ALLOWED_TASKS.get(task)
    if pipeline_cls is None:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return pipeline_cls(model_id)
# Same path for both routes: GET -> health check, POST -> inference.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]

middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware

    # Wide-open CORS: intended for local debugging only (DEBUG=1).
    middleware.append(
        Middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_headers=["*"],
            allow_methods=["*"],
        )
    )
app = Starlette(routes=routes, middleware=middleware)


@app.on_event("startup")
async def startup_event():
    """Configure access logging and eagerly warm the pipeline cache."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]

    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass


if __name__ == "__main__":
    # Run by prestart.sh to warm the model cache before the server starts.
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0
|
hf_public_repos/api-inference-community/docker_images/fairseq/app
|
hf_public_repos/api-inference-community/docker_images/fairseq/app/pipelines/audio_to_audio.py
|
import json
import os
from pathlib import Path
from typing import List, Tuple
import numpy as np
import torch
from app.pipelines import Pipeline
from app.pipelines.utils import ARG_OVERRIDES_MAP
from fairseq import hub_utils
from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
from fairseq.models.speech_to_speech.hub_interface import S2SHubInterface
from fairseq.models.speech_to_text.hub_interface import S2THubInterface
from fairseq.models.text_to_speech import CodeHiFiGANVocoder
from fairseq.models.text_to_speech.hub_interface import (
    TTSHubInterface,
    VocoderHubInterface,
)
from huggingface_hub import snapshot_download
class SpeechToSpeechPipeline(Pipeline):
    """Two-stage speech-to-speech pipeline.

    Stage 1 is a fairseq speech-to-text (or speech-to-speech) model that maps
    the input waveform to text or discrete units. Stage 2 (optional, driven by
    the checkpoint's hub metadata) renders audio from that output, either via
    a CodeHiFiGAN unit vocoder or a full TTS model.
    """
    def __init__(self, model_id: str):
        # Stage 1: load the translation model from the Hub.
        arg_overrides = ARG_OVERRIDES_MAP.get(
            model_id, {}
        ) # Model specific override. TODO: Update on checkpoint side in the future
        arg_overrides["config_yaml"] = "config.yaml" # common override
        models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
            model_id,
            arg_overrides=arg_overrides,
            cache_dir=os.getenv("HUGGINGFACE_HUB_CACHE"),
        )
        self.cfg = cfg
        self.model = models[0].cpu()
        self.model.eval()
        self.task = task
        # Fall back to 16 kHz when the task does not expose a sample rate.
        self.sampling_rate = getattr(self.task, "sr", None) or 16_000
        # Hub metadata may prefix per-language keys with "<tgt_lang>_".
        tgt_lang = self.task.data_cfg.hub.get("tgt_lang", None)
        pfx = f"{tgt_lang}_" if self.task.data_cfg.prepend_tgt_lang_tag else ""
        # Apply checkpoint-provided generation overrides onto cfg.generation.
        generation_args = self.task.data_cfg.hub.get(f"{pfx}generation_args", None)
        if generation_args is not None:
            for key in generation_args:
                setattr(cfg.generation, key, generation_args[key])
        self.generator = task.build_generator([self.model], cfg.generation)
        # Stage 2: optional audio-rendering model named in the hub metadata.
        tts_model_id = self.task.data_cfg.hub.get(f"{pfx}tts_model_id", None)
        self.unit_vocoder = self.task.data_cfg.hub.get(f"{pfx}unit_vocoder", None)
        self.tts_model, self.tts_task, self.tts_generator = None, None, None
        if tts_model_id is not None:
            # tts_model_id may look like "namespace:name"; only the name is kept.
            _id = tts_model_id.split(":")[-1]
            cache_dir = os.getenv("HUGGINGFACE_HUB_CACHE")
            if self.unit_vocoder is not None:
                # Discrete-unit output: download and wrap a CodeHiFiGAN vocoder.
                library_name = "fairseq"
                cache_dir = (
                    cache_dir or (Path.home() / ".cache" / library_name).as_posix()
                )
                cache_dir = snapshot_download(
                    f"facebook/{_id}", cache_dir=cache_dir, library_name=library_name
                )
                x = hub_utils.from_pretrained(
                    cache_dir,
                    "model.pt",
                    ".",
                    archive_map=CodeHiFiGANVocoder.hub_models(),
                    config_yaml="config.json",
                    fp16=False,
                    is_vocoder=True,
                )
                with open(f"{x['args']['data']}/config.json") as f:
                    vocoder_cfg = json.load(f)
                assert (
                    len(x["args"]["model_path"]) == 1
                ), "Too many vocoder models in the input"
                vocoder = CodeHiFiGANVocoder(x["args"]["model_path"][0], vocoder_cfg)
                self.tts_model = VocoderHubInterface(vocoder_cfg, vocoder)
            else:
                # Text output: load a full TTS model (Griffin-Lim vocoder, CPU).
                (
                    tts_models,
                    tts_cfg,
                    self.tts_task,
                ) = load_model_ensemble_and_task_from_hf_hub(
                    f"facebook/{_id}",
                    arg_overrides={"vocoder": "griffin_lim", "fp16": False},
                    cache_dir=cache_dir,
                )
                self.tts_model = tts_models[0].cpu()
                self.tts_model.eval()
                tts_cfg["task"].cpu = True
                TTSHubInterface.update_cfg_with_data_cfg(
                    tts_cfg, self.tts_task.data_cfg
                )
                self.tts_generator = self.tts_task.build_generator(
                    [self.tts_model], tts_cfg
                )
    def __call__(self, inputs: np.array) -> Tuple[np.array, int, List[str]]:
        """
        Args:
            inputs (:obj:`np.array`):
                The raw waveform of audio received. By default sampled at `self.sampling_rate`.
                The shape of this array is `T`, where `T` is the time axis
        Return:
            A :obj:`tuple` containing:
            - :obj:`np.array`:
                The return shape of the array must be `C'`x`T'`
            - a :obj:`int`: the sampling rate as an int in Hz.
            - a :obj:`List[str]`: the annotation for each out channel.
                This can be the name of the instruments for audio source separation
                or some annotation for speech enhancement. The length must be `C'`.
        """
        # Add a batch dimension: (T,) -> (1, T).
        _inputs = torch.from_numpy(inputs).unsqueeze(0)
        sample, text = None, None
        if self.cfg.task._name in ["speech_to_text", "speech_to_text_sharded"]:
            sample = S2THubInterface.get_model_input(self.task, _inputs)
            text = S2THubInterface.get_prediction(
                self.task, self.model, self.generator, sample
            )
        elif self.cfg.task._name in ["speech_to_speech"]:
            # NOTE(review): local variable name carries an original typo
            # ("s2shubinerface"); harmless since it is local only.
            s2shubinerface = S2SHubInterface(self.cfg, self.task, self.model)
            sample = s2shubinerface.get_model_input(self.task, _inputs)
            text = S2SHubInterface.get_prediction(
                self.task, self.model, self.generator, sample
            )
        wav, sr = np.zeros((0,)), self.sampling_rate
        if self.unit_vocoder is not None:
            # Units -> waveform through the vocoder; no text label to report.
            tts_sample = self.tts_model.get_model_input(text)
            wav, sr = self.tts_model.get_prediction(tts_sample)
            text = ""
        else:
            # Text -> waveform through the TTS stage.
            # NOTE(review): assumes the checkpoint metadata configured a
            # tts_model_id; self.tts_task is None otherwise — confirm upstream.
            tts_sample = TTSHubInterface.get_model_input(self.tts_task, text)
            wav, sr = TTSHubInterface.get_prediction(
                self.tts_task, self.tts_model, self.tts_generator, tts_sample
            )
        return wav.unsqueeze(0).numpy(), sr, [text]
|
0
|
hf_public_repos/api-inference-community/docker_images/fairseq/app
|
hf_public_repos/api-inference-community/docker_images/fairseq/app/pipelines/text_to_speech.py
|
import os
from typing import Tuple

import numpy as np
from app.pipelines import Pipeline
from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
from fairseq.models.text_to_speech.hub_interface import TTSHubInterface


class TextToSpeechPipeline(Pipeline):
    """Fairseq text-to-speech pipeline driven through TTSHubInterface."""

    def __init__(self, model_id: str):
        ensemble, config, tts_task = load_model_ensemble_and_task_from_hf_hub(
            model_id,
            arg_overrides={"vocoder": "griffin_lim", "fp16": False},
            cache_dir=os.getenv("HUGGINGFACE_HUB_CACHE"),
        )
        self.model = ensemble[0].cpu()
        self.model.eval()
        config["task"].cpu = True  # force CPU inference
        self.task = tts_task
        TTSHubInterface.update_cfg_with_data_cfg(config, self.task.data_cfg)
        # The whole ensemble list is handed to build_generator, matching the
        # fairseq hub usage.
        self.generator = self.task.build_generator(ensemble, config)

    def __call__(self, inputs: str) -> Tuple[np.array, int]:
        """
        Args:
            inputs (:obj:`str`):
                The text to generate audio from
        Return:
            A :obj:`np.array` and a :obj:`int`: The raw waveform as a numpy
            array, and the sampling rate as an int.
        """
        text = inputs.strip("\x00")
        if not text:
            # Nothing to synthesize: empty waveform at the task's sample rate.
            return np.zeros((0,)), self.task.sr
        batch = TTSHubInterface.get_model_input(self.task, text)
        waveform, sample_rate = TTSHubInterface.get_prediction(
            self.task, self.model, self.generator, batch
        )
        return waveform.numpy(), sample_rate
|
0
|
hf_public_repos/api-inference-community/docker_images/fairseq/app
|
hf_public_repos/api-inference-community/docker_images/fairseq/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any


class Pipeline(ABC):
    """Abstract contract that every task pipeline must fulfil."""

    @abstractmethod
    def __init__(self, model_id: str):
        # Concrete pipelines load their model and resources here.
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        # Concrete pipelines run inference here.
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    """Raised for pipeline-specific failures."""
|
0
|
hf_public_repos/api-inference-community/docker_images/fairseq/app
|
hf_public_repos/api-inference-community/docker_images/fairseq/app/pipelines/utils.py
|
# Per-checkpoint argument overrides applied when loading a model from the Hub.
# TODO: fold these into the checkpoints themselves so this map can go away.
ARG_OVERRIDES_MAP = {
    "facebook/xm_transformer_s2ut_800m-es-en-st-asr-bt_h1_2022": {
        "config_yaml": "config.yaml",
        "task": "speech_to_text",
    }
}
|
0
|
hf_public_repos/api-inference-community/docker_images/fairseq/app
|
hf_public_repos/api-inference-community/docker_images/fairseq/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.audio_to_audio import SpeechToSpeechPipeline
from app.pipelines.text_to_speech import TextToSpeechPipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/fairseq
|
hf_public_repos/api-inference-community/docker_images/fairseq/tests/test_api_text_to_speech.py
|
import os
from unittest import TestCase, skipIf
from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "text-to-speech" not in ALLOWED_TASKS,
    "text-to-speech not implemented",
)
class TextToSpeechTestCase(TestCase):
    """End-to-end HTTP tests for the text-to-speech route."""
    def setUp(self):
        """Point the app at the testable TTS model, remembering prior env vars."""
        model_id = TESTABLE_MODELS["text-to-speech"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "text-to-speech"
        # Imported late so the env vars above are in place first.
        from app.main import app
        self.app = app
    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline
        get_pipeline.cache_clear()
    def tearDown(self):
        """Restore the MODEL_ID/TASK environment captured in setUp."""
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]
    def test_simple(self):
        """A plain text payload must yield 1-D FLAC audio of non-trivial length."""
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": "This is some text"})
        self.assertEqual(
            response.status_code,
            200,
        )
        self.assertEqual(response.headers["content-type"], "audio/flac")
        audio = ffmpeg_read(response.content, 16000)
        self.assertEqual(len(audio.shape), 1)
        self.assertGreater(audio.shape[0], 1000)
    def test_malformed_input(self):
        """Invalid UTF-8 bytes must produce a 400 with a decode error message."""
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(
            response.content,
            b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
        )
|
0
|
hf_public_repos/api-inference-community/docker_images/fairseq
|
hf_public_repos/api-inference-community/docker_images/fairseq/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
def test_can_build_docker_image(self):
with cd(os.path.dirname(os.path.dirname(__file__))):
subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/fairseq
|
hf_public_repos/api-inference-community/docker_images/fairseq/tests/test_api.py
|
import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
    "text-to-speech": "facebook/fastspeech2-en-ljspeech",
    "audio-to-audio": "facebook/xm_transformer_sm_all-en",
}
# Full set of task names this image knows about; anything not also present in
# ALLOWED_TASKS must be rejected by get_pipeline.
ALL_TASKS = {
    "text-to-speech",
    "audio-to-audio",
}
class PipelineTestCase(TestCase):
    """Sanity checks on the task -> pipeline registration table."""
    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )
    def test_unsupported_tasks(self):
        """Every task outside ALLOWED_TASKS must raise EnvironmentError."""
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images/fairseq
|
hf_public_repos/api-inference-community/docker_images/fairseq/tests/test_api_audio_to_audio.py
|
import base64
import json
import os
from unittest import TestCase, skipIf
from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"audio-to-audio" not in ALLOWED_TASKS,
"audio-to-audio not implemented",
)
class AudioToAudioTestCase(TestCase):
def setUp(self):
model_id = TESTABLE_MODELS["audio-to-audio"]
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = model_id
os.environ["TASK"] = "audio-to-audio"
from app.main import app
self.app = app
@classmethod
def setUpClass(cls):
from app.main import get_pipeline
get_pipeline.cache_clear()
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def read(self, filename: str) -> bytes:
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "samples", filename)
with open(filename, "rb") as f:
bpayload = f.read()
return bpayload
def test_simple(self):
bpayload = self.read("sample2.flac")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "application/json")
audio = json.loads(response.content)
self.assertTrue(isinstance(audio, list))
self.assertEqual(set(audio[0].keys()), {"blob", "content-type", "label"})
data = base64.b64decode(audio[0]["blob"])
wavform = ffmpeg_read(data, 16000)
self.assertGreater(wavform.shape[0], 1000)
self.assertTrue(isinstance(audio[0]["content-type"], str))
self.assertTrue(isinstance(audio[0]["label"], str))
def test_malformed_audio(self):
bpayload = self.read("malformed.flac")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
400,
)
self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/open_clip/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV TORCH_HOME=/data/
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/open_clip/requirements.txt
|
starlette==0.27.0
api-inference-community==0.0.32
huggingface_hub>=0.12.1
timm>=0.9.10
transformers>=4.34.0
open_clip_torch>=2.23.0
#dummy.
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/open_clip/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/open_clip
|
hf_public_repos/api-inference-community/docker_images/open_clip/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline, ZeroShotImageClassificationPipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
# The served task and model are selected purely via environment variables.
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
# Tasks this open_clip container can serve, mapped to their pipeline class.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
    "zero-shot-image-classification": ZeroShotImageClassificationPipeline
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Build (and memoize via lru_cache) the pipeline selected by the
    TASK and MODEL_ID environment variables.

    Raises:
        EnvironmentError: if TASK is not a key of ALLOWED_TASKS.
        KeyError: if TASK or MODEL_ID is unset.
    """
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    if task not in ALLOWED_TASKS:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return ALLOWED_TASKS[task](model_id)
# Both routes match every path; the non-POST route serves health checks,
# the POST route serves inference.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware
    # Fully permissive CORS — enabled only under DEBUG=1 for local testing.
    middleware.append(
        Middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_headers=["*"],
            allow_methods=["*"],
        )
    )
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Configure uvicorn access logging and eagerly warm the pipeline cache."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]
    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
if __name__ == "__main__":
    # Running the module directly (see prestart.sh) pre-downloads the model
    # so the real server workers start quickly afterwards.
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0
|
hf_public_repos/api-inference-community/docker_images/open_clip/app
|
hf_public_repos/api-inference-community/docker_images/open_clip/app/pipelines/zero_shot_image_classification.py
|
import json
from typing import Any, Dict, List, Optional

import open_clip
import torch
import torch.nn.functional as F
from app.pipelines import Pipeline
from open_clip.pretrained import download_pretrained_from_hf
from PIL import Image


class ZeroShotImageClassificationPipeline(Pipeline):
    """Zero-shot image classification backed by an OpenCLIP model from the Hub."""

    def __init__(self, model_id: str):
        # Model plus the preprocessing transform matched to its checkpoint.
        self.model, self.preprocess = open_clip.create_model_from_pretrained(
            f"hf-hub:{model_id}"
        )
        config_path = download_pretrained_from_hf(
            model_id,
            filename="open_clip_config.json",
        )
        with open(config_path, "r", encoding="utf-8") as f:
            # TODO grab custom prompt templates from preprocess_cfg
            self.config = json.load(f)
        self.tokenizer = open_clip.get_tokenizer(f"hf-hub:{model_id}")
        self.model.eval()
        # SigLIP-style checkpoints expose a logit bias and are scored with a
        # sigmoid rather than a softmax over the candidate labels.
        self.use_sigmoid = getattr(self.model, "logit_bias", None) is not None

    def __call__(
        self,
        inputs: Image.Image,
        candidate_labels: Optional[List[str]] = None,
    ) -> List[Dict[str, Any]]:
        """
        Args:
            inputs (:obj:`PIL.Image`):
                The raw image representation as PIL.
                No transformation made whatsoever from the input. Make all necessary transformations here.
            candidate_labels (List[str]):
                A list of strings representing candidate class labels.
                A single comma-separated string is also accepted.
        Return:
            A :obj:`list`:. The list contains items that are dicts should be liked {"label": "XXX", "score": 0.82}
            The list is sorted in decreasing `score` order.
        Raises:
            ValueError: if `candidate_labels` is missing.
        """
        if candidate_labels is None:
            raise ValueError("'candidate_labels' is a required field")
        if isinstance(candidate_labels, str):
            # Accept "a, b, c"; strip whitespace so " b" never ends up inside
            # the prompt templates.
            candidate_labels = [
                label.strip() for label in candidate_labels.split(",")
            ]
        prompt_templates = (
            "a bad photo of a {}.",
            "a photo of the large {}.",
            "art of the {}.",
            "a photo of the small {}.",
            "this is an image of {}.",
        )
        image = inputs.convert("RGB")
        image_inputs = self.preprocess(image).unsqueeze(0)
        # Text tower: one (template-averaged) embedding per candidate label.
        classifier = open_clip.build_zero_shot_classifier(
            self.model,
            tokenizer=self.tokenizer,
            classnames=candidate_labels,
            templates=prompt_templates,
            num_classes_per_batch=10,
        )
        with torch.no_grad():
            image_features = self.model.encode_image(image_inputs)
            image_features = F.normalize(image_features, dim=-1)
            logits = image_features @ classifier * self.model.logit_scale.exp()
            if self.use_sigmoid:
                logits += self.model.logit_bias
                scores = torch.sigmoid(logits.squeeze(0))
            else:
                scores = logits.squeeze(0).softmax(0)
        output = [
            {
                "label": label,
                "score": score.item(),
            }
            for label, score in zip(candidate_labels, scores)
        ]
        # The task contract prefers results in decreasing score order.
        output.sort(key=lambda item: item["score"], reverse=True)
        return output
|
0
|
hf_public_repos/api-inference-community/docker_images/open_clip/app
|
hf_public_repos/api-inference-community/docker_images/open_clip/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any, Optional


class Pipeline(ABC):
    """Base contract for task pipelines."""

    # Populated by the serving layer when known; None until then.
    task: Optional[str] = None
    model_id: Optional[str] = None

    @abstractmethod
    def __init__(self, model_id: str):
        # Concrete pipelines load their model and resources here.
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        # Concrete pipelines run inference here.
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    """Raised for pipeline-specific failures."""
|
0
|
hf_public_repos/api-inference-community/docker_images/open_clip/app
|
hf_public_repos/api-inference-community/docker_images/open_clip/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.zero_shot_image_classification import (
ZeroShotImageClassificationPipeline,
)
|
0
|
hf_public_repos/api-inference-community/docker_images/open_clip
|
hf_public_repos/api-inference-community/docker_images/open_clip/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
def test_can_build_docker_image(self):
with cd(os.path.dirname(os.path.dirname(__file__))):
subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/open_clip
|
hf_public_repos/api-inference-community/docker_images/open_clip/tests/test_api.py
|
import os
from typing import Dict, List
from unittest import TestCase, skipIf

from app.main import ALLOWED_TASKS, get_pipeline

# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
# NOTE: values are *lists* of model ids (the zero-shot tests are
# parameterized over them), hence Dict[str, List[str]].
TESTABLE_MODELS: Dict[str, List[str]] = {
    "zero-shot-image-classification": [
        "laion/CLIP-ViT-B-32-laion2B-s34B-b79K",
        # "laion/CLIP-convnext_base_w-laion2B-s13B-b82K-augreg",
        # "timm/eva02_base_patch16_clip_224.merged2b_s8b_b131k",
    ]
}

# Every task the wider inference API may request; those absent from
# ALLOWED_TASKS must be rejected by get_pipeline. (The original literal
# listed several entries twice — a set drops duplicates silently, so
# removing them is behavior-preserving.)
ALL_TASKS = {
    "audio-classification",
    "audio-to-audio",
    "automatic-speech-recognition",
    "conversational",
    "feature-extraction",
    "fill-mask",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "speech-segmentation",
    "summarization",
    "table-question-answering",
    "tabular-classification",
    "tabular-regression",
    "text2text-generation",
    "text-classification",
    "text-to-image",
    "text-to-speech",
    "token-classification",
    "zero-shot-classification",
    "zero-shot-image-classification",
}


class PipelineTestCase(TestCase):
    """Sanity checks on the task -> pipeline registration table."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        """Every task outside ALLOWED_TASKS must raise EnvironmentError."""
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images/open_clip
|
hf_public_repos/api-inference-community/docker_images/open_clip/tests/test_api_zero_shot_image_classification.py
|
import json
import os
from base64 import b64encode
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "zero-shot-image-classification" not in ALLOWED_TASKS,
    "zero-shot-image-classification not implemented",
)
@parameterized_class(
    [
        {"model_id": model_id}
        for model_id in TESTABLE_MODELS["zero-shot-image-classification"]
    ]
)
class ZeroShotImageClassificationTestCase(TestCase):
    """End-to-end HTTP tests, parameterized over each testable model.
    `parameterized_class` injects `self.model_id` for every listed model id.
    """
    def setUp(self):
        """Select this parameterization's model and reset the pipeline cache."""
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "zero-shot-image-classification"
        from app.main import app, get_pipeline
        # Cleared per test because the model id differs across the
        # parameterized classes sharing one process.
        get_pipeline.cache_clear()
        self.app = app
    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline
        get_pipeline.cache_clear()
    def tearDown(self):
        """Restore the MODEL_ID/TASK environment captured in setUp."""
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]
    def read(self, filename: str) -> bytes:
        """Load a fixture from tests/samples as raw bytes."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload
    def test_simple(self):
        """A plane image must classify as 'airplane' with high confidence."""
        input_dict = {
            "inputs": b64encode(self.read("plane.jpg")).decode("utf-8"),
            "parameters": {
                "candidate_labels": [
                    "airplane",
                    "superman",
                    "crumpet",
                ],
            },
        }
        with TestClient(self.app) as client:
            response = client.post("/", json=input_dict)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(set(type(el) for el in content), {dict})
        self.assertEqual(
            set((k, type(v)) for el in content for (k, v) in el.items()),
            {("label", str), ("score", float)},
        )
        res = {e["label"]: e["score"] for e in content}
        self.assertGreater(res["airplane"], 0.9)
    def test_different_resolution(self):
        """Same check with an input image at a different resolution."""
        input_dict = {
            "inputs": b64encode(self.read("plane2.jpg")).decode("utf-8"),
            "parameters": {
                "candidate_labels": [
                    "airplane",
                    "superman",
                    "crumpet",
                ],
            },
        }
        with TestClient(self.app) as client:
            response = client.post("/", json=input_dict)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(set(type(el) for el in content), {dict})
        self.assertEqual(
            set(k for el in content for k in el.keys()), {"label", "score"}
        )
        res = {e["label"]: e["score"] for e in content}
        self.assertGreater(res["airplane"], 0.9)
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/doctr/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"
# Add any system dependency here
RUN apt-get update -y && apt-get install libgl1-mesa-glx -y
RUN pip install --no-cache-dir -U pip
RUN pip install --no-cache-dir torch==1.11 torchvision==0.12
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
ENV TORCH_HOME=/data/torch_hub/
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/doctr/requirements.txt
|
starlette==0.27.0
api-inference-community==0.0.23
python-doctr[torch]==0.5.1
huggingface_hub==0.5.1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.