index
int64 0
0
| repo_id
stringclasses 351
values | file_path
stringlengths 26
186
| content
stringlengths 1
990k
|
|---|---|---|---|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/doctr/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/doctr
|
hf_public_repos/api-inference-community/docker_images/doctr/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import ObjectDetectionPipeline, Pipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {"object-detection": ObjectDetectionPipeline}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Build (and memoize) the pipeline selected by the TASK/MODEL_ID env vars.

    Raises:
        EnvironmentError: if TASK names a task this image does not implement.
    """
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    pipeline_cls = ALLOWED_TASKS.get(task)
    if pipeline_cls is None:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return pipeline_cls(model_id)
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Configure uvicorn access logging and eagerly build the pipeline."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]

    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        get_pipeline()
    except Exception:
        # Loading may fail here; swallow the error so the server still boots
        # and the exception can be shown later, on the first request.
        pass
if __name__ == "__main__":
    # Invoked by prestart.sh ("python app/main.py") to warm the model cache
    # before uvicorn starts serving.
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0
|
hf_public_repos/api-inference-community/docker_images/doctr/app
|
hf_public_repos/api-inference-community/docker_images/doctr/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Abstract contract that every task pipeline in this image must satisfy."""

    @abstractmethod
    def __init__(self, model_id: str):
        # Concrete pipelines load their model/weights for `model_id` here.
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        # Concrete pipelines run inference on `inputs` and return the result.
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    """Raised for pipeline-level failures that should surface to the caller."""
|
0
|
hf_public_repos/api-inference-community/docker_images/doctr/app
|
hf_public_repos/api-inference-community/docker_images/doctr/app/pipelines/object_detection.py
|
from typing import Any, Dict, List
import torch
from app.pipelines import Pipeline
from doctr.models.obj_detection.factory import from_hub
from PIL import Image
from torchvision.transforms import Compose, ConvertImageDtype, PILToTensor
class ObjectDetectionPipeline(Pipeline):
    """Object-detection pipeline backed by a docTR model from the HF Hub."""

    def __init__(self, model_id: str):
        # Load the model from the Hugging Face Hub and switch to eval mode.
        self.model = from_hub(model_id).eval()
        # Preprocessing: PIL image -> float32 tensor (batching in __call__).
        self.transform = Compose(
            [
                PILToTensor(),
                ConvertImageDtype(torch.float32),
            ]
        )
        # Human-readable class names; fall back to generic placeholders when
        # the model config does not provide any.
        self.labels = self.model.cfg.get("classes")
        if self.labels is None:
            self.labels = [f"LABEL_{i}" for i in range(self.model.num_classes)]

    def __call__(self, inputs: Image.Image) -> List[Dict[str, Any]]:
        """
        Args:
            inputs (:obj:`PIL.Image`):
                The raw image representation as PIL.
                No transformation made whatsoever from the input. Make all necessary transformations here.
        Return:
            A :obj:`list`:. The list contains items that are dicts with the keys "label", "score" and "box".
        """
        im = inputs.convert("RGB")
        # unsqueeze(0) adds the leading batch dimension expected by the model.
        inputs = self.transform(im).unsqueeze(0)
        with torch.inference_mode():
            out = self.model(inputs)[0]  # predictions for the single image
        # `out` is assumed to hold parallel "labels"/"scores"/"boxes" tensors,
        # with boxes ordered (xmin, ymin, xmax, ymax) -- TODO confirm vs docTR.
        return [
            {
                "label": self.labels[idx],
                "score": score.item(),
                "box": {
                    "xmin": int(round(box[0].item())),
                    "ymin": int(round(box[1].item())),
                    "xmax": int(round(box[2].item())),
                    "ymax": int(round(box[3].item())),
                },
            }
            for idx, score, box in zip(out["labels"], out["scores"], out["boxes"])
        ]
|
0
|
hf_public_repos/api-inference-community/docker_images/doctr/app
|
hf_public_repos/api-inference-community/docker_images/doctr/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.object_detection import ObjectDetectionPipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/doctr
|
hf_public_repos/api-inference-community/docker_images/doctr/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so home-relative paths work.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Always restore the original directory, even after an exception.
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    """Smoke test: the Docker image for this folder must build cleanly."""

    def test_can_build_docker_image(self):
        # Build from the directory containing the Dockerfile (one level above
        # tests/); requires a working docker daemon on the host.
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/doctr
|
hf_public_repos/api-inference-community/docker_images/doctr/tests/test_api.py
|
import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
"object-detection": ["mindee/fasterrcnn_mobilenet_v3_large_fpn"]
}
ALL_TASKS = {
"object-detection",
}
class PipelineTestCase(TestCase):
    """Generic sanity checks on the task registry exposed by app.main."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        # The docker image is useless unless it serves at least one task.
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Every known-but-unimplemented task must be rejected by get_pipeline.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images/doctr
|
hf_public_repos/api-inference-community/docker_images/doctr/tests/test_api_object_detection.py
|
import json
import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "object-detection" not in ALLOWED_TASKS,
    "object-detection not implemented",
)
@parameterized_class(
    [{"model_id": model_id} for model_id in TESTABLE_MODELS["object-detection"]]
)
class ObjectDetectionTestCase(TestCase):
    """End-to-end API tests for the object-detection task."""

    def setUp(self):
        # Save the current env so tearDown can restore it for other tests.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "object-detection"

        from app.main import app, get_pipeline

        # Drop any pipeline cached for a previous MODEL_ID/TASK pair.
        get_pipeline.cache_clear()

        self.app = app

    @classmethod
    def setUpClass(cls):
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env vars overridden in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        """Return the raw bytes of a fixture file under tests/samples/."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def test_simple(self):
        bpayload = self.read("artefacts.jpg")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(set(type(el) for el in content), {dict})
        # BUGFIX: type(v) yields builtin classes, so the "box" entry must be
        # compared against `dict`, not the typing generic `Dict[str, int]` --
        # the two never compare equal, which made this assertion
        # unsatisfiable for any response.
        self.assertEqual(
            set((k, type(v)) for el in content for (k, v) in el.items()),
            {("label", str), ("score", float), ("box", dict)},
        )
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/k2/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="Yenda <jtrmal@gmail.com>"
# Add any system dependency here
RUN apt-get update -y && apt-get install cmake ffmpeg -y && rm -rf /var/lib/apt/lists/*
COPY ./requirements.txt /app
RUN pip install --no-cache-dir torch==1.11.0+cpu torchvision==0.12.0+cpu torchaudio==0.11.0 --extra-index-url https://download.pytorch.org/whl/cpu
RUN pip install --no-cache-dir -r requirements.txt
RUN pip install --no-cache-dir k2==1.17.dev20220719+cpu.torch1.11.0 -f https://k2-fsa.org/nightly/whl/
RUN git clone https://github.com/k2-fsa/sherpa && cd sherpa && git checkout v0.6 && pip install -r ./requirements.txt && python3 setup.py install --verbose
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/k2/requirements.txt
|
starlette==0.27.0
api-inference-community==0.0.23
huggingface_hub==0.5.1
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/k2/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/k2
|
hf_public_repos/api-inference-community/docker_images/k2/app/common.py
|
import functools
import json
from typing import List, Optional, Union
import k2
import kaldifeat
import sentencepiece as spm
import torch
from huggingface_hub import HfApi, hf_hub_download
from sherpa import RnntConformerModel
from .decode import (
run_model_and_do_greedy_search,
run_model_and_do_modified_beam_search,
)
def get_hfconfig(model_id, config_name="hf_demo"):
    """Fetch a model's Hub info and return the `config_name` section of its config.json.

    When `config_name` is None (or the parsed config.json is empty/falsy),
    the full model-info object -- with the parsed config attached as
    `.config` -- is returned instead.

    Raises:
        ValueError: if the requested section is missing from config.json.
    """
    info = HfApi().model_info(repo_id=model_id)
    config_path = hf_hub_download(model_id, filename="config.json")
    with open(config_path) as fh:
        info.config = json.load(fh)

    if not info.config or config_name is None:
        return info
    if config_name not in info.config:
        raise ValueError("Config section " + config_name + " not found")
    return info.config[config_name]
def model_from_hfconfig(hf_repo, hf_config):
    """Build an OfflineAsr recognizer from a Hub repo id and its config dict.

    Downloads the torch-script model plus exactly one of a BPE model or a
    token table, validates the decoding settings, then constructs the
    recognizer.
    """
    nn_model_filename = hf_hub_download(hf_repo, hf_config["nn_model_filename"])
    token_filename = (
        hf_hub_download(hf_repo, hf_config["token_filename"])
        if "token_filename" in hf_config
        else None
    )
    bpe_model_filename = (
        hf_hub_download(hf_repo, hf_config["bpe_model_filename"])
        if "bpe_model_filename" in hf_config
        else None
    )
    # Optional settings with their defaults.
    decoding_method = hf_config.get("decoding_method", "greedy_search")
    sample_rate = hf_config.get("sample_rate", 16000)
    num_active_paths = hf_config.get("num_active_paths", 4)

    assert decoding_method in ("greedy_search", "modified_beam_search"), decoding_method
    if decoding_method == "modified_beam_search":
        assert num_active_paths >= 1, num_active_paths

    # The config must provide one -- and only one -- of the two vocabularies.
    assert bpe_model_filename is not None or token_filename is not None
    if bpe_model_filename:
        assert token_filename is None
    if token_filename:
        assert bpe_model_filename is None

    return OfflineAsr(
        nn_model_filename,
        bpe_model_filename,
        token_filename,
        decoding_method,
        num_active_paths,
        sample_rate,
    )
def transcribe_batch_from_tensor(model, batch):
    """Decode a single waveform with `model` and return its transcript."""
    results = model.decode_waves([batch])
    return results[0]
class OfflineAsr(object):
    """Offline (non-streaming) RNN-T speech recognizer built on sherpa/k2."""

    def __init__(
        self,
        nn_model_filename: str,
        bpe_model_filename: Optional[str],
        token_filename: Optional[str],
        decoding_method: str,
        num_active_paths: int,
        sample_rate: int = 16000,
        device: Union[str, torch.device] = "cpu",
    ):
        """
        Args:
          nn_model_filename:
            Path to the torch script model.
          bpe_model_filename:
            Path to the BPE model. If it is None, you have to provide
            `token_filename`.
          token_filename:
            Path to tokens.txt. If it is None, you have to provide
            `bpe_model_filename`.
          decoding_method:
            The decoding method to use. Currently, only greedy_search and
            modified_beam_search are implemented.
          num_active_paths:
            Used only when decoding_method is modified_beam_search.
            It specifies number of active paths for each utterance. Due to
            merging paths with identical token sequences, the actual number
            may be less than "num_active_paths".
          sample_rate:
            Expected sample rate of the feature extractor.
          device:
            The device to use for computation.
        """
        self.model = RnntConformerModel(
            filename=nn_model_filename,
            device=device,
            optimize_for_inference=False,
        )

        # Exactly one vocabulary is expected (callers validate this); which
        # attribute exists determines how decode_waves() turns IDs into text.
        if bpe_model_filename:
            self.sp = spm.SentencePieceProcessor()
            self.sp.load(bpe_model_filename)
        else:
            self.token_table = k2.SymbolTable.from_file(token_filename)

        self.sample_rate = sample_rate
        self.feature_extractor = self._build_feature_extractor(
            sample_rate=sample_rate,
            device=device,
        )

        assert decoding_method in (
            "greedy_search",
            "modified_beam_search",
        ), decoding_method
        if decoding_method == "greedy_search":
            nn_and_decoding_func = run_model_and_do_greedy_search
        elif decoding_method == "modified_beam_search":
            # Bind the beam width now so the call site is uniform.
            nn_and_decoding_func = functools.partial(
                run_model_and_do_modified_beam_search,
                num_active_paths=num_active_paths,
            )
        else:
            # NOTE(review): unreachable -- the assert above already restricts
            # decoding_method to the two handled values.
            raise ValueError(
                f"Unsupported decoding_method: {decoding_method} "
                "Please use greedy_search or modified_beam_search"
            )

        self.nn_and_decoding_func = nn_and_decoding_func
        self.device = device

    def _build_feature_extractor(
        self,
        sample_rate: int = 16000,
        device: Union[str, torch.device] = "cpu",
    ) -> kaldifeat.OfflineFeature:
        """Build a fbank feature extractor for extracting features.

        Args:
          sample_rate:
            Expected sample rate of the feature extractor.
          device:
            The device to use for computation.
        Returns:
          Return a fbank feature extractor.
        """
        opts = kaldifeat.FbankOptions()
        opts.device = device
        # Deterministic features: no dither; keep edge frames.
        opts.frame_opts.dither = 0
        opts.frame_opts.snip_edges = False
        opts.frame_opts.samp_freq = sample_rate
        opts.mel_opts.num_bins = 80

        fbank = kaldifeat.Fbank(opts)
        return fbank

    def decode_waves(self, waves: List[torch.Tensor]) -> List[List[str]]:
        """
        Args:
          waves:
            A list of 1-D torch.float32 tensors containing audio samples.
            wavs[i] contains audio samples for the i-th utterance.
            Note:
              Whether it should be in the range [-32768, 32767] or be normalized
              to [-1, 1] depends on which range you used for your training data.
              For instance, if your training data used [-32768, 32767],
              then the given waves have to contain samples in this range.
              All models trained in icefall use the normalized range [-1, 1].
        Returns:
          Return a list of decoded results. `ans[i]` contains the decoded
          results for `wavs[i]`.
        """
        waves = [w.to(self.device) for w in waves]
        features = self.feature_extractor(waves)

        tokens = self.nn_and_decoding_func(self.model, features)

        # BPE models decode token IDs directly; otherwise map each ID through
        # the symbol table and join the pieces per utterance.
        if hasattr(self, "sp"):
            results = self.sp.decode(tokens)
        else:
            results = [[self.token_table[i] for i in hyp] for hyp in tokens]
            results = ["".join(r) for r in results]

        return results
|
0
|
hf_public_repos/api-inference-community/docker_images/k2
|
hf_public_repos/api-inference-community/docker_images/k2/app/decode.py
|
# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List
import torch
from sherpa import RnntConformerModel, greedy_search, modified_beam_search
from torch.nn.utils.rnn import pad_sequence
LOG_EPS = math.log(1e-10)
@torch.no_grad()
def run_model_and_do_greedy_search(
    model: RnntConformerModel,
    features: List[torch.Tensor],
) -> List[List[int]]:
    """Run RNN-T model with the given features and use greedy search
    to decode the output of the model.

    Args:
      model:
        The RNN-T model.
      features:
        A list of 2-D tensors. Each entry is of shape
        (num_frames, feature_dim).
    Returns:
      Return a list-of-list containing the decoding token IDs.
    """
    features_length = torch.tensor(
        [f.size(0) for f in features],
        dtype=torch.int64,
    )
    # Pad to a rectangular batch; LOG_EPS marks padded frames with a
    # (log-domain) near-zero probability.
    features = pad_sequence(
        features,
        batch_first=True,
        padding_value=LOG_EPS,
    )

    device = model.device
    features = features.to(device)
    features_length = features_length.to(device)

    encoder_out, encoder_out_length = model.encoder(
        features=features,
        features_length=features_length,
    )

    # sherpa's greedy_search expects the lengths on CPU.
    hyp_tokens = greedy_search(
        model=model,
        encoder_out=encoder_out,
        encoder_out_length=encoder_out_length.cpu(),
    )
    return hyp_tokens
@torch.no_grad()
def run_model_and_do_modified_beam_search(
    model: RnntConformerModel,
    features: List[torch.Tensor],
    num_active_paths: int,
) -> List[List[int]]:
    """Run RNN-T model with the given features and use modified beam search
    to decode the output of the model.

    (Docstring previously said "greedy search" -- a copy/paste leftover.)

    Args:
      model:
        The RNN-T model.
      features:
        A list of 2-D tensors. Each entry is of shape
        (num_frames, feature_dim).
      num_active_paths:
        It specifies number of active paths for each utterance. Due to
        merging paths with identical token sequences, the actual number
        may be less than "num_active_paths".
    Returns:
      Return a list-of-list containing the decoding token IDs.
    """
    features_length = torch.tensor(
        [f.size(0) for f in features],
        dtype=torch.int64,
    )
    # Pad to a rectangular batch; LOG_EPS marks padded frames with a
    # (log-domain) near-zero probability.
    features = pad_sequence(
        features,
        batch_first=True,
        padding_value=LOG_EPS,
    )

    device = model.device
    features = features.to(device)
    features_length = features_length.to(device)

    encoder_out, encoder_out_length = model.encoder(
        features=features,
        features_length=features_length,
    )

    # sherpa's modified_beam_search expects the lengths on CPU.
    hyp_tokens = modified_beam_search(
        model=model,
        encoder_out=encoder_out,
        encoder_out_length=encoder_out_length.cpu(),
        num_active_paths=num_active_paths,
    )
    return hyp_tokens
|
0
|
hf_public_repos/api-inference-community/docker_images/k2
|
hf_public_repos/api-inference-community/docker_images/k2/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import AutomaticSpeechRecognitionPipeline, Pipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline,
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Create the pipeline for the TASK/MODEL_ID environment variables.

    The result is memoized, so repeated calls reuse the loaded model.

    Raises:
        EnvironmentError: if TASK is not one of this image's allowed tasks.
    """
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    if task in ALLOWED_TASKS:
        return ALLOWED_TASKS[task](model_id)
    raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Configure uvicorn access logging and eagerly build the pipeline."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]

    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        get_pipeline()
    except Exception:
        # Loading may fail here; swallow the error so the server still boots
        # and the exception can be shown later, on the first request.
        pass
if __name__ == "__main__":
    # Invoked by prestart.sh ("python app/main.py") to warm the model cache
    # before uvicorn starts serving.
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0
|
hf_public_repos/api-inference-community/docker_images/k2/app
|
hf_public_repos/api-inference-community/docker_images/k2/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Base interface for every pipeline served by this Docker image."""

    @abstractmethod
    def __init__(self, model_id: str):
        """Load whatever `model_id` designates; concrete classes override."""
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        """Run inference on `inputs`; concrete classes override."""
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    """Signals a pipeline-level failure."""
|
0
|
hf_public_repos/api-inference-community/docker_images/k2/app
|
hf_public_repos/api-inference-community/docker_images/k2/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.automatic_speech_recognition import (
AutomaticSpeechRecognitionPipeline,
)
|
0
|
hf_public_repos/api-inference-community/docker_images/k2/app
|
hf_public_repos/api-inference-community/docker_images/k2/app/pipelines/automatic_speech_recognition.py
|
from typing import Dict
import app.common as cx
import numpy as np
import torch
from app.pipelines import Pipeline
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
# See https://github.com/pytorch/pytorch/issues/38342
# and https://github.com/pytorch/pytorch/issues/33354
#
# If we don't do this, the delay increases whenever there is
# a new request that changes the actual batch size.
# If you use `py-spy dump --pid <server-pid> --native`, you will
# see a lot of time is spent in re-compiling the torch script model.
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._set_graph_executor_optimize(False)
class AutomaticSpeechRecognitionPipeline(Pipeline):
    """ASR pipeline backed by a k2/sherpa RNN-T model from the HF Hub."""

    def __init__(self, model_id: str):
        # Read the "hf_demo" section of the repo's config.json and build the
        # OfflineAsr recognizer it describes.
        model_config = cx.get_hfconfig(model_id, "hf_demo")
        self.model = cx.model_from_hfconfig(hf_repo=model_id, hf_config=model_config)
        # Exposed so the API layer can resample incoming audio to match.
        self.sampling_rate = self.model.sample_rate

    def __call__(self, inputs: np.array) -> Dict[str, str]:
        """
        Args:
            inputs (:obj:`np.array`):
                The raw waveform of audio received. By default at self.sampling_rate, otherwise 16KHz.
                Check `app.validation` if a different sample rate is required
                or if it depends on the model
        Return:
            A :obj:`dict`:. The object returned should be like {"text": "XXX"} containing
            the text transcribed from the input audio
        """
        batch = torch.from_numpy(inputs)
        words = cx.transcribe_batch_from_tensor(self.model, batch)

        return {"text": words}
|
0
|
hf_public_repos/api-inference-community/docker_images/k2
|
hf_public_repos/api-inference-community/docker_images/k2/tests/test_api_automatic_speech_recognition.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "automatic-speech-recognition" not in ALLOWED_TASKS,
    "automatic-speech-recognition not implemented",
)
@parameterized_class(
    [
        {"model_id": model_id}
        for model_id in TESTABLE_MODELS["automatic-speech-recognition"]
    ]
)
class AutomaticSpeecRecognitionTestCase(TestCase):
    """End-to-end API tests for the automatic-speech-recognition task."""

    def setUp(self):
        # Save the current env so tearDown can restore it for other tests.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "automatic-speech-recognition"

        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        from app.main import get_pipeline

        # Each parameterized class may test a different MODEL_ID; drop any
        # pipeline cached for a previous one.
        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env vars overridden in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        """Return the raw bytes of a fixture file under tests/samples/."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def test_simple(self):
        # A plain FLAC file should transcribe successfully.
        bpayload = self.read("sample1.flac")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})

    def test_malformed_audio(self):
        # Undecodable audio must yield a 400 with a stable error payload.
        bpayload = self.read("malformed.flac")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')

    def test_dual_channel_audiofile(self):
        # Stereo input should also be accepted.
        bpayload = self.read("sample1_dual.ogg")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})

    def test_webm_audiofile(self):
        # Audio in a WebM container should also be accepted.
        bpayload = self.read("sample1.webm")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})
|
0
|
hf_public_repos/api-inference-community/docker_images/k2
|
hf_public_repos/api-inference-community/docker_images/k2/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so home-relative paths work.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Always restore the original directory, even after an exception.
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    """Smoke test: the Docker image for this folder must build cleanly."""

    def test_can_build_docker_image(self):
        # Build from the directory containing the Dockerfile (one level above
        # tests/); requires a working docker daemon on the host.
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/k2
|
hf_public_repos/api-inference-community/docker_images/k2/tests/test_api.py
|
import os
from typing import Dict, List
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, List[str]] = {
"automatic-speech-recognition": [
"jtrmal/icefall-asr-librispeech-pruned-transducer-stateless3-2022-05-13"
],
}
ALL_TASKS = {
"automatic-speech-recognition",
}
class PipelineTestCase(TestCase):
    """Generic sanity checks on the task registry exposed by app.main."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        # The docker image is useless unless it serves at least one task.
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Every known-but-unimplemented task must be rejected by get_pipeline.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/espnet/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
RUN apt-get update -y && apt-get install ffmpeg -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/espnet/requirements.txt
|
api-inference-community==0.0.32
huggingface_hub==0.18.0
espnet==202310
torch<2.0.1
torchaudio
torch_optimizer
espnet_model_zoo==0.1.7
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/espnet/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/espnet
|
hf_public_repos/api-inference-community/docker_images/espnet/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import ( # AutomaticSpeechRecognitionPipeline,
AutomaticSpeechRecognitionPipeline,
Pipeline,
TextToSpeechPipeline,
)
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"text-to-speech": TextToSpeechPipeline,
"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline,
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Instantiate the pipeline selected by the TASK / MODEL_ID env variables.

    Cached with ``lru_cache`` so the expensive model load happens only once
    per process.

    Raises:
        EnvironmentError: when TASK is not one of ``ALLOWED_TASKS``.
    """
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    if task in ALLOWED_TASKS:
        return ALLOWED_TASKS[task](model_id)
    raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Configure uvicorn access logging and warm the model cache at startup."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]
    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        # Eagerly load the model so the first request is not slow.
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
if __name__ == "__main__":
try:
get_pipeline()
except Exception:
# We can fail so we can show exception later.
pass
|
0
|
hf_public_repos/api-inference-community/docker_images/espnet/app
|
hf_public_repos/api-inference-community/docker_images/espnet/app/pipelines/text_to_speech.py
|
from typing import Tuple
import numpy as np
from app.pipelines import Pipeline
from espnet2.bin.tts_inference import Text2Speech
class TextToSpeechPipeline(Pipeline):
    """ESPnet text-to-speech pipeline (CPU inference)."""

    def __init__(self, model_id: str):
        self.model = Text2Speech.from_pretrained(model_id, device="cpu")
        # Fall back to 16 kHz when the model does not expose its sampling
        # rate through the ``fs`` attribute.
        self.sampling_rate = getattr(self.model, "fs", 16000)

    def __call__(self, inputs: str) -> Tuple[np.array, int]:
        """
        Args:
            inputs (:obj:`str`):
                The text to generate audio from
        Return:
            A :obj:`np.array` and a :obj:`int`: The raw waveform as a numpy array, and the sampling rate as an int.
        """
        synthesis = self.model(inputs)
        return synthesis["wav"].numpy(), self.sampling_rate
|
0
|
hf_public_repos/api-inference-community/docker_images/espnet/app
|
hf_public_repos/api-inference-community/docker_images/espnet/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Abstract contract for an inference pipeline.

    Concrete pipelines load their model in ``__init__`` and run inference
    in ``__call__``.
    """

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Raised by pipelines when processing an input fails."""
|
0
|
hf_public_repos/api-inference-community/docker_images/espnet/app
|
hf_public_repos/api-inference-community/docker_images/espnet/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.automatic_speech_recognition import (
AutomaticSpeechRecognitionPipeline,
)
from app.pipelines.text_to_speech import TextToSpeechPipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/espnet/app
|
hf_public_repos/api-inference-community/docker_images/espnet/app/pipelines/automatic_speech_recognition.py
|
from typing import Dict
import numpy as np
from app.pipelines import Pipeline
from espnet2.bin.asr_inference import Speech2Text
class AutomaticSpeechRecognitionPipeline(Pipeline):
    """ESPnet speech-to-text pipeline (CPU, greedy decoding)."""

    def __init__(self, model_id: str):
        # beam_size=1 keeps decoding fast (greedy search).
        self.model = Speech2Text.from_pretrained(model_id, device="cpu", beam_size=1)
        self.sampling_rate = 16000

    def __call__(self, inputs: np.array) -> Dict[str, str]:
        """
        Args:
            inputs (:obj:`np.array`):
                The raw waveform of audio received. By default at 16KHz.
                Check `app.validation` if a different sample rate is required
                or if it depends on the model
        Return:
            A :obj:`dict`:. The object return should be liked {"text": "XXX"} containing
            the detected language from the input audio
        """
        hypotheses = self.model(inputs)
        # Each hypothesis is a tuple whose first element is the text.
        best_text = hypotheses[0][0]
        return {"text": best_text}
|
0
|
hf_public_repos/api-inference-community/docker_images/espnet
|
hf_public_repos/api-inference-community/docker_images/espnet/tests/test_api_automatic_speech_recognition.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "automatic-speech-recognition" not in ALLOWED_TASKS,
    "automatic-speech-recognition not implemented",
)
class AutomaticSpeecRecognitionTestCase(TestCase):
    """End-to-end API tests for the automatic-speech-recognition task."""
    def setUp(self):
        """Point the app at the testable model, remembering prior env values."""
        model_id = TESTABLE_MODELS["automatic-speech-recognition"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "automatic-speech-recognition"
        from app.main import app
        self.app = app
    @classmethod
    def setUpClass(cls):
        # Clear the lru_cache so this test class gets a fresh pipeline.
        from app.main import get_pipeline
        get_pipeline.cache_clear()
    def tearDown(self):
        # Restore the environment exactly as it was before setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]
    def read(self, filename: str) -> bytes:
        """Read a sample file from tests/samples and return its raw bytes."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload
    def test_original_audiofile(self):
        # A valid FLAC payload must transcribe to a {"text": ...} response.
        bpayload = self.read("sample1.flac")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})
    def test_malformed_audio(self):
        # Invalid audio payloads must yield a 400 with a JSON error body.
        bpayload = self.read("malformed.flac")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')
    def test_dual_channel_audiofile(self):
        # Stereo input should still be accepted by the endpoint.
        bpayload = self.read("sample1_dual.ogg")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})
    def test_webm_audiofile(self):
        # WebM containers should also be decodable by the endpoint.
        bpayload = self.read("sample1.webm")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})
|
0
|
hf_public_repos/api-inference-community/docker_images/espnet
|
hf_public_repos/api-inference-community/docker_images/espnet/tests/test_api_text_to_speech.py
|
import os
from unittest import TestCase, skipIf
from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "text-to-speech" not in ALLOWED_TASKS,
    "text-to-speech not implemented",
)
class TextToSpeechTestCase(TestCase):
    """End-to-end API tests for the text-to-speech task."""
    def setUp(self):
        """Point the app at the testable TTS model, remembering prior env."""
        model_id = TESTABLE_MODELS["text-to-speech"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "text-to-speech"
        from app.main import app
        self.app = app
    @classmethod
    def setUpClass(cls):
        # Clear the lru_cache so this test class gets a fresh pipeline.
        from app.main import get_pipeline
        get_pipeline.cache_clear()
    def tearDown(self):
        # Restore the environment exactly as it was before setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]
    def test_simple(self):
        # Text input must produce a FLAC audio response with a 1-D waveform.
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": "This is some text"})
        self.assertEqual(
            response.status_code,
            200,
        )
        self.assertEqual(response.headers["content-type"], "audio/flac")
        audio = ffmpeg_read(response.content, 16000)
        self.assertEqual(len(audio.shape), 1)
        self.assertGreater(audio.shape[0], 1000)
    def test_malformed_input(self):
        # Non-UTF-8 bytes must be rejected with a 400 and a JSON error body.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(
            response.content,
            b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
        )
|
0
|
hf_public_repos/api-inference-community/docker_images/espnet
|
hf_public_repos/api-inference-community/docker_images/espnet/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so user-relative paths work too.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Record the current directory so it can be restored on exit.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Restore the previous working directory even when the body raised.
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    """Smoke test: the Docker image for this API must build successfully."""
    def test_can_build_docker_image(self):
        # Run ``docker build`` from the directory containing the Dockerfile.
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/espnet
|
hf_public_repos/api-inference-community/docker_images/espnet/tests/test_api.py
|
import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
"text-to-speech": "espnet/kan-bayashi_ljspeech_fastspeech2",
"automatic-speech-recognition": "espnet/kamo-naoyuki_mini_an4_asr_train_raw_bpe_valid.acc.best",
}
ALL_TASKS = {
"automatic-speech-recognition",
"audio-source-separation",
"image-classification",
"question-answering",
"text-generation",
"text-to-speech",
}
class PipelineTestCase(TestCase):
    """Sanity checks on the task registry of this docker image."""
    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )
    def test_unsupported_tasks(self):
        # Every task NOT implemented by this image must raise EnvironmentError.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/timm/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV TORCH_HOME=/data/
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/timm/requirements.txt
|
starlette==0.27.0
api-inference-community==0.0.32
huggingface_hub>=0.11.1
timm>=1.0.7
#dummy
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/timm/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/timm
|
hf_public_repos/api-inference-community/docker_images/timm/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import ImageClassificationPipeline, Pipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"image-classification": ImageClassificationPipeline
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Instantiate the pipeline selected by the TASK / MODEL_ID env variables.

    Cached with ``lru_cache`` so the expensive model load happens only once
    per process.

    Raises:
        EnvironmentError: when TASK is not one of ``ALLOWED_TASKS``.
    """
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    if task in ALLOWED_TASKS:
        return ALLOWED_TASKS[task](model_id)
    raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Configure uvicorn access logging and warm the model cache at startup."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]
    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        # Eagerly load the model so the first request is not slow.
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
if __name__ == "__main__":
try:
get_pipeline()
except Exception:
# We can fail so we can show exception later.
pass
|
0
|
hf_public_repos/api-inference-community/docker_images/timm/app
|
hf_public_repos/api-inference-community/docker_images/timm/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any, Optional
class Pipeline(ABC):
    """Abstract contract for an inference pipeline.

    Concrete pipelines load their model in ``__init__`` and run inference
    in ``__call__``.
    """

    # Optional metadata a concrete pipeline may fill in.
    task: Optional[str] = None
    model_id: Optional[str] = None

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Raised by pipelines when processing an input fails."""
|
0
|
hf_public_repos/api-inference-community/docker_images/timm/app
|
hf_public_repos/api-inference-community/docker_images/timm/app/pipelines/image_classification.py
|
from typing import Any, Dict, List
import timm
import torch
from app.pipelines import Pipeline
from PIL import Image
from timm.data import (
CustomDatasetInfo,
ImageNetInfo,
create_transform,
infer_imagenet_subset,
resolve_model_data_config,
)
class ImageClassificationPipeline(Pipeline):
    """timm-based image classifier loading models from the HF Hub."""
    def __init__(self, model_id: str):
        """Load the model, its eval transform, and resolve class labels."""
        self.model = timm.create_model(f"hf_hub:{model_id}", pretrained=True)
        # Build eval-time preprocessing from the model's own data config.
        self.transform = create_transform(
            **resolve_model_data_config(self.model, use_test_size=True)
        )
        # Return at most 5 predictions (fewer if the model has fewer classes).
        self.top_k = min(self.model.num_classes, 5)
        self.model.eval()
        self.dataset_info = None
        label_names = self.model.pretrained_cfg.get("label_names", None)
        label_descriptions = self.model.pretrained_cfg.get("label_descriptions", None)
        if label_names is None:
            # if no labels added to config, use imagenet labeller in timm
            imagenet_subset = infer_imagenet_subset(self.model)
            if imagenet_subset:
                self.dataset_info = ImageNetInfo(imagenet_subset)
            else:
                # fallback label names
                label_names = [f"LABEL_{i}" for i in range(self.model.num_classes)]
        if self.dataset_info is None:
            self.dataset_info = CustomDatasetInfo(
                label_names=label_names,
                label_descriptions=label_descriptions,
            )
    def __call__(self, inputs: Image.Image) -> List[Dict[str, Any]]:
        """
        Args:
            inputs (:obj:`PIL.Image`):
                The raw image representation as PIL.
                No transformation made whatsoever from the input. Make all necessary transformations here.
        Return:
            A :obj:`list`:. The list contains items that are dicts should be liked {"label": "XXX", "score": 0.82}
            It is preferred if the returned list is in decreasing `score` order
        """
        im = inputs.convert("RGB")
        # Preprocess and add a batch dimension for the model.
        inputs = self.transform(im).unsqueeze(0)
        with torch.no_grad():
            out = self.model(inputs)
        probabilities = out.squeeze(0).softmax(dim=0)
        # torch.topk returns values already sorted in descending order.
        values, indices = torch.topk(probabilities, self.top_k)
        labels = [
            {
                "label": self.dataset_info.index_to_description(i, detailed=True),
                "score": v.item(),
            }
            for i, v in zip(indices, values)
        ]
        return labels
|
0
|
hf_public_repos/api-inference-community/docker_images/timm/app
|
hf_public_repos/api-inference-community/docker_images/timm/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.image_classification import ImageClassificationPipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/timm
|
hf_public_repos/api-inference-community/docker_images/timm/tests/test_api_image_classification.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "image-classification" not in ALLOWED_TASKS,
    "image-classification not implemented",
)
@parameterized_class(
    [{"model_id": model_id} for model_id in TESTABLE_MODELS["image-classification"]]
)
class ImageClassificationTestCase(TestCase):
    """End-to-end API tests for image-classification, run once per test model."""
    def setUp(self):
        """Point the app at this parameterized model, remembering prior env."""
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "image-classification"
        from app.main import app, get_pipeline
        get_pipeline.cache_clear()
        self.app = app
    @classmethod
    def setUpClass(cls):
        # Clear the pipeline cache so previous parameterizations don't leak.
        from app.main import get_pipeline
        get_pipeline.cache_clear()
    def tearDown(self):
        # Restore the environment exactly as it was before setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]
    def read(self, filename: str) -> bytes:
        """Read a sample image from tests/samples and return its raw bytes."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload
    def test_simple(self):
        # A valid JPEG must yield a list of {"label": str, "score": float}.
        bpayload = self.read("plane.jpg")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(set(type(el) for el in content), {dict})
        self.assertEqual(
            set((k, type(v)) for el in content for (k, v) in el.items()),
            {("label", str), ("score", float)},
        )
    def test_different_resolution(self):
        # Images at other resolutions must be resized and still classified.
        bpayload = self.read("plane2.jpg")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(set(type(el) for el in content), {dict})
        self.assertEqual(
            set(k for el in content for k in el.keys()), {"label", "score"}
        )
|
0
|
hf_public_repos/api-inference-community/docker_images/timm
|
hf_public_repos/api-inference-community/docker_images/timm/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so user-relative paths work too.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Record the current directory so it can be restored on exit.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Restore the previous working directory even when the body raised.
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    """Smoke test: the Docker image for this API must build successfully."""
    def test_can_build_docker_image(self):
        # Run ``docker build`` from the directory containing the Dockerfile.
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/timm
|
hf_public_repos/api-inference-community/docker_images/timm/tests/test_api.py
|
import os
from typing import Dict, List
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, List[str]] = {
"image-classification": [
"timm/vit_base_patch32_clip_224.laion2b_ft_in1k",
"timm/convnext_nano.in12k",
"nateraw/timm-resnet50-beans",
]
}
ALL_TASKS = {
"automatic-speech-recognition",
"audio-source-separation",
"image-classification",
"question-answering",
"text-generation",
"text-to-speech",
}
class PipelineTestCase(TestCase):
    """Sanity checks on the task registry of this docker image."""
    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )
    def test_unsupported_tasks(self):
        # Every task NOT implemented by this image must raise EnvironmentError.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/nemo/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.9
LABEL maintainer="me <me@example.com>"
# Add any system dependency here
RUN apt-get update -y && \
apt-get install libsndfile1 ffmpeg -y
# See PyTorch releases for pip here: https://download.pytorch.org/whl/torch_stable.html
COPY ./requirements.txt /app
RUN pip install https://download.pytorch.org/whl/cpu/torch-1.13.1%2Bcpu-cp39-cp39-linux_x86_64.whl && \
pip install Cython numpy==1.21.6
RUN pip install -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
ENV NEMO_CACHE_DIR=/data/nemo_cache/
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/nemo/requirements.txt
|
starlette==0.28.0
api-inference-community==0.0.27
nemo_toolkit[all]>=1.18.1
huggingface_hub==0.15.1
# Dummy
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/nemo/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/nemo
|
hf_public_repos/api-inference-community/docker_images/nemo/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline
from app.pipelines.automatic_speech_recognition import (
AutomaticSpeechRecognitionPipeline,
)
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline,
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Instantiate the pipeline selected by the TASK / MODEL_ID env variables.

    Cached with ``lru_cache`` so the expensive model load happens only once
    per process.

    Raises:
        EnvironmentError: when TASK is not one of ``ALLOWED_TASKS``.
    """
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    if task in ALLOWED_TASKS:
        return ALLOWED_TASKS[task](model_id)
    raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Configure uvicorn access logging and warm the model cache at startup."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]
    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        # Eagerly load the model so the first request is not slow.
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
if __name__ == "__main__":
try:
get_pipeline()
except Exception:
# We can fail so we can show exception later.
pass
|
0
|
hf_public_repos/api-inference-community/docker_images/nemo/app
|
hf_public_repos/api-inference-community/docker_images/nemo/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Abstract contract for an inference pipeline.

    Concrete pipelines load their model in ``__init__`` and run inference
    in ``__call__``.
    """

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Raised by pipelines when processing an input fails."""
|
0
|
hf_public_repos/api-inference-community/docker_images/nemo/app
|
hf_public_repos/api-inference-community/docker_images/nemo/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.automatic_speech_recognition import (
AutomaticSpeechRecognitionPipeline,
)
|
0
|
hf_public_repos/api-inference-community/docker_images/nemo/app
|
hf_public_repos/api-inference-community/docker_images/nemo/app/pipelines/automatic_speech_recognition.py
|
import os
import tempfile
import uuid
from typing import Dict
import librosa
import nemo.collections.asr as nemo_asr
import numpy as np
import soundfile
from app.pipelines import Pipeline
from huggingface_hub import hf_hub_download
from huggingface_hub.hf_api import HfFolder
class AutomaticSpeechRecognitionPipeline(Pipeline):
    """NVIDIA NeMo speech-to-text pipeline."""
    def __init__(self, model_id: str):
        """Download the .nemo checkpoint from the Hub and load it frozen."""
        # IMPLEMENT_THIS
        # Preload all the elements you are going to need at inference.
        # For instance your model, processors, tokenizer that might be needed.
        # This function is only called once, so do all the heavy processing I/O here
        # Precheck for API key
        is_token_available = HfFolder.get_token() is not None
        # Prepare file name from model_id
        filename = model_id.split("/")[-1] + ".nemo"
        path = hf_hub_download(
            repo_id=model_id, filename=filename, use_auth_token=is_token_available
        )
        # Load model
        self.model = nemo_asr.models.ASRModel.restore_from(path)
        self.model.freeze()
        # Pre-Initialize RNNT decoding strategy
        if hasattr(self.model, "change_decoding_strategy"):
            self.model.change_decoding_strategy(None)
        # IMPLEMENT_THIS : Please define a `self.sampling_rate` for this pipeline
        # to automatically read the input correctly
        self.sampling_rate = self.model.cfg.sample_rate
    def __call__(self, inputs: np.array) -> Dict[str, str]:
        """
        Args:
            inputs (:obj:`np.array`):
                The raw waveform of audio received. By default at self.sampling_rate, otherwise 16KHz.
        Return:
            A :obj:`dict`:. The object return should be liked {"text": "XXX"} containing
            the detected language from the input audio
        """
        inputs = self.process_audio_file(inputs)
        # NeMo's transcribe() consumes file paths, so the waveform is written
        # to a temporary WAV file first.
        with tempfile.TemporaryDirectory() as tmpdir:
            audio_path = os.path.join(tmpdir, f"audio_{uuid.uuid4()}.wav")
            soundfile.write(audio_path, inputs, self.sampling_rate)
            transcriptions = self.model.transcribe([audio_path])
            # if transcriptions form a tuple (from RNNT), extract just "best" hypothesis
            if isinstance(transcriptions, tuple) and len(transcriptions) == 2:
                transcriptions = transcriptions[0]
            audio_transcription = transcriptions[0]
            return {"text": audio_transcription}
    def process_audio_file(self, data):
        """Downmix multi-channel audio to a single (mono) channel."""
        # monochannel
        data = librosa.to_mono(data)
        return data
|
0
|
hf_public_repos/api-inference-community/docker_images/nemo
|
hf_public_repos/api-inference-community/docker_images/nemo/tests/test_api_automatic_speech_recognition.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "automatic-speech-recognition" not in ALLOWED_TASKS,
    "automatic-speech-recognition not implemented",
)
class AutomaticSpeecRecognitionTestCase(TestCase):
    """End-to-end API tests for the automatic-speech-recognition task."""
    def setUp(self):
        """Point the app at the testable model, remembering prior env values."""
        model_id = TESTABLE_MODELS["automatic-speech-recognition"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "automatic-speech-recognition"
        from app.main import app
        self.app = app
    @classmethod
    def setUpClass(cls):
        # Clear the lru_cache so this test class gets a fresh pipeline.
        from app.main import get_pipeline
        get_pipeline.cache_clear()
    def tearDown(self):
        # Restore the environment exactly as it was before setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]
    def read(self, filename: str) -> bytes:
        """Read a sample file from tests/samples and return its raw bytes."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload
    def test_simple(self):
        # A valid FLAC payload must transcribe to a {"text": ...} response.
        bpayload = self.read("sample1.flac")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})
    def test_malformed_audio(self):
        # Invalid audio payloads must yield a 400 with a JSON error body.
        bpayload = self.read("malformed.flac")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')
    def test_dual_channel_audiofile(self):
        # Stereo input should still be accepted by the endpoint.
        bpayload = self.read("sample1_dual.ogg")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})
    def test_webm_audiofile(self):
        # WebM containers should also be decodable by the endpoint.
        bpayload = self.read("sample1.webm")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"text"})
|
0
|
hf_public_repos/api-inference-community/docker_images/nemo
|
hf_public_repos/api-inference-community/docker_images/nemo/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager that temporarily switches the current working directory."""

    def __init__(self, newPath):
        # Expand "~" so user-relative paths work too.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        self._original_cwd = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Restore the starting directory even if the body raised.
        os.chdir(self._original_cwd)
class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        """Smoke-test that `docker build` succeeds from the image root."""
        # The Dockerfile lives one level above the tests/ directory.
        image_root = os.path.dirname(os.path.dirname(__file__))
        with cd(image_root):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/nemo
|
hf_public_repos/api-inference-community/docker_images/nemo/tests/test_api.py
|
import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
    # IMPLEMENT_THIS
    "automatic-speech-recognition": "nvidia/stt_en_conformer_ctc_small",
}

# Full set of hub tasks used to check unsupported ones are rejected.
# Duplicate members removed: a set literal collapses repeats anyway, so
# listing a task twice was pure noise. Kept sorted for easy scanning.
ALL_TASKS = {
    "audio-classification",
    "audio-to-audio",
    "automatic-speech-recognition",
    "conversational",
    "feature-extraction",
    "fill-mask",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "speech-segmentation",
    "structured-data-classification",
    "summarization",
    "table-question-answering",
    "text-classification",
    "text-to-image",
    "text-to-speech",
    "text2text-generation",
    "token-classification",
    "zero-shot-classification",
}
class PipelineTestCase(TestCase):
    """Sanity checks shared by every docker-image test suite."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        # An image exposing no task at all is a packaging mistake.
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Every hub task this image does not implement must raise.
        for task in ALL_TASKS - ALLOWED_TASKS.keys():
            with self.subTest(msg=task, task=task):
                os.environ["TASK"] = task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/flair/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
ENV FLAIR_CACHE_ROOT=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/flair/requirements.txt
|
starlette==0.27.0
pydantic==1.8.2
flair @ git+https://github.com/flairNLP/flair@e17ab1234fcfed2b089d8ef02b99949d520382d2
api-inference-community==0.0.25
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/flair/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/flair
|
hf_public_repos/api-inference-community/docker_images/flair/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline, TokenClassificationPipeline
from starlette.applications import Starlette
from starlette.routing import Route
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"token-classification": TokenClassificationPipeline
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Build (once per process) the pipeline selected by TASK / MODEL_ID."""
    task, model_id = os.environ["TASK"], os.environ["MODEL_ID"]
    if task not in ALLOWED_TASKS:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return ALLOWED_TASKS[task](model_id)
# GET (any path) -> health check; POST (any path) -> inference.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
app = Starlette(routes=routes)
# Permissive CORS for local development only.
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware

    app.add_middleware(
        CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"]
    )
@app.on_event("startup")
async def startup_event():
    """Route uvicorn access logs through a timestamped stream handler."""
    access_logger = logging.getLogger("uvicorn.access")
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    )
    access_logger.handlers = [stream_handler]
    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
if __name__ == "__main__":
    try:
        # Warm up the model at container start (called from prestart.sh);
        # failures are deliberately swallowed so boot does not crash.
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0
|
hf_public_repos/api-inference-community/docker_images/flair/app
|
hf_public_repos/api-inference-community/docker_images/flair/app/pipelines/token_classification.py
|
from typing import Any, Dict, List
from app.pipelines import Pipeline
from flair.data import Sentence, Span, Token
from flair.models import SequenceTagger
class TokenClassificationPipeline(Pipeline):
    def __init__(
        self,
        model_id: str,
    ):
        # Loads the flair SequenceTagger weights (from cache or the Hub).
        self.tagger = SequenceTagger.load(model_id)

    def __call__(self, inputs: str) -> List[Dict[str, Any]]:
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list`:. The object returned should be like [{"entity_group": "XXX", "word": "some word", "start": 3, "end": 6, "score": 0.82}] containing :
                - "entity_group": A string representing what the entity is.
                - "word": A substring of the original string that was detected as an entity.
                - "start": the offset within `input` leading to `answer`. context[start:stop] == word
                - "end": the ending offset within `input` leading to `answer`. context[start:stop] === word
                - "score": A score between 0 and 1 describing how confident the model is for this entity.
        """
        sentence = Sentence(inputs)
        self.tagger.predict(sentence)

        entities = []
        for label in sentence.get_labels():
            data_point = label.data_point
            if isinstance(data_point, Token):
                span_start = data_point.start_position
                span_end = data_point.end_position
            elif isinstance(data_point, Span):
                if not data_point.tokens:
                    # Defensive: a span without tokens has no offsets.
                    continue
                span_start = data_point.tokens[0].start_position
                span_end = data_point.tokens[-1].end_position
            else:
                # Other data point kinds are not part of this task's output.
                continue
            entities.append(
                {
                    "entity_group": data_point.tag,
                    "word": data_point.text,
                    "start": span_start,
                    "end": span_end,
                    "score": data_point.score,
                }
            )
        return entities
|
0
|
hf_public_repos/api-inference-community/docker_images/flair/app
|
hf_public_repos/api-inference-community/docker_images/flair/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any, Optional
class Pipeline(ABC):
    """Minimal interface every task pipeline in this image implements."""

    # Filled in by concrete pipelines / the serving layer.
    task: Optional[str] = None
    model_id: Optional[str] = None

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Error type raised by pipelines for invalid input or model failures."""

    pass
|
0
|
hf_public_repos/api-inference-community/docker_images/flair/app
|
hf_public_repos/api-inference-community/docker_images/flair/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.token_classification import TokenClassificationPipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/flair
|
hf_public_repos/api-inference-community/docker_images/flair/tests/test_api_token_classification.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "token-classification" not in ALLOWED_TASKS,
    "token-classification not implemented",
)
@parameterized_class(
    [{"model_id": model_id} for model_id in TESTABLE_MODELS["token-classification"]]
)
class TokenClassificationTestCase(TestCase):
    # `model_id` is injected as a class attribute by @parameterized_class.

    def setUp(self):
        # Save the caller's env so tearDown can restore it exactly.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "token-classification"
        # Imported lazily so app.main reads the env vars set above.
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        from app.main import get_pipeline

        # Drop any pipeline cached by a previously-run test class.
        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env vars captured in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        inputs = "Hello, my name is John and I live in New York"

        # The route accepts both {"inputs": ...} and a bare JSON string.
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(
            set(k for el in content for k in el.keys()),
            {"entity_group", "word", "start", "end", "score"},
        )

        with TestClient(self.app) as client:
            response = client.post("/", json=inputs)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(
            set(k for el in content for k in el.keys()),
            {"entity_group", "word", "start", "end", "score"},
        )

    def test_malformed_question(self):
        # Invalid UTF-8 must surface as a 400 with the decode error message.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")

        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(
            response.content,
            b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
        )
|
0
|
hf_public_repos/api-inference-community/docker_images/flair
|
hf_public_repos/api-inference-community/docker_images/flair/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so user-relative paths are accepted.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember the starting directory so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Restore the original directory even if the body raised.
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        # Build from the directory containing the Dockerfile (one level up
        # from tests/); check_output raises CalledProcessError on failure.
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/flair
|
hf_public_repos/api-inference-community/docker_images/flair/tests/test_api.py
|
import os
from typing import Dict, List
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, List[str]] = {
    "token-classification": ["flair/chunk-english-fast", "flair/upos-english-fast"]
}

# Superset of hub tasks, used to verify unsupported tasks are rejected.
ALL_TASKS = {
    "automatic-speech-recognition",
    "audio-source-separation",
    "image-classification",
    "question-answering",
    "text-generation",
    "text-to-speech",
}
class PipelineTestCase(TestCase):
    """Sanity checks shared by every docker-image test suite."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        # An image exposing no task at all is a packaging mistake.
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Every hub task this image does not implement must raise.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/span_marker/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="Tom Aarsen <ta.aarsen@gmail.com>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/span_marker/requirements.txt
|
starlette==0.27.0
api-inference-community==0.0.32
huggingface_hub>=0.17.3
span_marker>=1.4.0
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/span_marker/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/span_marker
|
hf_public_repos/api-inference-community/docker_images/span_marker/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline, TokenClassificationPipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"token-classification": TokenClassificationPipeline
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Instantiate (and memoize) the pipeline selected via TASK / MODEL_ID."""
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    pipeline_cls = ALLOWED_TASKS.get(task)
    if pipeline_cls is None:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return pipeline_cls(model_id)
# GET (any path) -> health check; POST (any path) -> inference.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
# Compress large responses; permissive CORS only in local development.
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware

    middleware.append(
        Middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_headers=["*"],
            allow_methods=["*"],
        )
    )
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Route uvicorn access logs through a timestamped stream handler."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]


# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline

try:
    # Eagerly build the pipeline so model download/load happens at import.
    get_pipeline()
except Exception:
    # We can fail so we can show exception later.
    pass

if __name__ == "__main__":
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0
|
hf_public_repos/api-inference-community/docker_images/span_marker/app
|
hf_public_repos/api-inference-community/docker_images/span_marker/app/pipelines/token_classification.py
|
from typing import Any, Dict, List
from app.pipelines import Pipeline
from span_marker import SpanMarkerModel
class TokenClassificationPipeline(Pipeline):
    def __init__(
        self,
        model_id: str,
    ) -> None:
        # Downloads/loads the SpanMarker checkpoint from the Hub or cache.
        self.model = SpanMarkerModel.from_pretrained(model_id)

    def __call__(self, inputs: str) -> List[Dict[str, Any]]:
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list`:. The object returned should be like [{"entity_group": "XXX", "word": "some word", "start": 3, "end": 6, "score": 0.82}] containing :
                - "entity_group": A string representing what the entity is.
                - "word": A substring of the original string that was detected as an entity.
                - "start": the offset within `input` leading to `answer`. context[start:stop] == word
                - "end": the ending offset within `input` leading to `answer`. context[start:stop] === word
                - "score": A score between 0 and 1 describing how confident the model is for this entity.
        """
        # Map span_marker's prediction dicts onto the hub's schema.
        return [
            {
                "entity_group": entity["label"],
                "word": entity["span"],
                "start": entity["char_start_index"],
                "end": entity["char_end_index"],
                "score": entity["score"],
            }
            for entity in self.model.predict(inputs)
        ]
|
0
|
hf_public_repos/api-inference-community/docker_images/span_marker/app
|
hf_public_repos/api-inference-community/docker_images/span_marker/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Minimal interface every task pipeline in this image implements."""

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Error type raised by pipelines for invalid input or model failures."""

    pass
|
0
|
hf_public_repos/api-inference-community/docker_images/span_marker/app
|
hf_public_repos/api-inference-community/docker_images/span_marker/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.token_classification import TokenClassificationPipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/span_marker
|
hf_public_repos/api-inference-community/docker_images/span_marker/tests/test_api_token_classification.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "token-classification" not in ALLOWED_TASKS,
    "token-classification not implemented",
)
class TokenClassificationTestCase(TestCase):
    def setUp(self):
        model_id = TESTABLE_MODELS["token-classification"]
        # Save the caller's env so tearDown can restore it exactly.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "token-classification"
        # Imported lazily so app.main reads the env vars set above.
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        from app.main import get_pipeline

        # Drop any pipeline cached by a previously-run test class.
        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env vars captured in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        inputs = "Hello, my name is John and I live in New York"

        # The route accepts both {"inputs": ...} and a bare JSON string.
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(
            set(k for el in content for k in el.keys()),
            {"entity_group", "word", "start", "end", "score"},
        )

        with TestClient(self.app) as client:
            response = client.post("/", json=inputs)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(
            set(k for el in content for k in el.keys()),
            {"entity_group", "word", "start", "end", "score"},
        )

    def test_malformed_question(self):
        # Invalid UTF-8 must surface as a 400 with the decode error message.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")

        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(
            response.content,
            b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
        )
|
0
|
hf_public_repos/api-inference-community/docker_images/span_marker
|
hf_public_repos/api-inference-community/docker_images/span_marker/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so user-relative paths are accepted.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember the starting directory so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Restore the original directory even if the body raised.
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        # Build from the directory containing the Dockerfile (one level up
        # from tests/); check_output raises CalledProcessError on failure.
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/span_marker
|
hf_public_repos/api-inference-community/docker_images/span_marker/tests/test_api.py
|
import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
    # IMPLEMENT_THIS
    "token-classification": "tomaarsen/span-marker-bert-tiny-fewnerd-coarse-super"
}

# Full set of hub tasks used to check unsupported ones are rejected.
# Duplicate members removed: a set literal collapses repeats anyway, so
# listing a task twice was pure noise. Kept sorted for easy scanning.
ALL_TASKS = {
    "audio-classification",
    "audio-to-audio",
    "automatic-speech-recognition",
    "conversational",
    "feature-extraction",
    "fill-mask",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "speech-segmentation",
    "summarization",
    "table-question-answering",
    "tabular-classification",
    "tabular-regression",
    "text-classification",
    "text-to-image",
    "text-to-speech",
    "text2text-generation",
    "token-classification",
    "zero-shot-classification",
}
class PipelineTestCase(TestCase):
    """Sanity checks shared by every docker-image test suite."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        # An image exposing no task at all is a packaging mistake.
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Every hub task this image does not implement must raise.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/bertopic/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="Daniel van Strien <daniel@hf.co> "
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/bertopic/requirements.txt
|
starlette==0.27.0
api-inference-community==0.0.25
huggingface_hub==0.14.0
bertopic==0.15.0
safetensors==0.3.1
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/bertopic/prestart.sh
|
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/bertopic
|
hf_public_repos/api-inference-community/docker_images/bertopic/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline, TextClassificationPipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"text-classification": TextClassificationPipeline
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Build the pipeline named by the TASK env var; cached per process."""
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    if task not in ALLOWED_TASKS:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return ALLOWED_TASKS[task](model_id)
# GET (any path) -> health check; POST (any path) -> inference.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
# Compress large responses; permissive CORS only in local development.
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware

    middleware.append(
        Middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_headers=["*"],
            allow_methods=["*"],
        )
    )
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Route uvicorn access logs through a timestamped stream handler."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]


# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline

try:
    # Eagerly build the pipeline so model download/load happens at import.
    get_pipeline()
except Exception:
    # We can fail so we can show exception later.
    pass

if __name__ == "__main__":
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0
|
hf_public_repos/api-inference-community/docker_images/bertopic/app
|
hf_public_repos/api-inference-community/docker_images/bertopic/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Minimal interface every task pipeline in this image implements."""

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Error type raised by pipelines for invalid input or model failures."""

    pass
|
0
|
hf_public_repos/api-inference-community/docker_images/bertopic/app
|
hf_public_repos/api-inference-community/docker_images/bertopic/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.text_classification import TextClassificationPipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/bertopic/app
|
hf_public_repos/api-inference-community/docker_images/bertopic/app/pipelines/text_classification.py
|
from typing import Dict, List
from app.pipelines import Pipeline
from bertopic import BERTopic
class TextClassificationPipeline(Pipeline):
    def __init__(
        self,
        model_id: str,
    ):
        # Loads a serialized BERTopic model from the Hub or local cache.
        self.model = BERTopic.load(model_id)

    def __call__(self, inputs: str) -> List[List[Dict[str, float]]]:
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list`:. The object returned should be a list of one list like [[{"label": "positive", "score": 0.5}]] containing:
                - "label": A string representing what the label/class is. There can be multiple labels.
                - "score": A score between 0 and 1 describing how confident the model is for this label/class.
        """
        topics, probabilities = self.model.transform(inputs)

        def topic_label(topic_id):
            # Custom labels (when set) are indexed with the outlier offset
            # applied; otherwise fall back to the default topic labels.
            if self.model.custom_labels_ is not None:
                return self.model.custom_labels_[topic_id + self.model._outliers]
            return self.model.topic_labels_[topic_id]

        return [
            [
                {"label": topic_label(topic), "score": float(prob)}
                for topic, prob in zip(topics, probabilities)
            ]
        ]
|
0
|
hf_public_repos/api-inference-community/docker_images/bertopic
|
hf_public_repos/api-inference-community/docker_images/bertopic/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so user-relative paths are accepted.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember the starting directory so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Restore the original directory even if the body raised.
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        # Build from the directory containing the Dockerfile (one level up
        # from tests/); check_output raises CalledProcessError on failure.
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/bertopic
|
hf_public_repos/api-inference-community/docker_images/bertopic/tests/test_api.py
|
import os
from typing import Dict, List
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, List[str]] = {
    "text-classification": ["MaartenGr/BERTopic_ArXiv", "MaartenGr/BERTopic_Wikipedia"],
}

# Full set of hub tasks used to check unsupported ones are rejected.
# Duplicate members removed: a set literal collapses repeats anyway, so
# listing a task twice was pure noise. Kept sorted for easy scanning.
ALL_TASKS = {
    "audio-classification",
    "audio-to-audio",
    "automatic-speech-recognition",
    "conversational",
    "feature-extraction",
    "fill-mask",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "speech-segmentation",
    "summarization",
    "table-question-answering",
    "tabular-classification",
    "tabular-regression",
    "text-classification",
    "text-to-image",
    "text-to-speech",
    "text2text-generation",
    "token-classification",
    "zero-shot-classification",
}
class PipelineTestCase(TestCase):
    """Sanity checks shared by every docker-image test suite."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        # An image exposing no task at all is a packaging mistake.
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Every hub task this image does not implement must raise.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images/bertopic
|
hf_public_repos/api-inference-community/docker_images/bertopic/tests/test_api_text_classification.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"text-classification" not in ALLOWED_TASKS,
"text-classification not implemented",
)
@parameterized_class(
[{"model_id": model_id} for model_id in TESTABLE_MODELS["text-classification"]]
)
class TextClassificationTestCase(TestCase):
    """End-to-end tests of the text-classification route.

    The two happy-path requests in test_simple previously duplicated the same
    assertion block verbatim; it is factored into _assert_classification_payload.
    """

    def setUp(self):
        # Save the ambient env so tearDown can restore it for other suites.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "text-classification"
        # Imported here so app creation sees the env vars set above.
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run suite.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env vars touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def _assert_classification_payload(self, response):
        """Shared assertions for a successful classification response."""
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        # One list of {"label", "score"} dicts per input string.
        self.assertEqual(type(content), list)
        self.assertEqual(len(content), 1)
        self.assertEqual(type(content[0]), list)
        self.assertEqual(
            set(k for el in content[0] for k in el.keys()),
            {"label", "score"},
        )

    def test_simple(self):
        inputs = "It is a beautiful day outside"
        # Both the {"inputs": ...} envelope and a bare JSON string are accepted.
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
        self._assert_classification_payload(response)
        with TestClient(self.app) as client:
            response = client.post("/", json=inputs)
        self._assert_classification_payload(response)

    def test_malformed_question(self):
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(
            response.content,
            b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
        )
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/speechbrain/Dockerfile
|
FROM tiangolo/uvicorn-gunicorn:python3.9
LABEL maintainer="me <me@example.com>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
# ffmpeg is needed for audio decoding/encoding at the API boundary.
RUN apt-get update -y && apt-get install ffmpeg -y
# Install the (large) pinned torch wheel before requirements.txt so the layer
# is cached independently of smaller dependency changes.
RUN pip install --no-cache-dir torch==2.0
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
# Downloaded Hub models are cached under /data (typically a mounted volume).
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
# Copy the application code last so code edits do not bust dependency layers.
COPY ./app /app/app
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/speechbrain/requirements.txt
|
# Pinned Python dependencies for the speechbrain inference container.
starlette==0.27.0
# TODO: Replace with the correct tag once the core PR is merged
api-inference-community==0.0.32
huggingface_hub>=0.7
transformers==4.30.0
# speechbrain installed directly from the tagged GitHub release.
git+https://github.com/speechbrain/speechbrain@v1.0.0
# kenlm has no PyPI release; installed from the GitHub archive.
https://github.com/kpu/kenlm/archive/master.zip
pygtrie
# Dummy.
|
0
|
hf_public_repos/api-inference-community/docker_images
|
hf_public_repos/api-inference-community/docker_images/speechbrain/prestart.sh
|
# Run by the tiangolo/uvicorn-gunicorn base image before workers start;
# app/main.py's __main__ block warms the pipeline so load errors surface early.
python app/main.py
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app/common.py
|
from enum import Enum
from huggingface_hub import HfApi
class ModelType(Enum):
    """Speechbrain interface names (upper-cased) as declared in a repo's config.json."""

    # audio-to-audio
    SEPFORMERSEPARATION = "SEPFORMERSEPARATION"
    SPECTRALMASKENHANCEMENT = "SPECTRALMASKENHANCEMENT"
    WAVEFORMENHANCEMENT = "WAVEFORMENHANCEMENT"
    # automatic-speech-recognition
    ENCODERASR = "ENCODERASR"
    ENCODERDECODERASR = "ENCODERDECODERASR"
    WHISPERASR = "WHISPERASR"
    # audio-classification
    ENCODERCLASSIFIER = "ENCODERCLASSIFIER"
    # text-to-speech
    TACOTRON2 = "TACOTRON2"
    HIFIGAN = "HIFIGAN"
    FASTSPEECH2 = "FASTSPEECH2"
    # text2text-generation
    GRAPHEMETOPHONEME = "GRAPHEMETOPHONEME"
def get_type(model_id, interface_type="speechbrain_interface"):
    """Look up the interface class a Hub repo declares in its config.json.

    Args:
        model_id: Hub repository id to inspect.
        interface_type: key under the "speechbrain" section holding the
            interface name (default: "speechbrain_interface").

    Returns:
        The ModelType matching the declared interface (case-insensitive).

    Raises:
        ValueError: if the repo has no config.json, no "speechbrain" section,
            or the section lacks ``interface_type``.
    """
    info = HfApi().model_info(repo_id=model_id)
    # Guard clauses instead of nested ifs; each failure names the missing piece.
    if not info.config:
        raise ValueError("no config.json in repository")
    if "speechbrain" not in info.config:
        # The old message said "speechbrain_interface", which was misleading
        # when the whole "speechbrain" section is absent.
        raise ValueError("speechbrain not in config.json")
    if interface_type not in info.config["speechbrain"]:
        raise ValueError(f"{interface_type} not in config.json")
    return ModelType(info.config["speechbrain"][interface_type].upper())
def get_vocoder_model_id(model_id):
    """Return the vocoder repo id declared under "speechbrain" in config.json.

    Raises:
        ValueError: if the repo has no config.json, no "speechbrain" section,
            or no "vocoder_model_id" entry.
    """
    info = HfApi().model_info(repo_id=model_id)
    # Guard clauses, mirroring get_type above.
    if not info.config:
        raise ValueError("no config.json in repository")
    if "speechbrain" not in info.config:
        # The old message said "speechbrain_interface", which was misleading
        # when the whole "speechbrain" section is absent.
        raise ValueError("speechbrain not in config.json")
    if "vocoder_model_id" not in info.config["speechbrain"]:
        raise ValueError("vocoder_model_id not in config.json")
    return info.config["speechbrain"]["vocoder_model_id"]
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app/main.py
|
import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import (
AudioClassificationPipeline,
AudioToAudioPipeline,
AutomaticSpeechRecognitionPipeline,
Pipeline,
TextToSpeechPipeline,
TextToTextPipeline,
)
from starlette.applications import Starlette
from starlette.routing import Route
# Selected task and Hub model id, provided via the container environment.
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
# Tasks this speechbrain container exposes, mapped to their pipeline classes.
# get_pipeline() rejects any TASK not listed here.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
    "audio-classification": AudioClassificationPipeline,
    "audio-to-audio": AudioToAudioPipeline,
    "automatic-speech-recognition": AutomaticSpeechRecognitionPipeline,
    "text-to-speech": TextToSpeechPipeline,
    "text2text-generation": TextToTextPipeline,
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Instantiate (once, via lru_cache) the pipeline selected by TASK/MODEL_ID."""
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    try:
        pipeline_cls = ALLOWED_TASKS[task]
    except KeyError:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return pipeline_cls(model_id)
# Any GET path answers the health check; any POST path runs the pipeline.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
app = Starlette(routes=routes)
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware

    # Fully permissive CORS, enabled only for local debugging.
    app.add_middleware(
        CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"]
    )
@app.on_event("startup")
async def startup_event():
    """Configure access logging and eagerly build the pipeline."""
    access_logger = logging.getLogger("uvicorn.access")
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    )
    access_logger.handlers = [stream_handler]
    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        get_pipeline()
    except Exception:
        # Loading may fail here; the exception is surfaced on the first request.
        pass
if __name__ == "__main__":
    # Run by prestart.sh before the server workers start, so model-load
    # problems show up in the logs early.
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/audio_to_audio.py
|
from typing import List, Tuple
import numpy as np
import torch
from app.common import ModelType, get_type
from app.pipelines import Pipeline
from speechbrain.inference import (
SepformerSeparation,
SpectralMaskEnhancement,
WaveformEnhancement,
)
class AudioToAudioPipeline(Pipeline):
    """Speech enhancement / source separation backed by speechbrain interfaces."""

    def __init__(self, model_id: str):
        """Load the interface class declared in the repo's config.json.

        Raises:
            ValueError: if the declared interface is not an audio-to-audio one.
        """
        model_type = get_type(model_id)
        if model_type == ModelType.SEPFORMERSEPARATION:
            self.model = SepformerSeparation.from_hparams(source=model_id)
            self.type = "audio-source-separation"
        elif model_type == ModelType.SPECTRALMASKENHANCEMENT:
            self.model = SpectralMaskEnhancement.from_hparams(source=model_id)
            self.type = "speech-enhancement"
        elif model_type == ModelType.WAVEFORMENHANCEMENT:
            self.type = "speech-enhancement"
            self.model = WaveformEnhancement.from_hparams(source=model_id)
        else:
            raise ValueError(f"{model_type.value} is invalid for audio-to-audio")
        self.sampling_rate = self.model.hparams.sample_rate

    def __call__(self, inputs: np.array) -> Tuple[np.array, int, List[str]]:
        """
        Args:
            inputs (:obj:`np.array`):
                The raw waveform of audio received. By default sampled at `self.sampling_rate`.
                The shape of this array is `T`, where `T` is the time axis
        Return:
            A :obj:`tuple` containing:
            - :obj:`np.array`:
                The return shape of the array must be `C'`x`T'`
            - a :obj:`int`: the sampling rate as an int in Hz.
            - a :obj:`List[str]`: the annotation for each out channel.
                This can be the name of the instruments for audio source separation
                or some annotation for speech enhancement. The length must be `C'`.
        """
        # The constructor only ever sets "speech-enhancement" or
        # "audio-source-separation".  The original had an unreachable `else`
        # branch duplicating the separation call; it is removed here.
        if self.type == "speech-enhancement":
            return self.enhance(inputs)
        return self.separate(inputs)

    def separate(self, inputs):
        """Split a mono waveform into per-source channels (C x T)."""
        mix = torch.from_numpy(inputs)
        est_sources = self.model.separate_batch(mix.unsqueeze(0))
        est_sources = est_sources[0]
        # C x T
        est_sources = est_sources.transpose(1, 0)
        # normalize for loudness
        est_sources = est_sources / est_sources.abs().max(dim=1, keepdim=True).values
        n = est_sources.shape[0]
        # No source names are available from the model; use generic labels.
        labels = [f"label_{i}" for i in range(n)]
        return est_sources.numpy(), int(self.sampling_rate), labels

    def enhance(self, inputs: np.array):
        """Denoise a mono waveform; returns a single enhanced channel."""
        mix = torch.from_numpy(inputs)
        enhanced = self.model.enhance_batch(mix.unsqueeze(0))
        # C x T
        labels = ["speech_enhanced"]
        return enhanced.numpy(), int(self.sampling_rate), labels
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/audio_classification.py
|
from typing import Dict, List
import numpy as np
import torch
from app.common import ModelType, get_type
from app.pipelines import Pipeline
from speechbrain.inference import EncoderClassifier
class AudioClassificationPipeline(Pipeline):
    """Audio classification via a speechbrain EncoderClassifier."""

    def __init__(self, model_id: str):
        model_type = get_type(model_id)
        if model_type != ModelType.ENCODERCLASSIFIER:
            raise ValueError(f"{model_type.value} is invalid for audio-classification")
        self.model = EncoderClassifier.from_hparams(source=model_id)
        # Number of highest-scoring labels to return.
        self.top_k = 5
        # Please define a `self.sampling_rate` for this pipeline
        # to automatically read the input correctly
        self.sampling_rate = 16000

    def __call__(self, inputs: np.array) -> List[Dict[str, float]]:
        """
        Args:
            inputs (:obj:`np.array`):
                The raw waveform of audio received. By default at 16KHz.
        Return:
            A :obj:`list`:. The object returned should be a list like [{"label": "text", "score": 0.9939950108528137}] containing :
            - "label": A string representing what the label/class is. There can be multiple labels.
            - "score": A score between 0 and 1 describing how confident the model is for this label/class.
        """
        batch = torch.from_numpy(inputs).unsqueeze(0)
        rel_length = torch.tensor([1.0])
        raw_scores, _, _, _ = self.model.classify_batch(batch, rel_length)
        scores = torch.softmax(raw_scores[0], dim=0)
        labels = self.model.hparams.label_encoder.decode_ndim(range(len(scores)))
        # Rank (score, label) pairs descending and keep the top_k best.
        ranked = sorted(zip(scores, labels), reverse=True)[: self.top_k]
        return [{"label": label, "score": score.item()} for score, label in ranked]
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/text_to_speech.py
|
from typing import Tuple
import numpy as np
from app.common import ModelType, get_type, get_vocoder_model_id
from app.pipelines import Pipeline
from speechbrain.inference import HIFIGAN, FastSpeech2, Tacotron2
class TextToSpeechPipeline(Pipeline):
    """Text-to-speech pipeline: an acoustic model paired with a HiFi-GAN vocoder."""

    def __init__(self, model_id: str):
        # The repo's config.json declares both the acoustic-model interface and
        # the vocoder repo to pair with it (see app.common helpers).
        model_type = get_type(model_id)
        if model_type is ModelType.TACOTRON2:
            self.model = Tacotron2.from_hparams(source=model_id)
            self.type = "tacotron2"
        elif model_type is ModelType.FASTSPEECH2:
            self.model = FastSpeech2.from_hparams(source=model_id)
            self.type = "fastspeech2"
        else:
            raise ValueError(f"{model_type.value} is invalid for text-to-speech")
        vocoder_type = get_type(model_id, "vocoder_interface")
        vocoder_model_id = get_vocoder_model_id(model_id)
        if vocoder_type is ModelType.HIFIGAN:
            self.vocoder_model = HIFIGAN.from_hparams(source=vocoder_model_id)
        else:
            raise ValueError(
                f"{vocoder_type.value} is invalid vocoder for text-to-speech"
            )
        self.sampling_rate = self.model.hparams.sample_rate

    def __call__(self, inputs: str) -> Tuple[np.array, int]:
        """
        Args:
            inputs (:obj:`str`):
                The text to generate audio from
        Return:
            A :obj:`np.array` and a :obj:`int`: The raw waveform as a numpy array, and the sampling rate as an int.
        """
        # Blank or NUL-only queries are replaced so encode_text gets real text.
        if not inputs.replace("\0", "").strip():
            inputs = "Empty query"
        if self.type == "tacotron2":
            mel_output, _, _ = self.model.encode_text(inputs)
        elif self.type == "fastspeech2":
            # FastSpeech2 takes a batch (list) plus explicit prosody rates.
            mel_output, _, _, _ = self.model.encode_text(
                [inputs], pace=1.0, pitch_rate=1.0, energy_rate=1.0
            )
        # NOTE(review): decode_batch output is returned without squeezing —
        # presumably shaped (1, T); confirm against the response serializer.
        waveforms = self.vocoder_model.decode_batch(mel_output).numpy()
        return waveforms, self.sampling_rate
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/base.py
|
from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Abstract base class every task pipeline in this app implements."""

    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Exception type raised by pipeline implementations."""

    pass
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/text2text_generation.py
|
from typing import Dict, List
from app.common import ModelType, get_type
from app.pipelines import Pipeline
from speechbrain.inference import GraphemeToPhoneme
# Per-model-type output post-processing: G2P yields a sequence of phonemes,
# joined here with dashes into a single string.
POSTPROCESSING = {ModelType.GRAPHEMETOPHONEME: lambda output: "-".join(output)}
class TextToTextPipeline(Pipeline):
    """Text-to-text pipeline; currently only grapheme-to-phoneme models."""

    def __init__(self, model_id: str):
        model_type = get_type(model_id)
        # Guard clause: only G2P interfaces are supported here.
        if model_type != ModelType.GRAPHEMETOPHONEME:
            raise ValueError(f"{model_type.value} is invalid for text-to-text")
        self.model = GraphemeToPhoneme.from_hparams(source=model_id)
        self.post_process = POSTPROCESSING.get(model_type, lambda output: output)

    def __call__(self, inputs: str) -> List[Dict[str, str]]:
        """
        Args:
            inputs (:obj:`str`):
                The input text
        Return:
            A :obj:`list`:. The list contains a single item that is a dict {"text": the model output}
        """
        return [{"generated_text": self.post_process(self.model(inputs))}]
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/__init__.py
|
from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.audio_classification import AudioClassificationPipeline
from app.pipelines.audio_to_audio import AudioToAudioPipeline
from app.pipelines.automatic_speech_recognition import (
AutomaticSpeechRecognitionPipeline,
)
from app.pipelines.text2text_generation import TextToTextPipeline
from app.pipelines.text_to_speech import TextToSpeechPipeline
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app
|
hf_public_repos/api-inference-community/docker_images/speechbrain/app/pipelines/automatic_speech_recognition.py
|
from typing import Dict
import numpy as np
import torch
from app.common import ModelType, get_type
from app.pipelines import Pipeline
from speechbrain.inference import EncoderASR, EncoderDecoderASR, WhisperASR
class AutomaticSpeechRecognitionPipeline(Pipeline):
    """ASR pipeline dispatching to the speechbrain interface declared in config.json."""

    def __init__(self, model_id: str):
        model_type = get_type(model_id)
        if model_type is ModelType.ENCODERASR:
            self.model = EncoderASR.from_hparams(source=model_id)
        elif model_type is ModelType.ENCODERDECODERASR:
            self.model = EncoderDecoderASR.from_hparams(source=model_id)
            # Reduce latency
            self.model.mods.decoder.beam_size = 1
        elif model_type is ModelType.WHISPERASR:
            self.model = WhisperASR.from_hparams(source=model_id)
        else:
            raise ValueError(
                f"{model_type.value} is invalid for automatic-speech-recognition"
            )
        # Please define a `self.sampling_rate` for this pipeline
        # to automatically read the input correctly
        self.sampling_rate = self.model.hparams.sample_rate

    def __call__(self, inputs: np.array) -> Dict[str, str]:
        """
        Args:
            inputs (:obj:`np.array`):
                The raw waveform of audio received. By default at 16KHz.
                Check `app.validation` if a different sample rate is required
                or if it depends on the model
        Return:
            A :obj:`dict`:. The object returned should be like {"text": "XXX"}
            containing the transcription of the input audio.
        """
        batch = torch.from_numpy(inputs).unsqueeze(0)
        # The whole clip is valid audio, hence relative length 1.0.
        rel_length = torch.tensor([1.0])
        predicted_words, predicted_tokens = self.model.transcribe_batch(
            batch, rel_length
        )
        # transcribe_batch returns one transcript per batch element.
        return {"text": predicted_words[0]}
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain
|
hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_api_automatic_speech_recognition.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"automatic-speech-recognition" not in ALLOWED_TASKS,
"automatic-speech-recognition not implemented",
)
@parameterized_class(
[
{"model_id": model_id}
for model_id in TESTABLE_MODELS["automatic-speech-recognition"]
]
)
class AutomaticSpeecRecognitionTestCase(TestCase):
    """End-to-end tests of the ASR route against real fixture audio files.

    NOTE(review): the class name is missing an "h" ("Speec"); kept as-is since
    renaming would change test discovery/reporting.
    """

    def setUp(self):
        # Save the ambient env so tearDown can restore it for other suites.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "automatic-speech-recognition"
        # Imported here so app creation sees the env vars set above.
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run suite.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env vars touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        """Load a fixture from tests/samples as raw bytes."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def test_simple(self):
        bpayload = self.read("sample1.flac")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(set(content.keys()), {"text"})

    def test_malformed_audio(self):
        bpayload = self.read("malformed.flac")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
            self.assertEqual(
                response.status_code,
                400,
            )
            self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')

    def test_dual_channel_audiofile(self):
        # Stereo input should still produce a transcription.
        bpayload = self.read("sample1_dual.ogg")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(set(content.keys()), {"text"})

    def test_webm_audiofile(self):
        # webm container support (browser recordings).
        bpayload = self.read("sample1.webm")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(set(content.keys()), {"text"})
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain
|
hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_api_text_to_speech.py
|
import os
from unittest import TestCase, skipIf
from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"text-to-speech" not in ALLOWED_TASKS,
"text-to-speech not implemented",
)
@parameterized_class(
[{"model_id": model_id} for model_id in TESTABLE_MODELS["text-to-speech"]]
)
class TextToSpeechTestCase(TestCase):
    """End-to-end tests of the text-to-speech route."""

    def setUp(self):
        # Save the ambient env so tearDown can restore it for other suites.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "text-to-speech"
        # Imported here so app creation sees the env vars set above.
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run suite.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env vars touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": "This is some text"})
            self.assertEqual(
                response.status_code,
                200,
            )
            self.assertEqual(response.headers["content-type"], "audio/flac")
            # Decode the returned flac and check for a non-trivial mono waveform.
            audio = ffmpeg_read(response.content, 16000)
            self.assertEqual(len(audio.shape), 1)
            self.assertGreater(audio.shape[0], 1000)

    def test_malformed_input(self):
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
            self.assertEqual(
                response.status_code,
                400,
            )
            self.assertEqual(
                response.content,
                b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
            )
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain
|
hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_docker_build.py
|
import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        # Expand "~" so callers may pass home-relative paths.
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        # Always restore the original directory, even when the body raised.
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    """Smoke test: the container image for this task directory builds."""

    def test_can_build_docker_image(self):
        # Build from the directory above tests/, where the Dockerfile lives.
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain
|
hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_api.py
|
import os
from typing import Dict, List
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
# One real Hub model per implemented interface, so every code path in
# app.pipelines gets exercised.
TESTABLE_MODELS: Dict[str, List[str]] = {
    "audio-classification": [
        # Language Identification
        "speechbrain/lang-id-commonlanguage_ecapa",
        # Command recognition
        "speechbrain/google_speech_command_xvector",
        # Speaker recognition
        "speechbrain/spkrec-xvect-voxceleb",
    ],
    "audio-to-audio": [
        # Speech Enhancement
        "speechbrain/mtl-mimic-voicebank",
        # Source separation
        "speechbrain/sepformer-wham",
    ],
    "automatic-speech-recognition": [
        # ASR with EncoderASR
        "speechbrain/asr-wav2vec2-commonvoice-fr",
        # ASR with EncoderDecoderASR
        "speechbrain/asr-crdnn-commonvoice-it",
        # ASR with WhisperASR
        "speechbrain/asr-whisper-large-v2-commonvoice-fr",
    ],
    "text-to-speech": [
        "speechbrain/tts-tacotron2-ljspeech",
        "speechbrain/tts-fastspeech2-ljspeech",
    ],
    "text2text-generation": [
        # SoundChoice G2P
        "speechbrain/soundchoice-g2p"
    ],
}
# Superset of task names used to verify get_pipeline rejects anything this
# container does not implement.
ALL_TASKS = {
    "audio-classification",
    "audio-to-audio",
    "automatic-speech-recognition",
    "audio-source-separation",
    "image-classification",
    "question-answering",
    "text-generation",
    "text-to-speech",
}
class PipelineTestCase(TestCase):
    """Sanity checks on the task registry wiring in app.main."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                # get_pipeline() takes no arguments — it reads TASK/MODEL_ID
                # from the environment.  The previous code passed them as
                # parameters, which raised TypeError instead of the
                # EnvironmentError this test asserts.
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain
|
hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_api_audio_to_audio.py
|
import base64
import json
import os
from unittest import TestCase, skipIf
from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"audio-to-audio" not in ALLOWED_TASKS,
"audio-to-audio not implemented",
)
@parameterized_class(
[{"model_id": model_id} for model_id in TESTABLE_MODELS["audio-to-audio"]]
)
class AudioToAudioTestCase(TestCase):
    """End-to-end tests of the audio-to-audio route.

    The three happy-path tests previously repeated the same 11-line assertion
    block verbatim; it is factored into _assert_audio_payload, and posting a
    fixture file is factored into _post_audio.
    """

    def setUp(self):
        # Save the ambient env so tearDown can restore it for other suites.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "audio-to-audio"
        # Imported here so app creation sees the env vars set above.
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run suite.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env vars touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        """Load a fixture from tests/samples as raw bytes."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def _post_audio(self, filename: str):
        """POST a fixture file to the route and return the response."""
        bpayload = self.read(filename)
        with TestClient(self.app) as client:
            return client.post("/", data=bpayload)

    def _assert_audio_payload(self, response):
        """Shared assertions for a successful audio-to-audio response."""
        self.assertEqual(
            response.status_code,
            200,
        )
        self.assertEqual(response.headers["content-type"], "application/json")
        audio = json.loads(response.content)
        self.assertTrue(isinstance(audio, list))
        self.assertEqual(set(audio[0].keys()), {"blob", "content-type", "label"})
        # The base64 blob must decode to a non-trivial waveform.
        data = base64.b64decode(audio[0]["blob"])
        wavform = ffmpeg_read(data, 16000)
        self.assertGreater(wavform.shape[0], 1000)
        self.assertTrue(isinstance(audio[0]["content-type"], str))
        self.assertTrue(isinstance(audio[0]["label"], str))

    def test_simple(self):
        self._assert_audio_payload(self._post_audio("sample1.flac"))

    def test_malformed_audio(self):
        response = self._post_audio("malformed.flac")
        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')

    def test_dual_channel_audiofile(self):
        self._assert_audio_payload(self._post_audio("sample1_dual.ogg"))

    def test_webm_audiofile(self):
        self._assert_audio_payload(self._post_audio("sample1.webm"))
|
0
|
hf_public_repos/api-inference-community/docker_images/speechbrain
|
hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_api_audio_classification.py
|
import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"audio-classification" not in ALLOWED_TASKS,
"audio-classification not implemented",
)
@parameterized_class(
[{"model_id": model_id} for model_id in TESTABLE_MODELS["audio-classification"]]
)
class AudioClassificationTestCase(TestCase):
    """End-to-end tests of the audio-classification route.

    The three happy-path tests previously repeated the same assertion block
    verbatim; it is factored into _assert_classification_payload, and posting
    a fixture file is factored into _post_audio.
    """

    def setUp(self):
        # Save the ambient env so tearDown can restore it for other suites.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "audio-classification"
        # Imported here so app creation sees the env vars set above.
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run suite.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env vars touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        """Load a fixture from tests/samples as raw bytes."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def _post_audio(self, filename: str):
        """POST a fixture file to the route and return the response."""
        bpayload = self.read(filename)
        with TestClient(self.app) as client:
            return client.post("/", data=bpayload)

    def _assert_classification_payload(self, response):
        """Shared assertions for a successful classification response."""
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        # A flat list of {"label", "score"} dicts.
        self.assertEqual(type(content), list)
        self.assertEqual(type(content[0]), dict)
        self.assertEqual(
            set(k for el in content for k in el.keys()),
            {"label", "score"},
        )

    def test_simple(self):
        self._assert_classification_payload(self._post_audio("sample1.flac"))

    def test_malformed_audio(self):
        response = self._post_audio("malformed.flac")
        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')

    def test_dual_channel_audiofile(self):
        self._assert_classification_payload(self._post_audio("sample1_dual.ogg"))

    def test_webm_audiofile(self):
        self._assert_classification_payload(self._post_audio("sample1.webm"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.