index int64 0 0 | repo_id stringclasses 351 values | file_path stringlengths 26 186 | content stringlengths 1 990k |
|---|---|---|---|
0 | hf_public_repos/api-inference-community/docker_images/speechbrain | hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_api_text2text_generation.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "text2text-generation" not in ALLOWED_TASKS,
    "text2text-generation not implemented",
)
@parameterized_class(
    [{"model_id": model_id} for model_id in TESTABLE_MODELS["text2text-generation"]]
)
class TextToSpeechTestCase(TestCase):
    # NOTE(review): the class name says "TextToSpeech" but this suite exercises
    # the text2text-generation task (grapheme-to-phoneme output below) — likely
    # a copy-paste name. Confirm before renaming: runners select tests by name.

    def setUp(self):
        """Point the app at the parameterized model/task, saving the prior env."""
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "text2text-generation"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run parameterized class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env vars overwritten in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        """POST a sentence and expect HTTP 200 with the phonemized text."""
        with TestClient(self.app) as client:
            response = client.post(
                "/",
                json={
                    "inputs": "English is tough. It can be understood "
                    "through thorough thought though."
                },
            )
            self.assertEqual(
                response.status_code,
                200,
            )
            result = json.loads(response.content)
            self.assertEqual(type(result), list)
            self.assertEqual(
                "IH-NG-G-L-IH-SH- -IH-Z- -T-AH-F- -IH-T- -K-AE-N- -B-IY- -"
                "AH-N-D-ER-S-T-UH-D- -TH-R-UW- -TH-ER-OW- -TH-AO-T- -DH-OW",
                result[0]["generated_text"],
            )
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/stanza/Dockerfile | FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/stanza/requirements.txt | starlette==0.27.0
api-inference-community==0.0.23
huggingface_hub==0.5.1
stanza==1.3.0
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/stanza/prestart.sh | python app/main.py
|
0 | hf_public_repos/api-inference-community/docker_images/stanza | hf_public_repos/api-inference-community/docker_images/stanza/app/batch.py | #!/usr/bin/env python
import os

from api_inference_community.batch import batch
from app.main import get_pipeline

# Batch-inference entry point: all configuration is taken from the environment.
DATASET_NAME = os.getenv("DATASET_NAME")
DATASET_CONFIG = os.getenv("DATASET_CONFIG", None)
DATASET_SPLIT = os.getenv("DATASET_SPLIT")
DATASET_COLUMN = os.getenv("DATASET_COLUMN")
# Either "1" or "true" (case-insensitive) enables GPU usage.
USE_GPU = os.getenv("USE_GPU", "0").lower() in {"1", "true"}
TOKEN = os.getenv("TOKEN")
REPO_ID = os.getenv("REPO_ID")
TASK = os.getenv("TASK")

if __name__ == "__main__":
    # Run the selected dataset column through the pipeline configured by
    # TASK/MODEL_ID (resolved inside get_pipeline) and upload the results.
    batch(
        dataset_name=DATASET_NAME,
        dataset_config=DATASET_CONFIG,
        dataset_split=DATASET_SPLIT,
        dataset_column=DATASET_COLUMN,
        token=TOKEN,
        repo_id=REPO_ID,
        use_gpu=USE_GPU,
        pipeline=get_pipeline(),
        task=TASK,
    )
|
0 | hf_public_repos/api-inference-community/docker_images/stanza | hf_public_repos/api-inference-community/docker_images/stanza/app/main.py | import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline, TokenClassificationPipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")

logger = logging.getLogger(__name__)

# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
# This image implements only token-classification, backed by stanza.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
    "token-classification": TokenClassificationPipeline
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Build the pipeline selected by the TASK/MODEL_ID env vars (memoized)."""
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    if task in ALLOWED_TASKS:
        return ALLOWED_TASKS[task](model_id)
    raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
# Both routes match any path: the first serves the health check, the second
# runs the pipeline on POST requests.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]

middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware

    # Wide-open CORS is intentionally gated behind DEBUG=1 (local dev only).
    middleware.append(
        Middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_headers=["*"],
            allow_methods=["*"],
        )
    )

app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Normalize uvicorn access-log formatting and warm up the pipeline."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]

    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass


if __name__ == "__main__":
    # Running the module directly (see prestart.sh) pre-downloads the model
    # before the server workers start.
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0 | hf_public_repos/api-inference-community/docker_images/stanza/app | hf_public_repos/api-inference-community/docker_images/stanza/app/pipelines/token_classification.py | import os
from typing import Any, Dict, List
import stanza
from app.pipelines import Pipeline
from stanza import Pipeline as pipeline
class TokenClassificationPipeline(Pipeline):
    def __init__(
        self,
        model_id: str,
    ):
        # model_id is expected to look like "stanfordnlp/stanza-<lang>".
        namespace, model_name = model_id.split("/")
        path = os.path.join(
            os.environ.get("HUGGINGFACE_HUB_CACHE", "."), namespace, model_name
        )
        lang = model_name.replace("stanza-", "")
        # Fetch the stanza resources for the language into the cache dir,
        # then build the runtime pipeline from the same location.
        stanza.download(model_dir=path, lang=lang)
        self.model = pipeline(model_dir=path, lang=lang)

    def __call__(self, inputs: str) -> List[Dict[str, Any]]:
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list`:. The object returned should be like [{"entity_group": "XXX", "word": "some word", "start": 3, "end": 6, "score": 0.82}] containing :
            - "entity_group": A string representing what the entity is.
            - "word": A substring of the original string that was detected as an entity.
            - "start": the offset within `input` leading to `answer`. context[start:stop] == word
            - "end": the ending offset within `input` leading to `answer`. context[start:stop] === word
            - "score": A score between 0 and 1 describing how confident the model is for this entity.
        """
        doc = self.model(inputs)
        entities = []
        # Languages with an NER model report named entities; others fall back
        # to per-word POS tags (entity_group is then a UPOS tag).
        if "ner_model_path" in self.model.config.keys():
            for entity in doc.entities:
                entity_dict = {
                    "entity_group": entity.type,
                    "word": entity.text,
                    "start": entity.start_char,
                    "end": entity.end_char,
                    # stanza exposes no confidence here, so 1.0 is reported.
                    "score": 1.0,
                }
                entities.append(entity_dict)
        else:
            for sent in doc.sentences:
                for entity in sent.words:
                    entity_dict = {
                        "entity_group": entity.upos,
                        "word": entity.text,
                        "start": entity.start_char,
                        "end": entity.end_char,
                        "score": 1.0,
                    }
                    entities.append(entity_dict)
        return entities
|
0 | hf_public_repos/api-inference-community/docker_images/stanza/app | hf_public_repos/api-inference-community/docker_images/stanza/app/pipelines/base.py | from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Minimal contract that every task pipeline must fulfil."""

    @abstractmethod
    def __init__(self, model_id: str):
        """Load the model identified by ``model_id``; heavy I/O belongs here."""
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        """Run inference on ``inputs`` and return the task-specific result."""
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    """Raised by pipelines for task-level failures."""

    pass
|
0 | hf_public_repos/api-inference-community/docker_images/stanza/app | hf_public_repos/api-inference-community/docker_images/stanza/app/pipelines/__init__.py | from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.token_classification import TokenClassificationPipeline
|
0 | hf_public_repos/api-inference-community/docker_images/stanza | hf_public_repos/api-inference-community/docker_images/stanza/tests/test_api_token_classification.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "token-classification" not in ALLOWED_TASKS,
    "token-classification not implemented",
)
@parameterized_class(
    ("model_id", "inputs"),
    [
        [[model_id, lang[model_id]] for model_id in lang]
        for lang in TESTABLE_MODELS["token-classification"]
    ],
)
class TokenClassificationTestCase(TestCase):
    # NOTE(review): each parameter row built above is a one-element list holding
    # a [model_id, inputs] pair, so `self.model_id` ends up bound to that pair
    # (hence the [0]/[1] indexing below) and the declared `inputs` field is not
    # used directly — confirm against TESTABLE_MODELS' shape before simplifying.

    def setUp(self):
        """Point the app at the parameterized model, saving the previous env."""
        model_id = self.model_id[0]  # first element of the [model_id, inputs] pair
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "token-classification"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run parameterized class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the env vars overwritten in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        """Both {"inputs": ...} payloads and bare JSON strings must work."""
        inputs = self.model_id[1]  # sample sentence paired with the model id
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual(
                set(k for el in content for k in el.keys()),
                {"entity_group", "word", "start", "end", "score"},
            )
        with TestClient(self.app) as client:
            response = client.post("/", json=inputs)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual(
                set(k for el in content for k in el.keys()),
                {"entity_group", "word", "start", "end", "score"},
            )

    def test_malformed_question(self):
        """A body that is not valid UTF-8 must yield a 400 with a decode error."""
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
            self.assertEqual(
                response.status_code,
                400,
            )
            self.assertEqual(
                response.content,
                b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
            )
|
0 | hf_public_repos/api-inference-community/docker_images/stanza | hf_public_repos/api-inference-community/docker_images/stanza/tests/test_docker_build.py | import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager that temporarily switches the working directory."""

    def __init__(self, newPath):
        # Resolve "~" eagerly so __enter__ is a plain chdir.
        self._target = os.path.expanduser(newPath)

    def __enter__(self):
        self._origin = os.getcwd()
        os.chdir(self._target)

    def __exit__(self, etype, value, traceback):
        # Restore the original directory even when the body raised.
        os.chdir(self._origin)
class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        """Smoke-test that the image builds; requires a working docker daemon."""
        # Build from the framework's root directory (one level above tests/),
        # where the Dockerfile lives.
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0 | hf_public_repos/api-inference-community/docker_images/stanza | hf_public_repos/api-inference-community/docker_images/stanza/tests/test_api.py | import os
from typing import Dict, List
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
# Each list entry maps a model id to a sample sentence in that model's language.
TESTABLE_MODELS: Dict[str, List[Dict]] = {
    "token-classification": [
        {"stanfordnlp/stanza-en": "Hello, my name is John and I live in New York"},
        {"stanfordnlp/stanza-tr": "Merhaba, adım Merve ve İstanbul'da yaşıyorum."},
    ]
}

# Superset of known task names; everything here that is NOT in ALLOWED_TASKS
# must be rejected by get_pipeline (see PipelineTestCase.test_unsupported_tasks).
ALL_TASKS = {
    "audio-classification",
    "audio-to-audio",
    "automatic-speech-recognition",
    "feature-extraction",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "structured-data-classification",
    "speech-segmentation",
    "text-to-speech",
    "token-classification",
}
class PipelineTestCase(TestCase):
    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        # The "common" template intentionally ships with an empty ALLOWED_TASKS.
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        """Every known-but-unimplemented task must be rejected by get_pipeline."""
        # NOTE(review): get_pipeline is lru_cache'd with no arguments; if a
        # previous test cached a successful pipeline, this call would return it
        # instead of raising — confirm whether cache_clear is needed here.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/common/Dockerfile | FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/common/requirements.txt | starlette==0.27.0
api-inference-community==0.0.32
huggingface_hub==0.11.0
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/common/prestart.sh | python app/main.py
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/app/main.py | import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")

logger = logging.getLogger(__name__)

# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
# This "common" template intentionally ships with no tasks enabled.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
    # IMPLEMENT_THIS: Add your implemented tasks here !
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
    """Instantiate the pipeline configured via TASK/MODEL_ID exactly once."""
    task = os.environ["TASK"]
    model_id = os.environ["MODEL_ID"]
    pipeline_cls = ALLOWED_TASKS.get(task)
    if pipeline_cls is None:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return pipeline_cls(model_id)
# Both routes match any path: the first serves the health check, the second
# runs the pipeline on POST requests.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]

middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
    from starlette.middleware.cors import CORSMiddleware

    # Wide-open CORS is intentionally gated behind DEBUG=1 (local dev only).
    middleware.append(
        Middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_headers=["*"],
            allow_methods=["*"],
        )
    )

app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Normalize uvicorn access-log formatting and warm up the pipeline."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]

    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass


if __name__ == "__main__":
    # Running the module directly (see prestart.sh) pre-downloads the model
    # before the server workers start.
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/feature_extraction.py | from typing import List
from app.pipelines import Pipeline
class FeatureExtractionPipeline(Pipeline):
    def __init__(self, model_id: str):
        # IMPLEMENT_THIS: preload the model/processors/tokenizer for `model_id`.
        # This constructor runs exactly once, so heavy I/O belongs here.
        raise NotImplementedError(
            "Please implement FeatureExtractionPipeline __init__ function"
        )

    def __call__(self, inputs: str) -> List[float]:
        """Compute the feature vector of ``inputs``.

        Args:
            inputs (:obj:`str`):
                a string to get the features of.
        Return:
            A :obj:`list` of floats: The features computed by the model.
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement FeatureExtractionPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/token_classification.py | from typing import Any, Dict, List
from app.pipelines import Pipeline
class TokenClassificationPipeline(Pipeline):
    def __init__(self, model_id: str):
        # IMPLEMENT_THIS: preload the model/tokenizer for `model_id` here;
        # this constructor runs only once, so heavy I/O belongs in it.
        raise NotImplementedError(
            "Please implement TokenClassificationPipeline __init__ function"
        )

    def __call__(self, inputs: str) -> List[Dict[str, Any]]:
        """Tag entities found in ``inputs``.

        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list` of dicts like
            {"entity_group": "XXX", "word": "some word", "start": 3, "end": 6, "score": 0.82} where
            - "entity_group": what the detected entity is,
            - "word": the substring of the input detected as an entity,
            - "start"/"end": offsets such that inputs[start:end] == word,
            - "score": model confidence in [0, 1].
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement TokenClassificationPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/speech_segmentation.py | from typing import Dict
import numpy as np
from app.pipelines import Pipeline
class SpeechSegmentationPipeline(Pipeline):
    def __init__(self, model_id: str):
        # IMPLEMENT_THIS
        # Preload all the elements you are going to need at inference.
        # For instance your model, processors, tokenizer that might be needed.
        # This function is only called once, so do all the heavy processing I/O here
        # IMPLEMENT_THIS : Please define a `self.sampling_rate` for this pipeline
        # to automatically read the input correctly
        self.sampling_rate = 16000
        raise NotImplementedError(
            "Please implement SpeechSegmentationPipeline __init__ function"
        )

    # Annotation fixed to match the documented contract: the method returns a
    # list of segment dicts, not a single Dict[str, str]. Quoted so no extra
    # typing imports are required at runtime.
    def __call__(self, inputs: np.array) -> "List[Dict[str, Any]]":
        """
        Args:
            inputs (:obj:`np.array`):
                The raw waveform of audio received. By default at self.sampling_rate, otherwise 16KHz.
        Return:
            A :obj:`list`:. Each item in the list is like {"class": "XXX", "start": float, "end": float}
            "class" is the associated class of the audio segment, "start" and "end" are markers expressed in seconds
            within the audio file.
        """
        # IMPLEMENT_THIS
        # api_inference_community.normalizers.speaker_diarization_normalize could help.
        raise NotImplementedError(
            "Please implement SpeechSegmentationPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/image_to_image.py | from typing import TYPE_CHECKING, Optional
from app.pipelines import Pipeline
if TYPE_CHECKING:
from PIL import Image
class ImageToImagePipeline(Pipeline):
    def __init__(self, model_id: str):
        # IMPLEMENT_THIS
        # Preload all the elements you are going to need for inference.
        # For instance your model, processors, tokenizer that might be needed.
        # This function is only called once, so do all the heavy processing I/O here
        raise NotImplementedError(
            "Please implement ImageToImagePipeline.__init__ function"
        )

    # Bug fix: `Image` is imported only under TYPE_CHECKING, so the previously
    # unquoted `Image.Image` parameter annotation raised NameError at import
    # time. It is now a string forward reference, like the return annotation.
    def __call__(self, image: "Image.Image", prompt: Optional[str] = "") -> "Image.Image":
        """
        Args:
            image (:obj:`PIL.Image.Image`):
                a condition image
            prompt (:obj:`str`, *optional*):
                a string containing some text
        Return:
            A :obj:`PIL.Image` with the raw image representation as PIL.
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement ImageToImagePipeline.__call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/audio_to_audio.py | from typing import List, Tuple
import numpy as np
from app.pipelines import Pipeline
class AudioToAudioPipeline(Pipeline):
    def __init__(self, model_id: str):
        # IMPLEMENT_THIS: preload the model for `model_id`; this constructor
        # runs only once, so heavy I/O belongs here.
        # IMPLEMENT_THIS: define `self.sampling_rate` so the server decodes
        # incoming audio at the rate this model expects.
        self.sampling_rate = 16000
        raise NotImplementedError(
            "Please implement AudioToAudioPipeline __init__ function"
        )

    def __call__(self, inputs: np.array) -> Tuple[np.array, int, List[str]]:
        """Map an input waveform to one or more output audio channels.

        Args:
            inputs (:obj:`np.array`):
                Raw waveform sampled at `self.sampling_rate`, shape `T`
                (the time axis).
        Return:
            A :obj:`tuple` of
            - :obj:`np.array` of shape `C'` x `T'` (output channels),
            - the output sampling rate in Hz (:obj:`int`),
            - a :obj:`List[str]` of per-channel annotations, length `C'`
              (e.g. instrument names for source separation, or labels for
              speech enhancement).
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement AudioToAudioPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/audio_classification.py | from typing import Dict, List
import numpy as np
from app.pipelines import Pipeline
class AudioClassificationPipeline(Pipeline):
    def __init__(self, model_id: str):
        # IMPLEMENT_THIS
        # Preload all the elements you are going to need at inference.
        # For instance your model, processors, tokenizer that might be needed.
        # This function is only called once, so do all the heavy processing I/O here
        # IMPLEMENT_THIS : Please define a `self.sampling_rate` for this pipeline
        # to automatically read the input correctly
        self.sampling_rate = 16000
        raise NotImplementedError(
            "Please implement AudioClassificationPipeline __init__ function"
        )

    def __call__(self, inputs: np.array) -> List[Dict[str, float]]:
        """
        Args:
            inputs (:obj:`np.array`):
                The raw waveform of audio received. By default at 16KHz.
        Return:
            A :obj:`list`:. The object returned should be a list like [{"label": "text", "score": 0.9939950108528137}] containing :
            - "label": A string representing what the label/class is. There can be multiple labels.
            - "score": A score between 0 and 1 describing how confident the model is for this label/class.
        """
        # IMPLEMENT_THIS
        # Fixed copy-paste: this placeholder previously said "__init__ function",
        # which made a missing __call__ implementation look like a constructor bug.
        raise NotImplementedError(
            "Please implement AudioClassificationPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/tabular_classification_pipeline.py | from typing import Dict, List, Union
from app.pipelines import Pipeline
class TabularClassificationPipeline(Pipeline):
    def __init__(self, model_id: str):
        # IMPLEMENT_THIS
        # Preload all the elements you are going to need at inference.
        # For instance your model, processors, tokenizer that might be needed.
        # This function is only called once, so do all the heavy processing I/O here
        raise NotImplementedError(
            "Please implement TabularClassificationPipeline __init__ function"
        )

    def __call__(
        self, inputs: Dict[str, Dict[str, List[Union[int, str, float]]]]
    ) -> List[Union[int, str, float]]:
        """
        Args:
            inputs (:obj:`dict`):
                a dictionary containing a key 'data' mapping to a dict in which
                the values represent each column.
        Return:
            A :obj:`list` of int, str, or float: The classification output for each row.
        """
        # IMPLEMENT_THIS
        # Fixed copy-paste: this placeholder previously said "__init__ function",
        # masking which method actually needs implementing.
        raise NotImplementedError(
            "Please implement TabularClassificationPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/tabular_regression_pipeline.py | from typing import Dict, List, Union
from app.pipelines import Pipeline
class TabularRegressionPipeline(Pipeline):
    def __init__(self, model_id: str):
        # IMPLEMENT_THIS
        # Preload all the elements you are going to need at inference.
        # For instance your model, processors, tokenizer that might be needed.
        # This function is only called once, so do all the heavy processing I/O here
        raise NotImplementedError(
            "Please implement TabularRegressionPipeline __init__ function"
        )

    def __call__(
        self, inputs: Dict[str, Dict[str, List[Union[int, str, float]]]]
    ) -> List[float]:
        """
        Args:
            inputs (:obj:`dict`):
                a dictionary containing a key 'data' mapping to a dict in which
                the values represent each column.
        Return:
            A :obj:`list` of float: The regression output for each row.
        """
        # IMPLEMENT_THIS
        # Fixed copy-paste: this placeholder previously said "__init__ function",
        # masking which method actually needs implementing.
        raise NotImplementedError(
            "Please implement TabularRegressionPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/summarization.py | from typing import Dict, List
from app.pipelines import Pipeline
class SummarizationPipeline(Pipeline):
    def __init__(self, model_id: str):
        # IMPLEMENT_THIS
        # Preload all the elements you are going to need at inference.
        # For instance your model, processors, tokenizer that might be needed.
        # This function is only called once, so do all the heavy processing I/O here
        raise NotImplementedError(
            "Please implement SummarizationPipeline __init__ function"
        )

    def __call__(self, inputs: str) -> List[Dict[str, str]]:
        """
        Args:
            inputs (:obj:`str`): a string to be summarized
        Return:
            A :obj:`list` of :obj:`dict` in the form of {"summary_text": "The string after summarization"}
        """
        # IMPLEMENT_THIS
        # Fixed copy-paste: this placeholder previously said "__init__ function",
        # masking which method actually needs implementing.
        raise NotImplementedError(
            "Please implement SummarizationPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/text_to_speech.py | from typing import Tuple
import numpy as np
from app.pipelines import Pipeline
class TextToSpeechPipeline(Pipeline):
    def __init__(self, model_id: str):
        # IMPLEMENT_THIS: preload the TTS model for `model_id`; this
        # constructor runs only once, so heavy I/O belongs here.
        raise NotImplementedError(
            "Please implement TextToSpeechPipeline __init__ function"
        )

    def __call__(self, inputs: str) -> Tuple[np.array, int]:
        """Synthesize audio for ``inputs``.

        Args:
            inputs (:obj:`str`):
                The text to generate audio from
        Return:
            The raw waveform as a :obj:`np.array` together with the sampling
            rate as an :obj:`int`.
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement TextToSpeechPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/base.py | from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Abstract base every task pipeline implements."""

    @abstractmethod
    def __init__(self, model_id: str):
        """Load the model named by ``model_id``; do all heavy I/O here."""
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        """Run inference on ``inputs`` and return the task-specific output."""
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    """Raised by pipelines for task-level failures."""

    pass
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/question_answering.py | from typing import Any, Dict
from app.pipelines import Pipeline
class QuestionAnsweringPipeline(Pipeline):
    def __init__(self, model_id: str):
        # IMPLEMENT_THIS: preload the model/tokenizer for `model_id` here;
        # this constructor runs only once, so heavy I/O belongs in it.
        raise NotImplementedError(
            "Please implement QuestionAnsweringPipeline __init__ function"
        )

    def __call__(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        """Extract the answer to ``inputs["question"]`` from ``inputs["context"]``.

        Args:
            inputs (:obj:`dict`):
                two keys: 'question' (the question being asked) and 'context'
                (some text containing the answer).
        Return:
            A :obj:`dict` shaped like
            {"answer": "XXX", "start": 3, "end": 6, "score": 0.82} where
            - "answer": the extracted answer from the `context`,
            - "start"/"end": offsets such that context[start:end] == answer,
            - "score": model confidence in [0, 1].
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement QuestionAnsweringPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/text2text_generation.py | from typing import Dict, List
from app.pipelines import Pipeline
class TextToTextPipeline(Pipeline):
    def __init__(self, model_id: str):
        """Load the model and any tokenizer/processors for *model_id*.

        Runs a single time at startup; do all heavy I/O here.
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement TextToTextPipeline __init__ function"
        )

    def __call__(self, inputs: str) -> List[Dict[str, str]]:
        """Generate text from the input string.

        Returns a single-item list: ``[{"text": <model output>}]``.
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement TextToTextPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/image_classification.py | from typing import TYPE_CHECKING, Any, Dict, List
from app.pipelines import Pipeline
if TYPE_CHECKING:
from PIL import Image
class ImageClassificationPipeline(Pipeline):
    def __init__(self, model_id: str):
        """Load model and processors once; all heavy I/O belongs here."""
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement ImageClassificationPipeline __init__ function"
        )

    def __call__(self, inputs: "Image.Image") -> List[Dict[str, Any]]:
        """Classify a raw PIL image.

        The image arrives completely untransformed, so every necessary
        preprocessing step happens here. Returns dicts shaped like
        ``{"label": "XXX", "score": 0.82}``, preferably sorted by
        decreasing score.
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement ImageClassificationPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/fill_mask.py | from typing import Any, Dict, List
from app.pipelines import Pipeline
class FillMaskPipeline(Pipeline):
    def __init__(self, model_id: str):
        """Load model and tokenizer once; all heavy I/O belongs here."""
        # IMPLEMENT_THIS
        raise NotImplementedError("Please implement FillMaskPipeline __init__ function")

    def __call__(self, inputs: str) -> List[Dict[str, Any]]:
        """Fill the single mask token in *inputs*.

        The exact mask marker (e.g. ``[MASK]``) is model-specific; check the
        model card. Each candidate dict carries:
            - "sequence": full text with the mask filled (may include special tokens)
            - "score": probability of the candidate token
            - "token": the token id
            - "token_str": the token as a string
        """
        # IMPLEMENT_THIS
        raise NotImplementedError("Please implement FillMaskPipeline __call__ function")
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/sentence_similarity.py | from typing import Dict, List, Union
from app.pipelines import Pipeline
class SentenceSimilarityPipeline(Pipeline):
    def __init__(self, model_id: str):
        """Load model and tokenizer once; all heavy I/O belongs here."""
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement SentenceSimilarityPipeline __init__ function"
        )

    def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]:
        """Score a source sentence against a batch of candidates.

        Args:
            inputs: dict with "source_sentence" (the reference string) and
                "sentences" (the list of strings to compare against it).

        Returns:
            One similarity value per entry of ``inputs["sentences"]``.
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement SentenceSimilarityPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/conversational.py | from typing import Any, Dict, List, Union
from app.pipelines import Pipeline
class ConversationalPipeline(Pipeline):
    def __init__(self, model_id: str):
        """Load model and tokenizer once; all heavy I/O belongs here."""
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement ConversationalPipeline __init__ function"
        )

    def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> Dict[str, Any]:
        """Run one conversational turn.

        Args:
            inputs: dict with optional keys:
                - "text": the new user utterance starting/continuing the chat.
                - "past_user_inputs": prior user turns. Only needed when
                  recreating history; must pair 1:1 with
                  "generated_responses".
                - "generated_responses": prior model turns, same length as
                  "past_user_inputs".

        Returns:
            dict with:
                - "generated_text": the bot's reply.
                - "conversation": ``{"past_user_inputs": [...],
                  "generated_responses": [...]}`` updated with this turn,
                  suitable to send back verbatim on the next request.
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement ConversationalPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/__init__.py | from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.audio_classification import AudioClassificationPipeline
from app.pipelines.audio_to_audio import AudioToAudioPipeline
from app.pipelines.automatic_speech_recognition import (
AutomaticSpeechRecognitionPipeline,
)
from app.pipelines.feature_extraction import FeatureExtractionPipeline
from app.pipelines.image_classification import ImageClassificationPipeline
from app.pipelines.question_answering import QuestionAnsweringPipeline
from app.pipelines.sentence_similarity import SentenceSimilarityPipeline
from app.pipelines.speech_segmentation import SpeechSegmentationPipeline
from app.pipelines.tabular_classification_pipeline import TabularClassificationPipeline
from app.pipelines.tabular_regression_pipeline import TabularRegressionPipeline
from app.pipelines.text_to_speech import TextToSpeechPipeline
from app.pipelines.token_classification import TokenClassificationPipeline
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/text_to_image.py | from typing import TYPE_CHECKING
from app.pipelines import Pipeline
if TYPE_CHECKING:
from PIL import Image
class TextToImagePipeline(Pipeline):
    def __init__(self, model_id: str):
        """Load every inference resource once at startup (heavy I/O here)."""
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement TextToImagePipeline.__init__ function"
        )

    def __call__(self, inputs: str) -> "Image.Image":
        """Render the text prompt *inputs* into a raw ``PIL.Image``."""
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement TextToImagePipeline.__call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/automatic_speech_recognition.py | from typing import Dict
import numpy as np
from app.pipelines import Pipeline
class AutomaticSpeechRecognitionPipeline(Pipeline):
    def __init__(self, model_id: str):
        """Load the model once; all heavy I/O belongs here.

        Implementations must define ``self.sampling_rate`` so the caller can
        resample incoming audio correctly before ``__call__``.
        """
        # Default expected sampling rate for the incoming waveform.
        self.sampling_rate = 16000
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement AutomaticSpeechRecognitionPipeline __init__ function"
        )

    def __call__(self, inputs: np.array) -> Dict[str, str]:
        """Transcribe a raw waveform.

        Args:
            inputs: 1-D audio samples, at ``self.sampling_rate`` (16 kHz by
                default).

        Returns:
            ``{"text": "..."}`` with the recognized text.
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement AutomaticSpeechRecognitionPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common/app | hf_public_repos/api-inference-community/docker_images/common/app/pipelines/text_classification.py | from typing import Dict, List
from app.pipelines import Pipeline
class TextClassificationPipeline(Pipeline):
    def __init__(self, model_id: str):
        """Load the model once; all heavy I/O belongs here."""
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement TextClassificationPipeline __init__ function"
        )

    def __call__(self, inputs: str) -> List[Dict[str, float]]:
        """Classify the input text.

        Returns a list wrapping one list of ``{"label": ..., "score": ...}``
        dicts (one per candidate class), where each score is the model's
        confidence in [0, 1].
        """
        # IMPLEMENT_THIS
        raise NotImplementedError(
            "Please implement TextClassificationPipeline __call__ function"
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_automatic_speech_recognition.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "automatic-speech-recognition" not in ALLOWED_TASKS,
    "automatic-speech-recognition not implemented",
)
class AutomaticSpeecRecognitionTestCase(TestCase):
    """Integration tests for the ASR endpoint using bundled audio fixtures."""

    # NOTE(review): class name is missing an "h" ("Speec"); harmless, since
    # unittest discovers TestCase subclasses by type rather than by name.

    def setUp(self):
        # The env vars must be set before app.main is imported below.
        model_id = TESTABLE_MODELS["automatic-speech-recognition"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "automatic-speech-recognition"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore the environment exactly as it was before setUp().
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        # Load a fixture from tests/samples/ as raw bytes.
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def test_simple(self):
        bpayload = self.read("sample1.flac")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(set(content.keys()), {"text"})

    def test_malformed_audio(self):
        # Undecodable audio must yield a 400 with a structured error body.
        bpayload = self.read("malformed.flac")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
            self.assertEqual(
                response.status_code,
                400,
            )
            self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')

    def test_dual_channel_audiofile(self):
        # Stereo input should still be accepted (channel handling is
        # server-side and not asserted here).
        bpayload = self.read("sample1_dual.ogg")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(set(content.keys()), {"text"})

    def test_webm_audiofile(self):
        bpayload = self.read("sample1.webm")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(set(content.keys()), {"text"})
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_text_to_image.py | import os
from io import BytesIO
from unittest import TestCase, skipIf
import PIL
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "text-to-image" not in ALLOWED_TASKS,
    "text-to-image not implemented",
)
class TextToImageTestCase(TestCase):
    """Integration tests for the text-to-image endpoint."""

    def setUp(self):
        # The env vars must be set before app.main is imported below.
        model_id = TESTABLE_MODELS["text-to-image"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "text-to-image"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore the environment exactly as it was before setUp().
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        inputs = "soap bubble"
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
            self.assertEqual(
                response.status_code,
                200,
            )
            # The endpoint returns raw image bytes; decoding proves validity.
            image = PIL.Image.open(BytesIO(response.content))
            self.assertTrue(isinstance(image, PIL.Image.Image))

    def test_malformed_input(self):
        # An invalid-UTF-8 body must produce a 400 with the codec error.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
            self.assertEqual(
                response.status_code,
                400,
            )
            self.assertEqual(
                response.content,
                b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
            )
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_text_to_speech.py | import os
from unittest import TestCase, skipIf
from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "text-to-speech" not in ALLOWED_TASKS,
    "text-to-speech not implemented",
)
class TextToSpeechTestCase(TestCase):
    """Integration tests for the text-to-speech endpoint."""

    def setUp(self):
        # The env vars must be set before app.main is imported below.
        model_id = TESTABLE_MODELS["text-to-speech"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "text-to-speech"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore the environment exactly as it was before setUp().
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": "This is some text"})
            self.assertEqual(
                response.status_code,
                200,
            )
            self.assertEqual(response.headers["content-type"], "audio/flac")
            # Decode the returned flac and sanity-check it is non-trivial
            # mono audio (1-D, more than 1000 samples at 16 kHz).
            audio = ffmpeg_read(response.content, 16000)
            self.assertEqual(len(audio.shape), 1)
            self.assertGreater(audio.shape[0], 1000)

    def test_malformed_input(self):
        # An invalid-UTF-8 body must produce a 400 with the codec error.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
            self.assertEqual(
                response.status_code,
                400,
            )
            self.assertEqual(
                response.content,
                b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
            )
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_token_classification.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "token-classification" not in ALLOWED_TASKS,
    "token-classification not implemented",
)
class TokenClassificationTestCase(TestCase):
    """Integration tests for the token-classification (NER) endpoint."""

    def setUp(self):
        # The env vars must be set before app.main is imported below.
        model_id = TESTABLE_MODELS["token-classification"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "token-classification"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore the environment exactly as it was before setUp().
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        inputs = "Hello, my name is John and I live in New York"
        # The endpoint accepts both the wrapped {"inputs": ...} payload and
        # the bare string; both forms are exercised below.
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual(
                set(k for el in content for k in el.keys()),
                {"entity_group", "word", "start", "end", "score"},
            )
        with TestClient(self.app) as client:
            response = client.post("/", json=inputs)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual(
                set(k for el in content for k in el.keys()),
                {"entity_group", "word", "start", "end", "score"},
            )

    def test_malformed_question(self):
        # An invalid-UTF-8 body must produce a 400 with the codec error.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
            self.assertEqual(
                response.status_code,
                400,
            )
            self.assertEqual(
                response.content,
                b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
            )
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_tabular_classification.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "tabular-classification" not in ALLOWED_TASKS,
    "tabular-classification not implemented",
)
class TabularClassificationTestCase(TestCase):
    """Integration tests for the tabular-classification endpoint.

    The `data` payloads are template placeholders (IMPLEMENT_THIS) that each
    docker image fills in with rows matching its test model's schema.
    """

    def setUp(self):
        # The env vars must be set before app.main is imported below.
        model_id = TESTABLE_MODELS["tabular-classification"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "tabular-classification"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore the environment exactly as it was before setUp().
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        # IMPLEMENT_THIS
        # Add one or multiple rows that the test model expects.
        data = {}
        inputs = {"data": data}
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            # One prediction per input row (the placeholder expects two rows).
            self.assertEqual(len(content), 2)

    def test_malformed_input(self):
        # Non-JSON body must produce a 400 with an {"error": ...} payload.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"Where do I live ?")
            self.assertEqual(
                response.status_code,
                400,
            )
            content = json.loads(response.content)
            self.assertEqual(set(content.keys()), {"error"})

    def test_missing_columns(self):
        # IMPLEMENT_THIS
        # Add wrong number of columns
        data = {}
        inputs = {"data": data}
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
            self.assertEqual(
                response.status_code,
                400,
            )
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_image_classification.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "image-classification" not in ALLOWED_TASKS,
    "image-classification not implemented",
)
class ImageClassificationTestCase(TestCase):
    """Integration tests for the image-classification endpoint."""

    def setUp(self):
        # The env vars must be set before app.main is imported below.
        model_id = TESTABLE_MODELS["image-classification"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "image-classification"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore the environment exactly as it was before setUp().
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        # Load a fixture from tests/samples/ as raw bytes.
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def test_simple(self):
        bpayload = self.read("plane.jpg")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual(set(type(el) for el in content), {dict})
            self.assertEqual(
                set((k, type(v)) for el in content for (k, v) in el.items()),
                {("label", str), ("score", float)},
            )

    def test_different_resolution(self):
        # A second fixture with a different resolution must also be handled.
        bpayload = self.read("plane2.jpg")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual(set(type(el) for el in content), {dict})
            self.assertEqual(
                set(k for el in content for k in el.keys()), {"label", "score"}
            )
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_image_to_image.py | import base64
import os
from io import BytesIO
from unittest import TestCase, skipIf
import PIL
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "image-to-image" not in ALLOWED_TASKS,
    "image-to-image not implemented",
)
class ImageToImageTestCase(TestCase):
    """Integration tests for the image-to-image endpoint."""

    def setUp(self):
        # The env vars must be set before app.main is imported below.
        model_id = TESTABLE_MODELS["image-to-image"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "image-to-image"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore the environment exactly as it was before setUp().
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        image = PIL.Image.new("RGB", (64, 64))
        parameters = {"prompt": "soap bubble"}
        # BUG FIX: base64.b64encode() requires a bytes-like object; passing
        # the PIL.Image directly raised TypeError. Serialize to PNG bytes
        # first, then base64-encode those bytes.
        buffer = BytesIO()
        image.save(buffer, format="PNG")
        with TestClient(self.app) as client:
            response = client.post(
                "/",
                json={
                    "image": base64.b64encode(buffer.getvalue()).decode("utf-8"),
                    "parameters": parameters,
                },
            )
            self.assertEqual(
                response.status_code,
                200,
            )
            # The endpoint returns raw image bytes; decoding proves validity.
            image = PIL.Image.open(BytesIO(response.content))
            self.assertTrue(isinstance(image, PIL.Image.Image))

    def test_malformed_input(self):
        # An invalid-UTF-8 body must produce a 400 with the codec error.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
            self.assertEqual(
                response.status_code,
                400,
            )
            self.assertEqual(
                response.content,
                b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
            )
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_feature_extraction.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "feature-extraction" not in ALLOWED_TASKS,
    "feature-extraction not implemented",
)
class FeatureExtractionTestCase(TestCase):
    """Integration tests for the feature-extraction (embedding) endpoint."""

    def setUp(self):
        # The env vars must be set before app.main is imported below.
        model_id = TESTABLE_MODELS["feature-extraction"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "feature-extraction"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore the environment exactly as it was before setUp().
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        inputs = "Hello, my name is John and I live in New York"
        # Both the wrapped {"inputs": ...} payload and the bare string
        # must return a flat list of floats.
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual({type(item) for item in content}, {float})
        with TestClient(self.app) as client:
            response = client.post("/", json=inputs)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual({type(item) for item in content}, {float})

    def test_malformed_sentence(self):
        # An invalid-UTF-8 body must produce a 400 with the codec error.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
            self.assertEqual(
                response.status_code,
                400,
            )
            self.assertEqual(
                response.content,
                b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
            )
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_docker_build.py | import os
import subprocess
from unittest import TestCase
class cd:
    """Context manager that runs the enclosed block inside another directory.

    On entry it chdirs into *newPath* (with ``~`` expanded); on exit it
    always returns to whatever directory was current beforehand, even if
    the block raised.
    """

    def __init__(self, newPath):
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)
class DockerBuildTestCase(TestCase):
    """Smoke test: the template's Dockerfile must build successfully."""

    def test_can_build_docker_image(self):
        # Build from the docker-image root, i.e. the parent of tests/.
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_summarization.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "summarization" not in ALLOWED_TASKS,
    "summarization not implemented",
)
@parameterized_class(
    [{"model_id": model_id} for model_id in TESTABLE_MODELS["summarization"]]
)
class SummarizationTestCase(TestCase):
    """Integration tests for the summarization endpoint, run per test model."""

    def setUp(self):
        # The env vars must be set before app.main is imported below.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "summarization"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # CONSISTENCY FIX: every sibling test case clears the cached pipeline
        # here; without it, a pipeline cached by a previously-run test class
        # (or a previous parameterized model) would be silently reused.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore the environment exactly as it was before setUp().
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_single_input(self):
        text = "test"
        with TestClient(self.app) as client:
            response = client.post("/", json=text)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual(len(content), 1)
            for result in content:
                self.assertIn("summary_text", result)
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api.py | import os
from typing import Dict
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
    # IMPLEMENT_THIS
    # "automatic-speech-recognition": "mysample-ASR",
    # "text-generation": "mysample-gpt2",
}

# Every task name the inference API knows about, used to check that tasks a
# docker image does NOT implement are rejected by get_pipeline().
# FIX: the original literal listed "feature-extraction" and
# "sentence-similarity" twice; duplicates are harmless in a set literal but
# misleading, so the entries are deduplicated and sorted.
ALL_TASKS = {
    "audio-classification",
    "audio-to-audio",
    "automatic-speech-recognition",
    "conversational",
    "feature-extraction",
    "fill-mask",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "speech-segmentation",
    "summarization",
    "table-question-answering",
    "tabular-classification",
    "tabular-regression",
    "text-classification",
    "text-to-image",
    "text-to-speech",
    "text2text-generation",
    "token-classification",
    "zero-shot-classification",
}
class PipelineTestCase(TestCase):
    """Sanity checks on the task registry itself (no model execution)."""

    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        # The "common" template deliberately ships zero tasks; every derived
        # docker image must enable at least one.
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Every known task this image does NOT implement must make
        # get_pipeline() raise instead of silently mis-serving it.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_sentence_similarity.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "sentence-similarity" not in ALLOWED_TASKS,
    "sentence-similarity not implemented",
)
class SentenceSimilarityTestCase(TestCase):
    """Integration tests for the sentence-similarity endpoint."""

    def setUp(self):
        # The env vars must be set before app.main is imported below.
        model_id = TESTABLE_MODELS["sentence-similarity"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "sentence-similarity"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by a previously-run test class.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore the environment exactly as it was before setUp().
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        source_sentence = "I am a very happy man"
        sentences = [
            "What is this?",
            "I am a super happy man",
            "I am a sad man",
            "I am a happy dog",
        ]
        inputs = {"source_sentence": source_sentence, "sentences": sentences}
        # Both the wrapped {"inputs": ...} payload and the bare dict must
        # return a list of float similarity scores.
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual({type(item) for item in content}, {float})
        with TestClient(self.app) as client:
            response = client.post("/", json=inputs)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual({type(item) for item in content}, {float})

    def test_missing_input_sentences(self):
        # Omitting the "sentences" key must be rejected with a 400.
        source_sentence = "I am a very happy man"
        inputs = {"source_sentence": source_sentence}
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
            self.assertEqual(
                response.status_code,
                400,
            )

    def test_malformed_input(self):
        # An invalid-UTF-8 body must produce a 400 with the codec error.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
            self.assertEqual(
                response.status_code,
                400,
            )
            self.assertEqual(
                response.content,
                b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
            )
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_audio_to_audio.py | import base64
import json
import os
from unittest import TestCase, skipIf
from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "audio-to-audio" not in ALLOWED_TASKS,
    "audio-to-audio not implemented",
)
class AudioToAudioTestCase(TestCase):
    """Integration tests for the audio-to-audio route of the app."""

    def setUp(self):
        # Point the app at the test model/task, remembering the previous
        # environment so tearDown can restore it.
        model_id = TESTABLE_MODELS["audio-to-audio"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "audio-to-audio"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by previously-run test classes.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the environment variables touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        """Return the raw bytes of a sample file shipped next to the tests."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def _post_and_assert_audio(self, sample_name):
        # Shared happy path: POST a sample file and check the response is a
        # JSON list of base64-encoded audio blobs with content-type and label.
        bpayload = self.read(sample_name)
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        self.assertEqual(response.headers["content-type"], "application/json")
        audio = json.loads(response.content)
        self.assertTrue(isinstance(audio, list))
        self.assertEqual(set(audio[0].keys()), {"blob", "content-type", "label"})
        data = base64.b64decode(audio[0]["blob"])
        wavform = ffmpeg_read(data, 16000)
        # The decoded waveform should contain a meaningful number of samples.
        self.assertGreater(wavform.shape[0], 1000)
        self.assertTrue(isinstance(audio[0]["content-type"], str))
        self.assertTrue(isinstance(audio[0]["label"], str))

    def test_simple(self):
        self._post_and_assert_audio("sample1.flac")

    def test_malformed_audio(self):
        bpayload = self.read("malformed.flac")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')

    def test_dual_channel_audiofile(self):
        self._post_and_assert_audio("sample1_dual.ogg")

    def test_webm_audiofile(self):
        self._post_and_assert_audio("sample1.webm")
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_question_answering.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "question-answering" not in ALLOWED_TASKS,
    "question-answering not implemented",
)
class QuestionAnsweringTestCase(TestCase):
    """Integration tests for the question-answering route of the app."""

    def setUp(self):
        # Point the app at the test model/task, remembering the previous
        # environment so tearDown can restore it.
        model_id = TESTABLE_MODELS["question-answering"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "question-answering"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by previously-run test classes.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the environment variables touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        inputs = {"question": "Where do I live ?", "context": "I live in New-York"}
        # The route accepts both the wrapped ({"inputs": ...}) and the raw
        # payload forms; exercise both.
        for payload in ({"inputs": inputs}, inputs):
            with TestClient(self.app) as client:
                response = client.post("/", json=payload)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(set(content.keys()), {"answer", "start", "end", "score"})

    def test_malformed_question(self):
        with TestClient(self.app) as client:
            response = client.post("/", data=b"Where do I live ?")
        self.assertEqual(
            response.status_code,
            400,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"error"})
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_speech_segmentation.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "speech-segmentation" not in ALLOWED_TASKS,
    "speech-segmentation not implemented",
)
class SpeechSegmentationTestCase(TestCase):
    """Integration tests for the speech-segmentation route of the app."""

    def setUp(self):
        # Point the app at the test model/task, remembering the previous
        # environment so tearDown can restore it.
        model_id = TESTABLE_MODELS["speech-segmentation"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "speech-segmentation"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by previously-run test classes.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the environment variables touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        """Return the raw bytes of a sample file shipped next to the tests."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def _post_and_assert_segments(self, sample_name):
        # Shared happy path: POST a sample file and check the response is a
        # list of {"class": str, "start": float, "end": float} segments.
        bpayload = self.read(sample_name)
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertIsInstance(content, list)
        for c in content:
            self.assertEqual(set(c.keys()), {"class", "start", "end"})
            self.assertIsInstance(c["class"], str)
            self.assertIsInstance(c["start"], float)
            self.assertIsInstance(c["end"], float)

    def test_original_audiofile(self):
        self._post_and_assert_segments("sample1.flac")

    def test_malformed_audio(self):
        bpayload = self.read("malformed.flac")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')

    def test_dual_channel_audiofile(self):
        self._post_and_assert_segments("sample1_dual.ogg")

    def test_webm_audiofile(self):
        self._post_and_assert_segments("sample1.webm")
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_audio_classification.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "audio-classification" not in ALLOWED_TASKS,
    "audio-classification not implemented",
)
class AudioClassificationTestCase(TestCase):
    """Integration tests for the audio-classification route of the app."""

    def setUp(self):
        # Point the app at the test model/task, remembering the previous
        # environment so tearDown can restore it.
        model_id = TESTABLE_MODELS["audio-classification"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "audio-classification"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Consistency fix: like the sibling test classes, drop any pipeline
        # cached by previously-run test classes so this class does not run
        # against a stale model.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the environment variables touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        """Return the raw bytes of a sample file shipped next to the tests."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def _post_and_assert_labels(self, sample_name):
        # Shared happy path: POST a sample file and check the response is a
        # list of {"label": ..., "score": ...} dicts.
        bpayload = self.read(sample_name)
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(type(content[0]), dict)
        self.assertEqual(
            set(k for el in content for k in el.keys()),
            {"label", "score"},
        )

    def test_simple(self):
        self._post_and_assert_labels("sample1.flac")

    def test_malformed_audio(self):
        bpayload = self.read("malformed.flac")
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')

    def test_dual_channel_audiofile(self):
        self._post_and_assert_labels("sample1_dual.ogg")

    def test_webm_audiofile(self):
        self._post_and_assert_labels("sample1.webm")
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_text_classification.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "text-classification" not in ALLOWED_TASKS,
    "text-classification not implemented",
)
class TextClassificationTestCase(TestCase):
    """Integration tests for the text-classification route of the app."""

    def setUp(self):
        # Point the app at the test model/task, remembering the previous
        # environment so tearDown can restore it.
        model_id = TESTABLE_MODELS["text-classification"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "text-classification"
        from app.main import app

        self.app = app

    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by previously-run test classes.
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the environment variables touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        inputs = "It is a beautiful day outside"
        # The route accepts both the wrapped ({"inputs": ...}) and the raw
        # payload forms; exercise both with identical assertions.
        for payload in ({"inputs": inputs}, inputs):
            with TestClient(self.app) as client:
                response = client.post("/", json=payload)
            self.assertEqual(
                response.status_code,
                200,
            )
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual(len(content), 1)
            self.assertEqual(type(content[0]), list)
            self.assertEqual(
                set(k for el in content[0] for k in el.keys()),
                {"label", "score"},
            )

    def test_malformed_question(self):
        with TestClient(self.app) as client:
            response = client.post("/", data=b"\xc3\x28")
        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(
            response.content,
            b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
        )
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_text2text_generation.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "text2text-generation" not in ALLOWED_TASKS,
    "text2text-generation not implemented",
)
class TextToSpeechTestCase(TestCase):
    # NOTE(review): despite the name, this class exercises the
    # text2text-generation task — the name looks like a copy/paste leftover
    # from the text-to-speech test; confirm before renaming.
    def setUp(self):
        # Point the app at the test model/task, remembering the previous
        # environment so tearDown can restore it.
        model_id = TESTABLE_MODELS["text2text-generation"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "text2text-generation"
        from app.main import app
        self.app = app
    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by previously-run test classes.
        from app.main import get_pipeline
        get_pipeline.cache_clear()
    def tearDown(self):
        # Restore (or remove) the environment variables touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]
    def test_simple(self):
        # POST a text payload and only assert the response shape (a list);
        # the generated text itself is model-dependent.
        with TestClient(self.app) as client:
            response = client.post(
                "/",
                json={
                    "inputs": "English is tough. It can be understood "
                    "through thorough thought though."
                },
            )
        self.assertEqual(
            response.status_code,
            200,
        )
        result = json.loads(response.content)
        self.assertEqual(type(result), list)
        # Add more tests here
|
0 | hf_public_repos/api-inference-community/docker_images/common | hf_public_repos/api-inference-community/docker_images/common/tests/test_api_tabular_regression.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "tabular-regression" not in ALLOWED_TASKS,
    "tabular-regression not implemented",
)
class TabularRegressionTestCase(TestCase):
    """Template integration tests for the tabular-regression route.

    The IMPLEMENT_THIS markers must be filled with rows matching the test
    model's expected columns before these tests are meaningful.
    """
    def setUp(self):
        # Point the app at the test model/task, remembering the previous
        # environment so tearDown can restore it.
        model_id = TESTABLE_MODELS["tabular-regression"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "tabular-regression"
        from app.main import app
        self.app = app
    @classmethod
    def setUpClass(cls):
        # Drop any pipeline cached by previously-run test classes.
        from app.main import get_pipeline
        get_pipeline.cache_clear()
    def tearDown(self):
        # Restore (or remove) the environment variables touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]
    def test_simple(self):
        # IMPLEMENT_THIS
        # Add one or multiple rows that the test model expects.
        data = {}
        inputs = {"data": data}
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        # Expects one predicted value per input row (two rows here).
        self.assertEqual(type(content), list)
        self.assertEqual(len(content), 2)
    def test_malformed_input(self):
        # A non-JSON body must be rejected with a structured error payload.
        with TestClient(self.app) as client:
            response = client.post("/", data=b"Where do I live ?")
        self.assertEqual(
            response.status_code,
            400,
        )
        content = json.loads(response.content)
        self.assertEqual(set(content.keys()), {"error"})
    def test_missing_columns(self):
        # IMPLEMENT_THIS
        # Add wrong number of columns
        data = {}
        inputs = {"data": data}
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": inputs})
        self.assertEqual(
            response.status_code,
            400,
        )
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/fastai/Dockerfile | FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="Omar Espejel <espejelomar@gmail.com>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
# This enables better docker caching so adding new requirements doesn't
# retrigger reinstalling the whole pytorch.
RUN pip install torch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU environment docker.
# TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose
# rendering TIMEOUT defined by uvicorn impossible to use correctly
# We're overriding it to be renamed UVICORN_TIMEOUT
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/fastai/requirements.txt | starlette==0.27.0
api-inference-community==0.0.23
huggingface_hub[fastai]==0.6.0
timm==0.5.4
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/fastai/prestart.sh | python app/main.py
|
0 | hf_public_repos/api-inference-community/docker_images/fastai | hf_public_repos/api-inference-community/docker_images/fastai/app/main.py | import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import ImageClassificationPipeline, Pipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"image-classification": ImageClassificationPipeline
}
@functools.lru_cache()
def get_pipeline(task=None, model_id=None) -> Pipeline:
    """Build (and memoize) the pipeline for the requested task.

    Falls back to the TASK / MODEL_ID environment variables when the
    arguments are omitted, which is how the running app configures itself.
    Accepting explicit arguments keeps this signature consistent with the
    other images (e.g. sklearn) and with tests that call
    ``get_pipeline(task, model_id=...)``.

    Raises:
        EnvironmentError: if ``task`` has no registered pipeline class.
    """
    task = task or os.environ["TASK"]
    model_id = model_id or os.environ["MODEL_ID"]
    if task not in ALLOWED_TASKS:
        raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
    return ALLOWED_TASKS[task](model_id)
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Configure access logging and eagerly warm up the pipeline."""
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]
    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
if __name__ == "__main__":
    # Running the module directly only warms the pipeline cache; errors are
    # deliberately swallowed so they surface on the first real request instead.
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0 | hf_public_repos/api-inference-community/docker_images/fastai/app | hf_public_repos/api-inference-community/docker_images/fastai/app/pipelines/base.py | from abc import ABC, abstractmethod
from typing import Any, Optional
class Pipeline(ABC):
    """Abstract base class that every task pipeline must implement."""
    # Optional metadata slots; default to None until a pipeline fills them in.
    task: Optional[str] = None
    model_id: Optional[str] = None
    @abstractmethod
    def __init__(self, model_id: str):
        # Abstract: concrete pipelines load their model from `model_id` here.
        raise NotImplementedError("Pipelines should implement an __init__ method")
    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        # Abstract: concrete pipelines run inference on `inputs` here.
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Base exception type for pipeline errors."""
    pass
|
0 | hf_public_repos/api-inference-community/docker_images/fastai/app | hf_public_repos/api-inference-community/docker_images/fastai/app/pipelines/image_classification.py | from typing import Any, Dict, List
import numpy as np
from app.pipelines import Pipeline
from huggingface_hub import from_pretrained_fastai
from PIL import Image
class ImageClassificationPipeline(Pipeline):
    """Image classification backed by a fastai learner from the Hub."""

    def __init__(self, model_id: str):
        self.model = from_pretrained_fastai(model_id)
        # Class names come from the learner's DataLoaders vocabulary.
        self.id2label = self.model.dls.vocab
        # Cap responses at the five best-scoring classes.
        self.top_k = 5

    def __call__(self, inputs: "Image.Image") -> List[Dict[str, Any]]:
        """
        Args:
            inputs (:obj:`PIL.Image`):
                The raw image representation as PIL.
                No transformation made whatsoever from the input. Make all necessary transformations here.
        Return:
            A :obj:`list`:. The list contains items that are dicts should be liked {"label": "XXX", "score": 0.82}
            It is preferred if the returned list is in decreasing `score` order
        """
        # FastAI expects a np array, not a PIL Image.
        _, _, probabilities = self.model.predict(np.array(inputs))
        scores = probabilities.tolist()
        predictions = [
            {"label": str(self.id2label[index]), "score": float(score)}
            for index, score in enumerate(scores)
        ]
        predictions.sort(key=lambda entry: entry["score"], reverse=True)
        return predictions[: self.top_k]
|
0 | hf_public_repos/api-inference-community/docker_images/fastai/app | hf_public_repos/api-inference-community/docker_images/fastai/app/pipelines/__init__.py | from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.image_classification import ImageClassificationPipeline
# from app.pipelines.audio_classification import AudioClassificationPipeline
# from app.pipelines.audio_to_audio import AudioToAudioPipeline
# from app.pipelines.automatic_speech_recognition import (
# AutomaticSpeechRecognitionPipeline,
# )
# from app.pipelines.feature_extraction import FeatureExtractionPipeline
# from app.pipelines.question_answering import QuestionAnsweringPipeline
# from app.pipelines.sentence_similarity import SentenceSimilarityPipeline
# from app.pipelines.speech_segmentation import SpeechSegmentationPipeline
# from app.pipelines.tabular_classification import (
# TabularDataPipeline,
# )
# from app.pipelines.text_to_speech import TextToSpeechPipeline
# from app.pipelines.token_classification import TokenClassificationPipeline
|
0 | hf_public_repos/api-inference-community/docker_images/fastai | hf_public_repos/api-inference-community/docker_images/fastai/tests/test_api_image_classification.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
    "image-classification" not in ALLOWED_TASKS,
    "image-classification not implemented",
)
@parameterized_class(
    [{"model_id": model_id} for model_id in TESTABLE_MODELS["image-classification"]]
)
class ImageClassificationTestCase(TestCase):
    """Integration tests for the image-classification route, parameterized
    over every testable model id."""

    def setUp(self):
        # Point the app at this parameterized model, remembering the previous
        # environment so tearDown can restore it.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        os.environ["TASK"] = "image-classification"
        from app.main import app, get_pipeline

        # Each parameterized class targets a different model, so the
        # lru-cached pipeline must be rebuilt per test.
        get_pipeline.cache_clear()
        self.app = app

    @classmethod
    def setUpClass(cls):
        from app.main import get_pipeline

        get_pipeline.cache_clear()

    def tearDown(self):
        # Restore (or remove) the environment variables touched in setUp.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        """Return the raw bytes of a sample file shipped next to the tests."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def _post_and_assert_labels(self, sample_name):
        # Shared happy path: POST an image and check the response is a list
        # of {"label": str, "score": float} dicts. Previously the two tests
        # duplicated this with inconsistent strictness; both now use the
        # stronger (typed) assertion.
        bpayload = self.read(sample_name)
        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(set(type(el) for el in content), {dict})
        self.assertEqual(
            set((k, type(v)) for el in content for (k, v) in el.items()),
            {("label", str), ("score", float)},
        )

    def test_simple(self):
        self._post_and_assert_labels("plane.jpg")

    def test_different_resolution(self):
        self._post_and_assert_labels("plane2.jpg")
|
0 | hf_public_repos/api-inference-community/docker_images/fastai | hf_public_repos/api-inference-community/docker_images/fastai/tests/test_docker_build.py | import os
import subprocess
from unittest import TestCase
class cd:
    """Temporarily switch the process working directory.

    Entering the context chdirs into ``newPath`` (with ``~`` expanded);
    leaving it returns to wherever the process started.
    """

    def __init__(self, newPath):
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        self._saved_path = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self._saved_path)
class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        """Smoke-test that this image's Dockerfile builds successfully."""
        # Build from the docker-image root (the parent directory of tests/).
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0 | hf_public_repos/api-inference-community/docker_images/fastai | hf_public_repos/api-inference-community/docker_images/fastai/tests/test_api.py | import os
from typing import Dict, List
from unittest import TestCase, skipIf

from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
# Values are lists of model ids so several checkpoints can be exercised per
# task (the annotation previously claimed Dict[str, str], which was wrong).
TESTABLE_MODELS: Dict[str, List[str]] = {
    "image-classification": ["fastai/fastbook_02_bears_classifier"]
}

# Every task the inference API knows about, implemented here or not.
ALL_TASKS = {
    "image-classification",
}
class PipelineTestCase(TestCase):
    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                # Bug fix: get_pipeline() in this image reads TASK/MODEL_ID
                # from the environment; calling it with positional arguments
                # raised TypeError instead of the asserted EnvironmentError.
                # It is also lru_cached, so clear the cache before each probe.
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                get_pipeline.cache_clear()
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/sklearn/Dockerfile | FROM mambaorg/micromamba
LABEL maintainer="Adrin Jalali adrin@hf.co"
# micromamba comes with a default non-root user. But we need root to install
# our required system packages.
USER root
RUN apt-get update && apt-get install -y curl jq
USER $MAMBAUSER
# Most our dockerfiles start from tiangolo/uvicorn-gunicorn:python3.8, but
# since here we'd like to start from micromamba, we copy necessary files from
# the uvicorn docker image using `COPY --from=...` commands. These steps are
# taken from:
# https://github.com/tiangolo/uvicorn-gunicorn-docker/blob/master/docker-images/python3.8-slim.dockerfile
COPY --from=tiangolo/uvicorn-gunicorn:python3.8 /start.sh /start.sh
RUN chmod +x /start.sh
COPY --from=tiangolo/uvicorn-gunicorn:python3.8 /gunicorn_conf.py /gunicorn_conf.py
COPY --from=tiangolo/uvicorn-gunicorn:python3.8 /start-reload.sh /start-reload.sh
RUN chmod +x /start-reload.sh
COPY --from=tiangolo/uvicorn-gunicorn:python3.8 /app /app
WORKDIR /app/
ENV PYTHONPATH=/app
EXPOSE 80
# This part is new and only specific to scikit-learn image.
ENV HUGGINGFACE_HUB_CACHE=/data
COPY ./app /app/app
COPY run_app.sh /run_app.sh
RUN chmod +x /run_app.sh
CMD /run_app.sh
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/sklearn/requirements.txt | starlette>=0.14.2
api-inference-community>=0.0.25
huggingface_hub>=0.5.1
scikit-learn
joblib>=1.0.1
# Dummy changes.
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/sklearn/README.md | ## Tests
### Test setup
The tests require certain repositories with certain requirements to exist on HF
Hub and certain output files to be created.
You can make sure those repos and files are up to date by running the
`docker_images/sklearn/tests/generators/run.sh` script. The script creates
required conda environments, updates them if necessary, and runs scripts inside
those environments. You should also give it a valid token with access to the
`skops-tests` org:
```bash
# from the project root
SKOPS_TESTS_TOKEN=your_secret_token docker_images/sklearn/tests/generators/run.sh
```
This script needs to be run _only once_ when you first start developing, or each
time a new scikit-learn version is released.
The created model repositories are also used for common tests of this package,
see `tests/test_dockers.py` > `test_sklearn`.
Note that a working [mamba
installation](https://mamba.readthedocs.io/en/latest/installation.html) is
required for this step
### Test environment
Create a new Python environment and install the test dependencies:
```bash
# with pip
python -m pip install -r docker_images/sklearn/requirements.txt
# with conda/mamba
conda install --file docker_images/sklearn/requirements.txt
```
### Running the tests
From within the Python environment, run:
```
pytest -sv --rootdir docker_images/sklearn/ docker_images/sklearn/
```
You will see many tests being skipped. If the message is "Skipping test because
requirements are not met.", it means that the test was intended to be skipped,
so you don't need to do anything about it. When adding a new test, make sure
that at least one of the parametrized settings is not skipped for that test.
### Adding a new task
When adding tests for a new task, certain artifacts like HF Hub repositories,
model inputs, and model outputs need to be generated first using the `run.sh`
script, as explained above. For the new task, those have to be implemented
first. For this, visit `docker_images/sklearn/tests/generators/generate.py` and
extend the script to include the new task. Most notably, visit the "CONSTANTS"
section and extend the constants defined there to include your task. This will
make it obvious which extra functions you need to write.
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/sklearn/prestart.sh | python app/main.py
|
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/sklearn/run_app.sh | #!/bin/bash --login
# This file creates an environment with all required dependencies for the given
# model, and then runs the start command.
# This makes it easy to see in logs what exactly is being run.
set -xe
# Echo the conda/micromamba package list for the model: a base set of serving
# dependencies plus whatever the model repository's config.json declares
# under .sklearn.environment.
get_requirements() {
    requirements="pandas uvicorn gunicorn api-inference-community skops"
    # this next command is needed to run the while loop in the same process and
    # therefore modify the same $requirements variable. Otherwise the loop would be
    # a separate process and the variable wouldn't be accessible from this parent
    # process.
    shopt -s lastpipe
    jq '.sklearn.environment' /tmp/config.json | jq '.[]' | while read r; do
        requirements+=" $r"
    done
    # Strip single and double quotes carried over from the jq output; with
    # them left in, the package string passed to micromamba is not parsable.
    requirements=$(echo "$requirements" | sed "s/'//g")
    requirements=$(echo "$requirements" | sed "s/\"//g")
    echo $requirements
}
# We download only the config file and use `jq` to extract the requirements. If
# the download fails, we use a default set of dependencies. We need to capture
# the output of `curl` here so that if it fails, it doesn't make the whole
# script to exit, which it would do due to the -e flag we've set above the
# script.
response="$(curl https://huggingface.co/$MODEL_ID/raw/main/config.json -f --output /tmp/config.json)" || response=$?
if [ -z $response ]; then
requirements=$(get_requirements)
else
# if the curl command is not successful, we use a default set of
# dependencies, and use the latest scikit-learn version. This is to allow
# users for a basic usage if they haven't put the config.json file in their
# repository.
requirements="pandas uvicorn gunicorn api-inference-community scikit-learn"
fi
micromamba create -c conda-forge -y -q --name=api-inference-model-env $requirements
micromamba activate api-inference-model-env
# start.sh file is not in our repo, rather taken from the
# `uvicorn-gunicorn-docker` repo. You can check the Dockerfile to see where
# exactly it is coming from.
/start.sh
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn | hf_public_repos/api-inference-community/docker_images/sklearn/app/main.py | import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import (
Pipeline,
TabularClassificationPipeline,
TabularRegressionPipeline,
TextClassificationPipeline,
)
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
# Environment-driven configuration: the Docker entrypoint sets TASK and
# MODEL_ID for the model repo being served.
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Registry of tasks implemented by this scikit-learn inference image.
# `get_pipeline` looks the requested TASK up here and instantiates the
# matching pipeline class with MODEL_ID. Check the requirements and
# expectations of each pipeline in its module under app/pipelines.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
    "tabular-classification": TabularClassificationPipeline,
    "tabular-regression": TabularRegressionPipeline,
    "text-classification": TextClassificationPipeline,
}
def get_pipeline(task=None, model_id=None) -> Pipeline:
    """Build the pipeline serving *task* for *model_id*.

    When the arguments are omitted (or falsy), they fall back to the TASK
    and MODEL_ID environment variables. Raises EnvironmentError for any
    task that is not registered in ALLOWED_TASKS.
    """
    task = task if task else os.environ["TASK"]
    model_id = model_id if model_id else os.environ["MODEL_ID"]
    if task in ALLOWED_TASKS:
        return ALLOWED_TASKS[task](model_id)
    raise EnvironmentError(
        f"{task} is not a valid pipeline for model : {model_id} ({','.join(ALLOWED_TASKS.keys())})"
    )
# Both routes match any path; the first (default GET) answers health/status
# checks, the second dispatches POST inference requests to `pipeline_route`.
routes = [
    Route("/{whatever:path}", status_ok),
    Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
# Compress responses above 1000 bytes; prediction payloads can be large.
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
    # Debug mode only: allow cross-origin requests from anywhere so the API
    # can be exercised from a local browser frontend.
    from starlette.middleware.cors import CORSMiddleware
    middleware.append(
        Middleware(
            CORSMiddleware,
            allow_origins=["*"],
            allow_headers=["*"],
            allow_methods=["*"],
        )
    )
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
    """Configure access logging and warm up the pipeline at server start."""
    # Intentionally shadows the module-level `logger`: we are reconfiguring
    # uvicorn's access logger here, not creating our own.
    logger = logging.getLogger("uvicorn.access")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    logger.handlers = [handler]
    # Link between `api-inference-community` and framework code.
    app.get_pipeline = get_pipeline
    try:
        # Eagerly build the pipeline so the model loads before the first
        # request hits.
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
if __name__ == "__main__":
    # When run as a script, try building the pipeline once so configuration
    # or model-loading problems show up immediately.
    try:
        get_pipeline()
    except Exception:
        # We can fail so we can show exception later.
        pass
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn/app | hf_public_repos/api-inference-community/docker_images/sklearn/app/pipelines/common.py | import json
import logging
import warnings
from abc import abstractmethod
from pathlib import Path
from typing import Any
import joblib
import skops.io as sio
from app.pipelines import Pipeline
from huggingface_hub import snapshot_download
logger = logging.getLogger(__name__)
# Fallback model file name, used when config.json does not provide
# sklearn.model.file.
DEFAULT_FILENAME = "sklearn_model.joblib"
class SklearnBasePipeline(Pipeline):
    """Base class for sklearn-based inference pipelines

    Concrete implementations should add two methods:

    - `_get_output`: Method to generate model predictions
    - `__call__`: Should delegate to handle_call, add docstring and type
      annotations.
    """

    def __init__(self, model_id: str):
        """Download the model repo and load the estimator.

        Load-time warnings and exceptions are stored (not raised) so they
        can be surfaced with a proper status code when the pipeline is
        actually called.
        """
        cached_folder = snapshot_download(repo_id=model_id)
        self._load_warnings = []
        self._load_exception = None
        try:
            with open(Path(cached_folder) / "config.json") as f:
                # this is the default path for configuration of a scikit-learn
                # project. If the project is created using `skops`, it should have
                # this file.
                config = json.load(f)
        except Exception:
            config = dict()
            warnings.warn("`config.json` does not exist or is invalid.")
        self.model_file = (
            config.get("sklearn", {}).get("model", {}).get("file", DEFAULT_FILENAME)
        )
        self.model_format = config.get("sklearn", {}).get("model_format", "pickle")
        try:
            with warnings.catch_warnings(record=True) as record:
                if self.model_format == "pickle":
                    # open the file in a context manager so the handle is
                    # closed even when joblib raises (previously leaked).
                    with open(Path(cached_folder) / self.model_file, "rb") as mf:
                        self.model = joblib.load(mf)
                elif self.model_format == "skops":
                    self.model = sio.load(
                        file=Path(cached_folder) / self.model_file, trusted=True
                    )
                else:
                    # An unrecognized format would otherwise leave
                    # `self.model` unset and fail later with AttributeError;
                    # raise here so it is reported as a load error instead.
                    raise ValueError(
                        f"Unsupported model format: {self.model_format}"
                    )
            if len(record) > 0:
                # if there's a warning while loading the model, we save it so
                # that it can be raised to the user when __call__ is called.
                self._load_warnings += record
        except Exception as e:
            # if there is an exception while loading the model, we save it to
            # raise the right error when __call__ is called.
            self._load_exception = e
        # use column names from the config file if available, to give the data
        # to the model in the right order.
        self.columns = config.get("sklearn", {}).get("columns", None)

    @abstractmethod
    def _get_output(self, inputs: Any) -> Any:
        raise NotImplementedError(
            "Implement this method to get the model output (prediction)"
        )

    def __call__(self, inputs: Any) -> Any:
        """Handle call for getting the model prediction

        This method is responsible for handling all possible errors and
        warnings. To get the actual prediction, implement the `_get_output`
        method.

        The types of the inputs and output depend on the specific task being
        implemented.

        Raises a ValueError if the model failed to load, or if any warnings
        were collected while loading or predicting (routes.py catches it and
        returns a non-200 response that still includes the model output).
        """
        if self._load_exception:
            # there has been an error while loading the model. We need to raise
            # that, and can't call predict on the model.
            raise ValueError(
                "An error occurred while loading the model: "
                f"{str(self._load_exception)}"
            )
        _warnings = []
        if self.columns:
            # TODO: we should probably warn if columns are not configured, we
            # really do need them.
            given_cols = set(inputs["data"].keys())
            expected = set(self.columns)
            extra = given_cols - expected
            missing = expected - given_cols
            if extra:
                _warnings.append(
                    f"The following columns were given but not expected: {extra}"
                )
            if missing:
                _warnings.append(
                    f"The following columns were expected but not given: {missing}"
                )
        exception = None
        try:
            with warnings.catch_warnings(record=True) as record:
                res = self._get_output(inputs)
        except Exception as e:
            # postpone raising: collected warnings should be logged first.
            exception = e
        for warning in record:
            _warnings.append(f"{warning.category.__name__}({warning.message})")
        for warning in self._load_warnings:
            _warnings.append(f"{warning.category.__name__}({warning.message})")
        if _warnings:
            for warning in _warnings:
                logger.warning(warning)
            if not exception:
                # we raise an error if there are any warnings, so that routes.py
                # can catch and return a non 200 status code.
                error = {
                    "error": "There were warnings while running the model.",
                    "output": res,
                    "warnings": _warnings,  # see issue #96
                }
                raise ValueError(json.dumps(error))
            else:
                # if there was an exception, we raise it so that routes.py can
                # catch and return a non 200 status code.
                raise exception
        if exception:
            raise exception
        return res
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn/app | hf_public_repos/api-inference-community/docker_images/sklearn/app/pipelines/tabular_regression.py | from app.pipelines.tabular_classification import TabularClassificationPipeline
class TabularRegressionPipeline(TabularClassificationPipeline):
    """Tabular regression pipeline.

    The actual work (DataFrame construction plus ``model.predict``) is
    identical to tabular classification, hence the pure alias subclass.
    """

    # The actual work done by the pipeline is identical
    pass
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn/app | hf_public_repos/api-inference-community/docker_images/sklearn/app/pipelines/base.py | from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
    """Abstract interface every task pipeline must implement."""

    @abstractmethod
    def __init__(self, model_id: str):
        # Load/prepare the model identified by `model_id` (a Hub repo id).
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        # Run inference on `inputs`; input/output types depend on the task.
        raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
    """Exception type for pipeline-specific errors."""

    pass
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn/app | hf_public_repos/api-inference-community/docker_images/sklearn/app/pipelines/tabular_classification.py | from typing import Dict, List, Union
import pandas as pd
from app.pipelines.common import SklearnBasePipeline
class TabularClassificationPipeline(SklearnBasePipeline):
    """Run an sklearn classifier on tabular, column-oriented payloads."""

    def _get_output(
        self, inputs: Dict[str, Dict[str, List[Union[str, float]]]]
    ) -> List[Union[str, float]]:
        """Predict one label per row of ``inputs["data"]``.

        The payload maps column name -> list of values. Building the frame
        with ``columns=self.columns`` puts columns in the order the model
        expects, drops extra columns, and fills missing ones with NaN.
        """
        frame = pd.DataFrame(inputs["data"], columns=self.columns)
        predictions = self.model.predict(frame)
        return predictions.tolist()
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn/app | hf_public_repos/api-inference-community/docker_images/sklearn/app/pipelines/__init__.py | from app.pipelines.base import Pipeline, PipelineException # isort:skip
from app.pipelines.tabular_classification import TabularClassificationPipeline
from app.pipelines.tabular_regression import TabularRegressionPipeline
from app.pipelines.text_classification import TextClassificationPipeline
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn/app | hf_public_repos/api-inference-community/docker_images/sklearn/app/pipelines/text_classification.py | from typing import Dict, List
from app.pipelines.common import SklearnBasePipeline
class TextClassificationPipeline(SklearnBasePipeline):
    """Run an sklearn text classifier, returning per-class probabilities."""

    def _get_output(self, inputs: str) -> List[Dict[str, float]]:
        """Score a single input string against every class.

        Returns the API-conventional shape: a list containing one list of
        ``{"label": ..., "score": ...}`` dicts, one entry per class.
        """
        probabilities = self.model.predict_proba([inputs]).tolist()[0]
        scores = [
            {"label": str(label), "score": probability}
            for label, probability in zip(self.model.classes_, probabilities)
        ]
        return [scores]
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn | hf_public_repos/api-inference-community/docker_images/sklearn/tests/test_api_tabular_classification.py | import json
import os
from pathlib import Path
from unittest import TestCase, skipIf
import pytest
from app.main import ALLOWED_TASKS
from parameterized import parameterized, parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TEST_CASES, TESTABLE_MODELS
@parameterized_class(
    [{"test_case": x} for x in TESTABLE_MODELS["tabular-classification"]]
)
@skipIf(
    "tabular-classification" not in ALLOWED_TASKS,
    "tabular-classification not implemented",
)
class TabularClassificationTestCase(TestCase):
    """End-to-end tests of the tabular-classification route, parameterized
    over the test model repos listed in TESTABLE_MODELS."""

    # self.test_case is provided by parameterized_class
    def setUp(self):
        # Point the app at the parameterized model and load its fixtures.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.test_case
        os.environ["TASK"] = "tabular-classification"
        self.case_data = TEST_CASES["tabular-classification"][self.test_case]
        sample_folder = Path(__file__).parent / "generators" / "samples"
        # Use context managers so the fixture file handles are closed
        # promptly (they were previously leaked by `json.load(open(...))`).
        with open(sample_folder / self.case_data["input"], "r") as f:
            self.data = json.load(f)
        with open(sample_folder / self.case_data["output"], "r") as f:
            self.expected_output = json.load(f)
        from app.main import app
        self.app = app

    def tearDown(self):
        # Restore the environment so parameterized cases don't leak state.
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def _can_load(self):
        # to load a model, it has to either support being loaded on new sklearn
        # versions, or it needs to be saved by a new sklearn version, since the
        # assumption is that the current sklearn version is the latest.
        return (
            self.case_data["loads_on_new_sklearn"] or not self.case_data["old_sklearn"]
        )

    def _check_requirement(self, requirement):
        # This test is not supposed to run and is thus skipped.
        if not requirement:
            pytest.skip("Skipping test because requirements are not met.")

    def test_success_code(self):
        # This test does a sanity check on the output and checks the response
        # code which should be 200. This requires the model to be from the
        # latest sklearn which is the one installed locally.
        self._check_requirement(not self.case_data["old_sklearn"])
        data = self.data
        expected_output_len = len(self.expected_output)
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": data})
        assert response.status_code == 200
        content = json.loads(response.content)
        assert isinstance(content, list)
        assert len(content) == expected_output_len

    def test_wrong_sklearn_version_warning(self):
        # if the wrong sklearn version is used the model will be loaded and
        # gives an output, but warnings are raised. This test makes sure the
        # right warnings are raised and that the output is included in the
        # error message.
        self._check_requirement(self.case_data["old_sklearn"] and self._can_load())
        data = self.data
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": data})
        # check response
        assert response.status_code == 400
        content = json.loads(response.content)
        assert "error" in content
        assert "warnings" in content
        # check warnings, both at the top level and inside the serialized
        # error payload produced by the pipeline.
        assert any("Trying to unpickle estimator" in w for w in content["warnings"])
        warnings = json.loads(content["error"])["warnings"]
        assert any("Trying to unpickle estimator" in w for w in warnings)
        # check error: the model output must still be included.
        error_message = json.loads(content["error"])
        assert error_message["output"] == self.expected_output

    def test_cannot_load_model(self):
        # test the error message when the model cannot be loaded on a wrong
        # sklearn version
        self._check_requirement(not self.case_data["loads_on_new_sklearn"])
        data = self.data
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": data})
        assert response.status_code == 400
        content = json.loads(response.content)
        assert "error" in content
        assert "An error occurred while loading the model:" in content["error"]

    @parameterized.expand(
        [
            (["add"], ["The following columns were given but not expected:"]),
            (["drop"], ["The following columns were expected but not given:"]),
            (
                ["add", "drop"],
                [
                    "The following columns were given but not expected:",
                    "The following columns were expected but not given:",
                ],
            ),
        ]
    )
    def test_extra_columns(self, column_operations, warn_messages):
        # Test that the right warning is raised when there are extra columns in
        # the input.
        self._check_requirement(self.case_data["has_config"] and self._can_load())
        data = self.data.copy()
        if "drop" in column_operations:
            # we remove the first column in the data. Note that `data` is a
            # dict of column names to values.
            data["data"].pop(next(iter(data["data"].keys())))
        if "add" in column_operations:
            # we add an extra column to the data, the same as the first column.
            # Note that `data` is a dict of column names to values.
            data["data"]["extra_column"] = next(iter(data["data"].values()))
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": data})
        assert response.status_code == 400
        content = json.loads(response.content)
        assert "error" in content
        assert "warnings" in content
        for warn_message in warn_messages:
            assert any(warn_message in w for w in content["warnings"])
        if "drop" not in column_operations or self.case_data["accepts_nan"]:
            # the predict does not raise an error
            error_message = json.loads(content["error"])
            assert len(error_message["output"]) == len(self.expected_output)
            if "drop" not in column_operations:
                # if no column was dropped, the predictions should be the same
                assert error_message["output"] == self.expected_output
        else:
            # otherwise some columns will be empty and predict errors.
            assert (
                "does not accept missing values encoded as NaN natively"
                in content["error"]
            )

    def test_malformed_input(self):
        # Non-JSON payloads must yield a 400 with only an "error" key.
        self._check_requirement(self._can_load())
        with TestClient(self.app) as client:
            response = client.post("/", data=b"Where do I live ?")
        assert response.status_code == 400
        content = json.loads(response.content)
        assert set(content.keys()) == {"error"}
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn | hf_public_repos/api-inference-community/docker_images/sklearn/tests/test_docker_build.py | import os
import subprocess
from unittest import TestCase
class cd:
    """Temporarily switch the process working directory.

    On entry, chdir into the target path (with ``~`` expanded); on exit,
    restore whichever directory was current before entering.
    """

    def __init__(self, newPath):
        self._target = os.path.expanduser(newPath)

    def __enter__(self):
        # Remember where we were so __exit__ can restore it.
        self._origin = os.getcwd()
        os.chdir(self._target)

    def __exit__(self, etype, value, traceback):
        os.chdir(self._origin)
class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        """Smoke test: `docker build` must succeed from the image root."""
        # The Dockerfile lives one directory above tests/, hence the
        # double dirname.
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn | hf_public_repos/api-inference-community/docker_images/sklearn/tests/test_api.py | import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline.
# Repo names encode the variant under test:
# <task>-sklearn-<version>-<estimator>-<with|without>-config-<format>
TESTABLE_MODELS = {
    "tabular-classification": [
        "skops-tests/iris-sklearn-1.0-logistic_regression-with-config-skops",
        "skops-tests/iris-sklearn-1.0-hist_gradient_boosting-with-config-skops",
        "skops-tests/iris-sklearn-latest-logistic_regression-with-config-skops",
        "skops-tests/iris-sklearn-latest-hist_gradient_boosting-with-config-skops",
        "skops-tests/iris-sklearn-1.0-logistic_regression-with-config-pickle",
        "skops-tests/iris-sklearn-1.0-logistic_regression-without-config-pickle",
        "skops-tests/iris-sklearn-1.0-hist_gradient_boosting-with-config-pickle",
        "skops-tests/iris-sklearn-1.0-hist_gradient_boosting-without-config-pickle",
        "skops-tests/iris-sklearn-latest-logistic_regression-with-config-pickle",
        "skops-tests/iris-sklearn-latest-logistic_regression-without-config-pickle",
        "skops-tests/iris-sklearn-latest-hist_gradient_boosting-with-config-pickle",
        "skops-tests/iris-sklearn-latest-hist_gradient_boosting-without-config-pickle",
    ],
    "tabular-regression": [
        "skops-tests/tabularregression-sklearn-1.0-linear_regression-with-config-skops",
        "skops-tests/tabularregression-sklearn-1.0-hist_gradient_boosting_regressor-with-config-skops",
        "skops-tests/tabularregression-sklearn-latest-linear_regression-with-config-skops",
        "skops-tests/tabularregression-sklearn-latest-hist_gradient_boosting_regressor-with-config-skops",
        "skops-tests/tabularregression-sklearn-1.0-linear_regression-with-config-pickle",
        "skops-tests/tabularregression-sklearn-1.0-linear_regression-without-config-pickle",
        "skops-tests/tabularregression-sklearn-1.0-hist_gradient_boosting_regressor-with-config-pickle",
        "skops-tests/tabularregression-sklearn-1.0-hist_gradient_boosting_regressor-without-config-pickle",
        "skops-tests/tabularregression-sklearn-latest-linear_regression-with-config-pickle",
        "skops-tests/tabularregression-sklearn-latest-linear_regression-without-config-pickle",
        "skops-tests/tabularregression-sklearn-latest-hist_gradient_boosting_regressor-with-config-pickle",
        "skops-tests/tabularregression-sklearn-latest-hist_gradient_boosting_regressor-without-config-pickle",
    ],
    "text-classification": [
        "skops-tests/textclassification-sklearn-latest-hist_gradient_boosting-with-config-skops",
        "skops-tests/textclassification-sklearn-1.0-hist_gradient_boosting-with-config-skops",
        "skops-tests/textclassification-sklearn-latest-logistic_regression-with-config-skops",
        "skops-tests/textclassification-sklearn-1.0-logistic_regression-with-config-skops",
        "skops-tests/textclassification-sklearn-latest-hist_gradient_boosting-without-config-pickle",
        "skops-tests/textclassification-sklearn-latest-hist_gradient_boosting-with-config-pickle",
        "skops-tests/textclassification-sklearn-1.0-hist_gradient_boosting-without-config-pickle",
        "skops-tests/textclassification-sklearn-1.0-hist_gradient_boosting-with-config-pickle",
        "skops-tests/textclassification-sklearn-latest-logistic_regression-without-config-pickle",
        "skops-tests/textclassification-sklearn-latest-logistic_regression-with-config-pickle",
        "skops-tests/textclassification-sklearn-1.0-logistic_regression-without-config-pickle",
        "skops-tests/textclassification-sklearn-1.0-logistic_regression-with-config-pickle",
    ],
}
# This contains information about the test cases above, used in the tests to
# define which tests to run for which examples. Per-model flags:
# - input / output: fixture file names under tests/generators/samples
# - has_config: the model repo ships a config.json (with column info)
# - old_sklearn: the model was saved with an old (1.0) sklearn release
# - accepts_nan: estimator natively handles NaN inputs (tabular tasks only)
# - loads_on_new_sklearn: the model can be loaded on the latest sklearn
TEST_CASES = {
    "tabular-classification": {
        "skops-tests/iris-sklearn-latest-logistic_regression-without-config-pickle": {
            "input": "iris-latest-input.json",
            "output": "iris-logistic_regression-latest-output.json",
            "has_config": False,
            "old_sklearn": False,
            "accepts_nan": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/iris-sklearn-latest-logistic_regression-with-config-pickle": {
            "input": "iris-latest-input.json",
            "output": "iris-logistic_regression-latest-output.json",
            "has_config": True,
            "old_sklearn": False,
            "accepts_nan": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/iris-sklearn-latest-logistic_regression-with-config-skops": {
            "input": "iris-latest-input.json",
            "output": "iris-logistic_regression-latest-output.json",
            "has_config": True,
            "old_sklearn": False,
            "accepts_nan": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/iris-sklearn-1.0-logistic_regression-without-config-pickle": {
            "input": "iris-1.0-input.json",
            "output": "iris-logistic_regression-1.0-output.json",
            "has_config": False,
            "old_sklearn": True,
            "accepts_nan": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/iris-sklearn-1.0-logistic_regression-with-config-pickle": {
            "input": "iris-1.0-input.json",
            "output": "iris-logistic_regression-1.0-output.json",
            "has_config": True,
            "old_sklearn": True,
            "accepts_nan": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/iris-sklearn-1.0-logistic_regression-with-config-skops": {
            "input": "iris-1.0-input.json",
            "output": "iris-logistic_regression-1.0-output.json",
            "has_config": True,
            "old_sklearn": True,
            "accepts_nan": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/iris-sklearn-latest-hist_gradient_boosting-without-config-pickle": {
            "input": "iris-latest-input.json",
            "output": "iris-hist_gradient_boosting-latest-output.json",
            "has_config": False,
            "old_sklearn": False,
            "accepts_nan": True,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/iris-sklearn-latest-hist_gradient_boosting-with-config-pickle": {
            "input": "iris-latest-input.json",
            "output": "iris-hist_gradient_boosting-latest-output.json",
            "has_config": True,
            "old_sklearn": False,
            "accepts_nan": True,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/iris-sklearn-latest-hist_gradient_boosting-with-config-skops": {
            "input": "iris-latest-input.json",
            "output": "iris-hist_gradient_boosting-latest-output.json",
            "has_config": True,
            "old_sklearn": False,
            "accepts_nan": True,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/iris-sklearn-1.0-hist_gradient_boosting-without-config-pickle": {
            "input": "iris-1.0-input.json",
            "output": "iris-hist_gradient_boosting-1.0-output.json",
            "has_config": False,
            "old_sklearn": True,
            "accepts_nan": True,
            "loads_on_new_sklearn": False,
        },
        "skops-tests/iris-sklearn-1.0-hist_gradient_boosting-with-config-pickle": {
            "input": "iris-1.0-input.json",
            "output": "iris-hist_gradient_boosting-1.0-output.json",
            "has_config": True,
            "old_sklearn": True,
            "accepts_nan": True,
            "loads_on_new_sklearn": False,
        },
        "skops-tests/iris-sklearn-1.0-hist_gradient_boosting-with-config-skops": {
            "input": "iris-1.0-input.json",
            "output": "iris-hist_gradient_boosting-1.0-output.json",
            "has_config": True,
            "old_sklearn": True,
            "accepts_nan": True,
            "loads_on_new_sklearn": False,
        },
    },
    "tabular-regression": {
        "skops-tests/tabularregression-sklearn-latest-linear_regression-without-config-pickle": {
            "input": "tabularregression-latest-input.json",
            "output": "tabularregression-linear_regression-latest-output.json",
            "has_config": False,
            "old_sklearn": False,
            "accepts_nan": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/tabularregression-sklearn-latest-linear_regression-with-config-pickle": {
            "input": "tabularregression-latest-input.json",
            "output": "tabularregression-linear_regression-latest-output.json",
            "has_config": True,
            "old_sklearn": False,
            "accepts_nan": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/tabularregression-sklearn-latest-linear_regression-with-config-skops": {
            "input": "tabularregression-latest-input.json",
            "output": "tabularregression-linear_regression-latest-output.json",
            "has_config": True,
            "old_sklearn": False,
            "accepts_nan": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/tabularregression-sklearn-1.0-linear_regression-without-config-pickle": {
            "input": "tabularregression-1.0-input.json",
            "output": "tabularregression-linear_regression-1.0-output.json",
            "has_config": False,
            "old_sklearn": True,
            "accepts_nan": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/tabularregression-sklearn-1.0-linear_regression-with-config-pickle": {
            "input": "tabularregression-1.0-input.json",
            "output": "tabularregression-linear_regression-1.0-output.json",
            "has_config": True,
            "old_sklearn": True,
            "accepts_nan": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/tabularregression-sklearn-1.0-linear_regression-with-config-skops": {
            "input": "tabularregression-1.0-input.json",
            "output": "tabularregression-linear_regression-1.0-output.json",
            "has_config": True,
            "old_sklearn": True,
            "accepts_nan": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/tabularregression-sklearn-latest-hist_gradient_boosting_regressor-without-config-pickle": {
            "input": "tabularregression-latest-input.json",
            "output": "tabularregression-hist_gradient_boosting_regressor-latest-output.json",
            "has_config": False,
            "old_sklearn": False,
            "accepts_nan": True,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/tabularregression-sklearn-latest-hist_gradient_boosting_regressor-with-config-pickle": {
            "input": "tabularregression-latest-input.json",
            "output": "tabularregression-hist_gradient_boosting_regressor-latest-output.json",
            "has_config": True,
            "old_sklearn": False,
            "accepts_nan": True,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/tabularregression-sklearn-latest-hist_gradient_boosting_regressor-with-config-skops": {
            "input": "tabularregression-latest-input.json",
            "output": "tabularregression-hist_gradient_boosting_regressor-latest-output.json",
            "has_config": True,
            "old_sklearn": False,
            "accepts_nan": True,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/tabularregression-sklearn-1.0-hist_gradient_boosting_regressor-without-config-pickle": {
            "input": "tabularregression-1.0-input.json",
            "output": "tabularregression-hist_gradient_boosting_regressor-1.0-output.json",
            "has_config": False,
            "old_sklearn": True,
            "accepts_nan": True,
            "loads_on_new_sklearn": False,
        },
        "skops-tests/tabularregression-sklearn-1.0-hist_gradient_boosting_regressor-with-config-pickle": {
            "input": "tabularregression-1.0-input.json",
            "output": "tabularregression-hist_gradient_boosting_regressor-1.0-output.json",
            "has_config": True,
            "old_sklearn": True,
            "accepts_nan": True,
            "loads_on_new_sklearn": False,
        },
        "skops-tests/tabularregression-sklearn-1.0-hist_gradient_boosting_regressor-with-config-skops": {
            "input": "tabularregression-1.0-input.json",
            "output": "tabularregression-hist_gradient_boosting_regressor-1.0-output.json",
            "has_config": True,
            "old_sklearn": True,
            "accepts_nan": True,
            "loads_on_new_sklearn": False,
        },
    },
    # text-classification cases carry no "accepts_nan" flag: the input is a
    # single string, so the NaN-column scenario does not apply.
    "text-classification": {
        "skops-tests/textclassification-sklearn-latest-hist_gradient_boosting-without-config-pickle": {
            "input": "textclassification-latest-input.json",
            "output": "textclassification-hist_gradient_boosting-latest-output.json",
            "has_config": False,
            "old_sklearn": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/textclassification-sklearn-latest-hist_gradient_boosting-with-config-pickle": {
            "input": "textclassification-latest-input.json",
            "output": "textclassification-hist_gradient_boosting-latest-output.json",
            "has_config": True,
            "old_sklearn": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/textclassification-sklearn-latest-hist_gradient_boosting-with-config-skops": {
            "input": "textclassification-latest-input.json",
            "output": "textclassification-hist_gradient_boosting-latest-output.json",
            "has_config": True,
            "old_sklearn": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/textclassification-sklearn-1.0-hist_gradient_boosting-without-config-pickle": {
            "input": "textclassification-1.0-input.json",
            "output": "textclassification-hist_gradient_boosting-1.0-output.json",
            "has_config": False,
            "old_sklearn": True,
            "loads_on_new_sklearn": False,
        },
        "skops-tests/textclassification-sklearn-1.0-hist_gradient_boosting-with-config-pickle": {
            "input": "textclassification-1.0-input.json",
            "output": "textclassification-hist_gradient_boosting-1.0-output.json",
            "has_config": True,
            "old_sklearn": True,
            "loads_on_new_sklearn": False,
        },
        "skops-tests/textclassification-sklearn-1.0-hist_gradient_boosting-with-config-skops": {
            "input": "textclassification-1.0-input.json",
            "output": "textclassification-hist_gradient_boosting-1.0-output.json",
            "has_config": True,
            "old_sklearn": True,
            "loads_on_new_sklearn": False,
        },
        "skops-tests/textclassification-sklearn-latest-logistic_regression-without-config-pickle": {
            "input": "textclassification-latest-input.json",
            "output": "textclassification-logistic_regression-latest-output.json",
            "has_config": False,
            "old_sklearn": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/textclassification-sklearn-latest-logistic_regression-with-config-pickle": {
            "input": "textclassification-latest-input.json",
            "output": "textclassification-logistic_regression-latest-output.json",
            "has_config": True,
            "old_sklearn": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/textclassification-sklearn-latest-logistic_regression-with-config-skops": {
            "input": "textclassification-latest-input.json",
            "output": "textclassification-logistic_regression-latest-output.json",
            "has_config": True,
            "old_sklearn": False,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/textclassification-sklearn-1.0-logistic_regression-without-config-pickle": {
            "input": "textclassification-1.0-input.json",
            "output": "textclassification-logistic_regression-1.0-output.json",
            "has_config": False,
            "old_sklearn": True,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/textclassification-sklearn-1.0-logistic_regression-with-config-pickle": {
            "input": "textclassification-1.0-input.json",
            "output": "textclassification-logistic_regression-1.0-output.json",
            "has_config": True,
            "old_sklearn": True,
            "loads_on_new_sklearn": True,
        },
        "skops-tests/textclassification-sklearn-1.0-logistic_regression-with-config-skops": {
            "input": "textclassification-1.0-input.json",
            "output": "textclassification-logistic_regression-1.0-output.json",
            "has_config": True,
            "old_sklearn": True,
            "loads_on_new_sklearn": True,
        },
    },
}
# A sample of the cross-framework task universe. Used below to verify that
# tasks NOT implemented by this image are rejected by `get_pipeline`.
ALL_TASKS = {
    "automatic-speech-recognition",
    "audio-source-separation",
    "feature-extraction",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "tabular-classification",
    "text-generation",
    "text-to-speech",
    "token-classification",
}
class PipelineTestCase(TestCase):
    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        # Every concrete docker image (everything except the `common`
        # template) must register at least one task.
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        # Tasks outside ALLOWED_TASKS must be rejected with EnvironmentError.
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                with self.assertRaises(EnvironmentError):
                    get_pipeline(unsupported_task, model_id="XX")
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn | hf_public_repos/api-inference-community/docker_images/sklearn/tests/test_api_text_classification.py | import json
import os
from pathlib import Path
from unittest import TestCase, skipIf
import pytest
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TEST_CASES, TESTABLE_MODELS
@parameterized_class([{"test_case": x} for x in TESTABLE_MODELS["text-classification"]])
@skipIf(
    "text-classification" not in ALLOWED_TASKS,
    "text-classification not implemented",
)
class TextClassificationTestCase(TestCase):
    """API tests for the text-classification task.
    Parameterized over the model repos in
    TESTABLE_MODELS["text-classification"]; ``self.test_case`` (a model
    repo id) is injected by ``parameterized_class``.
    """
    def setUp(self):
        """Point the app at the parameterized model and load its fixtures."""
        # Remember the previous env vars so tearDown can restore them.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.test_case
        os.environ["TASK"] = "text-classification"
        self.case_data = TEST_CASES["text-classification"][self.test_case]
        # Input sent to the API and expected output, both produced by
        # tests/generators/generate.py.
        sample_folder = Path(__file__).parent / "generators" / "samples"
        self.data = json.load(open(sample_folder / self.case_data["input"], "r"))
        self.expected_output = json.load(
            open(sample_folder / self.case_data["output"], "r")
        )
        # Imported late so app.main picks up the env vars set above.
        from app.main import app
        self.app = app
    def tearDown(self):
        """Restore MODEL_ID/TASK to their pre-test values."""
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]
    def _can_load(self):
        # to load a model, it has to either support being loaded on new sklearn
        # versions, or it needs to be saved by a new sklearn version, since the
        # assumption is that the current sklearn version is the latest.
        return (
            self.case_data["loads_on_new_sklearn"] or not self.case_data["old_sklearn"]
        )
    def _check_requirement(self, requirement):
        # This test is not supposed to run and is thus skipped.
        if not requirement:
            pytest.skip("Skipping test because requirements are not met.")
    def test_success_code(self):
        # This test does a sanity check on the output and checks the response
        # code which should be 200. This requires the model to be from the
        # latest sklearn which is the one installed locally.
        self._check_requirement(not self.case_data["old_sklearn"])
        data = self.data
        expected_output_len = len(self.expected_output)
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": data["data"][0]})
            self.assertEqual(
                response.status_code,
                200,
            )
            # Response shape: a list with one list of {"label", "score"}
            # dicts (one entry per class) for the single input sent.
            content = json.loads(response.content)
            self.assertEqual(type(content), list)
            self.assertEqual(len(content), 1)
            self.assertEqual(type(content[0]), list)
            self.assertEqual(
                set(k for el in content[0] for k in el.keys()),
                {"label", "score"},
            )
            self.assertEqual(len(content), expected_output_len)
    def test_wrong_sklearn_version_warning(self):
        # if the wrong sklearn version is used the model will be loaded and
        # gives an output, but warnings are raised. This test makes sure the
        # right warnings are raised and that the output is included in the
        # error message.
        self._check_requirement(self.case_data["old_sklearn"] and self._can_load())
        data = self.data
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": data["data"][0]})
            # check response
            assert response.status_code == 400
            content = json.loads(response.content)
            assert "error" in content
            assert "warnings" in content
            # check warnings
            assert any("Trying to unpickle estimator" in w for w in content["warnings"])
            # The backend also embeds the warnings in the JSON-encoded
            # "error" payload; check both places.
            warnings = json.loads(content["error"])["warnings"]
            assert any("Trying to unpickle estimator" in w for w in warnings)
            # check error
            error_message = json.loads(content["error"])
            assert error_message["output"] == self.expected_output
    def test_cannot_load_model(self):
        # test the error message when the model cannot be loaded on a wrong
        # sklearn version
        self._check_requirement(not self.case_data["loads_on_new_sklearn"])
        data = self.data
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": data["data"][0]})
            assert response.status_code == 400
            content = json.loads(response.content)
            assert "error" in content
            assert "An error occurred while loading the model:" in content["error"]
    def test_malformed_question(self):
        # testing wrong input for inference API
        with TestClient(self.app) as client:
            # b"\xc3\x28" is an invalid UTF-8 continuation sequence; the
            # API must answer 400 with the decode error message.
            response = client.post("/", data=b"\xc3\x28")
            self.assertEqual(
                response.status_code,
                400,
            )
            self.assertEqual(
                response.content,
                b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
            )
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn | hf_public_repos/api-inference-community/docker_images/sklearn/tests/test_api_tabular_regression.py | """Tests for tabular regression
The test class is almost completely copied from TabularClassificationTestCase,
only changing to different parametrized test cases.
"""
import json
import os
from pathlib import Path
from unittest import TestCase, skipIf
import pytest
from app.main import ALLOWED_TASKS
from parameterized import parameterized, parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TEST_CASES, TESTABLE_MODELS
@parameterized_class([{"test_case": x} for x in TESTABLE_MODELS["tabular-regression"]])
@skipIf(
    "tabular-regression" not in ALLOWED_TASKS,
    "tabular-regression not implemented",
)
class TabularRegressionTestCase(TestCase):
    """API tests for the tabular-regression task, parameterized over the
    model repos in TESTABLE_MODELS["tabular-regression"].
    """
    # self.test_case is provided by parameterized_class
    def setUp(self):
        """Point the app at the parameterized model and load its fixtures."""
        # Remember the previous env vars so tearDown can restore them.
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.test_case
        os.environ["TASK"] = "tabular-regression"
        self.case_data = TEST_CASES["tabular-regression"][self.test_case]
        # Input sent to the API and expected output, both produced by
        # tests/generators/generate.py.
        sample_folder = Path(__file__).parent / "generators" / "samples"
        self.data = json.load(open(sample_folder / self.case_data["input"], "r"))
        self.expected_output = json.load(
            open(sample_folder / self.case_data["output"], "r")
        )
        # Imported late so app.main picks up the env vars set above.
        from app.main import app
        self.app = app
    def tearDown(self):
        """Restore MODEL_ID/TASK to their pre-test values."""
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]
    def _can_load(self):
        # to load a model, it has to either support being loaded on new sklearn
        # versions, or it needs to be saved by a new sklearn version, since the
        # assumption is that the current sklearn version is the latest.
        return (
            self.case_data["loads_on_new_sklearn"] or not self.case_data["old_sklearn"]
        )
    def _check_requirement(self, requirement):
        # This test is not supposed to run and is thus skipped.
        if not requirement:
            pytest.skip("Skipping test because requirements are not met.")
    def test_success_code(self):
        # This test does a sanity check on the output and checks the response
        # code which should be 200. This requires the model to be from the
        # latest sklearn which is the one installed locally.
        self._check_requirement(not self.case_data["old_sklearn"])
        data = self.data
        expected_output_len = len(self.expected_output)
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": data})
            assert response.status_code == 200
            # Response is a flat list of predictions, one per input row.
            content = json.loads(response.content)
            assert isinstance(content, list)
            assert len(content) == expected_output_len
    def test_wrong_sklearn_version_warning(self):
        # if the wrong sklearn version is used the model will be loaded and
        # gives an output, but warnings are raised. This test makes sure the
        # right warnings are raised and that the output is included in the
        # error message.
        self._check_requirement(self.case_data["old_sklearn"] and self._can_load())
        data = self.data
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": data})
            # check response
            assert response.status_code == 400
            content = json.loads(response.content)
            assert "error" in content
            assert "warnings" in content
            # check warnings
            assert any("Trying to unpickle estimator" in w for w in content["warnings"])
            # The backend also embeds the warnings in the JSON-encoded
            # "error" payload; check both places.
            warnings = json.loads(content["error"])["warnings"]
            assert any("Trying to unpickle estimator" in w for w in warnings)
            # check error
            error_message = json.loads(content["error"])
            assert len(error_message["output"]) == len(self.expected_output)
            # Float predictions: compare approximately, element-wise.
            for val_output, val_expected in zip(
                error_message["output"], self.expected_output
            ):
                self.assertAlmostEqual(val_output, val_expected)
    def test_cannot_load_model(self):
        # test the error message when the model cannot be loaded on a wrong
        # sklearn version
        self._check_requirement(not self.case_data["loads_on_new_sklearn"])
        data = self.data
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": data})
            assert response.status_code == 400
            content = json.loads(response.content)
            assert "error" in content
            assert "An error occurred while loading the model:" in content["error"]
    @parameterized.expand(
        [
            (["add"], ["The following columns were given but not expected:"]),
            (["drop"], ["The following columns were expected but not given:"]),
            (
                ["add", "drop"],
                [
                    "The following columns were given but not expected:",
                    "The following columns were expected but not given:",
                ],
            ),
        ]
    )
    def test_extra_columns(self, column_operations, warn_messages):
        # Test that the right warning is raised when there are extra columns in
        # the input.
        self._check_requirement(self.case_data["has_config"] and self._can_load())
        data = self.data.copy()
        if "drop" in column_operations:
            # we remove the first column in the data. Note that `data` is a
            # dict of column names to values.
            data["data"].pop(next(iter(data["data"].keys())))
        if "add" in column_operations:
            # we add an extra column to the data, the same as the first column.
            # Note that `data` is a dict of column names to values.
            data["data"]["extra_column"] = next(iter(data["data"].values()))
        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": data})
            assert response.status_code == 400
            content = json.loads(response.content)
            assert "error" in content
            assert "warnings" in content
            # Each expected mismatch message must appear among the warnings.
            for warn_message in warn_messages:
                assert any(warn_message in w for w in content["warnings"])
            if "drop" not in column_operations or self.case_data["accepts_nan"]:
                # predict does not raise an error
                error_message = json.loads(content["error"])
                assert len(error_message["output"]) == len(self.expected_output)
                if "drop" not in column_operations:
                    # if no column was dropped, the predictions should be the same
                    for val_output, val_expected in zip(
                        error_message["output"], self.expected_output
                    ):
                        self.assertAlmostEqual(val_output, val_expected)
            else:
                # otherwise some columns will be empty and predict errors.
                assert (
                    "does not accept missing values encoded as NaN natively"
                    in content["error"]
                )
    def test_malformed_input(self):
        """Non-JSON payloads must yield a 400 with only an "error" key."""
        self._check_requirement(self._can_load())
        with TestClient(self.app) as client:
            response = client.post("/", data=b"Where do I live ?")
            assert response.status_code == 400
            content = json.loads(response.content)
            assert set(content.keys()) == {"error"}
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/sklearn-1.0.yml | name: api-inference-community-test-generator-sklearn-1-0
channels:
- conda-forge
- nodefaults
dependencies:
- scikit-learn=1.0.2
- pandas
- huggingface_hub
- pip
- pip:
# if you're testing skops, you should install from github, and probably
# a specific hash if your PR on the skops side is not merged.
- git+https://github.com/skops-dev/skops.git
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/run.sh | #!/usr/bin/env bash
# uncomment to enable debugging
# set -xe

# Resolve the directory containing this script so it can be invoked from
# anywhere; quote all expansions so paths with spaces don't word-split.
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
cd "$SCRIPT_DIR"
# have to do this since can't do mamba run, and need bash functions to call
# activate
source "$(mamba info -q --base)/etc/profile.d/conda.sh"
source "$(mamba info -q --base)/etc/profile.d/mamba.sh"
mamba env update --file sklearn-1.0.yml
mamba env update --file sklearn-latest.yml
# not doing mamba run ... since it just wouldn't work and would use system's
# python
mamba activate api-inference-community-test-generator-sklearn-1-0
python generate.py 1.0
mamba deactivate
mamba activate api-inference-community-test-generator-sklearn-latest
python generate.py latest
mamba deactivate
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/sklearn-latest.yml | name: api-inference-community-test-generator-sklearn-latest
channels:
- conda-forge
- nodefaults
dependencies:
- scikit-learn
- pandas
- huggingface_hub
- pip
- pip:
# if you're testing skops, you should install from github, and probably
# a specific hash if your PR on the skops side is not merged.
- git+https://github.com/skops-dev/skops.git
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/generate.py | #!/usr/bin/env python3
"""Generate artefacts used for testing
Don't run this script directly but use `run.sh` instead.
For the given sklearn version, train models for different task types, upload
them with and without config to HF Hub, and store their input and predictions
locally (and in the GH repo).
These artefacts will be used for unit testing the sklearn integration.
"""
import json
import os
import pickle
import sys
import time
from operator import methodcaller
from pathlib import Path
from tempfile import mkdtemp, mkstemp
import sklearn
import skops.io as sio
from huggingface_hub import HfApi
from huggingface_hub.utils import RepositoryNotFoundError
from sklearn.datasets import fetch_20newsgroups, load_diabetes, load_iris
from sklearn.ensemble import (
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
)
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from skops import hub_utils
# Seconds to wait between Hub pushes; see the AWS "503 Slow Down" note in
# push_repo.
SLEEP_BETWEEN_PUSHES = 1
def push_repo(repo_name, local_repo):
    """Re-create the Hub repo ``skops-tests/<repo_name>`` and upload the
    contents of *local_repo* into it.
    Requires a write token for the skops-tests org in the
    SKOPS_TESTS_TOKEN environment variable.  The repo is deleted first (if
    it exists) so every run starts from a clean state.
    """
    # this token should be allowed to push to the skops-tests org.
    token = os.environ["SKOPS_TESTS_TOKEN"]
    repo_id = f"skops-tests/{repo_name}"
    print(f"Pushing {repo_id}")
    client = HfApi()
    try:
        client.delete_repo(repo_id, token=token)
    except RepositoryNotFoundError:
        # repo does not exist yet
        pass
    client.create_repo(repo_id=repo_id, token=token, repo_type="model")
    client.upload_folder(
        repo_id=repo_id,
        path_in_repo=".",
        folder_path=local_repo,
        commit_message="pushing files to the repo from test generator!",
        commit_description=None,
        token=token,
        repo_type=None,
        revision=None,
        create_pr=False,
    )
    # prevent AWS "503 Server Error: Slow Down for url" error
    time.sleep(SLEEP_BETWEEN_PUSHES)
def get_tabular_classifiers():
    """Yield (name, estimator) pairs of tabular classifiers to train and
    push to the hub."""
    # A pipeline of simple estimators: loadable across sklearn versions.
    simple_pipeline = make_pipeline(StandardScaler(), LogisticRegression())
    yield "logistic_regression", simple_pipeline
    # Not loadable on 1.1 when stored with 1.0, but unlike the pipeline
    # above it handles NaN input values.
    yield "hist_gradient_boosting", HistGradientBoostingClassifier()
def get_text_classifiers():
    """Yield (name, estimator) pairs of text classifiers to train and push
    to the hub."""
    # A pipeline of simple estimators: loadable across sklearn versions.
    yield (
        "logistic_regression",
        make_pipeline(CountVectorizer(), LogisticRegression()),
    )
    # Not loadable on 1.1 when stored with 1.0, but unlike the pipeline
    # above it handles NaN input values.  The FunctionTransformer densifies
    # the sparse CountVectorizer output for the boosting estimator.
    densify = FunctionTransformer(methodcaller("toarray"))
    yield (
        "hist_gradient_boosting",
        make_pipeline(
            CountVectorizer(max_features=100),
            densify,
            HistGradientBoostingClassifier(max_iter=20),
        ),
    )
def get_tabular_regressors():
    """Yield (name, estimator) pairs of tabular regressors to train and
    push to the hub."""
    # A pipeline of simple estimators: loadable across sklearn versions.
    simple_pipeline = make_pipeline(StandardScaler(), LinearRegression())
    yield "linear_regression", simple_pipeline
    # Not loadable on 1.1 when stored with 1.0, but unlike the pipeline
    # above it handles NaN input values.
    yield "hist_gradient_boosting_regressor", HistGradientBoostingRegressor()
def create_repos(est_name, task_name, est, sample, version, serialization_format):
    """Serialize *est* and push it to the Hub, once with a config file and
    (for pickle only) once without.

    Parameters
    ----------
    est_name, task_name, version, serialization_format :
        Used to build the repo name via REPO_NAMES.
    est : fitted estimator to serialize.
    sample : model input stored alongside the model by ``hub_utils.init``.
    """
    # Serialize the estimator into a temp file; close the descriptor
    # returned by mkstemp so it doesn't leak (we reopen by name below).
    fd, est_filename = mkstemp(
        prefix="skops-", suffix=SERIALIZATION_FORMATS[serialization_format]
    )
    os.close(fd)
    if serialization_format == "pickle":
        with open(est_filename, mode="bw") as f:
            pickle.dump(est, file=f)
    else:
        sio.dump(est, est_filename)
    local_repo = mkdtemp(prefix="skops-")
    # hub_utils.init copies the model file into local_repo and writes a
    # config.json next to it.
    hub_utils.init(
        model=est_filename,
        requirements=[f"scikit-learn={sklearn.__version__}"],
        dst=local_repo,
        task=task_name,
        data=sample,
    )
    # push WITH config
    repo_name = REPO_NAMES[task_name].format(
        version=version,
        est_name=est_name,
        w_or_wo="with",
        serialization_format=serialization_format,
    )
    push_repo(repo_name=repo_name, local_repo=local_repo)
    if serialization_format == "pickle":
        # push WITHOUT CONFIG
        repo_name = REPO_NAMES[task_name].format(
            version=version,
            est_name=est_name,
            w_or_wo="without",
            serialization_format=serialization_format,
        )
        # Now we remove the config file and push to a new repo
        os.remove(Path(local_repo) / "config.json")
        # The only valid file name for a model pickle file if no config.json is
        # available is `sklearn_model.joblib`, otherwise the backend will fail to
        # find the file.
        # NOTE: rename the COPY inside local_repo (est_filename is an
        # absolute temp path; joining it to local_repo would resolve to the
        # temp file itself and leave a stray pickle under its original name
        # in the pushed repo).
        os.rename(
            Path(local_repo) / Path(est_filename).name,
            Path(local_repo) / "sklearn_model.joblib",
        )
        push_repo(
            repo_name=repo_name,
            local_repo=local_repo,
        )
def save_sample(sample, filename, task):
    """Write the model input *sample* to tests/generators/samples/<filename>.

    Text tasks store the raw list of strings; tabular tasks store the
    DataFrame as a dict of column name -> list of values.
    """
    if "text" in task:
        payload = {"data": sample}
    else:
        payload = {"data": sample.to_dict(orient="list")}
    # "w" instead of "w+": the file is only ever written, never read back.
    with open(Path(__file__).parent / "samples" / filename, "w") as f:
        json.dump(payload, f, indent=2)
def predict_tabular_classifier(est, sample, filename):
    """Store *est*'s class predictions for *sample* as a JSON list of ints
    under tests/generators/samples/<filename>."""
    predictions = [int(label) for label in est.predict(sample)]
    target = Path(__file__).parent / "samples" / filename
    with open(target, "w") as f:
        json.dump(predictions, f, indent=2)
def predict_tabular_regressor(est, sample, filename):
    """Store *est*'s regression predictions for *sample* as a JSON list of
    floats under tests/generators/samples/<filename>."""
    predictions = [float(value) for value in est.predict(sample)]
    target = Path(__file__).parent / "samples" / filename
    with open(target, "w") as f:
        json.dump(predictions, f, indent=2)
def predict_text_classifier(est, sample, filename):
    """Store per-class probabilities for the first row of *sample* as JSON
    ([[{"label", "score"}, ...]]) under tests/generators/samples/<filename>."""
    first_row_probas = est.predict_proba(sample).tolist()[0]
    output = [
        {"label": str(est.classes_[idx]), "score": proba}
        for idx, proba in enumerate(first_row_probas)
    ]
    with open(Path(__file__).parent / "samples" / filename, "w") as f:
        json.dump([output], f, indent=2)
#############
# CONSTANTS #
#############
# Only text-classification is currently generated; the commented list is
# the full set of supported tasks (presumably narrowed temporarily —
# uncomment to regenerate everything).
# TASKS = ["tabular-classification", "tabular-regression", "text-classification"]
TASKS = ["text-classification"]
# Task -> (X, y) dataset used for training and sampling.
DATA = {
    "tabular-classification": load_iris(return_X_y=True, as_frame=True),
    "tabular-regression": load_diabetes(return_X_y=True, as_frame=True),
    "text-classification": fetch_20newsgroups(subset="test", return_X_y=True),
}
# Task -> generator of (name, estimator) pairs.  NOTE: these are one-shot
# generators, so main() can only iterate each task's models once per run.
MODELS = {
    "tabular-classification": get_tabular_classifiers(),
    "tabular-regression": get_tabular_regressors(),
    "text-classification": get_text_classifiers(),
}
# Templates for the sample-input / expected-output JSON file names and the
# Hub repo names; placeholders are filled in create_repos() and main().
INPUT_NAMES = {
    "tabular-classification": "iris-{version}-input.json",
    "tabular-regression": "tabularregression-{version}-input.json",
    "text-classification": "textclassification-{version}-input.json",
}
OUTPUT_NAMES = {
    "tabular-classification": "iris-{est_name}-{version}-output.json",
    "tabular-regression": "tabularregression-{est_name}-{version}-output.json",
    "text-classification": "textclassification-{est_name}-{version}-output.json",
}
REPO_NAMES = {
    "tabular-classification": "iris-sklearn-{version}-{est_name}-{w_or_wo}-config-{serialization_format}",
    "tabular-regression": "tabularregression-sklearn-{version}-{est_name}-{w_or_wo}-config-{serialization_format}",
    "text-classification": "textclassification-sklearn-{version}-{est_name}-{w_or_wo}-config-{serialization_format}",
}
# Task -> function that writes the expected-output JSON for a model.
PREDICT_FUNCTIONS = {
    "tabular-classification": predict_tabular_classifier,
    "tabular-regression": predict_tabular_regressor,
    "text-classification": predict_text_classifier,
}
# Serialization format -> file suffix for the stored model.
SERIALIZATION_FORMATS = {"pickle": ".pkl", "skops": ".skops"}
def main(version):
    """Train, upload, and snapshot test artefacts for every task in TASKS.

    *version* is the sklearn version tag (e.g. "1.0" or "latest") used in
    repo and file names.
    """
    for task in TASKS:
        print(f"Creating data for task '{task}' and version '{version}'")
        X, y = DATA[task]
        X_train, X_test, y_train, _ = train_test_split(
            X, y, test_size=0.2, random_state=42
        )
        # DataFrames expose `head`; plain arrays/lists are sliced instead.
        if callable(getattr(X_train, "head", None)):
            sample = X_test.head(10)
        else:
            sample = X_test[:10]
        # save model input, which is later used for tests
        input_name = INPUT_NAMES[task].format(version=version)
        save_sample(sample, input_name, task)
        for est_name, model in MODELS[task]:
            # Fit ONCE per estimator, outside the format loop: estimators
            # without a fixed random_state are not deterministic across
            # fits, so refitting per format would push different models per
            # format while the predictions saved below would only match the
            # last one.
            model.fit(X_train, y_train)
            for serialization_format in SERIALIZATION_FORMATS:
                create_repos(
                    est_name=est_name,
                    task_name=task,
                    est=model,
                    sample=sample,
                    version=version,
                    serialization_format=serialization_format,
                )
            # save model predictions, which are later used for tests
            output_name = OUTPUT_NAMES[task].format(est_name=est_name, version=version)
            predict = PREDICT_FUNCTIONS[task]
            predict(model, sample, output_name)
if __name__ == "__main__":
    # CLI entry point: `python generate.py <sklearn-version-tag>`,
    # invoked from run.sh with "1.0" or "latest".
    sklearn_version = sys.argv[1]
    main(sklearn_version)
|
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/tabularregression-hist_gradient_boosting_regressor-1.0-output.json | [
128.767605088706,
213.12484287152625,
152.87415981711302,
271.367552554169,
109.00499923164844,
81.88059224780598,
238.4711759447084,
215.14159932904784,
134.42407401121258,
189.15096503239798
] |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/textclassification-hist_gradient_boosting-1.0-output.json | [
[
{
"label": "0",
"score": 0.007188341023581015
},
{
"label": "1",
"score": 0.03422080165466977
},
{
"label": "2",
"score": 0.04593990453288178
},
{
"label": "3",
"score": 0.02403056643079793
},
{
"label": "4",
"score": 0.03209593726948873
},
{
"label": "5",
"score": 0.1118272840896114
},
{
"label": "6",
"score": 0.445567936909444
},
{
"label": "7",
"score": 0.02455902207117283
},
{
"label": "8",
"score": 0.011669934009514884
},
{
"label": "9",
"score": 0.030182113638619834
},
{
"label": "10",
"score": 0.0248637811927033
},
{
"label": "11",
"score": 0.04262809835525814
},
{
"label": "12",
"score": 0.0524577232869705
},
{
"label": "13",
"score": 0.03816834348974906
},
{
"label": "14",
"score": 0.019925741393428644
},
{
"label": "15",
"score": 0.009631384752144945
},
{
"label": "16",
"score": 0.014821126836993737
},
{
"label": "17",
"score": 0.012278330051148537
},
{
"label": "18",
"score": 0.011661211580574562
},
{
"label": "19",
"score": 0.006282417431246468
}
]
] |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/textclassification-hist_gradient_boosting-latest-output.json | [
[
{
"label": "0",
"score": 0.007188341023581018
},
{
"label": "1",
"score": 0.03422080165466977
},
{
"label": "2",
"score": 0.04593990453288182
},
{
"label": "3",
"score": 0.024030566430797918
},
{
"label": "4",
"score": 0.03209593726948875
},
{
"label": "5",
"score": 0.11182728408961141
},
{
"label": "6",
"score": 0.44556793690944363
},
{
"label": "7",
"score": 0.024559022071172845
},
{
"label": "8",
"score": 0.011669934009514878
},
{
"label": "9",
"score": 0.030182113638619862
},
{
"label": "10",
"score": 0.0248637811927033
},
{
"label": "11",
"score": 0.04262809835525818
},
{
"label": "12",
"score": 0.05245772328697052
},
{
"label": "13",
"score": 0.038168343489749096
},
{
"label": "14",
"score": 0.019925741393428655
},
{
"label": "15",
"score": 0.009631384752144933
},
{
"label": "16",
"score": 0.014821126836993746
},
{
"label": "17",
"score": 0.012278330051148532
},
{
"label": "18",
"score": 0.011661211580574558
},
{
"label": "19",
"score": 0.006282417431246482
}
]
] |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/iris-logistic_regression-latest-output.json | [
1,
0,
2,
1,
1,
0,
1,
2,
1,
1
] |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/tabularregression-latest-input.json | {
"data": {
"age": [
0.04534098333546186,
0.09256398319871433,
0.06350367559055897,
0.09619652164973376,
0.012648137276287077,
0.009015598825267658,
-0.009147093429829445,
-0.02367724723390713,
-0.09269547780327612,
-0.06000263174410134
],
"sex": [
-0.044641636506989144,
-0.044641636506989144,
0.05068011873981862,
-0.044641636506989144,
0.05068011873981862,
-0.044641636506989144,
0.05068011873981862,
0.05068011873981862,
0.05068011873981862,
0.05068011873981862
],
"bmi": [
-0.006205954135807083,
0.0369065288194249,
-0.004050329988045492,
0.05199589785375607,
-0.020217511096257485,
-0.02452875939178067,
0.17055522598064407,
0.045529025410471304,
-0.09027529589850945,
0.015350287341808908
],
"bp": [
-0.015998975220305175,
0.0218723855140367,
-0.012556124244455912,
0.0792647112814439,
-0.002227571316908129,
-0.02632752814785296,
0.014986683562338177,
0.0218723855140367,
-0.057313186930496314,
-0.019441826196154435
],
"s1": [
0.12501870313429186,
-0.0249601584096303,
0.10300345740307394,
0.05484510736603471,
0.03833367306762126,
0.09887559882847057,
0.030077955918414535,
0.10988322169407955,
-0.0249601584096303,
0.03695772020942014
],
"s2": [
0.1251981011367534,
-0.016658152053905938,
0.04878987646010685,
0.036577086450315016,
0.05317395492516036,
0.0941964034195894,
0.03375875029420919,
0.08887287956916731,
-0.030436684372645465,
0.04816357953652778
],
"s3": [
0.019186997017453092,
0.0007788079970183853,
0.05600337505832251,
-0.07653558588880739,
-0.006584467611155497,
0.07072992627467027,
-0.02131101882750326,
0.0007788079970183853,
-0.006584467611155497,
0.019186997017453092
],
"s4": [
0.03430885887772673,
-0.03949338287409329,
-0.002592261998183278,
0.14132210941786577,
0.03430885887772673,
-0.002592261998183278,
0.03430885887772673,
0.03430885887772673,
-0.002592261998183278,
-0.002592261998183278
],
"s5": [
0.03243232415655107,
-0.022516528376302174,
0.08449153066204618,
0.0986480615153178,
-0.005142189801713891,
-0.021395309255276825,
0.033653814906286016,
0.07419089971278872,
0.024055085357995654,
-0.030747917533098208
],
"s6": [
-0.005219804415300423,
-0.021788232074638245,
-0.01764612515980379,
0.06105390622205087,
-0.009361911330134878,
0.007206516329202944,
0.03205915781820968,
0.06105390622205087,
0.0030644094143684884,
-0.0010776975004659671
]
}
} |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/textclassification-1.0-input.json | {
"data": [
"From: krs@allegra.att.com (K. R. Subramanian)\nSubject: Companies involved with Scientific Visualization...\nReply-To: krs@allegra.att.com\nOrganization: AT&T Bell Laboratories\nLines: 10\n\nIf anyone has a list of companies doing data visualization (software\nor hardware) I would like to hear from them. Thanks.\n\n\t-- krs\n-- \n\nK.R.Subramanian Ph: (908) 582-6346\nAT&T Bell Laboratories, Rm. 2A240 email : krs@research.att.com\n600 Mountain Av.\nMurray Hill, NJ 07974\n",
"From: lairdb@crash.cts.com (Laird P. Broadfield)\nSubject: Re: CNN for sale; Influencing the coverage\nOrganization: \"Well, a head on top, an arm on each side, two legs....\"\nDistribution: usa\nLines: 25\n\nIn <1993Apr19.171602.27135@guinness.idbsu.edu> betz@gozer.idbsu.edu (Andrew Betz) writes:\n>In article <1993Apr19.153444.28112@ucsu.Colorado.EDU> fcrary@ucsu.Colorado.EDU (Frank Crary) writes:\n>>I'd be willing to go in as well. By the way, we wouldn't need to\n>>buy the network wholesale. Simply owning a large number of shares\n>>would still work (if 5% of the shareholders want pro-gun coverage\n>>and the rest don't really care, they'll go along just to keep \n>>the 5% happy...)\n\n>I'd go along with this plan as well. Turner's stock is traded\n>on the American exchange and has 3 classes (A, B, and C). A and\n>B stock is currently about 23 bucks a share; C stock is about 11\n>bucks a share. Does anybody have any idea how much stock TBS\n>has issued? What would it take to reach 5%, 51%, or even 100%?\n\nUm, I sortof hesitate to bring this up, but owning even a single share\nentitles you to attend the annual shareholders meeting, and under most\ncorporate charters to introduce topics to be discussed. While I *don't*\nsuggest the tactic used by some in Japan (go to the shareholders meeting,\nand disrupt the bejeezus out of everything), what about a well-worded\nresolution complaining about \"advocacy journalism\"?\n\n\n-- \nLaird P. Broadfield lairdb@crash.cts.com ...{ucsd, nosc}!crash!lairdb\nHi! I'm a shareware signature! Send $5 if you use me, send $10 for manual!\n",
"From: galen@picea.CFNR.ColoState.EDU (Galen Watts)\nSubject: Re: RF Communications Link\nNntp-Posting-Host: storm.cfnr.colostate.edu\nOrganization: College of Natural Resources, Colo. State Univ.\nLines: 20\n\nIn article <blumenow.7@underdog.ee.wits.ac.za> blumenow@underdog.ee.wits.ac.za (Warren Blumenow) writes:\n>We have to design an RF link for a distance of 250 m. We're using\n>standard RS232 waves (square pulses) as the modulating waves and the \n>carrier wave is sinusoidal. The link has to be bidirectional.\n>We would appreciate any advice on the type of modulating techniques\n>or antennas that we should use.\n\nWhat frequency is your carrier?\n\nHave you considered using two tones, one for 1 and another for 0?\n\nHow high is your RS-232 data rate?\n\nCan you use more than one carrier freq?\n\nHave you considered hiring an RF data transmission consultant?\n\nJust Curious,\nGalen Watts, KF0YJ\n\n",
"Subject: Re: what to do with old 256k SIMMs?\nFrom: cvafymfa@vmsb.is.csupomona.edu (Srikanth Viswanathan)\nDistribution: world\nOrganization: California State Polytechnic University, Pomona\nNntp-Posting-Host: vmsb.is.csupomona.edu\nNews-Software: VAX/VMS VNEWS 1.41 \nLines: 14\n\nIn article <1qkf2hINN65c@rave.larc.nasa.gov>, kludge@grissom.larc.nasa.gov (Scott Dorsey) writes...\n>In article <C5JCH1.FrC@ulowell.ulowell.edu> wex@cs.ulowell.edu writes:\n>>In article <1993Apr15.100452.16793@csx.cciw.ca>, u009@csx.cciw.ca (G. Stewart Beal) writes:\n>>|> >\tI was wondering if people had any good uses for old\n>>|> >256k SIMMs. I have a bunch of them for the Apple Mac\n>>|> >and I know lots of other people do to. I have tried to\n>>|> >sell them but have gotten NO interest.\n>>\n\nWell, if you're willing to spend a little money, you could buy one\nof those IDE caching controllers (assuming you have an IDE of course)\nand put the 256K SIMMs on them. Hardware cache!\n\nSrikanth\n",
"From: jlove@ivrit.ra.itd.umich.edu (Jack Love)\nSubject: Re: Israeli destruction of mosque(s) in Jerusalem\nOrganization: /usr/local/trn/lib/organization\nLines: 33\nNNTP-Posting-Host: ivrit.ra.itd.umich.edu\n\nIn article <2BEC0A64.21705@news.service.uci.edu> tclock@orion.oac.uci.edu (Tim Clock) writes:\n>This issue has been going on for a while and your presentation here of\n>just one reference probably won't resolve this issue to those that\n>oppose your insistence that mosques *were* destroyed. Even in your\n>location of this one reference, you spend most of your quote dealing\n>with an incidence that, while abhorrant, has nothing to do with the \n>issue at hand here. Then, at the end of the quote, there is an almost\n>off-hand comment that \"two mosques\" were destroyed.\n>\n>To support a claim of this nature, what other authors support this\n>incident? If identifiable mosques were destroyed they are certainly\n>identifiable, they have names and addresses (steet location). The\n>comment by one reporter *does* make us wonder if \"this happened\" but\n>by no means \"proves it.\n\nThere is no doubt that Israeli authorities ordered the destruction of\nmosques in the vicinity of the Wailing Wall. That does not mean,\nhowever, that once can generalize from this to any other points. The\nentire plaza, mosques and all, was cleared to make it possible for Jews\nto have a place to worship in the place that was holiest to many of\nthem, and which had been denied to them for millenia.\n\nOn the other hand, throughout the rest of Jerusalem and Israel, to the\nbest of my knowledge, Israeli authorities have scrupulously avoided\ndamage to any Islamic religious sites. This contrasts with the policies\nof previous regimes which destroyed Jewish synagogues out of hate and\nbigotry.\n\n\n-- \n________________________________________\nJack F. Love\t| \tOpinions expressed are mine alone.\n\t\t|\t(Unless you happen to agree.)\n",
"From: msnyder@nmt.edu (Rebecca Snyder)\nSubject: centi- and milli- pedes\nOrganization: New Mexico Tech\nLines: 10\n\nDoes anyone know how posionous centipedes and millipedes are? If someone\nwas bitten, how soon would medical treatment be needed, and what would\nbe liable to happen to the person?\n\n(Just for clarification - I have NOT been bitten by one of these, but my\nhouse seems to be infested, and I want to know 'just in case'.)\n\nRebecca\n\n\n",
"From: bryan@jpl-devvax.jpl.nasa.gov (Bryan L. Allen)\nSubject: Re: New Encryption Algorithm\nSummary: Boundaries are in the eye of the beholder\nKeywords: NSA surveillance ( )\nOrganization: Telos Corp., Jet Propulsion Laboratory (NASA)\nLines: 25\n\nIn article <49@shockwave.win.net> jhupp@shockwave.win.net (Jeff Hupp) writes:\n> \n>>In article <1raeir$be1@access.digex.net> steve-b@access.digex.com (Steve Brinich) writes:\n[some deleted]\n>>\n>>Unlike the CIA, the NSA has no prohibition against domestic spying. Read\n>>Bamford's THE PUZZLE PALACE.\n>>\n>>Bruce\n>>\n> I have that book, and the way I read it is, one side of the\n>conversation MUST be from outside the United States.\n> Of coures, that ASS U MEs that the NSA plays by the rules...\n\nOne thing that seems ambiguous is whether a signal being echoed down from\ngeosynchronous orbit is \"...from outside the United States.\"\n\nAlso, being able to assess whether NSA is playing by the rules requires\nknowing what the rules are. We only know a subset. For those even more\nsuspicious, there could be other surveillance organizations \"blacker\"\nthan the NSA.\n\n-- \n Bryan L. Allen bryan@devvax.jpl.nasa.gov\n Telos Corp./JPL (818) 306-6425\n",
"Subject: Re: Zeno's Countersteering Paradox Unveiled!!!\nFrom: Stafford@Vax2.Winona.MSUS.Edu (John Stafford)\nDistribution: world\nOrganization: Winona State University\nNntp-Posting-Host: stafford.winona.msus.edu\nLines: 14\n\nIn article <1993Apr26.002631.1@acfcluster.nyu.edu>,\nmullignj@acfcluster.nyu.edu wrote:\n> \n>[...] Therefore, there is a point\n> in time when even though my front wheel is turned to the right \n> I must be going straight ahead (the point when I go from the right\n> turn to the left). [...]\n\n\tWhat you are trying to describe is that transition point where\n\tthe front wheel actually reverses direction; turns backwards.\n\n====================================================\nJohn Stafford Minnesota State University @ Winona\n All standard disclaimers apply.\n",
"From: jcm@head-cfa.harvard.edu (Jonathan McDowell)\nSubject: Re: STS-57 inclination?\nOrganization: Smithsonian Astrophysical Observatory, Cambridge, MA, USA\nLines: 11\n\nFrom article <1993May14.023220.1@vax1.tcd.ie>, by apryan@vax1.tcd.ie:\n>> Primary payload: Spacehab 1 EURECA 1-R Inclination: 57 degrees\n> I have seen elsewhere that inclination is 28 degrees. \n> Which is correct?\n\nHmmm... Atlantis left Eureca in a 28 degree orbit. Retrieving it is\ngoing to be *REALLY* fun if they fly to 57 degrees. Torque that \nCanadarm! :-)\n\n - Jonathan\n\n",
"From: jagrant@emr1.emr.ca (John Grant)\nSubject: Re: AfterDark (StarTrek) out of memory!\nOrganization: Energy, Mines, and Resources, Ottawa\nLines: 17\n\nIn article <1993May18.234042.4519@informix.com> jerry@doodles.informix.com writes:\n>I have the startrek version of afterdark running but it nearly always\n>reports \"space: out of memory\", which floats across the top of the\n>screen. What have I not set correctly (I've got 16M of ram)?\n>\n>jerry\n\n\tYou're right, it doesn't appear to working correctly. It really\n\tshould say:\n\t\t\"space: the last frontier\"\n\tacross the top. :) :) :)\n\n\n-- \nJohn A. Grant\t\t\t\t\t\tjagrant@emr1.emr.ca\nAirborne Geophysics\nGeological Survey of Canada, Ottawa\n"
]
} |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/iris-latest-input.json | {
"data": {
"sepal length (cm)": [
6.1,
5.7,
7.7,
6.0,
6.8,
5.4,
5.6,
6.9,
6.2,
5.8
],
"sepal width (cm)": [
2.8,
3.8,
2.6,
2.9,
2.8,
3.4,
2.9,
3.1,
2.2,
2.7
],
"petal length (cm)": [
4.7,
1.7,
6.9,
4.5,
4.8,
1.5,
3.6,
5.1,
4.5,
3.9
],
"petal width (cm)": [
1.2,
0.3,
2.3,
1.5,
1.4,
0.4,
1.3,
2.3,
1.5,
1.2
]
}
} |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/tabularregression-hist_gradient_boosting_regressor-latest-output.json | [
128.767605088706,
213.12484287152625,
152.87415981711302,
271.367552554169,
109.00499923164844,
81.88059224780598,
238.4711759447084,
215.14159932904784,
134.42407401121258,
189.15096503239798
] |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/textclassification-logistic_regression-1.0-output.json | [
[
{
"label": "0",
"score": 0.011336682178470362
},
{
"label": "1",
"score": 0.2877934181568519
},
{
"label": "2",
"score": 0.04995931455255469
},
{
"label": "3",
"score": 0.048606811555846334
},
{
"label": "4",
"score": 0.048146172402679224
},
{
"label": "5",
"score": 0.04894534558346955
},
{
"label": "6",
"score": 0.15705162235349931
},
{
"label": "7",
"score": 0.06988420347214097
},
{
"label": "8",
"score": 0.020057367014262424
},
{
"label": "9",
"score": 0.023752600566338086
},
{
"label": "10",
"score": 0.008731496867220766
},
{
"label": "11",
"score": 0.02785232288841256
},
{
"label": "12",
"score": 0.031210992630705495
},
{
"label": "13",
"score": 0.08448433781265935
},
{
"label": "14",
"score": 0.01330510596772587
},
{
"label": "15",
"score": 0.04597003192222465
},
{
"label": "16",
"score": 0.011186980163869558
},
{
"label": "17",
"score": 0.0025914382007259642
},
{
"label": "18",
"score": 0.00786147018192487
},
{
"label": "19",
"score": 0.0012722855284180384
}
]
] |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/iris-1.0-input.json | {
"data": {
"sepal length (cm)": [
6.1,
5.7,
7.7,
6.0,
6.8,
5.4,
5.6,
6.9,
6.2,
5.8
],
"sepal width (cm)": [
2.8,
3.8,
2.6,
2.9,
2.8,
3.4,
2.9,
3.1,
2.2,
2.7
],
"petal length (cm)": [
4.7,
1.7,
6.9,
4.5,
4.8,
1.5,
3.6,
5.1,
4.5,
3.9
],
"petal width (cm)": [
1.2,
0.3,
2.3,
1.5,
1.4,
0.4,
1.3,
2.3,
1.5,
1.2
]
}
} |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/iris-hist_gradient_boosting-latest-output.json | [
1,
0,
2,
1,
1,
0,
1,
2,
1,
1
] |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/textclassification-logistic_regression-latest-output.json | [
[
{
"label": "0",
"score": 0.008896718626007294
},
{
"label": "1",
"score": 0.280220671837972
},
{
"label": "2",
"score": 0.044846046575922976
},
{
"label": "3",
"score": 0.03920991699885032
},
{
"label": "4",
"score": 0.057385215221753105
},
{
"label": "5",
"score": 0.052487265368533896
},
{
"label": "6",
"score": 0.13974153545132648
},
{
"label": "7",
"score": 0.07067742309755881
},
{
"label": "8",
"score": 0.01745753683135335
},
{
"label": "9",
"score": 0.025684542619266296
},
{
"label": "10",
"score": 0.009767948522403052
},
{
"label": "11",
"score": 0.02612484979490926
},
{
"label": "12",
"score": 0.03535200014248993
},
{
"label": "13",
"score": 0.10969064335116936
},
{
"label": "14",
"score": 0.013549205719292714
},
{
"label": "15",
"score": 0.04903959225618569
},
{
"label": "16",
"score": 0.01001134971391883
},
{
"label": "17",
"score": 0.0017206454852604731
},
{
"label": "18",
"score": 0.0073138998853855075
},
{
"label": "19",
"score": 0.0008229925004406002
}
]
] |
0 | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators | hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/tabularregression-linear_regression-latest-output.json | [
139.54755840379605,
179.51720835342783,
134.0387557189011,
291.4170292522083,
123.78965872239607,
92.17234650105041,
258.23238898921295,
181.3373205706072,
90.22411310941459,
108.63375858007925
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.