index (int64): 0 to 0
repo_id (stringclasses): 351 values
file_path (stringlengths): 26 to 186
content (stringlengths): 1 to 990k
0
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/tabularregression-linear_regression-1.0-output.json
[ 139.54831330342856, 179.52030577879273, 134.04133297819817, 291.4119359771987, 123.78723656395928, 92.17357676591854, 258.2340970376254, 181.33895237832277, 90.22217861672894, 108.63143297584902 ]
0
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/iris-hist_gradient_boosting-1.0-output.json
[ 1, 0, 2, 1, 1, 0, 1, 2, 1, 1 ]
0
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/tabularregression-1.0-input.json
{ "data": { "age": [ 0.0453409833354632, 0.0925639831987174, 0.063503675590561, 0.096196521649737, 0.0126481372762872, 0.00901559882526763, -0.00914709342983014, -0.0236772472339084, -0.0926954778032799, -0.0600026317441039 ], "sex": [ -0.044641636506989, -0.044641636506989, 0.0506801187398187, -0.044641636506989, 0.0506801187398187, -0.044641636506989, 0.0506801187398187, 0.0506801187398187, 0.0506801187398187, 0.0506801187398187 ], "bmi": [ -0.00620595413580824, 0.0369065288194278, -0.00405032998804645, 0.0519958978537604, -0.02021751109626, -0.0245287593917836, 0.17055522598066, 0.045529025410475, -0.0902752958985185, 0.0153502873418098 ], "bp": [ -0.015999222636143, 0.0218723549949558, -0.0125563519424068, 0.0792535333386559, -0.00222773986119799, -0.0263278347173518, 0.0149866136074833, 0.0218723549949558, -0.0573136709609782, -0.0194420933298793 ], "s1": [ 0.125018703134293, -0.0249601584096305, 0.103003457403075, 0.054845107366035, 0.0383336730676214, 0.0988755988284711, 0.0300779559184146, 0.10988322169408, -0.0249601584096305, 0.0369577202094203 ], "s2": [ 0.125198101136752, -0.0166581520539057, 0.0487898764601065, 0.0365770864503148, 0.05317395492516, 0.0941964034195887, 0.033758750294209, 0.0888728795691667, -0.0304366843726451, 0.0481635795365275 ], "s3": [ 0.0191869970174533, 0.000778807997017968, 0.056003375058324, -0.0765355858888105, -0.00658446761115617, 0.0707299262746723, -0.0213110188275045, 0.000778807997017968, -0.00658446761115617, 0.0191869970174533 ], "s4": [ 0.0343088588777263, -0.0394933828740919, -0.00259226199818282, 0.141322109417863, 0.0343088588777263, -0.00259226199818282, 0.0343088588777263, 0.0343088588777263, -0.00259226199818282, -0.00259226199818282 ], "s5": [ 0.0324332257796019, -0.0225121719296605, 0.0844952822124031, 0.098646374304928, -0.00514530798026311, -0.02139368094036, 0.0336568129023847, 0.0741925366900307, 0.024052583226893, -0.0307512098645563 ], "s6": [ -0.0052198044153011, -0.0217882320746399, -0.0176461251598052, 0.0610539062220542, -0.0093619113301358, 0.00720651632920303, 0.0320591578182113, 0.0610539062220542, 0.00306440941436832, -0.00107769750046639 ] } }
0
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/textclassification-latest-input.json
{ "data": [ "From: krs@allegra.att.com (K. R. Subramanian)\nSubject: Companies involved with Scientific Visualization...\nReply-To: krs@allegra.att.com\nOrganization: AT&T Bell Laboratories\nLines: 10\n\nIf anyone has a list of companies doing data visualization (software\nor hardware) I would like to hear from them. Thanks.\n\n\t-- krs\n-- \n\nK.R.Subramanian Ph: (908) 582-6346\nAT&T Bell Laboratories, Rm. 2A240 email : krs@research.att.com\n600 Mountain Av.\nMurray Hill, NJ 07974\n", "From: lairdb@crash.cts.com (Laird P. Broadfield)\nSubject: Re: CNN for sale; Influencing the coverage\nOrganization: \"Well, a head on top, an arm on each side, two legs....\"\nDistribution: usa\nLines: 25\n\nIn <1993Apr19.171602.27135@guinness.idbsu.edu> betz@gozer.idbsu.edu (Andrew Betz) writes:\n>In article <1993Apr19.153444.28112@ucsu.Colorado.EDU> fcrary@ucsu.Colorado.EDU (Frank Crary) writes:\n>>I'd be willing to go in as well. By the way, we wouldn't need to\n>>buy the network wholesale. Simply owning a large number of shares\n>>would still work (if 5% of the shareholders want pro-gun coverage\n>>and the rest don't really care, they'll go along just to keep \n>>the 5% happy...)\n\n>I'd go along with this plan as well. Turner's stock is traded\n>on the American exchange and has 3 classes (A, B, and C). A and\n>B stock is currently about 23 bucks a share; C stock is about 11\n>bucks a share. Does anybody have any idea how much stock TBS\n>has issued? What would it take to reach 5%, 51%, or even 100%?\n\nUm, I sortof hesitate to bring this up, but owning even a single share\nentitles you to attend the annual shareholders meeting, and under most\ncorporate charters to introduce topics to be discussed. While I *don't*\nsuggest the tactic used by some in Japan (go to the shareholders meeting,\nand disrupt the bejeezus out of everything), what about a well-worded\nresolution complaining about \"advocacy journalism\"?\n\n\n-- \nLaird P. Broadfield lairdb@crash.cts.com ...{ucsd, nosc}!crash!lairdb\nHi! I'm a shareware signature! Send $5 if you use me, send $10 for manual!\n", "From: galen@picea.CFNR.ColoState.EDU (Galen Watts)\nSubject: Re: RF Communications Link\nNntp-Posting-Host: storm.cfnr.colostate.edu\nOrganization: College of Natural Resources, Colo. State Univ.\nLines: 20\n\nIn article <blumenow.7@underdog.ee.wits.ac.za> blumenow@underdog.ee.wits.ac.za (Warren Blumenow) writes:\n>We have to design an RF link for a distance of 250 m. We're using\n>standard RS232 waves (square pulses) as the modulating waves and the \n>carrier wave is sinusoidal. The link has to be bidirectional.\n>We would appreciate any advice on the type of modulating techniques\n>or antennas that we should use.\n\nWhat frequency is your carrier?\n\nHave you considered using two tones, one for 1 and another for 0?\n\nHow high is your RS-232 data rate?\n\nCan you use more than one carrier freq?\n\nHave you considered hiring an RF data transmission consultant?\n\nJust Curious,\nGalen Watts, KF0YJ\n\n", "Subject: Re: what to do with old 256k SIMMs?\nFrom: cvafymfa@vmsb.is.csupomona.edu (Srikanth Viswanathan)\nDistribution: world\nOrganization: California State Polytechnic University, Pomona\nNntp-Posting-Host: vmsb.is.csupomona.edu\nNews-Software: VAX/VMS VNEWS 1.41 \nLines: 14\n\nIn article <1qkf2hINN65c@rave.larc.nasa.gov>, kludge@grissom.larc.nasa.gov (Scott Dorsey) writes...\n>In article <C5JCH1.FrC@ulowell.ulowell.edu> wex@cs.ulowell.edu writes:\n>>In article <1993Apr15.100452.16793@csx.cciw.ca>, u009@csx.cciw.ca (G. 
Stewart Beal) writes:\n>>|> >\tI was wondering if people had any good uses for old\n>>|> >256k SIMMs. I have a bunch of them for the Apple Mac\n>>|> >and I know lots of other people do to. I have tried to\n>>|> >sell them but have gotten NO interest.\n>>\n\nWell, if you're willing to spend a little money, you could buy one\nof those IDE caching controllers (assuming you have an IDE of course)\nand put the 256K SIMMs on them. Hardware cache!\n\nSrikanth\n", "From: jlove@ivrit.ra.itd.umich.edu (Jack Love)\nSubject: Re: Israeli destruction of mosque(s) in Jerusalem\nOrganization: /usr/local/trn/lib/organization\nLines: 33\nNNTP-Posting-Host: ivrit.ra.itd.umich.edu\n\nIn article <2BEC0A64.21705@news.service.uci.edu> tclock@orion.oac.uci.edu (Tim Clock) writes:\n>This issue has been going on for a while and your presentation here of\n>just one reference probably won't resolve this issue to those that\n>oppose your insistence that mosques *were* destroyed. Even in your\n>location of this one reference, you spend most of your quote dealing\n>with an incidence that, while abhorrant, has nothing to do with the \n>issue at hand here. Then, at the end of the quote, there is an almost\n>off-hand comment that \"two mosques\" were destroyed.\n>\n>To support a claim of this nature, what other authors support this\n>incident? If identifiable mosques were destroyed they are certainly\n>identifiable, they have names and addresses (steet location). The\n>comment by one reporter *does* make us wonder if \"this happened\" but\n>by no means \"proves it.\n\nThere is no doubt that Israeli authorities ordered the destruction of\nmosques in the vicinity of the Wailing Wall. That does not mean,\nhowever, that once can generalize from this to any other points. The\nentire plaza, mosques and all, was cleared to make it possible for Jews\nto have a place to worship in the place that was holiest to many of\nthem, and which had been denied to them for millenia.\n\nOn the other hand, throughout the rest of Jerusalem and Israel, to the\nbest of my knowledge, Israeli authorities have scrupulously avoided\ndamage to any Islamic religious sites. This contrasts with the policies\nof previous regimes which destroyed Jewish synagogues out of hate and\nbigotry.\n\n\n-- \n________________________________________\nJack F. Love\t| \tOpinions expressed are mine alone.\n\t\t|\t(Unless you happen to agree.)\n", "From: msnyder@nmt.edu (Rebecca Snyder)\nSubject: centi- and milli- pedes\nOrganization: New Mexico Tech\nLines: 10\n\nDoes anyone know how posionous centipedes and millipedes are? If someone\nwas bitten, how soon would medical treatment be needed, and what would\nbe liable to happen to the person?\n\n(Just for clarification - I have NOT been bitten by one of these, but my\nhouse seems to be infested, and I want to know 'just in case'.)\n\nRebecca\n\n\n", "From: bryan@jpl-devvax.jpl.nasa.gov (Bryan L. Allen)\nSubject: Re: New Encryption Algorithm\nSummary: Boundaries are in the eye of the beholder\nKeywords: NSA surveillance ( )\nOrganization: Telos Corp., Jet Propulsion Laboratory (NASA)\nLines: 25\n\nIn article <49@shockwave.win.net> jhupp@shockwave.win.net (Jeff Hupp) writes:\n> \n>>In article <1raeir$be1@access.digex.net> steve-b@access.digex.com (Steve Brinich) writes:\n[some deleted]\n>>\n>>Unlike the CIA, the NSA has no prohibition against domestic spying. 
Read\n>>Bamford's THE PUZZLE PALACE.\n>>\n>>Bruce\n>>\n> I have that book, and the way I read it is, one side of the\n>conversation MUST be from outside the United States.\n> Of coures, that ASS U MEs that the NSA plays by the rules...\n\nOne thing that seems ambiguous is whether a signal being echoed down from\ngeosynchronous orbit is \"...from outside the United States.\"\n\nAlso, being able to assess whether NSA is playing by the rules requires\nknowing what the rules are. We only know a subset. For those even more\nsuspicious, there could be other surveillance organizations \"blacker\"\nthan the NSA.\n\n-- \n Bryan L. Allen bryan@devvax.jpl.nasa.gov\n Telos Corp./JPL (818) 306-6425\n", "Subject: Re: Zeno's Countersteering Paradox Unveiled!!!\nFrom: Stafford@Vax2.Winona.MSUS.Edu (John Stafford)\nDistribution: world\nOrganization: Winona State University\nNntp-Posting-Host: stafford.winona.msus.edu\nLines: 14\n\nIn article <1993Apr26.002631.1@acfcluster.nyu.edu>,\nmullignj@acfcluster.nyu.edu wrote:\n> \n>[...] Therefore, there is a point\n> in time when even though my front wheel is turned to the right \n> I must be going straight ahead (the point when I go from the right\n> turn to the left). [...]\n\n\tWhat you are trying to describe is that transition point where\n\tthe front wheel actually reverses direction; turns backwards.\n\n====================================================\nJohn Stafford Minnesota State University @ Winona\n All standard disclaimers apply.\n", "From: jcm@head-cfa.harvard.edu (Jonathan McDowell)\nSubject: Re: STS-57 inclination?\nOrganization: Smithsonian Astrophysical Observatory, Cambridge, MA, USA\nLines: 11\n\nFrom article <1993May14.023220.1@vax1.tcd.ie>, by apryan@vax1.tcd.ie:\n>> Primary payload: Spacehab 1 EURECA 1-R Inclination: 57 degrees\n> I have seen elsewhere that inclination is 28 degrees. \n> Which is correct?\n\nHmmm... Atlantis left Eureca in a 28 degree orbit. Retrieving it is\ngoing to be *REALLY* fun if they fly to 57 degrees. Torque that \nCanadarm! :-)\n\n - Jonathan\n\n", "From: jagrant@emr1.emr.ca (John Grant)\nSubject: Re: AfterDark (StarTrek) out of memory!\nOrganization: Energy, Mines, and Resources, Ottawa\nLines: 17\n\nIn article <1993May18.234042.4519@informix.com> jerry@doodles.informix.com writes:\n>I have the startrek version of afterdark running but it nearly always\n>reports \"space: out of memory\", which floats across the top of the\n>screen. What have I not set correctly (I've got 16M of ram)?\n>\n>jerry\n\n\tYou're right, it doesn't appear to working correctly. It really\n\tshould say:\n\t\t\"space: the last frontier\"\n\tacross the top. :) :) :)\n\n\n-- \nJohn A. Grant\t\t\t\t\t\tjagrant@emr1.emr.ca\nAirborne Geophysics\nGeological Survey of Canada, Ottawa\n" ] }
0
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators
hf_public_repos/api-inference-community/docker_images/sklearn/tests/generators/samples/iris-logistic_regression-1.0-output.json
[ 1, 0, 2, 1, 1, 0, 1, 2, 1, 1 ]
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/asteroid/Dockerfile
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"

# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
RUN apt-get update -y && apt-get install ffmpeg -y

COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/

# Most DL models are quite large in terms of memory, so using workers is a HUGE
# slowdown because of the fork and GIL with Python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
ENV ASTEROID_CACHE=/data

# Necessary on GPU environment docker.
# The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose,
# rendering the TIMEOUT defined by uvicorn impossible to use correctly.
# We're overriding it to be renamed UVICORN_TIMEOUT.
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
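A minimal sketch of building and running this image locally; the build step mirrors tests/test_docker_build.py, while the image tag, port mapping and model id are assumptions, not part of the repo:

import subprocess

# Build from the asteroid docker_images directory, then run with the TASK / MODEL_ID
# environment variables read by app/main.py. The base image serves on port 80 by default.
subprocess.check_output(["docker", "build", "-t", "asteroid-api", "."])
subprocess.check_call([
    "docker", "run", "--rm", "-p", "8000:80",
    "-e", "TASK=audio-source-separation",
    "-e", "MODEL_ID=julien-c/DPRNNTasNet-ks16_WHAM_sepclean",
    "asteroid-api",
])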
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/asteroid/requirements.txt
starlette==0.27.0
api-inference-community==0.0.23
huggingface_hub==0.5.1
asteroid==0.4.4
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/asteroid/prestart.sh
python app/main.py
0
hf_public_repos/api-inference-community/docker_images/asteroid
hf_public_repos/api-inference-community/docker_images/asteroid/app/main.py
import functools import logging import os from typing import Dict, Type from api_inference_community.routes import pipeline_route, status_ok from app.pipelines import AudioSourceSeparationPipeline, AudioToAudioPipeline, Pipeline from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route TASK = os.getenv("TASK") MODEL_ID = os.getenv("MODEL_ID") logger = logging.getLogger(__name__) # Add the allowed tasks # Supported tasks are: # - text-generation # - text-classification # - token-classification # - translation # - summarization # - automatic-speech-recognition # - ... # For instance # from app.pipelines import AutomaticSpeechRecognitionPipeline # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline} # You can check the requirements and expectations of each pipelines in their respective # directories. Implement directly within the directories. ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { "audio-source-separation": AudioSourceSeparationPipeline, "audio-to-audio": AudioToAudioPipeline, } @functools.lru_cache() def get_pipeline() -> Pipeline: task = os.environ["TASK"] model_id = os.environ["MODEL_ID"] if task not in ALLOWED_TASKS: raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}") return ALLOWED_TASKS[task](model_id) routes = [ Route("/{whatever:path}", status_ok), Route("/{whatever:path}", pipeline_route, methods=["POST"]), ] middleware = [Middleware(GZipMiddleware, minimum_size=1000)] if os.environ.get("DEBUG", "") == "1": from starlette.middleware.cors import CORSMiddleware middleware.append( Middleware( CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"], ) ) app = Starlette(routes=routes, middleware=middleware) @app.on_event("startup") async def startup_event(): logger = logging.getLogger("uvicorn.access") handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) logger.handlers = [handler] # Link between `api-inference-community` and framework code. app.get_pipeline = get_pipeline try: get_pipeline() except Exception: # We can fail so we can show exception later. pass if __name__ == "__main__": try: get_pipeline() except Exception: # We can fail so we can show exception later. pass
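A minimal local smoke test for the app above, mirroring the repo's own tests; the model id comes from tests/test_api.py, and the sample path is an assumption about the working directory:

import os

# get_pipeline() reads these from the environment, so set them before importing the app.
os.environ["TASK"] = "audio-source-separation"
os.environ["MODEL_ID"] = "julien-c/DPRNNTasNet-ks16_WHAM_sepclean"

from starlette.testclient import TestClient
from app.main import app

# Assumed path to a local FLAC sample (the tests ship one under tests/samples/).
with open("tests/samples/sample1.flac", "rb") as f:
    payload = f.read()

with TestClient(app) as client:  # startup loads the model via get_pipeline()
    response = client.post("/", data=payload)
    print(response.status_code, response.headers["content-type"])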
0
hf_public_repos/api-inference-community/docker_images/asteroid/app
hf_public_repos/api-inference-community/docker_images/asteroid/app/pipelines/audio_to_audio.py
from typing import List, Tuple

import numpy as np
from app.pipelines import Pipeline
from asteroid import separate
from asteroid.models import BaseModel


class AudioToAudioPipeline(Pipeline):
    def __init__(self, model_id: str):
        self.model = BaseModel.from_pretrained(model_id)
        self.sampling_rate = self.model.sample_rate

    def __call__(self, inputs: np.array) -> Tuple[np.array, int, List[str]]:
        """
        Args:
            inputs (:obj:`np.array`):
                The raw waveform of audio received. By default sampled at
                `self.sampling_rate`. The shape of this array is `T`, where
                `T` is the time axis.
        Return:
            A :obj:`tuple` containing:
              - :obj:`np.array`: The return shape of the array must be `C'` x `T'`
              - a :obj:`int`: the sampling rate as an int in Hz.
              - a :obj:`List[str]`: the annotation for each out channel.
                This can be the name of the instruments for audio source
                separation or some annotation for speech enhancement.
                The length must be `C'`.
        """
        # Pass wav as [batch, n_chan, time]; here: [1, 1, time]
        separated = separate.numpy_separate(self.model, inputs.reshape((1, 1, -1)))
        # FIXME: how to deal with multiple sources?
        out = separated[0]
        n = out.shape[0]
        labels = [f"label_{i}" for i in range(n)]
        return separated[0], int(self.model.sample_rate), labels
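A quick sketch of the __call__ contract documented above; the model id is the one used in this repo's tests, and loading it downloads the pretrained weights:

import numpy as np
from app.pipelines import AudioToAudioPipeline

pipe = AudioToAudioPipeline("julien-c/DPRNNTasNet-ks16_WHAM_sepclean")
waveform = np.zeros(int(pipe.sampling_rate), dtype="float32")  # one second of silence
out, rate, labels = pipe(waveform)
print(out.shape, rate, labels)  # (C', T'), sampling rate in Hz, one label per output channel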
0
hf_public_repos/api-inference-community/docker_images/asteroid/app
hf_public_repos/api-inference-community/docker_images/asteroid/app/pipelines/base.py
from abc import ABC, abstractmethod
from typing import Any


class Pipeline(ABC):
    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")


class PipelineException(Exception):
    pass
0
hf_public_repos/api-inference-community/docker_images/asteroid/app
hf_public_repos/api-inference-community/docker_images/asteroid/app/pipelines/audio_source_separation.py
from typing import Tuple

import numpy as np
from app.pipelines import Pipeline
from asteroid import separate
from asteroid.models import BaseModel


class AudioSourceSeparationPipeline(Pipeline):
    def __init__(self, model_id: str):
        self.model = BaseModel.from_pretrained(model_id)
        self.sampling_rate = self.model.sample_rate

    def __call__(self, inputs: np.array) -> Tuple[np.array, int]:
        """
        Args:
            inputs (:obj:`np.array`):
                The raw waveform of audio received. By default at 16KHz.
                Check `app.validation` if a different sample rate is required
                or if it depends on the model.
        Return:
            A :obj:`np.array` and a :obj:`int`: The raw waveform as a numpy
            array, and the sampling rate as an int.
        """
        # Pass wav as [batch, n_chan, time]; here: [1, 1, time]
        separated = separate.numpy_separate(self.model, inputs.reshape((1, 1, -1)))
        # FIXME: how to deal with multiple sources?
        return separated[0, 0], int(self.model.sample_rate)
0
hf_public_repos/api-inference-community/docker_images/asteroid/app
hf_public_repos/api-inference-community/docker_images/asteroid/app/pipelines/__init__.py
from app.pipelines.base import Pipeline, PipelineException  # isort:skip

from app.pipelines.audio_source_separation import AudioSourceSeparationPipeline
from app.pipelines.audio_to_audio import AudioToAudioPipeline
0
hf_public_repos/api-inference-community/docker_images/asteroid
hf_public_repos/api-inference-community/docker_images/asteroid/tests/test_docker_build.py
import os import subprocess from unittest import TestCase class cd: """Context manager for changing the current working directory""" def __init__(self, newPath): self.newPath = os.path.expanduser(newPath) def __enter__(self): self.savedPath = os.getcwd() os.chdir(self.newPath) def __exit__(self, etype, value, traceback): os.chdir(self.savedPath) class DockerBuildTestCase(TestCase): def test_can_build_docker_image(self): with cd(os.path.dirname(os.path.dirname(__file__))): subprocess.check_output(["docker", "build", "."])
0
hf_public_repos/api-inference-community/docker_images/asteroid
hf_public_repos/api-inference-community/docker_images/asteroid/tests/test_api.py
import os
from typing import Dict
from unittest import TestCase, skipIf

from app.main import ALLOWED_TASKS, get_pipeline


# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
    # IMPLEMENT_THIS
    # "automatic-speech-recognition": "mysample-ASR",
    # "text-generation": "mysample-gpt2",
    "audio-source-separation": "julien-c/DPRNNTasNet-ks16_WHAM_sepclean",
    "audio-to-audio": "julien-c/DPRNNTasNet-ks16_WHAM_sepclean",
}


ALL_TASKS = {
    "automatic-speech-recognition",
    "audio-source-separation",
    "feature-extraction",
    "image-classification",
    "question-answering",
    "text-generation",
    "text-to-speech",
}


class PipelineTestCase(TestCase):
    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                # get_pipeline() takes no arguments and reads TASK / MODEL_ID
                # from the environment, so configure it through env vars here.
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
0
hf_public_repos/api-inference-community/docker_images/asteroid
hf_public_repos/api-inference-community/docker_images/asteroid/tests/test_api_audio_source_separation.py
import os
from unittest import TestCase, skipIf

from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS


@skipIf(
    "audio-source-separation" not in ALLOWED_TASKS,
    "audio-source-separation not implemented",
)
class AudioSourceSeparationTestCase(TestCase):
    def setUp(self):
        model_id = TESTABLE_MODELS["audio-source-separation"]
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = model_id
        os.environ["TASK"] = "audio-source-separation"
        from app.main import app

        self.app = app

    def tearDown(self):
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def read(self, filename: str) -> bytes:
        dirname = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(dirname, "samples", filename)
        with open(filename, "rb") as f:
            bpayload = f.read()
        return bpayload

    def test_simple(self):
        bpayload = self.read("sample1.flac")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        self.assertEqual(response.headers["content-type"], "audio/flac")
        audio = ffmpeg_read(response.content)
        self.assertEqual(len(audio.shape), 1)
        self.assertGreater(audio.shape[0], 1000)

    def test_malformed_audio(self):
        bpayload = self.read("malformed.flac")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            400,
        )
        self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')

    def test_dual_channel_audiofile(self):
        bpayload = self.read("sample1_dual.ogg")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        self.assertEqual(response.headers["content-type"], "audio/wav")
        audio = ffmpeg_read(response.content)
        self.assertEqual(audio.shape, (10,))

    def test_webm_audiofile(self):
        bpayload = self.read("sample1.webm")

        with TestClient(self.app) as client:
            response = client.post("/", data=bpayload)

        self.assertEqual(
            response.status_code,
            200,
        )
        self.assertEqual(response.headers["content-type"], "audio/wav")
        audio = ffmpeg_read(response.content)
        self.assertEqual(audio.shape, (10,))
0
hf_public_repos/api-inference-community/docker_images/asteroid
hf_public_repos/api-inference-community/docker_images/asteroid/tests/test_api_audio_to_audio.py
import base64 import json import os from unittest import TestCase, skipIf from api_inference_community.validation import ffmpeg_read from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "audio-to-audio" not in ALLOWED_TASKS, "audio-to-audio not implemented", ) class AudioToAudioTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["audio-to-audio"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "audio-to-audio" from app.main import app self.app = app def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def read(self, filename: str) -> bytes: dirname = os.path.dirname(os.path.abspath(__file__)) filename = os.path.join(dirname, "samples", filename) with open(filename, "rb") as f: bpayload = f.read() return bpayload def test_simple(self): bpayload = self.read("sample1.flac") with TestClient(self.app) as client: response = client.post("/", data=bpayload) self.assertEqual( response.status_code, 200, ) self.assertEqual(response.headers["content-type"], "application/json") audio = json.loads(response.content) self.assertTrue(isinstance(audio, list)) self.assertEqual(set(audio[0].keys()), {"blob", "content-type", "label"}) data = base64.b64decode(audio[0]["blob"]) wavform = ffmpeg_read(data) self.assertGreater(wavform.shape[0], 1000) self.assertTrue(isinstance(audio[0]["content-type"], str)) self.assertTrue(isinstance(audio[0]["label"], str)) def test_malformed_audio(self): bpayload = self.read("malformed.flac") with TestClient(self.app) as client: response = client.post("/", data=bpayload) self.assertEqual( response.status_code, 400, ) self.assertEqual(response.content, b'{"error":"Malformed soundfile"}') def test_dual_channel_audiofile(self): bpayload = self.read("sample1_dual.ogg") with TestClient(self.app) as client: response = client.post("/", data=bpayload) self.assertEqual( response.status_code, 200, ) self.assertEqual(response.headers["content-type"], "application/json") audio = json.loads(response.content) self.assertTrue(isinstance(audio, list)) self.assertEqual(set(audio[0].keys()), {"blob", "content-type", "label"}) data = base64.b64decode(audio[0]["blob"]) wavform = ffmpeg_read(data) self.assertGreater(wavform.shape[0], 1000) self.assertTrue(isinstance(audio[0]["content-type"], str)) self.assertTrue(isinstance(audio[0]["label"], str)) def test_webm_audiofile(self): bpayload = self.read("sample1.webm") with TestClient(self.app) as client: response = client.post("/", data=bpayload) self.assertEqual( response.status_code, 200, ) self.assertEqual(response.headers["content-type"], "application/json") audio = json.loads(response.content) self.assertTrue(isinstance(audio, list)) self.assertEqual(set(audio[0].keys()), {"blob", "content-type", "label"}) data = base64.b64decode(audio[0]["blob"]) wavform = ffmpeg_read(data) self.assertGreater(wavform.shape[0], 1000) self.assertTrue(isinstance(audio[0]["content-type"], str)) self.assertTrue(isinstance(audio[0]["label"], str))
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/adapter_transformers/Dockerfile
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"

# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y

COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/

# Most DL models are quite large in terms of memory, so using workers is a HUGE
# slowdown because of the fork and GIL with Python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
ENV HF_HOME=/data
ENV TORCH_HOME=/data

# Necessary on GPU environment docker.
# The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose,
# rendering the TIMEOUT defined by uvicorn impossible to use correctly.
# We're overriding it to be renamed UVICORN_TIMEOUT.
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/adapter_transformers/requirements.txt
starlette==0.37.2
api-inference-community==0.0.32
torch==2.3.0
adapters==0.2.1
huggingface_hub==0.23.0
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/adapter_transformers/prestart.sh
python app/main.py
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app/batch.py
#!/usr/bin/env python import os from api_inference_community.batch import batch from app.main import get_pipeline DATASET_NAME = os.getenv("DATASET_NAME") DATASET_CONFIG = os.getenv("DATASET_CONFIG", None) DATASET_SPLIT = os.getenv("DATASET_SPLIT") DATASET_COLUMN = os.getenv("DATASET_COLUMN") USE_GPU = os.getenv("USE_GPU", "0").lower() in {"1", "true"} TOKEN = os.getenv("TOKEN") REPO_ID = os.getenv("REPO_ID") if __name__ == "__main__": batch( dataset_name=DATASET_NAME, dataset_config=DATASET_CONFIG, dataset_split=DATASET_SPLIT, dataset_column=DATASET_COLUMN, token=TOKEN, repo_id=REPO_ID, use_gpu=USE_GPU, pipeline=get_pipeline(), )
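A hypothetical configuration for the batch entry point above; every value below is a placeholder, only the variable names come from the script:

import os

os.environ.update({
    "TASK": "text-classification",                  # must be a key of ALLOWED_TASKS in app/main.py
    "MODEL_ID": "AdapterHub/roberta-base-pf-sick",  # adapter id taken from the test fixtures
    "DATASET_NAME": "placeholder/dataset",
    "DATASET_SPLIT": "train",
    "DATASET_COLUMN": "text",
    "USE_GPU": "0",
    "TOKEN": "hf_placeholder_token",
    "REPO_ID": "placeholder/output-repo",
})
# With these set, `python app/batch.py` calls api_inference_community.batch.batch()
# with the cached pipeline returned by get_pipeline().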
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app/main.py
import functools import logging import os from typing import Dict, Type from api_inference_community.routes import pipeline_route, status_ok from app.pipelines import ( Pipeline, QuestionAnsweringPipeline, SummarizationPipeline, TextClassificationPipeline, TextGenerationPipeline, TokenClassificationPipeline, ) from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route TASK = os.getenv("TASK") MODEL_ID = os.getenv("MODEL_ID") logger = logging.getLogger(__name__) # Add the allowed tasks # Supported tasks are: # - text-generation # - text-classification # - token-classification # - translation # - summarization # - automatic-speech-recognition # - ... # For instance # from app.pipelines import AutomaticSpeechRecognitionPipeline # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline} # You can check the requirements and expectations of each pipelines in their respective # directories. Implement directly within the directories. ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { "question-answering": QuestionAnsweringPipeline, "summarization": SummarizationPipeline, "text-classification": TextClassificationPipeline, "text-generation": TextGenerationPipeline, "token-classification": TokenClassificationPipeline, # IMPLEMENT_THIS: Add your implemented tasks here ! } @functools.lru_cache() def get_pipeline() -> Pipeline: task = os.environ["TASK"] model_id = os.environ["MODEL_ID"] if task not in ALLOWED_TASKS: raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}") return ALLOWED_TASKS[task](model_id) routes = [ Route("/{whatever:path}", status_ok), Route("/{whatever:path}", pipeline_route, methods=["POST"]), ] middleware = [Middleware(GZipMiddleware, minimum_size=1000)] if os.environ.get("DEBUG", "") == "1": from starlette.middleware.cors import CORSMiddleware middleware.append( Middleware( CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"], ) ) app = Starlette(routes=routes, middleware=middleware) @app.on_event("startup") async def startup_event(): logger = logging.getLogger("uvicorn.access") handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) logger.handlers = [handler] # Link between `api-inference-community` and framework code. app.get_pipeline = get_pipeline try: get_pipeline() except Exception: # We can fail so we can show exception later. pass if __name__ == "__main__": try: get_pipeline() except Exception: # We can fail so we can show exception later. pass
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app/pipelines/token_classification.py
from typing import Any, Dict, List

import numpy as np
from app.pipelines import Pipeline
from transformers import (
    TokenClassificationPipeline as TransformersTokenClassificationPipeline,
)


class TokenClassificationPipeline(Pipeline):
    def __init__(
        self,
        adapter_id: str,
    ):
        self.pipeline = self._load_pipeline_instance(
            TransformersTokenClassificationPipeline, adapter_id
        )

    def __call__(self, inputs: str) -> List[Dict[str, Any]]:
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list`. The object returned should be like
            [{"entity_group": "XXX", "word": "some word", "start": 3, "end": 6, "score": 0.82}]
            containing:
            - "entity_group": A string representing what the entity is.
            - "word": A substring of the original string that was detected as an entity.
            - "start": the offset within `inputs` leading to `word`. inputs[start:end] == word
            - "end": the ending offset within `inputs` leading to `word`. inputs[start:end] == word
            - "score": A score between 0 and 1 describing how confident the model is for this entity.
        """
        outputs = self.pipeline(inputs)
        # convert all numpy types to plain Python floats
        for output in outputs:
            # remove & rename keys
            output.pop("index")
            entity = output.pop("entity")
            for k, v in output.items():
                if isinstance(v, np.generic):
                    output[k] = v.item()
            output["entity_group"] = entity
        return outputs
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app/pipelines/summarization.py
from typing import Dict, List

from app.pipelines import Pipeline
from transformers import SummarizationPipeline as TransformersSummarizationPipeline


class SummarizationPipeline(Pipeline):
    def __init__(self, adapter_id: str):
        self.pipeline = self._load_pipeline_instance(
            TransformersSummarizationPipeline, adapter_id
        )

    def __call__(self, inputs: str) -> List[Dict[str, str]]:
        """
        Args:
            inputs (:obj:`str`):
                a string to be summarized
        Return:
            A :obj:`list` of :obj:`dict` in the form of
            {"summary_text": "The string after summarization"}
        """
        return self.pipeline(inputs, truncation=True)
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app/pipelines/text_generation.py
from typing import Dict, List

from app.pipelines import Pipeline
from transformers import TextGenerationPipeline as TransformersTextGenerationPipeline


class TextGenerationPipeline(Pipeline):
    def __init__(self, adapter_id: str):
        self.pipeline = self._load_pipeline_instance(
            TransformersTextGenerationPipeline, adapter_id
        )

    def __call__(self, inputs: str) -> List[Dict[str, str]]:
        """
        Args:
            inputs (:obj:`str`):
                The input text
        Return:
            A :obj:`list`. The list contains a single item that is a dict
            {"generated_text": the model output}
        """
        return self.pipeline(inputs, truncation=True)
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app/pipelines/base.py
from abc import ABC, abstractmethod
from typing import Any

from adapters import AutoAdapterModel, get_adapter_info
from transformers import AutoTokenizer
from transformers.pipelines.base import logger


class Pipeline(ABC):
    @abstractmethod
    def __init__(self, model_id: str):
        raise NotImplementedError("Pipelines should implement an __init__ method")

    @abstractmethod
    def __call__(self, inputs: Any) -> Any:
        raise NotImplementedError("Pipelines should implement a __call__ method")

    @staticmethod
    def _load_pipeline_instance(pipeline_class, adapter_id):
        adapter_info = get_adapter_info(adapter_id, source="hf")
        if adapter_info is None:
            raise ValueError(f"Adapter with id '{adapter_id}' not available.")

        tokenizer = AutoTokenizer.from_pretrained(adapter_info.model_name)
        model = AutoAdapterModel.from_pretrained(adapter_info.model_name)
        model.load_adapter(adapter_id, source="hf", set_active=True)

        # Transformers incorrectly logs an error because class name is not known.
        # Filter this out.
        logger.addFilter(
            lambda record: not record.getMessage().startswith(
                f"The model '{model.__class__.__name__}' is not supported"
            )
        )

        return pipeline_class(model=model, tokenizer=tokenizer)


class PipelineException(Exception):
    pass
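A minimal sketch of how _load_pipeline_instance is exercised through one of the task pipelines in this directory; the adapter id is the one listed in tests/test_api.py, and loading it downloads the base model plus the adapter:

from app.pipelines import TextClassificationPipeline

pipe = TextClassificationPipeline("AdapterHub/roberta-base-pf-sick")
# return_all_scores=True yields one list of {"label", "score"} dicts per input string.
print(pipe("A man is playing a guitar."))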
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app/pipelines/question_answering.py
from typing import Any, Dict

from app.pipelines import Pipeline
from transformers import QuestionAnsweringPipeline as TransformersQAPipeline


class QuestionAnsweringPipeline(Pipeline):
    def __init__(
        self,
        adapter_id: str,
    ):
        self.pipeline = self._load_pipeline_instance(TransformersQAPipeline, adapter_id)

    def __call__(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        """
        Args:
            inputs (:obj:`dict`):
                a dictionary containing two keys, 'question' being the question
                being asked and 'context' being some text containing the answer.
        Return:
            A :obj:`dict`. The object returned should be like
            {"answer": "XXX", "start": 3, "end": 6, "score": 0.82}
            containing:
            - "answer": the extracted answer from the `context`.
            - "start": the offset within `context` leading to `answer`. context[start:end] == answer
            - "end": the ending offset within `context` leading to `answer`. context[start:end] == answer
            - "score": A score between 0 and 1 describing how confident the model is for this answer.
        """
        return self.pipeline(**inputs)
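A hypothetical client call against a running container; the host and port are assumptions, while the payload and expected keys mirror tests/test_api_question_answering.py:

import requests

payload = {"inputs": {"question": "Where do I live ?", "context": "I live in New-York"}}
response = requests.post("http://localhost:8000/", json=payload)
print(response.json())  # expected keys: "answer", "start", "end", "score"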
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app/pipelines/__init__.py
from app.pipelines.base import Pipeline, PipelineException  # isort:skip

from app.pipelines.question_answering import QuestionAnsweringPipeline
from app.pipelines.summarization import SummarizationPipeline
from app.pipelines.text_classification import TextClassificationPipeline
from app.pipelines.text_generation import TextGenerationPipeline
from app.pipelines.token_classification import TokenClassificationPipeline
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app
hf_public_repos/api-inference-community/docker_images/adapter_transformers/app/pipelines/text_classification.py
from typing import Dict, List

from app.pipelines import Pipeline
from transformers import (
    TextClassificationPipeline as TransformersClassificationPipeline,
)


class TextClassificationPipeline(Pipeline):
    def __init__(
        self,
        adapter_id: str,
    ):
        self.pipeline = self._load_pipeline_instance(
            TransformersClassificationPipeline, adapter_id
        )

    def __call__(self, inputs: str) -> List[Dict[str, float]]:
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list`. The object returned should be like
            [{"label": "POSITIVE", "score": 0.9939950108528137}]
            containing:
            - "label": A string representing what the label/class is. There can be multiple labels.
            - "score": A score between 0 and 1 describing how confident the model is for this label/class.
        """
        try:
            return self.pipeline(inputs, return_all_scores=True)
        except Exception as e:
            raise ValueError(e)
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers
hf_public_repos/api-inference-community/docker_images/adapter_transformers/tests/test_api_token_classification.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "token-classification" not in ALLOWED_TASKS, "token-classification not implemented", ) class TokenClassificationTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["token-classification"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "token-classification" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = "Hello, my name is John and I live in New York" with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual( set(k for el in content for k in el.keys()), {"entity_group", "word", "start", "end", "score"}, ) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual( set(k for el in content for k in el.keys()), {"entity_group", "word", "start", "end", "score"}, ) def test_malformed_question(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', )
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers
hf_public_repos/api-inference-community/docker_images/adapter_transformers/tests/test_api_text_generation.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "text-generation" not in ALLOWED_TASKS, "text-generation not implemented", ) class TextGenerationTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["text-generation"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "text-generation" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = "The weather is nice today." with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(type(content[0]["generated_text"]), str) def test_malformed_question(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) content = json.loads(response.content) self.assertEqual(set(content.keys()), {"error"})
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers
hf_public_repos/api-inference-community/docker_images/adapter_transformers/tests/test_docker_build.py
import os import subprocess from unittest import TestCase class cd: """Context manager for changing the current working directory""" def __init__(self, newPath): self.newPath = os.path.expanduser(newPath) def __enter__(self): self.savedPath = os.getcwd() os.chdir(self.newPath) def __exit__(self, etype, value, traceback): os.chdir(self.savedPath) class DockerBuildTestCase(TestCase): def test_can_build_docker_image(self): with cd(os.path.dirname(os.path.dirname(__file__))): subprocess.check_output(["docker", "build", "."])
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers
hf_public_repos/api-inference-community/docker_images/adapter_transformers/tests/test_api_summarization.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "summarization" not in ALLOWED_TASKS, "summarization not implemented", ) class SummarizationTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["summarization"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "summarization" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = "The weather is nice today." with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(type(content[0]["summary_text"]), str) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(type(content[0]["summary_text"]), str) def test_malformed_question(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) content = json.loads(response.content) self.assertEqual(set(content.keys()), {"error"})
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers
hf_public_repos/api-inference-community/docker_images/adapter_transformers/tests/test_api.py
import os from typing import Dict from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS, get_pipeline # Must contain at least one example of each implemented pipeline # Tests do not check the actual values of the model output, so small dummy # models are recommended for faster tests. TESTABLE_MODELS: Dict[str, str] = { "question-answering": "AdapterHub/roberta-base-pf-squad", "summarization": "AdapterHub/facebook-bart-large_sum_xsum_pfeiffer", "text-classification": "AdapterHub/roberta-base-pf-sick", "text-generation": "AdapterHub/gpt2_lm_poem_pfeiffer", "token-classification": "AdapterHub/roberta-base-pf-conll2003", } ALL_TASKS = { "automatic-speech-recognition", "feature-extraction", "image-classification", "question-answering", "sentence-similarity", "structured-data-classification", "text-generation", "text-to-speech", "token-classification", } class PipelineTestCase(TestCase): @skipIf( os.path.dirname(os.path.dirname(__file__)).endswith("common"), "common is a special case", ) def test_has_at_least_one_task_enabled(self): self.assertGreater( len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task" ) def test_unsupported_tasks(self): unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys() for unsupported_task in unsupported_tasks: with self.subTest(msg=unsupported_task, task=unsupported_task): os.environ["TASK"] = unsupported_task os.environ["MODEL_ID"] = "XX" with self.assertRaises(EnvironmentError): get_pipeline()
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers
hf_public_repos/api-inference-community/docker_images/adapter_transformers/tests/test_api_question_answering.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "question-answering" not in ALLOWED_TASKS, "question-answering not implemented", ) class QuestionAnsweringTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["question-answering"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "question-answering" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = {"question": "Where do I live ?", "context": "I live in New-York"} with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(set(content.keys()), {"answer", "start", "end", "score"}) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(set(content.keys()), {"answer", "start", "end", "score"}) def test_malformed_question(self): with TestClient(self.app) as client: response = client.post("/", data=b"Where do I live ?") self.assertEqual( response.status_code, 400, ) content = json.loads(response.content) self.assertEqual(set(content.keys()), {"error"})
0
hf_public_repos/api-inference-community/docker_images/adapter_transformers
hf_public_repos/api-inference-community/docker_images/adapter_transformers/tests/test_api_text_classification.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "text-classification" not in ALLOWED_TASKS, "text-classification not implemented", ) class TextClassificationTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["text-classification"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "text-classification" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = "It is a beautiful day outside" with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(len(content), 1) self.assertEqual(type(content[0]), list) self.assertEqual( set(k for el in content[0] for k in el.keys()), {"label", "score"}, ) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(len(content), 1) self.assertEqual(type(content[0]), list) self.assertEqual( set(k for el in content[0] for k in el.keys()), {"label", "score"}, ) def test_malformed_question(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', )
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/fasttext/Dockerfile
FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <me@example.com>"

# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y

COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/

# Most DL models are quite large in terms of memory, so using workers is a HUGE
# slowdown because of the fork and GIL with Python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data

# Necessary on GPU environment docker.
# The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose,
# rendering the TIMEOUT defined by uvicorn impossible to use correctly.
# We're overriding it to be renamed UVICORN_TIMEOUT.
# UVICORN_TIMEOUT is a useful variable for very large models that take more
# than 30s (the default) to load in memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load, as it will
# kill workers all the time before they finish.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
COPY ./app /app/app
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/fasttext/requirements.txt
starlette==0.27.0
api-inference-community==0.0.23
fasttext==0.9.2
huggingface_hub==0.5.1
# Dummy change.
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/fasttext/prestart.sh
python app/main.py
0
hf_public_repos/api-inference-community/docker_images/fasttext
hf_public_repos/api-inference-community/docker_images/fasttext/app/main.py
import functools import logging import os from typing import Dict, Type from api_inference_community.routes import pipeline_route, status_ok from app.pipelines import ( FeatureExtractionPipeline, Pipeline, TextClassificationPipeline, ) from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route TASK = os.getenv("TASK") MODEL_ID = os.getenv("MODEL_ID") logger = logging.getLogger(__name__) # Add the allowed tasks # Supported tasks are: # - text-generation # - text-classification # - token-classification # - translation # - summarization # - automatic-speech-recognition # - ... # For instance # from app.pipelines import AutomaticSpeechRecognitionPipeline # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline} # You can check the requirements and expectations of each pipelines in their respective # directories. Implement directly within the directories. ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { "feature-extraction": FeatureExtractionPipeline, "text-classification": TextClassificationPipeline, } @functools.lru_cache() def get_pipeline() -> Pipeline: task = os.environ["TASK"] model_id = os.environ["MODEL_ID"] if task not in ALLOWED_TASKS: raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}") return ALLOWED_TASKS[task](model_id) routes = [ Route("/{whatever:path}", status_ok), Route("/{whatever:path}", pipeline_route, methods=["POST"]), ] middleware = [Middleware(GZipMiddleware, minimum_size=1000)] if os.environ.get("DEBUG", "") == "1": from starlette.middleware.cors import CORSMiddleware middleware.append( Middleware( CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"], ) ) app = Starlette(routes=routes, middleware=middleware) @app.on_event("startup") async def startup_event(): logger = logging.getLogger("uvicorn.access") handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) logger.handlers = [handler] # Link between `api-inference-community` and framework code. app.get_pipeline = get_pipeline try: get_pipeline() except Exception: # We can fail so we can show exception later. pass if __name__ == "__main__": try: get_pipeline() except Exception: # We can fail so we can show exception later. pass
0
hf_public_repos/api-inference-community/docker_images/fasttext/app
hf_public_repos/api-inference-community/docker_images/fasttext/app/pipelines/feature_extraction.py
from typing import List from app.pipelines import Pipeline class FeatureExtractionPipeline(Pipeline): def __init__( self, model_id: str, ): # IMPLEMENT_THIS # Preload all the elements you are going to need at inference. # For instance your model, processors, tokenizer that might be needed. # This function is only called once, so do all the heavy processing I/O here super().__init__(model_id) def __call__(self, inputs: str) -> List[float]: """ Args: inputs (:obj:`str`): a string to get the features of. Return: A :obj:`list` of floats: The features computed by the model. """ return self.model.get_sentence_vector(inputs).tolist()
0
hf_public_repos/api-inference-community/docker_images/fasttext/app
hf_public_repos/api-inference-community/docker_images/fasttext/app/pipelines/base.py
from abc import ABC, abstractmethod from typing import Any import fasttext from huggingface_hub import hf_hub_download class Pipeline(ABC): @abstractmethod def __init__(self, model_id: str): model_path = hf_hub_download(model_id, "model.bin", library_name="fasttext") self.model = fasttext.load_model(model_path) self.model_id = model_id @abstractmethod def __call__(self, inputs: Any) -> Any: raise NotImplementedError("Pipelines should implement a __call__ method") class PipelineException(Exception): pass
0
hf_public_repos/api-inference-community/docker_images/fasttext/app
hf_public_repos/api-inference-community/docker_images/fasttext/app/pipelines/__init__.py
from app.pipelines.base import Pipeline, PipelineException # isort:skip from app.pipelines.feature_extraction import FeatureExtractionPipeline from app.pipelines.text_classification import TextClassificationPipeline
0
hf_public_repos/api-inference-community/docker_images/fasttext/app
hf_public_repos/api-inference-community/docker_images/fasttext/app/pipelines/text_classification.py
from typing import Dict, List from app.pipelines import Pipeline from huggingface_hub import HfApi FASTTEXT_PREFIX_LENGTH = 9 # fasttext labels are formatted like "__label__eng_Latn" class TextClassificationPipeline(Pipeline): def __init__( self, model_id: str, ): super().__init__(model_id) self.info = HfApi().model_info(repo_id=self.model_id) def __call__(self, inputs: str) -> List[Dict[str, float]]: """ Args: inputs (:obj:`str`): a string containing some text Return: A :obj:`list`:. The object returned should be a list of one list like [[{"label": 0.9939950108528137}]] containing: - "label": A string representing what the label/class is. There can be multiple labels. - "score": A score between 0 and 1 describing how confident the model is for this label/class. """ if "language-identification" in self.info.tags: preds = self.model.predict(inputs, k=5) result = [ {"label": label[FASTTEXT_PREFIX_LENGTH:], "score": prob} for label, prob in zip(preds[0], preds[1]) ] return [result] if len(inputs.split()) > 1: raise ValueError("Expected input is a single word") preds = self.model.get_nearest_neighbors(inputs, k=5) result = [] for distance, word in preds: result.append({"label": word, "score": distance}) return [result]
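A small standalone sketch of how the FASTTEXT_PREFIX_LENGTH slicing turns raw fasttext labels into the response shape described in the docstring (the labels and probabilities below are invented):

FASTTEXT_PREFIX_LENGTH = 9  # len("__label__")

# model.predict() returns labels and probabilities as two parallel sequences
labels = ("__label__eng_Latn", "__label__fra_Latn")
probs = (0.97, 0.02)

result = [
    {"label": label[FASTTEXT_PREFIX_LENGTH:], "score": prob}
    for label, prob in zip(labels, probs)
]
print([result])
# [[{'label': 'eng_Latn', 'score': 0.97}, {'label': 'fra_Latn', 'score': 0.02}]]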
0
hf_public_repos/api-inference-community/docker_images/fasttext
hf_public_repos/api-inference-community/docker_images/fasttext/tests/test_api_feature_extraction.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "feature-extraction" not in ALLOWED_TASKS, "feature-extraction not implemented", ) class FeatureExtractionTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["feature-extraction"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "feature-extraction" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = "Hello, my name is John and I live in New York" with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual({type(item) for item in content}, {float}) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual({type(item) for item in content}, {float}) def test_malformed_sentence(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', )
0
hf_public_repos/api-inference-community/docker_images/fasttext
hf_public_repos/api-inference-community/docker_images/fasttext/tests/test_docker_build.py
import os import subprocess from unittest import TestCase class cd: """Context manager for changing the current working directory""" def __init__(self, newPath): self.newPath = os.path.expanduser(newPath) def __enter__(self): self.savedPath = os.getcwd() os.chdir(self.newPath) def __exit__(self, etype, value, traceback): os.chdir(self.savedPath) class DockerBuildTestCase(TestCase): def test_can_build_docker_image(self): with cd(os.path.dirname(os.path.dirname(__file__))): subprocess.check_output(["docker", "build", "."])
0
hf_public_repos/api-inference-community/docker_images/fasttext
hf_public_repos/api-inference-community/docker_images/fasttext/tests/test_api.py
import os from typing import Dict, List from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS, get_pipeline # Must contain at least one example of each implemented pipeline # Tests do not check the actual values of the model output, so small dummy # models are recommended for faster tests. TESTABLE_MODELS: Dict[str, List[str]] = { "text-classification": [ "osanseviero/fasttext_nearest", "sheonhan/fasttext-language-identification", ], "feature-extraction": ["osanseviero/fasttext_embedding"], } ALL_TASKS = { "audio-classification", "audio-to-audio", "automatic-speech-recognition", "feature-extraction", "image-classification", "language-identification", "question-answering", "sentence-similarity", "speech-segmentation", "structured-data-classification", "text-to-speech", "token-classification", } class PipelineTestCase(TestCase): @skipIf( os.path.dirname(os.path.dirname(__file__)).endswith("common"), "common is a special case", ) def test_has_at_least_one_task_enabled(self): self.assertGreater( len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task" ) def test_unsupported_tasks(self): unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys() for unsupported_task in unsupported_tasks: with self.subTest(msg=unsupported_task, task=unsupported_task): os.environ["TASK"] = unsupported_task os.environ["MODEL_ID"] = "XX" with self.assertRaises(EnvironmentError): get_pipeline()
0
hf_public_repos/api-inference-community/docker_images/fasttext
hf_public_repos/api-inference-community/docker_images/fasttext/tests/test_api_text_classification.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from parameterized import parameterized_class from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "text-classification" not in ALLOWED_TASKS, "text-classification not implemented", ) @parameterized_class( [{"model_id": model_id} for model_id in TESTABLE_MODELS["text-classification"]] ) class TextClassificationTestCase(TestCase): def setUp(self): self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = self.model_id os.environ["TASK"] = "text-classification" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = "beautiful" with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(len(content), 1) self.assertEqual(type(content[0]), list) self.assertEqual( set(k for el in content[0] for k in el.keys()), {"label", "score"}, ) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(len(content), 1) self.assertEqual(type(content[0]), list) self.assertEqual( set(k for el in content[0] for k in el.keys()), {"label", "score"}, ) def test_malformed_question(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', ) def test_multiple_words(self): inputs = "this is great" # For "language-identification" subtask, fasttext can identify the language of a sentence # but when getting a word vector's nearest neighbors, only a single word is valid as an input expected_status_code = ( 200 if "language-identification" in self.model_id else 400 ) with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, expected_status_code, )
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/sentence_transformers/Dockerfile
FROM tiangolo/uvicorn-gunicorn:python3.8 LABEL maintainer="Omar <omar@huggingface.co>" # Add any system dependency here # RUN apt-get update -y && apt-get install libXXX -y RUN pip3 install --no-cache-dir torch==1.13.0 COPY ./requirements.txt /app RUN pip install --no-cache-dir -r requirements.txt COPY ./prestart.sh /app/ # Most DL models are quite large in terms of memory, using workers is a HUGE # slowdown because of the fork and GIL with python. # Using multiple pods seems like a better default strategy. # Feel free to override if it does not make sense for your library. ARG max_workers=1 ENV MAX_WORKERS=$max_workers ENV HUGGINGFACE_HUB_CACHE=/data ENV SENTENCE_TRANSFORMERS_HOME=/data ENV TRANSFORMERS_CACHE=/data # Necessary on GPU environment docker. # TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose # rendering TIMEOUT defined by uvicorn impossible to use correctly # We're overriding it to be renamed UVICORN_TIMEOUT # UVICORN_TIMEOUT is a useful variable for very large models that take more # than 30s (the default) to load in memory. # If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will # kill workers all the time before they finish. RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py COPY ./app /app/app
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/sentence_transformers/requirements.txt
starlette==0.27.0 api-inference-community==0.0.32 sentence-transformers==3.0.1 transformers==4.41.1 tokenizers==0.19.1 protobuf==3.18.3 huggingface_hub==0.23.3 sacremoses==0.0.53 # dummy.
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/sentence_transformers/prestart.sh
python app/main.py
0
hf_public_repos/api-inference-community/docker_images/sentence_transformers
hf_public_repos/api-inference-community/docker_images/sentence_transformers/app/main.py
import functools import logging import os from typing import Dict, Type from api_inference_community.routes import pipeline_route, status_ok from app.pipelines import ( FeatureExtractionPipeline, Pipeline, SentenceSimilarityPipeline, ) from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route TASK = os.getenv("TASK") MODEL_ID = os.getenv("MODEL_ID") logger = logging.getLogger(__name__) # Add the allowed tasks # Supported tasks are: # - text-generation # - text-classification # - token-classification # - translation # - summarization # - automatic-speech-recognition # - ... # For instance # from app.pipelines import AutomaticSpeechRecognitionPipeline # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline} # You can check the requirements and expectations of each pipelines in their respective # directories. Implement directly within the directories. ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { "feature-extraction": FeatureExtractionPipeline, "sentence-similarity": SentenceSimilarityPipeline, } @functools.lru_cache() def get_pipeline() -> Pipeline: task = os.environ["TASK"] model_id = os.environ["MODEL_ID"] if task not in ALLOWED_TASKS: raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}") return ALLOWED_TASKS[task](model_id) routes = [ Route("/{whatever:path}", status_ok), Route("/{whatever:path}", pipeline_route, methods=["POST"]), ] middleware = [Middleware(GZipMiddleware, minimum_size=1000)] if os.environ.get("DEBUG", "") == "1": from starlette.middleware.cors import CORSMiddleware middleware.append( Middleware( CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"], ) ) app = Starlette(routes=routes, middleware=middleware) @app.on_event("startup") async def startup_event(): logger = logging.getLogger("uvicorn.access") handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) logger.handlers = [handler] # Link between `api-inference-community` and framework code. app.get_pipeline = get_pipeline try: get_pipeline() except Exception: # We can fail so we can show exception later. pass if __name__ == "__main__": try: get_pipeline() except Exception: # We can fail so we can show exception later. pass
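A quick sketch of exercising the sentence-similarity task end to end, using the payload shape from tests/test_api_sentence_similarity.py and one of the model ids listed in tests/test_api.py:

import os

os.environ["TASK"] = "sentence-similarity"
os.environ["MODEL_ID"] = "sentence-transformers/paraphrase-distilroberta-base-v1"  # from tests/test_api.py

from starlette.testclient import TestClient
from app.main import app

inputs = {
    "source_sentence": "I am a very happy man",
    "sentences": ["I am a super happy man", "I am a sad man"],
}
with TestClient(app) as client:
    print(client.post("/", json={"inputs": inputs}).json())  # one float per candidate sentence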
0
hf_public_repos/api-inference-community/docker_images/sentence_transformers/app
hf_public_repos/api-inference-community/docker_images/sentence_transformers/app/pipelines/feature_extraction.py
import os from typing import List from app.pipelines import Pipeline from sentence_transformers import SentenceTransformer class FeatureExtractionPipeline(Pipeline): def __init__( self, model_id: str, ): self.model = SentenceTransformer( model_id, use_auth_token=os.getenv("HF_API_TOKEN") ) def __call__(self, inputs: str) -> List[float]: """ Args: inputs (:obj:`str`): a string to get the features of. Return: A :obj:`list` of floats: The features computed by the model. """ return self.model.encode(inputs).tolist()
0
hf_public_repos/api-inference-community/docker_images/sentence_transformers/app
hf_public_repos/api-inference-community/docker_images/sentence_transformers/app/pipelines/base.py
from abc import ABC, abstractmethod from typing import Any class Pipeline(ABC): @abstractmethod def __init__(self, model_id: str): raise NotImplementedError("Pipelines should implement an __init__ method") @abstractmethod def __call__(self, inputs: Any) -> Any: raise NotImplementedError("Pipelines should implement a __call__ method") class PipelineException(Exception): pass
0
hf_public_repos/api-inference-community/docker_images/sentence_transformers/app
hf_public_repos/api-inference-community/docker_images/sentence_transformers/app/pipelines/sentence_similarity.py
import os from typing import Dict, List, Union from app.pipelines import Pipeline from sentence_transformers import SentenceTransformer, util class SentenceSimilarityPipeline(Pipeline): def __init__( self, model_id: str, ): self.model = SentenceTransformer( model_id, use_auth_token=os.getenv("HF_API_TOKEN") ) def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]: """ Args: inputs (:obj:`dict`): a dictionary containing two keys, 'source_sentence' mapping to the sentence that will be compared against all the others, and 'sentences', mapping to a list of strings to which the source will be compared. Return: A :obj:`list` of floats: Cosine similarity between `source_sentence` and each sentence from `sentences`. """ embeddings1 = self.model.encode( inputs["source_sentence"], convert_to_tensor=True ) embeddings2 = self.model.encode(inputs["sentences"], convert_to_tensor=True) similarities = util.pytorch_cos_sim(embeddings1, embeddings2).tolist()[0] return similarities
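The same cosine-similarity computation can be sketched standalone with one of the models from tests/test_api.py (the sentences are just examples):

from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("sentence-transformers/paraphrase-distilroberta-base-v1")
source = model.encode("I am a very happy man", convert_to_tensor=True)
others = model.encode(["I am a super happy man", "I am a sad man"], convert_to_tensor=True)
print(util.pytorch_cos_sim(source, others).tolist()[0])  # one similarity score per sentence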
0
hf_public_repos/api-inference-community/docker_images/sentence_transformers/app
hf_public_repos/api-inference-community/docker_images/sentence_transformers/app/pipelines/__init__.py
from app.pipelines.base import Pipeline, PipelineException # isort:skip from app.pipelines.feature_extraction import FeatureExtractionPipeline from app.pipelines.sentence_similarity import SentenceSimilarityPipeline
0
hf_public_repos/api-inference-community/docker_images/sentence_transformers
hf_public_repos/api-inference-community/docker_images/sentence_transformers/tests/test_api_feature_extraction.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "feature-extraction" not in ALLOWED_TASKS, "feature-extraction not implemented", ) class FeatureExtractionTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["feature-extraction"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "feature-extraction" from app.main import app self.app = app def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = "Hello, my name is John and I live in New York" with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual({type(item) for item in content}, {float}) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual({type(item) for item in content}, {float}) def test_malformed_sentence(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', )
0
hf_public_repos/api-inference-community/docker_images/sentence_transformers
hf_public_repos/api-inference-community/docker_images/sentence_transformers/tests/test_docker_build.py
import os import subprocess from unittest import TestCase class cd: """Context manager for changing the current working directory""" def __init__(self, newPath): self.newPath = os.path.expanduser(newPath) def __enter__(self): self.savedPath = os.getcwd() os.chdir(self.newPath) def __exit__(self, etype, value, traceback): os.chdir(self.savedPath) class DockerBuildTestCase(TestCase): def test_can_build_docker_image(self): with cd(os.path.dirname(os.path.dirname(__file__))): subprocess.check_output(["docker", "build", "."])
0
hf_public_repos/api-inference-community/docker_images/sentence_transformers
hf_public_repos/api-inference-community/docker_images/sentence_transformers/tests/test_api.py
import os
from typing import Dict, List
from unittest import TestCase, skipIf

from app.main import ALLOWED_TASKS, get_pipeline


# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, List[str]] = {
    "feature-extraction": ["bert-base-uncased"],
    "sentence-similarity": [
        "sentence-transformers/paraphrase-distilroberta-base-v1",
        "sentence-transformers/paraphrase-xlm-r-multilingual-v1",
    ],
}


ALL_TASKS = {
    "automatic-speech-recognition",
    "audio-source-separation",
    "feature-extraction",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "text-generation",
    "text-to-speech",
}


class PipelineTestCase(TestCase):
    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                # get_pipeline() reads TASK and MODEL_ID from the environment and
                # takes no arguments, so configure the task through the environment
                # (same approach as the other docker_images test suites).
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
0
hf_public_repos/api-inference-community/docker_images/sentence_transformers
hf_public_repos/api-inference-community/docker_images/sentence_transformers/tests/test_api_sentence_similarity.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from parameterized import parameterized_class from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "feature-extraction" not in ALLOWED_TASKS, "feature-extraction not implemented", ) @parameterized_class( [{"model_id": model_id} for model_id in TESTABLE_MODELS["sentence-similarity"]] ) class SentenceSimilarityTestCase(TestCase): def setUp(self): self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = self.model_id os.environ["TASK"] = "sentence-similarity" from app.main import app self.app = app def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): source_sentence = "I am a very happy man" sentences = [ "What is this?", "I am a super happy man", "I am a sad man", "I am a happy dog", ] inputs = {"source_sentence": source_sentence, "sentences": sentences} with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual({type(item) for item in content}, {float}) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual({type(item) for item in content}, {float}) def test_missing_input_sentences(self): source_sentence = "I am a very happy man" inputs = {"source_sentence": source_sentence} with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 400, ) def test_malformed_input(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', )
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/spacy/Dockerfile
FROM tiangolo/uvicorn-gunicorn:python3.8 LABEL maintainer="Omar Sanseviero omar@huggingface.com" # Add any system dependency here # RUN apt-get update -y && apt-get install libXXX -y COPY ./requirements.txt /app RUN pip install --no-cache-dir -r requirements.txt COPY ./prestart.sh /app/ # Most DL models are quite large in terms of memory, using workers is a HUGE # slowdown because of the fork and GIL with python. # Using multiple pods seems like a better default strategy. # Feel free to override if it does not make sense for your library. ARG max_workers=1 ENV MAX_WORKERS=$max_workers ENV HUGGINGFACE_HUB_CACHE=/data ENV PIP_CACHE=/data # Necessary on GPU environment docker. # TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose # rendering TIMEOUT defined by uvicorn impossible to use correctly # We're overriding it to be renamed UVICORN_TIMEOUT # UVICORN_TIMEOUT is a useful variable for very large models that take more # than 30s (the default) to load in memory. # If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will # kill workers all the time before they finish. RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py COPY ./app /app/app
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/spacy/requirements.txt
starlette==0.27.0 api-inference-community==0.0.23 huggingface_hub==0.5.1 requests==2.31.0
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/spacy/prestart.sh
python app/main.py
0
hf_public_repos/api-inference-community/docker_images/spacy
hf_public_repos/api-inference-community/docker_images/spacy/app/main.py
import functools import logging import os from typing import Dict, Type from api_inference_community.routes import pipeline_route, status_ok from app.pipelines import ( Pipeline, SentenceSimilarityPipeline, TextClassificationPipeline, TokenClassificationPipeline, ) from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route TASK = os.getenv("TASK") MODEL_ID = os.getenv("MODEL_ID") logger = logging.getLogger(__name__) # Add the allowed tasks # Supported tasks are: # - text-generation # - text-classification # - token-classification # - translation # - summarization # - automatic-speech-recognition # - sentence-similarity # - ... # For instance # from app.pipelines import AutomaticSpeechRecognitionPipeline # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline} # You can check the requirements and expectations of each pipelines in their respective # directories. Implement directly within the directories. ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { "token-classification": TokenClassificationPipeline, "text-classification": TextClassificationPipeline, "sentence-similarity": SentenceSimilarityPipeline, } @functools.lru_cache() def get_pipeline() -> Pipeline: task = os.environ["TASK"] model_id = os.environ["MODEL_ID"] if task not in ALLOWED_TASKS: raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}") return ALLOWED_TASKS[task](model_id) routes = [ Route("/{whatever:path}", status_ok), Route("/{whatever:path}", pipeline_route, methods=["POST"]), ] middleware = [Middleware(GZipMiddleware, minimum_size=1000)] if os.environ.get("DEBUG", "") == "1": from starlette.middleware.cors import CORSMiddleware middleware.append( Middleware( CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"], ) ) app = Starlette(routes=routes, middleware=middleware) @app.on_event("startup") async def startup_event(): logger = logging.getLogger("uvicorn.access") handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) logger.handlers = [handler] # Link between `api-inference-community` and framework code. app.get_pipeline = get_pipeline try: get_pipeline() except Exception: # We can fail so we can show exception later. pass if __name__ == "__main__": try: get_pipeline() except Exception: # We can fail so we can show exception later. pass
0
hf_public_repos/api-inference-community/docker_images/spacy/app
hf_public_repos/api-inference-community/docker_images/spacy/app/pipelines/token_classification.py
import os
import subprocess
import sys
from typing import Any, Dict, List

from app.pipelines import Pipeline


class TokenClassificationPipeline(Pipeline):
    def __init__(
        self,
        model_id: str,
    ):
        # At the time, only public models from spaCy are allowed in the inference API.
        full_model_path = model_id.split("/")
        if len(full_model_path) != 2:
            raise ValueError(
                f"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)"
            )
        namespace, model_name = full_model_path
        hf_endpoint = os.getenv("HF_ENDPOINT", "https://huggingface.co")
        package = f"{hf_endpoint}/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl"
        cache_dir = os.environ["PIP_CACHE"]
        subprocess.check_call(
            [sys.executable, "-m", "pip", "install", "--cache-dir", cache_dir, package]
        )
        import spacy

        self.model = spacy.load(model_name)

    def __call__(self, inputs: str) -> List[Dict[str, Any]]:
        """
        Args:
            inputs (:obj:`str`):
                a string containing some text
        Return:
            A :obj:`list`:. The object returned should be like [{"entity_group": "XXX", "word": "some word", "start": 3, "end": 6, "score": 0.82}] containing:
                - "entity_group": A string representing what the entity is.
                - "word": A substring of the original string that was detected as an entity.
                - "start": the offset within `input` leading to `answer`. context[start:stop] == word
                - "end": the ending offset within `input` leading to `answer`. context[start:stop] == word
                - "score": A score between 0 and 1 describing how confident the model is for this entity.
        """
        doc = self.model(inputs)
        entities = []
        for ent in doc.ents:
            # Score is currently not well supported, see
            # https://github.com/explosion/spaCy/issues/5917.
            current_entity = {
                "entity_group": ent.label_,
                "word": ent.text,
                "start": ent.start_char,
                "end": ent.end_char,
                "score": 1.0,
            }
            entities.append(current_entity)

        return entities
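To make the install step concrete, this sketch builds the wheel URL for the model id used in tests/test_api.py, assuming the default HF_ENDPOINT:

model_id = "spacy/en_core_web_sm"  # from tests/test_api.py
namespace, model_name = model_id.split("/")
hf_endpoint = "https://huggingface.co"
package = f"{hf_endpoint}/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl"
print(package)
# https://huggingface.co/spacy/en_core_web_sm/resolve/main/en_core_web_sm-any-py3-none-any.whl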
0
hf_public_repos/api-inference-community/docker_images/spacy/app
hf_public_repos/api-inference-community/docker_images/spacy/app/pipelines/base.py
from abc import ABC, abstractmethod from typing import Any class Pipeline(ABC): @abstractmethod def __init__(self, model_id: str): raise NotImplementedError("Pipelines should implement an __init__ method") @abstractmethod def __call__(self, inputs: Any) -> Any: raise NotImplementedError("Pipelines should implement a __call__ method") class PipelineException(Exception): pass
0
hf_public_repos/api-inference-community/docker_images/spacy/app
hf_public_repos/api-inference-community/docker_images/spacy/app/pipelines/sentence_similarity.py
import os import subprocess import sys from typing import Dict, List, Union from app.pipelines import Pipeline class SentenceSimilarityPipeline(Pipeline): def __init__( self, model_id: str, ): # At the time, only public models from spaCy are allowed in the inference API. full_model_path = model_id.split("/") if len(full_model_path) != 2: raise ValueError( f"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)" ) namespace, model_name = full_model_path hf_endpoint = os.getenv("HF_ENDPOINT", "https://huggingface.co") package = f"{hf_endpoint}/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl" cache_dir = os.environ["PIP_CACHE"] subprocess.check_call( [sys.executable, "-m", "pip", "install", "--cache-dir", cache_dir, package] ) import spacy self.model = spacy.load(model_name) def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> List[float]: """ Args: inputs (:obj:`dict`): a dictionary containing two keys, 'source_sentence' mapping to the sentence that will be compared against all the others, and 'sentences', mapping to a list of strings to which the source will be compared. Return: A :obj:`list` of floats: Some similarity measure between `source_sentence` and each sentence from `sentences`. """ source_sentence = inputs["source_sentence"] source_doc = self.model(source_sentence) similarities = [] for sentence in inputs["sentences"]: search_doc = self.model(sentence) similarities.append(source_doc.similarity(search_doc)) return similarities
0
hf_public_repos/api-inference-community/docker_images/spacy/app
hf_public_repos/api-inference-community/docker_images/spacy/app/pipelines/__init__.py
from app.pipelines.base import Pipeline, PipelineException # isort:skip from app.pipelines.sentence_similarity import SentenceSimilarityPipeline from app.pipelines.text_classification import TextClassificationPipeline from app.pipelines.token_classification import TokenClassificationPipeline
0
hf_public_repos/api-inference-community/docker_images/spacy/app
hf_public_repos/api-inference-community/docker_images/spacy/app/pipelines/text_classification.py
import os import subprocess import sys from typing import Dict, List from app.pipelines import Pipeline class TextClassificationPipeline(Pipeline): def __init__( self, model_id: str, ): # At the time, only public models from spaCy are allowed in the inference API. full_model_path = model_id.split("/") if len(full_model_path) != 2: raise ValueError( f"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)" ) namespace, model_name = full_model_path hf_endpoint = os.getenv("HF_ENDPOINT", "https://huggingface.co") package = f"{hf_endpoint}/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl" cache_dir = os.environ["PIP_CACHE"] subprocess.check_call( [sys.executable, "-m", "pip", "install", "--cache-dir", cache_dir, package] ) import spacy self.model = spacy.load(model_name) def __call__(self, inputs: str) -> List[List[Dict[str, float]]]: """ Args: inputs (:obj:`str`): a string containing some text Return: A :obj:`list`:. The object returned should be a list of one list like [[{"label": 0.9939950108528137}]] containing : - "label": A string representing what the label/class is. There can be multiple labels. - "score": A score between 0 and 1 describing how confident the model is for this label/class. """ doc = self.model(inputs) categories = [] for cat, score in doc.cats.items(): categories.append({"label": cat, "score": score}) return [categories]
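An illustration of how spaCy's doc.cats dictionary maps onto the documented response shape (label names and scores are invented):

doc_cats = {"joy": 0.91, "anger": 0.05, "sadness": 0.04}  # what doc.cats looks like
categories = [{"label": cat, "score": score} for cat, score in doc_cats.items()]
print([categories])
# [[{'label': 'joy', 'score': 0.91}, {'label': 'anger', 'score': 0.05}, {'label': 'sadness', 'score': 0.04}]]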
0
hf_public_repos/api-inference-community/docker_images/spacy
hf_public_repos/api-inference-community/docker_images/spacy/tests/test_api_token_classification.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "token-classification" not in ALLOWED_TASKS, "token-classification not implemented", ) class TokenClassificationTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["token-classification"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "token-classification" from app.main import app self.app = app def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = "Hello, my name is John and I live in New York" with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual( set(k for el in content for k in el.keys()), {"entity_group", "word", "start", "end", "score"}, ) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual( set(k for el in content for k in el.keys()), {"entity_group", "word", "start", "end", "score"}, ) def test_malformed_question(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', )
0
hf_public_repos/api-inference-community/docker_images/spacy
hf_public_repos/api-inference-community/docker_images/spacy/tests/test_docker_build.py
import os import subprocess from unittest import TestCase class cd: """Context manager for changing the current working directory""" def __init__(self, newPath): self.newPath = os.path.expanduser(newPath) def __enter__(self): self.savedPath = os.getcwd() os.chdir(self.newPath) def __exit__(self, etype, value, traceback): os.chdir(self.savedPath) class DockerBuildTestCase(TestCase): def test_can_build_docker_image(self): with cd(os.path.dirname(os.path.dirname(__file__))): subprocess.check_output(["docker", "build", "."])
0
hf_public_repos/api-inference-community/docker_images/spacy
hf_public_repos/api-inference-community/docker_images/spacy/tests/test_api.py
import os
from typing import Dict
from unittest import TestCase, skipIf

from app.main import ALLOWED_TASKS, get_pipeline


# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, str] = {
    # IMPLEMENT_THIS
    # "automatic-speech-recognition": "mysample-ASR",
    # "text-generation": "mysample-gpt2",
    "token-classification": "spacy/en_core_web_sm",
    "text-classification": "explosion/en_textcat_goemotions",
    "sentence-similarity": "spacy/en_core_web_sm",
}


ALL_TASKS = {
    "automatic-speech-recognition",
    "audio-source-separation",
    "feature-extraction",
    "image-classification",
    "question-answering",
    "sentence-similarity",
    "text-generation",
    "text-to-speech",
}


class PipelineTestCase(TestCase):
    @skipIf(
        os.path.dirname(os.path.dirname(__file__)).endswith("common"),
        "common is a special case",
    )
    def test_has_at_least_one_task_enabled(self):
        self.assertGreater(
            len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
        )

    def test_unsupported_tasks(self):
        unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
        for unsupported_task in unsupported_tasks:
            with self.subTest(msg=unsupported_task, task=unsupported_task):
                # get_pipeline() reads TASK and MODEL_ID from the environment and
                # takes no arguments, so configure the task through the environment
                # (same approach as the other docker_images test suites).
                os.environ["TASK"] = unsupported_task
                os.environ["MODEL_ID"] = "XX"
                with self.assertRaises(EnvironmentError):
                    get_pipeline()
0
hf_public_repos/api-inference-community/docker_images/spacy
hf_public_repos/api-inference-community/docker_images/spacy/tests/test_api_sentence_similarity.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "sentence-similarity" not in ALLOWED_TASKS, "sentence-similarity not implemented", ) class SentenceSimilarityTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["sentence-similarity"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "sentence-similarity" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): source_sentence = "I am a very happy man" sentences = [ "What is this?", "I am a super happy man", "I am a sad man", "I am a happy dog", ] inputs = {"source_sentence": source_sentence, "sentences": sentences} with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual({type(item) for item in content}, {float}) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual({type(item) for item in content}, {float}) def test_missing_input_sentences(self): source_sentence = "I am a very happy man" inputs = {"source_sentence": source_sentence} with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 400, ) def test_malformed_input(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', )
0
hf_public_repos/api-inference-community/docker_images/spacy
hf_public_repos/api-inference-community/docker_images/spacy/tests/test_api_text_classification.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "text-classification" not in ALLOWED_TASKS, "text-classification not implemented", ) class TextClassificationTestCase(TestCase): def setUp(self): model_id = TESTABLE_MODELS["text-classification"] self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = model_id os.environ["TASK"] = "text-classification" from app.main import app self.app = app @classmethod def setUpClass(cls): from app.main import get_pipeline get_pipeline.cache_clear() def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = "It is a beautiful day outside" with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(len(content), 1) self.assertEqual(type(content[0]), list) self.assertEqual( set(k for el in content[0] for k in el.keys()), {"label", "score"}, ) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(len(content), 1) self.assertEqual(type(content[0]), list) self.assertEqual( set(k for el in content[0] for k in el.keys()), {"label", "score"}, ) def test_malformed_question(self): with TestClient(self.app) as client: response = client.post("/", data=b"\xc3\x28") self.assertEqual( response.status_code, 400, ) self.assertEqual( response.content, b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}', )
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/paddlenlp/Dockerfile
FROM tiangolo/uvicorn-gunicorn:python3.8 LABEL maintainer="PaddleNLP <paddlenlp@baidu.com>" # Add any system dependency here # RUN apt-get update -y && apt-get install libXXX -y COPY ./requirements.txt /app RUN pip install --no-cache-dir -r requirements.txt COPY ./prestart.sh /app/ # Most DL models are quite large in terms of memory, using workers is a HUGE # slowdown because of the fork and GIL with python. # Using multiple pods seems like a better default strategy. # Feel free to override if it does not make sense for your library. ARG max_workers=1 ENV MAX_WORKERS=$max_workers ENV HUGGINGFACE_HUB_CACHE=/data # Necessary on GPU environment docker. # TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose # rendering TIMEOUT defined by uvicorn impossible to use correctly # We're overriding it to be renamed UVICORN_TIMEOUT # UVICORN_TIMEOUT is a useful variable for very large models that take more # than 30s (the default) to load in memory. # If UVICORN_TIMEOUT is too low, uvicorn will simply never loads as it will # kill workers all the time before they finish. RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py COPY ./app /app/app
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/paddlenlp/requirements.txt
starlette==0.27.0 api-inference-community==0.0.27 huggingface_hub>=0.10.1 paddlepaddle==2.5.0 paddlenlp>=2.5.0 #Dummy
0
hf_public_repos/api-inference-community/docker_images
hf_public_repos/api-inference-community/docker_images/paddlenlp/prestart.sh
python app/main.py
0
hf_public_repos/api-inference-community/docker_images/paddlenlp
hf_public_repos/api-inference-community/docker_images/paddlenlp/app/main.py
import functools import logging import os from typing import Dict, Type from api_inference_community.routes import pipeline_route, status_ok from app.pipelines import ( ConversationalPipeline, FillMaskPipeline, Pipeline, SummarizationPipeline, ZeroShotClassificationPipeline, ) from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.middleware.gzip import GZipMiddleware from starlette.routing import Route TASK = os.getenv("TASK") MODEL_ID = os.getenv("MODEL_ID") logger = logging.getLogger(__name__) # Add the allowed tasks # Supported tasks are: # - text-generation # - text-classification # - token-classification # - translation # - summarization # - automatic-speech-recognition # - ... # For instance # from app.pipelines import AutomaticSpeechRecognitionPipeline # ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline} # You can check the requirements and expectations of each pipelines in their respective # directories. Implement directly within the directories. ALLOWED_TASKS: Dict[str, Type[Pipeline]] = { "conversational": ConversationalPipeline, "fill-mask": FillMaskPipeline, "summarization": SummarizationPipeline, "zero-shot-classification": ZeroShotClassificationPipeline, } @functools.lru_cache() def get_pipeline() -> Pipeline: task = os.environ["TASK"] model_id = os.environ["MODEL_ID"] if task not in ALLOWED_TASKS: raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}") return ALLOWED_TASKS[task](model_id) routes = [ Route("/{whatever:path}", status_ok), Route("/{whatever:path}", pipeline_route, methods=["POST"]), ] middleware = [Middleware(GZipMiddleware, minimum_size=1000)] if os.environ.get("DEBUG", "") == "1": from starlette.middleware.cors import CORSMiddleware middleware.append( Middleware( CORSMiddleware, allow_origins=["*"], allow_headers=["*"], allow_methods=["*"], ) ) app = Starlette(routes=routes, middleware=middleware) @app.on_event("startup") async def startup_event(): logger = logging.getLogger("uvicorn.access") handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) logger.handlers = [handler] # Link between `api-inference-community` and framework code. app.get_pipeline = get_pipeline try: get_pipeline() except Exception: # We can fail so we can show exception later. pass if __name__ == "__main__": try: get_pipeline() except Exception: # We can fail so we can show exception later. pass
0
hf_public_repos/api-inference-community/docker_images/paddlenlp/app
hf_public_repos/api-inference-community/docker_images/paddlenlp/app/pipelines/summarization.py
from typing import Dict, List from app.pipelines import Pipeline from paddlenlp.taskflow import Taskflow class SummarizationPipeline(Pipeline): def __init__(self, model_id: str): self.taskflow = Taskflow( "text_summarization", task_path=model_id, from_hf_hub=True ) def __call__(self, inputs: str) -> List[Dict[str, str]]: """ Args: inputs (:obj:`str`): a string to be summarized Return: A :obj:`list` of :obj:`dict` in the form of {"summary_text": "The string after summarization"} """ results = self.taskflow(inputs) return [{"summary_text": results[0]}]
0
hf_public_repos/api-inference-community/docker_images/paddlenlp/app
hf_public_repos/api-inference-community/docker_images/paddlenlp/app/pipelines/base.py
from abc import ABC, abstractmethod from typing import Any class Pipeline(ABC): @abstractmethod def __init__(self, model_id: str): raise NotImplementedError("Pipelines should implement an __init__ method") @abstractmethod def __call__(self, inputs: Any) -> Any: raise NotImplementedError("Pipelines should implement a __call__ method") class PipelineException(Exception): pass
0
hf_public_repos/api-inference-community/docker_images/paddlenlp/app
hf_public_repos/api-inference-community/docker_images/paddlenlp/app/pipelines/fill_mask.py
from typing import Any, Dict, List from app.pipelines import Pipeline from paddlenlp.taskflow import Taskflow class FillMaskPipeline(Pipeline): def __init__(self, model_id: str): self.taskflow = Taskflow("fill_mask", task_path=model_id, from_hf_hub=True) def __call__(self, inputs: str) -> List[Dict[str, Any]]: """ Args: inputs (:obj:`str`): a string to be filled from, must contain one and only one [MASK] token (check model card for exact name of the mask) Return: A :obj:`list`:. a list of dicts containing the following: - "sequence": The actual sequence of tokens that ran against the model (may contain special tokens) - "score": The probability for this token. - "token": The id of the token - "token_str": The string representation of the token """ results = self.taskflow(inputs) # since paddlenlp taskflow takes batch requests and returns batch results, we take the first element here return results[0]
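For reference, the request body used by tests/test_api_fill_mask.py and the shape of one returned prediction, following the docstring above (the concrete values are invented):

payload = {"inputs": "生活的真谛是[MASK]。"}  # must contain exactly one [MASK] token

# Shape of a single prediction in the returned list, per the docstring (values invented):
prediction = {
    "sequence": "生活的真谛是爱。",
    "score": 0.7,
    "token": 123,
    "token_str": "爱",
}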
0
hf_public_repos/api-inference-community/docker_images/paddlenlp/app
hf_public_repos/api-inference-community/docker_images/paddlenlp/app/pipelines/conversational.py
from typing import Any, Dict, List, Union from app.pipelines import Pipeline from paddlenlp.taskflow import Taskflow class ConversationalPipeline(Pipeline): def __init__(self, model_id: str): self.pipeline = Taskflow("dialogue", task_path=model_id, from_hf_hub=True) def __call__(self, inputs: Dict[str, Union[str, List[str]]]) -> Dict[str, Any]: """ Args: inputs (:obj:`dict`): a dictionary containing the following key values: text (`str`, *optional*): The initial user input to start the conversation past_user_inputs (`List[str]`, *optional*): Eventual past history of the conversation of the user. You don't need to pass it manually if you use the pipeline interactively but if you want to recreate history you need to set both `past_user_inputs` and `generated_responses` with equal length lists of strings generated_responses (`List[str]`, *optional*): Eventual past history of the conversation of the model. You don't need to pass it manually if you use the pipeline interactively but if you want to recreate history you need to set both `past_user_inputs` and `generated_responses` with equal length lists of strings Return: A :obj:`dict`: a dictionary containing the following key values: generated_text (`str`): The answer of the bot conversation (`Dict[str, List[str]]`): A facility dictionary to send back for the next input (with the new user input addition). past_user_inputs (`List[str]`) List of strings. The last inputs from the user in the conversation, after the model has run. generated_responses (`List[str]`) List of strings. The last outputs from the model in the conversation, after the model has run. """ text = inputs["text"] past_user_inputs = inputs.get("past_user_inputs", []) generated_responses = inputs.get("generated_responses", []) complete_message_history = [] for user_input, responses in zip(past_user_inputs, generated_responses): complete_message_history.extend([user_input, responses]) complete_message_history.append(text) cur_response = self.pipeline(complete_message_history)[0] past_user_inputs.append(text) generated_responses.append(cur_response) return { "generated_text": cur_response, "conversation": { "generated_responses": generated_responses, "past_user_inputs": past_user_inputs, }, }
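A standalone sketch of how the flat message history is rebuilt from the request fields before being passed to the Taskflow (the user strings come from tests/test_api_conversational.py, the bot reply is invented):

text = "这是个测试"
past_user_inputs = ["你好!"]
generated_responses = ["你好,很高兴认识你。"]  # invented bot reply

complete_message_history = []
for user_input, response in zip(past_user_inputs, generated_responses):
    complete_message_history.extend([user_input, response])
complete_message_history.append(text)
print(complete_message_history)
# ['你好!', '你好,很高兴认识你。', '这是个测试'] -> alternating user/bot turns, new input last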
0
hf_public_repos/api-inference-community/docker_images/paddlenlp/app
hf_public_repos/api-inference-community/docker_images/paddlenlp/app/pipelines/zero_shot_classification.py
from typing import Any, Dict, List, Optional

from app.pipelines import Pipeline
from paddlenlp.taskflow import Taskflow


class ZeroShotClassificationPipeline(Pipeline):
    def __init__(self, model_id: str):
        self.taskflow = Taskflow(
            "zero_shot_text_classification",
            task_path=model_id,
            from_hf_hub=True,
            pred_threshold=0.0,  # so that it returns all predictions
        )

    def __call__(
        self, inputs: str, candidate_labels: Optional[List[str]] = None, **kwargs
    ) -> Dict[str, Any]:
        """
        Args:
            inputs (:obj:`str`):
                a string to be classified
            candidate_labels (:obj:`List[str]`):
                a list of strings that are potential classes for inputs.
        Return:
            A :obj:`dict`: a dict containing the following:
                - "sequence": The string sent as an input
                - "labels": The list of strings for labels that you sent (in order)
                - "scores": a list of floats that correspond to the probability of each label, in the same order as labels.
        """
        if candidate_labels is None:
            raise ValueError("'candidate_labels' is a required field")
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")
        self.taskflow.set_schema(candidate_labels)
        taskflow_results = self.taskflow(inputs)

        pipeline_results = {}
        labels = []
        scores = []
        for result in taskflow_results[0]["predictions"]:
            labels.append(result["label"])
            scores.append(result["score"])
        pipeline_results["labels"] = labels
        pipeline_results["scores"] = scores
        pipeline_results["sequence"] = taskflow_results[0]["text_a"]
        return pipeline_results
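Example request matching the payload used in the zero-shot test; note that candidate_labels may also be passed as a single comma-separated string, which the pipeline splits itself:

payload = {
    "inputs": "房间干净明亮,非常不错",
    "parameters": {"candidate_labels": ["这是一条好评", "这是一条差评"]},
}
# Equivalent, since the pipeline splits comma-separated strings on its own:
payload_str_labels = {
    "inputs": "房间干净明亮,非常不错",
    "parameters": {"candidate_labels": "这是一条好评,这是一条差评"},
}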
0
hf_public_repos/api-inference-community/docker_images/paddlenlp/app
hf_public_repos/api-inference-community/docker_images/paddlenlp/app/pipelines/__init__.py
from app.pipelines.base import Pipeline, PipelineException # isort:skip from app.pipelines.conversational import ConversationalPipeline from app.pipelines.fill_mask import FillMaskPipeline from app.pipelines.summarization import SummarizationPipeline from app.pipelines.zero_shot_classification import ZeroShotClassificationPipeline
0
hf_public_repos/api-inference-community/docker_images/paddlenlp
hf_public_repos/api-inference-community/docker_images/paddlenlp/tests/test_api_fill_mask.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from parameterized import parameterized_class from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "fill-mask" not in ALLOWED_TASKS, "fill-mask not implemented", ) @parameterized_class( [{"model_id": model_id} for model_id in TESTABLE_MODELS["fill-mask"]] ) class FillMaskTestCase(TestCase): def setUp(self): self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = self.model_id os.environ["TASK"] = "fill-mask" from app.main import app self.app = app def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_simple(self): inputs = "生活的真谛是[MASK]。" with TestClient(self.app) as client: response = client.post("/", json={"inputs": inputs}) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual({type(item) for item in content}, {dict}) with TestClient(self.app) as client: response = client.post("/", json=inputs) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual({type(item) for item in content}, {dict}) def test_malformed_input(self): inputs = "生活的真谛是" with TestClient(self.app) as client: response = client.post("/", json=inputs) # should return error since the input doesn't contain a mask token self.assertEqual( response.status_code, 400, )
0
hf_public_repos/api-inference-community/docker_images/paddlenlp
hf_public_repos/api-inference-community/docker_images/paddlenlp/tests/test_api_conversational.py
import json
import os
from unittest import TestCase, skipIf

from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS


@skipIf(
    "conversational" not in ALLOWED_TASKS,
    "conversational not implemented",
)
@parameterized_class(
    [{"model_id": model_id} for model_id in TESTABLE_MODELS["conversational"]]
)
class ConversationalTestCase(TestCase):
    def setUp(self):
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        # This is the conversational test case, so run against the conversational task.
        os.environ["TASK"] = "conversational"
        from app.main import app

        self.app = app

    def tearDown(self):
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_simple(self):
        first_round_inputs = {"text": "你好!"}

        with TestClient(self.app) as client:
            response = client.post("/", json={"inputs": first_round_inputs})

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), dict)
        self.assertIn("generated_text", content)
        self.assertIn("conversation", content)
        self.assertIn("past_user_inputs", content["conversation"])
        self.assertIn("generated_responses", content["conversation"])
        self.assertEqual(len(content["conversation"]["generated_responses"]), 1)
        self.assertEqual(len(content["conversation"]["past_user_inputs"]), 1)

        second_round_inputs = {
            "text": "这是个测试",
            "past_user_inputs": content["conversation"]["past_user_inputs"],
            "generated_responses": content["conversation"]["generated_responses"],
        }

        with TestClient(self.app) as client:
            response = client.post("/", json=second_round_inputs)

        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), dict)
        self.assertIn("generated_text", content)
        self.assertIn("conversation", content)
        self.assertIn("past_user_inputs", content["conversation"])
        self.assertIn("generated_responses", content["conversation"])
        self.assertEqual(len(content["conversation"]["generated_responses"]), 2)
        self.assertEqual(len(content["conversation"]["past_user_inputs"]), 2)
0
hf_public_repos/api-inference-community/docker_images/paddlenlp
hf_public_repos/api-inference-community/docker_images/paddlenlp/tests/test_api_zero_shot_classification.py
import json
import os
from unittest import TestCase, skipIf

from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS


@skipIf(
    "zero-shot-classification" not in ALLOWED_TASKS,
    "zero-shot-classification not implemented",
)
@parameterized_class(
    [
        {"model_id": model_id}
        for model_id in TESTABLE_MODELS["zero-shot-classification"]
    ]
)
class ZeroShotClassificationTestCase(TestCase):
    def setUp(self):
        self.old_model_id = os.getenv("MODEL_ID")
        self.old_task = os.getenv("TASK")
        os.environ["MODEL_ID"] = self.model_id
        # Configure the API for the zero-shot-classification task under test
        os.environ["TASK"] = "zero-shot-classification"

        from app.main import app

        self.app = app

    def tearDown(self):
        if self.old_model_id is not None:
            os.environ["MODEL_ID"] = self.old_model_id
        else:
            del os.environ["MODEL_ID"]
        if self.old_task is not None:
            os.environ["TASK"] = self.old_task
        else:
            del os.environ["TASK"]

    def test_single_input(self):
        input_dict = {
            "inputs": "房间干净明亮,非常不错",
            "parameters": {"candidate_labels": ["这是一条好评", "这是一条差评"]},
        }
        with TestClient(self.app) as client:
            response = client.post("/", json=input_dict)
        self.assertEqual(
            response.status_code,
            200,
        )
        content = json.loads(response.content)
        self.assertEqual(type(content), list)
        self.assertEqual(len(content), 1)
        for result in content:
            self.assertIn("labels", result)
            self.assertIn("scores", result)
            self.assertIn("sequences", result)
0
hf_public_repos/api-inference-community/docker_images/paddlenlp
hf_public_repos/api-inference-community/docker_images/paddlenlp/tests/test_docker_build.py
import os
import subprocess
from unittest import TestCase


class cd:
    """Context manager for changing the current working directory"""

    def __init__(self, newPath):
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)


class DockerBuildTestCase(TestCase):
    def test_can_build_docker_image(self):
        with cd(os.path.dirname(os.path.dirname(__file__))):
            subprocess.check_output(["docker", "build", "."])
0
hf_public_repos/api-inference-community/docker_images/paddlenlp
hf_public_repos/api-inference-community/docker_images/paddlenlp/tests/test_api_summarization.py
import json import os from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS from parameterized import parameterized_class from starlette.testclient import TestClient from tests.test_api import TESTABLE_MODELS @skipIf( "summarization" not in ALLOWED_TASKS, "summarization not implemented", ) @parameterized_class( [{"model_id": model_id} for model_id in TESTABLE_MODELS["summarization"]] ) class SummarizationTestCase(TestCase): def setUp(self): self.old_model_id = os.getenv("MODEL_ID") self.old_task = os.getenv("TASK") os.environ["MODEL_ID"] = self.model_id os.environ["TASK"] = "summarization" from app.main import app self.app = app def tearDown(self): if self.old_model_id is not None: os.environ["MODEL_ID"] = self.old_model_id else: del os.environ["MODEL_ID"] if self.old_task is not None: os.environ["TASK"] = self.old_task else: del os.environ["TASK"] def test_single_input(self): text = "test" with TestClient(self.app) as client: response = client.post("/", json=text) self.assertEqual( response.status_code, 200, ) content = json.loads(response.content) self.assertEqual(type(content), list) self.assertEqual(len(content), 1) for result in content: self.assertIn("summary_text", result)
0
hf_public_repos/api-inference-community/docker_images/paddlenlp
hf_public_repos/api-inference-community/docker_images/paddlenlp/tests/test_api.py
import os from typing import Dict from unittest import TestCase, skipIf from app.main import ALLOWED_TASKS, get_pipeline # Must contain at least one example of each implemented pipeline # Tests do not check the actual values of the model output, so small dummy # models are recommended for faster tests. TESTABLE_MODELS: Dict[str, str] = { "conversational": "PaddleCI/tiny-random-plato-mini", "fill-mask": "PaddleCI/tiny-random-ernie", "summarization": "PaddleCI/tiny-random-unimo-text-1.0", "zero-shot-classification": "PaddleCI/tiny-random-ernie", } ALL_TASKS = { "audio-classification", "audio-to-audio", "automatic-speech-recognition", "feature-extraction", "image-classification", "question-answering", "sentence-similarity", "speech-segmentation", "tabular-classification", "tabular-regression", "text-classification", "text-to-image", "text-to-speech", "token-classification", "conversational", "feature-extraction", "question-answering", "sentence-similarity", "fill-mask", "table-question-answering", "summarization", "text2text-generation", "text-classification", "text-to-image", "text-to-speech", "token-classification", "zero-shot-classification", } class PipelineTestCase(TestCase): @skipIf( os.path.dirname(os.path.dirname(__file__)).endswith("common"), "common is a special case", ) def test_has_at_least_one_task_enabled(self): self.assertGreater( len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task" ) def test_unsupported_tasks(self): unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys() for unsupported_task in unsupported_tasks: with self.subTest(msg=unsupported_task, task=unsupported_task): os.environ["TASK"] = unsupported_task os.environ["MODEL_ID"] = "XX" with self.assertRaises(EnvironmentError): get_pipeline()
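For reference, a minimal sketch of how these two environment variables drive pipeline construction at runtime (assuming you are inside the paddlenlp image's environment with its requirements installed; the model id is just the tiny test checkpoint listed in `TESTABLE_MODELS` above):

```python
import os

# Pick a task/model pair the image supports; tiny test checkpoints keep this fast.
os.environ["TASK"] = "fill-mask"
os.environ["MODEL_ID"] = "PaddleCI/tiny-random-ernie"

# app.main reads both variables when building the pipeline, as the tests above rely on.
from app.main import get_pipeline

pipeline = get_pipeline()
```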
0
hf_public_repos/api-inference-community
hf_public_repos/api-inference-community/scripts/export_tasks.py
"""Exports a library -> supported tasks mapping in JSON format. This script - parses the source code of a library's app/main.py and extracts the AST - finds the ALLOWED_TASKS variable and get all the keys. - prints the library name as well as its tasks in JSON format. Note that the transformer library is not included in the output as we can assume it supports all tasks. This is done as the transformers API codebase is not in this repository. """ import ast import collections import os import pathlib import json lib_to_task_map = collections.defaultdict(list) def _extract_tasks(library_name, variable_name, value): """Extract supported tasks of the library. Args: library_name: The name of the library (e.g. paddlenlp) variable_name: The name of the Python variable (e.g. ALLOWED_TASKS) value: The AST of the variable's Python value. """ if variable_name == "ALLOWED_TASKS": if isinstance(value, ast.Dict): for key in value.keys: lib_to_task_map[library_name].append(key.value) def traverse_global_assignments(library_name, file_content, handler): """Traverse all global assignments and apply handler on each of them. Args: library_name: The name of the library (e.g. paddlenlp) file_content: The content of app/main.py file in string. handler: A callback that processes the AST. """ for element in ast.parse(file_content).body: # Typical case, e.g. TARGET_ID: Type = VALUE if isinstance(element, ast.AnnAssign): handler(library_name, element.target.id, element.value) # Just in case user omitted the type annotation # Unpacking and multi-variable assignment is rare so not handled # e.g. TARGET_ID = VALUE elif isinstance(element, ast.Assign): target = element.targets[0] if isinstance(target, ast.Name): handler(library_name, target.id, element.value) if __name__ == "__main__": root = pathlib.Path(__file__).parent.parent.resolve() libs = os.listdir(root / "docker_images") libs.remove("common") for lib in libs: with open(root / "docker_images" / lib / "app/main.py") as f: content = f.read() traverse_global_assignments(lib, content, _extract_tasks) output = json.dumps(lib_to_task_map, sort_keys=True, indent=4) print(output)
0
hf_public_repos
hf_public_repos/diffusers/setup.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/main/setup.py To create the package for PyPI. 1. Run `make pre-release` (or `make pre-patch` for a patch release) then run `make fix-copies` to fix the index of the documentation. If releasing on a special branch, copy the updated README.md on the main branch for the commit you will make for the post-release and run `make fix-copies` on the main branch as well. 2. Unpin specific versions from setup.py that use a git install. 3. Checkout the release branch (v<RELEASE>-release, for example v4.19-release), and commit these changes with the message: "Release: <RELEASE>" and push. 4. Manually trigger the "Nightly and release tests on main/release branch" workflow from the release branch. Wait for the tests to complete. We can safely ignore the known test failures. 5. Wait for the tests on main to be completed and be green (otherwise revert and fix bugs). 6. Add a tag in git to mark the release: "git tag v<RELEASE> -m 'Adds tag v<RELEASE> for PyPI'" Push the tag to git: git push --tags origin v<RELEASE>-release 7. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). For the wheel, run: "python setup.py bdist_wheel" in the top level directory (This will build a wheel for the Python version you use to build it). For the sources, run: "python setup.py sdist" You should now have a /dist directory with both .whl and .tar.gz source versions. Long story cut short, you need to run both before you can upload the distribution to the test PyPI and the actual PyPI servers: python setup.py bdist_wheel && python setup.py sdist 8. Check that everything looks correct by uploading the package to the PyPI test server: twine upload dist/* -r pypitest (pypi suggests using twine as other methods upload files via plaintext.) You may have to specify the repository url, use the following command then: twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/ Check that you can install it in a virtualenv by running: pip install -i https://testpypi.python.org/pypi diffusers If you are testing from a Colab Notebook, for instance, then do: pip install diffusers && pip uninstall diffusers pip install -i https://testpypi.python.org/pypi diffusers Check you can run the following commands: python -c "from diffusers import __version__; print(__version__)" python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('fusing/unet-ldm-dummy-update'); pipe()" python -c "from diffusers import DiffusionPipeline; pipe = DiffusionPipeline.from_pretrained('hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None); pipe('ah suh du')" python -c "from diffusers import *" 9. Upload the final version to the actual PyPI: twine upload dist/* -r pypi 10. 
Prepare the release notes and publish them on GitHub once everything is looking hunky-dory. You can use the following Space to fetch all the commits applicable for the release: https://huggingface.co/spaces/lysandre/github-release. Repo should be `huggingface/diffusers`. `tag` should be the previous release tag (v0.26.1, for example), and `branch` should be the latest release branch (v0.27.0-release, for example). It denotes all commits that have happened on branch v0.27.0-release after the tag v0.26.1 was created. 11. Run `make post-release` (or, for a patch release, `make post-patch`). If you were on a branch for the release, you need to go back to main before executing this. """ import os import re import sys from setuptools import Command, find_packages, setup # IMPORTANT: # 1. all dependencies should be listed here with their version requirements if any # 2. once modified, run: `make deps_table_update` to update src/diffusers/dependency_versions_table.py _deps = [ "Pillow", # keep the PIL.Image.Resampling deprecation away "accelerate>=0.31.0", "compel==0.1.8", "datasets", "filelock", "flax>=0.4.1", "hf-doc-builder>=0.3.0", "huggingface-hub>=0.23.2", "requests-mock==1.10.0", "importlib_metadata", "invisible-watermark>=0.2.0", "isort>=5.5.4", "jax>=0.4.1", "jaxlib>=0.4.1", "Jinja2", "k-diffusion>=0.0.12", "torchsde", "note_seq", "librosa", "numpy", "parameterized", "peft>=0.6.0", "protobuf>=3.20.3,<4", "pytest", "pytest-timeout", "pytest-xdist", "python>=3.8.0", "ruff==0.1.5", "safetensors>=0.3.1", "sentencepiece>=0.1.91,!=0.1.92", "GitPython<3.1.19", "scipy", "onnx", "regex!=2019.12.17", "requests", "tensorboard", "torch>=1.4", "torchvision", "transformers>=4.41.2", "urllib3<=2.0.0", "black", ] # this is a lookup table with items like: # # tokenizers: "huggingface-hub==0.8.0" # packaging: "packaging" # # some of the values are versioned whereas others aren't. deps = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)} # since we save this data in src/diffusers/dependency_versions_table.py it can be easily accessed from # anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with: # # python -c 'import sys; from diffusers.dependency_versions_table import deps; \ # print(" ".join([deps[x] for x in sys.argv[1:]]))' tokenizers datasets # # Just pass the desired package names to that script as it's shown with 2 packages above. # # If diffusers is not yet installed and the work is done from the cloned repo remember to add `PYTHONPATH=src` to the script above # # You can then feed this for example to `pip`: # # pip install -U $(python -c 'import sys; from diffusers.dependency_versions_table import deps; \ # print(" ".join([deps[x] for x in sys.argv[1:]]))' tokenizers datasets) # def deps_list(*pkgs): return [deps[pkg] for pkg in pkgs] class DepsTableUpdateCommand(Command): """ A custom command that updates the dependency table. usage: python setup.py deps_table_update """ description = "build runtime dependency table" user_options = [ # format: (long option, short option, description). ( "dep-table-update", None, "updates src/diffusers/dependency_versions_table.py", ), ] def initialize_options(self): pass def finalize_options(self): pass def run(self): entries = "\n".join([f' "{k}": "{v}",' for k, v in deps.items()]) content = [ "# THIS FILE HAS BEEN AUTOGENERATED. To update:", "# 1. modify the `_deps` dict in setup.py", "# 2. 
run `make deps_table_update`", "deps = {", entries, "}", "", ] target = "src/diffusers/dependency_versions_table.py" print(f"updating {target}") with open(target, "w", encoding="utf-8", newline="\n") as f: f.write("\n".join(content)) extras = {} extras["quality"] = deps_list("urllib3", "isort", "ruff", "hf-doc-builder") extras["docs"] = deps_list("hf-doc-builder") extras["training"] = deps_list("accelerate", "datasets", "protobuf", "tensorboard", "Jinja2", "peft") extras["test"] = deps_list( "compel", "GitPython", "datasets", "Jinja2", "invisible-watermark", "k-diffusion", "librosa", "parameterized", "pytest", "pytest-timeout", "pytest-xdist", "requests-mock", "safetensors", "sentencepiece", "scipy", "torchvision", "transformers", ) extras["torch"] = deps_list("torch", "accelerate") if os.name == "nt": # windows extras["flax"] = [] # jax is not supported on windows else: extras["flax"] = deps_list("jax", "jaxlib", "flax") extras["dev"] = ( extras["quality"] + extras["test"] + extras["training"] + extras["docs"] + extras["torch"] + extras["flax"] ) install_requires = [ deps["importlib_metadata"], deps["filelock"], deps["huggingface-hub"], deps["numpy"], deps["regex"], deps["requests"], deps["safetensors"], deps["Pillow"], ] version_range_max = max(sys.version_info[1], 10) + 1 setup( name="diffusers", version="0.32.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) description="State-of-the-art diffusion in PyTorch and JAX.", long_description=open("README.md", "r", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="deep learning diffusion jax pytorch stable diffusion audioldm", license="Apache 2.0 License", author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/diffusers/graphs/contributors)", author_email="diffusers@huggingface.co", url="https://github.com/huggingface/diffusers", package_dir={"": "src"}, packages=find_packages("src"), package_data={"diffusers": ["py.typed"]}, include_package_data=True, python_requires=">=3.8.0", install_requires=list(install_requires), extras_require=extras, entry_points={"console_scripts": ["diffusers-cli=diffusers.commands.diffusers_cli:main"]}, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Programming Language :: Python :: 3", ] + [f"Programming Language :: Python :: 3.{i}" for i in range(8, version_range_max)], cmdclass={"deps_table_update": DepsTableUpdateCommand}, )
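For a quick sanity check of the lookup-table comprehension used above, here is a tiny self-contained sketch with a few representative entries (illustrative only; the real table is regenerated from `_deps` via `make deps_table_update`):

```python
import re

# Same regex as in setup.py: group 2 is the bare package name, group 1 the full requirement.
_deps = ["accelerate>=0.31.0", "Pillow", "protobuf>=3.20.3,<4"]
deps = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)}

assert deps == {
    "accelerate": "accelerate>=0.31.0",
    "Pillow": "Pillow",
    "protobuf": "protobuf>=3.20.3,<4",
}
```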
0
hf_public_repos
hf_public_repos/diffusers/Makefile
.PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!) export PYTHONPATH = src check_dirs := examples scripts src tests utils benchmarks modified_only_fixup: $(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs))) @if test -n "$(modified_py_files)"; then \ echo "Checking/fixing $(modified_py_files)"; \ ruff check $(modified_py_files) --fix; \ ruff format $(modified_py_files);\ else \ echo "No library .py files were modified"; \ fi # Update src/diffusers/dependency_versions_table.py deps_table_update: @python setup.py deps_table_update deps_table_check_updated: @md5sum src/diffusers/dependency_versions_table.py > md5sum.saved @python setup.py deps_table_update @md5sum -c --quiet md5sum.saved || (printf "\nError: the version dependency table is outdated.\nPlease run 'make fixup' or 'make style' and commit the changes.\n\n" && exit 1) @rm md5sum.saved # autogenerating code autogenerate_code: deps_table_update # Check that the repo is in a good state repo-consistency: python utils/check_dummies.py python utils/check_repo.py python utils/check_inits.py # this target runs checks on all files quality: ruff check $(check_dirs) setup.py ruff format --check $(check_dirs) setup.py doc-builder style src/diffusers docs/source --max_len 119 --check_only python utils/check_doc_toc.py # Format source code automatically and check is there are any problems left that need manual fixing extra_style_checks: python utils/custom_init_isort.py python utils/check_doc_toc.py --fix_and_overwrite # this target runs checks on all files and potentially modifies some of them style: ruff check $(check_dirs) setup.py --fix ruff format $(check_dirs) setup.py doc-builder style src/diffusers docs/source --max_len 119 ${MAKE} autogenerate_code ${MAKE} extra_style_checks # Super fast fix and check target that only works on relevant modified files since the branch was made fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency # Make marked copies of snippets of codes conform to the original fix-copies: python utils/check_copies.py --fix_and_overwrite python utils/check_dummies.py --fix_and_overwrite # Run tests for the library test: python -m pytest -n auto --dist=loadfile -s -v ./tests/ # Run tests for examples test-examples: python -m pytest -n auto --dist=loadfile -s -v ./examples/ # Release stuff pre-release: python utils/release.py pre-patch: python utils/release.py --patch post-release: python utils/release.py --post_release post-patch: python utils/release.py --post_release --patch
0
hf_public_repos
hf_public_repos/diffusers/PHILOSOPHY.md
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Philosophy 🧨 Diffusers provides **state-of-the-art** pretrained diffusion models across multiple modalities. Its purpose is to serve as a **modular toolbox** for both inference and training. We aim to build a library that stands the test of time and therefore take API design very seriously. In a nutshell, Diffusers is built to be a natural extension of PyTorch. Therefore, most of our design choices are based on [PyTorch's Design Principles](https://pytorch.org/docs/stable/community/design.html#pytorch-design-philosophy). Let's go over the most important ones: ## Usability over Performance - While Diffusers has many built-in performance-enhancing features (see [Memory and Speed](https://huggingface.co/docs/diffusers/optimization/fp16)), models are always loaded with the highest precision and lowest optimization. Therefore, by default diffusion pipelines are always instantiated on CPU with float32 precision if not otherwise defined by the user. This ensures usability across different platforms and accelerators and means that no complex installations are required to run the library. - Diffusers aims to be a **light-weight** package and therefore has very few required dependencies, but many soft dependencies that can improve performance (such as `accelerate`, `safetensors`, `onnx`, etc...). We strive to keep the library as lightweight as possible so that it can be added without much concern as a dependency on other packages. - Diffusers prefers simple, self-explainable code over condensed, magic code. This means that short-hand code syntaxes such as lambda functions, and advanced PyTorch operators are often not desired. ## Simple over easy As PyTorch states, **explicit is better than implicit** and **simple is better than complex**. This design philosophy is reflected in multiple parts of the library: - We follow PyTorch's API with methods like [`DiffusionPipeline.to`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.to) to let the user handle device management. - Raising concise error messages is preferred to silently correct erroneous input. Diffusers aims at teaching the user, rather than making the library as easy to use as possible. - Complex model vs. scheduler logic is exposed instead of magically handled inside. Schedulers/Samplers are separated from diffusion models with minimal dependencies on each other. This forces the user to write the unrolled denoising loop. However, the separation allows for easier debugging and gives the user more control over adapting the denoising process or switching out diffusion models or schedulers. - Separately trained components of the diffusion pipeline, *e.g.* the text encoder, the UNet, and the variational autoencoder, each has their own model class. This forces the user to handle the interaction between the different model components, and the serialization format separates the model components into different files. 
However, this allows for easier debugging and customization. DreamBooth or Textual Inversion training is very simple thanks to Diffusers' ability to separate single components of the diffusion pipeline. ## Tweakable, contributor-friendly over abstraction For large parts of the library, Diffusers adopts an important design principle of the [Transformers library](https://github.com/huggingface/transformers), which is to prefer copy-pasted code over hasty abstractions. This design principle is very opinionated and stands in stark contrast to popular design principles such as [Don't repeat yourself (DRY)](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself). In short, just like Transformers does for modeling files, Diffusers prefers to keep an extremely low level of abstraction and very self-contained code for pipelines and schedulers. Functions, long code blocks, and even classes can be copied across multiple files which at first can look like a bad, sloppy design choice that makes the library unmaintainable. **However**, this design has proven to be extremely successful for Transformers and makes a lot of sense for community-driven, open-source machine learning libraries because: - Machine Learning is an extremely fast-moving field in which paradigms, model architectures, and algorithms are changing rapidly, which therefore makes it very difficult to define long-lasting code abstractions. - Machine Learning practitioners like to be able to quickly tweak existing code for ideation and research and therefore prefer self-contained code over one that contains many abstractions. - Open-source libraries rely on community contributions and therefore must build a library that is easy to contribute to. The more abstract the code, the more dependencies, the harder to read, and the harder to contribute to. Contributors simply stop contributing to very abstract libraries out of fear of breaking vital functionality. If contributing to a library cannot break other fundamental code, not only is it more inviting for potential new contributors, but it is also easier to review and contribute to multiple parts in parallel. At Hugging Face, we call this design the **single-file policy** which means that almost all of the code of a certain class should be written in a single, self-contained file. To read more about the philosophy, you can have a look at [this blog post](https://huggingface.co/blog/transformers-design-philosophy). In Diffusers, we follow this philosophy for both pipelines and schedulers, but only partly for diffusion models. The reason we don't follow this design fully for diffusion models is because almost all diffusion pipelines, such as [DDPM](https://huggingface.co/docs/diffusers/api/pipelines/ddpm), [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview#stable-diffusion-pipelines), [unCLIP (DALL·E 2)](https://huggingface.co/docs/diffusers/api/pipelines/unclip) and [Imagen](https://imagen.research.google/) all rely on the same diffusion model, the [UNet](https://huggingface.co/docs/diffusers/api/models/unet2d-cond). Great, now you should have generally understood why 🧨 Diffusers is designed the way it is 🤗. We try to apply these design principles consistently across the library. Nevertheless, there are some minor exceptions to the philosophy or some unlucky design choices. 
If you have feedback regarding the design, we would ❤️ to hear it [directly on GitHub](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=). ## Design Philosophy in Details Now, let's look a bit into the nitty-gritty details of the design philosophy. Diffusers essentially consists of three major classes: [pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines), [models](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models), and [schedulers](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers). Let's walk through more detailed design decisions for each class. ### Pipelines Pipelines are designed to be easy to use (therefore do not follow [*Simple over easy*](#simple-over-easy) 100%), are not feature complete, and should loosely be seen as examples of how to use [models](#models) and [schedulers](#schedulers) for inference. The following design principles are followed: - Pipelines follow the single-file policy. All pipelines can be found in individual directories under src/diffusers/pipelines. One pipeline folder corresponds to one diffusion paper/project/release. Multiple pipeline files can be gathered in one pipeline folder, as it’s done for [`src/diffusers/pipelines/stable-diffusion`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines/stable_diffusion). If pipelines share similar functionality, one can make use of the [# Copied from mechanism](https://github.com/huggingface/diffusers/blob/125d783076e5bd9785beb05367a2d2566843a271/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py#L251). - Pipelines all inherit from [`DiffusionPipeline`]. - Every pipeline consists of different model and scheduler components, that are documented in the [`model_index.json` file](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/model_index.json), are accessible under the same name as attributes of the pipeline and can be shared between pipelines with [`DiffusionPipeline.components`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.components) function. - Every pipeline should be loadable via the [`DiffusionPipeline.from_pretrained`](https://huggingface.co/docs/diffusers/main/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained) function. - Pipelines should be used **only** for inference. - Pipelines should be very readable, self-explanatory, and easy to tweak. - Pipelines should be designed to build on top of each other and be easy to integrate into higher-level APIs. - Pipelines are **not** intended to be feature-complete user interfaces. For feature-complete user interfaces one should rather have a look at [InvokeAI](https://github.com/invoke-ai/InvokeAI), [Diffuzers](https://github.com/abhishekkrthakur/diffuzers), and [lama-cleaner](https://github.com/Sanster/lama-cleaner). - Every pipeline should have one and only one way to run it via a `__call__` method. The naming of the `__call__` arguments should be shared across all pipelines. - Pipelines should be named after the task they are intended to solve. - In almost all cases, novel diffusion pipelines shall be implemented in a new pipeline folder/file. ### Models Models are designed as configurable toolboxes that are natural extensions of [PyTorch's Module class](https://pytorch.org/docs/stable/generated/torch.nn.Module.html). They only partly follow the **single-file policy**. 
The following design principles are followed: - Models correspond to **a type of model architecture**. *E.g.* the [`UNet2DConditionModel`] class is used for all UNet variations that expect 2D image inputs and are conditioned on some context. - All models can be found in [`src/diffusers/models`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/models) and every model architecture shall be defined in its file, e.g. [`unets/unet_2d_condition.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unets/unet_2d_condition.py), [`transformers/transformer_2d.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/transformers/transformer_2d.py), etc... - Models **do not** follow the single-file policy and should make use of smaller model building blocks, such as [`attention.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py), [`resnet.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/resnet.py), [`embeddings.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/embeddings.py), etc... **Note**: This is in stark contrast to Transformers' modeling files and shows that models do not really follow the single-file policy. - Models intend to expose complexity, just like PyTorch's `Module` class, and give clear error messages. - Models all inherit from `ModelMixin` and `ConfigMixin`. - Models can be optimized for performance when it doesn’t demand major code changes, keep backward compatibility, and give significant memory or compute gain. - Models should by default have the highest precision and lowest performance setting. - To integrate new model checkpoints whose general architecture can be classified as an architecture that already exists in Diffusers, the existing model architecture shall be adapted to make it work with the new checkpoint. One should only create a new file if the model architecture is fundamentally different. - Models should be designed to be easily extendable to future changes. This can be achieved by limiting public function arguments, configuration arguments, and "foreseeing" future changes, *e.g.* it is usually better to add `string` "...type" arguments that can easily be extended to new future types instead of boolean `is_..._type` arguments. Only the minimum amount of changes shall be made to existing architectures to make a new model checkpoint work. - The model design is a difficult trade-off between keeping code readable and concise and supporting many model checkpoints. For most parts of the modeling code, classes shall be adapted for new model checkpoints, while there are some exceptions where it is preferred to add new classes to make sure the code is kept concise and readable long-term, such as [UNet blocks](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unets/unet_2d_blocks.py) and [Attention processors](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). ### Schedulers Schedulers are responsible to guide the denoising process for inference as well as to define a noise schedule for training. They are designed as individual classes with loadable configuration files and strongly follow the **single-file policy**. The following design principles are followed: - All schedulers are found in [`src/diffusers/schedulers`](https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers). 
- Schedulers are **not** allowed to import from large utils files and shall be kept very self-contained. - One scheduler Python file corresponds to one scheduler algorithm (as might be defined in a paper). - If schedulers share similar functionalities, we can make use of the `# Copied from` mechanism. - Schedulers all inherit from `SchedulerMixin` and `ConfigMixin`. - Schedulers can be easily swapped out with the [`ConfigMixin.from_config`](https://huggingface.co/docs/diffusers/main/en/api/configuration#diffusers.ConfigMixin.from_config) method as explained in detail [here](./docs/source/en/using-diffusers/schedulers.md). - Every scheduler has to have a `set_num_inference_steps`, and a `step` function. `set_num_inference_steps(...)` has to be called before every denoising process, *i.e.* before `step(...)` is called. - Every scheduler exposes the timesteps to be "looped over" via a `timesteps` attribute, which is an array of timesteps the model will be called upon. - The `step(...)` function takes a predicted model output and the "current" sample (x_t) and returns the "previous", slightly more denoised sample (x_t-1). - Given the complexity of diffusion schedulers, the `step` function does not expose all the complexity and can be a bit of a "black box". - In almost all cases, novel schedulers shall be implemented in a new scheduling file.
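To make the scheduler contract above concrete, here is a minimal sketch of the unrolled denoising loop that the model/scheduler separation implies. It uses a toy, randomly initialized `UNet2DModel` and a `DDPMScheduler` purely for illustration (in current releases the preparation method is called `set_timesteps`); it is not a recipe for any particular pipeline:

```python
import torch
from diffusers import DDPMScheduler, UNet2DModel

# Toy components; real use would load pretrained weights instead.
model = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = DDPMScheduler(num_train_timesteps=1000)

scheduler.set_timesteps(50)         # populates the `timesteps` attribute for inference
sample = torch.randn(1, 3, 32, 32)  # start from pure noise (x_T)

for t in scheduler.timesteps:
    with torch.no_grad():
        noise_pred = model(sample, t).sample                    # predicted model output
    sample = scheduler.step(noise_pred, t, sample).prev_sample  # x_t -> x_(t-1)
```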
0
hf_public_repos
hf_public_repos/diffusers/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
0
hf_public_repos
hf_public_repos/diffusers/MANIFEST.in
include LICENSE include src/diffusers/utils/model_card_template.md
0
hf_public_repos
hf_public_repos/diffusers/CONTRIBUTING.md
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # How to contribute to Diffusers 🧨 We ❤️ contributions from the open-source community! Everyone is welcome, and all types of participation –not just code– are valued and appreciated. Answering questions, helping others, reaching out, and improving the documentation are all immensely valuable to the community, so don't be afraid and get involved if you're up for it! Everyone is encouraged to start by saying 👋 in our public Discord channel. We discuss the latest trends in diffusion models, ask questions, show off personal projects, help each other with contributions, or just hang out ☕. <a href="https://discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=Discord&logoColor=white"></a> Whichever way you choose to contribute, we strive to be part of an open, welcoming, and kind community. Please, read our [code of conduct](https://github.com/huggingface/diffusers/blob/main/CODE_OF_CONDUCT.md) and be mindful to respect it during your interactions. We also recommend you become familiar with the [ethical guidelines](https://huggingface.co/docs/diffusers/conceptual/ethical_guidelines) that guide our project and ask you to adhere to the same principles of transparency and responsibility. We enormously value feedback from the community, so please do not be afraid to speak up if you believe you have valuable feedback that can help improve the library - every message, comment, issue, and pull request (PR) is read and considered. ## Overview You can contribute in many ways ranging from answering questions on issues to adding new diffusion models to the core library. In the following, we give an overview of different ways to contribute, ranked by difficulty in ascending order. All of them are valuable to the community. * 1. Asking and answering questions on [the Diffusers discussion forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers) or on [Discord](https://discord.gg/G7tWnz98XR). * 2. Opening new issues on [the GitHub Issues tab](https://github.com/huggingface/diffusers/issues/new/choose). * 3. Answering issues on [the GitHub Issues tab](https://github.com/huggingface/diffusers/issues). * 4. Fix a simple issue, marked by the "Good first issue" label, see [here](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22). * 5. Contribute to the [documentation](https://github.com/huggingface/diffusers/tree/main/docs/source). * 6. Contribute a [Community Pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3Acommunity-examples). * 7. Contribute to the [examples](https://github.com/huggingface/diffusers/tree/main/examples). * 8. Fix a more difficult issue, marked by the "Good second issue" label, see [here](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+second+issue%22). * 9. 
Add a new pipeline, model, or scheduler, see ["New Pipeline/Model"](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) and ["New scheduler"](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) issues. For this contribution, please have a look at [Design Philosophy](https://github.com/huggingface/diffusers/blob/main/PHILOSOPHY.md). As said before, **all contributions are valuable to the community**. In the following, we will explain each contribution a bit more in detail. For all contributions 4-9, you will need to open a PR. It is explained in detail how to do so in [Opening a pull request](#how-to-open-a-pr). ### 1. Asking and answering questions on the Diffusers discussion forum or on the Diffusers Discord Any question or comment related to the Diffusers library can be asked on the [discussion forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/) or on [Discord](https://discord.gg/G7tWnz98XR). Such questions and comments include (but are not limited to): - Reports of training or inference experiments in an attempt to share knowledge - Presentation of personal projects - Questions to non-official training examples - Project proposals - General feedback - Paper summaries - Asking for help on personal projects that build on top of the Diffusers library - General questions - Ethical questions regarding diffusion models - ... Every question that is asked on the forum or on Discord actively encourages the community to publicly share knowledge and might very well help a beginner in the future who has the same question you're having. Please do pose any questions you might have. In the same spirit, you are of immense help to the community by answering such questions because this way you are publicly documenting knowledge for everybody to learn from. **Please** keep in mind that the more effort you put into asking or answering a question, the higher the quality of the publicly documented knowledge. In the same way, well-posed and well-answered questions create a high-quality knowledge database accessible to everybody, while badly posed questions or answers reduce the overall quality of the public knowledge database. In short, a high quality question or answer is *precise*, *concise*, *relevant*, *easy-to-understand*, *accessible*, and *well-formatted/well-posed*. For more information, please have a look through the [How to write a good issue](#how-to-write-a-good-issue) section. **NOTE about channels**: [*The forum*](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) is much better indexed by search engines, such as Google. Posts are ranked by popularity rather than chronologically. Hence, it's easier to look up questions and answers that we posted some time ago. In addition, questions and answers posted in the forum can easily be linked to. In contrast, *Discord* has a chat-like format that invites fast back-and-forth communication. While it will most likely take less time for you to get an answer to your question on Discord, your question won't be visible anymore over time. Also, it's much harder to find information that was posted a while back on Discord. We therefore strongly recommend using the forum for high-quality questions and answers in an attempt to create long-lasting knowledge for the community. 
If discussions on Discord lead to very interesting answers and conclusions, we recommend posting the results on the forum to make the information more available for future readers. ### 2. Opening new issues on the GitHub issues tab The 🧨 Diffusers library is robust and reliable thanks to the users who notify us of the problems they encounter. So thank you for reporting an issue. Remember, GitHub issues are reserved for technical questions directly related to the Diffusers library, bug reports, feature requests, or feedback on the library design. In a nutshell, this means that everything that is **not** related to the **code of the Diffusers library** (including the documentation) should **not** be asked on GitHub, but rather on either the [forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or [Discord](https://discord.gg/G7tWnz98XR). **Please consider the following guidelines when opening a new issue**: - Make sure you have searched whether your issue has already been asked before (use the search bar on GitHub under Issues). - Please never report a new issue on another (related) issue. If another issue is highly related, please open a new issue nevertheless and link to the related issue. - Make sure your issue is written in English. Please use one of the great, free online translation services, such as [DeepL](https://www.deepl.com/translator) to translate from your native language to English if you are not comfortable in English. - Check whether your issue might be solved by updating to the newest Diffusers version. Before posting your issue, please make sure that `python -c "import diffusers; print(diffusers.__version__)"` is higher or matches the latest Diffusers version. - Remember that the more effort you put into opening a new issue, the higher the quality of your answer will be and the better the overall quality of the Diffusers issues. New issues usually include the following. #### 2.1. Reproducible, minimal bug reports A bug report should always have a reproducible code snippet and be as minimal and concise as possible. This means in more detail: - Narrow the bug down as much as you can, **do not just dump your whole code file**. - Format your code. - Do not include any external libraries except for Diffusers depending on them. - **Always** provide all necessary information about your environment; for this, you can run: `diffusers-cli env` in your shell and copy-paste the displayed information to the issue. - Explain the issue. If the reader doesn't know what the issue is and why it is an issue, she cannot solve it. - **Always** make sure the reader can reproduce your issue with as little effort as possible. If your code snippet cannot be run because of missing libraries or undefined variables, the reader cannot help you. Make sure your reproducible code snippet is as minimal as possible and can be copy-pasted into a simple Python shell. - If in order to reproduce your issue a model and/or dataset is required, make sure the reader has access to that model or dataset. You can always upload your model or dataset to the [Hub](https://huggingface.co) to make it easily downloadable. Try to keep your model and dataset as small as possible, to make the reproduction of your issue as effortless as possible. For more information, please have a look through the [How to write a good issue](#how-to-write-a-good-issue) section. 
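As a rough illustration of the shape such a snippet can take (the checkpoint below is just a tiny testing model on the Hub, chosen to keep the reproduction fast; swap in whatever actually triggers your bug):

```python
from diffusers import DiffusionPipeline

# Tiny checkpoint so the reproduction downloads and runs quickly.
pipe = DiffusionPipeline.from_pretrained(
    "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
)
image = pipe("a prompt that triggers the bug", num_inference_steps=2).images[0]
print(type(image))
```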
You can open a bug report [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=bug&projects=&template=bug-report.yml). #### 2.2. Feature requests A world-class feature request addresses the following points: 1. Motivation first: * Is it related to a problem/frustration with the library? If so, please explain why. Providing a code snippet that demonstrates the problem is best. * Is it related to something you would need for a project? We'd love to hear about it! * Is it something you worked on and think could benefit the community? Awesome! Tell us what problem it solved for you. 2. Write a *full paragraph* describing the feature; 3. Provide a **code snippet** that demonstrates its future use; 4. In case this is related to a paper, please attach a link; 5. Attach any additional information (drawings, screenshots, etc.) you think may help. You can open a feature request [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=). #### 2.3 Feedback Feedback about the library design and why it is good or not good helps the core maintainers immensely to build a user-friendly library. To understand the philosophy behind the current design philosophy, please have a look [here](https://huggingface.co/docs/diffusers/conceptual/philosophy). If you feel like a certain design choice does not fit with the current design philosophy, please explain why and how it should be changed. If a certain design choice follows the design philosophy too much, hence restricting use cases, explain why and how it should be changed. If a certain design choice is very useful for you, please also leave a note as this is great feedback for future design decisions. You can open an issue about feedback [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=). #### 2.4 Technical questions Technical questions are mainly about why certain code of the library was written in a certain way, or what a certain part of the code does. Please make sure to link to the code in question and please provide detail on why this part of the code is difficult to understand. You can open an issue about a technical question [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=bug&template=bug-report.yml). #### 2.5 Proposal to add a new model, scheduler, or pipeline If the diffusion model community released a new model, pipeline, or scheduler that you would like to see in the Diffusers library, please provide the following information: * Short description of the diffusion pipeline, model, or scheduler and link to the paper or public release. * Link to any of its open-source implementation. * Link to the model weights if they are available. If you are willing to contribute to the model yourself, let us know so we can best guide you. Also, don't forget to tag the original author of the component (model, scheduler, pipeline, etc.) by GitHub handle if you can find it. You can open a request for a model/pipeline/scheduler [here](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=New+model%2Fpipeline%2Fscheduler&template=new-model-addition.yml). ### 3. Answering issues on the GitHub issues tab Answering issues on GitHub might require some technical knowledge of Diffusers, but we encourage everybody to give it a try even if you are not 100% certain that your answer is correct. Some tips to give a high-quality answer to an issue: - Be as concise and minimal as possible. - Stay on topic. 
An answer to the issue should concern the issue and only the issue. - Provide links to code, papers, or other sources that prove or encourage your point. - Answer in code. If a simple code snippet is the answer to the issue or shows how the issue can be solved, please provide a fully reproducible code snippet. Also, many issues tend to be simply off-topic, duplicates of other issues, or irrelevant. It is of great help to the maintainers if you can answer such issues by encouraging the author to be more precise, providing a link to the duplicate issue, or redirecting them to [the forum](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) or [Discord](https://discord.gg/G7tWnz98XR). If you have verified that a reported bug is correct and requires a fix in the source code, please have a look at the next sections. For all of the following contributions, you will need to open a PR. How to do so is explained in detail in the [Opening a pull request](#how-to-open-a-pr) section. ### 4. Fixing a "Good first issue" *Good first issues* are marked by the [Good first issue](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) label. Usually, the issue already explains how a potential solution should look so that it is easier to fix. If the issue hasn't been closed and you would like to try to fix it, you can just leave a message saying "I would like to try this issue.". There are usually three scenarios: - a.) The issue description already proposes a fix. In this case, and if the solution makes sense to you, you can open a PR or draft PR to fix it. - b.) The issue description does not propose a fix. In this case, you can ask what a proposed fix could look like, and someone from the Diffusers team should answer shortly. If you have a good idea of how to fix it, feel free to directly open a PR. - c.) There is already an open PR to fix the issue, but the issue hasn't been closed yet. If the PR has gone stale, you can simply open a new PR and link to the stale PR. PRs often go stale when the original contributor who wanted to fix the issue suddenly cannot find the time anymore to proceed. This often happens in open source and is very normal. In this case, the community will be very happy if you give it a new try and leverage the knowledge of the existing PR. If there is already a PR and it is active, you can help the author by giving suggestions, reviewing the PR, or even asking whether you can contribute to the PR. ### 5. Contribute to the documentation A good library **always** has good documentation! The official documentation is often one of the first points of contact for new users of the library, and therefore contributing to the documentation is a **highly valuable contribution**. Contributing to the documentation can take many forms: - Correcting spelling or grammatical errors. - Correcting incorrect formatting of a docstring. If you see that the official documentation is weirdly displayed or a link is broken, we are very happy if you take some time to correct it. - Correcting the shape or dimensions of a docstring input or output tensor. - Clarifying documentation that is hard to understand or incorrect. - Updating outdated code examples. - Translating the documentation to another language.
Anything displayed on [the official Diffusers doc page](https://huggingface.co/docs/diffusers/index) is part of the official documentation and can be corrected and adjusted in the respective [documentation source](https://github.com/huggingface/diffusers/tree/main/docs/source). Please have a look at [this page](https://github.com/huggingface/diffusers/tree/main/docs) for instructions on how to verify changes made to the documentation locally. ### 6. Contribute a community pipeline [Pipelines](https://huggingface.co/docs/diffusers/api/pipelines/overview) are usually the first point of contact between the Diffusers library and the user. Pipelines are examples of how to use Diffusers [models](https://huggingface.co/docs/diffusers/api/models/overview) and [schedulers](https://huggingface.co/docs/diffusers/api/schedulers/overview). We support two types of pipelines: - Official Pipelines - Community Pipelines Both official and community pipelines follow the same design and consist of the same type of components. Official pipelines are tested and maintained by the core maintainers of Diffusers. Their code resides in [src/diffusers/pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines). In contrast, community pipelines are contributed and maintained purely by the **community** and are **not** tested. They reside in [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) and, while they can be accessed via the [PyPI diffusers package](https://pypi.org/project/diffusers/), their code is not part of the PyPI distribution. The reason for the distinction is that the core maintainers of the Diffusers library cannot maintain and test all possible ways diffusion models can be used for inference, but some of them may be of interest to the community. Officially released diffusion pipelines, such as Stable Diffusion, are added to the core src/diffusers/pipelines package, which ensures high-quality maintenance, no backward-breaking code changes, and testing. More bleeding-edge pipelines should be added as community pipelines. If usage of a community pipeline is high, the pipeline can be moved to the official pipelines upon request from the community. This is one of the ways we strive to be a community-driven library. To add a community pipeline, one should add a <name-of-the-community>.py file to [examples/community](https://github.com/huggingface/diffusers/tree/main/examples/community) and adapt the [examples/community/README.md](https://github.com/huggingface/diffusers/tree/main/examples/community/README.md) to include an example of the new pipeline, as sketched below. An example PR can be seen [here](https://github.com/huggingface/diffusers/pull/2400). Community pipeline PRs are only checked at a superficial level, and ideally they should be maintained by their original authors. Contributing a community pipeline is a great way to understand how Diffusers models and schedulers work. Having contributed a community pipeline is usually the first stepping stone to contributing an official pipeline to the core package.
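The sketch below is purely illustrative: the file name, class name, and arguments are made up, and the existing pipelines in examples/community remain the authoritative reference for how a community pipeline should actually be structured.

```python
# examples/community/my_minimal_pipeline.py (illustrative sketch only)
import torch
from diffusers import DiffusionPipeline


class MyMinimalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # Registering components lets save_pretrained/from_pretrained handle them automatically.
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 50, generator=None):
        # Start from pure noise shaped like the UNet's expected input.
        sample_size = self.unet.config.sample_size
        sample = torch.randn(
            (batch_size, self.unet.config.in_channels, sample_size, sample_size),
            generator=generator,
            device=self.device,
        )

        # Standard denoising loop: predict the noise residual, then let the scheduler step backwards.
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.scheduler.timesteps:
            noise_pred = self.unet(sample, t).sample
            sample = self.scheduler.step(noise_pred, t, sample).prev_sample

        return sample
```

Once merged, community pipelines can typically be loaded at runtime via the `custom_pipeline` argument of `DiffusionPipeline.from_pretrained`; the examples/community README documents the exact usage.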
### 7. Contribute to training examples Diffusers examples are a collection of training scripts that reside in [examples](https://github.com/huggingface/diffusers/tree/main/examples). We support two types of training examples: - Official training examples - Research training examples Research training examples are located in [examples/research_projects](https://github.com/huggingface/diffusers/tree/main/examples/research_projects), whereas official training examples include all folders under [examples](https://github.com/huggingface/diffusers/tree/main/examples) except the `research_projects` and `community` folders. The official training examples are maintained by the Diffusers core maintainers, whereas the research training examples are maintained by the community. This is because of the same reasons put forward in [6. Contribute a community pipeline](#6-contribute-a-community-pipeline) for official pipelines vs. community pipelines: it is not feasible for the core maintainers to maintain all possible training methods for diffusion models. If the Diffusers core maintainers and the community consider a certain training paradigm to be too experimental or not popular enough, the corresponding training code should be put in the `research_projects` folder and maintained by the author. Both official training and research examples consist of a directory that contains one or more training scripts, a `requirements.txt` file, and a `README.md` file. In order for the user to make use of the training examples, it is required to clone the repository: ```bash git clone https://github.com/huggingface/diffusers ``` as well as to install all additional dependencies required for training: ```bash cd diffusers pip install -r examples/<your-example-folder>/requirements.txt ``` Therefore, when adding an example, the `requirements.txt` file should list all pip dependencies required for your training example so that once all those are installed, the user can run the example's training script. See, for example, the [DreamBooth `requirements.txt` file](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/requirements.txt). Training examples of the Diffusers library should adhere to the following philosophy: - All the code necessary to run the examples should be found in a single Python file. - One should be able to run the example from the command line with `python <your-example>.py --args`. - Examples should be kept simple and serve as **an example** of how to use Diffusers for training. The purpose of example scripts is **not** to create state-of-the-art diffusion models, but rather to reproduce known training schemes without adding too much custom logic. As a byproduct of this point, our examples also strive to serve as good educational materials. To contribute an example, it is highly recommended to look at already existing examples such as [dreambooth](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) to get an idea of what they should look like. We strongly advise contributors to make use of the [Accelerate library](https://github.com/huggingface/accelerate) as it's tightly integrated with Diffusers. Once an example script works, please make sure to add a comprehensive `README.md` that states how to use the example exactly. This README should include: - An example command showing how to run the example script, as shown [here](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth#running-locally-with-pytorch), for example. - A link to some training results (logs, models, etc.) that show what the user can expect, as shown [here](https://api.wandb.ai/report/patrickvonplaten/xm6cd5q5), for example.
- If you are adding a non-official/research training example, **please don't forget** to add a sentence stating that you are maintaining this training example, including your git handle, as shown [here](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/intel_opts#diffusers-examples-with-intel-optimizations). If you are contributing to the official training examples, please also make sure to add a test to [examples/test_examples.py](https://github.com/huggingface/diffusers/blob/main/examples/test_examples.py). This is not necessary for non-official training examples. ### 8. Fixing a "Good second issue" *Good second issues* are marked by the [Good second issue](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+second+issue%22) label. Good second issues are usually more complicated to solve than [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22). The issue description usually gives less guidance on how to fix the issue and requires a decent understanding of the library by the interested contributor. If you are interested in tackling a good second issue, feel free to open a PR to fix it and link the PR to the issue. If you see that a PR has already been opened for this issue but did not get merged, have a look to understand why it wasn't merged and try to open an improved PR. Good second issues are usually more difficult to get merged compared to good first issues, so don't hesitate to ask for help from the core maintainers. If your PR is almost finished, the core maintainers can also jump into your PR and commit to it in order to get it merged. ### 9. Adding pipelines, models, schedulers Pipelines, models, and schedulers are the most important pieces of the Diffusers library. They provide easy access to state-of-the-art diffusion technologies and thus allow the community to build powerful generative AI applications. By adding a new model, pipeline, or scheduler, you might enable a new powerful use case for any of the user interfaces relying on Diffusers, which can be of immense value for the whole generative AI ecosystem. Diffusers has a couple of open feature requests for all three components - feel free to browse them if you don't know yet what specific component you would like to add: - [Model or pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) - [Scheduler](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) Before adding any of the three components, it is strongly recommended that you give the [Philosophy guide](https://github.com/huggingface/diffusers/blob/main/PHILOSOPHY.md) a read to better understand the design of any of the three components. Please be aware that we cannot merge model, scheduler, or pipeline additions that strongly diverge from our design philosophy, as they will lead to API inconsistencies. If you fundamentally disagree with a design choice, please open a [Feedback issue](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feedback.md&title=) instead so that it can be discussed whether a certain design pattern/design choice shall be changed everywhere in the library and whether we shall update our design philosophy. Consistency across the library is very important for us.
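To give a rough structural idea of what a new scheduler contribution plugs into, here is a deliberately simplified, illustrative sketch; the class name and the update rule are placeholders, and the existing schedulers in src/diffusers/schedulers together with the Philosophy guide are the authoritative references.

```python
# Illustrative skeleton of a new scheduler; the update rule is a placeholder, not real denoising math.
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin, SchedulerOutput


class MyNewScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000):
        # @register_to_config stores the __init__ arguments on self.config.
        self.timesteps = torch.arange(num_train_timesteps - 1, -1, -1)

    def set_timesteps(self, num_inference_steps: int, device=None):
        # Select the subset of training timesteps visited at inference time.
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        self.timesteps = torch.arange(
            self.config.num_train_timesteps - 1, -1, -step_ratio, device=device
        )

    def step(self, model_output: torch.Tensor, timestep: int, sample: torch.Tensor) -> SchedulerOutput:
        # A real scheduler implements the update rule from its paper here.
        prev_sample = sample - model_output  # placeholder math
        return SchedulerOutput(prev_sample=prev_sample)
```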
Please make sure to add links to the original codebase/paper to the PR and ideally also ping the original author directly on the PR so that they can follow the progress and potentially help with questions. If you are unsure or stuck on the PR, don't hesitate to leave a message to ask for a first review or help. ## How to write a good issue **The better your issue is written, the higher the chances that it will be quickly resolved.** 1. Make sure that you've used the correct template for your issue. You can pick between *Bug Report*, *Feature Request*, *Feedback about API Design*, *New model/pipeline/scheduler addition*, *Forum*, or a blank issue. Make sure to pick the correct one when opening [a new issue](https://github.com/huggingface/diffusers/issues/new/choose). 2. **Be precise**: Give your issue a fitting title. Try to formulate your issue description as simply as possible. The more precise you are when submitting an issue, the less time it takes to understand the issue and potentially solve it. Make sure to open an issue for one issue only and not for multiple issues. If you found multiple issues, simply open multiple issues. If your issue is a bug, try to be as precise as possible about what bug it is - you should not just write "Error in diffusers". 3. **Reproducibility**: No reproducible code snippet == no solution. If you encounter a bug, maintainers **have to be able to reproduce** it. Make sure that you include a code snippet that can be copy-pasted into a Python interpreter to reproduce the issue. Make sure that your code snippet works, *i.e.* that there are no missing imports or missing links to images, etc. Your issue should contain an error message **and** a code snippet that can be copy-pasted without any changes to reproduce the exact same error message. If your issue is using local model weights or local data that cannot be accessed by the reader, the issue cannot be solved. If you cannot share your data or model, try to make a dummy model or dummy data. 4. **Minimalistic**: Try to help the reader as much as you can to understand the issue as quickly as possible by staying as concise as possible. Remove all code and all information that is irrelevant to the issue. If you have found a bug, try to create the simplest code example you can to demonstrate your issue; do not just dump your whole workflow into the issue as soon as you have found a bug. E.g., if you train a model and get an error at some point during the training, you should first try to understand what part of the training code is responsible for the error and try to reproduce it with a couple of lines. Try to use dummy data instead of full datasets. 5. Add links. If you are referring to a certain name, method, or model, make sure to provide a link so that the reader can better understand what you mean. If you are referring to a specific PR or issue, make sure to link it to your issue. Do not assume that the reader knows what you are talking about. The more links you add to your issue, the better. 6. Formatting. Make sure to format your issue nicely by putting code into Python code blocks and error messages into plain code blocks. See the [official GitHub formatting docs](https://docs.github.com/en/get-started/writing-on-github/getting-started-with-writing-and-formatting-on-github/basic-writing-and-formatting-syntax) for more information. 7. Think of your issue not as a ticket to be solved, but rather as a beautiful entry to a well-written encyclopedia.
Every added issue is a contribution to publicly available knowledge. By adding a nicely written issue you not only make it easier for maintainers to solve your issue, but you are helping the whole community to better understand a certain aspect of the library. ## How to write a good PR 1. Be a chameleon. Understand existing design patterns and syntax and make sure your code additions flow seamlessly into the existing code base. Pull requests that significantly diverge from existing design patterns or user interfaces will not be merged. 2. Be laser focused. A pull request should solve one problem and one problem only. Make sure to not fall into the trap of "also fixing another problem while we're adding it". It is much more difficult to review pull requests that solve multiple, unrelated problems at once. 3. If helpful, try to add a code snippet that displays an example of how your addition can be used. 4. The title of your pull request should be a summary of its contribution. 5. If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and people consulting the issue know you are working on it); 6. To indicate a work in progress please prefix the title with `[WIP]`. These are useful to avoid duplicated work, and to differentiate it from PRs ready to be merged; 7. Try to formulate and format your text as explained in [How to write a good issue](#how-to-write-a-good-issue). 8. Make sure existing tests pass; 9. Add high-coverage tests. No quality testing = no merge. - If you are adding new `@slow` tests, make sure they pass using `RUN_SLOW=1 python -m pytest tests/test_my_new_model.py`. CircleCI does not run the slow tests, but GitHub Actions does every night! 10. All public methods must have informative docstrings that work nicely with markdown. See [`pipeline_latent_diffusion.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py) for an example. 11. Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) or [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images) to place these files. If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images to this dataset. ## How to open a PR Before writing code, we strongly advise you to search through the existing PRs or issues to make sure that nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback. You will need basic `git` proficiency to be able to contribute to 🧨 Diffusers. `git` is not the easiest tool to use but it has the greatest manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro Git](https://git-scm.com/book/en/v2) is a very good reference. Follow these steps to start contributing ([supported Python versions](https://github.com/huggingface/diffusers/blob/42f25d601a910dceadaee6c44345896b4cfa9928/setup.py#L270)): 1. Fork the [repository](https://github.com/huggingface/diffusers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account. 2. 
Clone your fork to your local disk, and add the base repository as a remote: ```bash $ git clone git@github.com:<your GitHub handle>/diffusers.git $ cd diffusers $ git remote add upstream https://github.com/huggingface/diffusers.git ``` 3. Create a new branch to hold your development changes: ```bash $ git checkout -b a-descriptive-name-for-my-changes ``` **Do not** work on the `main` branch. 4. Set up a development environment by running the following command in a virtual environment: ```bash $ pip install -e ".[dev]" ``` If you have already cloned the repo, you might need to `git pull` to get the most recent changes in the library. 5. Develop the features on your branch. As you work on the features, you should make sure that the test suite passes. You should run the tests impacted by your changes like this: ```bash $ pytest tests/<TEST_TO_RUN>.py ``` Before you run the tests, please make sure you install the dependencies required for testing. You can do so with this command: ```bash $ pip install -e ".[test]" ``` You can also run the full test suite with the following command, but it takes a beefy machine to produce a result in a decent amount of time now that Diffusers has grown a lot. Here is the command for it: ```bash $ make test ``` 🧨 Diffusers relies on `ruff` and `isort` to format its source code consistently. After you make changes, apply automatic style corrections and code verifications that can't be automated in one go with: ```bash $ make style ``` 🧨 Diffusers also uses `ruff` and a few custom scripts to check for coding mistakes. Quality control runs in CI, however, you can also run the same checks with: ```bash $ make quality ``` Once you're happy with your changes, add changed files using `git add` and make a commit with `git commit` to record your changes locally: ```bash $ git add modified_file.py $ git commit -m "A descriptive message about your changes." ``` It is a good idea to sync your copy of the code with the original repository regularly. This way you can quickly account for changes: ```bash $ git pull upstream main ``` Push the changes to your account using: ```bash $ git push -u origin a-descriptive-name-for-my-changes ``` 6. Once you are satisfied, go to the webpage of your fork on GitHub. Click on 'Pull request' to send your changes to the project maintainers for review. 7. It's ok if maintainers ask you for changes. It happens to core contributors too! So everyone can see the changes in the Pull request, work in your local branch and push the changes to your fork. They will automatically appear in the pull request. ### Tests An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/diffusers/tree/main/tests). We like `pytest` and `pytest-xdist` because it's faster. From the root of the repository, here's how to run tests with `pytest` for the library: ```bash $ python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` In fact, that's how `make test` is implemented! You can specify a smaller set of tests in order to test only the feature you're working on. By default, slow tests are skipped. Set the `RUN_SLOW` environment variable to `yes` to run them. This will download many gigabytes of models — make sure you have enough disk space and a good Internet connection, or a lot of patience! 
```bash $ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` `unittest` is fully supported; here's how to run tests with it: ```bash $ python -m unittest discover -s tests -t . -v $ python -m unittest discover -s examples -t examples -v ``` ### Syncing forked main with upstream (HuggingFace) main When syncing the main branch of a forked repository, please follow these steps to avoid pinging the upstream repository, which adds reference notes to each upstream PR and sends unnecessary notifications to the developers involved in these PRs: 1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead, merge directly into the forked main. 2. If a PR is absolutely necessary, use the following steps after checking out your branch: ```bash $ git checkout -b your-branch-for-syncing $ git pull --squash --no-commit upstream main $ git commit -m '<your message without GitHub references>' $ git push --set-upstream origin your-branch-for-syncing ``` ### Style guide For documentation strings, 🧨 Diffusers follows the [Google style](https://google.github.io/styleguide/pyguide.html).
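As a purely illustrative sketch (the function and its arguments are invented for demonstration; see [`pipeline_latent_diffusion.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py) for real examples of the convention), a docstring in this style typically looks like:

```python
def rescale_sample(sample, scale: float = 1.0):
    r"""
    Rescales a sample tensor by a constant factor.

    Args:
        sample (`torch.Tensor`):
            The input sample to rescale.
        scale (`float`, *optional*, defaults to 1.0):
            The multiplicative factor applied to `sample`.

    Returns:
        `torch.Tensor`: The rescaled sample.
    """
    return sample * scale
```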
0
hf_public_repos
hf_public_repos/diffusers/CITATION.cff
cff-version: 1.2.0 title: 'Diffusers: State-of-the-art diffusion models' message: >- If you use this software, please cite it using the metadata from this file. type: software authors: - given-names: Patrick family-names: von Platen - given-names: Suraj family-names: Patil - given-names: Anton family-names: Lozhkov - given-names: Pedro family-names: Cuenca - given-names: Nathan family-names: Lambert - given-names: Kashif family-names: Rasul - given-names: Mishig family-names: Davaadorj - given-names: Dhruv family-names: Nair - given-names: Sayak family-names: Paul - given-names: Steven family-names: Liu - given-names: William family-names: Berman - given-names: Yiyi family-names: Xu - given-names: Thomas family-names: Wolf repository-code: 'https://github.com/huggingface/diffusers' abstract: >- Diffusers provides pretrained diffusion models across multiple modalities, such as vision and audio, and serves as a modular toolbox for inference and training of diffusion models. keywords: - deep-learning - pytorch - image-generation - hacktoberfest - diffusion - text2image - image2image - score-based-generative-modeling - stable-diffusion - stable-diffusion-diffusers license: Apache-2.0 version: 0.12.1
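As a usage note, this metadata can be converted into other citation formats with the third-party `cffconvert` tool (not part of Diffusers; double-check the exact flags against its own documentation), for example:

```bash
# Validate the CITATION.cff file and emit a BibTeX entry (run from the repository root)
pip install cffconvert
cffconvert --validate
cffconvert --format bibtex
```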
0
hf_public_repos
hf_public_repos/diffusers/README.md
<!--- Copyright 2022 - The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <p align="center"> <br> <img src="https://raw.githubusercontent.com/huggingface/diffusers/main/docs/source/en/imgs/diffusers_library.jpg" width="400"/> <br> <p> <p align="center"> <a href="https://github.com/huggingface/diffusers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue"></a> <a href="https://github.com/huggingface/diffusers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/diffusers.svg"></a> <a href="https://pepy.tech/project/diffusers"><img alt="GitHub release" src="https://static.pepy.tech/badge/diffusers/month"></a> <a href="CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg"></a> <a href="https://twitter.com/diffuserslib"><img alt="X account" src="https://img.shields.io/twitter/url/https/twitter.com/diffuserslib.svg?style=social&label=Follow%20%40diffuserslib"></a> </p> 🤗 Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. Whether you're looking for a simple inference solution or training your own diffusion models, 🤗 Diffusers is a modular toolbox that supports both. Our library is designed with a focus on [usability over performance](https://huggingface.co/docs/diffusers/conceptual/philosophy#usability-over-performance), [simple over easy](https://huggingface.co/docs/diffusers/conceptual/philosophy#simple-over-easy), and [customizability over abstractions](https://huggingface.co/docs/diffusers/conceptual/philosophy#tweakable-contributorfriendly-over-abstraction). 🤗 Diffusers offers three core components: - State-of-the-art [diffusion pipelines](https://huggingface.co/docs/diffusers/api/pipelines/overview) that can be run in inference with just a few lines of code. - Interchangeable noise [schedulers](https://huggingface.co/docs/diffusers/api/schedulers/overview) for different diffusion speeds and output quality. - Pretrained [models](https://huggingface.co/docs/diffusers/api/models/overview) that can be used as building blocks, and combined with schedulers, for creating your own end-to-end diffusion systems. ## Installation We recommend installing 🤗 Diffusers in a virtual environment from PyPI or Conda. For more details about installing [PyTorch](https://pytorch.org/get-started/locally/) and [Flax](https://flax.readthedocs.io/en/latest/#installation), please refer to their official documentation. 
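For example, creating and activating a virtual environment before running the install commands below might look like this (the environment name is arbitrary):

```bash
# Create and activate a virtual environment, then install inside it
python -m venv .venv
source .venv/bin/activate  # on Windows: .venv\Scripts\activate
```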
### PyTorch With `pip` (official package): ```bash pip install --upgrade diffusers[torch] ``` With `conda` (maintained by the community): ```sh conda install -c conda-forge diffusers ``` ### Flax With `pip` (official package): ```bash pip install --upgrade diffusers[flax] ``` ### Apple Silicon (M1/M2) support Please refer to the [How to use Stable Diffusion in Apple Silicon](https://huggingface.co/docs/diffusers/optimization/mps) guide. ## Quickstart Generating outputs is super easy with 🤗 Diffusers. To generate an image from text, use the `from_pretrained` method to load any pretrained diffusion model (browse the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) for 30,000+ checkpoints): ```python from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16) pipeline.to("cuda") pipeline("An image of a squirrel in Picasso style").images[0] ``` You can also dig into the models and schedulers toolbox to build your own diffusion system: ```python from diffusers import DDPMScheduler, UNet2DModel from PIL import Image import torch scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256") model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda") scheduler.set_timesteps(50) sample_size = model.config.sample_size noise = torch.randn((1, 3, sample_size, sample_size), device="cuda") input = noise for t in scheduler.timesteps: with torch.no_grad(): noisy_residual = model(input, t).sample prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample input = prev_noisy_sample image = (input / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy()[0] image = Image.fromarray((image * 255).round().astype("uint8")) image ``` Check out the [Quickstart](https://huggingface.co/docs/diffusers/quicktour) to launch your diffusion journey today! ## How to navigate the documentation | **Documentation** | **What can I learn?** | |---------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [Tutorial](https://huggingface.co/docs/diffusers/tutorials/tutorial_overview) | A basic crash course for learning how to use the library's most important features like using models and schedulers to build your own diffusion system, and training your own diffusion model. | | [Loading](https://huggingface.co/docs/diffusers/using-diffusers/loading) | Guides for how to load and configure all the components (pipelines, models, and schedulers) of the library, as well as how to use different schedulers. | | [Pipelines for inference](https://huggingface.co/docs/diffusers/using-diffusers/overview_techniques) | Guides for how to use pipelines for different inference tasks, batched generation, controlling generated outputs and randomness, and how to contribute a pipeline to the library. | | [Optimization](https://huggingface.co/docs/diffusers/optimization/fp16) | Guides for how to optimize your diffusion model to run faster and consume less memory. | | [Training](https://huggingface.co/docs/diffusers/training/overview) | Guides for how to train a diffusion model for different tasks with different training techniques. | ## Contribution We ❤️ contributions from the open-source community! 
If you want to contribute to this library, please check out our [Contribution guide](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md). You can look out for [issues](https://github.com/huggingface/diffusers/issues) you'd like to tackle to contribute to the library. - See [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) for general opportunities to contribute - See [New model/pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) to contribute exciting new diffusion models / diffusion pipelines - See [New scheduler](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22) Also, say 👋 in our public Discord channel <a href="https://discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a>. We discuss the hottest trends about diffusion models, help each other with contributions, personal projects or just hang out ☕. ## Popular Tasks & Pipelines <table> <tr> <th>Task</th> <th>Pipeline</th> <th>🤗 Hub</th> </tr> <tr style="border-top: 2px solid black"> <td>Unconditional Image Generation</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/ddpm"> DDPM </a></td> <td><a href="https://huggingface.co/google/ddpm-ema-church-256"> google/ddpm-ema-church-256 </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Text-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/text2img">Stable Diffusion Text-to-Image</a></td> <td><a href="https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5"> stable-diffusion-v1-5/stable-diffusion-v1-5 </a></td> </tr> <tr> <td>Text-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/unclip">unCLIP</a></td> <td><a href="https://huggingface.co/kakaobrain/karlo-v1-alpha"> kakaobrain/karlo-v1-alpha </a></td> </tr> <tr> <td>Text-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/deepfloyd_if">DeepFloyd IF</a></td> <td><a href="https://huggingface.co/DeepFloyd/IF-I-XL-v1.0"> DeepFloyd/IF-I-XL-v1.0 </a></td> </tr> <tr> <td>Text-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/kandinsky">Kandinsky</a></td> <td><a href="https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder"> kandinsky-community/kandinsky-2-2-decoder </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Text-guided Image-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/controlnet">ControlNet</a></td> <td><a href="https://huggingface.co/lllyasviel/sd-controlnet-canny"> lllyasviel/sd-controlnet-canny </a></td> </tr> <tr> <td>Text-guided Image-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/pix2pix">InstructPix2Pix</a></td> <td><a href="https://huggingface.co/timbrooks/instruct-pix2pix"> timbrooks/instruct-pix2pix </a></td> </tr> <tr> <td>Text-guided Image-to-Image</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/img2img">Stable Diffusion Image-to-Image</a></td> <td><a href="https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5"> stable-diffusion-v1-5/stable-diffusion-v1-5 </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Text-guided Image Inpainting</td> <td><a 
href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/inpaint">Stable Diffusion Inpainting</a></td> <td><a href="https://huggingface.co/runwayml/stable-diffusion-inpainting"> runwayml/stable-diffusion-inpainting </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Image Variation</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/image_variation">Stable Diffusion Image Variation</a></td> <td><a href="https://huggingface.co/lambdalabs/sd-image-variations-diffusers"> lambdalabs/sd-image-variations-diffusers </a></td> </tr> <tr style="border-top: 2px solid black"> <td>Super Resolution</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/upscale">Stable Diffusion Upscale</a></td> <td><a href="https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler"> stabilityai/stable-diffusion-x4-upscaler </a></td> </tr> <tr> <td>Super Resolution</td> <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/latent_upscale">Stable Diffusion Latent Upscale</a></td> <td><a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler"> stabilityai/sd-x2-latent-upscaler </a></td> </tr> </table> ## Popular libraries using 🧨 Diffusers - https://github.com/microsoft/TaskMatrix - https://github.com/invoke-ai/InvokeAI - https://github.com/InstantID/InstantID - https://github.com/apple/ml-stable-diffusion - https://github.com/Sanster/lama-cleaner - https://github.com/IDEA-Research/Grounded-Segment-Anything - https://github.com/ashawkey/stable-dreamfusion - https://github.com/deep-floyd/IF - https://github.com/bentoml/BentoML - https://github.com/bmaltais/kohya_ss - +14,000 other amazing GitHub repositories 💪 Thank you for using us ❤️. ## Credits This library concretizes previous work by many different authors and would not have been possible without their great research and implementations. We'd like to thank, in particular, the following implementations which have helped us in our development and without which the API could not have been as polished today: - @CompVis' latent diffusion models library, available [here](https://github.com/CompVis/latent-diffusion) - @hojonathanho original DDPM implementation, available [here](https://github.com/hojonathanho/diffusion) as well as the extremely useful translation into PyTorch by @pesser, available [here](https://github.com/pesser/pytorch_diffusion) - @ermongroup's DDIM implementation, available [here](https://github.com/ermongroup/ddim) - @yang-song's Score-VE and Score-VP implementations, available [here](https://github.com/yang-song/score_sde_pytorch) We also want to thank @heejkoo for the very helpful overview of papers, code and resources on diffusion models, available [here](https://github.com/heejkoo/Awesome-Diffusion-Models) as well as @crowsonkb and @rromb for useful discussions and insights. ## Citation ```bibtex @misc{von-platen-etal-2022-diffusers, author = {Patrick von Platen and Suraj Patil and Anton Lozhkov and Pedro Cuenca and Nathan Lambert and Kashif Rasul and Mishig Davaadorj and Dhruv Nair and Sayak Paul and William Berman and Yiyi Xu and Steven Liu and Thomas Wolf}, title = {Diffusers: State-of-the-art diffusion models}, year = {2022}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\url{https://github.com/huggingface/diffusers}} } ```
0
hf_public_repos
hf_public_repos/diffusers/pyproject.toml
[tool.ruff] line-length = 119 [tool.ruff.lint] # Never enforce `E501` (line length violations). ignore = ["C901", "E501", "E741", "F402", "F823"] select = ["C", "E", "F", "I", "W"] # Ignore import violations in all `__init__.py` files. [tool.ruff.lint.per-file-ignores] "__init__.py" = ["E402", "F401", "F403", "F811"] "src/diffusers/utils/dummy_*.py" = ["F401"] [tool.ruff.lint.isort] lines-after-imports = 2 known-first-party = ["diffusers"] [tool.ruff.format] # Like Black, use double quotes for strings. quote-style = "double" # Like Black, indent with spaces, rather than tabs. indent-style = "space" # Like Black, respect magic trailing commas. skip-magic-trailing-comma = false # Like Black, automatically detect the appropriate line ending. line-ending = "auto"
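As a usage note (not part of the configuration itself), these settings are picked up automatically when the linter and formatter are run from the repository root, for example:

```bash
# Lint and format using the settings defined in pyproject.toml
ruff check .
ruff format .
```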