language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/libtool_installation/package.py | {
"start": 472,
"end": 545
} | class ____(BuilderBase):
install_libtool_archives = True
| AutotoolsBuilder |
python | Textualize__textual | src/textual/command.py | {
"start": 4955,
"end": 9748
} | class ____(ABC):
"""Base class for command palette command providers.
To create new command provider, inherit from this class and implement
[`search`][textual.command.Provider.search].
"""
def __init__(self, screen: Screen[Any], match_style: Style | None = None) -> None:
"""Initialise the command provider.
Args:
screen: A reference to the active screen.
"""
if match_style is not None:
assert isinstance(
match_style, Style
), "match_style must be a Visual style (from textual.style import Style)"
self.__screen = screen
self.__match_style = match_style
self._init_task: Task | None = None
self._init_success = False
@property
def focused(self) -> Widget | None:
"""The currently-focused widget in the currently-active screen in the application.
If no widget has focus this will be `None`.
"""
return self.__screen.focused
@property
def screen(self) -> Screen[object]:
"""The currently-active screen in the application."""
return self.__screen
@property
def app(self) -> App[object]:
"""A reference to the application."""
return self.__screen.app
@property
def match_style(self) -> Style | None:
"""The preferred style to use when highlighting matching portions of the [`match_display`][textual.command.Hit.match_display]."""
return self.__match_style
def matcher(self, user_input: str, case_sensitive: bool = False) -> Matcher:
"""Create a [fuzzy matcher][textual.fuzzy.Matcher] for the given user input.
Args:
user_input: The text that the user has input.
case_sensitive: Should matching be case sensitive?
Returns:
A [fuzzy matcher][textual.fuzzy.Matcher] object for matching against candidate hits.
"""
return Matcher(
user_input,
match_style=self.match_style,
case_sensitive=case_sensitive,
)
def _post_init(self) -> None:
"""Internal method to run post init task."""
async def post_init_task() -> None:
"""Wrapper to post init that runs in a task."""
try:
await self.startup()
except Exception:
from rich.traceback import Traceback
self.app.log.error(Traceback())
else:
self._init_success = True
self._init_task = create_task(post_init_task())
async def _wait_init(self) -> None:
"""Wait for initialization."""
if self._init_task is not None:
await self._init_task
self._init_task = None
async def startup(self) -> None:
"""Called after the Provider is initialized, but before any calls to `search`."""
async def _search(self, query: str) -> Hits:
"""Internal method to perform search.
Args:
query: The user input to be matched.
Yields:
Instances of [`Hit`][textual.command.Hit].
"""
await self._wait_init()
if self._init_success:
# An empty search string is a discovery search, anything else is
# a conventional search.
hits = self.search(query) if query else self.discover()
async for hit in hits:
if hit is not NotImplemented:
yield hit
@abstractmethod
async def search(self, query: str) -> Hits:
"""A request to search for commands relevant to the given query.
Args:
query: The user input to be matched.
Yields:
Instances of [`Hit`][textual.command.Hit].
"""
yield NotImplemented
async def discover(self) -> Hits:
"""A default collection of hits for the provider.
Yields:
Instances of [`DiscoveryHit`][textual.command.DiscoveryHit].
Note:
This is different from
[`search`][textual.command.Provider.search] in that it should
yield [`DiscoveryHit`s][textual.command.DiscoveryHit] that
should be shown by default (before user input).
It is permitted to *not* implement this method.
"""
yield NotImplemented
async def _shutdown(self) -> None:
"""Internal method to call shutdown and log errors."""
try:
await self.shutdown()
except Exception:
from rich.traceback import Traceback
self.app.log.error(Traceback())
async def shutdown(self) -> None:
"""Called when the Provider is shutdown.
Use this method to perform an cleanup, if required.
"""
| Provider |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-zapier/llama_index/tools/zapier/base.py | {
"start": 235,
"end": 2627
} | class ____(BaseToolSpec):
"""Zapier tool spec."""
spec_functions = []
def __init__(
self, api_key: Optional[str] = None, oauth_access_token: Optional[str] = None
) -> None:
"""Initialize with parameters."""
if api_key:
self._headers = {"x-api-key": api_key}
elif oauth_access_token:
self._headers = {"Authorization": f"Bearer {oauth_access_token}"}
else:
raise ValueError("Must provide either api_key or oauth_access_token")
# Get the exposed actions from Zapier
actions = json.loads(self.list_actions())
if "results" not in actions:
raise ValueError(
"No Zapier actions exposed, visit https://nla.zapier.com/dev/actions/"
" to expose actions."
)
results = actions["results"]
# Register the actions as Tools
for action in results:
params = action["params"]
def function_action(id=action["id"], **kwargs):
return self.natural_language_query(id, **kwargs)
action_name = action["description"].split(": ")[1].replace(" ", "_")
function_action.__name__ = action_name
function_action.__doc__ = f"""
This is a Zapier Natural Language Action function wrapper.
The 'instructions' key is REQUIRED for all function calls.
The instructions key is a natural language string describing the action to be taken
The following are all of the valid arguments you can provide: {params}
Ignore the id field, it is provided for you.
If the returned error field is not null, interpret the error and try to fix it. Otherwise, inform the user of how they might fix it.
"""
setattr(self, action_name, function_action)
self.spec_functions.append(action_name)
def list_actions(self):
response = requests.get(
"https://nla.zapier.com/api/v1/dynamic/exposed/", headers=self._headers
)
return response.text
def natural_language_query(self, id: str, **kwargs):
response = requests.post(
ACTION_URL_TMPL.format(action_id=id),
headers=self._headers,
data=json.dumps(kwargs),
)
return response.text
| ZapierToolSpec |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 304784,
"end": 313983
} | class ____(rv_continuous):
r"""A skew-normal random variable.
%(before_notes)s
Notes
-----
The pdf is::
skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)
`skewnorm` takes a real number :math:`a` as a skewness parameter
When ``a = 0`` the distribution is identical to a normal distribution
(`norm`). `rvs` implements the method of [1]_.
This distribution uses routines from the Boost Math C++ library for
the computation of ``cdf``, ``ppf`` and ``isf`` methods. [2]_
%(after_notes)s
References
----------
.. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of
the multivariate skew-normal distribution. J. Roy. Statist. Soc.,
B 61, 579-602. :arxiv:`0911.2093`
.. [2] The Boost Developers. "Boost C++ Libraries". https://www.boost.org/.
%(example)s
"""
def _argcheck(self, a):
return np.isfinite(a)
def _shape_info(self):
return [_ShapeInfo("a", False, (-np.inf, np.inf), (False, False))]
def _pdf(self, x, a):
return xpx.apply_where(
a == 0, (x, a),
lambda x, a: _norm_pdf(x),
lambda x, a: 2.*_norm_pdf(x)*_norm_cdf(a*x))
def _logpdf(self, x, a):
return xpx.apply_where(
a == 0, (x, a),
lambda x, a: _norm_logpdf(x),
lambda x, a: np.log(2)+_norm_logpdf(x)+_norm_logcdf(a*x))
def _cdf(self, x, a):
a = np.atleast_1d(a)
cdf = scu._skewnorm_cdf(x, 0.0, 1.0, a)
# for some reason, a isn't broadcasted if some of x are invalid
a = np.broadcast_to(a, cdf.shape)
# Boost is not accurate in left tail when a > 0
i_small_cdf = (cdf < 1e-6) & (a > 0)
cdf[i_small_cdf] = super()._cdf(x[i_small_cdf], a[i_small_cdf])
return np.clip(cdf, 0, 1)
def _ppf(self, x, a):
return scu._skewnorm_ppf(x, 0.0, 1.0, a)
def _sf(self, x, a):
# Boost's SF is implemented this way. Use whatever customizations
# we made in the _cdf.
return self._cdf(-x, -a)
def _isf(self, x, a):
return scu._skewnorm_isf(x, 0.0, 1.0, a)
def _rvs(self, a, size=None, random_state=None):
u0 = random_state.normal(size=size)
v = random_state.normal(size=size)
d = a/np.sqrt(1 + a**2)
u1 = d*u0 + v*np.sqrt(1 - d**2)
return np.where(u0 >= 0, u1, -u1)
def _stats(self, a, moments='mvsk'):
output = [None, None, None, None]
const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
if 'm' in moments:
output[0] = const
if 'v' in moments:
output[1] = 1 - const**2
if 's' in moments:
output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
if 'k' in moments:
output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)
return output
# For odd order, the each noncentral moment of the skew-normal distribution
# with location 0 and scale 1 can be expressed as a polynomial in delta,
# where delta = a/sqrt(1 + a**2) and `a` is the skew-normal shape
# parameter. The dictionary _skewnorm_odd_moments defines those
# polynomials for orders up to 19. The dict is implemented as a cached
# property to reduce the impact of the creation of the dict on import time.
@cached_property
def _skewnorm_odd_moments(self):
skewnorm_odd_moments = {
1: Polynomial([1]),
3: Polynomial([3, -1]),
5: Polynomial([15, -10, 3]),
7: Polynomial([105, -105, 63, -15]),
9: Polynomial([945, -1260, 1134, -540, 105]),
11: Polynomial([10395, -17325, 20790, -14850, 5775, -945]),
13: Polynomial([135135, -270270, 405405, -386100, 225225, -73710,
10395]),
15: Polynomial([2027025, -4729725, 8513505, -10135125, 7882875,
-3869775, 1091475, -135135]),
17: Polynomial([34459425, -91891800, 192972780, -275675400,
268017750, -175429800, 74220300, -18378360,
2027025]),
19: Polynomial([654729075, -1964187225, 4714049340, -7856748900,
9166207050, -7499623950, 4230557100, -1571349780,
346621275, -34459425]),
}
return skewnorm_odd_moments
def _munp(self, order, a):
if order % 2:
if order > 19:
raise NotImplementedError("skewnorm noncentral moments not "
"implemented for odd orders greater "
"than 19.")
# Use the precomputed polynomials that were derived from the
# moment generating function.
delta = a/np.sqrt(1 + a**2)
return (delta * self._skewnorm_odd_moments[order](delta**2)
* _SQRT_2_OVER_PI)
else:
# For even order, the moment is just (order-1)!!, where !! is the
# notation for the double factorial; for an odd integer m, m!! is
# m*(m-2)*...*3*1.
# We could use special.factorial2, but we know the argument is odd,
# so avoid the overhead of that function and compute the result
# directly here.
return sc.gamma((order + 1)/2) * 2**(order/2) / _SQRT_PI
@extend_notes_in_docstring(rv_continuous, notes="""\
If ``method='mm'``, parameters fixed by the user are respected, and the
remaining parameters are used to match distribution and sample moments
where possible. For example, if the user fixes the location with
``floc``, the parameters will only match the distribution skewness and
variance to the sample skewness and variance; no attempt will be made
to match the means or minimize a norm of the errors.
Note that the maximum possible skewness magnitude of a
`scipy.stats.skewnorm` distribution is approximately 0.9952717; if the
magnitude of the data's sample skewness exceeds this, the returned
shape parameter ``a`` will be infinite.
\n\n""")
def fit(self, data, *args, **kwds):
if kwds.pop("superfit", False):
return super().fit(data, *args, **kwds)
if isinstance(data, CensoredData):
if data.num_censored() == 0:
data = data._uncensor()
else:
return super().fit(data, *args, **kwds)
# this extracts fixed shape, location, and scale however they
# are specified, and also leaves them in `kwds`
data, fa, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
method = kwds.get("method", "mle").lower()
# See https://en.wikipedia.org/wiki/Skew_normal_distribution for
# moment formulas.
def skew_d(d): # skewness in terms of delta
return (4-np.pi)/2 * ((d * np.sqrt(2 / np.pi))**3
/ (1 - 2*d**2 / np.pi)**(3/2))
def d_skew(skew): # delta in terms of skewness
s_23 = np.abs(skew)**(2/3)
return np.sign(skew) * np.sqrt(
np.pi/2 * s_23 / (s_23 + ((4 - np.pi)/2)**(2/3))
)
# If method is method of moments, we don't need the user's guesses.
# Otherwise, extract the guesses from args and kwds.
if method == "mm":
a, loc, scale = None, None, None
else:
a = args[0] if len(args) else None
loc = kwds.pop('loc', None)
scale = kwds.pop('scale', None)
if fa is None and a is None: # not fixed and no guess: use MoM
# Solve for a that matches sample distribution skewness to sample
# skewness.
s = stats.skew(data)
if method == 'mle':
# For MLE initial conditions, clip skewness to a large but
# reasonable value in case the data skewness is out-of-range.
s = np.clip(s, -0.99, 0.99)
else:
s_max = skew_d(1)
s = np.clip(s, -s_max, s_max)
d = d_skew(s)
with np.errstate(divide='ignore'):
a = np.sqrt(np.divide(d**2, (1-d**2)))*np.sign(s)
else:
a = fa if fa is not None else a
d = a / np.sqrt(1 + a**2)
if fscale is None and scale is None:
v = np.var(data)
scale = np.sqrt(v / (1 - 2*d**2/np.pi))
elif fscale is not None:
scale = fscale
if floc is None and loc is None:
m = np.mean(data)
loc = m - scale*d*np.sqrt(2/np.pi)
elif floc is not None:
loc = floc
if method == 'mm':
return a, loc, scale
else:
# At this point, parameter "guesses" may equal the fixed parameters
# in kwds. No harm in passing them as guesses, too.
return super().fit(data, a, loc=loc, scale=scale, **kwds)
skewnorm = skewnorm_gen(name='skewnorm')
| skewnorm_gen |
python | astropy__astropy | astropy/table/tests/test_pprint.py | {
"start": 12156,
"end": 12955
} | class ____:
@pytest.mark.parametrize(
"scalar, exp",
[
(
1,
[
"None",
"----",
" 1",
],
),
(
u.Quantity(0.6, "eV"),
[
"None",
"----",
" 0.6",
],
),
],
)
def test_pprint_scalar(self, scalar, exp):
# see https://github.com/astropy/astropy/issues/12584
c = Column(scalar)
# Make sure pprint() does not raise an exception
c.pprint()
# Check actual output
out = c.pformat()
assert out == exp
@pytest.mark.usefixtures("table_type")
| TestPprintColumn |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/exception.py | {
"start": 591,
"end": 727
} | class ____(CurriculumError):
"""
Any error related to loading the Curriculum config file.
"""
pass
| CurriculumLoadingError |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/completion/fuzzy_completer.py | {
"start": 6580,
"end": 7639
} | class ____(Completer):
"""
Fuzzy completion on a list of words.
(This is basically a `WordCompleter` wrapped in a `FuzzyCompleter`.)
:param words: List of words or callable that returns a list of words.
:param meta_dict: Optional dict mapping words to their meta-information.
:param WORD: When True, use WORD characters.
"""
def __init__(
self,
words: Sequence[str] | Callable[[], Sequence[str]],
meta_dict: dict[str, str] | None = None,
WORD: bool = False,
) -> None:
self.words = words
self.meta_dict = meta_dict or {}
self.WORD = WORD
self.word_completer = WordCompleter(
words=self.words, WORD=self.WORD, meta_dict=self.meta_dict
)
self.fuzzy_completer = FuzzyCompleter(self.word_completer, WORD=self.WORD)
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
return self.fuzzy_completer.get_completions(document, complete_event)
| FuzzyWordCompleter |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_build.py | {
"start": 33567,
"end": 37213
} | class ____(GoogleCloudBaseOperator):
"""
Creates a new build using the original build request, which may or may not result in an identical build.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildRetryBuildOperator`
:param id_: Build ID of the original build.
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param wait: Optional, wait for operation to finish.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param location: The location of the project.
"""
template_fields: Sequence[str] = ("project_id", "id_", "gcp_conn_id", "location")
operator_extra_links = (CloudBuildLink(),)
def __init__(
self,
*,
id_: str,
project_id: str = PROVIDE_PROJECT_ID,
wait: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.id_ = id_
self.project_id = project_id
self.wait = wait
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"region": self.location,
}
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.retry_build(
id_=self.id_,
project_id=self.project_id,
wait=self.wait,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
context["task_instance"].xcom_push(key="id", value=result.id)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildLink.persist(
context=context,
project_id=project_id,
build_id=result.id,
)
return Build.to_dict(result)
| CloudBuildRetryBuildOperator |
python | Pylons__pyramid | src/pyramid/config/security.py | {
"start": 13796,
"end": 14271
} | class ____:
def __init__(
self,
require_csrf,
token,
header,
safe_methods,
check_origin,
allow_no_origin,
callback,
):
self.require_csrf = require_csrf
self.token = token
self.header = header
self.safe_methods = frozenset(safe_methods)
self.check_origin = check_origin
self.allow_no_origin = allow_no_origin
self.callback = callback
| DefaultCSRFOptions |
python | weaviate__weaviate-python-client | weaviate/collections/queries/near_media/generate/executor.py | {
"start": 1056,
"end": 20592
} | class ____(
Generic[ConnectionType, Properties, References], _BaseExecutor[ConnectionType]
):
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Literal[None] = None,
) -> executor.Result[GenerativeReturn[Properties, References]]: ...
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: REFERENCES,
) -> executor.Result[GenerativeReturn[Properties, CrossReferences]]: ...
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Type[TReferences],
) -> executor.Result[GenerativeReturn[Properties, TReferences]]: ...
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Literal[None] = None,
) -> executor.Result[GenerativeReturn[TProperties, References]]: ...
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: REFERENCES,
) -> executor.Result[GenerativeReturn[TProperties, CrossReferences]]: ...
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Literal[None] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Type[TReferences],
) -> executor.Result[GenerativeReturn[TProperties, TReferences]]: ...
### GroupBy ###
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Literal[None] = None,
) -> executor.Result[GenerativeGroupByReturn[Properties, References]]: ...
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: REFERENCES,
) -> executor.Result[GenerativeGroupByReturn[Properties, CrossReferences]]: ...
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Union[PROPERTIES, bool, None] = None,
return_references: Type[TReferences],
) -> executor.Result[GenerativeGroupByReturn[Properties, TReferences]]: ...
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Literal[None] = None,
) -> executor.Result[GenerativeGroupByReturn[TProperties, References]]: ...
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: REFERENCES,
) -> executor.Result[GenerativeGroupByReturn[TProperties, CrossReferences]]: ...
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: GroupBy,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Type[TProperties],
return_references: Type[TReferences],
) -> executor.Result[GenerativeGroupByReturn[TProperties, TReferences]]: ...
### DEFAULT ###
@overload
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[GroupBy] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Optional[ReturnProperties[TProperties]] = None,
return_references: Optional[ReturnReferences[TReferences]] = None,
) -> executor.Result[
GenerativeSearchReturnType[Properties, References, TProperties, TReferences]
]: ...
def near_media(
self,
media: BLOB_INPUT,
media_type: NearMediaType,
*,
single_prompt: Union[str, _SinglePrompt, None] = None,
grouped_task: Union[str, _GroupedTask, None] = None,
grouped_properties: Optional[List[str]] = None,
generative_provider: Optional[_GenerativeConfigRuntime] = None,
certainty: Optional[NUMBER] = None,
distance: Optional[NUMBER] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
auto_limit: Optional[int] = None,
filters: Optional[_Filters] = None,
group_by: Optional[GroupBy] = None,
rerank: Optional[Rerank] = None,
target_vector: Optional[TargetVectorJoinType] = None,
include_vector: INCLUDE_VECTOR = False,
return_metadata: Optional[METADATA] = None,
return_properties: Optional[ReturnProperties[TProperties]] = None,
return_references: Optional[ReturnReferences[TReferences]] = None,
) -> executor.Result[
GenerativeSearchReturnType[Properties, References, TProperties, TReferences]
]:
"""Perform retrieval-augmented generation (RaG) on the results of a by-audio object search in this collection using an audio-capable vectorization module and vector-based similarity search.
See the [docs](https://weaviate.io/developers/weaviate/modules/retriever-vectorizer-modules/multi2vec-bind) for a more detailed explanation.
NOTE:
You must have a multi-media-capable vectorization module installed in order to use this method, e.g. `multi2vec-bind`.
Args:
near_media: The media file to search on, REQUIRED. This can be a base64 encoded string of the binary, a path to the file, or a file-like object.
media_type: The type of the provided media file, REQUIRED.
certainty: The minimum similarity score to return. If not specified, the default certainty specified by the server is used.
distance: The maximum distance to search. If not specified, the default distance specified by the server is used.
limit: The maximum number of results to return. If not specified, the default limit specified by the server is returned.
offset: The offset to start from. If not specified, the retrieval begins from the first object in the server.
auto_limit: The maximum number of [autocut](https://weaviate.io/developers/weaviate/api/graphql/additional-operators#autocut) results to return. If not specified, no limit is applied.
filters: The filters to apply to the search.
group_by: How the results should be grouped by a specific property.
rerank: How the results should be reranked. NOTE: A `rerank-*` module must be enabled for this functionality to work.
target_vector: The name of the vector space to search in for named vector configurations. Required if multiple spaces are configured.
include_vector: Whether to include the vector in the results. If not specified, this is set to False.
return_metadata: The metadata to return for each object, defaults to `None`.
return_properties: The properties to return for each object.
return_references: The references to return for each object.
NOTE:
- If `return_properties` is not provided then all properties are returned except for blob properties.
- If `return_metadata` is not provided then no metadata is provided. Use MetadataQuery.full() to retrieve all metadata.
- If `return_references` is not provided then no references are provided.
Returns:
A `GenerativeReturn` or `GenerativeGroupByReturn` object that includes the searched objects.
If `group_by` is provided then a `GenerativeGroupByReturn` object is returned, otherwise a `GenerativeReturn` object is returned.
Raises:
weaviate.exceptions.WeaviateQueryError: If the request to the Weaviate server fails.
"""
def resp(
res: search_get_pb2.SearchReply,
) -> GenerativeSearchReturnType[Properties, References, TProperties, TReferences]:
return cast(
Any,
self._result_to_generative_return(
res,
_QueryOptions.from_input(
return_metadata,
return_properties,
include_vector,
self._references,
return_references,
rerank,
group_by,
),
),
)
request = self._query.near_media(
media=parse_blob(media),
type_=media_type.value,
certainty=certainty,
distance=distance,
filters=filters,
group_by=_GroupBy.from_input(group_by),
rerank=rerank,
target_vector=target_vector,
generative=_Generative(
single=single_prompt,
grouped=grouped_task,
grouped_properties=grouped_properties,
generative_provider=generative_provider,
),
limit=limit,
offset=offset,
autocut=auto_limit,
return_metadata=self._parse_return_metadata(return_metadata, include_vector),
return_properties=self._parse_return_properties(return_properties),
return_references=self._parse_return_references(return_references),
)
return executor.execute(
response_callback=resp,
method=self._connection.grpc_search,
request=request,
)
| _NearMediaGenerateExecutor |
python | pyca__cryptography | tests/hazmat/primitives/test_rsa.py | {
"start": 86835,
"end": 94819
} | class ____:
@pytest.mark.parametrize(
("fmt", "password"),
itertools.product(
[
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.PrivateFormat.PKCS8,
],
[
b"s",
b"longerpassword",
b"!*$&(@#$*&($T@%_somesymbols",
b"\x01" * 1000,
],
),
)
def test_private_bytes_encrypted_pem(
self, rsa_key_2048: rsa.RSAPrivateKey, backend, fmt, password
):
skip_fips_traditional_openssl(backend, fmt)
key = rsa_key_2048
serialized = key.private_bytes(
serialization.Encoding.PEM,
fmt,
serialization.BestAvailableEncryption(password),
)
loaded_key = serialization.load_pem_private_key(
serialized, password, backend, unsafe_skip_rsa_key_validation=True
)
assert isinstance(loaded_key, rsa.RSAPrivateKey)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.supported(
only_if=lambda backend: backend._fips_enabled,
skip_message="Requires FIPS",
)
def test_traditional_serialization_fips(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
key = rsa_key_2048
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.BestAvailableEncryption(b"password"),
)
@pytest.mark.parametrize(
("encoding", "fmt"),
[
(serialization.Encoding.Raw, serialization.PrivateFormat.PKCS8),
(serialization.Encoding.DER, serialization.PrivateFormat.Raw),
(serialization.Encoding.Raw, serialization.PrivateFormat.Raw),
(serialization.Encoding.X962, serialization.PrivateFormat.PKCS8),
],
)
def test_private_bytes_rejects_invalid(
self, rsa_key_2048: rsa.RSAPrivateKey, encoding, fmt, backend
):
key = rsa_key_2048
with pytest.raises((ValueError, TypeError)):
key.private_bytes(encoding, fmt, serialization.NoEncryption())
@pytest.mark.parametrize(
("fmt", "password"),
[
[serialization.PrivateFormat.PKCS8, b"s"],
[serialization.PrivateFormat.PKCS8, b"longerpassword"],
[serialization.PrivateFormat.PKCS8, b"!*$&(@#$*&($T@%_somesymbol"],
[serialization.PrivateFormat.PKCS8, b"\x01" * 1000],
],
)
def test_private_bytes_encrypted_der(
self, rsa_key_2048: rsa.RSAPrivateKey, backend, fmt, password
):
key = rsa_key_2048
serialized = key.private_bytes(
serialization.Encoding.DER,
fmt,
serialization.BestAvailableEncryption(password),
)
loaded_key = serialization.load_der_private_key(
serialized, password, backend, unsafe_skip_rsa_key_validation=True
)
assert isinstance(loaded_key, rsa.RSAPrivateKey)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.parametrize(
("encoding", "fmt", "loader_func"),
[
[
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.load_pem_private_key,
],
[
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.load_der_private_key,
],
[
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.load_pem_private_key,
],
[
serialization.Encoding.DER,
serialization.PrivateFormat.PKCS8,
serialization.load_der_private_key,
],
],
)
def test_private_bytes_unencrypted(
self,
rsa_key_2048: rsa.RSAPrivateKey,
backend,
encoding,
fmt,
loader_func,
):
key = rsa_key_2048
serialized = key.private_bytes(
encoding, fmt, serialization.NoEncryption()
)
loaded_key = loader_func(
serialized, None, backend, unsafe_skip_rsa_key_validation=True
)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.skip_fips(
reason="Traditional OpenSSL key format is not supported in FIPS mode."
)
@pytest.mark.parametrize(
("key_path", "encoding", "loader_func"),
[
[
os.path.join(
"asymmetric",
"Traditional_OpenSSL_Serialization",
"testrsa.pem",
),
serialization.Encoding.PEM,
serialization.load_pem_private_key,
],
[
os.path.join("asymmetric", "DER_Serialization", "testrsa.der"),
serialization.Encoding.DER,
serialization.load_der_private_key,
],
],
)
def test_private_bytes_traditional_openssl_unencrypted(
self, backend, key_path, encoding, loader_func
):
key_bytes = load_vectors_from_file(
key_path, lambda pemfile: pemfile.read(), mode="rb"
)
key = loader_func(
key_bytes, None, backend, unsafe_skip_rsa_key_validation=True
)
serialized = key.private_bytes(
encoding,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption(),
)
assert serialized == key_bytes
def test_private_bytes_traditional_der_encrypted_invalid(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
key = rsa_key_2048
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.BestAvailableEncryption(b"password"),
)
def test_private_bytes_invalid_encoding(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
key = rsa_key_2048
with pytest.raises(TypeError):
key.private_bytes(
"notencoding", # type: ignore[arg-type]
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
)
def test_private_bytes_invalid_format(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
key = rsa_key_2048
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.PEM,
"invalidformat", # type: ignore[arg-type]
serialization.NoEncryption(),
)
def test_private_bytes_invalid_encryption_algorithm(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
key = rsa_key_2048
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
"notanencalg", # type: ignore[arg-type]
)
def test_private_bytes_unsupported_encryption_type(
self, rsa_key_2048: rsa.RSAPrivateKey, backend
):
key = rsa_key_2048
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
DummyKeySerializationEncryption(),
)
| TestRSAPrivateKeySerialization |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline_run_stats.py | {
"start": 182,
"end": 787
} | class ____(graphene.Interface):
id = graphene.NonNull(graphene.String)
runId = graphene.NonNull(graphene.String)
stepsSucceeded = graphene.NonNull(graphene.Int)
stepsFailed = graphene.NonNull(graphene.Int)
materializations = graphene.NonNull(graphene.Int)
expectations = graphene.NonNull(graphene.Int)
enqueuedTime = graphene.Field(graphene.Float)
launchTime = graphene.Field(graphene.Float)
startTime = graphene.Field(graphene.Float)
endTime = graphene.Field(graphene.Float)
class Meta:
name = "PipelineRunStatsSnapshot"
| GraphenePipelineRunStatsSnapshot |
python | PyCQA__bandit | bandit/core/metrics.py | {
"start": 157,
"end": 3454
} | class ____:
"""Bandit metric gathering.
This class is a singleton used to gather and process metrics collected when
processing a code base with bandit. Metric collection is stateful, that
is, an active metric block will be set when requested and all subsequent
operations will effect that metric block until it is replaced by a setting
a new one.
"""
def __init__(self):
self.data = dict()
self.data["_totals"] = {
"loc": 0,
"nosec": 0,
"skipped_tests": 0,
}
# initialize 0 totals for criteria and rank; this will be reset later
for rank in constants.RANKING:
for criteria in constants.CRITERIA:
self.data["_totals"][f"{criteria[0]}.{rank}"] = 0
def begin(self, fname):
"""Begin a new metric block.
This starts a new metric collection name "fname" and makes is active.
:param fname: the metrics unique name, normally the file name.
"""
self.data[fname] = {
"loc": 0,
"nosec": 0,
"skipped_tests": 0,
}
self.current = self.data[fname]
def note_nosec(self, num=1):
"""Note a "nosec" comment.
Increment the currently active metrics nosec count.
:param num: number of nosecs seen, defaults to 1
"""
self.current["nosec"] += num
def note_skipped_test(self, num=1):
"""Note a "nosec BXXX, BYYY, ..." comment.
Increment the currently active metrics skipped_tests count.
:param num: number of skipped_tests seen, defaults to 1
"""
self.current["skipped_tests"] += num
def count_locs(self, lines):
"""Count lines of code.
We count lines that are not empty and are not comments. The result is
added to our currently active metrics loc count (normally this is 0).
:param lines: lines in the file to process
"""
def proc(line):
tmp = line.strip()
return bool(tmp and not tmp.startswith(b"#"))
self.current["loc"] += sum(proc(line) for line in lines)
def count_issues(self, scores):
self.current.update(self._get_issue_counts(scores))
def aggregate(self):
"""Do final aggregation of metrics."""
c = collections.Counter()
for fname in self.data:
c.update(self.data[fname])
self.data["_totals"] = dict(c)
@staticmethod
def _get_issue_counts(scores):
"""Get issue counts aggregated by confidence/severity rankings.
:param scores: list of scores to aggregate / count
:return: aggregated total (count) of issues identified
"""
issue_counts = {}
for score in scores:
for criteria, _ in constants.CRITERIA:
for i, rank in enumerate(constants.RANKING):
label = f"{criteria}.{rank}"
if label not in issue_counts:
issue_counts[label] = 0
count = (
score[criteria][i]
// constants.RANKING_VALUES[rank]
)
issue_counts[label] += count
return issue_counts
| Metrics |
python | numpy__numpy | tools/swig/test/testTensor.py | {
"start": 12247,
"end": 12551
} | class ____(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "short"
self.typeCode = "h"
self.result = int(self.result)
######################################################################
| shortTestCase |
python | sanic-org__sanic | sanic/cookies/response.py | {
"start": 11211,
"end": 21414
} | class ____:
"""A representation of a HTTP cookie, providing an interface to manipulate cookie attributes intended for a response.
This class is a simplified representation of a cookie, similar to the Morsel SimpleCookie in Python's standard library.
It allows the manipulation of various cookie attributes including path, domain, security settings, and others.
Several "smart defaults" are provided to make it easier to create cookies that are secure by default. These include:
- Setting the `secure` flag to `True` by default
- Setting the `samesite` flag to `Lax` by default
Args:
key (str): The key (name) of the cookie.
value (str): The value of the cookie.
path (str, optional): The path for the cookie. Defaults to "/".
domain (Optional[str], optional): The domain for the cookie.
Defaults to `None`.
secure (bool, optional): Whether the cookie is secure.
Defaults to `True`.
max_age (Optional[int], optional): The maximum age of the cookie
in seconds. Defaults to `None`.
expires (Optional[datetime], optional): The expiration date of the
cookie. Defaults to `None`.
httponly (bool, optional): HttpOnly flag for the cookie.
Defaults to `False`.
samesite (Optional[SameSite], optional): The SameSite attribute for
the cookie. Defaults to `"Lax"`.
partitioned (bool, optional): Whether the cookie is partitioned.
Defaults to `False`.
comment (Optional[str], optional): A comment for the cookie.
Defaults to `None`.
host_prefix (bool, optional): Whether to use the host prefix.
Defaults to `False`.
secure_prefix (bool, optional): Whether to use the secure prefix.
Defaults to `False`.
""" # noqa: E501
HOST_PREFIX = "__Host-"
SECURE_PREFIX = "__Secure-"
__slots__ = (
"key",
"value",
"_path",
"_comment",
"_domain",
"_secure",
"_httponly",
"_partitioned",
"_expires",
"_max_age",
"_samesite",
)
_keys = {
"path": "Path",
"comment": "Comment",
"domain": "Domain",
"max-age": "Max-Age",
"expires": "expires",
"samesite": "SameSite",
# "version": "Version",
"secure": "Secure",
"httponly": "HttpOnly",
"partitioned": "Partitioned",
}
_flags = {"secure", "httponly", "partitioned"}
def __init__(
self,
key: str,
value: str,
*,
path: str = "/",
domain: str | None = None,
secure: bool = True,
max_age: int | None = None,
expires: datetime | None = None,
httponly: bool = False,
samesite: SameSite | None = "Lax",
partitioned: bool = False,
comment: str | None = None,
host_prefix: bool = False,
secure_prefix: bool = False,
):
if key in self._keys:
raise KeyError("Cookie name is a reserved word")
if not _is_legal_key(key):
raise KeyError("Cookie key contains illegal characters")
if host_prefix:
if not secure:
raise ServerError(
"Cannot set host_prefix on a cookie without secure=True"
)
if path != "/":
raise ServerError(
"Cannot set host_prefix on a cookie unless path='/'"
)
if domain:
raise ServerError(
"Cannot set host_prefix on a cookie with a defined domain"
)
elif secure_prefix and not secure:
raise ServerError(
"Cannot set secure_prefix on a cookie without secure=True"
)
if partitioned and not host_prefix:
# This is technically possible, but it is not advisable so we will
# take a stand and say "don't shoot yourself in the foot"
raise ServerError(
"Cannot create a partitioned cookie without "
"also setting host_prefix=True"
)
self.key = self.make_key(key, host_prefix, secure_prefix)
self.value = value
self._path = path
self._comment = comment
self._domain = domain
self._secure = secure
self._httponly = httponly
self._partitioned = partitioned
self._expires = None
self._max_age = None
self._samesite = None
if expires is not None:
self.expires = expires
if max_age is not None:
self.max_age = max_age
if samesite is not None:
self.samesite = samesite
def __str__(self):
"""Format as a Set-Cookie header value."""
output = ["{}={}".format(self.key, _quote(self.value))]
ordered_keys = list(self._keys.keys())
for key in sorted(
self._keys.keys(), key=lambda k: ordered_keys.index(k)
):
value = getattr(self, key.replace("-", "_"))
if value is not None and value is not False:
if key == "max-age":
try:
output.append("%s=%d" % (self._keys[key], value))
except TypeError:
output.append("{}={}".format(self._keys[key], value))
elif key == "expires":
output.append(
"%s=%s"
% (
self._keys[key],
value.strftime("%a, %d-%b-%Y %T GMT"),
)
)
elif key in self._flags:
output.append(self._keys[key])
else:
output.append("{}={}".format(self._keys[key], value))
return "; ".join(output)
@property
def path(self) -> str: # no cov
"""The path of the cookie. Defaults to `"/"`."""
return self._path
@path.setter
def path(self, value: str) -> None: # no cov
self._path = value
@property
def expires(self) -> datetime | None: # no cov
"""The expiration date of the cookie. Defaults to `None`."""
return self._expires
@expires.setter
def expires(self, value: datetime) -> None: # no cov
if not isinstance(value, datetime):
raise TypeError("Cookie 'expires' property must be a datetime")
self._expires = value
@property
def comment(self) -> str | None: # no cov
"""A comment for the cookie. Defaults to `None`."""
return self._comment
@comment.setter
def comment(self, value: str) -> None: # no cov
self._comment = value
@property
def domain(self) -> str | None: # no cov
"""The domain of the cookie. Defaults to `None`."""
return self._domain
@domain.setter
def domain(self, value: str) -> None: # no cov
self._domain = value
@property
def max_age(self) -> int | None: # no cov
"""The maximum age of the cookie in seconds. Defaults to `None`."""
return self._max_age
@max_age.setter
def max_age(self, value: int) -> None: # no cov
if not str(value).isdigit():
raise ValueError("Cookie max-age must be an integer")
self._max_age = value
@property
def secure(self) -> bool: # no cov
"""Whether the cookie is secure. Defaults to `True`."""
return self._secure
@secure.setter
def secure(self, value: bool) -> None: # no cov
self._secure = value
@property
def httponly(self) -> bool: # no cov
"""Whether the cookie is HTTP only. Defaults to `False`."""
return self._httponly
@httponly.setter
def httponly(self, value: bool) -> None: # no cov
self._httponly = value
@property
def samesite(self) -> SameSite | None: # no cov
"""The SameSite attribute for the cookie. Defaults to `"Lax"`."""
return self._samesite
@samesite.setter
def samesite(self, value: SameSite) -> None: # no cov
if value.lower() not in SAMESITE_VALUES:
raise TypeError(
"Cookie 'samesite' property must "
f"be one of: {','.join(SAMESITE_VALUES)}"
)
self._samesite = value.title()
@property
def partitioned(self) -> bool: # no cov
"""Whether the cookie is partitioned. Defaults to `False`."""
return self._partitioned
@partitioned.setter
def partitioned(self, value: bool) -> None: # no cov
self._partitioned = value
@classmethod
def make_key(
cls, key: str, host_prefix: bool = False, secure_prefix: bool = False
) -> str:
"""Create a cookie key with the appropriate prefix.
Cookies can have one ow two prefixes. The first is `__Host-` which
requires that the cookie be set with `path="/", domain=None, and
secure=True`. The second is `__Secure-` which requires that
`secure=True`.
They cannot be combined.
Args:
key (str): The key (name) of the cookie.
host_prefix (bool, optional): Whether to add __Host- as a prefix to the key.
This requires that path="/", domain=None, and secure=True.
Defaults to `False`.
secure_prefix (bool, optional): Whether to add __Secure- as a prefix to the key.
This requires that secure=True. Defaults to `False`.
Raises:
ServerError: If both host_prefix and secure_prefix are set.
Returns:
str: The key with the appropriate prefix.
""" # noqa: E501
if host_prefix and secure_prefix:
raise ServerError(
"Both host_prefix and secure_prefix were requested. "
"A cookie should have only one prefix."
)
elif host_prefix:
key = cls.HOST_PREFIX + key
elif secure_prefix:
key = cls.SECURE_PREFIX + key
return key
| Cookie |
python | dagster-io__dagster | python_modules/dagster/dagster/components/lib/sql_component/sql_component.py | {
"start": 2548,
"end": 2895
} | class ____(BaseModel):
"""A file containing SQL content."""
path: str = Field(..., description="Path to the SQL file")
ResolvedSqlTemplate = Annotated[
Union[str, SqlFile],
Resolver(
lambda ctx, template: template,
model_field_type=Union[str, SqlFile],
inject_before_resolve=False,
),
]
@public
| SqlFile |
python | huggingface__transformers | tests/pipelines/test_pipelines_zero_shot_object_detection.py | {
"start": 1157,
"end": 9818
} | class ____(unittest.TestCase):
model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
object_detector = ZeroShotObjectDetectionPipeline(
model=model,
processor=processor,
tokenizer=tokenizer,
image_processor=image_processor,
dtype=dtype,
)
examples = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def run_pipeline_test(self, object_detector, examples):
outputs = object_detector(examples[0].get("image"), examples[0].get("candidate_labels"), threshold=0.0)
n = len(outputs)
self.assertGreater(n, 0)
self.assertEqual(
outputs,
[
{
"score": ANY(float),
"label": ANY(str),
"box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
}
for i in range(n)
],
)
@require_torch
def test_small_model_pt(self):
object_detector = pipeline(
"zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
)
outputs = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png",
candidate_labels=["cat", "remote", "couch"],
threshold=0.64,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
],
)
outputs = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
],
threshold=0.64,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
],
)
@require_torch
@slow
def test_large_model_pt(self):
object_detector = pipeline("zero-shot-object-detection")
outputs = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg",
candidate_labels=["cat", "remote", "couch"],
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
)
outputs = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
],
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
],
)
@require_torch
@slow
def test_threshold(self):
threshold = 0.2
object_detector = pipeline("zero-shot-object-detection")
outputs = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg",
candidate_labels=["cat", "remote", "couch"],
threshold=threshold,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
],
)
@require_torch
@slow
def test_top_k(self):
top_k = 2
object_detector = pipeline("zero-shot-object-detection")
outputs = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg",
candidate_labels=["cat", "remote", "couch"],
top_k=top_k,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
],
)
| ZeroShotObjectDetectionPipelineTests |
python | numba__numba | numba/core/typing/npdatetime.py | {
"start": 4227,
"end": 4347
} | class ____(TimedeltaBinOp):
key = operator.sub
@infer_global(operator.mul)
@infer_global(operator.imul)
| TimedeltaBinSub |
python | ray-project__ray | rllib/examples/rl_modules/classes/action_masking_rlm.py | {
"start": 3442,
"end": 9351
} | class ____(ActionMaskingRLModule, PPOTorchRLModule):
@override(PPOTorchRLModule)
def setup(self):
super().setup()
# We need to reset here the observation space such that the
# super`s (`PPOTorchRLModule`) observation space is the
# original space (i.e. without the action mask) and `self`'s
# observation space contains the action mask.
self.observation_space = self.observation_space_with_mask
@override(PPOTorchRLModule)
def _forward_inference(
self, batch: Dict[str, TensorType], **kwargs
) -> Dict[str, TensorType]:
# Preprocess the original batch to extract the action mask.
action_mask, batch = self._preprocess_batch(batch)
# Run the forward pass.
outs = super()._forward_inference(batch, **kwargs)
# Mask the action logits and return.
return self._mask_action_logits(outs, action_mask)
@override(PPOTorchRLModule)
def _forward_exploration(
self, batch: Dict[str, TensorType], **kwargs
) -> Dict[str, TensorType]:
# Preprocess the original batch to extract the action mask.
action_mask, batch = self._preprocess_batch(batch)
# Run the forward pass.
outs = super()._forward_exploration(batch, **kwargs)
# Mask the action logits and return.
return self._mask_action_logits(outs, action_mask)
@override(PPOTorchRLModule)
def _forward_train(
self, batch: Dict[str, TensorType], **kwargs
) -> Dict[str, TensorType]:
# Run the forward pass.
outs = super()._forward_train(batch, **kwargs)
# Mask the action logits and return.
return self._mask_action_logits(outs, batch["action_mask"])
@override(ValueFunctionAPI)
def compute_values(self, batch: Dict[str, TensorType], embeddings=None):
# Check, if the observations are still in `dict` form.
if isinstance(batch[Columns.OBS], dict):
# Preprocess the batch to extract the `observations` to `Columns.OBS`.
action_mask, batch = self._preprocess_batch(batch)
# NOTE: Because we manipulate the batch we need to add the `action_mask`
# to the batch to access them in `_forward_train`.
batch["action_mask"] = action_mask
# Call the super's method to compute values for GAE.
return super().compute_values(batch, embeddings)
def _preprocess_batch(
self, batch: Dict[str, TensorType], **kwargs
) -> Tuple[TensorType, Dict[str, TensorType]]:
"""Extracts observations and action mask from the batch
Args:
batch: A dictionary containing tensors (at least `Columns.OBS`)
Returns:
A tuple with the action mask tensor and the modified batch containing
the original observations.
"""
# Check observation specs for action mask and observation keys.
self._check_batch(batch)
# Extract the available actions tensor from the observation.
action_mask = batch[Columns.OBS].pop("action_mask")
# Modify the batch for the `DefaultPPORLModule`'s `forward` method, i.e.
# pass only `"obs"` into the `forward` method.
batch[Columns.OBS] = batch[Columns.OBS].pop("observations")
# Return the extracted action mask and the modified batch.
return action_mask, batch
def _mask_action_logits(
self, batch: Dict[str, TensorType], action_mask: TensorType
) -> Dict[str, TensorType]:
"""Masks the action logits for the output of `forward` methods
Args:
batch: A dictionary containing tensors (at least action logits).
action_mask: A tensor containing the action mask for the current
observations.
Returns:
A modified batch with masked action logits for the action distribution
inputs.
"""
# Convert action mask into an `[0.0][-inf]`-type mask.
inf_mask = torch.clamp(torch.log(action_mask), min=FLOAT_MIN)
# Mask the logits.
batch[Columns.ACTION_DIST_INPUTS] += inf_mask
# Return the batch with the masked action logits.
return batch
def _check_batch(self, batch: Dict[str, TensorType]) -> Optional[ValueError]:
"""Assert that the batch includes action mask and observations.
Args:
batch: A dicitonary containing tensors (at least `Columns.OBS`) to be
checked.
Raises:
`ValueError` if the column `Columns.OBS` does not contain observations
and action mask.
"""
if not self._checked_observations:
if "action_mask" not in batch[Columns.OBS]:
raise ValueError(
"No action mask found in observation. This `RLModule` requires "
"the environment to provide observations that include an "
"action mask (i.e. an observation space of the Dict space "
"type that looks as follows: \n"
"{'action_mask': Box(0.0, 1.0, shape=(self.action_space.n,)),"
"'observations': self.observation_space}"
)
if "observations" not in batch[Columns.OBS]:
raise ValueError(
"No observations found in observation. This 'RLModule` requires "
"the environment to provide observations that include the original "
"observations under a key `'observations'` in a dict (i.e. an "
"observation space of the Dict space type that looks as follows: \n"
"{'action_mask': Box(0.0, 1.0, shape=(self.action_space.n,)),"
"'observations': <observation_space>}"
)
self._checked_observations = True
| ActionMaskingTorchRLModule |
python | optuna__optuna | optuna/samplers/nsgaii/_crossovers/_base.py | {
"start": 157,
"end": 2063
} | class ____(abc.ABC):
"""Base class for crossovers.
A crossover operation is used by :class:`~optuna.samplers.NSGAIISampler`
to create new parameter combination from parameters of ``n`` parent individuals.
.. note::
Concrete implementations of this class are expected to only accept parameters
from numerical distributions. At the moment, only crossover operation for categorical
parameters (uniform crossover) is built-in into :class:`~optuna.samplers.NSGAIISampler`.
"""
def __str__(self) -> str:
return self.__class__.__name__
@property
@abc.abstractmethod
def n_parents(self) -> int:
"""Number of parent individuals required to perform crossover."""
raise NotImplementedError
@abc.abstractmethod
def crossover(
self,
parents_params: np.ndarray,
rng: np.random.RandomState,
study: Study,
search_space_bounds: np.ndarray,
) -> np.ndarray:
"""Perform crossover of selected parent individuals.
This method is called in :func:`~optuna.samplers.NSGAIISampler.sample_relative`.
Args:
parents_params:
A ``numpy.ndarray`` with dimensions ``num_parents x num_parameters``.
Represents a parameter space for each parent individual. This space is
continuous for numerical parameters.
rng:
An instance of ``numpy.random.RandomState``.
study:
Target study object.
search_space_bounds:
A ``numpy.ndarray`` with dimensions ``len_search_space x 2`` representing
numerical distribution bounds constructed from transformed search space.
Returns:
A 1-dimensional ``numpy.ndarray`` containing new parameter combination.
"""
raise NotImplementedError
| BaseCrossover |
python | realpython__materials | python-getter-setter/label2.py | {
"start": 0,
"end": 241
} | class ____:
def __init__(self, text, font):
self.set_text(text)
self.font = font
def get_text(self):
return self._text
def set_text(self, value):
self._text = value.upper() # Attached behavior
| Label |
python | explosion__spaCy | spacy/pipeline/tok2vec.py | {
"start": 8784,
"end": 13476
} | class ____(Model):
"""A layer that gets fed its answers from an upstream connection,
for instance from a component earlier in the pipeline.
The Tok2VecListener layer is used as a sublayer within a component such
as a parser, NER or text categorizer. Usually you'll have multiple listeners
connecting to a single upstream Tok2Vec component, that's earlier in the
pipeline. The Tok2VecListener layers act as proxies, passing the predictions
from the Tok2Vec component into downstream components, and communicating
gradients back upstream.
"""
name = "tok2vec-listener"
def __init__(self, upstream_name: str, width: int) -> None:
"""
upstream_name (str): A string to identify the 'upstream' Tok2Vec component
to communicate with. The upstream name should either be the wildcard
string '*', or the name of the `Tok2Vec` component. You'll almost
never have multiple upstream Tok2Vec components, so the wildcard
string will almost always be fine.
width (int):
The width of the vectors produced by the upstream tok2vec component.
"""
Model.__init__(self, name=self.name, forward=forward, dims={"nO": width})
self.upstream_name = upstream_name
self._batch_id: Optional[int] = None
self._outputs = None
self._backprop = None
@classmethod
def get_batch_id(cls, inputs: Iterable[Doc]) -> int:
"""Calculate a content-sensitive hash of the batch of documents, to check
whether the next batch of documents is unexpected.
"""
return sum(sum(token.orth for token in doc) for doc in inputs)
def receive(self, batch_id: int, outputs, backprop) -> None:
"""Store a batch of training predictions and a backprop callback. The
predictions and callback are produced by the upstream Tok2Vec component,
and later will be used when the listener's component's model is called.
"""
self._batch_id = batch_id
self._outputs = outputs
self._backprop = backprop
def verify_inputs(self, inputs) -> bool:
"""Check that the batch of Doc objects matches the ones we have a
prediction for.
"""
if self._batch_id is None and self._outputs is None:
raise ValueError(Errors.E954)
else:
batch_id = self.get_batch_id(inputs)
if batch_id != self._batch_id:
raise ValueError(Errors.E953.format(id1=batch_id, id2=self._batch_id))
else:
return True
def forward(model: Tok2VecListener, inputs, is_train: bool):
"""Supply the outputs from the upstream Tok2Vec component."""
if is_train:
# This might occur during training when the tok2vec layer is frozen / hasn't been updated.
# In that case, it should be set to "annotating" so we can retrieve the embeddings from the doc.
if model._batch_id is None:
outputs = []
for doc in inputs:
if doc.tensor.size == 0:
raise ValueError(Errors.E203.format(name="tok2vec"))
else:
outputs.append(doc.tensor)
return outputs, _empty_backprop
else:
model.verify_inputs(inputs)
return model._outputs, model._backprop
else:
# This is pretty grim, but it's hard to do better :(.
# It's hard to avoid relying on the doc.tensor attribute, because the
# pipeline components can batch the data differently during prediction.
# That doesn't happen in update, where the nlp object works on batches
# of data.
# When the components batch differently, we don't receive a matching
# prediction from the upstream, so we can't predict.
outputs = []
width = model.get_dim("nO")
for doc in inputs:
if doc.tensor.size == 0:
# But we do need to do *something* if the tensor hasn't been set.
# The compromise is to at least return data of the right shape,
# so the output is valid.
outputs.append(model.ops.alloc2f(len(doc), width))
else:
outputs.append(doc.tensor)
return outputs, _empty_backprop
def _empty_backprop(dX): # for pickling
return []
# Setup backwards compatibility hook for factories
def __getattr__(name):
if name == "make_tok2vec":
module = importlib.import_module("spacy.pipeline.factories")
return module.make_tok2vec
raise AttributeError(f"module {__name__} has no attribute {name}")
| Tok2VecListener |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 289196,
"end": 289679
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of ResolveReviewThread"""
__schema__ = github_schema
__field_names__ = ("thread_id", "client_mutation_id")
thread_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="threadId")
"""The ID of the thread to resolve"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| ResolveReviewThreadInput |
python | getsentry__sentry | src/sentry/analytics/events/join_request_link_viewed.py | {
"start": 81,
"end": 206
} | class ____(analytics.Event):
organization_id: int
analytics.register(JoinRequestLinkViewedEvent)
| JoinRequestLinkViewedEvent |
python | crytic__slither | slither/printers/summary/require_calls.py | {
"start": 471,
"end": 1984
} | class ____(AbstractPrinter):
ARGUMENT = "require"
HELP = "Print the require and assert calls of each function"
WIKI = "https://github.com/trailofbits/slither/wiki/Printer-documentation#require"
@staticmethod
def _convert(l):
return "\n".join(l)
def output(self, _filename):
"""
_filename is not used
Args:
_filename(string)
"""
all_tables = []
all_txt = ""
for contract in self.slither.contracts_derived:
txt = f"\nContract {contract.name}"
table = MyPrettyTable(["Function", "require or assert"])
for function in contract.functions:
require = function.all_slithir_operations()
require = [
ir
for ir in require
if isinstance(ir, SolidityCall) and ir.function in require_or_assert
]
require = [ir.node for ir in require]
table.add_row(
[
function.name,
self._convert(sorted([str(m.expression) for m in set(require)])),
]
)
txt += "\n" + str(table)
self.info(txt)
all_tables.append((contract.name, table))
all_txt += txt
res = self.generate_output(all_txt)
for name, table in all_tables:
res.add_pretty_table(table, name)
return res
| RequireOrAssert |
python | realpython__materials | django-pagination/terms/views.py | {
"start": 194,
"end": 287
} | class ____(ListView):
model = Keyword
template_name = "terms/base.html"
| AllKeywordsView |
python | walkccc__LeetCode | solutions/1014. Best Sightseeing Pair/1014.py | {
"start": 0,
"end": 231
} | class ____:
def maxScoreSightseeingPair(self, values: list[int]) -> int:
ans = 0
bestPrev = 0
for value in values:
ans = max(ans, value + bestPrev)
bestPrev = max(bestPrev, value) - 1
return ans
| Solution |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/transform.py | {
"start": 422,
"end": 2286
} | class ____(Chain):
"""Chain that transforms the chain output.
Example:
```python
from langchain_classic.chains import TransformChain
transform_chain = TransformChain(input_variables=["text"],
output_variables["entities"], transform=func())
```
"""
input_variables: list[str]
"""The keys expected by the transform's input dictionary."""
output_variables: list[str]
"""The keys returned by the transform's output dictionary."""
transform_cb: Callable[[dict[str, str]], dict[str, str]] = Field(alias="transform")
"""The transform function."""
atransform_cb: Callable[[dict[str, Any]], Awaitable[dict[str, Any]]] | None = Field(
None, alias="atransform"
)
"""The async coroutine transform function."""
@staticmethod
@functools.lru_cache
def _log_once(msg: str) -> None:
"""Log a message once."""
logger.warning(msg)
@property
def input_keys(self) -> list[str]:
"""Expect input keys."""
return self.input_variables
@property
def output_keys(self) -> list[str]:
"""Return output keys."""
return self.output_variables
@override
def _call(
self,
inputs: dict[str, str],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, str]:
return self.transform_cb(inputs)
@override
async def _acall(
self,
inputs: dict[str, Any],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
if self.atransform_cb is not None:
return await self.atransform_cb(inputs)
self._log_once(
"TransformChain's atransform is not provided, falling"
" back to synchronous transform",
)
return self.transform_cb(inputs)
| TransformChain |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 302652,
"end": 305641
} | class ____:
def test_inner_type_mismatch(self):
c = 1.
A = np.array((1, 1), dtype='i,i')
assert_raises(TypeError, np.inner, c, A)
assert_raises(TypeError, np.inner, A, c)
def test_inner_scalar_and_vector(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or give
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
a = np.arange(24).reshape(2, 3, 4).astype(dt)
b = np.arange(24, 48).reshape(2, 3, 4).astype(dt)
desired = np.array(
[[[[ 158, 182, 206],
[ 230, 254, 278]],
[[ 566, 654, 742],
[ 830, 918, 1006]],
[[ 974, 1126, 1278],
[1430, 1582, 1734]]],
[[[1382, 1598, 1814],
[2030, 2246, 2462]],
[[1790, 2070, 2350],
[2630, 2910, 3190]],
[[2198, 2542, 2886],
[3230, 3574, 3918]]]]
).astype(dt)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2, 3, 0, 1), desired)
| TestInner |
python | walkccc__LeetCode | solutions/3425. Longest Special Path/3425.py | {
"start": 0,
"end": 1083
} | class ____:
def longestSpecialPath(
self,
edges: list[list[int]],
nums: list[int]
) -> list[int]:
maxLength = 0
minNodes = 1
graph = [[] for _ in range(len(nums))]
for u, v, w in edges:
graph[u].append((v, w))
graph[v].append((u, w))
prefix = [0]
lastSeenDepth = {}
def dfs(
u: int,
prev: int,
leftBoundary: int,
) -> None:
nonlocal maxLength, minNodes
prevDepth = lastSeenDepth.get(nums[u], 0)
lastSeenDepth[nums[u]] = len(prefix)
leftBoundary = max(leftBoundary, prevDepth)
length = prefix[-1] - prefix[leftBoundary]
nodes = len(prefix) - leftBoundary
if length > maxLength or (length == maxLength and nodes < minNodes):
maxLength = length
minNodes = nodes
for v, w in graph[u]:
if v == prev:
continue
prefix.append(prefix[-1] + w)
dfs(v, u, leftBoundary)
prefix.pop()
lastSeenDepth[nums[u]] = prevDepth
dfs(0, -1, leftBoundary=0)
return [maxLength, minNodes]
| Solution |
python | getsentry__sentry-python | tests/test_basics.py | {
"start": 29903,
"end": 36573
} | class ____:
@staticmethod
def static(arg):
return arg
@classmethod
def class_(cls, arg):
return cls, arg
# We need to fork here because the test modifies tests.test_basics.TracingTestClass
@pytest.mark.forked
def test_staticmethod_class_tracing(sentry_init, capture_events):
sentry_init(
debug=True,
traces_sample_rate=1.0,
functions_to_trace=[
{"qualified_name": "tests.test_basics.TracingTestClass.static"}
],
)
events = capture_events()
with sentry_sdk.start_transaction(name="test"):
assert TracingTestClass.static(1) == 1
(event,) = events
assert event["type"] == "transaction"
assert event["transaction"] == "test"
(span,) = event["spans"]
assert span["description"] == "tests.test_basics.TracingTestClass.static"
# We need to fork here because the test modifies tests.test_basics.TracingTestClass
@pytest.mark.forked
def test_staticmethod_instance_tracing(sentry_init, capture_events):
sentry_init(
debug=True,
traces_sample_rate=1.0,
functions_to_trace=[
{"qualified_name": "tests.test_basics.TracingTestClass.static"}
],
)
events = capture_events()
with sentry_sdk.start_transaction(name="test"):
assert TracingTestClass().static(1) == 1
(event,) = events
assert event["type"] == "transaction"
assert event["transaction"] == "test"
(span,) = event["spans"]
assert span["description"] == "tests.test_basics.TracingTestClass.static"
# We need to fork here because the test modifies tests.test_basics.TracingTestClass
@pytest.mark.forked
def test_classmethod_class_tracing(sentry_init, capture_events):
sentry_init(
debug=True,
traces_sample_rate=1.0,
functions_to_trace=[
{"qualified_name": "tests.test_basics.TracingTestClass.class_"}
],
)
events = capture_events()
with sentry_sdk.start_transaction(name="test"):
assert TracingTestClass.class_(1) == (TracingTestClass, 1)
(event,) = events
assert event["type"] == "transaction"
assert event["transaction"] == "test"
(span,) = event["spans"]
assert span["description"] == "tests.test_basics.TracingTestClass.class_"
# We need to fork here because the test modifies tests.test_basics.TracingTestClass
@pytest.mark.forked
def test_classmethod_instance_tracing(sentry_init, capture_events):
sentry_init(
debug=True,
traces_sample_rate=1.0,
functions_to_trace=[
{"qualified_name": "tests.test_basics.TracingTestClass.class_"}
],
)
events = capture_events()
with sentry_sdk.start_transaction(name="test"):
assert TracingTestClass().class_(1) == (TracingTestClass, 1)
(event,) = events
assert event["type"] == "transaction"
assert event["transaction"] == "test"
(span,) = event["spans"]
assert span["description"] == "tests.test_basics.TracingTestClass.class_"
def test_last_event_id(sentry_init):
sentry_init(enable_tracing=True)
assert last_event_id() is None
capture_exception(Exception("test"))
assert last_event_id() is not None
def test_last_event_id_transaction(sentry_init):
sentry_init(enable_tracing=True)
assert last_event_id() is None
with start_transaction(name="test"):
pass
assert last_event_id() is None, "Transaction should not set last_event_id"
def test_last_event_id_scope(sentry_init):
sentry_init(enable_tracing=True)
# Should not crash
with isolation_scope() as scope:
assert scope.last_event_id() is None
def test_hub_constructor_deprecation_warning():
with pytest.warns(sentry_sdk.hub.SentryHubDeprecationWarning):
Hub()
def test_hub_current_deprecation_warning():
with pytest.warns(sentry_sdk.hub.SentryHubDeprecationWarning) as warning_records:
Hub.current
# Make sure we only issue one deprecation warning
assert len(warning_records) == 1
def test_hub_main_deprecation_warnings():
with pytest.warns(sentry_sdk.hub.SentryHubDeprecationWarning):
Hub.main
@pytest.mark.skipif(sys.version_info < (3, 11), reason="add_note() not supported")
def test_notes(sentry_init, capture_events):
sentry_init()
events = capture_events()
try:
e = ValueError("aha!")
e.add_note("Test 123")
e.add_note("another note")
raise e
except Exception:
capture_exception()
(event,) = events
assert event["exception"]["values"][0]["value"] == "aha!\nTest 123\nanother note"
@pytest.mark.skipif(sys.version_info < (3, 11), reason="add_note() not supported")
def test_notes_safe_str(sentry_init, capture_events):
class Note2:
def __repr__(self):
raise TypeError
def __str__(self):
raise TypeError
sentry_init()
events = capture_events()
try:
e = ValueError("aha!")
e.add_note("note 1")
e.__notes__.append(Note2()) # type: ignore
e.add_note("note 3")
e.__notes__.append(2) # type: ignore
raise e
except Exception:
capture_exception()
(event,) = events
assert event["exception"]["values"][0]["value"] == "aha!\nnote 1\nnote 3"
@pytest.mark.skipif(
sys.version_info < (3, 11),
reason="this test appears to cause a segfault on Python < 3.11",
)
def test_stacktrace_big_recursion(sentry_init, capture_events):
"""
Ensure that if the recursion limit is increased, the full stacktrace is not captured,
as it would take too long to process the entire stack trace.
Also, ensure that the capturing does not take too long.
"""
sentry_init()
events = capture_events()
def recurse():
recurse()
old_recursion_limit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(100_000)
recurse()
except RecursionError as e:
capture_start_time = time.perf_counter_ns()
sentry_sdk.capture_exception(e)
capture_end_time = time.perf_counter_ns()
finally:
sys.setrecursionlimit(old_recursion_limit)
(event,) = events
assert event["exception"]["values"][0]["stacktrace"] is None
assert event["_meta"]["exception"] == {
"values": {"0": {"stacktrace": {"": {"rem": [["!config", "x"]]}}}}
}
# On my machine, it takes about 100-200ms to capture the exception,
# so this limit should be generous enough.
assert capture_end_time - capture_start_time < 10**9 * 2, (
"stacktrace capture took too long, check that frame limit is set correctly"
)
| TracingTestClass |
python | getsentry__sentry | tests/sentry/incidents/models/test_alert_rule.py | {
"start": 11326,
"end": 12510
} | class ____(TestCase):
@pytest.fixture(autouse=True)
def _setup_metric_patch(self) -> Generator[None]:
with mock.patch("sentry.incidents.models.alert_rule.metrics") as self.metrics:
yield
def setUp(self) -> None:
self.suspended_registry = TemporaryAlertRuleTriggerActionRegistry.suspend()
def tearDown(self) -> None:
self.suspended_registry.restore()
def test_unhandled(self) -> None:
trigger = AlertRuleTriggerAction(type=AlertRuleTriggerAction.Type.EMAIL.value)
trigger.build_handler(type=AlertRuleTriggerAction.Type(trigger.type))
self.metrics.incr.assert_called_once_with("alert_rule_trigger.unhandled_type.0")
def test_handled(self) -> None:
mock_handler = Mock()
type = AlertRuleTriggerAction.Type.EMAIL
AlertRuleTriggerAction.register_type("something", type, [])(mock_handler)
trigger = AlertRuleTriggerAction(type=AlertRuleTriggerAction.Type.EMAIL.value)
trigger.build_handler(type=AlertRuleTriggerAction.Type(trigger.type))
mock_handler.assert_called_once_with()
assert not self.metrics.incr.called
| AlertRuleTriggerActionActivateTest |
python | crytic__slither | slither/core/scope/scope.py | {
"start": 1123,
"end": 8585
} | class ____:
def __init__(self, filename: Filename) -> None:
self.filename = filename
self.accessible_scopes: List[FileScope] = []
self.exported_symbols: Set[int] = set()
self.contracts: Dict[str, Contract] = {}
# Custom error are a list instead of a dict
# Because we parse the function signature later on
# So we simplify the logic and have the scope fields all populated
self.custom_errors: Set[CustomErrorTopLevel] = set()
self.enums: Dict[str, EnumTopLevel] = {}
# Functions is a list instead of a dict
# Because we parse the function signature later on
# So we simplify the logic and have the scope fields all populated
self.functions: Set[FunctionTopLevel] = set()
self.events: Set[EventTopLevel] = set()
self.using_for_directives: Set[UsingForTopLevel] = set()
self.imports: Set[Import] = set()
self.pragmas: Set[Pragma] = set()
self.structures: Dict[str, StructureTopLevel] = {}
self.variables: Dict[str, TopLevelVariable] = {}
# Renamed created by import
# import A as B
# local name -> original name (A -> B)
self.renaming: Dict[str, str] = {}
# User defined types
# Name -> type alias
self.type_aliases: Dict[str, TypeAlias] = {}
def add_accessible_scopes(self) -> bool: # pylint: disable=too-many-branches
"""
Add information from accessible scopes. Return true if new information was obtained
:return:
:rtype:
"""
learn_something = False
for new_scope in self.accessible_scopes:
# To support using for directives on user defined types and user defined functions,
# we need to propagate the using for directives from the imported file to the importing file
# since it is not reflected in the "exportedSymbols" field of the AST.
if not new_scope.using_for_directives.issubset(self.using_for_directives):
self.using_for_directives |= new_scope.using_for_directives
learn_something = True
if not _dict_contain(new_scope.type_aliases, self.type_aliases):
self.type_aliases.update(new_scope.type_aliases)
learn_something = True
if not new_scope.functions.issubset(self.functions):
self.functions |= new_scope.functions
learn_something = True
# To get around this bug for aliases https://github.com/ethereum/solidity/pull/11881,
# we propagate the exported_symbols from the imported file to the importing file
# See tests/e2e/solc_parsing/test_data/top-level-nested-import-0.7.1.sol
if not new_scope.exported_symbols.issubset(self.exported_symbols):
self.exported_symbols |= new_scope.exported_symbols
learn_something = True
# This is need to support aliasing when we do a late lookup using SolidityImportPlaceholder
if not _dict_contain(new_scope.renaming, self.renaming):
self.renaming.update(new_scope.renaming)
learn_something = True
return learn_something
def get_contract_from_name(self, name: Union[str, Constant]) -> Optional[Contract]:
if isinstance(name, Constant):
return self.contracts.get(name.name, None)
return self.contracts.get(name, None)
AbstractReturnType = TypeVar("AbstractReturnType")
def _generic_source_unit_getter(
self,
crytic_compile_compilation_unit: CompilationUnit,
name: str,
getter: Callable[[SourceUnit], Dict[str, AbstractReturnType]],
) -> Optional[AbstractReturnType]:
assert self.filename in crytic_compile_compilation_unit.source_units
source_unit = crytic_compile_compilation_unit.source_unit(self.filename)
if name in getter(source_unit):
return getter(source_unit)[name]
for scope in self.accessible_scopes:
source_unit = crytic_compile_compilation_unit.source_unit(scope.filename)
if name in getter(source_unit):
return getter(source_unit)[name]
return None
def bytecode_init(
self, crytic_compile_compilation_unit: CompilationUnit, contract_name: str
) -> Optional[str]:
"""
Return the init bytecode
Args:
crytic_compile_compilation_unit:
contract_name:
Returns:
"""
getter: Callable[[SourceUnit], Dict[str, str]] = lambda x: x.bytecodes_init
return self._generic_source_unit_getter(
crytic_compile_compilation_unit, contract_name, getter
)
def bytecode_runtime(
self, crytic_compile_compilation_unit: CompilationUnit, contract_name: str
) -> Optional[str]:
"""
Return the runtime bytecode
Args:
crytic_compile_compilation_unit:
contract_name:
Returns:
"""
getter: Callable[[SourceUnit], Dict[str, str]] = lambda x: x.bytecodes_runtime
return self._generic_source_unit_getter(
crytic_compile_compilation_unit, contract_name, getter
)
def srcmap_init(
self, crytic_compile_compilation_unit: CompilationUnit, contract_name: str
) -> Optional[List[str]]:
"""
Return the init scrmap
Args:
crytic_compile_compilation_unit:
contract_name:
Returns:
"""
getter: Callable[[SourceUnit], Dict[str, List[str]]] = lambda x: x.srcmaps_init
return self._generic_source_unit_getter(
crytic_compile_compilation_unit, contract_name, getter
)
def srcmap_runtime(
self, crytic_compile_compilation_unit: CompilationUnit, contract_name: str
) -> Optional[List[str]]:
"""
Return the runtime srcmap
Args:
crytic_compile_compilation_unit:
contract_name:
Returns:
"""
getter: Callable[[SourceUnit], Dict[str, List[str]]] = lambda x: x.srcmaps_runtime
return self._generic_source_unit_getter(
crytic_compile_compilation_unit, contract_name, getter
)
def abi(self, crytic_compile_compilation_unit: CompilationUnit, contract_name: str) -> Any:
"""
Return the abi
Args:
crytic_compile_compilation_unit:
contract_name:
Returns:
"""
getter: Callable[[SourceUnit], Dict[str, List[str]]] = lambda x: x.abis
return self._generic_source_unit_getter(
crytic_compile_compilation_unit, contract_name, getter
)
# region Built in definitions
###################################################################################
###################################################################################
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return other == self.filename
return NotImplemented
def __neq__(self, other: Any) -> bool:
if isinstance(other, str):
return other != self.filename
return NotImplemented
def __str__(self) -> str:
return str(self.filename.relative)
def __hash__(self) -> int:
return hash(self.filename.relative)
# endregion
| FileScope |
python | huggingface__transformers | src/transformers/models/biogpt/modeling_biogpt.py | {
"start": 24956,
"end": 29180
} | class ____(BioGptPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.biogpt = BioGptModel(config)
if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
classifier_dropout = config.classifier_dropout
else:
classifier_dropout = config.hidden_dropout_prob
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
position_ids: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.biogpt(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = transformer_outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.classifier(hidden_states)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + transformer_outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The BioGpt Model transformer with a sequence classification head on top (linear layer).
[`BioGptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
(e.g. GPT-2) do.
Since it does classification on the last token, it is required to know the position of the last token. If a
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
each row of the batch).
"""
)
| BioGptForTokenClassification |
python | sqlalchemy__sqlalchemy | test/orm/test_descriptor.py | {
"start": 953,
"end": 3785
} | class ____(fixtures.ORMTest):
def _fixture(self):
Base = declarative_base()
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
return Foo
def test_fixture(self):
Foo = self._fixture()
d = MockDescriptor(Foo, "foo")
d.instrument_class(Foo.__mapper__)
assert Foo.foo
def test_property_wrapped_classlevel(self):
Foo = self._fixture()
prop = property(lambda self: None)
Foo.foo = prop
d = MockDescriptor(Foo, "foo")
d.instrument_class(Foo.__mapper__)
assert Foo().foo is None
assert Foo.foo is not prop
def test_property_subclass_wrapped_classlevel(self):
Foo = self._fixture()
class myprop(property):
attr = "bar"
def method1(self):
return "method1"
prop = myprop(lambda self: None)
Foo.foo = prop
d = MockDescriptor(Foo, "foo")
d.instrument_class(Foo.__mapper__)
assert Foo().foo is None
assert Foo.foo is not prop
assert Foo.foo.attr == "bar"
assert Foo.foo.method1() == "method1"
def test_comparator(self):
class Comparator(PropComparator):
__hash__ = None
attr = "bar"
def method1(self):
return "method1"
def method2(self, other):
return "method2"
def __getitem__(self, key):
return "value"
def __eq__(self, other):
return column("foo") == func.upper(other)
Foo = self._fixture()
d = MockDescriptor(Foo, "foo", comparator_factory=Comparator)
d.instrument_class(Foo.__mapper__)
eq_(Foo.foo.method1(), "method1")
eq_(Foo.foo.method2("x"), "method2")
assert Foo.foo.attr == "bar"
assert Foo.foo["bar"] == "value"
eq_((Foo.foo == "bar").__str__(), "foo = upper(:upper_1)")
def test_aliased_comparator(self):
class Comparator(ColumnProperty.Comparator):
__hash__ = None
def __eq__(self, other):
return func.foobar(self.__clause_element__()) == func.foobar(
other
)
Foo = self._fixture()
Foo._name = Column("name", String)
def comparator_factory(self, mapper):
prop = mapper._props["_name"]
return Comparator(prop, mapper)
d = MockDescriptor(Foo, "foo", comparator_factory=comparator_factory)
d.instrument_class(Foo.__mapper__)
eq_(str(Foo.foo == "ed"), "foobar(foo.name) = foobar(:foobar_1)")
eq_(
str(aliased(Foo).foo == "ed"),
"foobar(foo_1.name) = foobar(:foobar_1)",
)
| DescriptorInstrumentationTest |
python | tornadoweb__tornado | tornado/web.py | {
"start": 103196,
"end": 125171
} | class ____(RequestHandler):
"""A simple handler that can serve static content from a directory.
A `StaticFileHandler` is configured automatically if you pass the
``static_path`` keyword argument to `Application`. This handler
can be customized with the ``static_url_prefix``, ``static_handler_class``,
and ``static_handler_args`` settings.
To map an additional path to this handler for a static data directory
you would add a line to your application like::
application = web.Application([
(r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The handler constructor requires a ``path`` argument, which specifies the
local root directory of the content to be served.
Note that a capture group in the regex is required to parse the value for
the ``path`` argument to the get() method (different than the constructor
argument above); see `URLSpec` for details.
To serve a file like ``index.html`` automatically when a directory is
requested, set ``static_handler_args=dict(default_filename="index.html")``
in your application settings, or add ``default_filename`` as an initializer
argument for your ``StaticFileHandler``.
To maximize the effectiveness of browser caching, this class supports
versioned urls (by default using the argument ``?v=``). If a version
is given, we instruct the browser to cache this file indefinitely.
`make_static_url` (also available as `RequestHandler.static_url`) can
be used to construct a versioned url.
This handler is intended primarily for use in development and light-duty
file serving; for heavy traffic it will be more efficient to use
a dedicated static file server (such as nginx or Apache). We support
the HTTP ``Accept-Ranges`` mechanism to return partial content (because
some browsers require this functionality to be present to seek in
HTML5 audio or video).
**Subclassing notes**
This class is designed to be extensible by subclassing, but because
of the way static urls are generated with class methods rather than
instance methods, the inheritance patterns are somewhat unusual.
Be sure to use the ``@classmethod`` decorator when overriding a
class method. Instance methods may use the attributes ``self.path``
``self.absolute_path``, and ``self.modified``.
Subclasses should only override methods discussed in this section;
overriding other methods is error-prone. Overriding
``StaticFileHandler.get`` is particularly problematic due to the
tight coupling with ``compute_etag`` and other methods.
To change the way static urls are generated (e.g. to match the behavior
of another server or CDN), override `make_static_url`, `parse_url_path`,
`get_cache_time`, and/or `get_version`.
To replace all interaction with the filesystem (e.g. to serve
static content from a database), override `get_content`,
`get_content_size`, `get_modified_time`, `get_absolute_path`, and
`validate_absolute_path`.
.. versionchanged:: 3.1
Many of the methods for subclasses were added in Tornado 3.1.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {} # type: Dict[str, Optional[str]]
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path: str, default_filename: Optional[str] = None) -> None:
self.root = path
self.default_filename = default_filename
@classmethod
def reset(cls) -> None:
with cls._lock:
cls._static_hashes = {}
def head(self, path: str) -> Awaitable[None]:
return self.get(path, include_body=False)
async def get(self, path: str, include_body: bool = True) -> None:
# Set up our path instance variables.
self.path = self.parse_url_path(path)
del path # make sure we don't refer to path instead of self.path again
absolute_path = self.get_absolute_path(self.root, self.path)
self.absolute_path = self.validate_absolute_path(self.root, absolute_path)
if self.absolute_path is None:
return
self.modified = self.get_modified_time()
self.set_headers()
if self.should_return_304():
self.set_status(304)
return
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if start is not None and start < 0:
start += size
if start < 0:
start = 0
if (
start is not None
and (start >= size or (end is not None and start >= end))
) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only: if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified.
# https://tools.ietf.org/html/rfc7233#section-2.1
# A byte-range-spec is invalid if the last-byte-pos value is present
# and less than the first-byte-pos.
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", f"bytes */{size}")
return
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header(
"Content-Range", httputil._get_content_range(start, end, size)
)
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(self.absolute_path, start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
await self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
def compute_etag(self) -> Optional[str]:
"""Sets the ``Etag`` header based on static url version.
This allows efficient ``If-None-Match`` checks against cached
versions, and sends the correct ``Etag`` for a partial response
(i.e. the same ``Etag`` as the full file).
.. versionadded:: 3.1
"""
assert self.absolute_path is not None
version_hash = self._get_cached_version(self.absolute_path)
if not version_hash:
return None
return f'"{version_hash}"'
def set_headers(self) -> None:
"""Sets the content and caching headers on the response.
.. versionadded:: 3.1
"""
self.set_header("Accept-Ranges", "bytes")
self.set_etag_header()
if self.modified is not None:
self.set_header("Last-Modified", self.modified)
content_type = self.get_content_type()
if content_type:
self.set_header("Content-Type", content_type)
cache_time = self.get_cache_time(self.path, self.modified, content_type)
if cache_time > 0:
self.set_header(
"Expires",
datetime.datetime.now(datetime.timezone.utc)
+ datetime.timedelta(seconds=cache_time),
)
self.set_header("Cache-Control", "max-age=" + str(cache_time))
self.set_extra_headers(self.path)
def should_return_304(self) -> bool:
"""Returns True if the headers indicate that we should return 304.
.. versionadded:: 3.1
"""
# If client sent If-None-Match, use it, ignore If-Modified-Since
if self.request.headers.get("If-None-Match"):
return self.check_etag_header()
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
try:
if_since = email.utils.parsedate_to_datetime(ims_value)
except Exception:
return False
if if_since.tzinfo is None:
if_since = if_since.replace(tzinfo=datetime.timezone.utc)
assert self.modified is not None
if if_since >= self.modified:
return True
return False
@classmethod
def get_absolute_path(cls, root: str, path: str) -> str:
"""Returns the absolute location of ``path`` relative to ``root``.
``root`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
This class method may be overridden in subclasses. By default
it returns a filesystem path, but other strings may be used
as long as they are unique and understood by the subclass's
overridden `get_content`.
.. versionadded:: 3.1
"""
abspath = os.path.abspath(os.path.join(root, path))
return abspath
def validate_absolute_path(self, root: str, absolute_path: str) -> Optional[str]:
"""Validate and return the absolute path.
``root`` is the configured path for the `StaticFileHandler`,
and ``path`` is the result of `get_absolute_path`
This is an instance method called during request processing,
so it may raise `HTTPError` or use methods like
`RequestHandler.redirect` (return None after redirecting to
halt further processing). This is where 404 errors for missing files
are generated.
This method may modify the path before returning it, but note that
any such modifications will not be understood by `make_static_url`.
In instance methods, this method's result is available as
``self.absolute_path``.
.. versionadded:: 3.1
"""
# os.path.abspath strips a trailing /.
# We must add it back to `root` so that we only match files
# in a directory named `root` instead of files starting with
# that prefix.
root = os.path.abspath(root)
if not root.endswith(os.path.sep):
# abspath always removes a trailing slash, except when
# root is '/'. This is an unusual case, but several projects
# have independently discovered this technique to disable
# Tornado's path validation and (hopefully) do their own,
# so we need to support it.
root += os.path.sep
# The trailing slash also needs to be temporarily added back
# the requested path so a request to root/ will match.
if not (absolute_path + os.path.sep).startswith(root):
raise HTTPError(403, "%s is not in root static directory", self.path)
if os.path.isdir(absolute_path) and self.default_filename is not None:
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
if self.request.path.startswith("//"):
# A redirect with two initial slashes is a "protocol-relative" URL.
# This means the next path segment is treated as a hostname instead
# of a part of the path, making this effectively an open redirect.
# Reject paths starting with two slashes to prevent this.
# This is only reachable under certain configurations.
raise HTTPError(
403, "cannot redirect path with two initial slashes"
)
self.redirect(self.request.path + "/", permanent=True)
return None
absolute_path = os.path.join(absolute_path, self.default_filename)
if not os.path.exists(absolute_path):
raise HTTPError(404)
if not os.path.isfile(absolute_path):
raise HTTPError(403, "%s is not a file", self.path)
return absolute_path
@classmethod
def get_content(
cls, abspath: str, start: Optional[int] = None, end: Optional[int] = None
) -> Generator[bytes, None, None]:
"""Retrieve the content of the requested resource which is located
at the given absolute path.
This class method may be overridden by subclasses. Note that its
signature is different from other overridable class methods
(no ``settings`` argument); this is deliberate to ensure that
``abspath`` is able to stand on its own as a cache key.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
.. versionadded:: 3.1
"""
with open(abspath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0) # type: Optional[int]
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
@classmethod
def get_content_version(cls, abspath: str) -> str:
"""Returns a version string for the resource at the given path.
This class method may be overridden by subclasses. The
default implementation is a SHA-512 hash of the file's contents.
.. versionadded:: 3.1
"""
data = cls.get_content(abspath)
hasher = hashlib.sha512()
if isinstance(data, bytes):
hasher.update(data)
else:
for chunk in data:
hasher.update(chunk)
return hasher.hexdigest()
def _stat(self) -> os.stat_result:
assert self.absolute_path is not None
if not hasattr(self, "_stat_result"):
self._stat_result = os.stat(self.absolute_path)
return self._stat_result
def get_content_size(self) -> int:
"""Retrieve the total size of the resource at the given path.
This method may be overridden by subclasses.
.. versionadded:: 3.1
.. versionchanged:: 4.0
This method is now always called, instead of only when
partial results are requested.
"""
stat_result = self._stat()
return stat_result.st_size
def get_modified_time(self) -> Optional[datetime.datetime]:
"""Returns the time that ``self.absolute_path`` was last modified.
May be overridden in subclasses. Should return a `~datetime.datetime`
object or None.
.. versionadded:: 3.1
.. versionchanged:: 6.4
Now returns an aware datetime object instead of a naive one.
Subclasses that override this method may return either kind.
"""
stat_result = self._stat()
# NOTE: Historically, this used stat_result[stat.ST_MTIME],
# which truncates the fractional portion of the timestamp. It
# was changed from that form to stat_result.st_mtime to
# satisfy mypy (which disallows the bracket operator), but the
# latter form returns a float instead of an int. For
# consistency with the past (and because we have a unit test
# that relies on this), we truncate the float here, although
# I'm not sure that's the right thing to do.
modified = datetime.datetime.fromtimestamp(
int(stat_result.st_mtime), datetime.timezone.utc
)
return modified
def get_content_type(self) -> str:
"""Returns the ``Content-Type`` header to be used for this request.
.. versionadded:: 3.1
"""
assert self.absolute_path is not None
mime_type, encoding = mimetypes.guess_type(self.absolute_path)
# per RFC 6713, use the appropriate type for a gzip compressed file
if encoding == "gzip":
return "application/gzip"
# As of 2015-07-21 there is no bzip2 encoding defined at
# http://www.iana.org/assignments/media-types/media-types.xhtml
# So for that (and any other encoding), use octet-stream.
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream"
def set_extra_headers(self, path: str) -> None:
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(
self, path: str, modified: Optional[datetime.datetime], mime_type: str
) -> int:
"""Override to customize cache control behavior.
Return a positive number of seconds to make the result
cacheable for that amount of time or 0 to mark resource as
cacheable for an unspecified amount of time (subject to
browser heuristics).
By default returns cache expiry of 10 years for resources requested
with ``v`` argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(
cls, settings: Dict[str, Any], path: str, include_version: bool = True
) -> str:
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it
is a class method rather than an instance method). Subclasses
are only required to implement the signature
``make_static_url(cls, settings, path)``; other keyword
arguments may be passed through `~RequestHandler.static_url`
but are not standard.
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
``include_version`` determines whether the generated URL should
include the query string containing the version hash of the
file corresponding to the given ``path``.
"""
url = settings.get("static_url_prefix", "/static/") + path
if not include_version:
return url
version_hash = cls.get_version(settings, path)
if not version_hash:
return url
return f"{url}?v={version_hash}"
def parse_url_path(self, url_path: str) -> str:
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
filesystem path relative to ``static_path``.
This is the inverse of `make_static_url`.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
@classmethod
def get_version(cls, settings: Dict[str, Any], path: str) -> Optional[str]:
"""Generate the version string to be used in static URLs.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
.. versionchanged:: 3.1
This method was previously recommended for subclasses to override;
`get_content_version` is now preferred as it allows the base
class to handle caching of the result.
"""
abs_path = cls.get_absolute_path(settings["static_path"], path)
return cls._get_cached_version(abs_path)
@classmethod
def _get_cached_version(cls, abs_path: str) -> Optional[str]:
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
hashes[abs_path] = cls.get_content_version(abs_path)
except Exception:
gen_log.error("Could not open static file %r", abs_path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh
return None
| StaticFileHandler |
python | django__django | tests/template_tests/utils.py | {
"start": 2301,
"end": 2370
} | class ____(Exception):
silent_variable_failure = True
| SomeException |
python | pytorch__pytorch | test/distributed/checkpoint/test_fsspec.py | {
"start": 2140,
"end": 2554
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.net1 = nn.Sequential(nn.Linear(8, 16), nn.ReLU())
self.net2 = nn.Sequential(nn.Linear(16, 32), nn.ReLU())
self.net3 = nn.Linear(32, 64)
self.net4 = nn.Sequential(nn.ReLU(), nn.Linear(64, 8))
def forward(self, x):
return self.net4(self.net3(self.net2(self.net1(x))))
| MyTestModule |
python | fastai__fastai | fastai/callback/tracker.py | {
"start": 2473,
"end": 3835
} | class ____(TrackerCallback):
"A `TrackerCallback` that terminates training when monitored quantity stops improving."
order=TrackerCallback.order+3
def __init__(self,
monitor='valid_loss', # value (usually loss or metric) being monitored.
comp=None, # numpy comparison operator; np.less if monitor is loss, np.greater if monitor is metric.
min_delta=0., # minimum delta between the last monitor value and the best monitor value.
patience=1, # number of epochs to wait when training has not improved model.
reset_on_fit=True # before model fitting, reset value being monitored to -infinity (if monitor is metric) or +infinity (if monitor is loss).
):
super().__init__(monitor=monitor, comp=comp, min_delta=min_delta, reset_on_fit=reset_on_fit)
self.patience = patience
def before_fit(self): self.wait = 0; super().before_fit()
def after_epoch(self):
"Compare the value monitored to its best score and maybe stop training."
super().after_epoch()
if self.new_best: self.wait = 0
else:
self.wait += 1
if self.wait >= self.patience:
print(f'No improvement since epoch {self.epoch-self.wait}: early stopping')
raise CancelFitException()
# %% ../../nbs/17_callback.tracker.ipynb 26
| EarlyStoppingCallback |
python | PrefectHQ__prefect | tests/server/models/test_orm.py | {
"start": 8427,
"end": 14029
} | class ____:
async def test_task_run_state_relationship_retrieves_current_state(
self, many_task_run_states, session, db
):
# efficient query for most recent state without knowing its ID
# by getting the state with the most recent timestamp
frs_alias = sa.orm.aliased(db.TaskRunState)
query = (
sa.select(
db.TaskRun,
db.TaskRunState.id,
db.TaskRunState.type,
)
.select_from(db.TaskRun)
.join(
db.TaskRunState,
db.TaskRun.id == db.TaskRunState.task_run_id,
isouter=True,
)
.join(
frs_alias,
sa.and_(
db.TaskRunState.task_run_id == frs_alias.task_run_id,
db.TaskRunState.timestamp < frs_alias.timestamp,
),
isouter=True,
)
.where(frs_alias.id is None)
)
result = await session.execute(query)
objs = result.all()
# assert that our handcrafted query picked up all the FINAL states
assert all([o[2] == schemas.states.StateType.COMPLETED for o in objs])
# assert that the `state` relationship picked up all the FINAL states
assert all(
[o[0].state.type == schemas.states.StateType.COMPLETED for o in objs]
)
# assert that the `state` relationship picked up the correct state id
assert all([o[0].state.id == o[1] for o in objs])
# assert that the `state_id` stores the correct state id
assert all([o[0].state_id == o[1] for o in objs])
async def test_task_run_state_relationship_query_matches_current_data(
self, many_task_run_states, session, db
):
query = sa.select(db.TaskRun).where(
db.TaskRun.state.has(
db.TaskRunState.type == schemas.states.StateType.COMPLETED
)
)
result = await session.execute(query)
assert len(result.all()) == 5
async def test_task_run_state_relationship_query_doesnt_match_old_data(
self, many_task_run_states, session, db
):
query = sa.select(db.TaskRun.id).where(
db.TaskRun.state.has(
db.TaskRunState.type == schemas.states.StateType.RUNNING
)
)
result = await session.execute(query)
assert len(result.all()) == 0
async def test_task_run_state_relationship_type_filter_selects_current_state(
self, flow_run, many_task_run_states, session, db
):
# the task runs are most recently in a completed state
match_query = sa.select(sa.func.count(db.TaskRun.id)).where(
db.TaskRun.flow_run_id == flow_run.id,
db.TaskRun.state.has(
db.TaskRunState.type == schemas.states.StateType.COMPLETED
),
)
result = await session.execute(match_query)
assert result.scalar() == 5
# no task run is in a running state
miss_query = sa.select(sa.func.count(db.TaskRun.id)).where(
db.TaskRun.flow_run_id == flow_run.id,
db.TaskRun.state.has(
db.TaskRunState.type == schemas.states.StateType.RUNNING
),
)
result = await session.execute(miss_query)
assert result.scalar() == 0
async def test_assign_to_state_inserts_state(self, task_run, session, db):
task_run_id = task_run.id
assert task_run.state is None
# delete all states
await session.execute(sa.delete(db.TaskRunState))
task_run.set_state(db.TaskRunState(**schemas.states.Completed().orm_dict()))
await session.commit()
session.expire_all()
retrieved_flow_run = await session.get(db.TaskRun, task_run_id)
assert retrieved_flow_run.state.type.value == "COMPLETED"
result = await session.execute(
sa.select(db.TaskRunState).filter_by(task_run_id=task_run_id)
)
states = result.scalars().all()
assert len(states) == 1
assert states[0].type.value == "COMPLETED"
async def test_assign_multiple_to_state_inserts_states(self, task_run, session, db):
task_run_id = task_run.id
# delete all states
await session.execute(sa.delete(db.TaskRunState))
now_dt = now("UTC")
task_run.set_state(
db.TaskRunState(**schemas.states.Pending(timestamp=now_dt).orm_dict())
)
task_run.set_state(
db.TaskRunState(
**schemas.states.Running(
timestamp=now_dt + datetime.timedelta(minutes=1)
).orm_dict()
)
)
task_run.set_state(
db.TaskRunState(
**schemas.states.Completed(
timestamp=now_dt + datetime.timedelta(minutes=2)
).orm_dict()
)
)
await session.commit()
session.expire_all()
retrieved_flow_run = await session.get(db.TaskRun, task_run_id)
assert retrieved_flow_run.state.type.value == "COMPLETED"
result = await session.execute(
sa.select(db.TaskRunState)
.filter_by(task_run_id=task_run_id)
.order_by(db.TaskRunState.timestamp.asc())
)
states = result.scalars().all()
assert len(states) == 3
assert states[0].type.value == "PENDING"
assert states[1].type.value == "RUNNING"
assert states[2].type.value == "COMPLETED"
| TestTaskRun |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 26447,
"end": 27792
} | class ____(PyObjectPtr):
_typename = 'PyLongObject'
def proxyval(self, visited):
'''
Python's Include/longobjrep.h has this declaration:
struct _longobject {
PyObject_VAR_HEAD
digit ob_digit[1];
};
with this description:
The absolute value of a number is equal to
SUM(for i=0 through abs(ob_size)-1) ob_digit[i] * 2**(SHIFT*i)
Negative numbers are represented with ob_size < 0;
zero is represented by ob_size == 0.
where SHIFT can be either:
#define PyLong_SHIFT 30
#define PyLong_SHIFT 15
'''
ob_size = int(self.field('ob_size'))
if ob_size == 0:
return 0
ob_digit = self.field('ob_digit')
if gdb.lookup_type('digit').sizeof == 2:
SHIFT = 15
else:
SHIFT = 30
digits = [int(ob_digit[i]) * 2**(SHIFT*i)
for i in safe_range(abs(ob_size))]
result = sum(digits)
if ob_size < 0:
result = -result
return result
def write_repr(self, out, visited):
# Write this out as a Python 3 int literal, i.e. without the "L" suffix
proxy = self.proxyval(visited)
out.write("%s" % proxy)
| PyLongObjectPtr |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 93057,
"end": 93290
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vgpuCount', c_uint),
('vgpuInstances', POINTER(c_uint)),
]
nvmlActiveVgpuInstanceInfo_v1 = 0x1000010
| c_nvmlActiveVgpuInstanceInfo_v1_t |
python | cython__cython | tests/run/pure_mode_cmethod_inheritance_T583.py | {
"start": 0,
"end": 327
} | class ____(object):
'''
>>> base = Base()
>>> print(base.noargs())
Base
>>> print(base.int_arg(1))
Base
>>> print(base._class())
Base
'''
def noargs(self):
return "Base"
def int_arg(self, i):
return "Base"
@classmethod
def _class(tp):
return "Base"
| Base |
python | PrefectHQ__prefect | src/prefect/server/database/configurations.py | {
"start": 3523,
"end": 6738
} | class ____(ABC):
"""
Abstract base class used to inject database connection configuration into Prefect.
This configuration is responsible for defining how Prefect REST API creates and manages
database connections and sessions.
"""
def __init__(
self,
connection_url: str,
echo: Optional[bool] = None,
timeout: Optional[float] = None,
connection_timeout: Optional[float] = None,
sqlalchemy_pool_size: Optional[int] = None,
sqlalchemy_max_overflow: Optional[int] = None,
connection_app_name: Optional[str] = None,
statement_cache_size: Optional[int] = None,
prepared_statement_cache_size: Optional[int] = None,
) -> None:
self.connection_url = connection_url
self.echo: bool = echo or PREFECT_API_DATABASE_ECHO.value()
self.timeout: Optional[float] = timeout or PREFECT_API_DATABASE_TIMEOUT.value()
self.connection_timeout: Optional[float] = (
connection_timeout or PREFECT_API_DATABASE_CONNECTION_TIMEOUT.value()
)
self.sqlalchemy_pool_size: Optional[int] = (
sqlalchemy_pool_size
or get_current_settings().server.database.sqlalchemy.pool_size
)
self.sqlalchemy_max_overflow: Optional[int] = (
sqlalchemy_max_overflow
or get_current_settings().server.database.sqlalchemy.max_overflow
)
self.connection_app_name: Optional[str] = (
connection_app_name
or get_current_settings().server.database.sqlalchemy.connect_args.application_name
)
self.statement_cache_size: Optional[int] = (
statement_cache_size
or get_current_settings().server.database.sqlalchemy.connect_args.statement_cache_size
)
self.prepared_statement_cache_size: Optional[int] = (
prepared_statement_cache_size
or get_current_settings().server.database.sqlalchemy.connect_args.prepared_statement_cache_size
)
def unique_key(self) -> tuple[Hashable, ...]:
"""
Returns a key used to determine whether to instantiate a new DB interface.
"""
return (self.__class__, self.connection_url)
@abstractmethod
async def engine(self) -> AsyncEngine:
"""Returns a SqlAlchemy engine"""
@abstractmethod
async def session(self, engine: AsyncEngine) -> AsyncSession:
"""
Retrieves a SQLAlchemy session for an engine.
"""
@abstractmethod
async def create_db(
self, connection: AsyncConnection, base_metadata: sa.MetaData
) -> None:
"""Create the database"""
@abstractmethod
async def drop_db(
self, connection: AsyncConnection, base_metadata: sa.MetaData
) -> None:
"""Drop the database"""
@abstractmethod
def is_inmemory(self) -> bool:
"""Returns true if database is run in memory"""
@abstractmethod
def begin_transaction(
self, session: AsyncSession, with_for_update: bool = False
) -> AbstractAsyncContextManager[AsyncSessionTransaction]:
"""Enter a transaction for a session"""
pass
| BaseDatabaseConfiguration |
python | numba__numba | numba/tests/test_listobject.py | {
"start": 30425,
"end": 31183
} | class ____(MemoryLeakMixin, TestCase):
"""Test list clear. """
def test_list_clear_empty(self):
@njit
def foo():
l = listobject.new_list(int32)
l.clear()
return len(l)
self.assertEqual(foo(), 0)
def test_list_clear_singleton(self):
@njit
def foo():
l = listobject.new_list(int32)
l.append(0)
l.clear()
return len(l)
self.assertEqual(foo(), 0)
def test_list_clear_multiple(self):
@njit
def foo():
l = listobject.new_list(int32)
for j in range(10):
l.append(0)
l.clear()
return len(l)
self.assertEqual(foo(), 0)
| TestClear |
python | Lightning-AI__lightning | tests/tests_pytorch/callbacks/test_model_checkpoint_edge_cases.py | {
"start": 837,
"end": 1747
} | class ____(LightningModule):
"""Logs a validation metric on every validation run, even if validation is run multiple times per epoch."""
def __init__(self, val_scores: list[float]):
super().__init__()
self.layer = nn.Linear(1, 1)
self._val_scores = [float(s) for s in val_scores]
self._val_call_idx = 0
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self.layer(x)
loss = F.mse_loss(y_hat, y)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
pass
def on_validation_epoch_end(self):
score = self._val_scores[self._val_call_idx]
self._val_call_idx += 1
self.log("auroc", torch.tensor(score, dtype=torch.float32), prog_bar=False, logger=True)
def configure_optimizers(self):
return torch.optim.SGD(self.parameters(), lr=0.01)
| MultiValPerEpochModule |
python | wandb__wandb | wandb/automations/events.py | {
"start": 3938,
"end": 5358
} | class ____(GQLBase): # from: TriggeringRunMetricEvent
run: Annotated[
JsonEncoded[MongoLikeFilter],
AfterValidator(wrap_run_event_run_filter),
Field(alias="run_filter"),
] = And()
"""Filters that must match any runs that will trigger this event."""
metric: Annotated[
Union[
_WrappedMetricThresholdFilter,
_WrappedMetricChangeFilter,
_WrappedMetricZScoreFilter,
],
Field(alias="run_metric_filter"),
]
"""Metric condition(s) that must be satisfied for this event to trigger."""
# ------------------------------------------------------------------------------
legacy_metric_filter: Annotated[
Optional[JsonEncoded[MetricThresholdFilter]],
Field(alias="metric_filter", deprecated=True),
] = None
"""Deprecated legacy field for defining run metric threshold events.
For new automations, use the `metric` field (JSON alias `run_metric_filter`).
"""
@model_validator(mode="before")
@classmethod
def _nest_metric_filter(cls, v: Any) -> Any:
# If no run filter is given, automatically nest the metric filter and
# let inner validators reshape further as needed.
if pydantic_isinstance(
v, (MetricThresholdFilter, MetricChangeFilter, MetricZScoreFilter)
):
return cls(metric=v)
return v
| RunMetricFilter |
python | apache__airflow | task-sdk/src/airflow/sdk/exceptions.py | {
"start": 8891,
"end": 9559
class ____(AirflowException):
    """Raised when resolving an XCom reference that points at a non-existent XCom."""

    def __init__(self, dag_id: str, task_id: str, key: str) -> None:
        super().__init__()
        self.dag_id = dag_id
        self.task_id = task_id
        self.key = key

    def __str__(self) -> str:
        return f'XComArg result from {self.task_id} at {self.dag_id} with key="{self.key}" is not found!'

    def serialize(self):
        """Return a (classpath, args, kwargs) triple suitable for reconstruction."""
        klass = type(self)
        classpath = f"{klass.__module__}.{klass.__name__}"
        kwargs = {"dag_id": self.dag_id, "task_id": self.task_id, "key": self.key}
        return (classpath, (), kwargs)
| XComNotFound |
python | huggingface__transformers | src/transformers/models/starcoder2/modeling_starcoder2.py | {
"start": 11847,
"end": 12397
class ____(PreTrainedModel):
    """Base class hooking Starcoder2 modules into the Transformers pretrained-model machinery."""

    config: Starcoder2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    # Keep each decoder layer whole when sharding the model across devices.
    _no_split_modules = ["Starcoder2DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    # Attention backends this architecture supports.
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _can_compile_fullgraph = True
    _supports_attention_backend = True
    # Modules whose outputs can be recorded as hidden_states / attentions.
    _can_record_outputs = {
        "hidden_states": Starcoder2DecoderLayer,
        "attentions": Starcoder2Attention,
    }
| Starcoder2PreTrainedModel |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 34321,
"end": 34516
class ____(sgqlc.types.Enum):
    """GraphQL enum of reasons a repository may be locked.

    Enumeration choices: ``BILLING``, ``MIGRATING``, ``MOVING``, ``RENAME``.
    """

    __schema__ = graphql_schema
    __choices__ = ("BILLING", "MIGRATING", "MOVING", "RENAME")
| RepositoryLockReason |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py | {
"start": 5266,
"end": 5340
class ____(Qwen3VLMoeVisionConfig):
    # Identical to the Qwen3-VL-MoE vision config; subclassed only so the Omni
    # model exposes its own config class name.
    pass
| Qwen3OmniMoeVisionEncoderConfig |
python | milvus-io__pymilvus | pymilvus/client/embedding_list.py | {
"start": 243,
"end": 11045
class ____:
    """
    A container for multiple embeddings that can be used directly in Milvus searches.

    Represents a single query containing multiple vectors for array-of-vector fields.
    This is particularly useful for searching struct array fields that contain vectors,
    enabling array-of-vector to array-of-vector searches.

    Examples:
        >>> # Create empty and add vectors
        >>> query1 = EmbeddingList()
        >>> query1.add(embedding1)
        >>> query1.add(embedding2)
        >>>
        >>> # Create from list of vectors
        >>> vectors = [vec1, vec2, vec3]
        >>> query2 = EmbeddingList(vectors)
        >>>
        >>> # Use in search
        >>> results = client.search(
        >>>     collection_name="my_collection",
        >>>     data=[query1, query2],  # List of EmbeddingList
        >>>     ...
        >>> )
    """

    def __init__(
        self,
        embeddings: Optional[Union[List[np.ndarray], np.ndarray]] = None,
        dim: Optional[int] = None,
        dtype: Optional[Union[np.dtype, str, DataType]] = None,
    ):
        """
        Initialize an EmbeddingList.

        Args:
            embeddings: Initial embeddings. Can be:
                - None: Creates an empty list
                - np.ndarray with shape (n, dim): Batch of n embeddings
                - np.ndarray with shape (dim,): Single embedding
                - List[np.ndarray]: List of embedding arrays
            dim: Expected dimension for validation (optional).
                If provided, all added embeddings will be validated against this dimension.
            dtype: Data type of the embeddings (optional). Can be:
                - numpy dtype (e.g., np.float32, np.float16, np.uint8)
                - string (e.g., 'float32', 'float16', 'uint8')
                - DataType enum (e.g., DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR)
                If not specified, will infer from the first embedding added.
        """
        self._embeddings: List[np.ndarray] = []
        self._dim = dim
        # Resolve the dtype eagerly when given; otherwise it is inferred from
        # the first embedding passed to add().
        self._dtype = self._parse_dtype(dtype) if dtype is not None else None
        if embeddings is not None:
            if isinstance(embeddings, np.ndarray):
                if embeddings.ndim == 1:
                    # Single vector
                    self.add(embeddings)
                elif embeddings.ndim == 2:
                    # Multiple vectors
                    for i in range(len(embeddings)):
                        self.add(embeddings[i])
                else:
                    msg = "Embeddings array must be 1D or 2D"
                    raise ValueError(msg)
            elif isinstance(embeddings, list):
                for emb in embeddings:
                    self.add(emb)
            else:
                msg = "Embeddings must be numpy array or list"
                raise TypeError(msg)

    def _parse_dtype(self, dtype: Union[np.dtype, str, DataType]) -> np.dtype:
        """Parse and validate data type."""
        if isinstance(dtype, np.dtype):
            return dtype
        if isinstance(dtype, str):
            return np.dtype(dtype)
        if isinstance(dtype, DataType):
            # Map DataType enum to numpy dtype
            dtype_map = {
                DataType.FLOAT_VECTOR: np.float32,
                DataType.FLOAT16_VECTOR: np.float16,
                DataType.BFLOAT16_VECTOR: np.float16,  # Use float16 as approximation
                DataType.BINARY_VECTOR: np.uint8,
                DataType.INT8_VECTOR: np.int8,
            }
            if dtype in dtype_map:
                return np.dtype(dtype_map[dtype])
            msg = f"Unsupported DataType: {dtype}"
            raise ParamError(message=msg)
        msg = f"dtype must be numpy dtype, string, or DataType, got {type(dtype)}"
        raise TypeError(msg)

    def _infer_dtype(self, array: np.ndarray) -> np.dtype:
        """Infer dtype from array, with smart defaults."""
        if array.dtype == np.float64:
            # Default double precision to single precision for efficiency
            return np.dtype(np.float32)
        return array.dtype

    def add(self, embedding: Union[np.ndarray, List[Any]]) -> "EmbeddingList":
        """
        Add a single embedding vector to the list.

        Args:
            embedding: A single embedding vector (1D array or list)

        Returns:
            Self for method chaining

        Raises:
            ValueError: If embedding dimension doesn't match existing embeddings
        """
        embedding = np.asarray(embedding)
        if embedding.ndim != 1:
            msg = f"Embedding must be 1D, got shape {embedding.shape}"
            raise ValueError(msg)
        # Validate dimension: against existing embeddings first, else against
        # the expected `dim` supplied at construction time (if any).
        if self._embeddings:
            if len(embedding) != self.dim:
                msg = f"Embedding dimension {len(embedding)} doesn't match existing {self.dim}"
                raise ValueError(msg)
        elif self._dim is not None and len(embedding) != self._dim:
            msg = f"Embedding dimension {len(embedding)} doesn't match expected {self._dim}"
            raise ValueError(msg)
        # Handle dtype
        if self._dtype is None:
            # Infer dtype from first embedding
            self._dtype = self._infer_dtype(embedding)
        # Convert to the established dtype
        if embedding.dtype != self._dtype:
            embedding = embedding.astype(self._dtype)
        self._embeddings.append(embedding)
        return self

    def add_batch(self, embeddings: Union[List[np.ndarray], np.ndarray]) -> "EmbeddingList":
        """
        Add multiple embeddings at once.

        Args:
            embeddings: Batch of embeddings (2D array or list of 1D arrays)

        Returns:
            Self for method chaining

        Raises:
            ValueError: If embeddings have inconsistent dimensions
        """
        if isinstance(embeddings, np.ndarray):
            if embeddings.ndim != 2:
                msg = f"Batch embeddings must be 2D, got {embeddings.ndim}D"
                raise ValueError(msg)
            for i in range(len(embeddings)):
                self.add(embeddings[i])
        else:
            for emb in embeddings:
                self.add(emb)
        return self

    @classmethod
    def _from_random_test(
        cls,
        num_vectors: int,
        dim: int,
        dtype: Optional[Union[np.dtype, str, DataType]] = None,
        seed: Optional[int] = None,
    ) -> "EmbeddingList":
        """
        Create an EmbeddingList with random vectors for testing purposes.

        WARNING: This method is intended for testing and demonstration only.
        Do not use in production code.

        Args:
            num_vectors: Number of random vectors to generate
            dim: Dimension of each vector
            dtype: Data type of the vectors (default: np.float32)
            seed: Random seed for reproducibility

        Returns:
            New EmbeddingList with random test vectors
        """
        rng = np.random.default_rng(seed)
        # Default dtype to float32 if None
        if dtype is None:
            dtype = np.dtype(np.float32)
        # Parse dtype if needed: a throwaway empty instance reuses _parse_dtype
        # so str / DataType inputs resolve the same way as in __init__.
        if not isinstance(dtype, np.dtype):
            dtype = cls(None, dim=dim, dtype=dtype)._dtype
        # Generate random data based on dtype
        if dtype == np.uint8:
            # For binary vectors, generate random bits
            embeddings = rng.integers(0, 256, size=(num_vectors, dim), dtype=np.uint8)
        elif dtype == np.int8:
            # For int8 vectors (rng.integers has an exclusive upper bound)
            embeddings = rng.integers(-128, 128, size=(num_vectors, dim), dtype=np.int8)
        elif dtype in [np.float16, np.float32, np.float64]:
            # For float vectors
            embeddings = rng.random((num_vectors, dim)).astype(dtype)
        else:
            msg = f"Unsupported dtype for random generation: {dtype}"
            raise ValueError(msg)
        return cls(embeddings, dim=dim, dtype=dtype)

    def to_flat_array(self) -> np.ndarray:
        """
        Convert to flat array format required by Milvus for array-of-vector fields.

        Returns:
            Flattened numpy array containing all embeddings concatenated

        Raises:
            ValueError: If the list is empty
        """
        if not self._embeddings:
            msg = "EmbeddingList is empty"
            raise ValueError(msg)
        # Preserve the dtype of the embeddings
        return np.concatenate(self._embeddings)

    def to_numpy(self) -> np.ndarray:
        """
        Convert to 2D numpy array.

        Returns:
            2D numpy array with shape (num_embeddings, dim)

        Raises:
            ValueError: If the list is empty
        """
        if not self._embeddings:
            msg = "EmbeddingList is empty"
            raise ValueError(msg)
        return np.stack(self._embeddings)

    def clear(self) -> "EmbeddingList":
        """Clear all embeddings from the list."""
        self._embeddings.clear()
        return self

    def __len__(self) -> int:
        """Return the number of embeddings in the list."""
        return len(self._embeddings)

    def __getitem__(self, index: int) -> np.ndarray:
        """Get embedding at specific index."""
        return self._embeddings[index]

    def __iter__(self):
        """Iterate over embeddings."""
        return iter(self._embeddings)

    @property
    def dim(self) -> int:
        """Dimension of each embedding."""
        # Prefer the actual stored vectors; fall back to the declared dim, or 0
        # when neither is available.
        if self._embeddings:
            return len(self._embeddings[0])
        return self._dim or 0

    @property
    def dtype(self) -> Optional[np.dtype]:
        """Data type of the embeddings (None until declared or inferred)."""
        return self._dtype

    @property
    def shape(self) -> tuple:
        """Shape as (num_embeddings, dim)."""
        return (len(self), self.dim)

    @property
    def total_dim(self) -> int:
        """Total dimension of all embeddings combined."""
        return len(self) * self.dim

    @property
    def is_empty(self) -> bool:
        """Check if the list is empty."""
        return len(self._embeddings) == 0

    @property
    def nbytes(self) -> int:
        """Total number of bytes used by all embeddings."""
        if self.is_empty:
            return 0
        return sum(emb.nbytes for emb in self._embeddings)

    def __repr__(self) -> str:
        dtype_str = f", dtype={self.dtype}" if self.dtype else ""
        return f"EmbeddingList(count={len(self)}, dim={self.dim}{dtype_str})"

    def __str__(self) -> str:
        if self.is_empty:
            return "EmbeddingList(empty)"
        dtype_str = f" ({self.dtype})" if self.dtype else ""
        return f"EmbeddingList with {len(self)} embeddings of dimension {self.dim}{dtype_str}"
| EmbeddingList |
python | scipy__scipy | scipy/fftpack/tests/test_real_transforms.py | {
"start": 15047,
"end": 15186
class ____(_TestDSTBase):
    # Parametrizes the shared DST test base for type-II transforms on float32.
    def setup_method(self):
        self.rdt = np.float32  # real dtype under test
        self.dec = 6  # presumably decimal precision for comparisons — see _TestDSTBase
        self.type = 2  # DST type II
| TestDSTIIFloat |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 7487,
"end": 9252
class ____(GeneratedAirbyteSource):
    @public
    def __init__(
        self,
        name: str,
        host: str,
        port: int,
        database: str,
        user: str,
        password: str,
        auth_source: str,
        replica_set: Optional[str] = None,
        ssl: Optional[bool] = None,
    ):
        """Airbyte Source for Mongodb.

        Documentation for this source is no longer available (see MongodbV2Source)

        Args:
            name (str): The name of the destination.
            host (str): Host of a Mongo database to be replicated.
            port (int): Port of a Mongo database to be replicated.
            database (str): Database to be replicated.
            user (str): User
            password (str): Password
            auth_source (str): Authentication source where user information is stored. See the Mongo docs for more info.
            replica_set (Optional[str]): The name of the set to filter servers by, when connecting to a replica set (Under this condition, the 'TLS connection' value automatically becomes 'true'). See the Mongo docs for more info.
            ssl (Optional[bool]): If this switch is enabled, TLS connections will be used to connect to MongoDB.
        """
        # check.* validates types eagerly so misconfiguration fails at
        # construction time rather than when the source is used.
        self.host = check.str_param(host, "host")
        self.port = check.int_param(port, "port")
        self.database = check.str_param(database, "database")
        self.user = check.str_param(user, "user")
        self.password = check.str_param(password, "password")
        self.auth_source = check.str_param(auth_source, "auth_source")
        self.replica_set = check.opt_str_param(replica_set, "replica_set")
        self.ssl = check.opt_bool_param(ssl, "ssl")
        # Registers this source under the fixed Airbyte source type "Mongodb".
        super().__init__("Mongodb", name)
| MongodbSource |
python | donnemartin__interactive-coding-challenges | online_judges/sentence_screen_fit/test_count_sentence_fit.py | {
"start": 18,
"end": 1033
class ____(unittest.TestCase):
    """Checks for Solution.count_sentence_fit."""

    def test_count_sentence_fit(self):
        sol = Solution()
        # Invalid input handling.
        self.assertRaises(TypeError, sol.count_sentence_fit,
                          None, None, None)
        self.assertRaises(ValueError, sol.count_sentence_fit,
                          'abc', rows=-1, cols=-1)
        # ["hello", "world"] fits exactly once on a 2x8 screen.
        self.assertEqual(
            sol.count_sentence_fit(["hello", "world"], rows=2, cols=8), 1)
        # ["a", "bcd", "e"] fits twice on a 3x6 screen.
        self.assertEqual(
            sol.count_sentence_fit(["a", "bcd", "e"], rows=3, cols=6), 2)
        # ["I", "had", "apple", "pie"] fits once on a 4x5 screen.
        self.assertEqual(
            sol.count_sentence_fit(["I", "had", "apple", "pie"], rows=4, cols=5), 1)
        print('Success: test_count_sentence_fit')
def main():
    """Run the sentence-fit test directly, without a unittest runner."""
    TestSolution().test_count_sentence_fit()


if __name__ == '__main__':
    main()
| TestSolution |
python | ray-project__ray | python/ray/tune/tests/test_actor_reuse.py | {
"start": 1044,
"end": 1191
class ____(FIFOScheduler):
    """FIFO scheduler variant that pauses a trial on every reported result."""

    def on_trial_result(self, tune_controller, trial, result):
        # Request a pause unconditionally, regardless of the result contents.
        return TrialScheduler.PAUSE
| FrequentPausesScheduler |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_exact_device_request.py | {
"start": 383,
"end": 14190
class ____(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'admin_access': 'bool',
        'allocation_mode': 'str',
        'capacity': 'V1beta2CapacityRequirements',
        'count': 'int',
        'device_class_name': 'str',
        'selectors': 'list[V1beta2DeviceSelector]',
        'tolerations': 'list[V1beta2DeviceToleration]'
    }

    attribute_map = {
        'admin_access': 'adminAccess',
        'allocation_mode': 'allocationMode',
        'capacity': 'capacity',
        'count': 'count',
        'device_class_name': 'deviceClassName',
        'selectors': 'selectors',
        'tolerations': 'tolerations'
    }

    def __init__(self, admin_access=None, allocation_mode=None, capacity=None, count=None, device_class_name=None, selectors=None, tolerations=None, local_vars_configuration=None):  # noqa: E501
        """V1beta2ExactDeviceRequest - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._admin_access = None
        self._allocation_mode = None
        self._capacity = None
        self._count = None
        self._device_class_name = None
        self._selectors = None
        self._tolerations = None
        self.discriminator = None

        # Optional fields are only set when provided; device_class_name is
        # required and validated in its setter.
        if admin_access is not None:
            self.admin_access = admin_access
        if allocation_mode is not None:
            self.allocation_mode = allocation_mode
        if capacity is not None:
            self.capacity = capacity
        if count is not None:
            self.count = count
        self.device_class_name = device_class_name
        if selectors is not None:
            self.selectors = selectors
        if tolerations is not None:
            self.tolerations = tolerations

    @property
    def admin_access(self):
        """Gets the admin_access of this V1beta2ExactDeviceRequest.  # noqa: E501

        AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations. This is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.  # noqa: E501

        :return: The admin_access of this V1beta2ExactDeviceRequest.  # noqa: E501
        :rtype: bool
        """
        return self._admin_access

    @admin_access.setter
    def admin_access(self, admin_access):
        """Sets the admin_access of this V1beta2ExactDeviceRequest.

        AdminAccess indicates that this is a claim for administrative access to the device(s). Claims with AdminAccess are expected to be used for monitoring or other management services for a device. They ignore all ordinary claims to the device with respect to access modes and any resource allocations. This is an alpha field and requires enabling the DRAAdminAccess feature gate. Admin access is disabled if this field is unset or set to false, otherwise it is enabled.  # noqa: E501

        :param admin_access: The admin_access of this V1beta2ExactDeviceRequest.  # noqa: E501
        :type: bool
        """
        self._admin_access = admin_access

    @property
    def allocation_mode(self):
        """Gets the allocation_mode of this V1beta2ExactDeviceRequest.  # noqa: E501

        AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are: - ExactCount: This request is for a specific number of devices. This is the default. The exact number is provided in the count field. - All: This request is for all of the matching devices in a pool. At least one device must exist on the node for the allocation to succeed. Allocation will fail if some devices are already allocated, unless adminAccess is requested. If AllocationMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field. More modes may get added in the future. Clients must refuse to handle requests with unknown modes.  # noqa: E501

        :return: The allocation_mode of this V1beta2ExactDeviceRequest.  # noqa: E501
        :rtype: str
        """
        return self._allocation_mode

    @allocation_mode.setter
    def allocation_mode(self, allocation_mode):
        """Sets the allocation_mode of this V1beta2ExactDeviceRequest.

        AllocationMode and its related fields define how devices are allocated to satisfy this request. Supported values are: - ExactCount: This request is for a specific number of devices. This is the default. The exact number is provided in the count field. - All: This request is for all of the matching devices in a pool. At least one device must exist on the node for the allocation to succeed. Allocation will fail if some devices are already allocated, unless adminAccess is requested. If AllocationMode is not specified, the default mode is ExactCount. If the mode is ExactCount and count is not specified, the default count is one. Any other requests must specify this field. More modes may get added in the future. Clients must refuse to handle requests with unknown modes.  # noqa: E501

        :param allocation_mode: The allocation_mode of this V1beta2ExactDeviceRequest.  # noqa: E501
        :type: str
        """
        self._allocation_mode = allocation_mode

    @property
    def capacity(self):
        """Gets the capacity of this V1beta2ExactDeviceRequest.  # noqa: E501

        :return: The capacity of this V1beta2ExactDeviceRequest.  # noqa: E501
        :rtype: V1beta2CapacityRequirements
        """
        return self._capacity

    @capacity.setter
    def capacity(self, capacity):
        """Sets the capacity of this V1beta2ExactDeviceRequest.

        :param capacity: The capacity of this V1beta2ExactDeviceRequest.  # noqa: E501
        :type: V1beta2CapacityRequirements
        """
        self._capacity = capacity

    @property
    def count(self):
        """Gets the count of this V1beta2ExactDeviceRequest.  # noqa: E501

        Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.  # noqa: E501

        :return: The count of this V1beta2ExactDeviceRequest.  # noqa: E501
        :rtype: int
        """
        return self._count

    @count.setter
    def count(self, count):
        """Sets the count of this V1beta2ExactDeviceRequest.

        Count is used only when the count mode is \"ExactCount\". Must be greater than zero. If AllocationMode is ExactCount and this field is not specified, the default is one.  # noqa: E501

        :param count: The count of this V1beta2ExactDeviceRequest.  # noqa: E501
        :type: int
        """
        self._count = count

    @property
    def device_class_name(self):
        """Gets the device_class_name of this V1beta2ExactDeviceRequest.  # noqa: E501

        DeviceClassName references a specific DeviceClass, which can define additional configuration and selectors to be inherited by this request. A DeviceClassName is required. Administrators may use this to restrict which devices may get requested by only installing classes with selectors for permitted devices. If users are free to request anything without restrictions, then administrators can create an empty DeviceClass for users to reference.  # noqa: E501

        :return: The device_class_name of this V1beta2ExactDeviceRequest.  # noqa: E501
        :rtype: str
        """
        return self._device_class_name

    @device_class_name.setter
    def device_class_name(self, device_class_name):
        """Sets the device_class_name of this V1beta2ExactDeviceRequest.

        DeviceClassName references a specific DeviceClass, which can define additional configuration and selectors to be inherited by this request. A DeviceClassName is required. Administrators may use this to restrict which devices may get requested by only installing classes with selectors for permitted devices. If users are free to request anything without restrictions, then administrators can create an empty DeviceClass for users to reference.  # noqa: E501

        :param device_class_name: The device_class_name of this V1beta2ExactDeviceRequest.  # noqa: E501
        :type: str
        """
        # Required field: reject None when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and device_class_name is None:  # noqa: E501
            raise ValueError("Invalid value for `device_class_name`, must not be `None`")  # noqa: E501

        self._device_class_name = device_class_name

    @property
    def selectors(self):
        """Gets the selectors of this V1beta2ExactDeviceRequest.  # noqa: E501

        Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.  # noqa: E501

        :return: The selectors of this V1beta2ExactDeviceRequest.  # noqa: E501
        :rtype: list[V1beta2DeviceSelector]
        """
        return self._selectors

    @selectors.setter
    def selectors(self, selectors):
        """Sets the selectors of this V1beta2ExactDeviceRequest.

        Selectors define criteria which must be satisfied by a specific device in order for that device to be considered for this request. All selectors must be satisfied for a device to be considered.  # noqa: E501

        :param selectors: The selectors of this V1beta2ExactDeviceRequest.  # noqa: E501
        :type: list[V1beta2DeviceSelector]
        """
        self._selectors = selectors

    @property
    def tolerations(self):
        """Gets the tolerations of this V1beta2ExactDeviceRequest.  # noqa: E501

        If specified, the request's tolerations. Tolerations for NoSchedule are required to allocate a device which has a taint with that effect. The same applies to NoExecute. In addition, should any of the allocated devices get tainted with NoExecute after allocation and that effect is not tolerated, then all pods consuming the ResourceClaim get deleted to evict them. The scheduler will not let new pods reserve the claim while it has these tainted devices. Once all pods are evicted, the claim will get deallocated. The maximum number of tolerations is 16. This is an alpha field and requires enabling the DRADeviceTaints feature gate.  # noqa: E501

        :return: The tolerations of this V1beta2ExactDeviceRequest.  # noqa: E501
        :rtype: list[V1beta2DeviceToleration]
        """
        return self._tolerations

    @tolerations.setter
    def tolerations(self, tolerations):
        """Sets the tolerations of this V1beta2ExactDeviceRequest.

        If specified, the request's tolerations. Tolerations for NoSchedule are required to allocate a device which has a taint with that effect. The same applies to NoExecute. In addition, should any of the allocated devices get tainted with NoExecute after allocation and that effect is not tolerated, then all pods consuming the ResourceClaim get deleted to evict them. The scheduler will not let new pods reserve the claim while it has these tainted devices. Once all pods are evicted, the claim will get deallocated. The maximum number of tolerations is 16. This is an alpha field and requires enabling the DRADeviceTaints feature gate.  # noqa: E501

        :param tolerations: The tolerations of this V1beta2ExactDeviceRequest.  # noqa: E501
        :type: list[V1beta2DeviceToleration]
        """
        self._tolerations = tolerations

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists, and dicts via to_dict().
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1beta2ExactDeviceRequest):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1beta2ExactDeviceRequest):
            return True

        return self.to_dict() != other.to_dict()
| V1beta2ExactDeviceRequest |
python | huggingface__transformers | tests/models/encoder_decoder/test_modeling_encoder_decoder.py | {
"start": 40796,
"end": 46336
class ____(EncoderDecoderMixin, unittest.TestCase):
    """Encoder-decoder tests using BertGeneration encoder/decoder pairs."""

    def get_pretrained_model(self):
        return EncoderDecoderModel.from_encoder_decoder_pretrained(
            "google/bert_for_seq_generation_L-24_bbc_encoder", "google/bert_for_seq_generation_L-24_bbc_encoder"
        )

    def get_encoder_decoder_model(self, config, decoder_config):
        encoder_model = BertGenerationEncoder(config)
        decoder_model = BertGenerationDecoder(decoder_config)
        return encoder_model, decoder_model

    def prepare_config_and_inputs(self):
        """Build encoder/decoder configs and inputs via the BertGeneration tester."""
        model_tester = BertGenerationEncoderTester(self)
        encoder_config_and_inputs = model_tester.prepare_config_and_inputs()
        decoder_config_and_inputs = model_tester.prepare_config_and_inputs_for_decoder()
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = encoder_config_and_inputs
        (
            decoder_config,
            decoder_input_ids,
            decoder_input_mask,
            decoder_token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = decoder_config_and_inputs

        # make sure that cross attention layers are added
        decoder_config.add_cross_attention = True
        return {
            "config": config,
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "decoder_config": decoder_config,
            "decoder_input_ids": decoder_input_ids,
            "decoder_attention_mask": decoder_input_mask,
            "decoder_token_labels": decoder_token_labels,
            "encoder_hidden_states": encoder_hidden_states,
            "labels": decoder_token_labels,
        }

    @slow
    @require_deterministic_for_xpu
    def test_roberta2roberta_summarization(self):
        """Integration test: summarize two news articles and compare to per-device expectations."""
        model = EncoderDecoderModel.from_pretrained("google/roberta2roberta_L-24_bbc")
        model.to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/roberta2roberta_L-24_bbc")

        ARTICLE_PS3 = """The problem is affecting people using the older versions of the PlayStation 3, called the "Fat" model.The problem isn't affecting the newer PS3 Slim systems that have been on sale since September last year.Sony have also said they are aiming to have the problem fixed shortly but is advising some users to avoid using their console for the time being."We hope to resolve this problem within the next 24 hours," a statement reads. "In the meantime, if you have a model other than the new slim PS3, we advise that you do not use your PS3 system, as doing so may result in errors in some functionality, such as recording obtained trophies, and not being able to restore certain data."We believe we have identified that this problem is being caused by a bug in the clock functionality incorporated in the system."The PlayStation Network is used by millions of people around the world.It allows users to play their friends at games like Fifa over the internet and also do things like download software or visit online stores."""

        ARTICLE_TOSHIBA = """An independent panel appointed by Toshiba found institutional accounting irregularities, the firm said in a statement to investors. Toshiba said it "takes the situation it has caused very seriously" and that it "deeply apologised" to shareholders. The overstatement was roughly triple an initial Toshiba estimate. The probe could lead to a restatement of earnings, a board overhaul and potential action by regulators. "Within Toshiba, there was a corporate culture in which one could not go against the wishes of superiors," the report said. "Therefore, when top management presented 'challenges', division presidents, line managers and employees below them continually carried out inappropriate accounting practices to meet targets in line with the wishes of their superiors." The improper accounting practices stretched back to 2008."""

        # Expected outputs differ slightly per accelerator (punctuation spacing).
        # fmt: off
        EXPECTED_SUMMARIES_PS3 = Expectations(
            {
                ("xpu", 3): """Sony has said that a bug in its PlayStation 3 console is preventing them from using the machine as a computer .""",
                ("cuda", 7): """Sony has said that a bug in its PlayStation 3 console is preventing them from using the machine as a computer.""",
            }
        ) # fmt: on
        EXPECTED_SUMMARY_PS3 = EXPECTED_SUMMARIES_PS3.get_expectation()

        EXPECTED_SUMMARIES_TOSHIBA = Expectations(
            {
                (
                    "xpu",
                    3,
                ): """Japanese electronics giant Toshiba overstated its annual earnings by more than a third last year , according to a report .""",
                (
                    "cuda",
                    7,
                ): """Japanese electronics giant Toshiba overstated its annual earnings by more than a third last year, according to a report.""",
            }
        )
        EXPECTED_SUMMARY_TOSHIBA = EXPECTED_SUMMARIES_TOSHIBA.get_expectation()

        input_dict = tokenizer(
            [ARTICLE_PS3, ARTICLE_TOSHIBA], max_length=512, padding="max_length", return_tensors="pt"
        )
        output_ids = model.generate(
            input_dict["input_ids"].to(torch_device), attention_mask=input_dict["attention_mask"].to(torch_device)
        )
        summary = tokenizer.batch_decode(output_ids, skip_special_tokens=True)

        self.assertEqual(summary, [EXPECTED_SUMMARY_PS3, EXPECTED_SUMMARY_TOSHIBA])
@require_torch
| BertGenerationEncoderDecoderModelTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_cond_format02.py | {
"start": 315,
"end": 1121
class ____(ExcelComparisonTest):
    """
    Compare a workbook produced by XlsxWriter with a reference file from Excel.
    """

    def setUp(self):
        self.set_filename("cond_format02.xlsx")

    def test_create_file(self):
        """Write four values and apply a conditional format with a null format."""
        book = Workbook(self.got_filename)
        sheet = book.add_worksheet()

        fmt = None  # Exercise the null-format code path.

        # Populate A1:A4.
        for cell, value in zip(("A1", "A2", "A3", "A4"), (10, 20, 30, 40)):
            sheet.write(cell, value)

        sheet.conditional_format(
            "A1", {"type": "cell", "format": fmt, "criteria": "<", "value": 5}
        )

        book.close()
        self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | src/sentry/pipeline/views/nested.py | {
"start": 272,
"end": 1957
class ____[P1: Pipeline[Any, Any], P2: Pipeline[Any, Any]]:
    """
    A NestedPipelineView can be used within other pipelines to process another
    pipeline within a pipeline. Note that the nested pipeline's finish_pipeline
    will NOT be called; instead its data will be bound into the parent
    pipeline and the parent pipeline moved to the next step.

    Useful for embedding an identity authentication pipeline.
    """

    def __init__(
        self,
        bind_key: str,
        pipeline_cls: type[P2],
        provider_key: str,
        config: Mapping[str, Any] | None = None,
    ) -> None:
        super().__init__()
        self.provider_key = provider_key
        self.config = config or {}

        # Subclass the nested pipeline so that, instead of finishing on its
        # own, it hands its collected state to the parent under `bind_key`
        # and advances the parent pipeline. `bind_key` is captured by closure.
        class NestedPipeline(pipeline_cls):  # type: ignore[misc, valid-type]
            def set_parent_pipeline(self, parent_pipeline: P1) -> None:
                self.parent_pipeline = parent_pipeline

            def finish_pipeline(self) -> HttpResponseBase:
                # Bind nested state into the parent, clear the nested session
                # data, then resume the parent pipeline.
                self.parent_pipeline.bind_state(bind_key, self.fetch_state())
                self.clear_session()
                return self.parent_pipeline.next_step()

        self.pipeline_cls = NestedPipeline

    def dispatch(self, request: HttpRequest, pipeline: P1) -> HttpResponseBase:
        # Build (or resume) the nested pipeline for this request.
        nested_pipeline = self.pipeline_cls(
            organization=pipeline.organization,
            request=request,
            provider_key=self.provider_key,
            config=self.config,
        )
        nested_pipeline.set_parent_pipeline(pipeline)
        # Initialize on first entry; otherwise continue from the saved step.
        if not nested_pipeline.is_valid():
            nested_pipeline.initialize()

        return nested_pipeline.current_step()
| NestedPipelineView |
python | django__django | tests/gis_tests/relatedapp/models.py | {
"start": 1196,
"end": 1339
} | class ____(SimpleModel):
title = models.CharField(max_length=100)
author = models.ForeignKey(Author, models.CASCADE, unique=True)
| Article |
python | wandb__wandb | wandb/sdk/artifacts/_generated/add_aliases.py | {
"start": 180,
"end": 250
} | class ____(GQLResult):
result: Optional[AddAliasesResult]
| AddAliases |
python | etianen__django-reversion | tests/test_app/tests/test_models.py | {
"start": 10586,
"end": 10927
} | class ____(TestBase):
def testFieldDictFieldFields(self):
reversion.register(TestModel, fields=("name",))
with reversion.create_revision():
obj = TestModel.objects.create()
self.assertEqual(Version.objects.get_for_object(obj).get().field_dict, {
"name": "v1",
})
| FieldDictFieldsTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/elements.py | {
"start": 64246,
"end": 64396
} | class ____(ColumnElement[_T]):
"""ColumnElement where ``.key`` is non-None."""
_is_keyed_column_element = True
key: str
| KeyedColumnElement |
python | viewflow__viewflow | tests/json/test_json__integer.py | {
"start": 95,
"end": 295
} | class ____(models.Model):
data = models.JSONField(default=dict)
integer_field = jsonstore.IntegerField(null=True)
default_integer_field = jsonstore.IntegerField(default=42)
| IntegerFieldModel |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType20.py | {
"start": 199,
"end": 242
} | class ____(Generic[T]):
y: type[T]
| Parent |
python | google__pytype | pytype/tests/test_cmp1.py | {
"start": 6975,
"end": 7599
} | class ____(test_base.BaseTest):
"""Test for "x != y". Also test overloading."""
def test_concrete(self):
self.Check("""
def f(x, y):
return x != y
assert_type(f(1, 2), bool)
assert_type(f(1, "a"), bool)
assert_type(f(object(), "x"), bool)
""")
def test_overloaded(self):
ty = self.Infer("""
class Foo:
def __ne__(self, x):
return 3j
def f():
return Foo() != 3
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
def __ne__(self, x) -> complex: ...
def f() -> complex: ...
""",
)
| NeTest |
python | EpistasisLab__tpot | tpot/builtin_modules/passthrough.py | {
"start": 1938,
"end": 2379
} | class ____(TransformerMixin,BaseEstimator):
"""
A transformer returns an empty array. When combined with FeatureUnion, it can be used to skip a branch.
"""
def fit(self, X=None, y=None):
"""
Nothing to fit, just returns self.
"""
return self
def transform(self, X):
"""
returns an empty array.
"""
return np.array([]).reshape(X.shape[0],0)
| SkipTransformer |
python | spack__spack | lib/spack/spack/test/utilities.py | {
"start": 227,
"end": 1018
} | class ____:
"""Use this to get an Args object like what is passed into
a command.
Useful for emulating args in unit tests that want to check
helper functions in Spack commands. Ensures that you get all
the default arg values established by the parser.
Example usage::
install_args = SpackCommandArgs("install")("-v", "mpich")
"""
def __init__(self, command_name):
self.parser = make_argument_parser()
self.command_name = command_name
def __call__(self, *argv, **kwargs):
self.parser.add_command(self.command_name)
prepend = kwargs["global_args"] if "global_args" in kwargs else []
args, unknown = self.parser.parse_known_args(prepend + [self.command_name] + list(argv))
return args
| SpackCommandArgs |
python | Textualize__rich | rich/markdown.py | {
"start": 2236,
"end": 2434
} | class ____(MarkdownElement):
"""An unknown element.
Hopefully there will be no unknown elements, and we will have a MarkdownElement for
everything in the document.
"""
| UnknownElement |
python | TheAlgorithms__Python | maths/polynomials/single_indeterminate_operations.py | {
"start": 237,
"end": 5833
} | class ____:
def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
"""
The coefficients should be in order of degree, from smallest to largest.
>>> p = Polynomial(2, [1, 2, 3])
>>> p = Polynomial(2, [1, 2, 3, 4])
Traceback (most recent call last):
...
ValueError: The number of coefficients should be equal to the degree + 1.
"""
if len(coefficients) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1."
)
self.coefficients: list[float] = list(coefficients)
self.degree = degree
def __add__(self, polynomial_2: Polynomial) -> Polynomial:
"""
Polynomial addition
>>> p = Polynomial(2, [1, 2, 3])
>>> q = Polynomial(2, [1, 2, 3])
>>> p + q
6x^2 + 4x + 2
"""
if self.degree > polynomial_2.degree:
coefficients = self.coefficients[:]
for i in range(polynomial_2.degree + 1):
coefficients[i] += polynomial_2.coefficients[i]
return Polynomial(self.degree, coefficients)
else:
coefficients = polynomial_2.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_2.degree, coefficients)
def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
"""
Polynomial subtraction
>>> p = Polynomial(2, [1, 2, 4])
>>> q = Polynomial(2, [1, 2, 3])
>>> p - q
1x^2
"""
return self + polynomial_2 * Polynomial(0, [-1])
def __neg__(self) -> Polynomial:
"""
Polynomial negation
>>> p = Polynomial(2, [1, 2, 3])
>>> -p
- 3x^2 - 2x - 1
"""
return Polynomial(self.degree, [-c for c in self.coefficients])
def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
"""
Polynomial multiplication
>>> p = Polynomial(2, [1, 2, 3])
>>> q = Polynomial(2, [1, 2, 3])
>>> p * q
9x^4 + 12x^3 + 10x^2 + 4x + 1
"""
coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_2.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_2.coefficients[j]
)
return Polynomial(self.degree + polynomial_2.degree, coefficients)
def evaluate(self, substitution: float) -> float:
"""
Evaluates the polynomial at x.
>>> p = Polynomial(2, [1, 2, 3])
>>> p.evaluate(2)
17
"""
result: int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__(self) -> str:
"""
>>> p = Polynomial(2, [1, 2, 3])
>>> print(p)
3x^2 + 2x + 1
"""
polynomial = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
return polynomial
def __repr__(self) -> str:
"""
>>> p = Polynomial(2, [1, 2, 3])
>>> p
3x^2 + 2x + 1
"""
return self.__str__()
def derivative(self) -> Polynomial:
"""
Returns the derivative of the polynomial.
>>> p = Polynomial(2, [1, 2, 3])
>>> p.derivative()
6x + 2
"""
coefficients: list[float] = [0] * self.degree
for i in range(self.degree):
coefficients[i] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, coefficients)
def integral(self, constant: float = 0) -> Polynomial:
"""
Returns the integral of the polynomial.
>>> p = Polynomial(2, [1, 2, 3])
>>> p.integral()
1.0x^3 + 1.0x^2 + 1.0x
"""
coefficients: list[float] = [0] * (self.degree + 2)
coefficients[0] = constant
for i in range(self.degree + 1):
coefficients[i + 1] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, coefficients)
def __eq__(self, polynomial_2: object) -> bool:
"""
Checks if two polynomials are equal.
>>> p = Polynomial(2, [1, 2, 3])
>>> q = Polynomial(2, [1, 2, 3])
>>> p == q
True
"""
if not isinstance(polynomial_2, Polynomial):
return False
if self.degree != polynomial_2.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_2.coefficients[i]:
return False
return True
def __ne__(self, polynomial_2: object) -> bool:
"""
Checks if two polynomials are not equal.
>>> p = Polynomial(2, [1, 2, 3])
>>> q = Polynomial(2, [1, 2, 3])
>>> p != q
False
"""
return not self.__eq__(polynomial_2)
| Polynomial |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_vision.py | {
"start": 31345,
"end": 34451
} | class ____(Data2VecVisionPreTrainedModel):
def __init__(self, config: Data2VecVisionConfig, add_pooling_layer: bool = False) -> None:
r"""
add_pooling_layer (bool, *optional*, defaults to `False`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = Data2VecVisionEmbeddings(config)
self.encoder = Data2VecVisionEncoder(config, window_size=self.embeddings.patch_embeddings.patch_shape)
self.layernorm = (
nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
)
self.pooler = Data2VecVisionPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
bool_masked_pos: Optional[torch.BoolTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, Data2VecVisionModelOutputWithPooling]:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
embedding_output, _ = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
resolution = pixel_values.shape[2:]
encoder_outputs = self.encoder(
embedding_output,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
resolution=resolution,
return_dict=return_dict,
interpolate_pos_encoding=interpolate_pos_encoding,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
return head_outputs + encoder_outputs[1:]
return Data2VecVisionModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
# Copied from transformers.models.beit.modeling_beit.BeitPooler with Beit->Data2VecVision
| Data2VecVisionModel |
python | chroma-core__chroma | chromadb/api/types.py | {
"start": 59355,
"end": 59504
} | class ____:
fts_index: Optional[FtsIndexType] = None
string_inverted_index: Optional[StringInvertedIndexType] = None
@dataclass
| StringValueType |
python | encode__starlette | starlette/schemas.py | {
"start": 934,
"end": 4468
} | class ____:
def get_schema(self, routes: list[BaseRoute]) -> dict[str, Any]:
raise NotImplementedError() # pragma: no cover
def get_endpoints(self, routes: list[BaseRoute]) -> list[EndpointInfo]:
"""
Given the routes, yields the following information:
- path
eg: /users/
- http_method
one of 'get', 'post', 'put', 'patch', 'delete', 'options'
- func
method ready to extract the docstring
"""
endpoints_info: list[EndpointInfo] = []
for route in routes:
if isinstance(route, Mount | Host):
routes = route.routes or []
if isinstance(route, Mount):
path = self._remove_converter(route.path)
else:
path = ""
sub_endpoints = [
EndpointInfo(
path="".join((path, sub_endpoint.path)),
http_method=sub_endpoint.http_method,
func=sub_endpoint.func,
)
for sub_endpoint in self.get_endpoints(routes)
]
endpoints_info.extend(sub_endpoints)
elif not isinstance(route, Route) or not route.include_in_schema:
continue
elif inspect.isfunction(route.endpoint) or inspect.ismethod(route.endpoint):
path = self._remove_converter(route.path)
for method in route.methods or ["GET"]:
if method == "HEAD":
continue
endpoints_info.append(EndpointInfo(path, method.lower(), route.endpoint))
else:
path = self._remove_converter(route.path)
for method in ["get", "post", "put", "patch", "delete", "options"]:
if not hasattr(route.endpoint, method):
continue
func = getattr(route.endpoint, method)
endpoints_info.append(EndpointInfo(path, method.lower(), func))
return endpoints_info
def _remove_converter(self, path: str) -> str:
"""
Remove the converter from the path.
For example, a route like this:
Route("/users/{id:int}", endpoint=get_user, methods=["GET"])
Should be represented as `/users/{id}` in the OpenAPI schema.
"""
return _remove_converter_pattern.sub("}", path)
def parse_docstring(self, func_or_method: Callable[..., Any]) -> dict[str, Any]:
"""
Given a function, parse the docstring as YAML and return a dictionary of info.
"""
docstring = func_or_method.__doc__
if not docstring:
return {}
assert yaml is not None, "`pyyaml` must be installed to use parse_docstring."
# We support having regular docstrings before the schema
# definition. Here we return just the schema part from
# the docstring.
docstring = docstring.split("---")[-1]
parsed = yaml.safe_load(docstring)
if not isinstance(parsed, dict):
# A regular docstring (not yaml formatted) can return
# a simple string here, which wouldn't follow the schema.
return {}
return parsed
def OpenAPIResponse(self, request: Request) -> Response:
routes = request.app.routes
schema = self.get_schema(routes=routes)
return OpenAPIResponse(schema)
| BaseSchemaGenerator |
python | keras-team__keras | keras/src/backend/tests/compute_output_spec_test.py | {
"start": 421,
"end": 4056
} | class ____(unittest.TestCase):
def test_dynamic_batch_size(self):
x = KerasTensor(shape=(None, 3, 5))
y = backend.compute_output_spec(single_arg_test_fn, x)
self.assertEqual(y.shape, (None, 3, 10))
x1 = KerasTensor(shape=(None, 3, 5))
x2 = KerasTensor(shape=(None, 3, 5))
x3 = KerasTensor(shape=(None, 3, 5))
y = backend.compute_output_spec(
three_args_2_kwarg_test_fn, x1, x2, x3=x3
)
self.assertEqual(y.shape, (None, 5))
def test_dynamic_everything(self):
x = KerasTensor(shape=(2, None, 3))
y = backend.compute_output_spec(single_arg_test_fn, x)
self.assertEqual(y.shape, (2, None, 6))
x1 = KerasTensor(shape=(None, None, 5))
x2 = KerasTensor(shape=(None, None, 5))
x3 = KerasTensor(shape=(None, None, 5))
y = backend.compute_output_spec(
three_args_2_kwarg_test_fn, x1, x2, x3=x3
)
self.assertEqual(y.shape, (None, 5))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_sparse_to_sparse(self):
def single_arg_sparse_fn(x):
y0 = ops.transpose(x, axes=(0, 2, 1))
y1 = ops.squeeze(ops.expand_dims(x, axis=3), axis=3)
return (y0, y1)
x = KerasTensor(shape=(None, 3, 3), sparse=True)
ys = backend.compute_output_spec(single_arg_sparse_fn, x)
for y in ys:
self.assertEqual(y.shape, (None, 3, 3))
self.assertTrue(y.sparse)
def three_args_sparse_fn(x1, x2, x3=None):
y0 = ops.add(x1, x2) # sparse, sparse
y1 = ops.divide(x1, x3) # sparse, dense
y2 = ops.matmul(x1, x2) # sparse, sparse
y3 = ops.multiply(x1, x3) # sparse, dense
return (y0, y1, y2, y3)
x1 = KerasTensor(shape=(None, 3, 3), sparse=True)
x2 = KerasTensor(shape=(None, 3, 3), sparse=True)
x3 = KerasTensor(shape=(None, 3, 3), sparse=False)
ys = backend.compute_output_spec(three_args_sparse_fn, x1, x2, x3=x3)
for y in ys:
self.assertEqual(y.shape, (None, 3, 3))
self.assertTrue(y.sparse)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_sparse_to_dense(self):
def single_arg_dense_fn(x):
y0 = ops.exp(x)
return (y0,)
x = KerasTensor(shape=(None, 3, 3), sparse=True)
ys = backend.compute_output_spec(single_arg_dense_fn, x)
for y in ys:
self.assertEqual(y.shape, (None, 3, 3))
self.assertFalse(y.sparse)
def three_args_dense_fn(x1, x2, x3=None):
y0 = ops.add(x1, x2) # sparse, dense
y1 = ops.add(x2, x1) # dense, sparse
y2 = ops.concatenate([x1, x2], axis=0) # sparse, dense
y3 = ops.matmul(x1, x2) # sparse, dense
y4 = ops.matmul(x2, x1) # dense, sparse
y5 = ops.take(x2, indices=x3, axis=1) # dense, sparse
y6 = ops.divide(x1, x1) # sparse, sparse
return (y0, y1, y2, y3, y4, y5, y6)
x1 = KerasTensor(shape=(None, 3, 3), sparse=True)
x2 = KerasTensor(shape=(None, 3, 3), sparse=False)
x3 = KerasTensor(shape=(3,), dtype="int64", sparse=True)
ys = backend.compute_output_spec(three_args_dense_fn, x1, x2, x3=x3)
for y in ys:
self.assertEqual(y.shape, (None, 3, 3))
self.assertFalse(y.sparse)
| ComputeOutputSpecTest |
python | gevent__gevent | src/gevent/tests/test__issue467.py | {
"start": 416,
"end": 1205
} | class ____(greentest.TestCase):
def test(self):
finished = 0
# Wait on a group that includes one that will already be
# done, plus some that will finish as we watch
done_worker = gevent.spawn(worker, "done")
gevent.joinall((done_worker,))
workers = [gevent.spawn(worker, i) for i in range(3)]
workers.append(done_worker)
for _ in gevent.iwait(workers):
finished += 1
# Simulate doing something that causes greenlets to switch;
# a non-zero timeout is crucial
try:
gevent.sleep(0.01)
except ValueError as ex:
self.assertEqual(ex.args[0], 2)
self.assertEqual(finished, 4)
if __name__ == '__main__':
greentest.main()
| Test |
python | readthedocs__readthedocs.org | readthedocs/oauth/services/__init__.py | {
"start": 443,
"end": 579
} | class ____(SettingsOverrideObject):
_default_class = github.GitHubService
_override_setting = "OAUTH_GITHUB_SERVICE"
| GitHubService |
python | django__django | django/utils/html.py | {
"start": 5609,
"end": 8464
} | class ____(HTMLParser):
def __init__(self):
super().__init__(convert_charrefs=False)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append("&%s;" % name)
def handle_charref(self, name):
self.fed.append("&#%s;" % name)
def get_data(self):
return "".join(self.fed)
def _strip_once(value):
"""
Internal tag stripping utility used by strip_tags.
"""
s = MLStripper()
s.feed(value)
s.close()
return s.get_data()
@keep_lazy_text
def strip_tags(value):
"""Return the given HTML with all tags stripped."""
value = str(value)
for long_open_tag in long_open_tag_without_closing_re.finditer(value):
if long_open_tag.group().count("<") >= MAX_STRIP_TAGS_DEPTH:
raise SuspiciousOperation
# Note: in typical case this loop executes _strip_once twice (the second
# execution does not remove any more tags).
strip_tags_depth = 0
while "<" in value and ">" in value:
if strip_tags_depth >= MAX_STRIP_TAGS_DEPTH:
raise SuspiciousOperation
new_value = _strip_once(value)
if value.count("<") == new_value.count("<"):
# _strip_once wasn't able to detect more tags.
break
value = new_value
strip_tags_depth += 1
return value
@keep_lazy_text
def strip_spaces_between_tags(value):
"""Return the given HTML with spaces between tags removed."""
return re.sub(r">\s+<", "><", str(value))
def smart_urlquote(url):
"""Quote a URL if it isn't already quoted."""
def unquote_quote(segment):
segment = unquote(segment)
# Tilde is part of RFC 3986 Section 2.3 Unreserved Characters,
# see also https://bugs.python.org/issue16285
return quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + "~")
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
return unquote_quote(url)
# Handle IDN as percent-encoded UTF-8 octets, per WHATWG URL Specification
# section 3.5 and RFC 3986 section 3.2.2. Defer any IDNA to the user agent.
# See #36013.
netloc = unquote_quote(netloc)
if query:
# Separately unquoting key/value, so as to not mix querystring
# separators included in query values. See #22267.
query_parts = [
(unquote(q[0]), unquote(q[1]))
for q in parse_qsl(query, keep_blank_values=True)
]
# urlencode will take care of quoting
query = urlencode(query_parts)
path = unquote_quote(path)
fragment = unquote_quote(fragment)
return urlunsplit((scheme, netloc, path, query, fragment))
| MLStripper |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dlp.py | {
"start": 87479,
"end": 91207
} | class ____(GoogleCloudBaseOperator):
"""
Lists job triggers.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPListJobTriggersOperator`
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:param order_by: (Optional) Optional comma separated list of fields to order by,
followed by asc or desc postfix.
:param results_filter: (Optional) Filter used to specify a subset of results.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobTriggersListLink(),)
def __init__(
self,
*,
project_id: str = PROVIDE_PROJECT_ID,
page_size: int | None = None,
order_by: str | None = None,
results_filter: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.page_size = page_size
self.order_by = order_by
self.results_filter = results_filter
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
jobs = hook.list_job_triggers(
project_id=self.project_id,
page_size=self.page_size,
order_by=self.order_by,
results_filter=self.results_filter,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobTriggersListLink.persist(
context=context,
project_id=project_id,
)
return [JobTrigger.to_dict(j) for j in jobs]
| CloudDLPListJobTriggersOperator |
python | pypa__setuptools | setuptools/command/test.py | {
"start": 724,
"end": 1400
} | class ____(Command):
"""
Stub to warn when test command is referenced or used.
"""
description = "stub for old test command (do not use)"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
(
'test-suite=',
's',
"Run single test, case or suite (e.g. 'module.test_suite')",
),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self) -> None:
pass
def finalize_options(self) -> None:
pass
def run(self) -> NoReturn:
raise RuntimeError("Support for the test command was removed in Setuptools 72")
| _test |
python | django__django | tests/migration_test_data_persistence/models.py | {
"start": 31,
"end": 104
} | class ____(models.Model):
title = models.CharField(max_length=100)
| Book |
python | aimacode__aima-python | games.py | {
"start": 10408,
"end": 11089
} | class ____(Game):
"""Similar to Fig52Game but bigger. Useful for visualisation"""
succs = {i: dict(l=i * 3 + 1, m=i * 3 + 2, r=i * 3 + 3) for i in range(13)}
utils = dict()
def actions(self, state):
return sorted(list(self.succs.get(state, {}).keys()))
def result(self, state, move):
return self.succs[state][move]
def utility(self, state, player):
if player == 'MAX':
return self.utils[state]
else:
return -self.utils[state]
def terminal_test(self, state):
return state not in range(13)
def to_move(self, state):
return 'MIN' if state in {1, 2, 3} else 'MAX'
| Fig52Extended |
python | kamyu104__LeetCode-Solutions | Python/maximize-the-distance-between-points-on-a-square.py | {
"start": 111,
"end": 1680
} | class ____(object):
def maxDistance(self, side, points, k):
"""
:type side: int
:type points: List[List[int]]
:type k: int
:rtype: int
"""
def binary_search_right(left, right, check):
while left <= right:
mid = left + (right-left)//2
if not check(mid):
right = mid-1
else:
left = mid+1
return right
def check(d):
intervals = [(0, 0, 1)]
i = 0
for right in xrange(1, len(p)):
left, cnt = right, 1
while i < len(intervals):
l, r, c = intervals[i]
if p[right]-p[r] < d:
break
if (p[l]+4*side)-p[right] >= d:
if c+1 >= cnt:
cnt = c+1
left = l
i += 1
intervals.append((left, right, cnt))
return max(x[2] for x in intervals) >= k
p = []
for x, y in points:
if x == 0:
p.append(0*side+y)
elif y == side:
p.append(1*side+x)
elif x == side:
p.append(2*side+(side-y))
else:
p.append(3*side+(side-x))
p.sort()
return binary_search_right(1, 4*side//k, check)
# Time: O(nlogn + nlogs), s = side
# Space: O(n)
# sort, binary search, greedy, two pointers, sliding window
| Solution |
python | PrefectHQ__prefect | src/prefect/logging/loggers.py | {
"start": 917,
"end": 11268
} | class ____(LoggingAdapter):
"""
Adapter that ensures extra kwargs are passed through correctly; without this
the `extra` fields set on the adapter would overshadow any provided on a
log-by-log basis.
See https://bugs.python.org/issue32732 — the Python team has declared that this is
not a bug in the LoggingAdapter and subclassing is the intended workaround.
"""
def process(
self, msg: str, kwargs: MutableMapping[str, Any]
) -> tuple[str, MutableMapping[str, Any]]:
kwargs["extra"] = {**(self.extra or {}), **(kwargs.get("extra") or {})}
return (msg, kwargs)
def getChild(
self, suffix: str, extra: dict[str, Any] | None = None
) -> "PrefectLogAdapter":
_extra: Mapping[str, object] = extra or {}
return PrefectLogAdapter(
self.logger.getChild(suffix),
extra={
**(self.extra or {}),
**_extra,
},
)
@lru_cache()
def get_logger(name: str | None = None) -> logging.Logger:
"""
Get a `prefect` logger. These loggers are intended for internal use within the
`prefect` package.
See `get_run_logger` for retrieving loggers for use within task or flow runs.
By default, only run-related loggers are connected to the `APILogHandler`.
"""
parent_logger = logging.getLogger("prefect")
if name:
# Append the name if given but allow explicit full names e.g. "prefect.test"
# should not become "prefect.prefect.test"
if not name.startswith(parent_logger.name + "."):
logger = parent_logger.getChild(name)
else:
logger = logging.getLogger(name)
else:
logger = parent_logger
# Prevent the current API key from being logged in plain text
obfuscate_api_key_filter = ObfuscateApiKeyFilter()
logger.addFilter(obfuscate_api_key_filter)
return logger
def get_run_logger(
context: Optional["RunContext"] = None, **kwargs: Any
) -> Union[logging.Logger, LoggingAdapter]:
"""
Get a Prefect logger for the current task run or flow run.
The logger will be named either `prefect.task_runs` or `prefect.flow_runs`.
Contextual data about the run will be attached to the log records.
These loggers are connected to the `APILogHandler` by default to send log records to
the API.
Arguments:
context: A specific context may be provided as an override. By default, the
context is inferred from global state and this should not be needed.
**kwargs: Additional keyword arguments will be attached to the log records in
addition to the run metadata
Raises:
MissingContextError: If no context can be found
"""
from prefect.context import FlowRunContext, TaskRunContext
# Check for existing contexts
task_run_context = TaskRunContext.get()
flow_run_context = FlowRunContext.get()
# Apply the context override
if context:
if isinstance(context, FlowRunContext):
flow_run_context = context
elif isinstance(context, TaskRunContext):
task_run_context = context
else:
raise TypeError(
f"Received unexpected type {type(context).__name__!r} for context. "
"Expected one of 'None', 'FlowRunContext', or 'TaskRunContext'."
)
# Determine if this is a task or flow run logger
if task_run_context:
logger = task_run_logger(
task_run=task_run_context.task_run,
task=task_run_context.task,
flow_run=flow_run_context.flow_run if flow_run_context else None,
flow=flow_run_context.flow if flow_run_context else None,
**kwargs,
)
elif flow_run_context:
logger = flow_run_logger(
flow_run=flow_run_context.flow_run, # type: ignore
flow=flow_run_context.flow,
**kwargs,
)
elif (
get_logger("prefect.flow_runs").disabled
and get_logger("prefect.task_runs").disabled
):
logger = logging.getLogger("null")
logger.disabled = True
else:
raise MissingContextError("There is no active flow or task run context.")
return logger
def flow_run_logger(
flow_run: "FlowRun",
flow: Optional["Flow[Any, Any]"] = None,
**kwargs: str,
) -> PrefectLogAdapter:
"""
Create a flow run logger with the run's metadata attached.
Additional keyword arguments can be provided to attach custom data to the log
records.
If the flow run context is available, see `get_run_logger` instead.
"""
return PrefectLogAdapter(
get_logger("prefect.flow_runs"),
extra={
**{
"flow_run_name": flow_run.name if flow_run else "<unknown>",
"flow_run_id": str(flow_run.id) if flow_run else "<unknown>",
"flow_name": flow.name if flow else "<unknown>",
},
**kwargs,
},
)
def task_run_logger(
task_run: "TaskRun",
task: Optional["Task[Any, Any]"] = None,
flow_run: Optional["FlowRun"] = None,
flow: Optional["Flow[Any, Any]"] = None,
**kwargs: Any,
) -> LoggingAdapter:
"""
Create a task run logger with the run's metadata attached.
Additional keyword arguments can be provided to attach custom data to the log
records.
If the task run context is available, see `get_run_logger` instead.
If only the flow run context is available, it will be used for default values
of `flow_run` and `flow`.
"""
from prefect.context import FlowRunContext
if not flow_run or not flow:
flow_run_context = FlowRunContext.get()
if flow_run_context:
flow_run = flow_run or flow_run_context.flow_run
flow = flow or flow_run_context.flow
return PrefectLogAdapter(
get_logger("prefect.task_runs"),
extra={
**{
"task_run_id": str(task_run.id),
"flow_run_id": str(task_run.flow_run_id),
"task_run_name": task_run.name,
"task_name": task.name if task else "<unknown>",
"flow_run_name": flow_run.name if flow_run else "<unknown>",
"flow_name": flow.name if flow else "<unknown>",
},
**kwargs,
},
)
def get_worker_logger(
    worker: "BaseWorker[Any, Any, Any]", name: Optional[str] = None
) -> logging.Logger | LoggingAdapter:
    """
    Create a worker logger with the worker's metadata attached.

    If the worker has a ``backend_id``, it will be attached to the log records;
    otherwise a basic logger is returned.

    Args:
        worker: The worker to create a logger for.
        name: Optional logger name; defaults to one derived from the worker's
            type and lowercased name.

    Returns:
        A `PrefectLogAdapter` carrying the worker id, or a plain logger when
        the worker has no ``backend_id``.
    """
    worker_log_name = name or f"workers.{worker.__class__.type}.{worker.name.lower()}"

    # `getattr` guards against worker implementations without the attribute.
    worker_id = getattr(worker, "backend_id", None)
    if worker_id:
        return PrefectLogAdapter(
            get_logger(worker_log_name),
            extra={
                # Reuse the value already fetched above rather than re-reading
                # the attribute (which also keeps the getattr guard meaningful).
                "worker_id": str(worker_id),
            },
        )
    else:
        return get_logger(worker_log_name)
@contextmanager
def disable_logger(name: str):
    """
    Temporarily disable the logger with the given name.

    The logger's ``disabled`` flag is restored to its prior value when the
    context manager exits, even if an exception was raised inside the block.
    """
    target = logging.getLogger(name=name)
    # Remember the current state so nesting / pre-disabled loggers round-trip.
    previous_state = target.disabled
    try:
        target.disabled = True
        yield
    finally:
        target.disabled = previous_state
@contextmanager
def disable_run_logger():
    """
    Disable both the ``prefect.flow_runs`` and ``prefect.task_runs`` loggers
    for the duration of the block.

    Both loggers are returned to their original state on exit.
    """
    with disable_logger("prefect.flow_runs"):
        with disable_logger("prefect.task_runs"):
            yield
def print_as_log(*args: Any, **kwargs: Any) -> None:
    """
    A patch for `print` to send printed messages to the Prefect run logger.

    If no run is active, `print` will behave as if it were not patched.

    If `print` sends data to a file other than `sys.stdout` or `sys.stderr`, it
    will not be forwarded to the Prefect logger either.
    """
    from prefect.context import FlowRunContext, TaskRunContext

    flow_context = FlowRunContext.get()
    task_context = TaskRunContext.get()

    # When both contexts exist, decide which one represents the currently
    # executing code by comparing flow run ids:
    # - Regular task:     flow_context.flow_run.id == task_context.task_run.flow_run_id
    # - Subflow in task:  flow_context.flow_run.id != task_context.task_run.flow_run_id
    # In the subflow case the FlowRunContext is the active one.
    if flow_context and task_context:
        if (
            flow_context.flow_run
            and flow_context.flow_run.id != task_context.task_run.flow_run_id
        ):
            active_context = flow_context
        else:
            active_context = task_context
    else:
        active_context = flow_context or task_context

    # Fall back to the real `print` when logging of prints is not applicable.
    if (
        not active_context
        or not active_context.log_prints
        or kwargs.get("file") not in {None, sys.stdout, sys.stderr}
    ):
        return print(*args, **kwargs)

    logger = get_run_logger()

    # Render via `print` into an in-memory buffer so we don't have to
    # re-implement its formatting (sep/end handling, etc.).
    capture = io.StringIO()
    kwargs["file"] = capture
    print(*args, **kwargs)

    # Strip trailing whitespace so the log record doesn't duplicate newlines.
    logger.info(capture.getvalue().rstrip())
@contextmanager
def patch_print():
    """
    Temporarily replace the Python builtin `print` with `print_as_log`.

    The original builtin is restored on exit, even if an exception occurs.
    """
    import builtins

    builtin_print = builtins.print
    try:
        builtins.print = print_as_log
        yield
    finally:
        builtins.print = builtin_print
| PrefectLogAdapter |
python | tqdm__tqdm | tqdm/utils.py | {
"start": 5435,
"end": 6874
} | class ____(ObjectWrapper):
"""
Disable the given `tqdm_instance` upon `write()` or `flush()` errors.
"""
@staticmethod
def disable_on_exception(tqdm_instance, func):
"""
Quietly set `tqdm_instance.miniters=inf` if `func` raises `errno=5`.
"""
tqdm_instance = proxy(tqdm_instance)
def inner(*args, **kwargs):
try:
return func(*args, **kwargs)
except OSError as e:
if e.errno != 5:
raise
try:
tqdm_instance.miniters = float('inf')
except ReferenceError:
pass
except ValueError as e:
if 'closed' not in str(e):
raise
try:
tqdm_instance.miniters = float('inf')
except ReferenceError:
pass
return inner
def __init__(self, wrapped, tqdm_instance):
super().__init__(wrapped)
if hasattr(wrapped, 'write'):
self.wrapper_setattr(
'write', self.disable_on_exception(tqdm_instance, wrapped.write))
if hasattr(wrapped, 'flush'):
self.wrapper_setattr(
'flush', self.disable_on_exception(tqdm_instance, wrapped.flush))
def __eq__(self, other):
return self._wrapped == getattr(other, '_wrapped', other)
| DisableOnWriteError |
python | pytorch__pytorch | test/package/package_a/fake_interface.py | {
"start": 61,
"end": 173
} | class ____(torch.nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
| ModuleInterface |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/ROI.py | {
"start": 69518,
"end": 69957
} | class ____(ROI):
def __init__(self, pos, size, **args):
ROI.__init__(self, pos, size, **args)
self.addTranslateHandle([0.5, 0.5])
self.addScaleHandle([1, 1], [0, 0])
self.addScaleHandle([0, 0], [1, 1])
self.addScaleRotateHandle([1, 0.5], [0.5, 0.5])
self.addScaleHandle([0.5, 1], [0.5, 0.5])
self.addRotateHandle([1, 0], [0, 0])
self.addRotateHandle([0, 1], [1, 1])
| TestROI |
python | Textualize__textual | src/textual/widgets/_markdown.py | {
"start": 25419,
"end": 45386
} | class ____(Widget):
DEFAULT_CSS = """
Markdown {
height: auto;
padding: 0 2 0 2;
layout: vertical;
color: $foreground;
overflow-y: hidden;
MarkdownBlock {
&:dark > .code_inline {
background: $warning 10%;
color: $text-warning 95%;
}
&:light > .code_inline {
background: $error 5%;
color: $text-error 95%;
}
& > .em {
text-style: italic;
}
& > .strong {
text-style: bold;
}
& > .s {
text-style: strike;
}
}
}
"""
BULLETS = ["• ", "▪ ", "‣ ", "⭑ ", "◦ "]
"""Unicode bullets used for unordered lists."""
BLOCKS: dict[str, type[MarkdownBlock]] = {
"h1": MarkdownH1,
"h2": MarkdownH2,
"h3": MarkdownH3,
"h4": MarkdownH4,
"h5": MarkdownH5,
"h6": MarkdownH6,
"hr": MarkdownHorizontalRule,
"paragraph_open": MarkdownParagraph,
"blockquote_open": MarkdownBlockQuote,
"bullet_list_open": MarkdownBulletList,
"ordered_list_open": MarkdownOrderedList,
"list_item_ordered_open": MarkdownOrderedListItem,
"list_item_unordered_open": MarkdownUnorderedListItem,
"table_open": MarkdownTable,
"tbody_open": MarkdownTBody,
"thead_open": MarkdownTHead,
"tr_open": MarkdownTR,
"th_open": MarkdownTH,
"td_open": MarkdownTD,
"fence": MarkdownFence,
"code_block": MarkdownFence,
}
"""Mapping of block names on to a widget class."""
def __init__(
self,
markdown: str | None = None,
*,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
parser_factory: Callable[[], MarkdownIt] | None = None,
open_links: bool = True,
):
"""A Markdown widget.
Args:
markdown: String containing Markdown or None to leave blank for now.
name: The name of the widget.
id: The ID of the widget in the DOM.
classes: The CSS classes of the widget.
parser_factory: A factory function to return a configured MarkdownIt instance. If `None`, a "gfm-like" parser is used.
open_links: Open links automatically. If you set this to `False`, you can handle the [`LinkClicked`][textual.widgets.markdown.Markdown.LinkClicked] events.
"""
super().__init__(name=name, id=id, classes=classes)
self._initial_markdown: str | None = markdown
self._markdown = ""
self._parser_factory = parser_factory
self._table_of_contents: TableOfContentsType | None = None
self._open_links = open_links
self._last_parsed_line = 0
self._theme = ""
@property
def table_of_contents(self) -> TableOfContentsType:
"""The document's table of contents."""
if self._table_of_contents is None:
self._table_of_contents = [
(header.LEVEL, header._content.plain, header.id)
for header in self.children
if isinstance(header, MarkdownHeader)
]
return self._table_of_contents
class TableOfContentsUpdated(Message):
"""The table of contents was updated."""
def __init__(
self, markdown: Markdown, table_of_contents: TableOfContentsType
) -> None:
super().__init__()
self.markdown: Markdown = markdown
"""The `Markdown` widget associated with the table of contents."""
self.table_of_contents: TableOfContentsType = table_of_contents
"""Table of contents."""
@property
def control(self) -> Markdown:
"""The `Markdown` widget associated with the table of contents.
This is an alias for [`TableOfContentsUpdated.markdown`][textual.widgets.Markdown.TableOfContentsSelected.markdown]
and is used by the [`on`][textual.on] decorator.
"""
return self.markdown
class TableOfContentsSelected(Message):
"""An item in the TOC was selected."""
def __init__(self, markdown: Markdown, block_id: str) -> None:
super().__init__()
self.markdown: Markdown = markdown
"""The `Markdown` widget where the selected item is."""
self.block_id: str = block_id
"""ID of the block that was selected."""
@property
def control(self) -> Markdown:
"""The `Markdown` widget where the selected item is.
This is an alias for [`TableOfContentsSelected.markdown`][textual.widgets.Markdown.TableOfContentsSelected.markdown]
and is used by the [`on`][textual.on] decorator.
"""
return self.markdown
class LinkClicked(Message):
"""A link in the document was clicked."""
def __init__(self, markdown: Markdown, href: str) -> None:
super().__init__()
self.markdown: Markdown = markdown
"""The `Markdown` widget containing the link clicked."""
self.href: str = unquote(href)
"""The link that was selected."""
@property
def control(self) -> Markdown:
"""The `Markdown` widget containing the link clicked.
This is an alias for [`LinkClicked.markdown`][textual.widgets.Markdown.LinkClicked.markdown]
and is used by the [`on`][textual.on] decorator.
"""
return self.markdown
@property
def source(self) -> str:
"""The markdown source."""
return self._markdown or ""
def get_block_class(self, block_name: str) -> type[MarkdownBlock]:
"""Get the block widget class.
Args:
block_name: Name of the block.
Returns:
A MarkdownBlock class
"""
return self.BLOCKS[block_name]
async def _on_mount(self, _: Mount) -> None:
initial_markdown = self._initial_markdown
self._initial_markdown = None
await self.update(initial_markdown or "")
if initial_markdown is None:
self.post_message(
Markdown.TableOfContentsUpdated(
self, self._table_of_contents
).set_sender(self)
)
@classmethod
def get_stream(cls, markdown: Markdown) -> MarkdownStream:
"""Get a [MarkdownStream][textual.widgets.markdown.MarkdownStream] instance to stream Markdown in the background.
If you append to the Markdown document many times a second, it is possible the widget won't
be able to update as fast as you write (occurs around 20 appends per second). It will still
work, but the user will have to wait for the UI to catch up after the document has be retrieved.
Using a [MarkdownStream][textual.widgets.markdown.MarkdownStream] will combine several updates in to one
as necessary to keep up with the incoming data.
example:
```python
# self.get_chunk is a hypothetical method that retrieves a
# markdown fragment from the network
@work
async def stream_markdown(self) -> None:
markdown_widget = self.query_one(Markdown)
container = self.query_one(VerticalScroll)
container.anchor()
stream = Markdown.get_stream(markdown_widget)
try:
while (chunk:= await self.get_chunk()) is not None:
await stream.write(chunk)
finally:
await stream.stop()
```
Args:
markdown: A [Markdown][textual.widgets.Markdown] widget instance.
Returns:
The Markdown stream object.
"""
updater = MarkdownStream(markdown)
updater.start()
return updater
def on_markdown_link_clicked(self, event: LinkClicked) -> None:
if self._open_links:
self.app.open_url(event.href)
@staticmethod
def sanitize_location(location: str) -> tuple[Path, str]:
"""Given a location, break out the path and any anchor.
Args:
location: The location to sanitize.
Returns:
A tuple of the path to the location cleaned of any anchor, plus
the anchor (or an empty string if none was found).
"""
location, _, anchor = location.partition("#")
return Path(location), anchor
def goto_anchor(self, anchor: str) -> bool:
"""Try and find the given anchor in the current document.
Args:
anchor: The anchor to try and find.
Note:
The anchor is found by looking at all of the headings in the
document and finding the first one whose slug matches the
anchor.
Note that the slugging method used is similar to that found on
GitHub.
Returns:
True when the anchor was found in the current document, False otherwise.
"""
if not self._table_of_contents or not isinstance(self.parent, Widget):
return False
unique = TrackedSlugs()
for _, title, header_id in self._table_of_contents:
if unique.slug(title) == anchor:
self.query_one(f"#{header_id}").scroll_visible(top=True)
return True
return False
async def load(self, path: Path) -> None:
"""Load a new Markdown document.
Args:
path: Path to the document.
Raises:
OSError: If there was some form of error loading the document.
Note:
The exceptions that can be raised by this method are all of
those that can be raised by calling [`Path.read_text`][pathlib.Path.read_text].
"""
path, anchor = self.sanitize_location(str(path))
data = await asyncio.get_running_loop().run_in_executor(
None, partial(path.read_text, encoding="utf-8")
)
await self.update(data)
if anchor:
self.goto_anchor(anchor)
def unhandled_token(self, token: Token) -> MarkdownBlock | None:
"""Process an unhandled token.
Args:
token: The MarkdownIt token to handle.
Returns:
Either a widget to be added to the output, or `None`.
"""
return None
def _parse_markdown(self, tokens: Iterable[Token]) -> Iterable[MarkdownBlock]:
"""Create a stream of MarkdownBlock widgets from markdown.
Args:
tokens: List of tokens.
Yields:
Widgets for mounting.
"""
stack: list[MarkdownBlock] = []
stack_append = stack.append
get_block_class = self.get_block_class
for token in tokens:
token_type = token.type
if token_type == "heading_open":
stack_append(get_block_class(token.tag)(self, token))
elif token_type == "hr":
yield get_block_class("hr")(self, token)
elif token_type == "paragraph_open":
stack_append(get_block_class("paragraph_open")(self, token))
elif token_type == "blockquote_open":
stack_append(get_block_class("blockquote_open")(self, token))
elif token_type == "bullet_list_open":
stack_append(get_block_class("bullet_list_open")(self, token))
elif token_type == "ordered_list_open":
stack_append(get_block_class("ordered_list_open")(self, token))
elif token_type == "list_item_open":
if token.info:
stack_append(
get_block_class("list_item_ordered_open")(
self, token, token.info
)
)
else:
item_count = sum(
1
for block in stack
if isinstance(block, MarkdownUnorderedListItem)
)
stack_append(
get_block_class("list_item_unordered_open")(
self,
token,
self.BULLETS[item_count % len(self.BULLETS)],
)
)
elif token_type == "table_open":
stack_append(get_block_class("table_open")(self, token))
elif token_type == "tbody_open":
stack_append(get_block_class("tbody_open")(self, token))
elif token_type == "thead_open":
stack_append(get_block_class("thead_open")(self, token))
elif token_type == "tr_open":
stack_append(get_block_class("tr_open")(self, token))
elif token_type == "th_open":
stack_append(get_block_class("th_open")(self, token))
elif token_type == "td_open":
stack_append(get_block_class("td_open")(self, token))
elif token_type.endswith("_close"):
block = stack.pop()
if token.type == "heading_close":
block.id = (
f"heading-{slug_for_tcss_id(block._content.plain)}-{id(block)}"
)
if stack:
stack[-1]._blocks.append(block)
else:
yield block
elif token_type == "inline":
stack[-1].build_from_token(token)
elif token_type in ("fence", "code_block"):
fence_class = get_block_class(token_type)
assert issubclass(fence_class, MarkdownFence)
fence = fence_class(self, token, token.content.rstrip())
if stack:
stack[-1]._blocks.append(fence)
else:
yield fence
else:
external = self.unhandled_token(token)
if external is not None:
if stack:
stack[-1]._blocks.append(external)
else:
yield external
def _build_from_source(self, markdown: str) -> list[MarkdownBlock]:
"""Build blocks from markdown source.
Args:
markdown: A Markdown document, or partial document.
Returns:
A list of MarkdownBlock instances.
"""
parser = (
MarkdownIt("gfm-like")
if self._parser_factory is None
else self._parser_factory()
)
tokens = parser.parse(markdown)
return list(self._parse_markdown(tokens))
def update(self, markdown: str) -> AwaitComplete:
"""Update the document with new Markdown.
Args:
markdown: A string containing Markdown.
Returns:
An optionally awaitable object. Await this to ensure that all children have been mounted.
"""
self._theme = self.app.theme
parser = (
MarkdownIt("gfm-like")
if self._parser_factory is None
else self._parser_factory()
)
markdown_block = self.query("MarkdownBlock")
self._markdown = markdown
self._table_of_contents = None
async def await_update() -> None:
"""Update in batches."""
BATCH_SIZE = 200
batch: list[MarkdownBlock] = []
# Lock so that you can't update with more than one document simultaneously
async with self.lock:
tokens = await asyncio.get_running_loop().run_in_executor(
None, parser.parse, markdown
)
# Remove existing blocks for the first batch only
removed: bool = False
async def mount_batch(batch: list[MarkdownBlock]) -> None:
"""Mount a single match of blocks.
Args:
batch: A list of blocks to mount.
"""
nonlocal removed
if removed:
await self.mount_all(batch)
else:
with self.app.batch_update():
await markdown_block.remove()
await self.mount_all(batch)
removed = True
for block in self._parse_markdown(tokens):
batch.append(block)
if len(batch) == BATCH_SIZE:
await mount_batch(batch)
batch.clear()
if batch:
await mount_batch(batch)
if not removed:
await markdown_block.remove()
lines = markdown.splitlines()
self._last_parsed_line = len(lines) - (1 if lines and lines[-1] else 0)
self.post_message(
Markdown.TableOfContentsUpdated(
self, self.table_of_contents
).set_sender(self)
)
return AwaitComplete(await_update())
def append(self, markdown: str) -> AwaitComplete:
"""Append to markdown.
Args:
markdown: A fragment of markdown to be appended.
Returns:
An optionally awaitable object. Await this to ensure that the markdown has been append by the next line.
"""
parser = (
MarkdownIt("gfm-like")
if self._parser_factory is None
else self._parser_factory()
)
self._markdown = self.source + markdown
updated_source = "".join(
self._markdown.splitlines(keepends=True)[self._last_parsed_line :]
)
async def await_append() -> None:
"""Append new markdown widgets."""
async with self.lock:
tokens = parser.parse(updated_source)
existing_blocks = [
child for child in self.children if isinstance(child, MarkdownBlock)
]
start_line = self._last_parsed_line
for token in reversed(tokens):
if token.map is not None and token.level == 0:
self._last_parsed_line += token.map[0]
break
new_blocks = list(self._parse_markdown(tokens))
any_headers = any(
isinstance(block, MarkdownHeader) for block in new_blocks
)
for block in new_blocks:
start, end = block.source_range
block.source_range = (
start + start_line,
end + start_line,
)
with self.app.batch_update():
if existing_blocks and new_blocks:
last_block = existing_blocks[-1]
last_block.source_range = new_blocks[0].source_range
try:
await last_block._update_from_block(new_blocks[0])
except IndexError:
pass
else:
new_blocks = new_blocks[1:]
if new_blocks:
await self.mount_all(new_blocks)
if any_headers:
self._table_of_contents = None
self.post_message(
Markdown.TableOfContentsUpdated(
self, self.table_of_contents
).set_sender(self)
)
return AwaitComplete(await_append())
| Markdown |
python | sanic-org__sanic | sanic/logging/filter.py | {
"start": 17,
"end": 298
} | class ____(logging.Filter):
"""
Filter log records based on verbosity level.
"""
verbosity: int = 0
def filter(self, record: logging.LogRecord) -> bool:
verbosity = getattr(record, "verbosity", 0)
return verbosity <= self.verbosity
| VerbosityFilter |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_version_slug.py | {
"start": 225,
"end": 891
} | class ____(TestCase):
pattern = re.compile("^{pattern}$".format(pattern=VERSION_SLUG_REGEX))
def test_single_char(self):
self.assertTrue(self.pattern.match("v"))
self.assertFalse(self.pattern.match("."))
def test_trailing_punctuation(self):
self.assertTrue(self.pattern.match("with_"))
self.assertTrue(self.pattern.match("with."))
self.assertTrue(self.pattern.match("with-"))
self.assertFalse(self.pattern.match("with!"))
def test_multiple_words(self):
self.assertTrue(self.pattern.match("release-1.0"))
self.assertTrue(self.pattern.match("fix_this-and-that."))
| VersionSlugPatternTests |
python | django-extensions__django-extensions | django_extensions/management/jobs.py | {
"start": 567,
"end": 613
} | class ____(BaseJob):
when = "daily"
| DailyJob |
python | Textualize__textual | tests/css/test_parse.py | {
"start": 27293,
"end": 27881
} | class ____:
def test_valid_layout_name(self):
css = "#some-widget { layout: vertical; }"
stylesheet = Stylesheet()
stylesheet.add_source(css)
styles = stylesheet.rules[0].styles
assert isinstance(styles.layout, VerticalLayout)
def test_invalid_layout_name(self):
css = "#some-widget { layout: invalidlayout; }"
stylesheet = Stylesheet()
with pytest.raises(StylesheetParseError) as ex:
stylesheet.add_source(css)
stylesheet.parse()
assert ex.value.errors is not None
| TestParseLayout |
python | huggingface__transformers | src/transformers/models/jamba/modeling_jamba.py | {
"start": 36606,
"end": 44094
} | class ____(JambaPreTrainedModel):
def __init__(self, config: JambaConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
decoder_layers = []
for i in range(config.num_hidden_layers):
layer_class = ALL_DECODER_LAYER_TYPES[config.layers_block_type[i]]
decoder_layers.append(layer_class(config, layer_idx=i))
self.layers = nn.ModuleList(decoder_layers)
self.final_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[HybridMambaAttentionDynamicCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = HybridMambaAttentionDynamicCache(
config=self.config,
batch_size=inputs_embeds.shape[0],
dtype=inputs_embeds.dtype,
device=inputs_embeds.device,
)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
mamba_mask = self._update_mamba_mask(attention_mask, cache_position)
hidden_states = inputs_embeds
for decoder_layer in self.layers:
layer_mask = mamba_mask if isinstance(decoder_layer, JambaMambaDecoderLayer) else causal_mask
hidden_states = decoder_layer(
hidden_states,
attention_mask=layer_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.final_layernorm(hidden_states)
if past_key_values and not past_key_values.has_previous_state:
past_key_values.has_previous_state = True
return MoeModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
def _update_mamba_mask(self, attention_mask, cache_position):
"""
No need for zeroing states when
1. Cached forward
2. Attending to all inputs
"""
mamba_mask = attention_mask
if (cache_position is not None and cache_position[0] > 0) or (
attention_mask is not None and torch.all(attention_mask == 1)
):
mamba_mask = None
return mamba_mask
def load_balancing_loss_func(
gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
num_experts: Optional[int] = None,
top_k=2,
attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
r"""
Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
experts is too unbalanced.
Args:
gate_logits:
Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
shape [batch_size X sequence_length, num_experts].
num_experts:
Number of experts
top_k:
The number of experts to route per-token, can be also interpreted as the `top-k` routing
parameter.
attention_mask (`torch.Tensor`, *optional*):
The attention_mask used in forward function
shape [batch_size X sequence_length] if not None.
Returns:
The auxiliary loss.
"""
if gate_logits is None or not isinstance(gate_logits, tuple):
return 0
if isinstance(gate_logits, tuple):
compute_device = gate_logits[0].device
concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
_, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
if attention_mask is None:
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.mean(routing_weights, dim=0)
else:
batch_size, sequence_length = attention_mask.shape
num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
# Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
expert_attention_mask = (
attention_mask[None, :, :, None, None]
.expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
.reshape(-1, top_k, num_experts)
.to(compute_device)
)
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
expert_attention_mask, dim=0
)
# Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
router_per_expert_attention_mask = (
attention_mask[None, :, :, None]
.expand((num_hidden_layers, batch_size, sequence_length, num_experts))
.reshape(-1, num_experts)
.to(compute_device)
)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
router_per_expert_attention_mask, dim=0
)
overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
return overall_loss * num_experts
@auto_docstring
| JambaModel |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 41138,
"end": 41953
} | class ____(Char):
"""
The font metrics need to be dealt with differently for accents,
since they are already offset correctly from the baseline in
TrueType fonts.
"""
def _update_metrics(self) -> None:
metrics = self._metrics = self.fontset.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
self.width = metrics.xmax - metrics.xmin
self.height = metrics.ymax - metrics.ymin
self.depth = 0
def shrink(self) -> None:
super().shrink()
self._update_metrics()
def render(self, output: Output, x: float, y: float) -> None:
self.fontset.render_glyph(
output, x - self._metrics.xmin, y + self._metrics.ymin,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
| Accent |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 984581,
"end": 985552
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for Repository."""
__schema__ = github_schema
__field_names__ = ("edges", "is_over_limit", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("StarredRepositoryEdge"), graphql_name="edges")
"""A list of edges."""
is_over_limit = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isOverLimit")
"""Is the list of stars for this user truncated? This is true for
users that have many stars.
"""
nodes = sgqlc.types.Field(sgqlc.types.list_of("Repository"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| StarredRepositoryConnection |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 9487,
"end": 9797
} | class ____(Message):
message = "'...' %% ... has %d placeholder(s) but %d substitution(s)"
def __init__(self, filename, loc, n_placeholders, n_substitutions):
Message.__init__(self, filename, loc)
self.message_args = (n_placeholders, n_substitutions)
| PercentFormatPositionalCountMismatch |
python | tensorflow__tensorflow | tensorflow/python/data/ops/from_sparse_tensor_slices_op.py | {
"start": 1208,
"end": 2464
} | class ____(dataset_ops.DatasetSource):
"""A `Dataset` that splits a rank-N `tf.sparse.SparseTensor` into its rows."""
def __init__(self, sparse_tensor):
"""See `Dataset.from_sparse_tensor_slices()` for details."""
if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):
raise TypeError(f"Invalid `sparse_tensor`. `sparse_tensor` must be a "
f"`tf.sparse.SparseTensor`. Got {type(sparse_tensor)}.")
self._sparse_tensor = sparse_tensor
indices_shape = self._sparse_tensor.indices.get_shape()
shape_shape = self._sparse_tensor.dense_shape.get_shape()
rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1)
self._structure = (tensor_spec.TensorSpec([None, rank], dtypes.int64),
tensor_spec.TensorSpec([None],
self._sparse_tensor.dtype),
tensor_spec.TensorSpec([rank], dtypes.int64))
variant_tensor = gen_dataset_ops.sparse_tensor_slice_dataset(
self._sparse_tensor.indices, self._sparse_tensor.values,
self._sparse_tensor.dense_shape)
super().__init__(variant_tensor)
@property
def element_spec(self):
return self._structure
| _SparseTensorSliceDataset |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 144915,
"end": 147259
} | class ____(GeneratedAirbyteSource):
class Unencrypted:
@public
def __init__(
self,
):
self.encryption_method = "unencrypted"
class TLSEncryptedVerifyCertificate:
@public
def __init__(self, ssl_certificate: str, key_store_password: Optional[str] = None):
self.encryption_method = "encrypted_verify_certificate"
self.ssl_certificate = check.str_param(ssl_certificate, "ssl_certificate")
self.key_store_password = check.opt_str_param(key_store_password, "key_store_password")
@public
def __init__(
self,
name: str,
host: str,
port: int,
db: str,
username: str,
password: str,
encryption: Union["Db2Source.Unencrypted", "Db2Source.TLSEncryptedVerifyCertificate"],
jdbc_url_params: Optional[str] = None,
):
"""Airbyte Source for Db2.
Documentation can be found at https://docs.airbyte.com/integrations/sources/db2
Args:
name (str): The name of the destination.
host (str): Host of the Db2.
port (int): Port of the database.
db (str): Name of the database.
username (str): Username to use to access the database.
password (str): Password associated with the username.
jdbc_url_params (Optional[str]): Additional properties to pass to the JDBC URL string when connecting to the database formatted as 'key=value' pairs separated by the symbol '&'. (example: key1=value1&key2=value2&key3=value3).
encryption (Union[Db2Source.Unencrypted, Db2Source.TLSEncryptedVerifyCertificate]): Encryption method to use when communicating with the database
"""
self.host = check.str_param(host, "host")
self.port = check.int_param(port, "port")
self.db = check.str_param(db, "db")
self.username = check.str_param(username, "username")
self.password = check.str_param(password, "password")
self.jdbc_url_params = check.opt_str_param(jdbc_url_params, "jdbc_url_params")
self.encryption = check.inst_param(
encryption,
"encryption",
(Db2Source.Unencrypted, Db2Source.TLSEncryptedVerifyCertificate),
)
super().__init__("Db2", name)
| Db2Source |
python | euske__pdfminer | pdfminer/pdffont.py | {
"start": 22358,
"end": 23237
} | class ____(PDFSimpleFont):
def __init__(self, rsrcmgr, spec):
firstchar = int_value(spec.get('FirstChar', 0))
#lastchar = int_value(spec.get('LastChar', 0))
widths = list_value(spec.get('Widths', [0]*256))
widths = dict((i+firstchar, w) for (i, w) in enumerate(widths))
if 'FontDescriptor' in spec:
descriptor = dict_value(spec['FontDescriptor'])
else:
descriptor = {'Ascent': 0, 'Descent': 0,
'FontBBox': spec['FontBBox']}
PDFSimpleFont.__init__(self, descriptor, widths, spec)
self.matrix = tuple(list_value(spec.get('FontMatrix')))
(_, self.descent, _, self.ascent) = self.bbox
(self.hscale, self.vscale) = apply_matrix_norm(self.matrix, (1, 1))
return
def __repr__(self):
return '<PDFType3Font>'
# PDFCIDFont
| PDFType3Font |
python | openai__openai-python | src/openai/types/beta/realtime/session_update_event_param.py | {
"start": 4770,
"end": 10428
} | class ____(TypedDict, total=False):
client_secret: SessionClientSecret
"""Configuration options for the generated client secret."""
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
"""The format of input audio.
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
byte order.
"""
input_audio_noise_reduction: SessionInputAudioNoiseReduction
"""Configuration for input audio noise reduction.
This can be set to `null` to turn off. Noise reduction filters audio added to
the input audio buffer before it is sent to VAD and the model. Filtering the
audio can improve VAD and turn detection accuracy (reducing false positives) and
model performance by improving perception of the input audio.
"""
input_audio_transcription: SessionInputAudioTranscription
"""
Configuration for input audio transcription, defaults to off and can be set to
`null` to turn off once on. Input audio transcription is not native to the
model, since the model consumes audio directly. Transcription runs
asynchronously through
[the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
and should be treated as guidance of input audio content rather than precisely
what the model heard. The client can optionally set the language and prompt for
transcription, these offer additional guidance to the transcription service.
"""
instructions: str
"""The default system instructions (i.e.
system message) prepended to model calls. This field allows the client to guide
the model on desired responses. The model can be instructed on response content
and format, (e.g. "be extremely succinct", "act friendly", "here are examples of
good responses") and on audio behavior (e.g. "talk quickly", "inject emotion
into your voice", "laugh frequently"). The instructions are not guaranteed to be
followed by the model, but they provide guidance to the model on the desired
behavior.
Note that the server sets default instructions which will be used if this field
is not set and are visible in the `session.created` event at the start of the
session.
"""
max_response_output_tokens: Union[int, Literal["inf"]]
"""
Maximum number of output tokens for a single assistant response, inclusive of
tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
`inf` for the maximum available tokens for a given model. Defaults to `inf`.
"""
modalities: List[Literal["text", "audio"]]
"""The set of modalities the model can respond with.
To disable audio, set this to ["text"].
"""
model: Literal[
"gpt-realtime",
"gpt-realtime-2025-08-28",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-realtime-preview-2025-06-03",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
]
"""The Realtime model used for this session."""
output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
"""The format of output audio.
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
sampled at a rate of 24kHz.
"""
speed: float
"""The speed of the model's spoken response.
1.0 is the default speed. 0.25 is the minimum speed. 1.5 is the maximum speed.
This value can only be changed in between model turns, not while a response is
in progress.
"""
temperature: float
"""Sampling temperature for the model, limited to [0.6, 1.2].
For audio models a temperature of 0.8 is highly recommended for best
performance.
"""
tool_choice: str
"""How the model chooses tools.
Options are `auto`, `none`, `required`, or specify a function.
"""
tools: Iterable[SessionTool]
"""Tools (functions) available to the model."""
tracing: SessionTracing
"""Configuration options for tracing.
Set to null to disable tracing. Once tracing is enabled for a session, the
configuration cannot be modified.
`auto` will create a trace for the session with default values for the workflow
name, group id, and metadata.
"""
turn_detection: SessionTurnDetection
"""Configuration for turn detection, ether Server VAD or Semantic VAD.
This can be set to `null` to turn off, in which case the client must manually
trigger model response. Server VAD means that the model will detect the start
and end of speech based on audio volume and respond at the end of user speech.
Semantic VAD is more advanced and uses a turn detection model (in conjunction
with VAD) to semantically estimate whether the user has finished speaking, then
dynamically sets a timeout based on this probability. For example, if user audio
trails off with "uhhm", the model will score a low probability of turn end and
wait longer for the user to continue speaking. This can be useful for more
natural conversations, but may have a higher latency.
"""
voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
"""The voice the model uses to respond.
Voice cannot be changed during the session once the model has responded with
audio at least once. Current voice options are `alloy`, `ash`, `ballad`,
`coral`, `echo`, `sage`, `shimmer`, and `verse`.
"""
| Session |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.