language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/1700-1799/1754.Largest Merge Of Two Strings/Solution.py | {
"start": 0,
"end": 423
} | class ____:
def largestMerge(self, word1: str, word2: str) -> str:
i = j = 0
ans = []
while i < len(word1) and j < len(word2):
if word1[i:] > word2[j:]:
ans.append(word1[i])
i += 1
else:
ans.append(word2[j])
j += 1
ans.append(word1[i:])
ans.append(word2[j:])
return "".join(ans)
| Solution |
python | arrow-py__arrow | arrow/locales.py | {
"start": 96475,
"end": 97742
} | class ____(Locale):
names = ["ro", "ro-ro"]
past = "{0} în urmă"
future = "peste {0}"
and_word = "și"
timeframes = {
"now": "acum",
"second": "o secunda",
"seconds": "{0} câteva secunde",
"minute": "un minut",
"minutes": "{0} minute",
"hour": "o oră",
"hours": "{0} ore",
"day": "o zi",
"days": "{0} zile",
"month": "o lună",
"months": "{0} luni",
"year": "un an",
"years": "{0} ani",
}
month_names = [
"",
"ianuarie",
"februarie",
"martie",
"aprilie",
"mai",
"iunie",
"iulie",
"august",
"septembrie",
"octombrie",
"noiembrie",
"decembrie",
]
month_abbreviations = [
"",
"ian",
"febr",
"mart",
"apr",
"mai",
"iun",
"iul",
"aug",
"sept",
"oct",
"nov",
"dec",
]
day_names = [
"",
"luni",
"marți",
"miercuri",
"joi",
"vineri",
"sâmbătă",
"duminică",
]
day_abbreviations = ["", "Lun", "Mar", "Mie", "Joi", "Vin", "Sâm", "Dum"]
| RomanianLocale |
python | getsentry__sentry | src/sentry/api/endpoints/release_thresholds/release_threshold_index.py | {
"start": 1135,
"end": 2309
} | class ____(OrganizationEndpoint):
owner: ApiOwner = ApiOwner.ENTERPRISE
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
def get(self, request: Request, organization: Organization) -> HttpResponse:
validator = ReleaseThresholdIndexGETValidator(
data=request.query_params,
)
if not validator.is_valid():
return Response(validator.errors, status=400)
environments_list = self.get_environments(request, organization)
projects_list = self.get_projects(request, organization)
release_query = Q()
if environments_list:
release_query &= Q(
environment__in=environments_list,
)
if projects_list:
release_query &= Q(
project__in=projects_list,
)
queryset = ReleaseThreshold.objects.filter(release_query)
return self.paginate(
request=request,
queryset=queryset,
order_by="date_added",
paginator_cls=OffsetPaginator,
on_results=lambda x: serialize(x, request.user),
)
| ReleaseThresholdIndexEndpoint |
python | pytorch__pytorch | torchgen/model.py | {
"start": 54780,
"end": 73333
} | class ____:
# The name of the operator this function schema describes.
name: OperatorName
arguments: Arguments
# TODO: Need to handle collisions with argument names at some point
returns: tuple[Return, ...]
@property
def is_mutable(self) -> bool:
def is_write(arg: Argument) -> bool:
if arg.annotation is None:
return False
return arg.annotation.is_write
# Corresponds to torch._C._FunctionSchema.is_mutable
# See aten/src/ATen/core/function_schema.h (keep these in sync)
return any(is_write(a) for a in self.arguments.flat_all)
def schema_order_arguments(self) -> Iterator[Argument]:
return itertools.chain(
self.arguments.flat_positional,
self.arguments.flat_kwarg_only,
self.arguments.out,
)
decl_re = re.compile(r"(?P<name>[^\(]+)\((?P<args>.*)\) -> (?P<returns>.*)")
@staticmethod
def parse(func: str) -> FunctionSchema:
# We should probably get a proper parser here
decls = FunctionSchema.decl_re.findall(func)
assert len(decls) == 1, f"Invalid function schema: {func}"
ops, args, return_decl = decls[0]
name = OperatorName.parse(ops)
arguments = Arguments.parse(args)
returns = parse_returns(return_decl)
r = FunctionSchema(name=name, arguments=arguments, returns=returns)
assert str(r) == func, f"{str(r)} != {func}"
return r
def returns_are_aliased(self) -> bool:
# We assert earlier that schemas can't have a mix of aliased and non-aliased returns
return any(
r
for r in self.returns
if r.annotation is not None and r.annotation.is_write
)
def __post_init__(self) -> None:
for arg, ret in zip(self.arguments.out, self.returns):
assert arg.annotation == ret.annotation, (
"Out arguments must have matching return Tensor; furthermore, "
"the ith-argument needs to correspond to the ith return"
)
# We also enforce that if you have any mutable, positional args, then they are not returned.
# This makes it easier to group these functions properly with their functional/out= counterparts.
for a in self.arguments.post_self_positional_mutable:
assert not any(a.annotation == r.annotation for r in self.returns), (
f"If you have a schema with mutable positional args, we expect them to not be returned. schema: {str(self)}"
)
# Invariant: we expect out arguments to appear as keyword arguments in the schema.
# This means that all mutable returns should be aliased to a keyword argument
# (except for "self", which we explicitly don't treat as an out argument because of its use in methods)
# See Note [is_out_fn]
out_and_self = list(self.arguments.out) + [
arg for arg in self.arguments.flat_positional if arg.name == "self"
]
mutable_returns = [
ret
for ret in self.returns
if ret.annotation is not None and ret.annotation.is_write
]
immutable_returns = [
ret
for ret in self.returns
if ret.annotation is None or not ret.annotation.is_write
]
# Some assertions: We don't want any functions with a return type of "-> (Tensor(a!), Tensor)",
# because:
# (1) It's more annoying to handle properly
# (2) It's unnecessary - you can't method-chain on the first (mutated) output because it's part of a tuple.
# Instead, we expect the (a!) argument to not be returned.
assert len(mutable_returns) == 0 or len(immutable_returns) == 0, (
f"NativeFunctions must have either only mutable returns, or only immutable returns. Found: {str(self)}"
)
for ret in mutable_returns:
assert any(ret.annotation == arg.annotation for arg in out_and_self), (
'All mutable returns must be aliased either to a keyword argument, or to "self". '
"Did you forget to mark an out argument as keyword-only?"
)
if self.arguments.out:
# out= ops that return their mutable inputs are only really useful for method chaining.
# And method chaining is only really useful if the thing you're returning is a plain Tensor.
# So ideally, we'd enforce that out= ops with a single plain mutable tensor should return the tensor,
# and all other types of out= op schemas should return void.
# There are a bunch of existing out= ops that return tuples of tensors though, so we're stuck with allowing that.
if any(a.type != BaseType(BaseTy.Tensor) for a in self.arguments.out):
assert len(self.returns) == 0, (
"out= ops that accept tensor lists as out arguments "
)
"are expected to have no return type (since you can't do method chaining on them)"
else:
# mutable keyword arguments whose name has _scratch_ prefix are
# scratch tensors for memory planning and should not be returned
assert len(
[
arg
for arg in self.arguments.out
if not arg.name.startswith("_scratch_")
]
) == len(self.returns), (
"Must return as many arguments as there are out arguments, or no return at all"
)
if self.name.name.inplace:
self_a = self.arguments.self_arg
assert (
self_a
and self_a.argument.annotation
and self_a.argument.annotation.is_write
)
if self_a.argument.type == BaseType(BaseTy.Tensor):
# All inplace ops with an ordinary `Tensor self` argument should return self,
# to allow for method chaining.
assert (
len(self.returns) == 1
and self.returns[0].annotation == self_a.argument.annotation
)
else:
# You can't method chain on non-tensor self arguments though (like a list[Tensor])
# so in all other cases we expect the return type to be none.
assert len(self.returns) == 0
if self.arguments.tensor_options is not None:
assert self.kind() == SchemaKind.functional, (
"Found an operator that is not functional or out variant, but has tensor options arguments."
"This is not allowed- tensor options arguments are only allowed for factory functions."
f"schema: {str(self)}"
)
if self.is_functional_fn():
assert self.kind() == SchemaKind.functional, (
"Found an operator that is not functional, but its overload contains the string 'functional'."
"This is a special keyword in the codegen, please use a different overload name."
f"schema: {str(self)}"
)
def is_functional_fn(self) -> bool:
return "functional" in self.name.overload_name
def is_out_fn(self) -> bool:
# Note [is_out_fn]
#
# out functions are the variants which take an explicit out= argument
# to populate into. We need to know if a schema corresponds to an
# out function for several reasons:
#
# - They codegen differently in C++ API
# - codegen to at::add_out rather than at::add
# - out argument is moved to front of C++ argument list
#
# out functions are DEFINED to be any function with a keyword-only
# argument that is mutable. In principle, this could lead to a
# false positive if you define a function that mutates a
# kwarg only argument, but this isn't the "true" output of this
# function. A more robust definition that would work in this
# case would also look at:
#
# - The output types. Out functions take in the arguments
# they mutate and then return them again; this is sort
# of "definitionally" what makes something an out function.
# Historically, we DO check this for consistency.
# - Correspondence with pure variant. An out function
# should have a signature equivalent to its pure variant,
# but just with extra kwargs for the output elements. This
# is difficult to actually check for and historically
# we only do this check in tools/
return bool(self.arguments.out)
def kind(self) -> SchemaKind:
"""
What kind of schema is this? A functional schema is one
that returns a newly allocated output; an inplace schema
modifies the self argument inplace; an out schema writes
the result into an explicitly provided out argument.
"""
is_out = bool(self.arguments.out)
is_scratch = bool(
[arg for arg in self.arguments.out if arg.name.startswith("_scratch_")]
)
is_inplace = self.name.name.inplace
is_mutable = any(
a.annotation is not None and a.annotation.is_write
for a in self.arguments.post_self_positional
)
assert not (is_out and is_inplace)
# out= and inplace schemas can also have post_self_positional mutable args,
# but we give precedence to out= and inplace when deciding the schema kind.
# Tradeoff: we probably don't want to have to teach codegen that looks at inplace ops
# to also worry about mutable post_self_positional arguments,
# but it seems like a much bigger lift to classify them has having a new schema kind.
# The number of ops that fit in this strange category is small enough that
# we can probably manually write code for them instead of forcing the codegen to handle them.
if is_inplace:
return SchemaKind.inplace
elif is_scratch:
assert is_out, (
"invariant: all scratch operators are expected to be out= operators too"
)
return SchemaKind.scratch
elif is_out:
assert not is_scratch, (
"We should not categorize a scratch op as an out variant. Check if the order of if statements are expected!"
) # noqa: B950
return SchemaKind.out
elif is_mutable:
return SchemaKind.mutable
else:
return SchemaKind.functional
# For every return:
# - If the return aliases an input, we return the input name
# - Otherwise, we return None.
# If return names were enforced to be consistent with aliasing information, then we wouldn't need this.
def aliased_return_names(self) -> list[str | None]:
outs: list[str | None] = []
for r in self.returns:
aliased_args = [
a
for a in self.arguments.flat_all
if a.annotation is not None and a.annotation == r.annotation
]
if len(aliased_args) == 0:
outs.append(None)
elif len(aliased_args) == 1:
outs.append(aliased_args[0].name)
else:
aliased_names = ", ".join(a.name for a in aliased_args)
raise AssertionError(
f"Found a return ({r.name})that aliases multiple inputs ({aliased_names})"
)
return outs
def signature(
self,
*,
strip_default: bool = False,
strip_view_copy_name: bool = False,
keep_return_names: bool = False,
) -> FunctionSchema:
"""
Certain schemas are 'related', in that they are simply
inplace/out/functional versions of the same function. This method
factors these schemas into the "core" functional signature which
is equal across all versions.
Here is what normalization happens to the schema to convert
it to a signature:
- The overload name is stripped (name is retained, since
it expresses semantic content about what the function does)
- Inplace is set False
- Out arguments are stripped
- Mutable post_self_positional args are converted to returns
- Mutability annotations are stripped (this is sound
because you cannot overload on mutability annotation)
- Return names are stripped since they are not overloadable and
some variants have return names but some not
- TensorOptions are dropped
because out= variants of factory functions don't include them
(and we want to be able to pair up factory functions with their out variants)
Finally, we want to be able to pair up related "view" and their
corresponding "view_copy" operators. We do this by optionally
stripping the trailing "_copy" from the base name.
Example of a mutable op before and after:
f.func (Mutable operator):
_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask) # noqa: B950
f.func (Corresponding functional operator):
_fused_moving_avg_obs_fq_helper.functional(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor output, Tensor mask, Tensor running_min_out, Tensor running_max_out, Tensor scale_out, Tensor zero_point_out) # noqa: B950
f.func.signature() output:
_fused_moving_avg_obs_fq_helper(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor running_min, Tensor running_max, Tensor scale, Tensor zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) # noqa: B950
"""
def strip_ret_annotation(r: Return) -> Return:
return Return(
name=r.name if keep_return_names else None,
type=r.type,
annotation=None,
)
base_name = self.name.name.base
if strip_view_copy_name:
if base_name.endswith("_copy"):
base_name = base_name.replace("_copy", "")
elif base_name.endswith("_scatter"):
base_name = base_name.replace("scatter", "inverse")
# find mutable inputs that are not originally returned, and convert them to returns
returns_from_mutable_inputs = tuple(
# When we're grouping functions we strip the return names,
# but when we're generating the actual functional variants then we follow
# a convention for what to name the returns
Return(
name=f"{a.name}_out" if keep_return_names else None,
type=a.type,
annotation=None,
)
for a in itertools.chain(
# Order is important here (otherwise e.g. inplace with mutable args
# and out= with mutable args won't have the same signature)
(
[self.arguments.self_arg.argument]
if self.arguments.self_arg is not None
else []
),
self.arguments.out,
self.arguments.post_self_positional,
)
if a.annotation is not None
and a.annotation.is_write
and not any(a.annotation == r.annotation for r in self.returns)
)
original_returns = tuple(map(strip_ret_annotation, self.returns))
# Ordering is important here. We expect the "mutable input" returns to come last.
returns = original_returns + returns_from_mutable_inputs
args_sig = self.arguments.signature(strip_default=strip_default)
# See Note [bernoulli.p schema]
if str(self.name) == "bernoulli.p":
args_sig = Arguments.parse(str(args_sig).replace("float p", "float p=0.5"))
return FunctionSchema(
name=OperatorName(
name=BaseOperatorName(
base=base_name,
inplace=False,
dunder_method=self.name.name.dunder_method,
),
overload_name="", # stripped
),
arguments=args_sig,
returns=returns,
)
def view_signature(self) -> FunctionSchema:
return self.signature(strip_view_copy_name=True)
def with_name(self, name: OperatorName) -> FunctionSchema:
return FunctionSchema(
name=name,
arguments=self.arguments,
returns=self.returns,
)
@property
def modifies_arguments(self) -> bool:
return self.kind() in [SchemaKind.inplace, SchemaKind.out, SchemaKind.mutable]
def has_symint(self) -> bool:
return self.arguments.has_symint_arg()
def __str__(self) -> str:
all_arguments_str = str(self.arguments)
if len(self.returns) == 1:
returns = str(self.returns[0]) # omit parentheses
else:
returns = "(" + ", ".join(map(str, self.returns)) + ")"
return f"{self.name}({all_arguments_str}) -> {returns}"
# Here is the rest of the data model, described more briefly.
# Simplified version for what actually shows up in built-ins.
# Look at alias_info.h for expanded syntax. If you need the structure,
# you also need to make this structure recursive so it can be lined
# up with the type components too. For primitives this isn't really
# necessary
@dataclass(frozen=True)
| FunctionSchema |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/forms.py | {
"start": 4096,
"end": 4189
} | class ____(ReprForm):
_regex = forms.RegexField(regex="[A-Z]{3}\\.[a-z]{4}")
| RegexFieldForm |
python | walkccc__LeetCode | solutions/2222. Number of Ways to Select Buildings/2222.py | {
"start": 0,
"end": 489
} | class ____:
def numberOfWays(self, s: str) -> int:
ans = 0
# before[i] := the number of i before the current digit
before = [0] * 2
# after[i] := the number of i after the current digit
after = [0] * 2
after[0] = s.count('0')
after[1] = len(s) - after[0]
for c in s:
num = int(c)
after[num] -= 1
if num == 0:
ans += before[1] * after[1]
else:
ans += before[0] * after[0]
before[num] += 1
return ans
| Solution |
python | scipy__scipy | scipy/linalg/tests/test_fblas.py | {
"start": 4360,
"end": 4590
} | class ____(BaseScal):
blas_func = fblas.dscal
dtype = float64
try:
class TestCscal(BaseScal):
blas_func = fblas.cscal
dtype = complex64
except AttributeError:
class TestCscal:
pass
| TestDscal |
python | run-llama__llama_index | llama-index-integrations/storage/kvstore/llama-index-storage-kvstore-gel/llama_index/storage/kvstore/gel/base.py | {
"start": 2481,
"end": 10731
} | class ____(BaseKVStore):
"""Gel Key-Value store."""
def __init__(self, record_type: str = "Record") -> None:
"""
Initialize GelKVStore.
Args:
record_type: The name of the record type in Gel schema.
"""
self.record_type = record_type
self._sync_client = None
self._async_client = None
def get_sync_client(self):
"""
Get or initialize a synchronous Gel client.
Ensures the client is connected and the record type exists.
Returns:
A connected synchronous Gel client.
"""
if self._async_client is not None:
raise RuntimeError(
"GelKVStore has already been used in async mode. "
"If you were intentionally trying to use different IO modes at the same time, "
"please create a new instance instead."
)
if self._sync_client is None:
self._sync_client = gel.create_client()
try:
self._sync_client.ensure_connected()
except gel.errors.ClientConnectionError as e:
_logger.error(NO_PROJECT_MESSAGE)
raise
try:
self._sync_client.query(f"select {self.record_type};")
except gel.errors.InvalidReferenceError as e:
_logger.error(
Template(MISSING_RECORD_TYPE_TEMPLATE).render(
record_type=self.record_type
)
)
raise
return self._sync_client
async def get_async_client(self):
"""
Get or initialize an asynchronous Gel client.
Ensures the client is connected and the record type exists.
Returns:
A connected asynchronous Gel client.
"""
if self._sync_client is not None:
raise RuntimeError(
"GelKVStore has already been used in sync mode. "
"If you were intentionally trying to use different IO modes at the same time, "
"please create a new instance instead."
)
if self._async_client is None:
self._async_client = gel.create_async_client()
try:
await self._async_client.ensure_connected()
except gel.errors.ClientConnectionError as e:
_logger.error(NO_PROJECT_MESSAGE)
raise
try:
await self._async_client.query(f"select {self.record_type};")
except gel.errors.InvalidReferenceError as e:
_logger.error(
Template(MISSING_RECORD_TYPE_TEMPLATE).render(
record_type=self.record_type
)
)
raise
return self._async_client
def put(
self,
key: str,
val: dict,
collection: str = DEFAULT_COLLECTION,
) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
client = self.get_sync_client()
client.query(
PUT_QUERY,
key=key,
namespace=collection,
value=json.dumps(val),
)
async def aput(
self,
key: str,
val: dict,
collection: str = DEFAULT_COLLECTION,
) -> None:
"""
Put a key-value pair into the store.
Args:
key (str): key
val (dict): value
collection (str): collection name
"""
client = await self.get_async_client()
await client.query(
PUT_QUERY,
key=key,
namespace=collection,
value=json.dumps(val),
)
def put_all(
self,
kv_pairs: List[Tuple[str, dict]],
collection: str = DEFAULT_COLLECTION,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""
Store multiple key-value pairs in batches.
Args:
kv_pairs: List of (key, value) tuples to store.
collection: Namespace for the keys.
batch_size: Number of pairs to store in each batch.
"""
for chunk in (
kv_pairs[pos : pos + batch_size]
for pos in range(0, len(kv_pairs), batch_size)
):
client = self.get_sync_client()
client.query(
PUT_ALL_QUERY,
data=json.dumps([{"key": key, "value": value} for key, value in chunk]),
namespace=collection,
)
async def aput_all(
self,
kv_pairs: List[Tuple[str, dict]],
collection: str = DEFAULT_COLLECTION,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
"""
Async version of put_all.
Args:
kv_pairs: List of (key, value) tuples to store.
collection: Namespace for the keys.
batch_size: Number of pairs to store in each batch.
"""
for chunk in (
kv_pairs[pos : pos + batch_size]
for pos in range(0, len(kv_pairs), batch_size)
):
client = await self.get_async_client()
await client.query(
PUT_ALL_QUERY,
data=json.dumps([{"key": key, "value": value} for key, value in chunk]),
namespace=collection,
)
def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
"""
Get a value from the store.
Args:
key (str): key
collection (str): collection name
"""
client = self.get_sync_client()
result = client.query_single(
GET_QUERY,
key=key,
namespace=collection,
)
return json.loads(result) if result is not None else None
async def aget(
self, key: str, collection: str = DEFAULT_COLLECTION
) -> Optional[dict]:
"""
Get a value from the store.
Args:
key (str): key
collection (str): collection name
"""
client = await self.get_async_client()
result = await client.query_single(
GET_QUERY,
key=key,
namespace=collection,
)
return json.loads(result) if result is not None else None
def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""
Get all values from the store.
Args:
collection (str): collection name
"""
client = self.get_sync_client()
results = client.query(
GET_ALL_QUERY,
namespace=collection,
)
return {result.key: json.loads(result.value) for result in results}
async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
"""
Get all values from the store.
Args:
collection (str): collection name
"""
client = await self.get_async_client()
results = await client.query(
GET_ALL_QUERY,
namespace=collection,
)
return {result.key: json.loads(result.value) for result in results}
def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""
Delete a value from the store.
Args:
key (str): key
collection (str): collection name
"""
client = self.get_sync_client()
result = client.query(
DELETE_QUERY,
key=key,
namespace=collection,
)
return len(result) > 0
async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
"""
Delete a value from the store.
Args:
key (str): key
collection (str): collection name
"""
client = await self.get_async_client()
result = await client.query(
DELETE_QUERY,
key=key,
namespace=collection,
)
return len(result) > 0
| GelKVStore |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 45248,
"end": 47171
} | class ____(TypedDict, total=False):
type: Required[Literal['enum']]
cls: Required[Any]
members: Required[list[Any]]
sub_type: Literal['str', 'int', 'float']
missing: Callable[[Any], Any]
strict: bool
ref: str
metadata: dict[str, Any]
serialization: SerSchema
def enum_schema(
cls: Any,
members: list[Any],
*,
sub_type: Literal['str', 'int', 'float'] | None = None,
missing: Callable[[Any], Any] | None = None,
strict: bool | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> EnumSchema:
"""
Returns a schema that matches an enum value, e.g.:
```py
from enum import Enum
from pydantic_core import SchemaValidator, core_schema
class Color(Enum):
RED = 1
GREEN = 2
BLUE = 3
schema = core_schema.enum_schema(Color, list(Color.__members__.values()))
v = SchemaValidator(schema)
assert v.validate_python(2) is Color.GREEN
```
Args:
cls: The enum class
members: The members of the enum, generally `list(MyEnum.__members__.values())`
sub_type: The type of the enum, either 'str' or 'int' or None for plain enums
missing: A function to use when the value is not found in the enum, from `_missing_`
strict: Whether to use strict mode, defaults to False
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='enum',
cls=cls,
members=members,
sub_type=sub_type,
missing=missing,
strict=strict,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| EnumSchema |
python | pennersr__django-allauth | allauth/socialaccount/providers/mediawiki/provider.py | {
"start": 445,
"end": 821
} | class ____(ProviderAccount):
def get_profile_url(self):
userpage = settings.get(
"USERPAGE_TEMPLATE", "https://meta.wikimedia.org/wiki/User:{username}"
)
username = self.account.extra_data.get("username")
if not username:
return None
return userpage.format(username=username.replace(" ", "_"))
| MediaWikiAccount |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {
"start": 10590,
"end": 10692
} | class ____(json.JSON):
def result_processor(self, dialect, coltype):
return None
| AsyncpgJSON |
python | pypa__pipenv | pipenv/vendor/click/exceptions.py | {
"start": 8356,
"end": 8880
} | class ____(ClickException):
"""Raised if a file cannot be opened."""
def __init__(self, filename: str, hint: t.Optional[str] = None) -> None:
if hint is None:
hint = _("unknown error")
super().__init__(hint)
self.ui_filename: str = format_filename(filename)
self.filename = filename
def format_message(self) -> str:
return _("Could not open file {filename!r}: {message}").format(
filename=self.ui_filename, message=self.message
)
| FileError |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/steps/changelog.py | {
"start": 445,
"end": 2648
} | class ____(StepModifyingFiles):
context: ConnectorContext
title = "Add changelog entry"
def __init__(
self,
context: ConnectorContext,
documentation_directory: Directory,
new_version: str,
comment: str,
pull_request_number: str | int | None,
) -> None:
super().__init__(context, documentation_directory)
self.new_version = semver.VersionInfo.parse(new_version)
self.comment = comment
self.pull_request_number = pull_request_number or "*PR_NUMBER_PLACEHOLDER*"
async def _run(self, pull_request_number: int | str | None = None) -> StepResult:
if pull_request_number is None:
# this allows passing it dynamically from a result of another action (like creating a pull request)
pull_request_number = self.pull_request_number
try:
original_markdown = await dagger_read_file(self.modified_directory, self.context.connector.documentation_file_name)
except FileNotFoundError:
return StepResult(
step=self,
status=StepStatus.SKIPPED,
stderr="Connector does not have a documentation file.",
)
try:
changelog = Changelog(original_markdown)
changelog.add_entry(self.new_version, datetime.date.today(), pull_request_number, self.comment)
updated_doc = changelog.to_markdown()
except Exception as e:
return StepResult(
step=self,
status=StepStatus.FAILURE,
stderr=f"Could not add changelog entry: {e}",
output=self.modified_directory,
exc_info=e,
)
self.modified_directory = dagger_write_file(self.modified_directory, self.context.connector.documentation_file_name, updated_doc)
self.modified_files.append(self.context.connector.documentation_file_name)
return StepResult(
step=self,
status=StepStatus.SUCCESS,
stdout=f"Added changelog entry to {self.context.connector.documentation_file_name}",
output=self.modified_directory,
)
| AddChangelogEntry |
python | scipy__scipy | scipy/stats/tests/test_mstats_basic.py | {
"start": 48029,
"end": 52197
} | class ____:
def test_vs_nonmasked(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
assert_array_almost_equal(mstats.normaltest(x),
stats.normaltest(x))
assert_array_almost_equal(mstats.skewtest(x),
stats.skewtest(x))
assert_array_almost_equal(mstats.kurtosistest(x),
stats.kurtosistest(x))
funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
x = [1, 2, 3, 4]
for func, mfunc in zip(funcs, mfuncs):
with pytest.warns(SmallSampleWarning, match=too_small_1d_not_omit):
res = func(x)
assert np.isnan(res.statistic)
assert np.isnan(res.pvalue)
assert_raises(ValueError, mfunc, x)
def test_axis_None(self):
# Test axis=None (equal to axis=0 for 1-D input)
x = np.array((-2,-1,0,1,2,3)*4)**2
assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
assert_allclose(mstats.kurtosistest(x, axis=None),
mstats.kurtosistest(x))
def test_maskedarray_input(self):
# Add some masked values, test result doesn't change
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
xm = np.ma.array(np.r_[np.inf, x, 10],
mask=np.r_[True, [False] * x.size, True])
assert_allclose(mstats.normaltest(xm), stats.normaltest(x))
assert_allclose(mstats.skewtest(xm), stats.skewtest(x))
assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))
def test_nd_input(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
x_2d = np.vstack([x] * 2).T
for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:
res_1d = func(x)
res_2d = func(x_2d)
assert_allclose(res_2d[0], [res_1d[0]] * 2)
assert_allclose(res_2d[1], [res_1d[1]] * 2)
def test_normaltest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.normaltest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_kurtosistest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.kurtosistest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_regression_9033(self):
# x clearly non-normal but power of negative denom needs
# to be handled correctly to reject normality
counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]
x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])
assert_equal(mstats.kurtosistest(x)[1] < 0.01, True)
@pytest.mark.parametrize("test", ["skewtest", "kurtosistest"])
@pytest.mark.parametrize("alternative", ["less", "greater"])
def test_alternative(self, test, alternative):
x = stats.norm.rvs(loc=10, scale=2.5, size=30, random_state=123)
stats_test = getattr(stats, test)
mstats_test = getattr(mstats, test)
z_ex, p_ex = stats_test(x, alternative=alternative)
z, p = mstats_test(x, alternative=alternative)
assert_allclose(z, z_ex, atol=1e-12)
assert_allclose(p, p_ex, atol=1e-12)
# test with masked arrays
x[1:5] = np.nan
x = np.ma.masked_array(x, mask=np.isnan(x))
z_ex, p_ex = stats_test(x.compressed(), alternative=alternative)
z, p = mstats_test(x, alternative=alternative)
assert_allclose(z, z_ex, atol=1e-12)
assert_allclose(p, p_ex, atol=1e-12)
def test_bad_alternative(self):
x = stats.norm.rvs(size=20, random_state=123)
msg = r"`alternative` must be..."
with pytest.raises(ValueError, match=msg):
mstats.skewtest(x, alternative='error')
with pytest.raises(ValueError, match=msg):
mstats.kurtosistest(x, alternative='error')
| TestNormalitytests |
python | PrefectHQ__prefect | tests/server/models/test_filters.py | {
"start": 15066,
"end": 22211
} | class ____:
params = [
[{}, 12],
[dict(flow_filter=filters.FlowFilter(name=dict(any_=["f-1", "f-2"]))), 8],
[dict(flow_filter=filters.FlowFilter(name=dict(any_=["f-1", "f-100"]))), 5],
[dict(flow_filter=filters.FlowFilter(name=dict(any_=["f-1"]))), 5],
[dict(flow_filter=filters.FlowFilter(name=dict(like_="f-"))), 12],
[dict(flow_filter=filters.FlowFilter(tags=dict(all_=["db"]))), 8],
[dict(flow_filter=filters.FlowFilter(tags=dict(all_=["db", "blue"]))), 5],
[dict(flow_filter=filters.FlowFilter(tags=dict(all_=["db", "red"]))), 0],
[dict(flow_run_filter=filters.FlowRunFilter(tags=dict(all_=["db", "red"]))), 3],
[
dict(flow_run_filter=filters.FlowRunFilter(tags=dict(all_=["db", "blue"]))),
3,
],
[dict(flow_run_filter=filters.FlowRunFilter(tags=dict(is_null_=True))), 4],
[
dict(flow_run_filter=filters.FlowRunFilter(name=dict(like_="test-happy"))),
3,
],
[
dict(
flow_run_filter=filters.FlowRunFilter(
name=dict(like_="test-happy-mallard")
)
),
1,
],
[dict(task_run_filter=filters.TaskRunFilter(name=dict(like_="2a"))), 1],
[dict(deployment_filter=filters.DeploymentFilter(id=dict(any_=[d_1_1_id]))), 2],
[dict(deployment_filter=filters.DeploymentFilter(name=dict(like_="d_1"))), 3],
# next two check that filters are applied as an intersection not a union
[
dict(
task_run_filter=filters.TaskRunFilter(
state=dict(type=dict(any_=["FAILED"]))
)
),
1,
],
[
dict(
task_run_filter=filters.TaskRunFilter(
state=dict(type=dict(any_=["FAILED"]))
),
flow_filter=filters.FlowFilter(tags=dict(all_=["xyz"])),
),
0,
],
# search for completed states with "NOT-COMPLETED" as the name, should return nothing
[
dict(
flow_run_filter=filters.FlowRunFilter(
state=dict(
type=dict(any_=["COMPLETED"]), name=dict(any_=["NOT-COMPLETED"])
)
)
),
0,
],
[
dict(
flow_run_filter=filters.FlowRunFilter(
state=dict(name=dict(any_=["Completed"]))
)
),
4,
],
[
dict(
flow_run_filter=filters.FlowRunFilter(
state=dict(name=dict(any_=["Failed"]))
)
),
2,
],
[
dict(
flow_run_filter=filters.FlowRunFilter(
state=dict(name=dict(any_=["Failed", "Completed"]))
)
),
6,
],
[
dict(
flow_run_filter=filters.FlowRunFilter(
state=dict(
type=dict(any_=["FAILED"]),
name=dict(any_=["Failed", "Completed"]),
)
)
),
2,
],
[
dict(
flow_run_filter=filters.FlowRunFilter(
deployment_id=dict(any_=[d_1_1_id, d_1_2_id])
)
),
3,
],
[
dict(
flow_run_filter=filters.FlowRunFilter(
deployment_id=dict(any_=[d_1_1_id, d_3_1_id])
)
),
3,
],
# flow runs that are subflows (via task attribute)
[
dict(
task_run_filter=filters.TaskRunFilter(subflow_runs=dict(exists_=True))
),
1,
],
# flow runs that are subflows (via flow run attribute)
[
dict(
flow_run_filter=filters.FlowRunFilter(
parent_task_run_id=dict(is_null_=False)
)
),
1,
],
# empty filter
[dict(flow_filter=filters.FlowFilter()), 12],
# multiple empty filters
[
dict(
flow_filter=filters.FlowFilter(),
flow_run_filter=filters.FlowRunFilter(),
),
12,
],
[
dict(
work_pool_filter=filters.WorkPoolFilter(name=dict(any_=["Test Pool"]))
),
1,
],
[
dict(
work_queue_filter=filters.WorkQueueFilter(name=dict(any_=["default"]))
),
1,
],
[
dict(
work_pool_filter=filters.WorkPoolFilter(name=dict(any_=["Test Pool"])),
work_queue_filter=filters.WorkQueueFilter(name=dict(any_=["default"])),
),
1,
],
[
dict(
work_pool_filter=filters.WorkPoolFilter(
name=dict(any_=["A pool that doesn't exist"])
),
work_queue_filter=filters.WorkQueueFilter(name=dict(any_=["default"])),
),
0,
],
[
dict(
work_pool_filter=filters.WorkPoolFilter(name=dict(any_=["Test Pool"])),
work_queue_filter=filters.WorkQueueFilter(
name=dict(any_=["a queue that doesn't exist"])
),
),
0,
],
]
@pytest.mark.parametrize("kwargs,expected", params)
async def test_python_client_filter(self, kwargs, expected):
async with get_client() as client:
flow_runs = await client.read_flow_runs(**kwargs)
assert len(flow_runs) == expected
@pytest.mark.parametrize("kwargs,expected", params)
async def test_models_count(self, session, kwargs, expected):
count = await models.flow_runs.count_flow_runs(session=session, **kwargs)
assert count == expected
@pytest.mark.parametrize("kwargs,expected", params)
async def test_models_read(self, session, kwargs, expected):
read = await models.flow_runs.read_flow_runs(session=session, **kwargs)
assert len({r.id for r in read}) == expected
@pytest.mark.parametrize("kwargs,expected", params)
async def test_api_count(self, client, kwargs, expected):
adjusted_kwargs = adjust_kwargs_for_client(kwargs)
response = await client.post("/flow_runs/count", json=adjusted_kwargs)
assert response.json() == expected
@pytest.mark.parametrize("kwargs,expected", params)
async def test_api_read(self, client, kwargs, expected):
adjusted_kwargs = adjust_kwargs_for_client(kwargs)
response = await client.post(
"/flow_runs/filter",
json=adjusted_kwargs,
)
assert len({r["id"] for r in response.json()}) == expected
| TestCountFlowRunModels |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_materializations.py | {
"start": 369,
"end": 4332
} | class ____(ExecutingGraphQLContextTestMatrix):
def test_materializations(self, graphql_context: WorkspaceRequestContext, snapshot):
selector = infer_job_selector(graphql_context, "materialization_job")
logs = sync_execute_get_events(
context=graphql_context,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
}
},
)
materializations = [log for log in logs if log["__typename"] == "MaterializationEvent"]
assert len(materializations) == 1
mat = materializations[0]
assert mat["label"] == "all_types"
entry = mat["metadataEntries"][0]
assert entry["__typename"] == "TextMetadataEntry"
assert entry["text"]
entry = mat["metadataEntries"][1]
assert entry["__typename"] == "UrlMetadataEntry"
assert entry["url"]
entry = mat["metadataEntries"][2]
assert entry["__typename"] == "PathMetadataEntry"
assert entry["path"]
entry = mat["metadataEntries"][3]
assert entry["__typename"] == "JsonMetadataEntry"
assert entry["jsonString"]
entry = mat["metadataEntries"][4]
assert entry["__typename"] == "PythonArtifactMetadataEntry"
assert entry["module"]
assert entry["name"]
entry = mat["metadataEntries"][5]
assert entry["__typename"] == "PythonArtifactMetadataEntry"
assert entry["module"]
assert entry["name"]
entry = mat["metadataEntries"][6]
assert entry["__typename"] == "FloatMetadataEntry"
assert entry["floatValue"]
entry = mat["metadataEntries"][7]
assert entry["__typename"] == "IntMetadataEntry"
assert entry["intRepr"]
entry = mat["metadataEntries"][8]
assert entry["__typename"] == "FloatMetadataEntry"
assert entry["floatValue"] is None # float NaN test
entry = mat["metadataEntries"][9]
assert entry["__typename"] == "IntMetadataEntry"
assert int(entry["intRepr"]) == LONG_INT
entry = mat["metadataEntries"][10]
assert entry["__typename"] == "PipelineRunMetadataEntry"
assert entry["runId"] == "fake_run_id"
entry = mat["metadataEntries"][11]
assert entry["__typename"] == "AssetMetadataEntry"
assert entry["assetKey"]
assert entry["assetKey"]["path"]
entry = mat["metadataEntries"][12]
assert entry["__typename"] == "TableMetadataEntry"
assert entry["table"]
assert entry["table"]["records"]
assert entry["table"]["schema"]
entry = mat["metadataEntries"][13]
assert entry["__typename"] == "TableSchemaMetadataEntry"
assert entry["schema"]
assert entry["schema"]["columns"]
assert entry["schema"]["columns"][0]["constraints"]
assert entry["schema"]["constraints"]
assert entry["schema"]["columns"]
assert entry["schema"]["columns"][0]["tags"]
entry = mat["metadataEntries"][14]
assert entry["__typename"] == "JobMetadataEntry"
assert entry["jobName"]
entry = mat["metadataEntries"][15]
assert entry["__typename"] == "TextMetadataEntry"
assert entry["text"] == "SomeClass"
entry = mat["metadataEntries"][16]
assert entry["__typename"] == "FloatMetadataEntry"
assert entry["floatValue"] is None
assert entry["floatRepr"] == "inf"
entry = mat["metadataEntries"][17]
assert entry["__typename"] == "FloatMetadataEntry"
assert entry["floatValue"] is None
assert entry["floatRepr"] == "-inf"
non_engine_event_logs = [
message for message in logs if message["__typename"] != "EngineEvent"
]
snapshot.assert_match([message["__typename"] for message in non_engine_event_logs])
| TestMaterializations |
python | fluentpython__example-code-2e | 22-dyn-attr-prop/oscon/schedule_v3.py | {
"start": 888,
"end": 2070
} | class ____(Record):
def __repr__(self):
try:
return f'<{self.__class__.__name__} {self.name!r}>'
except AttributeError:
return super().__repr__()
@property
def venue(self):
key = f'venue.{self.venue_serial}'
return self.__class__.fetch(key)
# tag::SCHEDULE3_SPEAKERS[]
@property
def speakers(self):
spkr_serials = self.__dict__['speakers'] # <1>
fetch = self.__class__.fetch
return [fetch(f'speaker.{key}')
for key in spkr_serials] # <2>
# end::SCHEDULE3_SPEAKERS[]
def load(path=JSON_PATH):
records = {}
with open(path) as fp:
raw_data = json.load(fp)
for collection, raw_records in raw_data['Schedule'].items():
record_type = collection[:-1]
cls_name = record_type.capitalize()
cls = globals().get(cls_name, Record)
if inspect.isclass(cls) and issubclass(cls, Record):
factory = cls
else:
factory = Record
for raw_record in raw_records:
key = f'{record_type}.{raw_record["serial"]}'
records[key] = factory(**raw_record)
return records
| Event |
python | run-llama__llama_index | llama-index-core/llama_index/core/node_parser/text/sentence.py | {
"start": 999,
"end": 12616
} | class ____(MetadataAwareTextSplitter):
"""
Parse text with a preference for complete sentences.
In general, this class tries to keep sentences and paragraphs together. Therefore
compared to the original TokenTextSplitter, there are less likely to be
hanging sentences or parts of sentences at the end of the node chunk.
"""
chunk_size: int = Field(
default=DEFAULT_CHUNK_SIZE,
description="The token chunk size for each chunk.",
gt=0,
)
chunk_overlap: int = Field(
default=SENTENCE_CHUNK_OVERLAP,
description="The token overlap of each chunk when splitting.",
ge=0,
)
separator: str = Field(
default=" ", description="Default separator for splitting into words"
)
paragraph_separator: str = Field(
default=DEFAULT_PARAGRAPH_SEP, description="Separator between paragraphs."
)
secondary_chunking_regex: Optional[str] = Field(
default=CHUNKING_REGEX, description="Backup regex for splitting into sentences."
)
_chunking_tokenizer_fn: Callable[[str], List[str]] = PrivateAttr()
_tokenizer: Callable = PrivateAttr()
_split_fns: List[Callable] = PrivateAttr()
_sub_sentence_split_fns: List[Callable] = PrivateAttr()
def __init__(
self,
separator: str = " ",
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
tokenizer: Optional[Callable] = None,
paragraph_separator: str = DEFAULT_PARAGRAPH_SEP,
chunking_tokenizer_fn: Optional[Callable[[str], List[str]]] = None,
secondary_chunking_regex: Optional[str] = CHUNKING_REGEX,
callback_manager: Optional[CallbackManager] = None,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
id_func: Optional[Callable] = None,
):
"""Initialize with parameters."""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
id_func = id_func or default_id_func
callback_manager = callback_manager or CallbackManager([])
super().__init__(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
secondary_chunking_regex=secondary_chunking_regex,
separator=separator,
paragraph_separator=paragraph_separator,
callback_manager=callback_manager,
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
id_func=id_func,
)
self._chunking_tokenizer_fn = (
chunking_tokenizer_fn or split_by_sentence_tokenizer()
)
self._tokenizer = tokenizer or get_tokenizer()
self._split_fns = [
split_by_sep(paragraph_separator),
self._chunking_tokenizer_fn,
]
if secondary_chunking_regex:
self._sub_sentence_split_fns = [
split_by_regex(secondary_chunking_regex),
split_by_sep(separator),
split_by_char(),
]
else:
self._sub_sentence_split_fns = [
split_by_sep(separator),
split_by_char(),
]
@classmethod
def from_defaults(
cls,
separator: str = " ",
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
tokenizer: Optional[Callable] = None,
paragraph_separator: str = DEFAULT_PARAGRAPH_SEP,
chunking_tokenizer_fn: Optional[Callable[[str], List[str]]] = None,
secondary_chunking_regex: str = CHUNKING_REGEX,
callback_manager: Optional[CallbackManager] = None,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
) -> "SentenceSplitter":
"""Initialize with parameters."""
callback_manager = callback_manager or CallbackManager([])
return cls(
separator=separator,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
tokenizer=tokenizer,
paragraph_separator=paragraph_separator,
chunking_tokenizer_fn=chunking_tokenizer_fn,
secondary_chunking_regex=secondary_chunking_regex,
callback_manager=callback_manager,
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
)
@classmethod
def class_name(cls) -> str:
return "SentenceSplitter"
def split_text_metadata_aware(self, text: str, metadata_str: str) -> List[str]:
metadata_len = len(self._tokenizer(metadata_str))
effective_chunk_size = self.chunk_size - metadata_len
if effective_chunk_size <= 0:
raise ValueError(
f"Metadata length ({metadata_len}) is longer than chunk size "
f"({self.chunk_size}). Consider increasing the chunk size or "
"decreasing the size of your metadata to avoid this."
)
elif effective_chunk_size < 50:
print(
f"Metadata length ({metadata_len}) is close to chunk size "
f"({self.chunk_size}). Resulting chunks are less than 50 tokens. "
"Consider increasing the chunk size or decreasing the size of "
"your metadata to avoid this.",
flush=True,
)
return self._split_text(text, chunk_size=effective_chunk_size)
def split_text(self, text: str) -> List[str]:
return self._split_text(text, chunk_size=self.chunk_size)
def _split_text(self, text: str, chunk_size: int) -> List[str]:
"""
_Split incoming text and return chunks with overlap size.
Has a preference for complete sentences, phrases, and minimal overlap.
"""
if text == "":
return [text]
with self.callback_manager.event(
CBEventType.CHUNKING, payload={EventPayload.CHUNKS: [text]}
) as event:
splits = self._split(text, chunk_size)
chunks = self._merge(splits, chunk_size)
event.on_end(payload={EventPayload.CHUNKS: chunks})
return chunks
def _split(self, text: str, chunk_size: int) -> List[_Split]:
r"""
Break text into splits that are smaller than chunk size.
The order of splitting is:
1. split by paragraph separator
2. split by chunking tokenizer (default is nltk sentence tokenizer)
3. split by second chunking regex (default is "[^,\.;]+[,\.;]?")
4. split by default separator (" ")
"""
token_size = self._token_size(text)
if token_size <= chunk_size:
return [_Split(text, is_sentence=True, token_size=token_size)]
text_splits_by_fns, is_sentence = self._get_splits_by_fns(text)
text_splits = []
for text_split_by_fns in text_splits_by_fns:
token_size = self._token_size(text_split_by_fns)
if token_size <= chunk_size:
text_splits.append(
_Split(
text_split_by_fns,
is_sentence=is_sentence,
token_size=token_size,
)
)
else:
recursive_text_splits = self._split(
text_split_by_fns, chunk_size=chunk_size
)
text_splits.extend(recursive_text_splits)
return text_splits
def _merge(self, splits: List[_Split], chunk_size: int) -> List[str]:
"""Merge splits into chunks."""
chunks: List[str] = []
cur_chunk: List[Tuple[str, int]] = [] # list of (text, length)
last_chunk: List[Tuple[str, int]] = []
cur_chunk_len = 0
new_chunk = True
def close_chunk() -> None:
nonlocal chunks, cur_chunk, last_chunk, cur_chunk_len, new_chunk
chunks.append("".join([text for text, length in cur_chunk]))
last_chunk = cur_chunk
cur_chunk = []
cur_chunk_len = 0
new_chunk = True
# add overlap to the next chunk using the last one first
if len(last_chunk) > 0:
last_index = len(last_chunk) - 1
while (
last_index >= 0
and cur_chunk_len + last_chunk[last_index][1] <= self.chunk_overlap
):
overlap_text, overlap_length = last_chunk[last_index]
cur_chunk_len += overlap_length
cur_chunk.insert(0, (overlap_text, overlap_length))
last_index -= 1
split_idx = 0
while split_idx < len(splits):
cur_split = splits[split_idx]
if cur_split.token_size > chunk_size:
raise ValueError("Single token exceeded chunk size")
if cur_chunk_len + cur_split.token_size > chunk_size and not new_chunk:
# if adding split to current chunk exceeds chunk size: close out chunk
close_chunk()
else:
# If this is a new chunk with overlap, and adding the split would
# exceed chunk_size, remove overlap to make room
if new_chunk and cur_chunk_len + cur_split.token_size > chunk_size:
# Remove overlap from the beginning until split fits
while (
len(cur_chunk) > 0
and cur_chunk_len + cur_split.token_size > chunk_size
):
_, length = cur_chunk.pop(0)
cur_chunk_len -= length
if (
cur_split.is_sentence
or cur_chunk_len + cur_split.token_size <= chunk_size
or new_chunk # new chunk, always add at least one split
):
# add split to chunk
cur_chunk_len += cur_split.token_size
cur_chunk.append((cur_split.text, cur_split.token_size))
split_idx += 1
new_chunk = False
else:
# close out chunk
close_chunk()
# handle the last chunk
if not new_chunk:
chunk = "".join([text for text, length in cur_chunk])
chunks.append(chunk)
# run postprocessing to remove blank spaces
return self._postprocess_chunks(chunks)
def _postprocess_chunks(self, chunks: List[str]) -> List[str]:
"""
Post-process chunks.
Remove whitespace only chunks and remove leading and trailing whitespace.
"""
new_chunks = []
for chunk in chunks:
stripped_chunk = chunk.strip()
if stripped_chunk == "":
continue
new_chunks.append(stripped_chunk)
return new_chunks
def _token_size(self, text: str) -> int:
return len(self._tokenizer(text))
def _get_splits_by_fns(self, text: str) -> Tuple[List[str], bool]:
for split_fn in self._split_fns:
splits = split_fn(text)
if len(splits) > 1:
return splits, True
for split_fn in self._sub_sentence_split_fns:
splits = split_fn(text)
if len(splits) > 1:
break
return splits, False
| SentenceSplitter |
python | cherrypy__cherrypy | cherrypy/tutorial/tut10_http_errors.py | {
"start": 333,
"end": 3023
} | class ____(object):
"""HTTP error representation app."""
# Set a custom response for 403 errors.
_cp_config = {'error_page.403': os.path.join(curpath, 'custom_error.html')}
@cherrypy.expose
def index(self):
"""Produce HTTP response body of error display app index URI."""
# display some links that will result in errors
tracebacks = cherrypy.request.show_tracebacks
if tracebacks:
trace = 'off'
else:
trace = 'on'
return (
"""
<html><body>
<p>Toggle tracebacks <a href="toggleTracebacks">%s</a></p>
<p><a href="/doesNotExist">Click me; I'm a broken link!</a></p>
<p>
<a href="/error?code=403">
Use a custom error page from a file.
</a>
</p>
<p>These errors are explicitly raised by the application:</p>
<ul>
<li><a href="/error?code=400">400</a></li>
<li><a href="/error?code=401">401</a></li>
<li><a href="/error?code=402">402</a></li>
<li><a href="/error?code=500">500</a></li>
</ul>
<p><a href="/messageArg">You can also set the response body
when you raise an error.</a></p>
</body></html>
"""
% trace
)
@cherrypy.expose
def toggleTracebacks(self):
"""Switch tracebacks setting on ``/toggleTracebacks`` URI."""
# simple function to toggle tracebacks on and off
tracebacks = cherrypy.request.show_tracebacks
cherrypy.config.update({'request.show_tracebacks': not tracebacks})
# redirect back to the index
raise cherrypy.HTTPRedirect('/')
@cherrypy.expose
def error(self, code):
"""Respond with a given HTTP error."""
# raise an error based on the get query
raise cherrypy.HTTPError(status=code)
@cherrypy.expose
def messageArg(self):
"""Respond with an HTTP 500 and a custom message."""
message = (
"If you construct an HTTPError with a 'message' "
'argument, it wil be placed on the error page '
'(underneath the status line by default).'
)
raise cherrypy.HTTPError(500, message=message)
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
# to '/' will be mapped to HelloWorld().index().
cherrypy.quickstart(HTTPErrorDemo(), config=tutconf)
| HTTPErrorDemo |
python | getsentry__sentry | src/sentry/seer/anomaly_detection/types.py | {
"start": 2756,
"end": 2883
} | class ____(TypedDict):
success: bool
message: str | None
data: list[AnomalyThresholdDataPoint]
| SeerDetectorDataResponse |
python | ethereum__web3.py | tests/utils.py | {
"start": 81,
"end": 2157
} | class ____:
def __init__(self, initial_delay=0, max_delay=1, initial_step=0.01):
self.initial_delay = initial_delay
self.initial_step = initial_step
self.max_delay = max_delay
self.current_delay = initial_delay
def __call__(self):
delay = self.current_delay
if self.current_delay == 0:
self.current_delay += self.initial_step
else:
self.current_delay *= 2
self.current_delay = min(self.current_delay, self.max_delay)
return delay
def reset(self):
self.current_delay = self.initial_delay
def get_open_port():
sock = socket.socket()
sock.bind(("127.0.0.1", 0))
port = sock.getsockname()[1]
sock.close()
return str(port)
async def _async_wait_for_block_fixture_logic(async_w3, block_number=1, timeout=None):
if not timeout:
current_block_number = await async_w3.eth.block_number # type:ignore
timeout = (block_number - current_block_number) * 3
poll_delay_counter = PollDelayCounter()
with Timeout(timeout) as timeout:
eth_block_number = await async_w3.eth.block_number
while eth_block_number < block_number:
await async_w3.manager.coro_request("evm_mine", [])
await timeout.async_sleep(poll_delay_counter())
eth_block_number = await async_w3.eth.block_number
async def _async_wait_for_transaction_fixture_logic(async_w3, txn_hash, timeout=120):
poll_delay_counter = PollDelayCounter()
with Timeout(timeout) as timeout:
while True:
txn_receipt = await async_w3.eth.get_transaction_receipt(txn_hash)
if txn_receipt is not None:
break
asyncio.sleep(poll_delay_counter())
timeout.check()
return txn_receipt
def async_partial(f, *args, **kwargs):
async def f2(*args2, **kwargs2):
result = f(*args, *args2, **kwargs, **kwargs2)
if asyncio.iscoroutinefunction(f):
result = await result
return result
return f2
| PollDelayCounter |
python | doocs__leetcode | solution/1800-1899/1861.Rotating the Box/Solution.py | {
"start": 0,
"end": 660
} | class ____:
def rotateTheBox(self, box: List[List[str]]) -> List[List[str]]:
m, n = len(box), len(box[0])
ans = [[None] * m for _ in range(n)]
for i in range(m):
for j in range(n):
ans[j][m - i - 1] = box[i][j]
for j in range(m):
q = deque()
for i in range(n - 1, -1, -1):
if ans[i][j] == '*':
q.clear()
elif ans[i][j] == '.':
q.append(i)
elif q:
ans[q.popleft()][j] = '#'
ans[i][j] = '.'
q.append(i)
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 460303,
"end": 461023
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of
AcceptEnterpriseAdministratorInvitation
"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "invitation", "message")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
invitation = sgqlc.types.Field("EnterpriseAdministratorInvitation", graphql_name="invitation")
"""The invitation that was accepted."""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of accepting an administrator
invitation.
"""
| AcceptEnterpriseAdministratorInvitationPayload |
python | doocs__leetcode | solution/2500-2599/2582.Pass the Pillow/Solution.py | {
"start": 0,
"end": 222
} | class ____:
def passThePillow(self, n: int, time: int) -> int:
ans = k = 1
for _ in range(time):
ans += k
if ans == 1 or ans == n:
k *= -1
return ans
| Solution |
python | django__django | tests/lookup/models.py | {
"start": 925,
"end": 1089
} | class ____(models.Model):
articles = models.ManyToManyField(Article)
name = models.CharField(max_length=100)
class Meta:
ordering = ("name",)
| Tag |
python | tensorflow__tensorflow | tensorflow/python/grappler/layout_optimizer_test.py | {
"start": 7422,
"end": 83195
} | class ____(test.TestCase):
"""Tests the Grappler layout optimizer."""
def _assert_trans_nchw_to_nhwc(self, name, nodes):
self.assertIn(name + '-TransposeNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_trans_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-TransposeNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_trans_ncdhw_to_ndhwc(self, name, nodes):
self.assertIn(name + '-TransposeNCDHWToNDHWC-LayoutOptimizer', nodes)
def _assert_trans_ndhwc_to_ncdhw(self, name, nodes):
self.assertIn(name + '-TransposeNDHWCToNCDHW-LayoutOptimizer', nodes)
def _assert_map_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-DimMapNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_map_ndhwc_to_ncdhw(self, name, nodes):
self.assertIn(name + '-DataFormatDimMapNDHWCToNCDHW-LayoutOptimizer', nodes)
def _assert_vec_nchw_to_nhwc(self, name, nodes):
self.assertIn(name + '-VecPermuteNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_vec_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-VecPermuteNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_vec_ncdhw_to_ndhwc(self, name, nodes):
self.assertIn(name + '-DataFormatVecPermuteNCDHWToNDHWC-LayoutOptimizer',
nodes)
def _assert_vec_ndhwc_to_ncdhw(self, name, nodes):
self.assertIn(name + '-DataFormatVecPermuteNDHWCToNCDHW-LayoutOptimizer',
nodes)
def _train(self, checkpoint_path, layout_optimizer=False, restore=False):
ops.reset_default_graph()
graph = ops.get_default_graph()
with session.Session(
config=_get_config(layout_optimizer), graph=graph) as sess:
batch = 2
height = 6
width = 7
input_channels = 3
shape = [batch, height, width, input_channels]
image = array_ops.placeholder(dtype='float32', shape=shape)
conv1 = conv_layers.conv2d(image, 32, [3, 3])
conv2 = conv_layers.conv2d(conv1, 32, [3, 3])
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
loss = math_ops.reduce_mean(conv2)
train_op = optimizer.minimize(loss)
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
if restore:
saver.restore(sess, checkpoint_path)
else:
self.evaluate(variables.global_variables_initializer())
np.random.seed(0)
for _ in range(2):
image_val = np.random.rand(*shape).astype(np.float32)
sess.run([loss, train_op], feed_dict={image: image_val})
if restore:
all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
all_vars_values = [var.eval(session=sess) for var in all_vars]
return all_vars_values
else:
saver.save(sess, checkpoint_path)
@test_util.deprecated_graph_mode_only
def testTwoConvLayers(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
output = _two_layer_model(x)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Relu_1-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSplitWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dim = array_ops.placeholder(dtype='int32')
split = array_ops.split(conv, 2, axis=dim)
scale = constant_op.constant(0.1, shape=[32])
offset = constant_op.constant(0.3, shape=[32])
bn0 = nn.fused_batch_norm(split[0], scale, offset)
bn1 = nn.fused_batch_norm(split[1], scale, offset)
add = bn0[0] + bn1[0]
output = array_ops.identity(add)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={dim: 3})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('add_2-0-0', nodes)
self._assert_map_nhwc_to_nchw('split-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSplitVWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dim = array_ops.placeholder(dtype='int32')
sizes = constant_op.constant([50, 10, 4], shape=[3])
split = gen_array_ops.split_v(
value=conv, size_splits=sizes, axis=dim, num_split=3)
output = math_ops.reduce_sum(split[0])
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={dim: 3})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('SplitV-0-0', nodes)
self._assert_map_nhwc_to_nchw('SplitV-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testPadWithConstPaddings(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
paddings = constant_op.constant(
paddings_val, dtype='int32', name='PaddingsConst')
pad = array_ops.pad(conv, paddings)
output = array_ops.identity(pad)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
self.assertIn('Pad-1-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSum(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testCast(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
cast = math_ops.cast(conv, dtype='bool')
output = array_ops.identity(cast)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Cast-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSqueeze(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2])
squeeze = array_ops.squeeze(reduce_sum)
output = array_ops.identity(squeeze)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSqueezeAlongHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2], keepdims=True)
squeeze = array_ops.squeeze(reduce_sum, axis=[1, 2])
output = array_ops.identity(squeeze)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSqueezeAlongNHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2], keepdims=True)
squeeze = array_ops.squeeze(reduce_sum, axis=[0, 1, 2])
output = array_ops.identity(squeeze)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSumAlongHWC(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2, 3])
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSumAlongNHW(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2])
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSumAlongC(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[3])
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Three transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSumAlongCKeepDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[3], keepdims=True)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Sum-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSumAlongHKeepDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[2], keepdims=True)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceSumAlongWCKeepDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
reduce_sum = math_ops.reduce_sum(conv, axis=[2, 3], keepdims=True)
output = array_ops.identity(reduce_sum)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testConcatWithControlDependency(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
axis = constant_op.constant(3)
var = variables.Variable(3)
assign = state_ops.assign(var, 6)
with ops.control_dependencies([assign]):
concat = array_ops.concat([conv, conv], axis)
output = array_ops.identity(concat)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('concat-0-0', nodes)
self.assertIn('concat-2-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testConcatWithControlDependencyFor5DTensor(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([2, 2, 14, 14, 1], seed=0)
w = random_ops.truncated_normal([2, 2, 2, 1, 2], seed=0)
strides = [1, 1, 1, 1, 1]
y = gen_nn_ops.conv3d(x, w, strides, 'SAME')
axis = constant_op.constant(4)
var = variables.Variable(3)
assign = state_ops.assign(var, 6)
with ops.control_dependencies([assign]):
concat = array_ops.concat([y, y], axis)
output = array_ops.identity(concat)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_ndhwc_to_ncdhw('Conv3D-0', nodes)
self._assert_trans_ncdhw_to_ndhwc('concat-0-0', nodes)
self._assert_map_ndhwc_to_ncdhw('concat-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testFill(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
shape = array_ops.shape(conv)
scalar = array_ops.constant(5.7)
fill = array_ops.fill(shape, scalar)
output = array_ops.identity(fill)
x_val = [3.4] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
x: x_val
})
nodes = []
num_transposes = 0
num_vec_permute = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
if _is_permute(node.name):
num_vec_permute += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
# Two vector permute nodes were initially added in the Expand phase of
# LayoutOptimizer; they cancelled out each other in the Collapse phase.
expected_vec_permute = 0
self.assertEqual(expected_vec_permute, num_vec_permute)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Fill-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testTile(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
multiple = array_ops.placeholder(dtype='int32')
tile = array_ops.tile(conv, multiple)
output = array_ops.identity(tile)
multiple_val = [2, 3, 4, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={multiple: multiple_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
multiple: multiple_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Tile-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Tile-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReverseWithConstDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dims = constant_op.constant([3, 1], name='DimsConst')
reverse = array_ops.reverse(conv, dims)
output = array_ops.identity(reverse)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
self.assertIn('ReverseV2-1-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReverseWithNonConstDims(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
dims = array_ops.placeholder(dtype='int32')
reverse = array_ops.reverse(conv, dims)
output = array_ops.identity(reverse)
dims_val = [2, 3]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={dims: dims_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
dims: dims_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
self._assert_map_nhwc_to_nchw('ReverseV2-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSelectOp(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
mean = math_ops.reduce_mean(conv)
condition = math_ops.less(conv, mean)
select = gen_math_ops.select(condition, conv, add)
output = array_ops.identity(select)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSelectOpConditionUnknownShape(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
condition = array_ops.placeholder(dtype='bool')
select = gen_math_ops.select(condition, conv, add)
output = array_ops.identity(select)
condition_val = np.zeros((1, 7, 7, 64))
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={condition: condition_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={condition: condition_val})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 3
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSelectOpScalarCondition(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
add = math_ops.add(conv, conv)
condition = constant_op.constant(True)
select = gen_math_ops.select(condition, conv, add)
output = array_ops.identity(select)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testPadWithNonConstPaddings(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
paddings = array_ops.placeholder(dtype='int32')
pad = array_ops.pad(conv, paddings)
output = array_ops.identity(pad)
paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={paddings: paddings_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
paddings: paddings_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Pad-1', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testMaxPoolV2(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
strides = array_ops.placeholder(dtype='int32', shape=[4])
max_pool = gen_nn_ops.max_pool_v2(conv, ksize, strides, 'VALID')
output = array_ops.identity(max_pool)
strides_val = [1, 3, 2, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={strides: strides_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
strides: strides_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('MaxPoolV2-0-0', nodes)
self._assert_vec_nhwc_to_nchw('MaxPoolV2-2', nodes)
self.assertIn('MaxPoolV2-1-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testMaxPoolGradV2(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
strides = array_ops.placeholder(dtype='int32', shape=[4])
max_pool_grad = gen_nn_ops.max_pool_grad_v2(conv, conv, conv, ksize,
strides, 'VALID')
output = array_ops.identity(max_pool_grad)
strides_val = [1, 3, 2, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={strides: strides_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
strides: strides_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('MaxPoolGradV2-0-0', nodes)
self._assert_vec_nhwc_to_nchw('MaxPoolGradV2-4', nodes)
self.assertIn('MaxPoolGradV2-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testLeakyRelu(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([4, 14, 14, 1], seed=0)
w = random_ops.truncated_normal([2, 2, 1, 2], seed=0)
y = nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
y = nn.leaky_relu(y, alpha=0.2)
output = array_ops.identity(y)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nchw_to_nhwc('LeakyRelu-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testLeakyReluGrad(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([4, 14, 14, 1], seed=0)
w = random_ops.truncated_normal([2, 2, 1, 1], seed=0)
y = nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')
y = gen_nn_ops.leaky_relu_grad(y, x, alpha=0.2)
output = array_ops.identity(y)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 3
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('LeakyReluGrad-1', nodes)
self._assert_trans_nchw_to_nhwc('LeakyReluGrad-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testLeakyReluGradFor5DTensors(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 4, 2, 3, 3], seed=0)
w = random_ops.truncated_normal([2, 2, 2, 3, 3], seed=0)
y = gen_nn_ops.conv3d(x, w, [1, 1, 1, 1, 1], 'SAME')
y = gen_nn_ops.leaky_relu_grad(y, x, alpha=0.2)
output = array_ops.identity(y)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 3
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_ndhwc_to_ncdhw('LeakyReluGrad-1', nodes)
self._assert_trans_ncdhw_to_ndhwc('LeakyReluGrad-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testReduceOpsFor5DTensors(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 4, 2, 3, 3], seed=0)
w = random_ops.truncated_normal([2, 2, 2, 3, 3], seed=0)
conv3d = gen_nn_ops.conv3d(x, w, [1, 1, 1, 1, 1], 'SAME')
y = math_ops.reduce_mean(conv3d, [0, 1, 2, 3], keepdims=True)
output = array_ops.identity(y)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# The reduce op Mean needs to dim map the input reduce index to NCDHW.
# Then, the output needs to be transposed back to NDHWC.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_ndhwc_to_ncdhw('Conv3D-0', nodes)
self._assert_map_ndhwc_to_ncdhw('Mean-1', nodes)
self._assert_trans_ncdhw_to_ndhwc('Mean-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testBinaryOpsFor5DTensors(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 4, 2, 3, 3], seed=0)
w = random_ops.truncated_normal([2, 2, 2, 3, 3], seed=0)
mean = random_ops.truncated_normal([1, 1, 1, 1, 3], seed=0)
variance = random_ops.truncated_normal([1, 1, 1, 1, 3], seed=0)
gamma = random_ops.truncated_normal([1, 1, 1, 1, 3], seed=0)
beta = random_ops.truncated_normal([1, 1, 1, 1, 3], seed=0)
conv3d = gen_nn_ops.conv3d(x, w, [1, 1, 1, 1, 1], 'SAME')
y = nn.batch_normalization(
conv3d,
mean=mean,
variance=variance,
scale=gamma,
offset=beta,
variance_epsilon=0.001)
output = array_ops.identity(y)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# The binary ops mul_1 and add_1 in batch norm need to transpose one of
# the two inputs to NCDHW. The other input has already been transposed via
# Conv3D.
expected_num_transposes = 4
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_ndhwc_to_ncdhw('Conv3D-0', nodes)
self._assert_trans_ndhwc_to_ncdhw('batchnorm/mul_1-1', nodes)
self._assert_trans_ndhwc_to_ncdhw('batchnorm/add_1-1', nodes)
self._assert_trans_ncdhw_to_ndhwc('batchnorm/add_1-0-0', nodes)
@test_util.deprecated_graph_mode_only
def testBatchNorm3D(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x_3d = random_ops.truncated_normal([1, 4, 2, 3, 3], seed=0)
filters = random_ops.truncated_normal([2, 2, 2, 3, 3], seed=0)
strides_val = [1, 1, 1, 1, 1]
scale = constant_op.constant(0.1, shape=[3])
offset = constant_op.constant(0.3, shape=[3])
conv3d = gen_nn_ops.conv3d(x_3d, filters, strides_val, 'SAME')
y, _, _ = nn.fused_batch_norm(conv3d, scale, offset, data_format='NDHWC')
output = array_ops.identity(y)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_ndhwc_to_ncdhw('Conv3D-0', nodes)
self._assert_trans_ncdhw_to_ndhwc('FusedBatchNormV3-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testBatchNormGrad3D(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x_3d = random_ops.truncated_normal([1, 4, 2, 3, 3], seed=0)
filters = random_ops.truncated_normal([2, 2, 2, 3, 3], seed=0)
strides_val = [1, 1, 1, 1, 1]
scale = constant_op.constant(0.1, shape=[3])
offset = constant_op.constant(0.3, shape=[3])
mean = constant_op.constant(0.1, shape=[3])
variance = constant_op.constant(0.3, shape=[3])
conv3d = gen_nn_ops.conv3d(x_3d, filters, strides_val, 'SAME')
y, running_mean, running_var, r0, r1, r2 = gen_nn_ops.fused_batch_norm_v3(
conv3d,
scale,
offset,
mean,
variance,
epsilon=1.001e-5,
exponential_avg_factor=1.0,
data_format='NDHWC',
is_training=True,
name='batch_norm')
dx, dscale, doffset, _, _ = gen_nn_ops.fused_batch_norm_grad_v3(
y,
x_3d,
scale,
r0,
r1,
r2,
epsilon=1.001e-5,
data_format='NDHWC',
is_training=True)
output = array_ops.identity(dx)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 3
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_ndhwc_to_ncdhw('Conv3D-0', nodes)
self._assert_trans_ndhwc_to_ncdhw('FusedBatchNormGradV3-1', nodes)
self._assert_trans_ncdhw_to_ndhwc('FusedBatchNormGradV3-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testConv3D(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([2, 2, 14, 14, 1], seed=0)
w = random_ops.truncated_normal([2, 2, 2, 1, 2], seed=0)
strides = [1, 1, 1, 1, 1]
y = gen_nn_ops.conv3d(x, w, strides, 'SAME')
output = array_ops.identity(y)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_ndhwc_to_ncdhw('Conv3D-0', nodes)
self._assert_trans_ncdhw_to_ndhwc('Conv3D-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testConv3DBackpropInput(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
random_seed.set_random_seed(0)
dy = random_ops.truncated_normal([2, 2, 14, 14, 1], seed=0)
w = random_ops.truncated_normal([2, 2, 2, 1, 1], seed=0)
strides = [1, 1, 1, 1, 1]
x_shape = array_ops.shape(dy)
dx = gen_nn_ops.conv3d_backprop_input_v2(x_shape, w, dy, strides, 'SAME')
output = array_ops.identity(dx)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_vec_ndhwc_to_ncdhw('Conv3DBackpropInputV2-0', nodes)
self._assert_trans_ndhwc_to_ncdhw('Conv3DBackpropInputV2-2', nodes)
self._assert_trans_ncdhw_to_ndhwc('Conv3DBackpropInputV2-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testConv3DBackpropFilter(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([2, 2, 14, 14, 1], seed=0)
dy = random_ops.truncated_normal([2, 2, 14, 14, 1], seed=0)
strides = [1, 1, 1, 1, 1]
w_shape = constant_op.constant([2, 2, 2, 1, 1], shape=[5])
dw = gen_nn_ops.conv3d_backprop_filter_v2(x, w_shape, dy, strides, 'SAME')
output = array_ops.identity(dw)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_ndhwc_to_ncdhw('Conv3DBackpropFilterV2-0', nodes)
self._assert_trans_ndhwc_to_ncdhw('Conv3DBackpropFilterV2-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testBiasAddFor5DTensor(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([2, 2, 14, 14, 1], seed=0)
w = random_ops.truncated_normal([2, 2, 2, 1, 2], seed=0)
b = random_ops.truncated_normal([2], seed=0)
strides = [1, 1, 1, 1, 1]
y = gen_nn_ops.conv3d(x, w, strides, 'SAME')
y = gen_nn_ops.bias_add(y, b, 'NHWC')
output = array_ops.identity(y)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_ndhwc_to_ncdhw('Conv3D-0', nodes)
self._assert_trans_ncdhw_to_ndhwc('BiasAdd-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testBiasAddGradFor5DTensor(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
random_seed.set_random_seed(0)
dy = random_ops.truncated_normal([2, 2, 14, 14, 1], seed=0)
w = random_ops.truncated_normal([2, 2, 2, 1, 1], seed=0)
strides = [1, 1, 1, 1, 1]
dy_shape = array_ops.shape(dy)
dx = gen_nn_ops.conv3d_backprop_input_v2(dy_shape, w, dy, strides, 'SAME')
db = gen_nn_ops.bias_add_grad(dx, 'NHWC')
output = array_ops.identity(db)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# The output of Conv3DBackpropInputV2 won't be converted back to NDHWC
# because of the BiasAddGrad.
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_vec_ndhwc_to_ncdhw('Conv3DBackpropInputV2-0', nodes)
self._assert_trans_ndhwc_to_ncdhw('Conv3DBackpropInputV2-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSliceWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
size = array_ops.placeholder(dtype='int32')
s = array_ops.slice(conv, [0, 0, 0, 0], size)
output = array_ops.identity(s)
size_val = [1, 2, 3, 4]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={size: size_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
size: size_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('Slice-0-0', nodes)
self._assert_vec_nhwc_to_nchw('Slice-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testSliceWithNonConstAxisFor5DTensor(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([2, 2, 14, 14, 1], seed=0)
w = random_ops.truncated_normal([2, 2, 2, 1, 2], seed=0)
strides = [1, 1, 1, 1, 1]
y = gen_nn_ops.conv3d(x, w, strides, 'SAME')
size = array_ops.placeholder(dtype='int32')
s = array_ops.slice(y, [0, 0, 0, 0, 0], size)
output = array_ops.identity(s)
size_val = [1, 1, 2, 2, 1]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={size: size_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={size: size_val})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_ndhwc_to_ncdhw('Conv3D-0', nodes)
self._assert_trans_ncdhw_to_ndhwc('Slice-0-0', nodes)
self._assert_vec_ndhwc_to_ncdhw('Slice-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testStridedSliceWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
end = array_ops.placeholder(dtype='int32')
s = array_ops.strided_slice(conv, [0, 0, 0, 0], end, strides=[1, 2, 3, 1])
output = array_ops.identity(s)
end_val = [1, 2, 3, 4]
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={end: end_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
end: end_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('StridedSlice-0-0', nodes)
self._assert_vec_nhwc_to_nchw('StridedSlice-2', nodes)
self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
self.assertIn('StridedSlice-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testStridedSliceWithMask1011(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
# This will generate a StridedSlice op with begin mask and
# end mask 11(1011).
s = conv[:, :, 1:-1, :]
output = array_ops.identity(s)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testStridedSliceWithMask0111(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
# This will generate a StridedSlice op with begin mask and
# end mask 7(0111).
s = conv[:, :, :, 1:-1]
output = array_ops.identity(s)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testStridedSliceGradWithNonConstAxis(self):
if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0)
x = random_ops.truncated_normal([1, 784], seed=0)
conv = _two_layer_model(x)
end = array_ops.placeholder(dtype='int32')
shape = array_ops.shape(conv)
end_val = [1, 2, 3, 4]
s = array_ops.strided_slice(
conv, [0, 0, 0, 0], end_val, strides=[1, 2, 3, 1])
s_grad = array_ops.strided_slice_grad(shape, [0, 0, 0, 0], end,
[1, 2, 3, 1], s)
output = array_ops.identity(s_grad)
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={end: end_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
end: end_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('StridedSliceGrad-0-0', nodes)
self._assert_vec_nhwc_to_nchw('StridedSliceGrad-2', nodes)
self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
self.assertIn('StridedSlice-2-LayoutOptimizer', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testShapeN(self):
if test.is_gpu_available(cuda_only=True):
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
shapen = array_ops.shape_n([conv, conv])
output = math_ops.add(shapen[0], shapen[1])
x_val = [1.7] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={
x: x_val
})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_vec_nchw_to_nhwc('ShapeN-0-0', nodes)
self.assertAllEqual(output_val_ref, output_val)
@test_util.deprecated_graph_mode_only
def testShapeNFor5DTensor(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
h = array_ops.placeholder(dtype='float32')
x = array_ops.reshape(h, [-1, 2, 14, 14, 1])
w = random_ops.truncated_normal([2, 2, 2, 1, 2], seed=0)
strides = [1, 1, 1, 1, 1]
y = gen_nn_ops.conv3d(x, w, strides, 'SAME')
shapen = array_ops.shape_n([y, y])
output = math_ops.add(shapen[0], shapen[1])
x_val = [1.7] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={h: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata, feed_dict={h: x_val})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 1
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_ndhwc_to_ncdhw('Conv3D-0', nodes)
self._assert_vec_ncdhw_to_ndhwc('ShapeN-0-0', nodes)
self._assert_vec_ncdhw_to_ndhwc('ShapeN-1-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testIdentityNFor4DAnd5DTensors(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
h = array_ops.placeholder(dtype='float32')
x = array_ops.reshape(h, [-1, 2, 14, 14, 1])
w = random_ops.truncated_normal([2, 2, 2, 1, 4], seed=0)
strides = [1, 1, 1, 1, 1]
y = gen_nn_ops.conv3d(x, w, strides, 'SAME')
x1 = array_ops.reshape(h, [-1, 784])
y1 = _two_layer_model(x1)
outputs = array_ops.identity_n([y1, y])
new_x0 = array_ops.reshape(outputs[0], [-1, 2, 14, 14, 1])
new_x1 = array_ops.reshape(outputs[1], [-1, 2, 14, 14, 1])
output = math_ops.add(new_x0, new_x1)
x_val = [1.7] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={h: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata, feed_dict={h: x_val})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 4
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_ndhwc_to_ncdhw('Conv3D-0', nodes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self._assert_trans_ncdhw_to_ndhwc('IdentityN-1-0', nodes)
self._assert_trans_nchw_to_nhwc('IdentityN-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testShapeNFollowedByNotConvertibleNodeReshape(self):
if test.is_gpu_available(cuda_only=True):
x = array_ops.placeholder(dtype='float32')
conv = _two_layer_model(x)
conv_reshape = array_ops.reshape(conv, [1, 1, 1, -1])
shapen = array_ops.shape_n([conv, conv_reshape])
shape = array_ops.identity(shapen[1])
ones = array_ops.ones(shape)
output = math_ops.add_n([conv_reshape, ones])
x_val = [1.7] * 784
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output, feed_dict={x: x_val})
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(
output, run_metadata=metadata, feed_dict={x: x_val})
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testLoop(self):
if test.is_gpu_available(cuda_only=True):
output = _loop()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/MaxPool_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testLoopWithBranch(self):
if test.is_gpu_available(cuda_only=True):
output = _loop_with_branch()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 3
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testLoopWithVecAnd4D(self):
if test.is_gpu_available(cuda_only=True):
output = _loop_with_vec_and_4d()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testBinaryOpSecondPort(self):
if test.is_gpu_available(cuda_only=True):
output = _model_with_second_port()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = self.evaluate(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('FusedBatchNormV3-0', nodes)
self._assert_trans_nchw_to_nhwc('Add-0-0', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
@test_util.deprecated_graph_mode_only
def testGradient(self):
meta_graph = _simple_metagraph()
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.CopyFrom(
rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
min_graph_nodes=-1))
optimized_graph = tf_optimizer.OptimizeGraph(
config, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 5)
@test_util.deprecated_graph_mode_only
def testDepthwise(self):
meta_graph = _simple_metagraph(depthwise=True)
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.CopyFrom(
rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
min_graph_nodes=-1))
optimized_graph = tf_optimizer.OptimizeGraph(
config, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in [
'DepthwiseConv2dNative', 'DepthwiseConv2dNativeBackpropFilter',
'DepthwiseConv2dNativeBackpropInput'
]:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 6)
def testCheckpointCompatibility(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
checkpoint_path = self.get_temp_dir()
self._train(checkpoint_path)
vars_expected = self._train(checkpoint_path, restore=True)
vars_layout_optimized = self._train(
checkpoint_path, restore=True, layout_optimizer=True)
for var_expected, var_layout_optimized in zip(vars_expected,
vars_layout_optimized):
self.assertAllClose(var_expected, var_layout_optimized, atol=1e-6)
if __name__ == '__main__':
test.main()
| LayoutOptimizerTest |
python | PrefectHQ__prefect | tests/input/test_actions.py | {
"start": 318,
"end": 580
} | class ____(pydantic.BaseModel):
name: str
age: int
@pytest.fixture
def flow_run_context(flow_run, prefect_client):
with FlowRunContext.model_construct(
flow_run=flow_run, client=prefect_client
) as context:
yield context
| DemoModel |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/compute.py | {
"start": 10961,
"end": 18714
} | class ____(ComputeEngineBaseOperator):
"""
Creates an Instance in Google Compute Engine based on specified parameters from existing Template.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineInsertInstanceFromTemplateOperator`
:param body: Instance representation as object. For this Operator only 'name' parameter is required for
creating new Instance since all other parameters will be passed through the Template.
:param source_instance_template: Existing Instance Template that will be used as a base while creating
new Instance. When specified, only name of new Instance should be provided as input arguments in
'body' parameter when creating new Instance. All other parameters, such as 'machine_type', 'disks'
and 'network_interfaces' will be passed to Instance as they are specified in the Instance Template.
Full or partial URL and can be represented as examples below:
1. "https://www.googleapis.com/compute/v1/projects/your-project-name/global/instanceTemplates/temp"
2. "projects/your-project-name/global/instanceTemplates/temp"
3. "global/instanceTemplates/temp"
:param zone: Google Cloud zone where the instance exists.
:param project_id: Google Cloud project ID where the Compute Engine Instance exists.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param resource_id: Name of the Instance. If the name of Instance is not specified in body['name'],
the name will be taken from 'resource_id' parameter
:param request_id: Unique request_id that you might add to achieve
full idempotence (for example when client call times out repeating the request
with the same request id will not create a new instance template again)
It should be in UUID format as defined in RFC 4122
:param gcp_conn_id: The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'.
:param api_version: API version used (for example v1 - or beta). Defaults to v1.
:param impersonation_chain: Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
operator_extra_links = (ComputeInstanceDetailsLink(),)
# [START gce_instance_insert_from_template_fields]
template_fields: Sequence[str] = (
"body",
"source_instance_template",
"project_id",
"zone",
"request_id",
"gcp_conn_id",
"api_version",
"impersonation_chain",
"resource_id",
)
# [END gce_instance_insert_from_template_fields]
def __init__(
self,
*,
source_instance_template: str,
body: dict,
zone: str,
resource_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
request_id: str | None = None,
retry: Retry | None = None,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
validate_body: bool = True,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
self.source_instance_template = source_instance_template
self.body = body
self.zone = zone
if "name" in body:
resource_id = self.body["name"]
self.request_id = request_id
self._field_validator = None # Optional[GcpBodyFieldValidator]
self.retry = retry
self.timeout = timeout
self.metadata = metadata
if validate_body:
self._field_validator = GcpBodyFieldValidator(
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version
)
self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE)
super().__init__(
resource_id=resource_id,
zone=zone,
project_id=project_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_all_body_fields(self) -> None:
if self._field_validator:
self._field_validator.validate(self.body)
def _validate_inputs(self) -> None:
super()._validate_inputs()
if not self.resource_id and "name" not in self.body:
raise AirflowException(
"The required parameters 'resource_id' and body['name'] are missing. "
"Please, provide at least one of them."
)
def execute(self, context: Context) -> dict:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self._validate_all_body_fields()
try:
# Idempotence check (sort of) - we want to check if the new Instance
# is already created and if is, then we assume it was created - we do
# not check if content of the Instance is as expected.
# We assume success if the Instance is simply present
existing_instance = hook.get_instance(
resource_id=self.resource_id,
project_id=self.project_id,
zone=self.zone,
)
except exceptions.NotFound as e:
# We actually expect to get 404 / Not Found here as the template should
# not yet exist
if e.code != 404:
raise e
else:
self.log.info("The %s Instance already exists", self.resource_id)
ComputeInstanceDetailsLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
return Instance.to_dict(existing_instance)
self._field_sanitizer.sanitize(self.body)
self.log.info("Creating Instance with specified body: %s", self.body)
hook.insert_instance(
body=self.body,
request_id=self.request_id,
project_id=self.project_id,
zone=self.zone,
source_instance_template=self.source_instance_template,
)
self.log.info("The specified Instance has been created SUCCESSFULLY")
new_instance_from_template = hook.get_instance(
resource_id=self.resource_id,
project_id=self.project_id,
zone=self.zone,
)
ComputeInstanceDetailsLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
return Instance.to_dict(new_instance_from_template)
| ComputeEngineInsertInstanceFromTemplateOperator |
python | plotly__plotly.py | plotly/graph_objs/barpolar/marker/_pattern.py | {
"start": 233,
"end": 15295
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "barpolar.marker"
_path_str = "barpolar.marker.pattern"
_valid_props = {
"bgcolor",
"bgcolorsrc",
"fgcolor",
"fgcolorsrc",
"fgopacity",
"fillmode",
"path",
"pathsrc",
"shape",
"shapesrc",
"size",
"sizesrc",
"solidity",
"soliditysrc",
}
@property
def bgcolor(self):
"""
When there is no colorscale sets the color of background
pattern fill. Defaults to a `marker.color` background when
`fillmode` is "overlay". Otherwise, defaults to a transparent
background.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def fgcolor(self):
"""
When there is no colorscale sets the color of foreground
pattern fill. Defaults to a `marker.color` background when
`fillmode` is "replace". Otherwise, defaults to dark grey or
white to increase contrast with the `bgcolor`.
The 'fgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["fgcolor"]
@fgcolor.setter
def fgcolor(self, val):
self["fgcolor"] = val
@property
def fgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `fgcolor`.
The 'fgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["fgcolorsrc"]
@fgcolorsrc.setter
def fgcolorsrc(self, val):
self["fgcolorsrc"] = val
@property
def fgopacity(self):
"""
Sets the opacity of the foreground pattern fill. Defaults to a
0.5 when `fillmode` is "overlay". Otherwise, defaults to 1.
The 'fgopacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["fgopacity"]
@fgopacity.setter
def fgopacity(self, val):
self["fgopacity"] = val
@property
def fillmode(self):
"""
Determines whether `marker.color` should be used as a default
to `bgcolor` or a `fgcolor`.
The 'fillmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['replace', 'overlay']
Returns
-------
Any
"""
return self["fillmode"]
@fillmode.setter
def fillmode(self, val):
self["fillmode"] = val
@property
def path(self):
"""
Sets a custom path for pattern fill. Use with no `shape` or
`solidity`, provide an SVG path string for the regions of the
square from (0,0) to (`size`,`size`) to color.
The 'path' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["path"]
@path.setter
def path(self, val):
self["path"] = val
@property
def pathsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `path`.
The 'pathsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["pathsrc"]
@pathsrc.setter
def pathsrc(self, val):
self["pathsrc"] = val
@property
def shape(self):
"""
Sets the shape of the pattern fill. By default, no pattern is
used for filling the area.
The 'shape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['', '/', '\\', 'x', '-', '|', '+', '.']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["shape"]
@shape.setter
def shape(self, val):
self["shape"] = val
@property
def shapesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shape`.
The 'shapesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shapesrc"]
@shapesrc.setter
def shapesrc(self, val):
self["shapesrc"] = val
@property
def size(self):
"""
Sets the size of unit squares of the pattern fill in pixels,
which corresponds to the interval of repetition of the pattern.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def solidity(self):
"""
Sets the solidity of the pattern fill. Solidity is roughly the
fraction of the area filled by the pattern. Solidity of 0 shows
only the background color without pattern and solidty of 1
shows only the foreground color without pattern.
The 'solidity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["solidity"]
@solidity.setter
def solidity(self, val):
self["solidity"] = val
@property
def soliditysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `solidity`.
The 'soliditysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["soliditysrc"]
@soliditysrc.setter
def soliditysrc(self, val):
self["soliditysrc"] = val
@property
def _prop_descriptions(self):
return """\
bgcolor
When there is no colorscale sets the color of
background pattern fill. Defaults to a `marker.color`
background when `fillmode` is "overlay". Otherwise,
defaults to a transparent background.
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
fgcolor
When there is no colorscale sets the color of
foreground pattern fill. Defaults to a `marker.color`
background when `fillmode` is "replace". Otherwise,
defaults to dark grey or white to increase contrast
with the `bgcolor`.
fgcolorsrc
Sets the source reference on Chart Studio Cloud for
`fgcolor`.
fgopacity
Sets the opacity of the foreground pattern fill.
Defaults to a 0.5 when `fillmode` is "overlay".
Otherwise, defaults to 1.
fillmode
Determines whether `marker.color` should be used as a
default to `bgcolor` or a `fgcolor`.
path
Sets a custom path for pattern fill. Use with no
`shape` or `solidity`, provide an SVG path string for
the regions of the square from (0,0) to (`size`,`size`)
to color.
pathsrc
Sets the source reference on Chart Studio Cloud for
`path`.
shape
Sets the shape of the pattern fill. By default, no
pattern is used for filling the area.
shapesrc
Sets the source reference on Chart Studio Cloud for
`shape`.
size
Sets the size of unit squares of the pattern fill in
pixels, which corresponds to the interval of repetition
of the pattern.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
solidity
Sets the solidity of the pattern fill. Solidity is
roughly the fraction of the area filled by the pattern.
Solidity of 0 shows only the background color without
pattern and solidty of 1 shows only the foreground
color without pattern.
soliditysrc
Sets the source reference on Chart Studio Cloud for
`solidity`.
"""
def __init__(
self,
arg=None,
bgcolor=None,
bgcolorsrc=None,
fgcolor=None,
fgcolorsrc=None,
fgopacity=None,
fillmode=None,
path=None,
pathsrc=None,
shape=None,
shapesrc=None,
size=None,
sizesrc=None,
solidity=None,
soliditysrc=None,
**kwargs,
):
"""
Construct a new Pattern object
Sets the pattern within the marker.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.barpolar.marker.Pattern`
bgcolor
When there is no colorscale sets the color of
background pattern fill. Defaults to a `marker.color`
background when `fillmode` is "overlay". Otherwise,
defaults to a transparent background.
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
fgcolor
When there is no colorscale sets the color of
foreground pattern fill. Defaults to a `marker.color`
background when `fillmode` is "replace". Otherwise,
defaults to dark grey or white to increase contrast
with the `bgcolor`.
fgcolorsrc
Sets the source reference on Chart Studio Cloud for
`fgcolor`.
fgopacity
Sets the opacity of the foreground pattern fill.
Defaults to a 0.5 when `fillmode` is "overlay".
Otherwise, defaults to 1.
fillmode
Determines whether `marker.color` should be used as a
default to `bgcolor` or a `fgcolor`.
path
Sets a custom path for pattern fill. Use with no
`shape` or `solidity`, provide an SVG path string for
the regions of the square from (0,0) to (`size`,`size`)
to color.
pathsrc
Sets the source reference on Chart Studio Cloud for
`path`.
shape
Sets the shape of the pattern fill. By default, no
pattern is used for filling the area.
shapesrc
Sets the source reference on Chart Studio Cloud for
`shape`.
size
Sets the size of unit squares of the pattern fill in
pixels, which corresponds to the interval of repetition
of the pattern.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
solidity
Sets the solidity of the pattern fill. Solidity is
roughly the fraction of the area filled by the pattern.
Solidity of 0 shows only the background color without
pattern and solidty of 1 shows only the foreground
color without pattern.
soliditysrc
Sets the source reference on Chart Studio Cloud for
`solidity`.
Returns
-------
Pattern
"""
super().__init__("pattern")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.barpolar.marker.Pattern
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.marker.Pattern`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("fgcolor", arg, fgcolor)
self._set_property("fgcolorsrc", arg, fgcolorsrc)
self._set_property("fgopacity", arg, fgopacity)
self._set_property("fillmode", arg, fillmode)
self._set_property("path", arg, path)
self._set_property("pathsrc", arg, pathsrc)
self._set_property("shape", arg, shape)
self._set_property("shapesrc", arg, shapesrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("solidity", arg, solidity)
self._set_property("soliditysrc", arg, soliditysrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Pattern |
python | huggingface__transformers | src/transformers/models/bros/processing_bros.py | {
"start": 707,
"end": 1129
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"add_special_tokens": True,
"padding": False,
"stride": 0,
"return_overflowing_tokens": False,
"return_special_tokens_mask": False,
"return_offsets_mapping": False,
"return_length": False,
"verbose": True,
},
}
| BrosProcessorKwargs |
python | openai__openai-python | src/openai/resources/responses/input_items.py | {
"start": 898,
"end": 4328
} | class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> InputItemsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return InputItemsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> InputItemsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return InputItemsWithStreamingResponse(self)
def list(
self,
response_id: str,
*,
after: str | Omit = omit,
include: List[ResponseIncludable] | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncCursorPage[ResponseItem]:
"""
Returns a list of input items for a given response.
Args:
after: An item ID to list items after, used in pagination.
include: Additional fields to include in the response. See the `include` parameter for
Response creation above for more information.
limit: A limit on the number of objects to be returned. Limit can range between 1 and
100, and the default is 20.
order: The order to return the input items in. Default is `desc`.
- `asc`: Return the input items in ascending order.
- `desc`: Return the input items in descending order.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not response_id:
raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
return self._get_api_list(
f"/responses/{response_id}/input_items",
page=SyncCursorPage[ResponseItem],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"include": include,
"limit": limit,
"order": order,
},
input_item_list_params.InputItemListParams,
),
),
model=cast(Any, ResponseItem), # Union types cannot be passed in as arguments in the type system
)
| InputItems |
python | walkccc__LeetCode | solutions/1255. Maximum Score Words Formed by Letters/1255.py | {
"start": 0,
"end": 812
} | class ____:
def maxScoreWords(
self,
words: list[str],
letters: list[str],
score: list[int],
) -> int:
count = collections.Counter(letters)
def useWord(i: int) -> int:
isValid = True
earned = 0
for c in words[i]:
count[c] -= 1
if count[c] < 0:
isValid = False
earned += score[ord(c) - ord('a')]
return earned if isValid else -1
def unuseWord(i: int) -> None:
for c in words[i]:
count[c] += 1
def dfs(s: int) -> int:
"""Returns the maximum score you can get from words[s..n)."""
ans = 0
for i in range(s, len(words)):
earned = useWord(i)
if earned > 0:
ans = max(ans, earned + dfs(i + 1))
unuseWord(i)
return ans
return dfs(0)
| Solution |
python | matplotlib__matplotlib | lib/matplotlib/dates.py | {
"start": 52317,
"end": 54054
} | class ____(RRuleLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Parameters
----------
base : int, default: 1
Mark ticks every *base* years.
month : int, default: 1
The month on which to place the ticks, starting from 1. Default is
January.
day : int, default: 1
The day on which to place the ticks.
tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
"""
rule = rrulewrapper(YEARLY, interval=base, bymonth=month,
bymonthday=day, **self.hms0d)
super().__init__(rule, tz=tz)
self.base = ticker._Edge_integer(base, 0)
def _create_rrule(self, vmin, vmax):
# 'start' needs to be a multiple of the interval to create ticks on
# interval multiples when the tick frequency is YEARLY
ymin = max(self.base.le(vmin.year) * self.base.step, 1)
ymax = min(self.base.ge(vmax.year) * self.base.step, 9999)
c = self.rule._construct
replace = {'year': ymin,
'month': c.get('bymonth', 1),
'day': c.get('bymonthday', 1),
'hour': 0, 'minute': 0, 'second': 0}
start = vmin.replace(**replace)
stop = start.replace(year=ymax)
self.rule.set(dtstart=start, until=stop)
return start, stop
| YearLocator |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/vertex_ai/pipeline_job.py | {
"start": 1964,
"end": 24535
} | class ____(GoogleBaseHook, OperationHelper):
"""Hook for Google Cloud Vertex AI Pipeline Job APIs."""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self._pipeline_job: PipelineJob | None = None
def get_pipeline_service_client(
self,
region: str | None = None,
) -> PipelineServiceClient:
"""Return PipelineServiceClient object."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return PipelineServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_pipeline_job_object(
self,
display_name: str,
template_path: str,
job_id: str | None = None,
pipeline_root: str | None = None,
parameter_values: dict[str, Any] | None = None,
input_artifacts: dict[str, str] | None = None,
enable_caching: bool | None = None,
encryption_spec_key_name: str | None = None,
labels: dict[str, str] | None = None,
project: str | None = None,
location: str | None = None,
failure_policy: str | None = None,
) -> PipelineJob:
"""Return PipelineJob object."""
return PipelineJob(
display_name=display_name,
template_path=template_path,
job_id=job_id,
pipeline_root=pipeline_root,
parameter_values=parameter_values,
input_artifacts=input_artifacts,
enable_caching=enable_caching,
encryption_spec_key_name=encryption_spec_key_name,
labels=labels,
credentials=self.get_credentials(),
project=project,
location=location,
failure_policy=failure_policy,
)
def cancel_pipeline_job(self) -> None:
"""Cancel PipelineJob."""
if self._pipeline_job:
self._pipeline_job.cancel()
@GoogleBaseHook.fallback_to_default_project_id
def create_pipeline_job(
self,
project_id: str,
region: str,
pipeline_job: PipelineJob,
pipeline_job_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> types.PipelineJob:
"""
Create a PipelineJob. A PipelineJob will run immediately when created.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param pipeline_job: Required. The PipelineJob to create.
:param pipeline_job_id: The ID to use for the PipelineJob, which will become the final component of
the PipelineJob name. If not provided, an ID will be automatically generated.
This value should be less than 128 characters, and valid characters are /[a-z][0-9]-/.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.create_pipeline_job(
request={
"parent": parent,
"pipeline_job": pipeline_job,
"pipeline_job_id": pipeline_job_id,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def run_pipeline_job(
self,
project_id: str,
region: str,
display_name: str,
template_path: str,
job_id: str | None = None,
pipeline_root: str | None = None,
parameter_values: dict[str, Any] | None = None,
input_artifacts: dict[str, str] | None = None,
enable_caching: bool | None = None,
encryption_spec_key_name: str | None = None,
labels: dict[str, str] | None = None,
failure_policy: str | None = None,
# START: run param
service_account: str | None = None,
network: str | None = None,
create_request_timeout: float | None = None,
experiment: str | experiment_resources.Experiment | None = None,
# END: run param
) -> PipelineJob:
"""
Create and run a PipelineJob until its completion.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param display_name: Required. The user-defined name of this Pipeline.
:param template_path: Required. The path of PipelineJob or PipelineSpec JSON or YAML file. It can be
a local path, a Google Cloud Storage URI (e.g. "gs://project.name"), an Artifact Registry URI
(e.g. "https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"), or an HTTPS URI.
:param job_id: Optional. The unique ID of the job run. If not specified, pipeline name + timestamp
will be used.
:param pipeline_root: Optional. The root of the pipeline outputs. If not set, the staging bucket set
in aiplatform.init will be used. If that's not set a pipeline-specific artifacts bucket will be
used.
:param parameter_values: Optional. The mapping from runtime parameter names to its values that
control the pipeline run.
:param input_artifacts: Optional. The mapping from the runtime parameter name for this artifact to
its resource id. For example: "vertex_model":"456". Note: full resource name
("projects/123/locations/us-central1/metadataStores/default/artifacts/456") cannot be used.
:param enable_caching: Optional. Whether to turn on caching for the run.
If this is not set, defaults to the compile time settings, which are True for all tasks by
default, while users may specify different caching options for individual tasks.
If this is set, the setting applies to all tasks in the pipeline. Overrides the compile time
settings.
:param encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer managed
encryption key used to protect the job. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute resource is created. If this is set,
then all resources created by the PipelineJob will be encrypted with the provided encryption key.
Overrides encryption_spec_key_name set in aiplatform.init.
:param labels: Optional. The user defined metadata to organize PipelineJob.
:param failure_policy: Optional. The failure policy - "slow" or "fast". Currently, the default of a
pipeline is that the pipeline will continue to run until no more tasks can be executed, also
known as PIPELINE_FAILURE_POLICY_FAIL_SLOW (corresponds to "slow"). However, if a pipeline is set
to PIPELINE_FAILURE_POLICY_FAIL_FAST (corresponds to "fast"), it will stop scheduling any new
tasks when a task has failed. Any scheduled tasks will continue to completion.
:param service_account: Optional. Specifies the service account for workload run-as account. Users
submitting jobs must have act-as permission on this run-as account.
:param network: Optional. The full name of the Compute Engine network to which the job should be
peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network. If left unspecified, the
network set in aiplatform.init will be used. Otherwise, the job is not peered with any network.
:param create_request_timeout: Optional. The timeout for the create request in seconds.
:param experiment: Optional. The Vertex AI experiment name or instance to associate to this
PipelineJob. Metrics produced by the PipelineJob as system.Metric Artifacts will be associated as
metrics to the current Experiment Run. Pipeline parameters will be associated as parameters to
the current Experiment Run.
"""
self._pipeline_job = self.get_pipeline_job_object(
display_name=display_name,
template_path=template_path,
job_id=job_id,
pipeline_root=pipeline_root,
parameter_values=parameter_values,
input_artifacts=input_artifacts,
enable_caching=enable_caching,
encryption_spec_key_name=encryption_spec_key_name,
labels=labels,
project=project_id,
location=region,
failure_policy=failure_policy,
)
self._pipeline_job.submit(
service_account=service_account,
network=network,
create_request_timeout=create_request_timeout,
experiment=experiment,
)
self._pipeline_job.wait()
return self._pipeline_job
@GoogleBaseHook.fallback_to_default_project_id
def submit_pipeline_job(
self,
project_id: str,
region: str,
display_name: str,
template_path: str,
job_id: str | None = None,
pipeline_root: str | None = None,
parameter_values: dict[str, Any] | None = None,
input_artifacts: dict[str, str] | None = None,
enable_caching: bool | None = None,
encryption_spec_key_name: str | None = None,
labels: dict[str, str] | None = None,
failure_policy: str | None = None,
# START: run param
service_account: str | None = None,
network: str | None = None,
create_request_timeout: float | None = None,
experiment: str | experiment_resources.Experiment | None = None,
# END: run param
) -> PipelineJob:
"""
Create and start a PipelineJob run.
For more info about the client method please see:
https://cloud.google.com/python/docs/reference/aiplatform/latest/google.cloud.aiplatform.PipelineJob#google_cloud_aiplatform_PipelineJob_submit
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param display_name: Required. The user-defined name of this Pipeline.
:param template_path: Required. The path of PipelineJob or PipelineSpec JSON or YAML file. It can be
a local path, a Google Cloud Storage URI (e.g. "gs://project.name"), an Artifact Registry URI
(e.g. "https://us-central1-kfp.pkg.dev/proj/repo/pack/latest"), or an HTTPS URI.
:param job_id: Optional. The unique ID of the job run. If not specified, pipeline name + timestamp
will be used.
:param pipeline_root: Optional. The root of the pipeline outputs. If not set, the staging bucket set
in aiplatform.init will be used. If that's not set a pipeline-specific artifacts bucket will be
used.
:param parameter_values: Optional. The mapping from runtime parameter names to its values that
control the pipeline run.
:param input_artifacts: Optional. The mapping from the runtime parameter name for this artifact to
its resource id. For example: "vertex_model":"456". Note: full resource name
("projects/123/locations/us-central1/metadataStores/default/artifacts/456") cannot be used.
:param enable_caching: Optional. Whether to turn on caching for the run.
If this is not set, defaults to the compile time settings, which are True for all tasks by
default, while users may specify different caching options for individual tasks.
If this is set, the setting applies to all tasks in the pipeline. Overrides the compile time
settings.
:param encryption_spec_key_name: Optional. The Cloud KMS resource identifier of the customer managed
encryption key used to protect the job. Has the form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute resource is created. If this is set,
then all resources created by the PipelineJob will be encrypted with the provided encryption key.
Overrides encryption_spec_key_name set in aiplatform.init.
:param labels: Optional. The user defined metadata to organize PipelineJob.
:param failure_policy: Optional. The failure policy - "slow" or "fast". Currently, the default of a
pipeline is that the pipeline will continue to run until no more tasks can be executed, also
known as PIPELINE_FAILURE_POLICY_FAIL_SLOW (corresponds to "slow"). However, if a pipeline is set
to PIPELINE_FAILURE_POLICY_FAIL_FAST (corresponds to "fast"), it will stop scheduling any new
tasks when a task has failed. Any scheduled tasks will continue to completion.
:param service_account: Optional. Specifies the service account for workload run-as account. Users
submitting jobs must have act-as permission on this run-as account.
:param network: Optional. The full name of the Compute Engine network to which the job should be
peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network. If left unspecified, the
network set in aiplatform.init will be used. Otherwise, the job is not peered with any network.
:param create_request_timeout: Optional. The timeout for the create request in seconds.
:param experiment: Optional. The Vertex AI experiment name or instance to associate to this PipelineJob.
Metrics produced by the PipelineJob as system.Metric Artifacts will be associated as metrics
to the current Experiment Run. Pipeline parameters will be associated as parameters to
the current Experiment Run.
"""
self._pipeline_job = self.get_pipeline_job_object(
display_name=display_name,
template_path=template_path,
job_id=job_id,
pipeline_root=pipeline_root,
parameter_values=parameter_values,
input_artifacts=input_artifacts,
enable_caching=enable_caching,
encryption_spec_key_name=encryption_spec_key_name,
labels=labels,
project=project_id,
location=region,
failure_policy=failure_policy,
)
self._pipeline_job.submit(
service_account=service_account,
network=network,
create_request_timeout=create_request_timeout,
experiment=experiment,
)
return self._pipeline_job
@GoogleBaseHook.fallback_to_default_project_id
def get_pipeline_job(
self,
project_id: str,
region: str,
pipeline_job_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> types.PipelineJob:
"""
Get a PipelineJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param pipeline_job_id: Required. The ID of the PipelineJob resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.pipeline_job_path(project_id, region, pipeline_job_id)
result = client.get_pipeline_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_pipeline_jobs(
self,
project_id: str,
region: str,
page_size: int | None = None,
page_token: str | None = None,
filter: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListPipelineJobsPager:
"""
List PipelineJobs in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: Optional. Lists the PipelineJobs that match the filter expression. The
following fields are supported:
- ``pipeline_name``: Supports ``=`` and ``!=`` comparisons.
- ``display_name``: Supports ``=``, ``!=`` comparisons, and
``:`` wildcard.
- ``pipeline_job_user_id``: Supports ``=``, ``!=``
comparisons, and ``:`` wildcard. for example, can check
if pipeline's display_name contains *step* by doing
display_name:"*step*"
- ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``labels``: Supports key-value equality and key presence.
Filter expressions can be combined together using logical
operators (``AND`` & ``OR``). For example:
``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``.
The syntax to define filter expression is based on
https://google.aip.dev/160.
:param page_size: Optional. The standard list page size.
:param page_token: Optional. The standard list page token. Typically obtained via
[ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListPipelineJobsResponse.next_page_token]
of the previous
[PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]
call.
:param order_by: Optional. A comma-separated list of fields to order by. The default
sort order is in ascending order. Use "desc" after a field
name for descending. You can have multiple order_by fields
provided e.g. "create_time desc, end_time", "end_time,
start_time, update_time" For example, using "create_time
desc, end_time" will order results by create time in
descending order, and if there are multiple jobs having the
same create time, order them by the end time in ascending
order. if order_by is not specified, it will order by
default order is create time in descending order. Supported
fields:
- ``create_time``
- ``update_time``
- ``end_time``
- ``start_time``
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_pipeline_jobs(
request={
"parent": parent,
"page_size": page_size,
"page_token": page_token,
"filter": filter,
"order_by": order_by,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_pipeline_job(
self,
project_id: str,
region: str,
pipeline_job_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete a PipelineJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param pipeline_job_id: Required. The ID of the PipelineJob resource to be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_pipeline_service_client(region)
name = client.pipeline_job_path(project_id, region, pipeline_job_id)
result = client.delete_pipeline_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@staticmethod
def extract_pipeline_job_id(obj: dict) -> str:
"""Return unique id of a pipeline job from its name."""
return obj["name"].rpartition("/")[-1]
| PipelineJobHook |
python | huggingface__transformers | src/transformers/models/efficientloftr/modeling_efficientloftr.py | {
"start": 19191,
"end": 19975
} | class ____(nn.Module):
    def __init__(self, config: EfficientLoFTRConfig):
        """Two-layer bias-free MLP with a final LayerNorm.

        Projects an input of width ``2 * hidden_size`` (presumably a
        concatenation of two ``hidden_size`` feature tensors — confirm against
        callers) down to ``hidden_size`` through ``intermediate_size``.
        """
        super().__init__()
        hidden_size = config.hidden_size
        intermediate_size = config.intermediate_size
        # fc1 consumes twice the hidden width; both linears are bias-free.
        self.fc1 = nn.Linear(hidden_size * 2, intermediate_size, bias=False)
        # Activation is resolved by name from the shared ACT2FN registry.
        self.activation = ACT2FN[config.mlp_activation_function]
        self.fc2 = nn.Linear(intermediate_size, hidden_size, bias=False)
        self.layer_norm = nn.LayerNorm(hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.fc2(hidden_states)
hidden_states = self.layer_norm(hidden_states)
return hidden_states
| EfficientLoFTRMLP |
python | tensorflow__tensorflow | tensorflow/lite/python/convert.py | {
"start": 5076,
"end": 44930
} | class ____(enum.Enum):
"""Enum class defining the sets of ops available to generate TFLite models.
WARNING: Experimental interface, subject to change.
"""
# Convert model using TensorFlow Lite builtin ops.
TFLITE_BUILTINS = "TFLITE_BUILTINS"
# Convert model using TensorFlow ops. Not all TensorFlow ops are available.
# WARNING: Experimental interface, subject to change.
SELECT_TF_OPS = "SELECT_TF_OPS"
# Convert model using only TensorFlow Lite quantized int8 operations.
# Specifying this will throw an error for operations that do not yet have
# quantized implementations.
TFLITE_BUILTINS_INT8 = "TFLITE_BUILTINS_INT8"
# Convert model using only TensorFlow Lite operations with quantized int8
# weights, int16 activations and int64 bias.
# Specifying this will throw an error for operations that do not yet have
# quantized implementations.
# This quantization mode may be used in models for super-resolution,
# audio signal processing or image de-noising. It improves accuracy
# significantly, but only slightly increases the model size.
# WARNING: These ops are currently experimental and have not yet been
# finalized.
# They are only compatible with CPU execution, and have not been optimized for
# production.
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 = (
"EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8"
)
# Convert model using only stablehlo ops.
# This option can not be combined with other OpsSets.
# The feature is in early development.
# The code to execute StableHLO ops in the runtime is to be implemented
# and the serialization format is not stabilized yet.
EXPERIMENTAL_STABLEHLO_OPS = "EXPERIMENTAL_STABLEHLO_OPS"
  def __str__(self):
    # Enum values here are already strings (e.g. "TFLITE_BUILTINS");
    # str() keeps this robust should a non-string value ever be added.
    return str(self.value)
@staticmethod
def get_options():
"""Returns a list of OpsSet options as a list of strings."""
return [str(option) for option in list(OpsSet)]
@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.QUANTIZE)
def mlir_quantize(
    input_data_str,
    disable_per_channel=False,
    fully_quantize=False,
    inference_type=_types_pb2.QUANTIZED_INT8,
    input_data_type=dtypes.float32,
    output_data_type=dtypes.float32,
    enable_numeric_verify=False,
    enable_whole_model_verify=False,
    denylisted_ops=None,
    denylisted_nodes=None,
    enable_variable_quantization=False,
    disable_per_channel_for_dense_layers=False,
    debug_options_str="",
):
  """Quantize `input_data_str` with calibration results.

  Args:
    input_data_str: Input data in serialized form (e.g. a TFLITE model with
      calibration results).
    disable_per_channel: Bool indicating whether to do per-channel or per-tensor
      quantization
    fully_quantize: Bool indicating whether to fully quantize the model. Besides
      model body, the input/output will be quantized as well.
    inference_type: Data type for the activations. The default value is int8.
    input_data_type: Data type for the inputs. The default value is float32.
    output_data_type: Data type for the outputs. The default value is float32.
    enable_numeric_verify: Experimental. Subject to change. Bool indicating
      whether to add NumericVerify ops into the debug mode quantized model.
    enable_whole_model_verify: Experimental. Subject to change. Bool indicating
      whether to add verification for layer by layer, or on whole model. When
      disabled (per-layer) float and quantized ops will be run from same input
      (output of previous quantized layer). When enabled, float and quantized
      ops will run with respective float and quantized output of previous ops.
    denylisted_ops: Experimental. Subject to change. Set of ops to denylist.
    denylisted_nodes: Experimental. Subject to change. Set of nodes to denylist.
    enable_variable_quantization: Experimental. Subject to change. Bool
      indicating whether to enable quantization of the residual variables
      remaining after the variable freezing pass.
    disable_per_channel_for_dense_layers: Bool indicating whether to do
      per-channel or per-tensor quantization in Fully Connected layers. Default
      value is False meaning per-channel quantization is enabled.
    debug_options_str: Serialized proto describing TFLite converter debug
      options, see `debug/debug_options.proto`.

  Returns:
    Quantized model in serialized form (e.g. a TFLITE model) with floating-point
    inputs and outputs.
  """
  # TF dtypes are mapped to their TFLite equivalents before crossing into the
  # native quantizer; every other argument is forwarded positionally as-is.
  return wrap_converter.wrapped_experimental_mlir_quantize(
      input_data_str,
      disable_per_channel,
      fully_quantize,
      inference_type,
      convert_tensor_tf_type_to_tflite_type(input_data_type),
      convert_tensor_tf_type_to_tflite_type(output_data_type),
      enable_numeric_verify,
      enable_whole_model_verify,
      denylisted_ops,
      denylisted_nodes,
      enable_variable_quantization,
      disable_per_channel_for_dense_layers,
      debug_options_str,
  )
@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.SPARSIFY)
def mlir_sparsify(input_data_str):
  """Sparsify `input_data_str` to encode sparse tensor with proper format.

  Args:
    input_data_str: Input data in serialized form (e.g. a TFLITE model).

  Returns:
    Sparsified model in serialized form (e.g. a TFLITE model).
  """
  # Thin pass-through to the native MLIR sparsification entry point.
  return wrap_converter.wrapped_experimental_mlir_sparsify(input_data_str)
def register_custom_opdefs(custom_opdefs_list):
  """Register the given custom opdefs to the TensorFlow global op registry.

  Args:
    custom_opdefs_list: String representing the custom ops OpDefs that are
      included in the GraphDef.

  Returns:
    True if the registration is successfully completed.
  """
  # Registration happens in the native layer so that subsequent GraphDef
  # imports can resolve these custom ops.
  return wrap_converter.wrapped_register_custom_opdefs(custom_opdefs_list)
def convert(
    model_flags: _model_flags_pb2.ModelFlags,
    conversion_flags: _conversion_flags_pb2.ConverterFlags,
    input_data_str: Optional[str] = None,
    debug_info_str: Optional[str] = None,
):
  """Converts `input_data_str` to a TFLite model.

  Args:
    model_flags: Proto describing model properties, see `model_flags.proto`.
    conversion_flags: Proto describing conversion properties, see
      `compiler/mlir/lite/converter_flags.proto`.
    input_data_str: Input data in serialized form (e.g. a graphdef is common, or
      it can be hlo text or proto)
    debug_info_str: Serialized `GraphDebugInfo` proto describing logging
      information.

  Returns:
    Converted model in serialized form (e.g. a TFLITE model is common).

  Raises:
    ConverterError: When conversion fails in TFLiteConverter, usually due to
      ops not being supported.
  """
  try:
    return wrap_converter.wrapped_convert(
        model_flags.SerializeToString(),
        conversion_flags.SerializeToString(),
        input_data_str,
        debug_info_str,
    )
  except Exception as e:
    converter_error = ConverterError(str(e))
    # Collect detailed error data gathered on the native side and attach it
    # to the Python-level exception before (possibly) re-raising.
    for error_data in _metrics_wrapper.retrieve_collected_errors():
      converter_error.append_error(error_data)
      # Seldom we encounter the case where an unsupported
      # `StatefulPartitionedCallOp` is not inlined and remains in the final
      # IR. If this occurs we can set `guarantee_all_funcs_one_use` and retry.
      # This makes the converter copy functions definitions called by
      # multiple StatefulPartitionedCall, thus allowing them to be properly
      # inlined.
      # NOTE: the `not ...guarantee_all_funcs_one_use` guard limits this retry
      # to a single recursion — the retried call cannot take this branch again.
      if (
          error_data.error_code
          == converter_error_data_pb2.ConverterErrorData.ERROR_STATEFUL_PARTITIONED_CALL_IN_FINAL_IR
          and not conversion_flags.guarantee_all_funcs_one_use
      ):
        conversion_flags.guarantee_all_funcs_one_use = True
        return convert(
            model_flags,
            conversion_flags,
            input_data_str,
            debug_info_str,
        )
    raise converter_error
def build_model_flags(
    change_concat_input_ranges=False,
    allow_nonexistent_arrays=False,
    saved_model_dir=None,
    saved_model_version=0,
    saved_model_tags=None,
    saved_model_exported_names=None,
    **_,
):
  """Builds the ModelFlags protocol buffer from keyword parameters.

  Args:
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    allow_nonexistent_arrays: Allow specifying array names that don't exist or
      are unused in the final graph. (default False)
    saved_model_dir: Filepath of the saved model to be converted. Non-empty only
      when the SavedModel import path is used; otherwise the graph def-based
      conversion is processed.
    saved_model_version: SavedModel file format version of the saved model file
      to be converted. Only meaningful when the SavedModel import path is used.
    saved_model_tags: Set of string saved model tags, formatted in the
      comma-separated value. Only meaningful when the SavedModel import path is
      used.
    saved_model_exported_names: Names to be exported (default: export all) when
      the SavedModel import path is on.

  Returns:
    model_flags: protocol buffer describing the model.
  """
  flags = _model_flags_pb2.ModelFlags()
  flags.change_concat_input_ranges = change_concat_input_ranges
  flags.allow_nonexistent_arrays = allow_nonexistent_arrays
  # SavedModel-specific fields are only populated on the SavedModel path.
  if saved_model_dir:
    flags.saved_model_dir = saved_model_dir
    flags.saved_model_version = saved_model_version
    if saved_model_tags:
      flags.saved_model_tags.extend(saved_model_tags)
    if saved_model_exported_names:
      flags.saved_model_exported_names.extend(saved_model_exported_names)
  return flags
def build_conversion_flags(
    inference_type=dtypes.float32,
    inference_input_type=None,
    input_format=lite_constants.TENSORFLOW_GRAPHDEF,
    output_format=lite_constants.TFLITE,
    default_ranges_stats=None,
    drop_control_dependency=True,
    reorder_across_fake_quant=False,
    allow_custom_ops=False,
    post_training_quantize=False,
    quantize_to_float16=False,
    dump_graphviz_dir=None,
    dump_graphviz_video=False,
    target_ops=None,
    conversion_summary_dir=None,
    select_user_tf_ops=None,
    allow_all_select_tf_ops=False,
    enable_tflite_resource_variables=True,
    unfold_batchmatmul=False,
    legalize_custom_tensor_list_ops=False,
    lower_tensor_list_ops=True,
    default_to_single_batch_in_tensor_list_ops=False,
    accumulation_type=None,
    allow_bfloat16=False,
    unfold_large_splat_constant=False,
    supported_backends=None,
    disable_per_channel_quantization=False,
    enable_mlir_dynamic_range_quantizer=False,
    tf_quantization_mode=None,
    disable_infer_tensor_range=False,
    use_fake_quant_num_bits=False,
    enable_dynamic_update_slice=False,
    preserve_assert_op=False,
    guarantee_all_funcs_one_use=False,
    enable_mlir_variable_quantization=False,
    disable_fuse_mul_and_fc=False,
    ir_dump_dir=None,
    ir_dump_pass_regex=None,
    ir_dump_func_regex=None,
    enable_timing=None,
    print_ir_before=None,
    print_ir_after=None,
    print_ir_module_scope=None,
    elide_elementsattrs_if_larger=None,
    use_buffer_offset=False,
    reduce_type_precision=False,
    qdq_conversion_mode=None,
    strict_qdq_mode=False,
    disable_per_channel_quantization_for_dense_layers=False,
    enable_composite_direct_lowering=False,
    model_origin_framework=lite_constants.UNSET,
    canonicalizing_inf_as_min_max_float=True,
    serialize_debug_metadata=False,
    unsafe_fuse_dynamic_shaped_broadcast=False,
    **_,
):
  """Builds protocol buffer describing a conversion of a model.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    inference_type: Data type of numeric arrays, excluding the input layer.
      (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8})
    inference_input_type: Data type of the numeric arrays in the input layer. If
      `inference_input_type` is in {tf.int8, tf.uint8}, then
      `quantized_input_stats` must be provided. (default is the value assigned
      to `inference_type`, must be in {tf.float32, tf.int8, tf.uint8})
    input_format: Type of data to read. (default TENSORFLOW_GRAPHDEF, must be in
      {TENSORFLOW_GRAPHDEF})
    output_format: Output file format. (default TFLITE, must be in {TFLITE,
      GRAPHVIZ_DOT})
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver. (default
      False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will be
      latency improvements (at the cost of accuracy). (default False) If
      quantization_options is set, all quantization arg will be ignored.
    quantize_to_float16: Boolean indicating whether to convert float buffers to
      float16. (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet options
      indicating which converter to use. (default set([OpsSet.TFLITE_BUILTINS]))
    conversion_summary_dir: A string, the path to the generated conversion logs.
    select_user_tf_ops: List of user's defined TensorFlow ops need to be
      supported in the TensorFlow Lite runtime. These ops will be supported as
      select TensorFlow ops.
    allow_all_select_tf_ops: If True, automatically add all TF ops (including
      custom TF ops) to the converted model as flex ops.
    enable_tflite_resource_variables: Experimental flag, subject to change.
      Enables conversion of resource variables. (default True)
    unfold_batchmatmul: Whether to unfold tf.BatchMatMul to a set of
      tfl.fully_connected ops. If not, translate to tfl.batch_matmul.
    legalize_custom_tensor_list_ops: Whether to legalize `tf.TensorList*` ops to
      tfl custom if they can all be supported.
    lower_tensor_list_ops: Whether to lower tensor list ops to builtin ops. If
      not, use Flex tensor list ops.
    default_to_single_batch_in_tensor_list_ops: Whether to force to use batch
      size one when the tensor list ops has the unspecified batch size.
    accumulation_type: Data type of the accumulators in quantized inference.
      Typically used for float16 quantization and is either fp16 or fp32.
    allow_bfloat16: Whether the converted model supports reduced precision
      inference with the bfloat16 type.
    unfold_large_splat_constant: Whether to unfold large splat constant tensors
      in the flatbuffer model to reduce size.
    supported_backends: List of TFLite backends which needs to check
      compatibility.
    disable_per_channel_quantization: Disable per-channel quantized weights for
      dynamic range quantization. Only per-tensor quantization will be used.
    enable_mlir_dynamic_range_quantizer: Enable MLIR dynamic range quantization.
      If False, the old converter dynamic range quantizer is used.
    tf_quantization_mode: Indicates the mode of TF Quantization when the output
      model is used for TF Quantization.
    disable_infer_tensor_range: Disable infering tensor ranges.
    use_fake_quant_num_bits: Allow quantization parameters to be calculated from
      num_bits attribute.
    enable_dynamic_update_slice: Enable to convert to DynamicUpdateSlice op.
      (default: False).
    preserve_assert_op: Whether to preserve `TF::AssertOp` (default: False).
    guarantee_all_funcs_one_use: Whether to clone functions so that each
      function only has a single use. This option will be helpful if the
      conversion fails when the `PartitionedCall` or `StatefulPartitionedCall`
      can't be properly inlined (default: False).
    enable_mlir_variable_quantization: Enable MLIR variable quantization. There
      is a variable freezing pass, but some variables may not be fully frozen by
      it. This flag enables quantization of those residual variables in the MLIR
      graph.
    disable_fuse_mul_and_fc: Disable fusing input multiplication with
      fullyconnected operations. Useful when quantizing weights.
    ir_dump_dir: A string specifying the target directory to output MLIR dumps
      produced during conversion. If populated, enables MLIR dumps.
    ir_dump_pass_regex: A string containing a regular expression for filtering
      the pass names to be dumped. Effective only if `ir_dump_dir` is populated.
    ir_dump_func_regex: A string containing a regular expression for filtering
      the function names to be dumped. Effective only if `ir_dump_dir` is
      populated.
    enable_timing: A boolean, if set to true reports the execution time of each
      MLIR pass.
    print_ir_before: A string containing a regular expression. If specified,
      prints MLIR before passes which match.
    print_ir_after: A string containing a regular expression. If specified,
      prints MLIR after passes which match.
    print_ir_module_scope: A boolean, if set to true always print the top-level
      operation when printing IR for print_ir_[before|after].
    elide_elementsattrs_if_larger: An int, if specified elides ElementsAttrs
      with '...' that have more elements than the given upper limit.
    use_buffer_offset: Force the model use buffer_offset & buffer_size fields
      instead of data. i.e. store the constant tensor and custom op binaries
      outside of Flatbuffers
    reduce_type_precision: Convert some tensor types to a lower precision if all
      values within that tensor are within the range of the lower precision.
      This could have side effects e.g. reduced flatbuffer size.
    qdq_conversion_mode: If set, assume input model is a quantized model
      represented with QDQ ops and convert to quantized kernels.
    strict_qdq_mode: If set, adheres to the QDQ annotations added by the
      framework when possible rather than quantizing any op that is possible to
      quantize.
    disable_per_channel_quantization_for_dense_layers: If set, disables per
      channel end enables per tensor integer quantization for weights in Dense
      layers. The flag works only for integer quantized model.
    enable_composite_direct_lowering: If set, attempts to lower composite ops
      directly to tflite ops.
    model_origin_framework: A str specifying the framework of the original
      model. Can be {TENSORFLOW, KERAS, JAX, PYTORCH}
    canonicalizing_inf_as_min_max_float: When set to true, convert +Inf/-Inf to
      MIN/MAX float value and output of converter only contains finite values.
    serialize_debug_metadata: When set to true, serialize debug metadata in the
      flatbuffer.
    unsafe_fuse_dynamic_shaped_broadcast: When set to true, allows fusion of
      dynamic shaped broadcast ops. It helps fusing implicit broadcasting ops
      when output shape has dynamic dimensions, but it may cause incorrect
      results when broadcasting ops are introduced by explicit broadcasting in
      the source model.

  Returns:
    conversion_flags: protocol buffer describing the conversion process.

  Raises:
    ValueError, if the input tensor type is unknown.
  """
  conversion_flags = _conversion_flags_pb2.ConverterFlags()
  conversion_flags.inference_type = convert_inference_tf_type_to_tflite_type(
      inference_type, usage="inference_type flag"
  )
  # The input type defaults to the (already validated) inference type.
  if inference_input_type:
    conversion_flags.inference_input_type = (
        convert_inference_tf_type_to_tflite_type(
            inference_input_type, usage="inference_input_type flag"
        )
    )
  else:
    conversion_flags.inference_input_type = conversion_flags.inference_type
  conversion_flags.input_format = input_format
  conversion_flags.output_format = output_format
  if default_ranges_stats:
    conversion_flags.default_ranges_min = default_ranges_stats[0]
    conversion_flags.default_ranges_max = default_ranges_stats[1]
  conversion_flags.drop_control_dependency = drop_control_dependency
  conversion_flags.reorder_across_fake_quant = reorder_across_fake_quant
  conversion_flags.allow_custom_ops = allow_custom_ops
  conversion_flags.post_training_quantize = post_training_quantize
  conversion_flags.quantize_to_float16 = quantize_to_float16
  if dump_graphviz_dir:
    conversion_flags.dump_graphviz_dir = dump_graphviz_dir
  conversion_flags.dump_graphviz_include_video = dump_graphviz_video
  # Translate the requested OpsSet selections into individual converter flags.
  if target_ops:
    if OpsSet.SELECT_TF_OPS in target_ops:
      conversion_flags.enable_select_tf_ops = True
    if set(target_ops) == {OpsSet.SELECT_TF_OPS}:
      conversion_flags.force_select_tf_ops = True
    if OpsSet.EXPERIMENTAL_STABLEHLO_OPS in target_ops:
      conversion_flags.convert_to_stablehlo = True
    if OpsSet.EXPERIMENTAL_STABLEHLO_OPS in target_ops and len(target_ops) > 1:
      raise ValueError(
          "StableHLO Ops set can not be specified with other Ops set together"
      )
  if conversion_summary_dir:
    conversion_flags.conversion_summary_dir = conversion_summary_dir
  if select_user_tf_ops:
    conversion_flags.select_user_tf_ops.extend(select_user_tf_ops)
  conversion_flags.allow_all_select_tf_ops = allow_all_select_tf_ops
  conversion_flags.enable_tflite_resource_variables = (
      enable_tflite_resource_variables
  )
  conversion_flags.unfold_batchmatmul = unfold_batchmatmul
  conversion_flags.legalize_custom_tensor_list_ops = (
      legalize_custom_tensor_list_ops
  )
  conversion_flags.lower_tensor_list_ops = lower_tensor_list_ops
  conversion_flags.default_to_single_batch_in_tensor_list_ops = (
      default_to_single_batch_in_tensor_list_ops
  )
  if accumulation_type:
    conversion_flags.accumulation_type = convert_tensor_tf_type_to_tflite_type(
        accumulation_type, usage="accumulation_type flag"
    )
  conversion_flags.allow_bfloat16 = allow_bfloat16
  conversion_flags.unfold_large_splat_constant = unfold_large_splat_constant
  if supported_backends:
    conversion_flags.supported_backends.extend(supported_backends)
  conversion_flags.disable_per_channel_quantization = (
      disable_per_channel_quantization
  )
  conversion_flags.enable_mlir_dynamic_range_quantizer = (
      enable_mlir_dynamic_range_quantizer
  )
  conversion_flags.enable_dynamic_update_slice = enable_dynamic_update_slice
  conversion_flags.preserve_assert_op = preserve_assert_op
  conversion_flags.guarantee_all_funcs_one_use = guarantee_all_funcs_one_use
  if tf_quantization_mode:
    conversion_flags.tf_quantization_mode = tf_quantization_mode
  conversion_flags.disable_infer_tensor_range = disable_infer_tensor_range
  conversion_flags.use_fake_quant_num_bits = use_fake_quant_num_bits
  conversion_flags.enable_mlir_variable_quantization = (
      enable_mlir_variable_quantization
  )
  conversion_flags.disable_fuse_mul_and_fc = disable_fuse_mul_and_fc
  # Transfer debug options. Check for existence before populating in order to
  # leverage defaults specified in proto definition.
  # TODO: b/319329480 - Match the debug_options fields with the user-facing
  # flags.
  if ir_dump_dir is not None:
    conversion_flags.debug_options.ir_dump_dir = ir_dump_dir
  if ir_dump_pass_regex is not None:
    conversion_flags.debug_options.ir_dump_pass_regex = ir_dump_pass_regex
  if ir_dump_func_regex is not None:
    conversion_flags.debug_options.ir_dump_func_regex = ir_dump_func_regex
  if enable_timing is not None:
    conversion_flags.debug_options.enable_timing = enable_timing
  if print_ir_before is not None:
    conversion_flags.debug_options.print_ir_before = print_ir_before
  if print_ir_after is not None:
    conversion_flags.debug_options.print_ir_after = print_ir_after
  if print_ir_module_scope is not None:
    conversion_flags.debug_options.print_ir_module_scope = print_ir_module_scope
  if elide_elementsattrs_if_larger is not None:
    conversion_flags.debug_options.elide_elementsattrs_if_larger = (
        elide_elementsattrs_if_larger
    )
  if use_buffer_offset is not None:
    conversion_flags.use_buffer_offset = use_buffer_offset
  if reduce_type_precision is not None:
    conversion_flags.reduce_type_precision = reduce_type_precision
  if qdq_conversion_mode is not None:
    conversion_flags.qdq_conversion_mode = qdq_conversion_mode
  conversion_flags.strict_qdq_mode = strict_qdq_mode
  conversion_flags.disable_per_channel_quantization_for_dense_layers = (
      disable_per_channel_quantization_for_dense_layers
  )
  conversion_flags.enable_composite_direct_lowering = (
      enable_composite_direct_lowering
  )
  conversion_flags.model_origin_framework = (
      _conversion_flags_pb2.ConverterFlags.ModelOriginFramework.Value(
          model_origin_framework
      )
  )
  conversion_flags.canonicalizing_inf_as_min_max_float = (
      canonicalizing_inf_as_min_max_float
  )
  conversion_flags.serialize_debug_metadata = serialize_debug_metadata
  conversion_flags.unsafe_fuse_dynamic_shaped_broadcast = (
      unsafe_fuse_dynamic_shaped_broadcast
  )
  return conversion_flags
@convert_phase(
    Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_GRAPHDEF
)
def convert_graphdef_with_arrays(
    input_data,
    input_arrays_with_shape,
    output_arrays,
    control_output_arrays,
    **kwargs,
):
    """Convert a frozen GraphDef that can't be loaded in TF.

    Conversion can be customized by providing arguments that are forwarded to
    `build_model_flags` and `build_conversion_flags` (see documentation).

    Args:
      input_data: Input data (i.e. often `sess.graph_def`),
      input_arrays_with_shape: Tuple of strings representing input tensor names
        and list of integers representing input shapes (e.g., [("foo" : [1, 16,
        16, 3])]). Use only when graph cannot be loaded into TensorFlow and when
        `input_tensors` is None.
      output_arrays: List of output tensors to freeze graph with. Use only when
        graph cannot be loaded into TensorFlow and when `output_tensors` is None.
      control_output_arrays: Control output node names. This is used when
        converting a Graph with no output tensors. For example, if the graph's
        last operation is a Print op, just specify that op's name in this field.
        This can be used together with the `output_arrays` parameter.
      **kwargs: See `build_model_flags` and `build_conversion_flags`.

    Returns:
      The converted data. For example if TFLite was the destination, then
      this will be a tflite flatbuffer in a bytes array.

    Raises:
      Defined in `build_conversion_flags`.
    """
    model_flags = build_model_flags(**kwargs)
    conversion_flags = build_conversion_flags(**kwargs)
    quantized_input_stats = kwargs.get("quantized_input_stats", None)
    # Register each (name, shape) pair as an input array on the model flags.
    for idx, (name, shape) in enumerate(input_arrays_with_shape):
        input_array = model_flags.input_arrays.add()
        # Quantized inference (int8/uint8) needs per-input (mean, std)
        # calibration stats; unlike `convert_graphdef`, missing stats are a
        # hard error here.
        if _is_quantized_input_stats_required(conversion_flags):
            if quantized_input_stats:
                input_array.mean_value, input_array.std_value = quantized_input_stats[
                    idx
                ]
            else:
                raise ValueError(
                    "The `quantized_input_stats` flag must be defined when either "
                    "`inference_type` flag or `inference_input_type` flag is set to "
                    "tf.int8 or tf.uint8."
                )
        input_array.name = name
        input_array.shape.dims.extend(list(map(int, shape)))
    if output_arrays:
        for name in output_arrays:
            model_flags.output_arrays.append(name)
    if control_output_arrays:
        for name in control_output_arrays:
            model_flags.control_output_arrays.append(name)
    # Hand the serialized GraphDef plus flags to the shared convert entry point.
    data = convert(
        model_flags,
        conversion_flags,
        input_data.SerializeToString(),
        debug_info_str=None,
    )
    return data
@convert_phase(
    Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_GRAPHDEF
)
def convert_graphdef(input_data, input_tensors, output_tensors, **kwargs):
    """Convert a frozen GraphDef model using the TF Lite converter.

    Conversion can be customized by providing arguments that are forwarded to
    `build_model_flags` and `build_conversion_flags` (see documentation).

    Args:
      input_data: Input data (i.e. often `sess.graph_def`),
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.shape` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).
      **kwargs: See `build_model_flags` and `build_conversion_flags`.

    Returns:
      The converted data. For example if TFLite was the destination, then
      this will be a tflite flatbuffer in a bytes array.

    Raises:
      Defined in `build_conversion_flags`.
    """
    model_flags = build_model_flags(**kwargs)
    conversion_flags = build_conversion_flags(**kwargs)
    saved_model_dir = kwargs.get("saved_model_dir", None)
    input_shapes = kwargs.get("input_shapes", None)
    quantized_input_stats = kwargs.get("quantized_input_stats", None)
    debug_info = kwargs.get("debug_info", None)
    for idx, input_tensor in enumerate(input_tensors):
        input_array = model_flags.input_arrays.add()
        # SavedModel tensors keep their raw names; plain GraphDef tensors are
        # normalized through util.get_tensor_name.
        if saved_model_dir:
            input_array.name = input_tensor.name
        else:
            input_array.name = util.get_tensor_name(input_tensor)
        input_array.data_type = convert_tensor_tf_type_to_tflite_type(
            input_tensor.dtype, usage="input type of the TensorFlow model"
        )
        if _is_quantized_input_stats_required(conversion_flags):
            if quantized_input_stats:
                input_array.mean_value, input_array.std_value = quantized_input_stats[
                    idx
                ]
            else:
                # We should ideally raise an error here, but we don't as it would break
                # several models/projects that depend on this workflow.
                warnings.warn(
                    "Statistics for quantized inputs were expected, but not "
                    "specified; continuing anyway."
                )
        # Explicit `input_shapes` override the shapes recorded on the tensors.
        if input_shapes is None:
            shape = input_tensor.shape
        else:
            shape = input_shapes[idx]
        if shape.rank is not None:
            # Create shapes with -1 for unknown dimensions.
            dims = []
            for dim in shape:
                if dim is None or (
                    isinstance(dim, tensor_shape.Dimension) and dim.value is None
                ):
                    dims.append(-1)
                else:
                    dims.append(int(dim))
            input_array.shape.dims.extend(dims)
            input_array.shape.unknown_rank = False
        else:
            input_array.shape.unknown_rank = True
    for output_tensor in output_tensors:
        if saved_model_dir:
            model_flags.output_arrays.append(output_tensor.name)
        else:
            model_flags.output_arrays.append(util.get_tensor_name(output_tensor))
    data = convert(
        model_flags,
        conversion_flags,
        input_data.SerializeToString(),
        debug_info_str=debug_info.SerializeToString() if debug_info else None,
    )
    return data
@convert_phase(
    Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_SAVED_MODEL
)
def convert_saved_model(**kwargs):
    """Converts a SavedModel using TF Lite converter."""
    # The SavedModel location and all conversion options travel inside the
    # flag protos, so no serialized graph payload is handed to `convert`.
    return convert(
        build_model_flags(**kwargs),
        build_conversion_flags(**kwargs),
        input_data_str=None,
        debug_info_str=None,
    )
@convert_phase(
    Component.CONVERT_TF_TO_TFLITE_MODEL, SubComponent.CONVERT_JAX_HLO
)
def convert_jax_hlo(input_content, input_names, is_proto_format, **kwargs):
    """Converts a Jax hlo-based model using TFLite converter."""
    model_flags = _model_flags_pb2.ModelFlags()
    model_flags.use_hlo_import = True
    # The HLO payload may arrive either as a serialized proto or as text.
    model_flags.hlo_file_type = (
        _model_flags_pb2.ModelFlags.HLO_PROTO
        if is_proto_format
        else _model_flags_pb2.ModelFlags.HLO_TEXT
    )
    # Register the model inputs by name.
    for name in input_names:
        model_flags.input_arrays.add().name = name
    return convert(
        model_flags,
        build_conversion_flags(**kwargs),
        input_data_str=input_content,
        debug_info_str=None,
    )
@_tf_export(v1=["lite.toco_convert"])
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
    """Convert a TensorFlow GraphDef to TFLite.

    This function is deprecated. Please use `tf.lite.TFLiteConverter` API instead.
    Conversion can be customized by providing arguments that are forwarded to
    `build_model_flags` and `build_conversion_flags` (see documentation for
    details).

    Args:
      input_data: Input data (i.e. often `sess.graph_def`).
      input_tensors: List of input tensors. Type and shape are computed using
        `foo.shape` and `foo.dtype`.
      output_tensors: List of output tensors (only .name is used from this).
      *args: See `build_model_flags` and `build_conversion_flags`.
      **kwargs: See `build_model_flags` and `build_conversion_flags`.

    Returns:
      The converted TensorFlow Lite model in a bytes array.

    Raises:
      Defined in `convert`.
    """
    # Deprecated alias kept for the TF1 API surface; it delegates verbatim to
    # the GraphDef converter.
    return convert_graphdef(
        input_data, input_tensors, output_tensors, *args, **kwargs
    )
def deduplicate_readonly_buffers(tflite_model):
    """Generates a new model byte array after deduplicating readonly buffers.

    This function should be invoked after the model optimization toolkit. The
    model optimization toolkit assumes that each tensor object owns its each
    buffer separately.

    Args:
      tflite_model: TFLite flatbuffer in a byte array to be deduplicated.

    Returns:
      TFLite flatbuffer in a bytes array, processed with the deduplication method.
    """
    # Load TFLite Flatbuffer byte array into an object.
    model = flatbuffer_utils.convert_bytearray_to_object(tflite_model)

    # Get all the read-only buffers, which can be modified without causing any
    # issue in the graph invocation stage.
    read_only_buffer_indices = set()
    for subgraph in model.subgraphs:
        # To get all the read-only buffers:
        # (1) Get all read-only input tensors.
        # (2) Discard intermediate or output tensors.
        # (3) Discard the subgraph's input/output tensors.
        # (4) Gather the buffers of the read-only input tensors.

        # (1) Get read-only input tensors.
        read_only_input_tensor_indices = set()
        for op in subgraph.operators:
            if op.inputs is None:
                continue
            for i, input_tensor_idx in enumerate(op.inputs):
                # Ignore mutable tensors.
                if op.mutatingVariableInputs is not None:
                    # Ignore invalid tensors.
                    if (
                        i < len(op.mutatingVariableInputs)
                        and op.mutatingVariableInputs[i]
                    ):
                        continue
                # Ignore variable tensors.
                if subgraph.tensors[input_tensor_idx].isVariable:
                    continue
                read_only_input_tensor_indices.add(input_tensor_idx)

        # (2) Discard intermediate or output tensors.
        for op in subgraph.operators:
            if op.outputs is not None:
                for output_tensor_idx in op.outputs:
                    read_only_input_tensor_indices.discard(output_tensor_idx)
            if op.intermediates is not None:
                for intermediate_tensor_idx in op.intermediates:
                    read_only_input_tensor_indices.discard(intermediate_tensor_idx)

        # (3) Discard the subgraph's input and output tensors.
        if subgraph.inputs is not None:
            for input_tensor_idx in subgraph.inputs:
                read_only_input_tensor_indices.discard(input_tensor_idx)
        if subgraph.outputs is not None:
            for output_tensor_idx in subgraph.outputs:
                read_only_input_tensor_indices.discard(output_tensor_idx)

        # (4) Gather the buffers of the read-only input tensors.
        for tensor_idx in read_only_input_tensor_indices:
            read_only_buffer_indices.add(subgraph.tensors[tensor_idx].buffer)

    # Ignore invalid negative index or zero-sized buffers.
    for buffer_idx in read_only_buffer_indices.copy():
        if buffer_idx < 0 or (
            model.buffers[buffer_idx].data is None
            or isinstance(model.buffers[buffer_idx].data, list)
            or model.buffers[buffer_idx].data.size == 0
        ):
            read_only_buffer_indices.discard(buffer_idx)

    class BufferIndex:
        """A class to store index, size, hash of the buffers in TFLite model."""

        def __init__(self, idx, size, hash_value):
            self.idx = idx
            self.size = size
            self.hash_value = hash_value

    read_only_buffers = list(
        map(
            lambda index: BufferIndex(  # pylint: disable=g-long-lambda
                index,
                model.buffers[index].data.size,
                hashlib.md5(model.buffers[index].data.data.tobytes()).hexdigest(),
            ),
            read_only_buffer_indices,
        )
    )

    # Sort read_only_buffers by buffer size & hash in descending order.
    # The dedup scan below relies on this ordering: equal-sized buffers are
    # adjacent, so the inner loop may `break` as soon as sizes differ.
    read_only_buffers = sorted(
        read_only_buffers,
        key=lambda buffer: (buffer.size, buffer.hash_value),
        reverse=True,
    )

    # Create a map of duplicate buffers (same size and same type).
    # eg: In [1, 2, 3, 4, 5, 6] if (1, 4, 6) and (2, 5) are each, groups of buffer
    # indices of the same size and type, then the map would be {4:1, 6:1, 5:2}
    duplicate_buffer_map = {}
    for i, buffer_i in enumerate(read_only_buffers):
        # This buffer is a duplicate.
        if buffer_i.idx in duplicate_buffer_map:
            continue
        # This buffer is unique. Scan rest of the list to find duplicates
        # of this buffer and mark them accordingly.
        for buffer_j in read_only_buffers[i + 1 :]:
            if buffer_j.idx in duplicate_buffer_map:
                continue
            if buffer_i.size != buffer_j.size:
                break
            if buffer_i.hash_value != buffer_j.hash_value:
                continue
            # Found duplicate. Nullify j-th buffer and use i-th buffer instead.
            duplicate_buffer_map[buffer_j.idx] = buffer_i.idx

    # Make the duplicated tensors use the single shared buffer index.
    for subgraph in model.subgraphs:
        for op in subgraph.operators:
            if op.inputs is None:
                continue
            for input_tensor in op.inputs:
                buffer_idx = subgraph.tensors[input_tensor].buffer
                if buffer_idx in duplicate_buffer_map:
                    subgraph.tensors[input_tensor].buffer = duplicate_buffer_map[
                        buffer_idx
                    ]

    # Nullify the unused buffers.
    for idx in duplicate_buffer_map:
        model.buffers[idx].data = None

    # Return a TFLite flatbuffer as a byte array.
    return flatbuffer_utils.convert_object_to_bytearray(model)
| OpsSet |
python | astropy__astropy | astropy/cosmology/_src/tests/flrw/test_wpwazpcdm.py | {
"start": 7890,
"end": 11683
class ____(FlatFLRWMixinTest, TestwpwaCDM):
    """Test :class:`astropy.cosmology.FlatwpwaCDM`."""

    def setup_class(self):
        """Setup for testing."""
        super().setup_class(self)
        self.cls = FlatwpwaCDM

    def test_repr(self, cosmo_cls, cosmo):
        """Test method ``.__repr__()``."""
        super().test_repr(cosmo_cls, cosmo)
        # Pin the exact repr string, including parameter formatting.
        assert repr(cosmo) == (
            "FlatwpwaCDM(name='ABCMeta', H0=<Quantity 70. km / (Mpc s)>, Om0=0.27,"
            " Tcmb0=<Quantity 3. K>, Neff=3.04, m_nu=<Quantity [0., 0., 0.] eV>,"
            " Ob0=0.03, wp=-0.9, wa=0.2, zp=<Quantity 0.5 redshift>)"
        )

    @pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.")
    @pytest.mark.parametrize(
        ("args", "kwargs", "expected"),
        [
            (  # no relativistic species
                (75.0, 0.3),
                {},
                [3030.70481348, 4745.82435272, 5828.73710847, 6582.60454542] * u.Mpc,
            ),
            (  # massless neutrinos
                (75.0, 0.25),
                {"zp": 0.4, "wa": 0.1, "Tcmb0": 3.0, "Neff": 3, "m_nu": 0.0 * u.eV},
                [3113.62199365, 4943.28425668, 6114.45491003, 6934.07461377] * u.Mpc,
            ),
            (  # massive neutrinos
                (75.0, 0.25),
                {"zp": 1.0, "Tcmb0": 3.0, "Neff": 4, "m_nu": 5 * u.eV},
                [2517.08634022, 3694.21111754, 4402.17802962, 4886.65787948] * u.Mpc,
            ),
        ],
    )
    def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected):
        """Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`.

        These do not come from external codes -- they are just internal checks to make
        sure nothing changes if we muck with the distance calculators.
        """
        super().test_comoving_distance_example(
            cosmo_cls, args, {**COMOVING_DISTANCE_EXAMPLE_KWARGS, **kwargs}, expected
        )
###############################################################################
# Comparison to Other Codes
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.")
def test_varyde_lumdist_mathematica():
    """Tests a few varying dark energy EOS models against a Mathematica computation."""
    z = np.array([0.2, 0.4, 0.9, 1.2])

    # wpwa models
    # Same cosmology except for the pivot redshift zp; the reference values
    # are the externally-computed luminosity distances.
    cosmo = wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5, Tcmb0=0.0)
    assert u.allclose(
        cosmo.luminosity_distance(z),
        [1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc,
        rtol=1e-4,
    )
    cosmo = wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9, Tcmb0=0.0)
    assert u.allclose(
        cosmo.luminosity_distance(z),
        [1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc,
        rtol=1e-4,
    )
##############################################################################
# Miscellaneous
# TODO: these should be better integrated into the new test framework
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_de_densityscale():
    """Regression-check ``de_density_scale`` for wpwaCDM and its flat variant."""
    cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9, wa=0.2, zp=0.5)

    z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
    assert u.allclose(
        cosmo.de_density_scale(z),
        [1.012246048, 1.0280102, 1.087439, 1.324988, 1.565746],
        rtol=1e-4,
    )

    # Scalar int/float and list inputs must agree with array inputs.
    assert u.allclose(cosmo.de_density_scale(3), cosmo.de_density_scale(3.0), rtol=1e-7)
    assert u.allclose(
        cosmo.de_density_scale([1, 2, 3]),
        cosmo.de_density_scale([1.0, 2.0, 3.0]),
        rtol=1e-7,
    )

    # Flat tests
    cosmo = wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9, wa=0.2, zp=0.5)
    flatcosmo = FlatwpwaCDM(H0=70, Om0=0.3, wp=-0.9, wa=0.2, zp=0.5)
    assert u.allclose(
        cosmo.de_density_scale(z), flatcosmo.de_density_scale(z), rtol=1e-7
    )
| TestFlatwpwaCDM |
python | getsentry__sentry | src/sentry/db/postgres/transactions.py | {
"start": 2214,
"end": 4486
class ____(threading.local):
    # Thread-local toggle: when False, the test-only transaction assertions
    # below become no-ops for the current thread.
    enabled = True


# Process-wide singleton; each thread sees its own `enabled` value because the
# class derives from threading.local.
in_test_transaction_enforcement = InTestTransactionEnforcement()
@contextlib.contextmanager
def in_test_hide_transaction_boundary() -> Generator[None]:
    """
    In production, has no effect.

    In tests, it hides 'in_test_assert_no_transaction' invocations against problematic code paths.
    Using this function is a huge code smell, often masking some other code smell, but not always possible to avoid.
    """
    if not in_test_environment():
        # Outside tests there is nothing to toggle; just run the body.
        yield
        return

    saved_state = in_test_transaction_enforcement.enabled
    in_test_transaction_enforcement.enabled = False
    try:
        yield
    finally:
        # Restore whatever the caller had, even if the body raised.
        in_test_transaction_enforcement.enabled = saved_state
def in_test_assert_no_transaction(msg: str) -> None:
    """
    In production, has no effect.

    In tests, asserts that the current call is not inside of any transaction.
    If you are getting bitten by calls to this function in tests, move your service calls outside of any active
    transaction -- they can't realistically share the wrapping transaction, and in the worst case the indefinite
    execution time can have cause major performance issues by holding transactional resources open for long periods
    of time.
    """
    # No-op unless running tests with enforcement enabled for this thread.
    if not in_test_environment() or not in_test_transaction_enforcement.enabled:
        return

    # Imported lazily so production code never pulls in test utilities.
    from sentry.testutils import hybrid_cloud  # NOQA:S007

    # Check every configured database connection against its watermark.
    for conn in connections.all():
        assert not hybrid_cloud.simulated_transaction_watermarks.connection_transaction_depth_above_watermark(
            connection=conn
        ), msg
@contextlib.contextmanager
def enforce_constraints(transaction: Atomic) -> Generator[None]:
    """
    Nested transaction in Django do not check constraints by default, meaning IntegrityErrors can 'float' to callers
    of functions that happen to wrap with additional transaction scopes. Using this context manager around a transaction
    will force constraints to be checked at the end of that transaction (or savepoint) even if it happens to be nested,
    allowing you to handle the IntegrityError correctly.
    """
    with transaction:
        yield
        # Runs while the transaction/savepoint is still open, so a constraint
        # violation surfaces here as IntegrityError rather than at outer commit.
        get_connection(transaction.using or "default").check_constraints()
| InTestTransactionEnforcement |
python | lazyprogrammer__machine_learning_examples | rl3/a2c/atari_wrappers.py | {
"start": 3335,
"end": 4420
class ____(gym.Wrapper):
    # Frame-skip wrapper: repeats each action `skip` times and returns the
    # pixel-wise max of the last two raw frames (mitigates Atari sprite flicker).

    def __init__(self, env, skip=4):
        """Return only every `skip`-th frame"""
        gym.Wrapper.__init__(self, env)
        # most recent raw observations (for max pooling across time steps)
        self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype='uint8')
        self._skip = skip

    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            # Only the last two frames are buffered for the max-pool.
            if i == self._skip - 2:
                self._obs_buffer[0] = obs
            if i == self._skip - 1:
                self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        # Note that the observation on the done=True frame
        # doesn't matter
        max_frame = self._obs_buffer.max(axis=0)

        return max_frame, total_reward, done, info

    def reset(self, **kwargs):
        # Pass through; buffer contents are overwritten on the next step().
        return self.env.reset(**kwargs)
python | doocs__leetcode | solution/1200-1299/1233.Remove Sub-Folders from the Filesystem/Solution2.py | {
"start": 593,
"end": 814
class ____:
    def removeSubfolders(self, folder: List[str]) -> List[str]:
        """Return the folders that are not sub-folders of another listed folder."""
        # Insert every path (with its index) into a trie keyed by path segment;
        # Trie.search() presumably yields only the indices of paths that have no
        # inserted ancestor path — TODO confirm against the Trie implementation.
        trie = Trie()
        for i, f in enumerate(folder):
            trie.insert(i, f)
        return [folder[i] for i in trie.search()]
| Solution |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py | {
"start": 168812,
"end": 182172
class ____(Qwen3OmniMoePreTrainedModel, GenerationMixin):
    # Top-level Qwen3-Omni MoE model: a "thinker" (text/multimodal LM) plus an
    # optional "talker" + code2wav pipeline for audio output.
    config_class = Qwen3OmniMoeConfig
    # Modalities this model can emit (audio only when the talker is enabled).
    output_modalities = ("text", "audio")
def __init__(self, config: Qwen3OmniMoeConfig):
    """Build the thinker sub-model and, if configured, the talker/code2wav pair."""
    super().__init__(config)
    self.thinker = Qwen3OmniMoeThinkerForConditionalGeneration._from_config(config.thinker_config)
    # Audio output is optional; the talker is only materialized when enabled.
    self.has_talker = config.enable_audio_output
    if self.has_talker:
        self.enable_talker()
    self.post_init()
def enable_talker(self):
    """Instantiate the talker and code2wav sub-models from their configs."""
    self.talker = Qwen3OmniMoeTalkerForConditionalGeneration._from_config(self.config.talker_config)
    self.code2wav = Qwen3OmniMoeCode2Wav._from_config(self.config.code2wav_config)
def disable_talker(self):
    """Drop the talker/code2wav sub-models (frees their parameters) and mark audio off."""
    if hasattr(self, "talker"):
        del self.talker
    if hasattr(self, "code2wav"):
        del self.code2wav
    self.has_talker = False
def _get_talker_user_parts(
    self, im_start_index, segment_end_index, multimodal_mask, thinker_hidden, thinker_embed
):
    """Project one user chat segment into the talker's hidden space.

    Multimodal positions use the thinker's accept-layer hidden states via
    `hidden_projection`; text positions use the thinker's input embeddings via
    `text_projection`. Returns a [1, seg_len, talker_hidden] tensor.
    """
    user_talker_part = torch.empty(
        (1, segment_end_index - im_start_index, self.config.talker_config.text_config.hidden_size),
        device=thinker_hidden.device,
        dtype=self.talker.dtype,
    )
    user_mm_mask = multimodal_mask[:, im_start_index:segment_end_index]
    # Multimodal data exists
    if user_mm_mask.any():
        user_thinker_hidden_mm = thinker_hidden[:, im_start_index:segment_end_index][user_mm_mask]
        mm_hidden = self.talker.hidden_projection(user_thinker_hidden_mm).to(thinker_hidden.device)
        user_talker_part[user_mm_mask] = mm_hidden
    # Remaining (text) positions are filled from projected word embeddings.
    user_thinker_embed = thinker_embed[:, im_start_index:segment_end_index][~user_mm_mask]
    user_text_hidden = self.talker.text_projection(user_thinker_embed).to(thinker_hidden.device)
    user_talker_part[~user_mm_mask] = user_text_hidden
    return user_talker_part
def _get_talker_assistant_parts(
    self, im_start_index, segment_end_index, speaker_id, thinker_embed, tts_pad_embed, tts_bos_embed, tts_eos_embed
):
    """Assemble the talker prompt for the final assistant segment.

    Returns (input_embeds, input_ids, trailing_text_hidden): the summed
    text+codec prompt embeddings, matching pad-token ids, and the remaining
    projected assistant text (plus TTS EOS) to be streamed during decoding.
    """
    assistant_hidden = self.talker.text_projection(thinker_embed[:, im_start_index:segment_end_index]).to(
        tts_pad_embed.device
    )  # [1 t d]
    # Text stream: 3 header positions, 4 pads, TTS BOS, then the first text
    # token — aligned one-to-one with the codec stream built below.
    assistant_text_hidden = torch.cat(
        (
            assistant_hidden[:, :3],
            tts_pad_embed.expand(-1, 4, -1),
            tts_bos_embed,
            assistant_hidden[:, 3:4],  # First text
        ),
        dim=1,
    )
    codec_special_tokens = torch.tensor(
        [
            [
                self.config.talker_config.codec_nothink_id,
                self.config.talker_config.codec_think_bos_id,
                self.config.talker_config.codec_think_eos_id,
                speaker_id,
                self.config.talker_config.codec_pad_id,
                self.config.talker_config.codec_bos_id,
            ]
        ],
        device=tts_pad_embed.device,
        dtype=torch.long,
    )
    # Codec stream: zeros over the 3 header positions, then embedded codec
    # control tokens (incl. the speaker id).
    assistant_codec_hidden = torch.cat(
        (
            torch.zeros(
                (1, 3, self.config.talker_config.text_config.hidden_size),
                device=tts_pad_embed.device,
                dtype=self.talker.dtype,
            ),
            self.talker.get_input_embeddings()(codec_special_tokens).to(tts_pad_embed.device),
        ),
        dim=1,
    )
    trailing_text_hidden = torch.cat(
        (
            assistant_hidden[:, 4:],
            tts_eos_embed,
        ),
        dim=1,
    )
    # The two aligned streams are summed position-wise to form the prompt.
    input_embeds = assistant_text_hidden + assistant_codec_hidden
    input_ids = torch.full(
        (1, assistant_text_hidden.shape[1]),
        fill_value=self.config.tts_pad_token_id,
        dtype=torch.long,
        device=assistant_text_hidden.device,
    )
    return input_embeds, input_ids, trailing_text_hidden
@torch.no_grad()
def generate(
    self,
    input_ids: Optional[torch.Tensor] = None,
    speaker: str = "Ethan",
    use_audio_in_video: bool = False,
    return_audio: Optional[bool] = None,
    thinker_max_new_tokens: int = 1024,
    thinker_eos_token_id: int = 151645,
    talker_max_new_tokens: int = 4096,
    talker_do_sample: bool = True,
    talker_top_k: int = 50,
    talker_top_p: float = 1.0,
    talker_temperature: float = 0.9,
    talker_repetition_penalty: float = 1.05,
    **kwargs,
):
    """Run thinker (text) generation and optionally talker + code2wav (audio).

    Extra kwargs are routed by prefix: ``thinker_*`` / ``talker_*`` /
    ``token2wav_*`` go to the respective stage; un-prefixed keys are shared.
    Returns the thinker output alone, or ``(sequences, waveform)`` when audio
    is generated.
    """
    if return_audio and not self.has_talker:
        raise ValueError(
            "Cannot use talker when talker module not initialized. Use `enable_talker` method or set enable_talker in config to enable talker."
        )
    if return_audio is None:
        return_audio = self.has_talker
    shared_kwargs = {"use_audio_in_video": use_audio_in_video}
    thinker_kwargs = {
        "max_new_tokens": thinker_max_new_tokens,
        "eos_token_id": thinker_eos_token_id,
    }
    talker_kwargs = {}
    token2wav_kwargs = {}
    if return_audio:
        speaker_id = self.config.talker_config.speaker_id.get(speaker.lower())
        if speaker_id is None:
            raise NotImplementedError(f"Speaker {speaker} not implemented")
        if input_ids.shape[0] != 1:
            raise NotImplementedError("Qwen3-Omni currently does not support batched inference with audio output")
        talker_supppressed_tokens = [
            i
            for i in range(
                self.config.talker_config.text_config.vocab_size - 1024,
                self.config.talker_config.text_config.vocab_size,
            )
            if i != self.config.talker_config.codec_eos_token_id
        ]  # Suppress additional special tokens, should not be predicted
        talker_kwargs = {
            "max_new_tokens": talker_max_new_tokens,
            "do_sample": talker_do_sample,
            "top_k": talker_top_k,
            "top_p": talker_top_p,
            "temperature": talker_temperature,
            "eos_token_id": self.config.talker_config.codec_eos_token_id,
            "repetition_penalty": talker_repetition_penalty,
            "suppress_tokens": talker_supppressed_tokens,
            "output_hidden_states": True,
            "return_dict_in_generate": True,
        }
        token2wav_kwargs = {}
    # Route prefixed kwargs to their stage; strip the prefix when assigning.
    for key, value in kwargs.items():
        if key.startswith("thinker_"):
            thinker_kwargs[key[len("thinker_") :]] = value
        elif key.startswith("talker_"):
            talker_kwargs[key[len("talker_") :]] = value
        elif key.startswith("token2wav_"):
            token2wav_kwargs[key[len("token2wav_") :]] = value
        # Process special input values
        elif key == "feature_attention_mask":
            thinker_kwargs[key] = value
            talker_kwargs["audio_feature_lengths"] = torch.sum(value, dim=1)
        elif key in ("input_features", "attention_mask"):
            thinker_kwargs[key] = value
        # Put other key to shared kwargs
        else:
            shared_kwargs[key] = value
    # Merge kwargs
    for key, value in shared_kwargs.items():
        if key not in thinker_kwargs:
            thinker_kwargs[key] = value
        if key not in talker_kwargs and key in ["image_grid_thw", "video_grid_thw", "video_second_per_grid"]:
            talker_kwargs[key] = value
        if key not in token2wav_kwargs:
            token2wav_kwargs[key] = value
    # 1. Generate from thinker module
    generate_audio = return_audio and self.has_talker
    if generate_audio:
        # Talker consumes the thinker's hidden states, so request them.
        thinker_kwargs["output_hidden_states"] = True
        thinker_kwargs["return_dict_in_generate"] = True
    thinker_result = self.thinker.generate(input_ids=input_ids, **thinker_kwargs)
    if not generate_audio:
        return thinker_result
    # 2. Prepare talker input
    thinker_embed = torch.cat([hidden_states[0] for hidden_states in thinker_result.hidden_states], dim=1).to(
        input_ids.device
    )  # [1 t d]
    thinker_hidden = torch.cat(
        [
            hidden_states[self.config.talker_config.accept_hidden_layer]
            for hidden_states in thinker_result.hidden_states
        ],
        dim=1,
    ).to(input_ids.device)  # [1 t d]
    # Segment boundaries: every <|im_start|> position plus the sequence end.
    im_start_indexes = torch.cat(
        (
            torch.nonzero(input_ids[0] == self.config.im_start_token_id).squeeze(),
            torch.tensor([thinker_result.sequences.shape[-1]], device=input_ids.device, dtype=input_ids.dtype),
        ),
        dim=-1,
    )  # Shape [n_starts + 1]; Take batch 0 since batched inference is not supported here.
    multimodal_mask = (
        (thinker_result.sequences == self.config.thinker_config.audio_token_id) |
        (thinker_result.sequences == self.config.thinker_config.image_token_id) |
        (thinker_result.sequences == self.config.thinker_config.video_token_id)
    ).to(input_ids.device)  # [1 t]  # fmt: skip
    talker_special_tokens = torch.tensor(
        [[self.config.tts_bos_token_id, self.config.tts_eos_token_id, self.config.tts_pad_token_id]],
        device=self.thinker.device,
        dtype=input_ids.dtype,
    )
    tts_bos_embed, tts_eos_embed, tts_pad_embed = (
        self.talker.text_projection(self.thinker.get_input_embeddings()(talker_special_tokens))
        .to(input_ids.device)
        .chunk(3, dim=1)
    )  # 3 * [1 1 d]
    talker_input_embeds = []  # [1 t d]
    talker_input_ids = []
    # For every chatml parts
    for i in range(len(im_start_indexes) - 1):
        im_start_index = im_start_indexes[i]
        segment_end_index = im_start_indexes[i + 1]
        role_token = input_ids[0][im_start_index + 1]
        # Talker should ignore thinker system prompt
        if role_token == self.config.system_token_id:
            continue
        # Talker takes word embeddings for tokens and hidden state from `accept_hidden_layer` for multimodal inputs
        elif role_token == self.config.user_token_id:
            talker_user_part = self._get_talker_user_parts(
                im_start_index, segment_end_index, multimodal_mask, thinker_hidden, thinker_embed
            )
            talker_input_embeds.append(talker_user_part)
            talker_input_ids.append(thinker_result.sequences[:, im_start_index:segment_end_index])
        # Take assistant output (for now)
        elif role_token == self.config.assistant_token_id and i == len(im_start_indexes) - 2:
            talker_assistant_embeds, talker_assistant_ids, trailing_text_hidden = self._get_talker_assistant_parts(
                im_start_index,
                segment_end_index,
                speaker_id,
                thinker_embed,
                tts_pad_embed,
                tts_bos_embed,
                tts_eos_embed,
            )
            talker_input_embeds.append(talker_assistant_embeds)
            talker_input_ids.append(talker_assistant_ids)
        # History assistant output (ignore for now)
        elif role_token == self.config.assistant_token_id and i != len(im_start_indexes) - 2:
            continue
        else:
            raise AssertionError("Expect role id after <|im_start|> (assistant, user, system)")
    talker_input_embed = torch.cat([embed.to(input_ids.device) for embed in talker_input_embeds], dim=1)
    talker_input_id = torch.cat([embed.to(input_ids.device) for embed in talker_input_ids], dim=1)
    talker_result = self.talker.generate(
        inputs_embeds=talker_input_embed,
        trailing_text_hidden=trailing_text_hidden,
        tts_pad_embed=tts_pad_embed,
        talker_input_ids=talker_input_id,  # Not use input_ids to prevent repetation penalty out of bound
        **talker_kwargs,
    )
    # Collect the per-step codec codes emitted in the talker's hidden states.
    talker_codes = (
        torch.stack([hid[-1] for hid in talker_result.hidden_states if hid[-1] is not None], dim=1)
        .transpose(1, 2)
        .to(self.code2wav.device)
    )
    talker_wavs = self.code2wav.chunked_decode(talker_codes, chunk_size=300, left_context_size=25)
    return thinker_result.sequences, talker_wavs.float()
# Public API of this module.
__all__ = [
    "Qwen3OmniMoeForConditionalGeneration",
    "Qwen3OmniMoeThinkerTextModel",
    "Qwen3OmniMoeThinkerForConditionalGeneration",
    "Qwen3OmniMoeTalkerForConditionalGeneration",
    "Qwen3OmniMoePreTrainedModel",
    "Qwen3OmniMoePreTrainedModelForConditionalGeneration",
    "Qwen3OmniMoeTalkerModel",
    "Qwen3OmniMoeThinkerTextPreTrainedModel",
    "Qwen3OmniMoeCode2Wav",
    "Qwen3OmniMoeCode2WavDecoderBlock",
    "Qwen3OmniMoeCode2WavTransformerModel",
    "Qwen3OmniMoeTalkerCodePredictorModel",
    "Qwen3OmniMoeTalkerCodePredictorModelForConditionalGeneration",
]
| Qwen3OmniMoeForConditionalGeneration |
python | kubernetes-client__python | kubernetes/client/models/v1_env_var_source.py | {
"start": 383,
"end": 6895
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'config_map_key_ref': 'V1ConfigMapKeySelector',
'field_ref': 'V1ObjectFieldSelector',
'file_key_ref': 'V1FileKeySelector',
'resource_field_ref': 'V1ResourceFieldSelector',
'secret_key_ref': 'V1SecretKeySelector'
}
attribute_map = {
'config_map_key_ref': 'configMapKeyRef',
'field_ref': 'fieldRef',
'file_key_ref': 'fileKeyRef',
'resource_field_ref': 'resourceFieldRef',
'secret_key_ref': 'secretKeyRef'
}
def __init__(self, config_map_key_ref=None, field_ref=None, file_key_ref=None, resource_field_ref=None, secret_key_ref=None, local_vars_configuration=None): # noqa: E501
"""V1EnvVarSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config_map_key_ref = None
self._field_ref = None
self._file_key_ref = None
self._resource_field_ref = None
self._secret_key_ref = None
self.discriminator = None
if config_map_key_ref is not None:
self.config_map_key_ref = config_map_key_ref
if field_ref is not None:
self.field_ref = field_ref
if file_key_ref is not None:
self.file_key_ref = file_key_ref
if resource_field_ref is not None:
self.resource_field_ref = resource_field_ref
if secret_key_ref is not None:
self.secret_key_ref = secret_key_ref
@property
def config_map_key_ref(self):
"""Gets the config_map_key_ref of this V1EnvVarSource. # noqa: E501
:return: The config_map_key_ref of this V1EnvVarSource. # noqa: E501
:rtype: V1ConfigMapKeySelector
"""
return self._config_map_key_ref
@config_map_key_ref.setter
def config_map_key_ref(self, config_map_key_ref):
"""Sets the config_map_key_ref of this V1EnvVarSource.
:param config_map_key_ref: The config_map_key_ref of this V1EnvVarSource. # noqa: E501
:type: V1ConfigMapKeySelector
"""
self._config_map_key_ref = config_map_key_ref
@property
def field_ref(self):
"""Gets the field_ref of this V1EnvVarSource. # noqa: E501
:return: The field_ref of this V1EnvVarSource. # noqa: E501
:rtype: V1ObjectFieldSelector
"""
return self._field_ref
@field_ref.setter
def field_ref(self, field_ref):
"""Sets the field_ref of this V1EnvVarSource.
:param field_ref: The field_ref of this V1EnvVarSource. # noqa: E501
:type: V1ObjectFieldSelector
"""
self._field_ref = field_ref
@property
def file_key_ref(self):
"""Gets the file_key_ref of this V1EnvVarSource. # noqa: E501
:return: The file_key_ref of this V1EnvVarSource. # noqa: E501
:rtype: V1FileKeySelector
"""
return self._file_key_ref
@file_key_ref.setter
def file_key_ref(self, file_key_ref):
"""Sets the file_key_ref of this V1EnvVarSource.
:param file_key_ref: The file_key_ref of this V1EnvVarSource. # noqa: E501
:type: V1FileKeySelector
"""
self._file_key_ref = file_key_ref
@property
def resource_field_ref(self):
"""Gets the resource_field_ref of this V1EnvVarSource. # noqa: E501
:return: The resource_field_ref of this V1EnvVarSource. # noqa: E501
:rtype: V1ResourceFieldSelector
"""
return self._resource_field_ref
@resource_field_ref.setter
def resource_field_ref(self, resource_field_ref):
"""Sets the resource_field_ref of this V1EnvVarSource.
:param resource_field_ref: The resource_field_ref of this V1EnvVarSource. # noqa: E501
:type: V1ResourceFieldSelector
"""
self._resource_field_ref = resource_field_ref
@property
def secret_key_ref(self):
"""Gets the secret_key_ref of this V1EnvVarSource. # noqa: E501
:return: The secret_key_ref of this V1EnvVarSource. # noqa: E501
:rtype: V1SecretKeySelector
"""
return self._secret_key_ref
@secret_key_ref.setter
def secret_key_ref(self, secret_key_ref):
"""Sets the secret_key_ref of this V1EnvVarSource.
:param secret_key_ref: The secret_key_ref of this V1EnvVarSource. # noqa: E501
:type: V1SecretKeySelector
"""
self._secret_key_ref = secret_key_ref
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EnvVarSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EnvVarSource):
return True
return self.to_dict() != other.to_dict()
| V1EnvVarSource |
python | realpython__materials | python-313/free-threading-jit/benchmarks/pyfeatures.py | {
"start": 1061,
"end": 1463
} | class ____(Feature):
def __init__(self):
super().__init__("JIT Compiler")
@property
def supported(self) -> bool:
return "_Py_JIT" in sysconfig.get_config_var("PY_CORE_CFLAGS")
@property
def enabled(self) -> bool:
if sys.version_info >= (3, 13):
return _testinternalcapi.get_optimizer() is not None
else:
return False
| JitCompiler |
python | python__mypy | test-data/unit/plugins/arg_names.py | {
"start": 189,
"end": 1713
} | class ____(Plugin):
def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] | None:
if fullname in {
"mod.func",
"mod.func_unfilled",
"mod.func_star_expr",
"mod.ClassInit",
"mod.Outer.NestedClassInit",
}:
return extract_classname_and_set_as_return_type_function
return None
def get_method_hook(self, fullname: str) -> Callable[[MethodContext], Type] | None:
if fullname in {
"mod.Class.method",
"mod.Class.myclassmethod",
"mod.Class.mystaticmethod",
"mod.ClassUnfilled.method",
"mod.ClassStarExpr.method",
"mod.ClassChild.method",
"mod.ClassChild.myclassmethod",
}:
return extract_classname_and_set_as_return_type_method
return None
def extract_classname_and_set_as_return_type_function(ctx: FunctionContext) -> Type:
arg = ctx.args[ctx.callee_arg_names.index("classname")][0]
if not isinstance(arg, StrExpr):
return ctx.default_return_type
return ctx.api.named_generic_type(arg.value, [])
def extract_classname_and_set_as_return_type_method(ctx: MethodContext) -> Type:
arg = ctx.args[ctx.callee_arg_names.index("classname")][0]
if not isinstance(arg, StrExpr):
return ctx.default_return_type
return ctx.api.named_generic_type(arg.value, [])
def plugin(version: str) -> type[ArgNamesPlugin]:
return ArgNamesPlugin
| ArgNamesPlugin |
python | dask__distributed | distributed/client.py | {
"start": 219150,
"end": 221594
} | class ____:
"""Collect task metadata within a context block
This gathers ``TaskState`` metadata and final state from the scheduler
for tasks which are submitted and finished within the scope of this
context manager.
Examples
--------
>>> with get_task_metadata() as tasks:
... x.compute()
>>> tasks.metadata
{...}
>>> tasks.state
{...}
"""
def __init__(self):
self.name = f"task-metadata-{uuid.uuid4().hex}"
self.keys = set()
self.metadata = None
self.state = None
async def __aenter__(self):
await get_client().scheduler.start_task_metadata(name=self.name)
return self
async def __aexit__(self, exc_type, exc_value, traceback):
response = await get_client().scheduler.stop_task_metadata(name=self.name)
self.metadata = response["metadata"]
self.state = response["state"]
def __enter__(self):
return get_client().sync(self.__aenter__)
def __exit__(self, exc_type, exc_value, traceback):
return get_client().sync(self.__aexit__, exc_type, exc_value, traceback)
@contextmanager
def temp_default_client(c):
"""Set the default client for the duration of the context
.. note::
This function should be used exclusively for unit testing the default
client functionality. In all other cases, please use
``Client.as_current`` instead.
.. note::
Unlike ``Client.as_current``, this context manager is neither
thread-local nor task-local.
Parameters
----------
c : Client
This is what default_client() will return within the with-block.
"""
old_exec = default_client()
_set_global_client(c)
try:
with c.as_current():
yield
finally:
_set_global_client(old_exec)
def _close_global_client():
"""
Force close of global client. This cleans up when a client
wasn't close explicitly, e.g. interactive sessions.
"""
c = _get_global_client()
if c is not None:
c._should_close_loop = False
with suppress(TimeoutError, RuntimeError):
if c.asynchronous:
c.loop.add_callback(c.close, timeout=3)
else:
c.close(timeout=3)
def get_collections_metadata(collection):
return {
"type": type(collection).__name__,
}
atexit.register(_close_global_client)
| get_task_metadata |
python | getsentry__sentry | src/sentry/auth/providers/github/views.py | {
"start": 1081,
"end": 3447
} | class ____(AuthView):
def __init__(
self, org: RpcOrganization | dict[str, Any] | None = None, *args: Any, **kwargs: Any
) -> None:
self.org = org
super().__init__(*args, **kwargs)
def handle(self, request: HttpRequest, pipeline: AuthHelper) -> HttpResponseBase:
data: dict[str, Any] | None = pipeline.fetch_state("data")
assert data is not None
with GitHubClient(data["access_token"]) as client:
if self.org is not None:
# if we have a configured org (self.org) for our oauth provider
org_id = self.org.id if isinstance(self.org, RpcOrganization) else self.org["id"]
if not client.is_org_member(org_id):
# `is_org_member` fetches provider orgs for the auth'd provider user.
# if our configured org is not in the users list of orgs, then that user
# does not have access to the provisioned org and we will prevent access
return pipeline.error(ERR_NO_ORG_ACCESS)
user = client.get_user()
assert isinstance(user, dict)
if not user.get("email"):
emails = client.get_user_emails()
email = [
e["email"]
for e in emails
if ((not REQUIRE_VERIFIED_EMAIL) or e["verified"]) and e["primary"]
]
if len(email) == 0:
if REQUIRE_VERIFIED_EMAIL:
msg = ERR_NO_VERIFIED_PRIMARY_EMAIL
else:
msg = ERR_NO_PRIMARY_EMAIL
return pipeline.error(msg)
elif len(email) > 1:
if REQUIRE_VERIFIED_EMAIL:
msg = ERR_NO_SINGLE_VERIFIED_PRIMARY_EMAIL
else:
msg = ERR_NO_SINGLE_PRIMARY_EMAIL
return pipeline.error(msg)
else:
user["email"] = email[0]
# A user hasn't set their name in their Github profile so it isn't
# populated in the response
if not user.get("name"):
user["name"] = _get_name_from_email(user["email"])
pipeline.bind_state("user", user)
return pipeline.next_step()
| FetchUser |
python | jazzband__django-simple-history | simple_history/tests/tests/test_templatetags.py | {
"start": 103,
"end": 132
} | class ____:
bar = "bar"
| Foo |
python | huggingface__transformers | src/transformers/models/blenderbot_small/modeling_blenderbot_small.py | {
"start": 19128,
"end": 25425
} | class ____(BlenderbotSmallPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`BlenderbotSmallEncoderLayer`].
Args:
config: BlenderbotSmallConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: BlenderbotSmallConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
)
self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(embed_dim)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids=None,
attention_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
to_drop = False
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop: # skip the layer
to_drop = True
if to_drop:
layer_outputs = (None, None)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
| BlenderbotSmallEncoder |
python | ray-project__ray | python/ray/tune/examples/pbt_dcgan_mnist/pbt_dcgan_mnist_trainable.py | {
"start": 636,
"end": 5822
} | class ____(tune.Trainable):
def setup(self, config):
use_cuda = config.get("use_gpu") and torch.cuda.is_available()
self.device = torch.device("cuda" if use_cuda else "cpu")
self.netD = Discriminator().to(self.device)
self.netD.apply(weights_init)
self.netG = Generator().to(self.device)
self.netG.apply(weights_init)
self.criterion = nn.BCELoss()
self.optimizerD = optim.Adam(
self.netD.parameters(), lr=config.get("lr", 0.01), betas=(beta1, 0.999)
)
self.optimizerG = optim.Adam(
self.netG.parameters(), lr=config.get("lr", 0.01), betas=(beta1, 0.999)
)
with FileLock(os.path.expanduser("~/.data.lock")):
self.dataloader = get_data_loader(config.get("data_dir", "~/data"))
self.mnist_model_ref = config["mnist_model_ref"]
def step(self):
lossG, lossD, is_score = train_func(
self.netD,
self.netG,
self.optimizerG,
self.optimizerD,
self.criterion,
self.dataloader,
self._iteration,
self.device,
self.mnist_model_ref,
)
return {"lossg": lossG, "lossd": lossD, "is_score": is_score}
def save_checkpoint(self, checkpoint_dir):
path = os.path.join(checkpoint_dir, "checkpoint.pt")
torch.save(
{
"netDmodel": self.netD.state_dict(),
"netGmodel": self.netG.state_dict(),
"optimD": self.optimizerD.state_dict(),
"optimG": self.optimizerG.state_dict(),
},
path,
)
return checkpoint_dir
def load_checkpoint(self, checkpoint_dir):
path = os.path.join(checkpoint_dir, "checkpoint.pt")
checkpoint = torch.load(path)
self.netD.load_state_dict(checkpoint["netDmodel"])
self.netG.load_state_dict(checkpoint["netGmodel"])
self.optimizerD.load_state_dict(checkpoint["optimD"])
self.optimizerG.load_state_dict(checkpoint["optimG"])
def reset_config(self, new_config):
if "netD_lr" in new_config:
for param_group in self.optimizerD.param_groups:
param_group["lr"] = new_config["netD_lr"]
if "netG_lr" in new_config:
for param_group in self.optimizerG.param_groups:
param_group["lr"] = new_config["netG_lr"]
self.config = new_config
return True
# __Trainable_end__
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing"
)
parser.add_argument(
"--data-dir", type=str, default="~/data/", help="Set the path of the dataset."
)
args, _ = parser.parse_known_args()
ray.init()
import urllib.request
# Download a pre-trained MNIST model for inception score calculation.
# This is a tiny model (<100kb).
if not os.path.exists(MODEL_PATH):
print("downloading model")
os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
urllib.request.urlretrieve(
"https://github.com/ray-project/ray/raw/master/python/ray/tune/"
"examples/pbt_dcgan_mnist/mnist_cnn.pt",
MODEL_PATH,
)
dataloader = get_data_loader()
if not args.smoke_test:
plot_images(dataloader)
# load the pretrained mnist classification model for inception_score
mnist_cnn = Net()
mnist_cnn.load_state_dict(torch.load(MODEL_PATH))
mnist_cnn.eval()
mnist_model_ref = ray.put(mnist_cnn)
# __tune_begin__
scheduler = PopulationBasedTraining(
time_attr="training_iteration",
perturbation_interval=5,
hyperparam_mutations={
# distribution for resampling
"netG_lr": lambda: np.random.uniform(1e-2, 1e-5),
"netD_lr": lambda: np.random.uniform(1e-2, 1e-5),
},
)
tune_iter = 10 if args.smoke_test else 300
tuner = tune.Tuner(
PytorchTrainable,
run_config=tune.RunConfig(
name="pbt_dcgan_mnist",
stop={"training_iteration": tune_iter},
verbose=1,
checkpoint_config=tune.CheckpointConfig(checkpoint_at_end=True),
),
tune_config=tune.TuneConfig(
metric="is_score",
mode="max",
num_samples=8,
scheduler=scheduler,
reuse_actors=True,
),
param_space={
"netG_lr": tune.sample_from(
lambda spec: random.choice([0.0001, 0.0002, 0.0005])
),
"netD_lr": tune.sample_from(
lambda spec: random.choice([0.0001, 0.0002, 0.0005])
),
"mnist_model_ref": mnist_model_ref,
"data_dir": args.data_dir,
},
)
results = tuner.fit()
# export_formats=[ExportFormat.MODEL]
# __tune_end__
# demo of the trained Generators
if not args.smoke_test:
checkpoint_paths = [result.checkpoint.to_directory() for result in results]
demo_gan(checkpoint_paths)
| PytorchTrainable |
python | pytorch__pytorch | test/inductor/test_provenance_tracing.py | {
"start": 1374,
"end": 1601
} | class ____(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, a, b, c):
x = a * 3.14
y = torch.addmm(c, x, b)
z = torch.nn.functional.gelu(y)
return z
| Model |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/missing_maxsplit_arg.py | {
"start": 15,
"end": 162
} | class ____(str):
class_str = "1,2,3"
def split(self, sep=None, maxsplit=-1) -> list[str]:
return super().split(sep, maxsplit)
| Foo |
python | sympy__sympy | sympy/physics/paulialgebra.py | {
"start": 1436,
"end": 6002
} | class ____(Symbol):
"""
The class representing algebraic properties of Pauli matrices.
Explanation
===========
The symbol used to display the Pauli matrices can be changed with an
optional parameter ``label="sigma"``. Pauli matrices with different
``label`` attributes cannot multiply together.
If the left multiplication of symbol or number with Pauli matrix is needed,
please use parentheses to separate Pauli and symbolic multiplication
(for example: 2*I*(Pauli(3)*Pauli(2))).
Another variant is to use evaluate_pauli_product function to evaluate
the product of Pauli matrices and other symbols (with commutative
multiply rules).
See Also
========
evaluate_pauli_product
Examples
========
>>> from sympy.physics.paulialgebra import Pauli
>>> Pauli(1)
sigma1
>>> Pauli(1)*Pauli(2)
I*sigma3
>>> Pauli(1)*Pauli(1)
1
>>> Pauli(3)**4
1
>>> Pauli(1)*Pauli(2)*Pauli(3)
I
>>> from sympy.physics.paulialgebra import Pauli
>>> Pauli(1, label="tau")
tau1
>>> Pauli(1)*Pauli(2, label="tau")
sigma1*tau2
>>> Pauli(1, label="tau")*Pauli(2, label="tau")
I*tau3
>>> from sympy import I
>>> I*(Pauli(2)*Pauli(3))
-sigma1
>>> from sympy.physics.paulialgebra import evaluate_pauli_product
>>> f = I*Pauli(2)*Pauli(3)
>>> f
I*sigma2*sigma3
>>> evaluate_pauli_product(f)
-sigma1
"""
__slots__ = ("i", "label")
def __new__(cls, i, label="sigma"):
if i not in [1, 2, 3]:
raise IndexError("Invalid Pauli index")
obj = Symbol.__new__(cls, "%s%d" %(label,i), commutative=False, hermitian=True)
obj.i = i
obj.label = label
return obj
def __getnewargs_ex__(self):
return (self.i, self.label), {}
def _hashable_content(self):
return (self.i, self.label)
# FIXME don't work for -I*Pauli(2)*Pauli(3)
def __mul__(self, other):
if isinstance(other, Pauli):
j = self.i
k = other.i
jlab = self.label
klab = other.label
if jlab == klab:
return delta(j, k) \
+ I*epsilon(j, k, 1)*Pauli(1,jlab) \
+ I*epsilon(j, k, 2)*Pauli(2,jlab) \
+ I*epsilon(j, k, 3)*Pauli(3,jlab)
return super().__mul__(other)
def _eval_power(b, e):
if e.is_Integer and e.is_positive:
return super().__pow__(int(e) % 2)
def evaluate_pauli_product(arg):
'''Help function to evaluate Pauli matrices product
with symbolic objects.
Parameters
==========
arg: symbolic expression that contains Paulimatrices
Examples
========
>>> from sympy.physics.paulialgebra import Pauli, evaluate_pauli_product
>>> from sympy import I
>>> evaluate_pauli_product(I*Pauli(1)*Pauli(2))
-sigma3
>>> from sympy.abc import x
>>> evaluate_pauli_product(x**2*Pauli(2)*Pauli(1))
-I*x**2*sigma3
'''
start = arg
end = arg
if isinstance(arg, Pow) and isinstance(arg.args[0], Pauli):
if arg.args[1].is_odd:
return arg.args[0]
else:
return 1
if isinstance(arg, Add):
return Add(*[evaluate_pauli_product(part) for part in arg.args])
if isinstance(arg, TensorProduct):
return TensorProduct(*[evaluate_pauli_product(part) for part in arg.args])
elif not(isinstance(arg, Mul)):
return arg
while not start == end or start == arg and end == arg:
start = end
tmp = start.as_coeff_mul()
sigma_product = 1
com_product = 1
keeper = 1
for el in tmp[1]:
if isinstance(el, Pauli):
sigma_product *= el
elif not el.is_commutative:
if isinstance(el, Pow) and isinstance(el.args[0], Pauli):
if el.args[1].is_odd:
sigma_product *= el.args[0]
elif isinstance(el, TensorProduct):
keeper = keeper*sigma_product*\
TensorProduct(
*[evaluate_pauli_product(part) for part in el.args]
)
sigma_product = 1
else:
keeper = keeper*sigma_product*el
sigma_product = 1
else:
com_product *= el
end = tmp[0]*keeper*sigma_product*com_product
if end == arg: break
return end
| Pauli |
python | openai__openai-python | src/openai/types/beta/realtime/session.py | {
"start": 2651,
"end": 4272
} | class ____(BaseModel):
create_response: Optional[bool] = None
"""
Whether or not to automatically generate a response when a VAD stop event
occurs.
"""
eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None
"""Used only for `semantic_vad` mode.
The eagerness of the model to respond. `low` will wait longer for the user to
continue speaking, `high` will respond more quickly. `auto` is the default and
is equivalent to `medium`.
"""
interrupt_response: Optional[bool] = None
"""
Whether or not to automatically interrupt any ongoing response with output to
the default conversation (i.e. `conversation` of `auto`) when a VAD start event
occurs.
"""
prefix_padding_ms: Optional[int] = None
"""Used only for `server_vad` mode.
Amount of audio to include before the VAD detected speech (in milliseconds).
Defaults to 300ms.
"""
silence_duration_ms: Optional[int] = None
"""Used only for `server_vad` mode.
Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
With shorter values the model will respond more quickly, but may jump in on
short pauses from the user.
"""
threshold: Optional[float] = None
"""Used only for `server_vad` mode.
Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
threshold will require louder audio to activate the model, and thus might
perform better in noisy environments.
"""
type: Optional[Literal["server_vad", "semantic_vad"]] = None
"""Type of turn detection."""
| TurnDetection |
python | PyCQA__pylint | tests/functional/s/super/super_init_not_called.py | {
"start": 133,
"end": 501
} | class ____(ctypes.BigEndianStructure):
"""This class should not emit a super-init-not-called warning.
It previously did, because ``next(node.infer())`` was used in that checker's logic
and the first inferred node was an Uninferable object, leading to this false positive.
"""
def __init__(self):
ctypes.BigEndianStructure.__init__(self)
| Foo |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 107750,
"end": 108012
} | class ____(BaseModel, extra="forbid"):
shard_id: int = Field(..., description="")
from_peer_id: int = Field(..., description="")
to_peer_id: int = Field(..., description="")
method: "ShardTransferMethod" = Field(..., description="")
| RestartTransfer |
python | getsentry__sentry | src/sentry/relay/types/rule_condition.py | {
"start": 1296,
"end": 1538
} | class ____(TypedDict):
"""Glob pattern matching condition
Glob matching is done in Relay with the following crate: https://docs.rs/globset/latest/globset
"""
op: Literal["glob"]
name: str
value: list[str]
| GlobCondition |
python | mwaskom__seaborn | tests/_core/test_properties.py | {
"start": 8710,
"end": 12410
} | class ____(DataFixtures):
def assert_equal(self, a, b):
assert self.unpack(a) == self.unpack(b)
def unpack(self, x):
return x
@pytest.mark.parametrize("data_type", ["cat", "num", "bool"])
def test_default(self, data_type, vectors):
scale = self.prop().default_scale(vectors[data_type])
assert isinstance(scale, Boolean if data_type == "bool" else Nominal)
@pytest.mark.parametrize("data_type", ["cat", "num", "bool"])
def test_inference_list(self, data_type, vectors):
scale = self.prop().infer_scale(self.values, vectors[data_type])
assert isinstance(scale, Boolean if data_type == "bool" else Nominal)
assert scale.values == self.values
@pytest.mark.parametrize("data_type", ["cat", "num", "bool"])
def test_inference_dict(self, data_type, vectors):
x = vectors[data_type]
values = dict(zip(categorical_order(x), self.values))
scale = self.prop().infer_scale(values, x)
assert isinstance(scale, Boolean if data_type == "bool" else Nominal)
assert scale.values == values
def test_dict_missing(self, cat_vector):
levels = categorical_order(cat_vector)
values = dict(zip(levels, self.values[:-1]))
scale = Nominal(values)
name = self.prop.__name__.lower()
msg = f"No entry in {name} dictionary for {repr(levels[-1])}"
with pytest.raises(ValueError, match=msg):
self.prop().get_mapping(scale, cat_vector)
@pytest.mark.parametrize("data_type", ["cat", "num"])
def test_mapping_default(self, data_type, vectors):
x = vectors[data_type]
mapping = self.prop().get_mapping(Nominal(), x)
n = x.nunique()
for i, expected in enumerate(self.prop()._default_values(n)):
actual, = mapping([i])
self.assert_equal(actual, expected)
@pytest.mark.parametrize("data_type", ["cat", "num"])
def test_mapping_from_list(self, data_type, vectors):
x = vectors[data_type]
scale = Nominal(self.values)
mapping = self.prop().get_mapping(scale, x)
for i, expected in enumerate(self.standardized_values):
actual, = mapping([i])
self.assert_equal(actual, expected)
@pytest.mark.parametrize("data_type", ["cat", "num"])
def test_mapping_from_dict(self, data_type, vectors):
x = vectors[data_type]
levels = categorical_order(x)
values = dict(zip(levels, self.values[::-1]))
standardized_values = dict(zip(levels, self.standardized_values[::-1]))
scale = Nominal(values)
mapping = self.prop().get_mapping(scale, x)
for i, level in enumerate(levels):
actual, = mapping([i])
expected = standardized_values[level]
self.assert_equal(actual, expected)
def test_mapping_with_null_value(self, cat_vector):
mapping = self.prop().get_mapping(Nominal(self.values), cat_vector)
actual = mapping(np.array([0, np.nan, 2]))
v0, _, v2 = self.standardized_values
expected = [v0, self.prop.null_value, v2]
for a, b in zip(actual, expected):
self.assert_equal(a, b)
def test_unique_default_large_n(self):
n = 24
x = pd.Series(np.arange(n))
mapping = self.prop().get_mapping(Nominal(), x)
assert len({self.unpack(x_i) for x_i in mapping(x)}) == n
def test_bad_scale_values(self, cat_vector):
var_name = self.prop.__name__.lower()
with pytest.raises(TypeError, match=f"Scale values for a {var_name} variable"):
self.prop().get_mapping(Nominal(("o", "s")), cat_vector)
| ObjectPropertyBase |
python | dagster-io__dagster | python_modules/automation/automation/parse_dataproc_configs.py | {
"start": 359,
"end": 1337
} | class ____:
def __init__(self, name, enum_names, enum_descriptions):
self.name = name
self.enum_names = enum_names
self.enum_descriptions = enum_descriptions
def write(self, printer):
capitalized_name = self.name[0].upper() + self.name[1:]
printer.line(capitalized_name + " = Enum(")
with printer.with_indent():
printer.line(f"name='{capitalized_name}',")
printer.line("enum_values=[")
with printer.with_indent():
if self.enum_descriptions:
for name, value in zip(self.enum_names, self.enum_descriptions):
prefix = f"EnumValue('{name}', description='''"
printer.block(value + "'''),", initial_indent=prefix)
else:
for name in self.enum_names:
printer.line(f"EnumValue('{name}'),")
printer.line("],")
printer.line(")")
| Enum |
python | ansible__ansible | lib/ansible/module_utils/common/sentinel.py | {
"start": 168,
"end": 2372
} | class ____:
"""
Object which can be used to mark whether an entry as being special
A sentinel value demarcates a value or marks an entry as having a special meaning. In C, the
Null byte is used as a sentinel for the end of a string. In Python, None is often used as
a Sentinel in optional parameters to mean that the parameter was not set by the user.
You should use None as a Sentinel value any Python code where None is not a valid entry. If
None is a valid entry, though, then you need to create a different value, which is the purpose
of this class.
Example of using Sentinel as a default parameter value::
def confirm_big_red_button(tristate=Sentinel):
if tristate is Sentinel:
print('You must explicitly press the big red button to blow up the base')
elif tristate is True:
print('Countdown to destruction activated')
elif tristate is False:
print('Countdown stopped')
elif tristate is None:
print('Waiting for more input')
Example of using Sentinel to tell whether a dict which has a default value has been changed::
values = {'one': Sentinel, 'two': Sentinel}
defaults = {'one': 1, 'two': 2}
# [.. Other code which does things including setting a new value for 'one' ..]
values['one'] = None
# [..]
print('You made changes to:')
for key, value in values.items():
if value is Sentinel:
continue
print('%s: %s' % (key, value)
"""
def __new__(cls):
"""
Return the cls itself. This makes both equality and identity True for comparing the class
to an instance of the class, preventing common usage errors.
Preferred usage::
a = Sentinel
if a is Sentinel:
print('Sentinel value')
However, these are True as well, eliminating common usage errors::
if Sentinel is Sentinel():
print('Sentinel value')
if Sentinel == Sentinel():
print('Sentinel value')
"""
return cls
| Sentinel |
python | graphql-python__graphene | graphene/relay/node.py | {
"start": 549,
"end": 1520
} | class ____(Field):
def __init__(
self,
node=None,
parent_type=None,
required=True,
global_id_type=DefaultGlobalIDType,
*args,
**kwargs,
):
super(GlobalID, self).__init__(
global_id_type.graphene_type, required=required, *args, **kwargs
)
self.node = node or Node
self.parent_type_name = parent_type._meta.name if parent_type else None
@staticmethod
def id_resolver(parent_resolver, node, root, info, parent_type_name=None, **args):
type_id = parent_resolver(root, info, **args)
parent_type_name = parent_type_name or info.parent_type.name
return node.to_global_id(parent_type_name, type_id) # root._meta.name
def wrap_resolve(self, parent_resolver):
return partial(
self.id_resolver,
parent_resolver,
self.node,
parent_type_name=self.parent_type_name,
)
| GlobalID |
python | davidhalter__jedi | test/completion/recursion.py | {
"start": 1340,
"end": 1587
} | class ____:
def a(self, b):
for i in b:
for i in self.a(i):
#?
yield i
foo = int
foo = foo # type: foo
#? int
foo
while True:
bar = int
bar = bar # type: bar
#? int()
bar
| B |
python | falconry__falcon | falcon/routing/compiled.py | {
"start": 1896,
"end": 29915
} | class ____:
"""Fast URI router which compiles its routing logic to Python code.
Generally you do not need to use this router class directly, as an
instance is created by default when the falcon.App class is initialized.
The router treats URI paths as a tree of URI segments and searches by
checking the URI one segment at a time. Instead of interpreting the route
tree for each look-up, it generates inlined, bespoke Python code to
perform the search, then compiles that code. This makes the route
processing quite fast.
The compilation process is delayed until the first use of the router (on the
first routed request) to reduce the time it takes to start the application.
This may noticeably delay the first response of the application when a large
number of routes have been added. When adding the last route
to the application a `compile` flag may be provided to force the router
to compile immediately, thus avoiding any delay for the first response.
Note:
When using a multi-threaded web server to host the application, it is
possible that multiple requests may be routed at the same time upon
startup. Therefore, the framework employs a lock to ensure that only a
single compilation of the decision tree is performed.
See also :meth:`.CompiledRouter.add_route`
"""
__slots__ = (
'_ast',
'_converter_map',
'_converters',
'_find',
'_finder_src',
'_options',
'_patterns',
'_return_values',
'_roots',
'_compile_lock',
)
def __init__(self) -> None:
self._ast: _CxParent = _CxParent()
self._converters: list[converters.BaseConverter] = []
self._finder_src: str = ''
self._options = CompiledRouterOptions()
# PERF(kgriffs): This is usually an anti-pattern, but we do it
# here to reduce lookup time.
self._converter_map = self._options.converters.data
self._patterns: list[Pattern] = []
self._return_values: list[CompiledRouterNode] = []
self._roots: list[CompiledRouterNode] = []
# NOTE(caselit): set _find to the delayed compile method to ensure that
# compile is called when the router is first used
self._find = self._compile_and_find
self._compile_lock = Lock()
@property
def options(self) -> CompiledRouterOptions:
return self._options
@property
def finder_src(self) -> str:
# NOTE(caselit): ensure that the router is actually compiled before
# returning the finder source, since the current value may be out of
# date
self.find('/')
return self._finder_src
def map_http_methods(self, resource: object, **kwargs: Any) -> MethodDict:
"""Map HTTP methods (e.g., GET, POST) to methods of a resource object.
This method is called from :meth:`~.add_route` and may be overridden to
provide a custom mapping strategy.
Args:
resource (instance): Object which represents a REST resource.
The default maps the HTTP method ``GET`` to ``on_get()``,
``POST`` to ``on_post()``, etc. If any HTTP methods are not
supported by your resource, simply don't define the
corresponding request handlers, and Falcon will do the right
thing.
Keyword Args:
suffix (str): Optional responder name suffix for this route. If
a suffix is provided, Falcon will map GET requests to
``on_get_{suffix}()``, POST requests to ``on_post_{suffix}()``,
etc. In this way, multiple closely-related routes can be
mapped to the same resource. For example, a single resource
class can use suffixed responders to distinguish requests
for a single item vs. a collection of those same items.
Another class might use a suffixed responder to handle
a shortlink route in addition to the regular route for the
resource.
"""
return map_http_methods(resource, suffix=kwargs.get('suffix', None))
def add_route( # noqa: C901
self, uri_template: str, resource: object, **kwargs: Any
) -> None:
"""Add a route between a URI path template and a resource.
This method may be overridden to customize how a route is added.
Args:
uri_template (str): A URI template to use for the route
resource (object): The resource instance to associate with
the URI template.
Keyword Args:
suffix (str): Optional responder name suffix for this route. If
a suffix is provided, Falcon will map GET requests to
``on_get_{suffix}()``, POST requests to ``on_post_{suffix}()``,
etc. In this way, multiple closely-related routes can be
mapped to the same resource. For example, a single resource
class can use suffixed responders to distinguish requests
for a single item vs. a collection of those same items.
Another class might use a suffixed responder to handle
a shortlink route in addition to the regular route for the
resource.
compile (bool): Optional flag that can be used to compile the
routing logic on this call. By default, :class:`.CompiledRouter`
delays compilation until the first request is routed. This may
introduce a noticeable amount of latency when handling the first
request, especially when the application implements a large
number of routes. Setting `compile` to ``True`` when the last
route is added ensures that the first request will not be
delayed in this case (defaults to ``False``).
Note:
Always setting this flag to ``True`` may slow down the
addition of new routes when hundreds of them are added at
once. It is advisable to only set this flag to ``True`` when
adding the final route.
"""
# NOTE(kgriffs): falcon.asgi.App injects this private kwarg; it is
# only intended to be used internally.
asgi: bool = kwargs.get('_asgi', False)
method_map = self.map_http_methods(resource, **kwargs)
set_default_responders(method_map, asgi=asgi)
if asgi:
self._require_coroutine_responders(method_map)
else:
self._require_non_coroutine_responders(method_map)
# NOTE(kgriffs): Fields may have whitespace in them, so sub
# those before checking the rest of the URI template.
if re.search(r'\s', _FIELD_PATTERN.sub('{FIELD}', uri_template)):
raise UnacceptableRouteError('URI templates may not include whitespace.')
path = uri_template.lstrip('/').split('/')
used_names: set[str] = set()
for segment in path:
self._validate_template_segment(segment, used_names)
def find_cmp_converter(node: CompiledRouterNode) -> tuple[str, str] | None:
value = [
(field, converter)
for field, converter, _ in node.var_converter_map
if converters._consumes_multiple_segments(
self._converter_map[converter]
)
]
if value:
return value[0]
else:
return None
def insert(nodes: list[CompiledRouterNode], path_index: int = 0) -> None:
for node in nodes:
segment = path[path_index]
if node.matches(segment):
path_index += 1
if path_index == len(path):
# NOTE(kgriffs): Override previous node
node.method_map = method_map
node.resource = resource
node.uri_template = uri_template
else:
cpc = find_cmp_converter(node)
if cpc:
raise UnacceptableRouteError(
_NO_CHILDREN_ERR.format(uri_template, *cpc)
)
insert(node.children, path_index)
return
if node.conflicts_with(segment):
raise UnacceptableRouteError(
'The URI template for this route is inconsistent or conflicts '
"with another route's template. This is usually caused by "
'configuring a field converter differently for the same field '
'in two different routes, or by using different field names '
"at the same level in the path (e.g.,'/parents/{id}' and "
"'/parents/{parent_id}/children')"
)
# NOTE(richardolsson): If we got this far, the node doesn't already
# exist and needs to be created. This builds a new branch of the
# routing tree recursively until it reaches the new node leaf.
new_node = CompiledRouterNode(path[path_index])
if new_node.is_complex:
cpc = find_cmp_converter(new_node)
if cpc:
raise UnacceptableRouteError(
'Cannot use converter "{1}" of variable "{0}" in a template '
'that includes other characters or variables.'.format(*cpc)
)
nodes.append(new_node)
if path_index == len(path) - 1:
new_node.method_map = method_map
new_node.resource = resource
new_node.uri_template = uri_template
else:
cpc = find_cmp_converter(new_node)
if cpc:
# NOTE(caselit): assume success and remove the node if it's not
# supported to avoid leaving the router in a broken state.
nodes.remove(new_node)
raise UnacceptableRouteError(
_NO_CHILDREN_ERR.format(uri_template, *cpc)
)
insert(new_node.children, path_index + 1)
insert(self._roots)
# NOTE(caselit): when compile is True run the actual compile step, otherwise
# reset the _find, so that _compile will be called on the next find use
if kwargs.get('compile', False):
self._find = self._compile()
else:
self._find = self._compile_and_find
# NOTE(caselit): keep Request as string otherwise sphinx complains that it resolves
# to multiple classes, since the symbol is imported only for type check.
def find(
self,
uri: str,
req: 'Request' | None = None, # noqa: UP037
) -> tuple[object, MethodDict, dict[str, Any], str | None] | None:
"""Search for a route that matches the given partial URI.
Args:
uri(str): The requested path to route.
Keyword Args:
req: The :class:`falcon.Request` or :class:`falcon.asgi.Request`
object that will be passed to the routed responder. Currently
the value of this argument is ignored by
:class:`~.CompiledRouter`. Routing is based solely on the path.
Returns:
tuple: A 4-member tuple composed of (resource, method_map,
params, uri_template), or ``None`` if no route matches
the requested path.
"""
path = uri.lstrip('/').split('/')
params: dict[str, Any] = {}
node: CompiledRouterNode | None = self._find(
path, self._return_values, self._patterns, self._converters, params
)
if node is not None:
return node.resource, node.method_map or {}, params, node.uri_template
else:
return None
# -----------------------------------------------------------------
# Private
# -----------------------------------------------------------------
def _require_coroutine_responders(self, method_map: MethodDict) -> None:
for method, responder in method_map.items():
# NOTE(kgriffs): We don't simply wrap non-async functions
# since they likely perform relatively long blocking
# operations that need to be explicitly made non-blocking
# by the developer; raising an error helps highlight this
# issue.
if not iscoroutinefunction(responder) and is_python_func(responder):
if _should_wrap_non_coroutines():
method_map[method] = wrap_sync_to_async(responder)
else:
msg = (
'The {} responder must be a non-blocking '
'async coroutine (i.e., defined using async def) to '
'avoid blocking the main request thread.'
)
msg = msg.format(responder)
raise TypeError(msg)
def _require_non_coroutine_responders(self, method_map: MethodDict) -> None:
for method, responder in method_map.items():
# NOTE(kgriffs): We don't simply wrap non-async functions
# since they likely perform relatively long blocking
# operations that need to be explicitly made non-blocking
# by the developer; raising an error helps highlight this
# issue.
if iscoroutinefunction(responder):
msg = (
'The {} responder must be a regular synchronous '
'method to be used with a WSGI app.'
)
msg = msg.format(responder)
raise TypeError(msg)
def _validate_template_segment(self, segment: str, used_names: set[str]) -> None:
"""Validate a single path segment of a URI template.
1. Ensure field names are valid Python identifiers, since they
will be passed as kwargs to responders.
2. Check that there are no duplicate names, since that causes
(at least) the following problems:
a. For simple nodes, values from deeper nodes overwrite
values from more shallow nodes.
b. For complex nodes, re.compile() raises a nasty error
3. Check that when the converter syntax is used, the named
converter exists.
"""
for field in _FIELD_PATTERN.finditer(segment):
name = field.group('fname')
is_identifier = _IDENTIFIER_PATTERN.match(name)
if not is_identifier or name in keyword.kwlist:
msg_template = (
'Field names must be valid identifiers ("{0}" is not valid)'
)
msg = msg_template.format(name)
raise UnacceptableRouteError(msg)
if name in used_names:
msg_template = (
'Field names may not be duplicated ("{0}" was used more than once)'
)
msg = msg_template.format(name)
raise UnacceptableRouteError(msg)
used_names.add(name)
if field.group('cname_sep') == ':':
msg = 'Missing converter for field "{0}"'.format(name)
raise UnacceptableRouteError(msg)
name = field.group('cname')
if name:
if name not in self._converter_map:
msg = 'Unknown converter: "{0}"'.format(name)
raise UnacceptableRouteError(msg)
try:
self._instantiate_converter(
self._converter_map[name], field.group('argstr')
)
except Exception as e:
msg = 'Cannot instantiate converter "{}"'.format(name)
raise UnacceptableRouteError(msg) from e
def _generate_ast( # noqa: C901
self,
nodes: list[CompiledRouterNode],
parent: _CxParent,
return_values: list[CompiledRouterNode],
patterns: list[Pattern],
params_stack: list[_CxElement],
level: int = 0,
fast_return: bool = True,
) -> None:
"""Generate a coarse AST for the router."""
# NOTE(caselit): setting of the parameters in the params dict is delayed until
# a match has been found by adding them to the param_stack. This way superfluous
# parameters are not set to the params dict while descending on branches that
# ultimately do not match.
# NOTE(kgriffs): Base case
if not nodes:
return
outer_parent = _CxIfPathLength('>', level)
parent.append_child(outer_parent)
parent = outer_parent
found_simple = False
# NOTE(kgriffs & philiptzou): Sort nodes in this sequence:
# static nodes(0), complex var nodes(1) and simple var nodes(2).
# so that none of them get masked.
nodes = sorted(
nodes, key=lambda node: node.is_var + (node.is_var and not node.is_complex)
)
# NOTE(kgriffs): Down to this branch in the tree, we can do a
# fast 'return None'. See if the nodes at this branch are
# all still simple, meaning there is only one possible path.
if fast_return:
if len(nodes) > 1:
# NOTE(kgriffs): There's the possibility of more than
# one path.
var_nodes = [node for node in nodes if node.is_var]
found_var_nodes = bool(var_nodes)
fast_return = not found_var_nodes
original_params_stack = params_stack.copy()
for node in nodes:
params_stack = original_params_stack.copy()
consume_multiple_segments = False
if node.is_var:
if node.is_complex:
# NOTE(richardolsson): Complex nodes are nodes which
# contain anything more than a single literal or variable,
# and they need to be checked using a pre-compiled regular
# expression.
assert node.var_pattern
pattern_idx = len(patterns)
patterns.append(node.var_pattern)
cx_segment = _CxIfPathSegmentPattern(
level, pattern_idx, node.var_pattern.pattern
)
parent.append_child(cx_segment)
parent = cx_segment
if node.var_converter_map:
parent.append_child(_CxPrefetchGroupsFromPatternMatch())
parent = self._generate_conversion_ast(
parent, node, params_stack
)
else:
cx_pattern = _CxVariableFromPatternMatch(len(params_stack) + 1)
params_stack.append(
_CxSetParamsFromDict(cx_pattern.dict_variable_name)
)
parent.append_child(cx_pattern)
else:
# NOTE(kgriffs): Simple nodes just capture the entire path
# segment as the value for the param. They have a var_name defined
field_name = node.var_name
assert field_name is not None
if node.var_converter_map:
assert len(node.var_converter_map) == 1
__, converter_name, converter_argstr = node.var_converter_map[0]
converter_class = self._converter_map[converter_name]
converter_obj = self._instantiate_converter(
converter_class, converter_argstr
)
converter_idx = len(self._converters)
self._converters.append(converter_obj)
if converters._consumes_multiple_segments(converter_obj):
consume_multiple_segments = True
parent.append_child(_CxSetFragmentFromRemainingPaths(level))
else:
parent.append_child(_CxSetFragmentFromPath(level))
cx_converter = _CxIfConverterField(
len(params_stack) + 1, converter_idx
)
params_stack.append(
_CxSetParamFromValue(
field_name, cx_converter.field_variable_name
)
)
parent.append_child(cx_converter)
parent = cx_converter
else:
params_stack.append(_CxSetParamFromPath(field_name, level))
# NOTE(kgriffs): We don't allow multiple simple var nodes
# to exist at the same level, e.g.:
#
# /foo/{id}/bar
# /foo/{name}/bar
#
_found_nodes = [
_node
for _node in nodes
if _node.is_var and not _node.is_complex
]
assert len(_found_nodes) == 1
found_simple = True
else:
# NOTE(kgriffs): Not a param, so must match exactly
cx_literal = _CxIfPathSegmentLiteral(level, node.raw_segment)
parent.append_child(cx_literal)
parent = cx_literal
if node.resource is not None:
# NOTE(kgriffs): This is a valid route, so we will want to
# return the relevant information.
resource_idx = len(return_values)
return_values.append(node)
assert not (consume_multiple_segments and node.children)
self._generate_ast(
node.children,
parent,
return_values,
patterns,
params_stack.copy(),
level + 1,
fast_return,
)
if node.resource is None:
if fast_return:
parent.append_child(_CxReturnNone())
else:
if consume_multiple_segments:
for params in params_stack:
parent.append_child(params)
parent.append_child(_CxReturnValue(resource_idx))
else:
# NOTE(kgriffs): Make sure that we have consumed all of
# the segments for the requested route; otherwise we could
# mistakenly match "/foo/23/bar" against "/foo/{id}".
cx_path_len = _CxIfPathLength('==', level + 1)
for params in params_stack:
cx_path_len.append_child(params)
cx_path_len.append_child(_CxReturnValue(resource_idx))
parent.append_child(cx_path_len)
if fast_return:
parent.append_child(_CxReturnNone())
parent = outer_parent
if not found_simple and fast_return:
parent.append_child(_CxReturnNone())
def _generate_conversion_ast(
self,
parent: _CxParent,
node: CompiledRouterNode,
params_stack: list[_CxElement],
) -> _CxParent:
# NOTE(kgriffs): Unroll the converter loop into
# a series of nested "if" constructs.
for field_name, converter_name, converter_argstr in node.var_converter_map:
converter_class = self._converter_map[converter_name]
assert not converters._consumes_multiple_segments(converter_class)
converter_obj = self._instantiate_converter(
converter_class, converter_argstr
)
converter_idx = len(self._converters)
self._converters.append(converter_obj)
parent.append_child(_CxSetFragmentFromField(field_name))
cx_converter = _CxIfConverterField(len(params_stack) + 1, converter_idx)
params_stack.append(
_CxSetParamFromValue(field_name, cx_converter.field_variable_name)
)
parent.append_child(cx_converter)
parent = cx_converter
# NOTE(kgriffs): Add remaining fields that were not
# converted, if any.
if node.num_fields > len(node.var_converter_map):
cx_pattern_match = _CxVariableFromPatternMatchPrefetched(
len(params_stack) + 1
)
params_stack.append(
_CxSetParamsFromDict(cx_pattern_match.dict_variable_name)
)
parent.append_child(cx_pattern_match)
return parent
def _compile(self) -> Callable:
"""Generate Python code for the entire routing tree.
The generated code is compiled and the resulting Python method
is returned.
"""
src_lines = [
'def find(path, return_values, patterns, converters, params):',
_TAB_STR + 'path_len = len(path)',
]
self._return_values = []
self._patterns = []
self._converters = []
self._ast = _CxParent()
self._generate_ast(
self._roots, self._ast, self._return_values, self._patterns, params_stack=[]
)
src_lines.append(self._ast.src(0))
# PERF(kgriffs): Explicit return of None is faster than implicit
src_lines.append(_TAB_STR + 'return None')
self._finder_src = '\n'.join(src_lines)
scope: MethodDict = {}
exec(compile(self._finder_src, '<string>', 'exec'), scope)
return scope['find']
def _instantiate_converter(
self, klass: type, argstr: str | None = None
) -> converters.BaseConverter:
if argstr is None:
return klass()
# NOTE(kgriffs): Don't try this at home. ;)
src = '{0}({1})'.format(klass.__name__, argstr)
return eval(src, {klass.__name__: klass})
def _compile_and_find(
self,
path: list[str],
_return_values: Any,
_patterns: Any,
_converters: Any,
params: Any,
) -> Any:
"""Compile the router, set the `_find` attribute and return its result.
This method is set to the `_find` attribute to delay the compilation of the
router until it's used for the first time. Subsequent calls to `_find` will
be processed by the actual routing function.
This method must have the same signature as the function returned by the
:meth:`.CompiledRouter._compile`.
"""
with self._compile_lock:
if self._find == self._compile_and_find:
# NOTE(caselit): replace the find with the result of the
# router compilation
self._find = self._compile()
# NOTE(caselit): return_values, patterns, converters are reset by the _compile
# method, so the updated ones must be used
return self._find(
path, self._return_values, self._patterns, self._converters, params
)
_NO_CHILDREN_ERR = (
'Cannot add route with template "{0}". Field name "{1}" '
'uses the converter "{2}" that will consume all the path, '
'making it impossible to match this route.'
)
| CompiledRouter |
python | facebook__pyre-check | client/commands/daemon_querier.py | {
"start": 1050,
"end": 1187
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
response: List[str]
@dataclasses.dataclass(frozen=True)
| QueryModulesOfPathResponse |
python | numba__numba | numba/np/ufunc/dufunc.py | {
"start": 836,
"end": 5444
} | class ____:
def __init__(self, ufunc, a, a_ty, indices, indices_ty, b=None, b_ty=None):
self.ufunc = ufunc
self.a = a
self.a_ty = a_ty
self.indices = indices
self.indices_ty = indices_ty
self.b = b
self.b_ty = b_ty
def run(self, context, builder):
self._prepare(context, builder)
loop_indices, _ = self.indexer.begin_loops()
self._call_ufunc(context, builder, loop_indices)
self.indexer.end_loops()
def need_advanced_indexing(self):
return isinstance(self.indices_ty, types.BaseTuple)
def _prepare(self, context, builder):
from numba.np.arrayobj import normalize_indices, FancyIndexer
a, indices = self.a, self.indices
a_ty, indices_ty = self.a_ty, self.indices_ty
zero = context.get_value_type(types.intp)(0)
if self.b is not None:
self.b_indice = cgutils.alloca_once_value(builder, zero)
if self.need_advanced_indexing():
indices = cgutils.unpack_tuple(builder, indices,
count=len(indices_ty))
index_types = indices_ty.types
index_types, indices = normalize_indices(context, builder,
index_types, indices)
else:
indices = (indices,)
index_types = (indices_ty,)
index_types, indices = normalize_indices(context, builder,
index_types, indices)
self.indexer = FancyIndexer(context, builder, a_ty, a,
index_types, indices)
self.indexer.prepare()
self.cres = self._compile_ufunc(context, builder)
def _load_val(self, context, builder, loop_indices, array, array_ty):
from numba.np.arrayobj import load_item
shapes = cgutils.unpack_tuple(builder, array.shape)
strides = cgutils.unpack_tuple(builder, array.strides)
data = array.data
ptr = cgutils.get_item_pointer2(context, builder, data, shapes, strides,
array_ty.layout, loop_indices)
val = load_item(context, builder, array_ty, ptr)
return ptr, val
def _load_flat(self, context, builder, indices, array, array_ty):
idx = builder.load(indices)
sig = array_ty.dtype(array_ty, types.intp)
impl = context.get_function(operator.getitem, sig)
val = impl(builder, (array, idx))
# increment indices
one = context.get_value_type(types.intp)(1)
idx = builder.add(idx, one)
builder.store(idx, indices)
return None, val
def _store_val(self, context, builder, array, array_ty, ptr, val):
from numba.np.arrayobj import store_item
fromty = self.cres.signature.return_type
toty = array_ty.dtype
val = context.cast(builder, val, fromty, toty)
store_item(context, builder, array_ty, val, ptr)
def _compile_ufunc(self, context, builder):
ufunc = self.ufunc.key[0]
if self.b is None:
sig = (self.a_ty.dtype,)
else:
sig = (self.a_ty.dtype, self.b_ty.dtype)
cres = ufunc.add(sig)
context.add_linking_libs((cres.library,))
return cres
def _call_ufunc(self, context, builder, loop_indices):
cres = self.cres
a, a_ty = self.a, self.a_ty
ptr, val = self._load_val(context, builder, loop_indices, a, a_ty)
if self.b is None:
args = (val,)
else:
b, b_ty, b_idx = self.b, self.b_ty, self.b_indice
_, val_b = self._load_flat(context, builder, b_idx, b, b_ty)
args = (val, val_b)
res = context.call_internal(builder, cres.fndesc, cres.signature,
args)
self._store_val(context, builder, a, a_ty, ptr, res)
def make_dufunc_kernel(_dufunc):
from numba.np import npyimpl
class DUFuncKernel(npyimpl._Kernel):
"""
npyimpl._Kernel subclass responsible for lowering a DUFunc kernel
(element-wise function) inside a broadcast loop (which is
generated by npyimpl.numpy_ufunc_kernel()).
"""
dufunc = _dufunc
def __init__(self, context, builder, outer_sig):
super().__init__(context, builder, outer_sig)
self.inner_sig, self.cres = self.dufunc.find_ewise_function(
outer_sig.args)
DUFuncKernel.__name__ += _dufunc.ufunc.__name__
return DUFuncKernel
| UfuncAtIterator |
python | TheAlgorithms__Python | machine_learning/decision_tree.py | {
"start": 5738,
"end": 7212
} | class ____:
"""Decision Tres test class"""
@staticmethod
def helper_mean_squared_error_test(labels, prediction):
"""
helper_mean_squared_error_test:
@param labels: a one dimensional numpy array
@param prediction: a floating point value
return value: helper_mean_squared_error_test calculates the mean squared error
"""
squared_error_sum = float(0)
for label in labels:
squared_error_sum += (label - prediction) ** 2
return float(squared_error_sum / labels.size)
def main():
"""
In this demonstration we're generating a sample data set from the sin function in
numpy. We then train a decision tree on the data set and use the decision tree to
predict the label of 10 different test values. Then the mean squared error over
this test is displayed.
"""
x = np.arange(-1.0, 1.0, 0.005)
y = np.sin(x)
tree = DecisionTree(depth=10, min_leaf_size=10)
tree.train(x, y)
rng = np.random.default_rng()
test_cases = (rng.random(10) * 2) - 1
predictions = np.array([tree.predict(x) for x in test_cases])
avg_error = np.mean((predictions - test_cases) ** 2)
print("Test values: " + str(test_cases))
print("Predictions: " + str(predictions))
print("Average error: " + str(avg_error))
if __name__ == "__main__":
main()
import doctest
doctest.testmod(name="mean_squared_error", verbose=True)
| TestDecisionTree |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 99009,
"end": 100718
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
The rope index difference between sequence length and multimodal rope.
thinker_reply_part (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Hidden states from the thinker model that are used as input for the talker model. These represent the encoded
response that the talker model will use to generate speech tokens.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
rope_deltas: Optional[torch.LongTensor] = None
thinker_reply_part: Optional[torch.FloatTensor] = None
@auto_docstring
| Qwen2_5OmniTalkerCausalLMOutputWithPast |
python | bokeh__bokeh | tests/unit/bokeh/core/test_properties.py | {
"start": 15605,
"end": 15636
} | class ____(HasProps):
pass
| Foo |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/abstractClass8.py | {
"start": 203,
"end": 421
} | class ____(Foo):
@abstractmethod
def bar(self):
pass
@abstractmethod
def bar2(self):
pass
@final
# This should generate an error because Foo.foo, Bar.bar, and Bar.bar1
# are abstract.
| Bar |
python | mwaskom__seaborn | seaborn/relational.py | {
"start": 14853,
"end": 34630
} | class ____(_RelationalPlotter):
_legend_attributes = ["color", "s", "marker"]
def __init__(self, *, data=None, variables={}, legend=None):
# TODO this is messy, we want the mapping to be agnostic about
# the kind of plot to draw, but for the time being we need to set
# this information so the SizeMapping can use it
self._default_size_range = (
np.r_[.5, 2] * np.square(mpl.rcParams["lines.markersize"])
)
super().__init__(data=data, variables=variables)
self.legend = legend
def plot(self, ax, kws):
# --- Determine the visual attributes of the plot
data = self.comp_data.dropna()
if data.empty:
return
kws = normalize_kwargs(kws, mpl.collections.PathCollection)
# Define the vectors of x and y positions
empty = np.full(len(data), np.nan)
x = data.get("x", empty)
y = data.get("y", empty)
# Apply inverse scaling to the coordinate variables
_, inv_x = _get_transform_functions(ax, "x")
_, inv_y = _get_transform_functions(ax, "y")
x, y = inv_x(x), inv_y(y)
if "style" in self.variables:
# Use a representative marker so scatter sets the edgecolor
# properly for line art markers. We currently enforce either
# all or none line art so this works.
example_level = self._style_map.levels[0]
example_marker = self._style_map(example_level, "marker")
kws.setdefault("marker", example_marker)
# Conditionally set the marker edgecolor based on whether the marker is "filled"
# See https://github.com/matplotlib/matplotlib/issues/17849 for context
m = kws.get("marker", mpl.rcParams.get("marker", "o"))
if not isinstance(m, mpl.markers.MarkerStyle):
# TODO in more recent matplotlib (which?) can pass a MarkerStyle here
m = mpl.markers.MarkerStyle(m)
if m.is_filled():
kws.setdefault("edgecolor", "w")
# Draw the scatter plot
points = ax.scatter(x=x, y=y, **kws)
# Apply the mapping from semantic variables to artist attributes
if "hue" in self.variables:
points.set_facecolors(self._hue_map(data["hue"]))
if "size" in self.variables:
points.set_sizes(self._size_map(data["size"]))
if "style" in self.variables:
p = [self._style_map(val, "path") for val in data["style"]]
points.set_paths(p)
# Apply dependent default attributes
if "linewidth" not in kws:
sizes = points.get_sizes()
linewidth = .08 * np.sqrt(np.percentile(sizes, 10))
points.set_linewidths(linewidth)
kws["linewidth"] = linewidth
# Finalize the axes details
self._add_axis_labels(ax)
if self.legend:
attrs = {"hue": "color", "size": "s", "style": None}
self.add_legend_data(ax, _scatter_legend_artist, kws, attrs)
handles, _ = ax.get_legend_handles_labels()
if handles:
legend = ax.legend(title=self.legend_title)
adjust_legend_subtitles(legend)
def lineplot(
data=None, *,
x=None, y=None, hue=None, size=None, style=None, units=None, weights=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
dashes=True, markers=None, style_order=None,
estimator="mean", errorbar=("ci", 95), n_boot=1000, seed=None,
orient="x", sort=True, err_style="band", err_kws=None,
legend="auto", ci="deprecated", ax=None, **kwargs
):
# Handle deprecation of ci parameter
errorbar = _deprecate_ci(errorbar, ci)
p = _LinePlotter(
data=data,
variables=dict(
x=x, y=y, hue=hue, size=size, style=style, units=units, weight=weights
),
estimator=estimator, n_boot=n_boot, seed=seed, errorbar=errorbar,
sort=sort, orient=orient, err_style=err_style, err_kws=err_kws,
legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, dashes=dashes, order=style_order)
if ax is None:
ax = plt.gca()
if "style" not in p.variables and not {"ls", "linestyle"} & set(kwargs): # XXX
kwargs["dashes"] = "" if dashes is None or isinstance(dashes, bool) else dashes
if not p.has_xy_data:
return ax
p._attach(ax)
# Other functions have color as an explicit param,
# and we should probably do that here too
color = kwargs.pop("color", kwargs.pop("c", None))
kwargs["color"] = _default_color(ax.plot, hue, color, kwargs)
p.plot(ax, kwargs)
return ax
lineplot.__doc__ = """\
Draw a line plot with possibility of several semantic groupings.
{narrative.main_api}
{narrative.relational_semantic}
By default, the plot aggregates over multiple `y` values at each value of
`x` and shows an estimate of the central tendency and a confidence
interval for that estimate.
Parameters
----------
{params.core.data}
{params.core.xy}
hue : vector or key in `data`
Grouping variable that will produce lines with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in latter case.
size : vector or key in `data`
Grouping variable that will produce lines with different widths.
Can be either categorical or numeric, although size mapping will
behave differently in latter case.
style : vector or key in `data`
Grouping variable that will produce lines with different dashes
and/or markers. Can have a numeric dtype but will always be treated
as categorical.
{params.rel.units}
weights : vector or key in `data`
Data values or column used to compute weighted estimation.
Note that use of weights currently limits the choice of statistics
to a 'mean' estimator and 'ci' errorbar.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.dashes}
{params.rel.markers}
{params.rel.style_order}
{params.rel.estimator}
{params.stat.errorbar}
{params.rel.n_boot}
{params.rel.seed}
orient : "x" or "y"
Dimension along which the data are sorted / aggregated. Equivalently,
the "independent variable" of the resulting function.
sort : boolean
If True, the data will be sorted by the x and y variables, otherwise
lines will connect points in the order they appear in the dataset.
err_style : "band" or "bars"
Whether to draw the confidence intervals with translucent error bands
or discrete error bars.
err_kws : dict of keyword arguments
Additional parameters to control the aesthetics of the error bars. The
kwargs are passed either to :meth:`matplotlib.axes.Axes.fill_between`
or :meth:`matplotlib.axes.Axes.errorbar`, depending on `err_style`.
{params.rel.legend}
{params.rel.ci}
{params.core.ax}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.plot`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.scatterplot}
{seealso.pointplot}
Examples
--------
.. include:: ../docstrings/lineplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
def scatterplot(
data=None, *,
x=None, y=None, hue=None, size=None, style=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=True, style_order=None, legend="auto", ax=None,
**kwargs
):
p = _ScatterPlotter(
data=data,
variables=dict(x=x, y=y, hue=hue, size=size, style=style),
legend=legend
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, order=style_order)
if ax is None:
ax = plt.gca()
if not p.has_xy_data:
return ax
p._attach(ax)
color = kwargs.pop("color", None)
kwargs["color"] = _default_color(ax.scatter, hue, color, kwargs)
p.plot(ax, kwargs)
return ax
scatterplot.__doc__ = """\
Draw a scatter plot with possibility of several semantic groupings.
{narrative.main_api}
{narrative.relational_semantic}
Parameters
----------
{params.core.data}
{params.core.xy}
hue : vector or key in `data`
Grouping variable that will produce points with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in latter case.
size : vector or key in `data`
Grouping variable that will produce points with different sizes.
Can be either categorical or numeric, although size mapping will
behave differently in latter case.
style : vector or key in `data`
Grouping variable that will produce points with different markers.
Can have a numeric dtype but will always be treated as categorical.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.markers}
{params.rel.style_order}
{params.rel.legend}
{params.core.ax}
kwargs : key, value mappings
Other keyword arguments are passed down to
:meth:`matplotlib.axes.Axes.scatter`.
Returns
-------
{returns.ax}
See Also
--------
{seealso.lineplot}
{seealso.stripplot}
{seealso.swarmplot}
Examples
--------
.. include:: ../docstrings/scatterplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
def relplot(
data=None, *,
x=None, y=None, hue=None, size=None, style=None, units=None, weights=None,
row=None, col=None, col_wrap=None, row_order=None, col_order=None,
palette=None, hue_order=None, hue_norm=None,
sizes=None, size_order=None, size_norm=None,
markers=None, dashes=None, style_order=None,
legend="auto", kind="scatter", height=5, aspect=1, facet_kws=None,
**kwargs
):
if kind == "scatter":
Plotter = _ScatterPlotter
func = scatterplot
markers = True if markers is None else markers
elif kind == "line":
Plotter = _LinePlotter
func = lineplot
dashes = True if dashes is None else dashes
else:
err = f"Plot kind {kind} not recognized"
raise ValueError(err)
# Check for attempt to plot onto specific axes and warn
if "ax" in kwargs:
msg = (
"relplot is a figure-level function and does not accept "
"the `ax` parameter. You may wish to try {}".format(kind + "plot")
)
warnings.warn(msg, UserWarning)
kwargs.pop("ax")
# Use the full dataset to map the semantics
variables = dict(x=x, y=y, hue=hue, size=size, style=style)
if kind == "line":
variables["units"] = units
variables["weight"] = weights
else:
if units is not None:
msg = "The `units` parameter has no effect with kind='scatter'."
warnings.warn(msg, stacklevel=2)
if weights is not None:
msg = "The `weights` parameter has no effect with kind='scatter'."
warnings.warn(msg, stacklevel=2)
p = Plotter(
data=data,
variables=variables,
legend=legend,
)
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
p.map_size(sizes=sizes, order=size_order, norm=size_norm)
p.map_style(markers=markers, dashes=dashes, order=style_order)
# Extract the semantic mappings
if "hue" in p.variables:
palette = p._hue_map.lookup_table
hue_order = p._hue_map.levels
hue_norm = p._hue_map.norm
else:
palette = hue_order = hue_norm = None
if "size" in p.variables:
sizes = p._size_map.lookup_table
size_order = p._size_map.levels
size_norm = p._size_map.norm
if "style" in p.variables:
style_order = p._style_map.levels
if markers:
markers = {k: p._style_map(k, "marker") for k in style_order}
else:
markers = None
if dashes:
dashes = {k: p._style_map(k, "dashes") for k in style_order}
else:
dashes = None
else:
markers = dashes = style_order = None
# Now extract the data that would be used to draw a single plot
variables = p.variables
plot_data = p.plot_data
# Define the common plotting parameters
plot_kws = dict(
palette=palette, hue_order=hue_order, hue_norm=hue_norm,
sizes=sizes, size_order=size_order, size_norm=size_norm,
markers=markers, dashes=dashes, style_order=style_order,
legend=False,
)
plot_kws.update(kwargs)
if kind == "scatter":
plot_kws.pop("dashes")
# Add the grid semantics onto the plotter
grid_variables = dict(
x=x, y=y, row=row, col=col, hue=hue, size=size, style=style,
)
if kind == "line":
grid_variables.update(units=units, weights=weights)
p.assign_variables(data, grid_variables)
# Define the named variables for plotting on each facet
# Rename the variables with a leading underscore to avoid
# collisions with faceting variable names
plot_variables = {v: f"_{v}" for v in variables}
if "weight" in plot_variables:
plot_variables["weights"] = plot_variables.pop("weight")
plot_kws.update(plot_variables)
# Pass the row/col variables to FacetGrid with their original
# names so that the axes titles render correctly
for var in ["row", "col"]:
# Handle faceting variables that lack name information
if var in p.variables and p.variables[var] is None:
p.variables[var] = f"_{var}_"
grid_kws = {v: p.variables.get(v) for v in ["row", "col"]}
# Rename the columns of the plot_data structure appropriately
new_cols = plot_variables.copy()
new_cols.update(grid_kws)
full_data = p.plot_data.rename(columns=new_cols)
# Set up the FacetGrid object
facet_kws = {} if facet_kws is None else facet_kws.copy()
g = FacetGrid(
data=full_data.dropna(axis=1, how="all"),
**grid_kws,
col_wrap=col_wrap, row_order=row_order, col_order=col_order,
height=height, aspect=aspect, dropna=False,
**facet_kws
)
# Draw the plot
g.map_dataframe(func, **plot_kws)
# Label the axes, using the original variables
# Pass "" when the variable name is None to overwrite internal variables
g.set_axis_labels(variables.get("x") or "", variables.get("y") or "")
if legend:
# Replace the original plot data so the legend uses numeric data with
# the correct type, since we force a categorical mapping above.
p.plot_data = plot_data
# Handle the additional non-semantic keyword arguments out here.
# We're selective because some kwargs may be seaborn function specific
# and not relevant to the matplotlib artists going into the legend.
# Ideally, we will have a better solution where we don't need to re-make
# the legend out here and will have parity with the axes-level functions.
keys = ["c", "color", "alpha", "m", "marker"]
if kind == "scatter":
legend_artist = _scatter_legend_artist
keys += ["s", "facecolor", "fc", "edgecolor", "ec", "linewidth", "lw"]
else:
legend_artist = partial(mpl.lines.Line2D, xdata=[], ydata=[])
keys += [
"markersize", "ms",
"markeredgewidth", "mew",
"markeredgecolor", "mec",
"linestyle", "ls",
"linewidth", "lw",
]
common_kws = {k: v for k, v in kwargs.items() if k in keys}
attrs = {"hue": "color", "style": None}
if kind == "scatter":
attrs["size"] = "s"
elif kind == "line":
attrs["size"] = "linewidth"
p.add_legend_data(g.axes.flat[0], legend_artist, common_kws, attrs)
if p.legend_data:
g.add_legend(legend_data=p.legend_data,
label_order=p.legend_order,
title=p.legend_title,
adjust_subtitles=True)
# Rename the columns of the FacetGrid's `data` attribute
# to match the original column names
orig_cols = {
f"_{k}": f"_{k}_" if v is None else v for k, v in variables.items()
}
grid_data = g.data.rename(columns=orig_cols)
if data is not None and (x is not None or y is not None):
if not isinstance(data, pd.DataFrame):
data = pd.DataFrame(data)
g.data = pd.merge(
data,
grid_data[grid_data.columns.difference(data.columns)],
left_index=True,
right_index=True,
)
else:
g.data = grid_data
return g
relplot.__doc__ = """\
Figure-level interface for drawing relational plots onto a FacetGrid.
This function provides access to several different axes-level functions
that show the relationship between two variables with semantic mappings
of subsets. The `kind` parameter selects the underlying axes-level
function to use:
- :func:`scatterplot` (with `kind="scatter"`; the default)
- :func:`lineplot` (with `kind="line"`)
Extra keyword arguments are passed to the underlying function, so you
should refer to the documentation for each to see kind-specific options.
{narrative.main_api}
{narrative.relational_semantic}
After plotting, the :class:`FacetGrid` with the plot is returned and can
be used directly to tweak supporting plot details or add other layers.
Parameters
----------
{params.core.data}
{params.core.xy}
hue : vector or key in `data`
Grouping variable that will produce elements with different colors.
Can be either categorical or numeric, although color mapping will
behave differently in latter case.
size : vector or key in `data`
Grouping variable that will produce elements with different sizes.
Can be either categorical or numeric, although size mapping will
behave differently in latter case.
style : vector or key in `data`
Grouping variable that will produce elements with different styles.
Can have a numeric dtype but will always be treated as categorical.
{params.rel.units}
weights : vector or key in `data`
Data values or column used to compute weighted estimation.
Note that use of weights currently limits the choice of statistics
to a 'mean' estimator and 'ci' errorbar.
{params.facets.rowcol}
{params.facets.col_wrap}
row_order, col_order : lists of strings
Order to organize the rows and/or columns of the grid in, otherwise the
orders are inferred from the data objects.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.rel.sizes}
{params.rel.size_order}
{params.rel.size_norm}
{params.rel.style_order}
{params.rel.dashes}
{params.rel.markers}
{params.rel.legend}
kind : string
Kind of plot to draw, corresponding to a seaborn relational plot.
Options are `"scatter"` or `"line"`.
{params.facets.height}
{params.facets.aspect}
facet_kws : dict
Dictionary of other keyword arguments to pass to :class:`FacetGrid`.
kwargs : key, value pairings
Other keyword arguments are passed through to the underlying plotting
function.
Returns
-------
{returns.facetgrid}
Examples
--------
.. include:: ../docstrings/relplot.rst
""".format(
narrative=_relational_narrative,
params=_param_docs,
returns=_core_docs["returns"],
)
| _ScatterPlotter |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/dynamic.py | {
"start": 9285,
"end": 9835
} | class ____(_AppenderMixin[_T], Query[_T]): # type: ignore[misc]
"""A dynamic query that supports basic collection storage operations.
Methods on :class:`.AppenderQuery` include all methods of
:class:`_orm.Query`, plus additional methods used for collection
persistence.
"""
def mixin_user_query(cls: Any) -> type[_AppenderMixin[Any]]:
"""Return a new class with AppenderQuery functionality layered over."""
name = "Appender" + cls.__name__
return type(name, (_AppenderMixin, cls), {"query_class": cls})
| AppenderQuery |
python | huggingface__transformers | src/transformers/models/chameleon/modeling_chameleon.py | {
"start": 43883,
"end": 50197
} | class ____(ChameleonPreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
def __init__(self, config):
super().__init__(config)
self.model = ChameleonModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_image_tokens(self, pixel_values):
return self.model.get_image_tokens(pixel_values)
def get_image_features(self, pixel_values):
return self.model.get_image_features(pixel_values)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, CausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import ChameleonProcessor, ChameleonForConditionalGeneration
>>> import torch
>>> import requests
>>> from PIL import Image
>>> model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", dtype=torch.bfloat16)
>>> processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
>>> prompt = "I used to know a lot about constellations when I was younger, but as I grew older, I forgot most of what I knew. These are the only two constellations that I really remember now.<image><image>I would like for you to tell me about 3 more constellations and give me a little bit of history about the constellation."
>>> image = Image.open(requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw)
>>> image_2 = Image.open(requests.get("https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg", stream=True).raw)
>>> inputs = processor(images=[image, image_2], text=prompt, return_tensors="pt").to(model.device, torch.bfloat16)
>>> generated_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
>>> processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
# Disallow image tokens which does not include special begin-image and end-image tokens
image_tokens = self.model.vocabulary_mapping.image_tokens
logits[:, :, image_tokens] = torch.finfo(logits.dtype).min
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
pixel_values=None,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
use_cache=True,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
pixel_values=pixel_values,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
position_ids=position_ids,
use_cache=use_cache,
**kwargs,
)
if cache_position[0] != 0:
# If we're in cached decoding stage, pixel values should be `None` because input ids do not contain special image token anymore
# Otherwise we need pixel values to be passed to model
model_inputs["pixel_values"] = None
return model_inputs
__all__ = ["ChameleonForConditionalGeneration", "ChameleonModel", "ChameleonPreTrainedModel", "ChameleonVQVAE"]
| ChameleonForConditionalGeneration |
python | Pylons__pyramid | tests/test_scripts/test_pshell.py | {
"start": 14378,
"end": 14537
} | class ____:
def __init__(self, name, value):
self.name = name
self.value = value
def load(self):
return self.value
| DummyEntryPoint |
python | pennersr__django-allauth | allauth/headless/tokens/views.py | {
"start": 428,
"end": 1054
} | class ____(APIView):
input_class = RefreshTokenInput
def post(self, request: HttpRequest):
refresh_token = self.input.cleaned_data["refresh_token"]
strategy: AbstractTokenStrategy = app_settings.TOKEN_STRATEGY
at_rt = strategy.refresh_token(refresh_token)
if at_rt is None:
return ErrorResponse(request)
next_refresh_token: Optional[str]
access_token, next_refresh_token = at_rt
if next_refresh_token == refresh_token:
next_refresh_token = None
return RefreshTokenResponse(request, access_token, next_refresh_token)
| RefreshTokenView |
python | django__django | tests/backends/sqlite/tests.py | {
"start": 5455,
"end": 7720
} | class ____(TransactionTestCase):
available_apps = ["backends"]
def test_autoincrement(self):
"""
auto_increment fields are created with the AUTOINCREMENT keyword
in order to be monotonically increasing (#10164).
"""
with connection.schema_editor(collect_sql=True) as editor:
editor.create_model(Square)
statements = editor.collected_sql
match = re.search('"id" ([^,]+),', statements[0])
self.assertIsNotNone(match)
self.assertEqual(
"integer NOT NULL PRIMARY KEY AUTOINCREMENT",
match[1],
"Wrong SQL used to create an auto-increment column on SQLite",
)
def test_disable_constraint_checking_failure_disallowed(self):
"""
SQLite schema editor is not usable within an outer transaction if
foreign key constraint checks are not disabled beforehand.
"""
msg = (
"SQLite schema editor cannot be used while foreign key "
"constraint checks are enabled. Make sure to disable them "
"before entering a transaction.atomic() context because "
"SQLite does not support disabling them in the middle of "
"a multi-statement transaction."
)
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic(), connection.schema_editor(atomic=True):
pass
def test_constraint_checks_disabled_atomic_allowed(self):
"""
SQLite schema editor is usable within an outer transaction as long as
foreign key constraints checks are disabled beforehand.
"""
def constraint_checks_enabled():
with connection.cursor() as cursor:
return bool(cursor.execute("PRAGMA foreign_keys").fetchone()[0])
with connection.constraint_checks_disabled(), transaction.atomic():
with connection.schema_editor(atomic=True):
self.assertFalse(constraint_checks_enabled())
self.assertFalse(constraint_checks_enabled())
self.assertTrue(constraint_checks_enabled())
@unittest.skipUnless(connection.vendor == "sqlite", "Test only for SQLite")
@override_settings(DEBUG=True)
| SchemaTests |
python | Textualize__textual | docs/examples/widgets/selection_list_tuples.py | {
"start": 103,
"end": 769
} | class ____(App[None]):
CSS_PATH = "selection_list.tcss"
def compose(self) -> ComposeResult:
yield Header()
yield SelectionList[int]( # (1)!
("Falken's Maze", 0, True),
("Black Jack", 1),
("Gin Rummy", 2),
("Hearts", 3),
("Bridge", 4),
("Checkers", 5),
("Chess", 6, True),
("Poker", 7),
("Fighter Combat", 8, True),
)
yield Footer()
def on_mount(self) -> None:
self.query_one(SelectionList).border_title = "Shall we play some games?"
if __name__ == "__main__":
SelectionListApp().run()
| SelectionListApp |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_striphmtl.py | {
"start": 54,
"end": 1053
} | class ____(util.MdCase):
"""Test legacy stripping in HTML."""
extension = ['pymdownx.striphtml']
extension_configs = {}
def test_multiple_inline(self):
"""Test multiple inline."""
self.check_markdown(
r'''
Comments test:
<!-- BEGIN INCLUDE -->
- One
- Two
- Three
<!-- END INCLUDE -->
## Paragraph
<!-- BEGIN INCLUDE -->
- One
- Two
- Three
<!-- END INCLUDE -->
Comments test end
''',
r'''
<p>Comments test:</p>
<ul>
<li>One</li>
<li>Two</li>
<li>Three</li>
</ul>
<h2>Paragraph</h2>
<ul>
<li>One</li>
<li>Two</li>
<li>Three</li>
</ul>
<p>Comments test end</p>
''',
True
)
| TestStripHTML |
python | django__django | tests/gis_tests/relatedapp/models.py | {
"start": 1339,
"end": 1500
} | class ____(SimpleModel):
title = models.CharField(max_length=100)
author = models.ForeignKey(Author, models.SET_NULL, related_name="books", null=True)
| Book |
python | kamyu104__LeetCode-Solutions | Python/first-unique-character-in-a-string.py | {
"start": 66,
"end": 502
} | class ____(object):
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
lookup = defaultdict(int)
candidtates = set()
for i, c in enumerate(s):
if lookup[c]:
candidtates.discard(lookup[c])
else:
lookup[c] = i+1
candidtates.add(i+1)
return min(candidtates)-1 if candidtates else -1
| Solution |
python | sphinx-doc__sphinx | tests/roots/test-ext-viewcode/spam/mod2.py | {
"start": 127,
"end": 166
} | class ____:
"""this is Class2"""
| Class2 |
python | python-pillow__Pillow | Tests/test_image.py | {
"start": 37986,
"end": 38871
} | class ____:
@pytest.mark.parametrize("mode", Image.MODES)
def test_roundtrip_bytes_constructor(self, mode: str) -> None:
im = hopper(mode)
source_bytes = im.tobytes()
reloaded = Image.frombytes(mode, im.size, source_bytes)
assert reloaded.tobytes() == source_bytes
@pytest.mark.parametrize("mode", Image.MODES)
def test_roundtrip_bytes_method(self, mode: str) -> None:
im = hopper(mode)
source_bytes = im.tobytes()
reloaded = Image.new(mode, im.size)
reloaded.frombytes(source_bytes)
assert reloaded.tobytes() == source_bytes
@pytest.mark.parametrize("mode", Image.MODES)
def test_getdata_putdata(self, mode: str) -> None:
im = hopper(mode)
reloaded = Image.new(mode, im.size)
reloaded.putdata(im.getdata())
assert_image_equal(im, reloaded)
| TestImageBytes |
python | spyder-ide__spyder | spyder/plugins/run/api.py | {
"start": 7065,
"end": 7310
} | class ____(TypedDict):
"""Per run executor configuration parameters."""
# Dictionary that maps from parameter identifiers to the actual
# configuration.
params: Dict[str, ExtendedRunExecutionParameters]
| StoredRunExecutorParameters |
python | keras-team__keras | keras/src/metrics/confusion_metrics.py | {
"start": 6278,
"end": 7941
} | class ____(_ConfusionMatrixConditionCount):
"""Calculates the number of true negatives.
If `sample_weight` is given, calculates the sum of the weights of
true negatives. This metric creates one local variable, `accumulator`
that is used to keep track of the number of true negatives.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
thresholds: (Optional) Defaults to `0.5`. A float value, or a Python
list/tuple of float threshold values in `[0, 1]`. A threshold is
compared with prediction values to determine the truth value of
predictions (i.e., above the threshold is `True`, below is `False`).
If used with a loss function that sets `from_logits=True` (i.e. no
sigmoid applied to predictions), `thresholds` should be set to 0.
One metric value is generated for each threshold value.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Example:
>>> m = keras.metrics.TrueNegatives()
>>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])
>>> m.result()
2.0
>>> m.reset_state()
>>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])
>>> m.result()
1.0
"""
def __init__(self, thresholds=None, name=None, dtype=None):
super().__init__(
confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES,
thresholds=thresholds,
name=name,
dtype=dtype,
)
@keras_export("keras.metrics.TruePositives")
| TrueNegatives |
python | django__django | django/contrib/gis/db/models/aggregates.py | {
"start": 2610,
"end": 2920
} | class ____(GeoAggregate):
name = "Extent3D"
is_extent = "3D"
def __init__(self, expression, **extra):
super().__init__(expression, output_field=ExtentField(), **extra)
def convert_value(self, value, expression, connection):
return connection.ops.convert_extent3d(value)
| Extent3D |
python | networkx__networkx | networkx/algorithms/centrality/tests/test_katz_centrality.py | {
"start": 10727,
"end": 11240
} | class ____:
@classmethod
def setup_class(cls):
global np
np = pytest.importorskip("numpy")
pytest.importorskip("scipy")
def test_eigenvector_v_katz_random(self):
G = nx.gnp_random_graph(10, 0.5, seed=1234)
l = max(np.linalg.eigvals(nx.adjacency_matrix(G).todense()))
e = nx.eigenvector_centrality_numpy(G)
k = nx.katz_centrality_numpy(G, 1.0 / l)
for n in G:
assert e[n] == pytest.approx(k[n], abs=1e-7)
| TestKatzEigenvectorVKatz |
python | fsspec__filesystem_spec | fsspec/implementations/tests/memory/memory_test.py | {
"start": 349,
"end": 426
} | class ____(abstract.AbstractPipeTests, MemoryFixtures):
pass
| TestMemoryPipe |
python | django__django | django/db/migrations/operations/models.py | {
"start": 44444,
"end": 45901
} | class ____(IndexOperation):
category = OperationCategory.ALTERATION
option_name = "constraints"
def __init__(self, model_name, name, constraint):
self.model_name = model_name
self.name = name
self.constraint = constraint
def state_forwards(self, app_label, state):
state.alter_constraint(
app_label, self.model_name_lower, self.name, self.constraint
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
pass
def database_backwards(self, app_label, schema_editor, from_state, to_state):
pass
def deconstruct(self):
return (
self.__class__.__name__,
[],
{
"model_name": self.model_name,
"name": self.name,
"constraint": self.constraint,
},
)
def describe(self):
return f"Alter constraint {self.name} on {self.model_name}"
@property
def migration_name_fragment(self):
return "alter_%s_%s" % (self.model_name_lower, self.constraint.name.lower())
def reduce(self, operation, app_label):
if (
isinstance(operation, (AlterConstraint, RemoveConstraint))
and self.model_name_lower == operation.model_name_lower
and self.name == operation.name
):
return [operation]
return super().reduce(operation, app_label)
| AlterConstraint |
python | ray-project__ray | python/ray/train/tensorflow/keras.py | {
"start": 3134,
"end": 5495
} | class ____(_Callback):
def __init__(
self,
checkpoint_on: Union[str, List[str]] = "epoch_end",
report_metrics_on: Union[str, List[str]] = "epoch_end",
metrics: Optional[Union[str, List[str], Dict[str, str]]] = None,
):
if isinstance(checkpoint_on, str):
checkpoint_on = [checkpoint_on]
if isinstance(report_metrics_on, str):
report_metrics_on = [report_metrics_on]
on = list(set(checkpoint_on + report_metrics_on))
super().__init__(on=on)
self._checkpoint_on: List[str] = checkpoint_on
self._report_metrics_on: List[str] = report_metrics_on
self._metrics = metrics
def _get_reported_metrics(self, logs: Dict) -> Dict:
assert isinstance(self._metrics, (type(None), str, list, dict))
if self._metrics is None:
reported_metrics = logs
elif isinstance(self._metrics, str):
reported_metrics = {self._metrics: logs[self._metrics]}
elif isinstance(self._metrics, list):
reported_metrics = {metric: logs[metric] for metric in self._metrics}
elif isinstance(self._metrics, dict):
reported_metrics = {
key: logs[metric] for key, metric in self._metrics.items()
}
assert isinstance(reported_metrics, dict)
return reported_metrics
@abstractmethod
def _save_and_report_checkpoint(
self, metrics: Dict, checkpoint: TensorflowCheckpoint
):
"""Save checkpoint and report metrics corresonding to this checkpoint."""
raise NotImplementedError
@abstractmethod
def _report_metrics(self, metrics: Dict):
"""Report metrics."""
raise NotImplementedError
def _handle(self, logs: Dict, when: str):
assert when in self._checkpoint_on or when in self._report_metrics_on
metrics = self._get_reported_metrics(logs)
should_checkpoint = when in self._checkpoint_on
if should_checkpoint:
checkpoint = TensorflowCheckpoint.from_model(self.model)
self._save_and_report_checkpoint(metrics, checkpoint)
# Clean up temporary checkpoint
shutil.rmtree(checkpoint.path, ignore_errors=True)
else:
self._report_metrics(metrics)
@PublicAPI(stability="alpha")
| RayReportCallback |
python | ipython__ipython | IPython/utils/tempdir.py | {
"start": 350,
"end": 1264
} | class ____:
def __init__(self, filename, mode, bufsize=-1, add_to_syspath=False, **kwds):
"""
Open a file named `filename` in a temporary directory.
This context manager is preferred over `NamedTemporaryFile` in
stdlib `tempfile` when one needs to reopen the file.
Arguments `mode` and `bufsize` are passed to `open`.
Rest of the arguments are passed to `TemporaryDirectory`.
"""
self._tmpdir = TemporaryDirectory(**kwds)
path = Path(self._tmpdir.name) / filename
encoding = None if "b" in mode else "utf-8"
self.file = open(path, mode, bufsize, encoding=encoding)
def cleanup(self):
self.file.close()
self._tmpdir.cleanup()
__del__ = cleanup
def __enter__(self):
return self.file
def __exit__(self, type, value, traceback):
self.cleanup()
| NamedFileInTemporaryDirectory |
python | keras-team__keras | keras/src/legacy/layers.py | {
"start": 4673,
"end": 7406
} | class ____(Layer):
"""DEPRECATED."""
def __init__(self, factor, interpolation="bilinear", seed=None, **kwargs):
super().__init__(**kwargs)
self.seed_generator = backend.random.SeedGenerator(seed)
self.factor = factor
if isinstance(factor, (tuple, list)):
self.width_lower = factor[0]
self.width_upper = factor[1]
else:
self.width_lower = -factor
self.width_upper = factor
if self.width_upper < self.width_lower:
raise ValueError(
"`factor` argument cannot have an upper bound less than the "
f"lower bound. Received: factor={factor}"
)
if self.width_lower < -1.0 or self.width_upper < -1.0:
raise ValueError(
"`factor` argument must have values larger than -1. "
f"Received: factor={factor}"
)
self.interpolation = interpolation
self.seed = seed
def call(self, inputs, training=True):
inputs = tf.convert_to_tensor(inputs, dtype=self.compute_dtype)
def random_width_inputs(inputs):
"""Inputs width-adjusted with random ops."""
inputs_shape = tf.shape(inputs)
img_hd = inputs_shape[-3]
img_wd = tf.cast(inputs_shape[-2], tf.float32)
width_factor = backend.random.uniform(
shape=[],
minval=(1.0 + self.width_lower),
maxval=(1.0 + self.width_upper),
seed=self.seed_generator,
)
adjusted_width = tf.cast(width_factor * img_wd, tf.int32)
adjusted_size = tf.stack([img_hd, adjusted_width])
output = tf.image.resize(
images=inputs,
size=adjusted_size,
method=self.interpolation,
)
# tf.resize will output float32 regardless of input type.
output = tf.cast(output, self.compute_dtype)
output_shape = inputs.shape.as_list()
output_shape[-2] = None
output.set_shape(output_shape)
return output
if training:
return random_width_inputs(inputs)
else:
return inputs
def compute_output_shape(self, input_shape):
input_shape = list(input_shape)
input_shape[-2] = None
return tuple(input_shape)
def get_config(self):
config = {
"factor": self.factor,
"interpolation": self.interpolation,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
@keras_export("keras._legacy.layers.ThresholdedReLU")
| RandomWidth |
python | kamyu104__LeetCode-Solutions | Python/escape-a-large-maze.py | {
"start": 80,
"end": 1533
} | class ____(object):
def isEscapePossible(self, blocked, source, target):
"""
:type blocked: List[List[int]]
:type source: List[int]
:type target: List[int]
:rtype: bool
"""
R, C = 10**6, 10**6
directions = [(0, -1), (0, 1), (-1, 0), (1, 0)]
def bfs(blocks, source, target):
max_area_surrounded_by_blocks = len(blocks)*(len(blocks)-1)//2
lookup = set([source])
if len(lookup) > max_area_surrounded_by_blocks:
return True
q = collections.deque([source])
while q:
source = q.popleft()
if source == target:
return True
for direction in directions:
nr, nc = source[0]+direction[0], source[1]+direction[1]
if not ((0 <= nr < R) and
(0 <= nc < C) and
(nr, nc) not in lookup and
(nr, nc) not in blocks):
continue
lookup.add((nr, nc))
if len(lookup) > max_area_surrounded_by_blocks:
return True
q.append((nr, nc))
return False
return bfs(set(map(tuple, blocked)), tuple(source), tuple(target)) and \
bfs(set(map(tuple, blocked)), tuple(target), tuple(source))
| Solution |
python | encode__django-rest-framework | tests/test_testing.py | {
"start": 2469,
"end": 9802
} | class ____(TestCase):
def setUp(self):
self.client = APIClient()
def test_credentials(self):
"""
Setting `.credentials()` adds the required headers to each request.
"""
self.client.credentials(HTTP_AUTHORIZATION='example')
for _ in range(0, 3):
response = self.client.get('/view/')
assert response.data['auth'] == 'example'
def test_force_authenticate_with_user(self):
"""
Setting `.force_authenticate()` with a user forcibly authenticates each
request with that user.
"""
user = User.objects.create_user('example', 'example@example.com')
self.client.force_authenticate(user=user)
response = self.client.get('/view/')
assert response.data['user'] == 'example'
assert 'token' not in response.data
def test_force_authenticate_with_token(self):
"""
Setting `.force_authenticate()` with a token forcibly authenticates each
request with that token.
"""
user = User.objects.create_user('example', 'example@example.com')
token = Token.objects.create(key='xyz', user=user)
self.client.force_authenticate(token=token)
response = self.client.get('/view/')
assert response.data['token'] == 'xyz'
assert 'user' not in response.data
def test_force_authenticate_with_user_and_token(self):
"""
Setting `.force_authenticate()` with a user and token forcibly
authenticates each request with that user and token.
"""
user = User.objects.create_user('example', 'example@example.com')
token = Token.objects.create(key='xyz', user=user)
self.client.force_authenticate(user=user, token=token)
response = self.client.get('/view/')
assert response.data['user'] == 'example'
assert response.data['token'] == 'xyz'
def test_force_authenticate_with_sessions(self):
"""
Setting `.force_authenticate()` forcibly authenticates each request.
"""
user = User.objects.create_user('example', 'example@example.com')
self.client.force_authenticate(user)
# First request does not yet have an active session
response = self.client.get('/session-view/')
assert response.data['active_session'] is False
# Subsequent requests have an active session
response = self.client.get('/session-view/')
assert response.data['active_session'] is True
# Force authenticating with `None` user and token should also logout
# the user session.
self.client.force_authenticate(user=None, token=None)
response = self.client.get('/session-view/')
assert response.data['active_session'] is False
def test_csrf_exempt_by_default(self):
"""
By default, the test client is CSRF exempt.
"""
User.objects.create_user('example', 'example@example.com', 'password')
self.client.login(username='example', password='password')
response = self.client.post('/view/')
assert response.status_code == 200
def test_explicitly_enforce_csrf_checks(self):
"""
The test client can enforce CSRF checks.
"""
client = APIClient(enforce_csrf_checks=True)
User.objects.create_user('example', 'example@example.com', 'password')
client.login(username='example', password='password')
response = client.post('/view/')
expected = {'detail': 'CSRF Failed: CSRF cookie not set.'}
assert response.status_code == 403
assert response.data == expected
def test_can_logout(self):
"""
`logout()` resets stored credentials
"""
self.client.credentials(HTTP_AUTHORIZATION='example')
response = self.client.get('/view/')
assert response.data['auth'] == 'example'
self.client.logout()
response = self.client.get('/view/')
assert response.data['auth'] == b''
def test_logout_resets_force_authenticate(self):
"""
`logout()` resets any `force_authenticate`
"""
user = User.objects.create_user('example', 'example@example.com', 'password')
self.client.force_authenticate(user)
response = self.client.get('/view/')
assert response.data['user'] == 'example'
self.client.logout()
response = self.client.get('/view/')
assert response.data['user'] == ''
def test_follow_redirect(self):
"""
Follow redirect by setting follow argument.
"""
for method in ('get', 'post', 'put', 'patch', 'delete', 'options'):
with self.subTest(method=method):
req_method = getattr(self.client, method)
response = req_method('/redirect-view/')
assert response.status_code == 302
response = req_method('/redirect-view/', follow=True)
assert response.redirect_chain is not None
assert response.status_code == 200
def test_follow_307_308_preserve_kwargs(self, *mocked_methods):
"""
Follow redirect by setting follow argument, and make sure the following
method called with appropriate kwargs.
"""
methods = ('get', 'post', 'put', 'patch', 'delete', 'options')
codes = (307, 308)
for method, code in itertools.product(methods, codes):
subtest_ctx = self.subTest(method=method, code=code)
patch_ctx = patch.object(self.client, method, side_effect=getattr(self.client, method))
with subtest_ctx, patch_ctx as req_method:
kwargs = {'data': {'example': 'test'}, 'format': 'json'}
response = req_method('/redirect-view/%s/' % code, follow=True, **kwargs)
assert response.redirect_chain is not None
assert response.status_code == 200
for _, call_args, call_kwargs in req_method.mock_calls:
assert all(call_kwargs[k] == kwargs[k] for k in kwargs if k in call_kwargs)
def test_invalid_multipart_data(self):
"""
MultiPart encoding cannot support nested data, so raise a helpful
error if the user attempts to do so.
"""
self.assertRaises(
AssertionError, self.client.post,
path='/view/', data={'valid': 123, 'invalid': {'a': 123}}
)
def test_empty_post_uses_default_boolean_value(self):
response = self.client.post(
'/post-view/',
data=None,
content_type='application/json'
)
assert response.status_code == 200
assert response.data == {"flag": True}
def test_post_encodes_data_based_on_json_content_type(self):
data = {'data': True}
response = self.client.post(
'/post-json-view/',
data=data,
content_type='application/json'
)
assert response.status_code == 200
assert response.data == data
def test_delete_based_on_format(self):
response = self.client.delete('/delete-json-view/', format='json')
assert response.status_code == status.HTTP_204_NO_CONTENT
assert response.data is None
| TestAPITestClient |
python | openai__openai-python | src/openai/_extras/numpy_proxy.py | {
"start": 340,
"end": 805
} | class ____(LazyProxy[Any]):
@override
def __load__(self) -> Any:
try:
import numpy
except ImportError as err:
raise MissingDependencyError(NUMPY_INSTRUCTIONS) from err
return numpy
if not TYPE_CHECKING:
numpy = NumpyProxy()
def has_numpy() -> bool:
try:
import numpy # noqa: F401 # pyright: ignore[reportUnusedImport]
except ImportError:
return False
return True
| NumpyProxy |
python | huggingface__transformers | tests/models/informer/test_modeling_informer.py | {
"start": 19398,
"end": 22764
} | class ____(unittest.TestCase):
def test_inference_no_head(self):
model = InformerModel.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device)
batch = prepare_batch()
torch.manual_seed(0)
with torch.no_grad():
output = model(
past_values=batch["past_values"],
past_time_features=batch["past_time_features"],
past_observed_mask=batch["past_observed_mask"],
static_categorical_features=batch["static_categorical_features"],
future_values=batch["future_values"],
future_time_features=batch["future_time_features"],
).last_hidden_state
expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[0.4699, 0.7295, 0.8967], [0.4858, 0.3810, 0.9641], [-0.0233, 0.3608, 1.0303]],
device=torch_device,
)
torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_inference_head(self):
model = InformerForPrediction.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device)
batch = prepare_batch("val-batch.pt")
torch.manual_seed(0)
with torch.no_grad():
output = model(
past_values=batch["past_values"],
past_time_features=batch["past_time_features"],
past_observed_mask=batch["past_observed_mask"],
static_categorical_features=batch["static_categorical_features"],
future_time_features=batch["future_time_features"],
).encoder_last_hidden_state
# encoder distils the context length to 1/8th of the original length
expected_shape = torch.Size((64, model.config.context_length // 8, model.config.d_model))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[0.4170, 0.9067, 0.8153], [0.3004, 0.7574, 0.7066], [0.6803, -0.6323, 1.2802]], device=torch_device
)
torch.testing.assert_close(output[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_seq_to_seq_generation(self):
model = InformerForPrediction.from_pretrained("huggingface/informer-tourism-monthly").to(torch_device)
batch = prepare_batch("val-batch.pt")
torch.manual_seed(0)
with torch.no_grad():
outputs = model.generate(
static_categorical_features=batch["static_categorical_features"],
past_time_features=batch["past_time_features"],
past_values=batch["past_values"],
future_time_features=batch["future_time_features"],
past_observed_mask=batch["past_observed_mask"],
)
expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
self.assertEqual(outputs.sequences.shape, expected_shape)
expected_slice = torch.tensor([3400.8005, 4289.2637, 7101.9209], device=torch_device)
mean_prediction = outputs.sequences.mean(dim=1)
torch.testing.assert_close(mean_prediction[0, -3:], expected_slice, rtol=1e-1, atol=1e-1)
| InformerModelIntegrationTests |
python | getsentry__responses | responses/tests/test_recorder.py | {
"start": 5534,
"end": 7793
} | class ____:
def setup_method(self):
self.out_file = Path("response_record")
def teardown_method(self):
if self.out_file.exists():
self.out_file.unlink()
assert not self.out_file.exists()
@pytest.mark.parametrize("parser", (yaml, tomli_w))
def test_add_from_file(self, parser): # type: ignore[misc]
if parser == yaml:
with open(self.out_file, "w") as file:
parser.dump(get_data("example.com", "8080"), file)
else:
with open(self.out_file, "wb") as file: # type: ignore[assignment]
parser.dump(get_data("example.com", "8080"), file)
@responses.activate
def run():
responses.patch("http://httpbin.org")
if parser == tomli_w:
def _parse_resp_f(file_path):
with open(file_path, "rb") as file:
data = _toml.load(file)
return data
responses.mock._parse_response_file = _parse_resp_f # type: ignore[method-assign]
responses._add_from_file(file_path=self.out_file)
responses.post("http://httpbin.org/form")
assert responses.registered()[0].url == "http://httpbin.org/"
assert responses.registered()[1].url == "http://example.com:8080/404"
assert (
responses.registered()[2].url == "http://example.com:8080/status/wrong"
)
assert responses.registered()[3].url == "http://example.com:8080/500"
assert responses.registered()[4].url == "http://example.com:8080/202"
assert responses.registered()[5].url == "http://httpbin.org/form"
assert responses.registered()[0].method == "PATCH"
assert responses.registered()[2].method == "GET"
assert responses.registered()[4].method == "PUT"
assert responses.registered()[5].method == "POST"
assert responses.registered()[2].status == 400
assert responses.registered()[3].status == 500
assert responses.registered()[3].body == "500 Internal Server Error"
assert responses.registered()[3].content_type == "text/plain"
run()
| TestReplay |
python | huggingface__transformers | src/transformers/models/mimi/modeling_mimi.py | {
"start": 8226,
"end": 9195
} | class ____(ModelOutput):
r"""
audio_values (`torch.FloatTensor` of shape `(batch_size, segment_length)`, *optional*):
Decoded audio values, obtained using the decoder part of Mimi.
decoder_past_key_values (`Cache`, *optional*):
Pre-computed hidden-states (key and values in the self-attention blocks) that can be used to speed up sequential decoding of the decoder transformer.
This typically consists in the `past_key_values` returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
The model will output the same cache format that is fed as input.
If `past_key_values` are used, the user can optionally input only the last `audio_values` or `audio_codes (those that don't
have their past key value states given to this model).
"""
audio_values: Optional[torch.FloatTensor] = None
decoder_past_key_values: Optional[Cache] = None
| MimiDecoderOutput |
python | falconry__falcon | tests/test_cmd_inspect_app.py | {
"start": 602,
"end": 1049
} | class ____:
async def on_get(self, req, resp):
resp.text = 'Test\n'
resp.status = '200 OK'
def create_app(asgi):
app_cls = falcon.asgi.App if asgi else App
return app_cls()
def make_app(asgi=False):
app = create_app(asgi)
app.add_route('/test', DummyResourceAsync() if asgi else DummyResource())
return app
_APP = make_app()
@pytest.fixture
def app(asgi):
return make_app(asgi)
| DummyResourceAsync |
python | scrapy__scrapy | tests/test_downloadermiddleware_cookies.py | {
"start": 1996,
"end": 30475
} | class ____:
def assertCookieValEqual(self, first, second, msg=None):
def split_cookies(cookies):
return sorted([s.strip() for s in to_bytes(cookies).split(b";")])
assert split_cookies(first) == split_cookies(second), msg
def setup_method(self):
crawler = get_crawler(DefaultSpider)
crawler.spider = crawler._create_spider()
self.mw = CookiesMiddleware.from_crawler(crawler)
self.redirect_middleware = RedirectMiddleware.from_crawler(crawler)
def teardown_method(self):
del self.mw
del self.redirect_middleware
def test_basic(self):
req = Request("http://scrapytest.org/")
assert self.mw.process_request(req) is None
assert "Cookie" not in req.headers
headers = {"Set-Cookie": "C1=value1; path=/"}
res = Response("http://scrapytest.org/", headers=headers)
assert self.mw.process_response(req, res) is res
req2 = Request("http://scrapytest.org/sub1/")
assert self.mw.process_request(req2) is None
assert req2.headers.get("Cookie") == b"C1=value1"
def test_setting_false_cookies_enabled(self):
with pytest.raises(NotConfigured):
CookiesMiddleware.from_crawler(
get_crawler(settings_dict={"COOKIES_ENABLED": False})
)
def test_setting_default_cookies_enabled(self):
assert isinstance(
CookiesMiddleware.from_crawler(get_crawler()), CookiesMiddleware
)
def test_setting_true_cookies_enabled(self):
assert isinstance(
CookiesMiddleware.from_crawler(
get_crawler(settings_dict={"COOKIES_ENABLED": True})
),
CookiesMiddleware,
)
def test_setting_enabled_cookies_debug(self):
crawler = get_crawler(settings_dict={"COOKIES_DEBUG": True})
mw = CookiesMiddleware.from_crawler(crawler)
with LogCapture(
"scrapy.downloadermiddlewares.cookies",
propagate=False,
level=logging.DEBUG,
) as log:
req = Request("http://scrapytest.org/")
res = Response(
"http://scrapytest.org/", headers={"Set-Cookie": "C1=value1; path=/"}
)
mw.process_response(req, res)
req2 = Request("http://scrapytest.org/sub1/")
mw.process_request(req2)
log.check(
(
"scrapy.downloadermiddlewares.cookies",
"DEBUG",
"Received cookies from: <200 http://scrapytest.org/>\n"
"Set-Cookie: C1=value1; path=/\n",
),
(
"scrapy.downloadermiddlewares.cookies",
"DEBUG",
"Sending cookies to: <GET http://scrapytest.org/sub1/>\n"
"Cookie: C1=value1\n",
),
)
def test_setting_disabled_cookies_debug(self):
crawler = get_crawler(settings_dict={"COOKIES_DEBUG": False})
mw = CookiesMiddleware.from_crawler(crawler)
with LogCapture(
"scrapy.downloadermiddlewares.cookies",
propagate=False,
level=logging.DEBUG,
) as log:
req = Request("http://scrapytest.org/")
res = Response(
"http://scrapytest.org/", headers={"Set-Cookie": "C1=value1; path=/"}
)
mw.process_response(req, res)
req2 = Request("http://scrapytest.org/sub1/")
mw.process_request(req2)
log.check()
def test_do_not_break_on_non_utf8_header(self):
req = Request("http://scrapytest.org/")
assert self.mw.process_request(req) is None
assert "Cookie" not in req.headers
headers = {"Set-Cookie": b"C1=in\xa3valid; path=/", "Other": b"ignore\xa3me"}
res = Response("http://scrapytest.org/", headers=headers)
assert self.mw.process_response(req, res) is res
req2 = Request("http://scrapytest.org/sub1/")
assert self.mw.process_request(req2) is None
assert "Cookie" in req2.headers
def test_dont_merge_cookies(self):
# merge some cookies into jar
headers = {"Set-Cookie": "C1=value1; path=/"}
req = Request("http://scrapytest.org/")
res = Response("http://scrapytest.org/", headers=headers)
assert self.mw.process_response(req, res) is res
# test Cookie header is not seted to request
req = Request("http://scrapytest.org/dontmerge", meta={"dont_merge_cookies": 1})
assert self.mw.process_request(req) is None
assert "Cookie" not in req.headers
# check that returned cookies are not merged back to jar
res = Response(
"http://scrapytest.org/dontmerge",
headers={"Set-Cookie": "dont=mergeme; path=/"},
)
assert self.mw.process_response(req, res) is res
# check that cookies are merged back
req = Request("http://scrapytest.org/mergeme")
assert self.mw.process_request(req) is None
assert req.headers.get("Cookie") == b"C1=value1"
# check that cookies are merged when dont_merge_cookies is passed as 0
req = Request("http://scrapytest.org/mergeme", meta={"dont_merge_cookies": 0})
assert self.mw.process_request(req) is None
assert req.headers.get("Cookie") == b"C1=value1"
def test_complex_cookies(self):
# merge some cookies into jar
cookies = [
{
"name": "C1",
"value": "value1",
"path": "/foo",
"domain": "scrapytest.org",
},
{
"name": "C2",
"value": "value2",
"path": "/bar",
"domain": "scrapytest.org",
},
{
"name": "C3",
"value": "value3",
"path": "/foo",
"domain": "scrapytest.org",
},
{"name": "C4", "value": "value4", "path": "/foo", "domain": "scrapy.org"},
]
req = Request("http://scrapytest.org/", cookies=cookies)
self.mw.process_request(req)
# embed C1 and C3 for scrapytest.org/foo
req = Request("http://scrapytest.org/foo")
self.mw.process_request(req)
assert req.headers.get("Cookie") in (
b"C1=value1; C3=value3",
b"C3=value3; C1=value1",
)
# embed C2 for scrapytest.org/bar
req = Request("http://scrapytest.org/bar")
self.mw.process_request(req)
assert req.headers.get("Cookie") == b"C2=value2"
# embed nothing for scrapytest.org/baz
req = Request("http://scrapytest.org/baz")
self.mw.process_request(req)
assert "Cookie" not in req.headers
def test_merge_request_cookies(self):
req = Request("http://scrapytest.org/", cookies={"galleta": "salada"})
assert self.mw.process_request(req) is None
assert req.headers.get("Cookie") == b"galleta=salada"
headers = {"Set-Cookie": "C1=value1; path=/"}
res = Response("http://scrapytest.org/", headers=headers)
assert self.mw.process_response(req, res) is res
req2 = Request("http://scrapytest.org/sub1/")
assert self.mw.process_request(req2) is None
self.assertCookieValEqual(
req2.headers.get("Cookie"), b"C1=value1; galleta=salada"
)
def test_cookiejar_key(self):
req = Request(
"http://scrapytest.org/",
cookies={"galleta": "salada"},
meta={"cookiejar": "store1"},
)
assert self.mw.process_request(req) is None
assert req.headers.get("Cookie") == b"galleta=salada"
headers = {"Set-Cookie": "C1=value1; path=/"}
res = Response("http://scrapytest.org/", headers=headers, request=req)
assert self.mw.process_response(req, res) is res
req2 = Request("http://scrapytest.org/", meta=res.meta)
assert self.mw.process_request(req2) is None
self.assertCookieValEqual(
req2.headers.get("Cookie"), b"C1=value1; galleta=salada"
)
req3 = Request(
"http://scrapytest.org/",
cookies={"galleta": "dulce"},
meta={"cookiejar": "store2"},
)
assert self.mw.process_request(req3) is None
assert req3.headers.get("Cookie") == b"galleta=dulce"
headers = {"Set-Cookie": "C2=value2; path=/"}
res2 = Response("http://scrapytest.org/", headers=headers, request=req3)
assert self.mw.process_response(req3, res2) is res2
req4 = Request("http://scrapytest.org/", meta=res2.meta)
assert self.mw.process_request(req4) is None
self.assertCookieValEqual(
req4.headers.get("Cookie"), b"C2=value2; galleta=dulce"
)
# cookies from hosts with port
req5_1 = Request("http://scrapytest.org:1104/")
assert self.mw.process_request(req5_1) is None
headers = {"Set-Cookie": "C1=value1; path=/"}
res5_1 = Response(
"http://scrapytest.org:1104/", headers=headers, request=req5_1
)
assert self.mw.process_response(req5_1, res5_1) is res5_1
req5_2 = Request("http://scrapytest.org:1104/some-redirected-path")
assert self.mw.process_request(req5_2) is None
assert req5_2.headers.get("Cookie") == b"C1=value1"
req5_3 = Request("http://scrapytest.org/some-redirected-path")
assert self.mw.process_request(req5_3) is None
assert req5_3.headers.get("Cookie") == b"C1=value1"
# skip cookie retrieval for not http request
req6 = Request("file:///scrapy/sometempfile")
assert self.mw.process_request(req6) is None
assert req6.headers.get("Cookie") is None
def test_local_domain(self):
request = Request("http://example-host/", cookies={"currencyCookie": "USD"})
assert self.mw.process_request(request) is None
assert "Cookie" in request.headers
assert request.headers["Cookie"] == b"currencyCookie=USD"
@pytest.mark.xfail(reason="Cookie header is not currently being processed")
def test_keep_cookie_from_default_request_headers_middleware(self):
DEFAULT_REQUEST_HEADERS = {"Cookie": "default=value; asdf=qwerty"}
mw_default_headers = DefaultHeadersMiddleware(DEFAULT_REQUEST_HEADERS.items())
# overwrite with values from 'cookies' request argument
req1 = Request("http://example.org", cookies={"default": "something"})
assert mw_default_headers.process_request(req1) is None
assert self.mw.process_request(req1) is None
self.assertCookieValEqual(
req1.headers["Cookie"], b"default=something; asdf=qwerty"
)
# keep both
req2 = Request("http://example.com", cookies={"a": "b"})
assert mw_default_headers.process_request(req2) is None
assert self.mw.process_request(req2) is None
self.assertCookieValEqual(
req2.headers["Cookie"], b"default=value; a=b; asdf=qwerty"
)
@pytest.mark.xfail(reason="Cookie header is not currently being processed")
def test_keep_cookie_header(self):
# keep only cookies from 'Cookie' request header
req1 = Request("http://scrapytest.org", headers={"Cookie": "a=b; c=d"})
assert self.mw.process_request(req1) is None
self.assertCookieValEqual(req1.headers["Cookie"], "a=b; c=d")
# keep cookies from both 'Cookie' request header and 'cookies' keyword
req2 = Request(
"http://scrapytest.org", headers={"Cookie": "a=b; c=d"}, cookies={"e": "f"}
)
assert self.mw.process_request(req2) is None
self.assertCookieValEqual(req2.headers["Cookie"], "a=b; c=d; e=f")
# overwrite values from 'Cookie' request header with 'cookies' keyword
req3 = Request(
"http://scrapytest.org",
headers={"Cookie": "a=b; c=d"},
cookies={"a": "new", "e": "f"},
)
assert self.mw.process_request(req3) is None
self.assertCookieValEqual(req3.headers["Cookie"], "a=new; c=d; e=f")
def test_request_cookies_encoding(self):
# 1) UTF8-encoded bytes
req1 = Request("http://example.org", cookies={"a": "á".encode()})
assert self.mw.process_request(req1) is None
self.assertCookieValEqual(req1.headers["Cookie"], b"a=\xc3\xa1")
# 2) Non UTF8-encoded bytes
req2 = Request("http://example.org", cookies={"a": "á".encode("latin1")})
assert self.mw.process_request(req2) is None
self.assertCookieValEqual(req2.headers["Cookie"], b"a=\xc3\xa1")
# 3) String
req3 = Request("http://example.org", cookies={"a": "á"})
assert self.mw.process_request(req3) is None
self.assertCookieValEqual(req3.headers["Cookie"], b"a=\xc3\xa1")
@pytest.mark.xfail(reason="Cookie header is not currently being processed")
def test_request_headers_cookie_encoding(self):
# 1) UTF8-encoded bytes
req1 = Request("http://example.org", headers={"Cookie": "a=á".encode()})
assert self.mw.process_request(req1) is None
self.assertCookieValEqual(req1.headers["Cookie"], b"a=\xc3\xa1")
# 2) Non UTF8-encoded bytes
req2 = Request("http://example.org", headers={"Cookie": "a=á".encode("latin1")})
assert self.mw.process_request(req2) is None
self.assertCookieValEqual(req2.headers["Cookie"], b"a=\xc3\xa1")
# 3) String
req3 = Request("http://example.org", headers={"Cookie": "a=á"})
assert self.mw.process_request(req3) is None
self.assertCookieValEqual(req3.headers["Cookie"], b"a=\xc3\xa1")
def test_invalid_cookies(self):
"""
Invalid cookies are logged as warnings and discarded
"""
with LogCapture(
"scrapy.downloadermiddlewares.cookies",
propagate=False,
level=logging.INFO,
) as lc:
cookies1 = [{"value": "bar"}, {"name": "key", "value": "value1"}]
req1 = Request("http://example.org/1", cookies=cookies1)
assert self.mw.process_request(req1) is None
cookies2 = [{"name": "foo"}, {"name": "key", "value": "value2"}]
req2 = Request("http://example.org/2", cookies=cookies2)
assert self.mw.process_request(req2) is None
cookies3 = [{"name": "foo", "value": None}, {"name": "key", "value": ""}]
req3 = Request("http://example.org/3", cookies=cookies3)
assert self.mw.process_request(req3) is None
lc.check(
(
"scrapy.downloadermiddlewares.cookies",
"WARNING",
"Invalid cookie found in request <GET http://example.org/1>:"
" {'value': 'bar', 'secure': False} ('name' is missing)",
),
(
"scrapy.downloadermiddlewares.cookies",
"WARNING",
"Invalid cookie found in request <GET http://example.org/2>:"
" {'name': 'foo', 'secure': False} ('value' is missing)",
),
(
"scrapy.downloadermiddlewares.cookies",
"WARNING",
"Invalid cookie found in request <GET http://example.org/3>:"
" {'name': 'foo', 'value': None, 'secure': False} ('value' is missing)",
),
)
self.assertCookieValEqual(req1.headers["Cookie"], "key=value1")
self.assertCookieValEqual(req2.headers["Cookie"], "key=value2")
self.assertCookieValEqual(req3.headers["Cookie"], "key=")
def test_primitive_type_cookies(self):
# Boolean
req1 = Request("http://example.org", cookies={"a": True})
assert self.mw.process_request(req1) is None
self.assertCookieValEqual(req1.headers["Cookie"], b"a=True")
# Float
req2 = Request("http://example.org", cookies={"a": 9.5})
assert self.mw.process_request(req2) is None
self.assertCookieValEqual(req2.headers["Cookie"], b"a=9.5")
# Integer
req3 = Request("http://example.org", cookies={"a": 10})
assert self.mw.process_request(req3) is None
self.assertCookieValEqual(req3.headers["Cookie"], b"a=10")
# String
req4 = Request("http://example.org", cookies={"a": "b"})
assert self.mw.process_request(req4) is None
self.assertCookieValEqual(req4.headers["Cookie"], b"a=b")
def _test_cookie_redirect(
self,
source,
target,
*,
cookies1,
cookies2,
):
input_cookies = {"a": "b"}
if not isinstance(source, dict):
source = {"url": source}
if not isinstance(target, dict):
target = {"url": target}
target.setdefault("status", 301)
request1 = Request(cookies=input_cookies, **source)
self.mw.process_request(request1)
cookies = request1.headers.get("Cookie")
assert cookies == (b"a=b" if cookies1 else None)
response = Response(
headers={
"Location": target["url"],
},
**target,
)
assert self.mw.process_response(request1, response) == response
request2 = self.redirect_middleware.process_response(request1, response)
assert isinstance(request2, Request)
self.mw.process_request(request2)
cookies = request2.headers.get("Cookie")
assert cookies == (b"a=b" if cookies2 else None)
def test_cookie_redirect_same_domain(self):
self._test_cookie_redirect(
"https://toscrape.com",
"https://toscrape.com",
cookies1=True,
cookies2=True,
)
def test_cookie_redirect_same_domain_forcing_get(self):
self._test_cookie_redirect(
"https://toscrape.com",
{"url": "https://toscrape.com", "status": 302},
cookies1=True,
cookies2=True,
)
def test_cookie_redirect_different_domain(self):
self._test_cookie_redirect(
"https://toscrape.com",
"https://example.com",
cookies1=True,
cookies2=False,
)
def test_cookie_redirect_different_domain_forcing_get(self):
self._test_cookie_redirect(
"https://toscrape.com",
{"url": "https://example.com", "status": 302},
cookies1=True,
cookies2=False,
)
def _test_cookie_header_redirect(
self,
source,
target,
*,
cookies2,
):
"""Test the handling of a user-defined Cookie header when building a
redirect follow-up request.
We follow RFC 6265 for cookie handling. The Cookie header can only
contain a list of key-value pairs (i.e. no additional cookie
parameters like Domain or Path). Because of that, we follow the same
rules that we would follow for the handling of the Set-Cookie response
header when the Domain is not set: the cookies must be limited to the
target URL domain (not even subdomains can receive those cookies).
.. note:: This method tests the scenario where the cookie middleware is
disabled. Because of known issue #1992, when the cookies
middleware is enabled we do not need to be concerned about
the Cookie header getting leaked to unintended domains,
because the middleware empties the header from every request.
"""
if not isinstance(source, dict):
source = {"url": source}
if not isinstance(target, dict):
target = {"url": target}
target.setdefault("status", 301)
request1 = Request(headers={"Cookie": b"a=b"}, **source)
response = Response(
headers={
"Location": target["url"],
},
**target,
)
request2 = self.redirect_middleware.process_response(request1, response)
assert isinstance(request2, Request)
cookies = request2.headers.get("Cookie")
assert cookies == (b"a=b" if cookies2 else None)
def test_cookie_header_redirect_same_domain(self):
self._test_cookie_header_redirect(
"https://toscrape.com",
"https://toscrape.com",
cookies2=True,
)
def test_cookie_header_redirect_same_domain_forcing_get(self):
self._test_cookie_header_redirect(
"https://toscrape.com",
{"url": "https://toscrape.com", "status": 302},
cookies2=True,
)
def test_cookie_header_redirect_different_domain(self):
self._test_cookie_header_redirect(
"https://toscrape.com",
"https://example.com",
cookies2=False,
)
def test_cookie_header_redirect_different_domain_forcing_get(self):
self._test_cookie_header_redirect(
"https://toscrape.com",
{"url": "https://example.com", "status": 302},
cookies2=False,
)
def _test_user_set_cookie_domain_followup(
self,
url1,
url2,
domain,
*,
cookies1,
cookies2,
):
input_cookies = [
{
"name": "a",
"value": "b",
"domain": domain,
}
]
request1 = Request(url1, cookies=input_cookies)
self.mw.process_request(request1)
cookies = request1.headers.get("Cookie")
assert cookies == (b"a=b" if cookies1 else None)
request2 = Request(url2)
self.mw.process_request(request2)
cookies = request2.headers.get("Cookie")
assert cookies == (b"a=b" if cookies2 else None)
def test_user_set_cookie_domain_suffix_private(self):
self._test_user_set_cookie_domain_followup(
"https://books.toscrape.com",
"https://quotes.toscrape.com",
"toscrape.com",
cookies1=True,
cookies2=True,
)
def test_user_set_cookie_domain_suffix_public_period(self):
self._test_user_set_cookie_domain_followup(
"https://foo.co.uk",
"https://bar.co.uk",
"co.uk",
cookies1=False,
cookies2=False,
)
def test_user_set_cookie_domain_suffix_public_private(self):
self._test_user_set_cookie_domain_followup(
"https://foo.blogspot.com",
"https://bar.blogspot.com",
"blogspot.com",
cookies1=False,
cookies2=False,
)
def test_user_set_cookie_domain_public_period(self):
self._test_user_set_cookie_domain_followup(
"https://co.uk",
"https://co.uk",
"co.uk",
cookies1=True,
cookies2=True,
)
def _test_server_set_cookie_domain_followup(
self,
url1,
url2,
domain,
*,
cookies,
):
request1 = Request(url1)
self.mw.process_request(request1)
input_cookies = [
{
"name": "a",
"value": "b",
"domain": domain,
}
]
headers = {
"Set-Cookie": _cookies_to_set_cookie_list(input_cookies),
}
response = Response(url1, status=200, headers=headers)
assert self.mw.process_response(request1, response) == response
request2 = Request(url2)
self.mw.process_request(request2)
actual_cookies = request2.headers.get("Cookie")
assert actual_cookies == (b"a=b" if cookies else None)
def test_server_set_cookie_domain_suffix_private(self):
self._test_server_set_cookie_domain_followup(
"https://books.toscrape.com",
"https://quotes.toscrape.com",
"toscrape.com",
cookies=True,
)
def test_server_set_cookie_domain_suffix_public_period(self):
self._test_server_set_cookie_domain_followup(
"https://foo.co.uk",
"https://bar.co.uk",
"co.uk",
cookies=False,
)
def test_server_set_cookie_domain_suffix_public_private(self):
self._test_server_set_cookie_domain_followup(
"https://foo.blogspot.com",
"https://bar.blogspot.com",
"blogspot.com",
cookies=False,
)
def test_server_set_cookie_domain_public_period(self):
self._test_server_set_cookie_domain_followup(
"https://co.uk",
"https://co.uk",
"co.uk",
cookies=True,
)
def _test_cookie_redirect_scheme_change(
self, secure, from_scheme, to_scheme, cookies1, cookies2, cookies3
):
"""When a redirect causes the URL scheme to change from *from_scheme*
to *to_scheme*, while domain and port remain the same, and given a
cookie on the initial request with its secure attribute set to
*secure*, check if the cookie should be set on the Cookie header of the
initial request (*cookies1*), if it should be kept by the redirect
middleware (*cookies2*), and if it should be present on the Cookie
header in the redirected request (*cookie3*)."""
cookie_kwargs = {}
if secure is not UNSET:
cookie_kwargs["secure"] = secure
input_cookies = [{"name": "a", "value": "b", **cookie_kwargs}]
request1 = Request(f"{from_scheme}://a.example", cookies=input_cookies)
self.mw.process_request(request1)
cookies = request1.headers.get("Cookie")
assert cookies == (b"a=b" if cookies1 else None)
response = Response(
f"{from_scheme}://a.example",
headers={"Location": f"{to_scheme}://a.example"},
status=301,
)
assert self.mw.process_response(request1, response) == response
request2 = self.redirect_middleware.process_response(request1, response)
assert isinstance(request2, Request)
cookies = request2.headers.get("Cookie")
assert cookies == (b"a=b" if cookies2 else None)
self.mw.process_request(request2)
cookies = request2.headers.get("Cookie")
assert cookies == (b"a=b" if cookies3 else None)
def test_cookie_redirect_secure_undefined_downgrade(self):
self._test_cookie_redirect_scheme_change(
secure=UNSET,
from_scheme="https",
to_scheme="http",
cookies1=True,
cookies2=False,
cookies3=False,
)
def test_cookie_redirect_secure_undefined_upgrade(self):
self._test_cookie_redirect_scheme_change(
secure=UNSET,
from_scheme="http",
to_scheme="https",
cookies1=True,
cookies2=True,
cookies3=True,
)
def test_cookie_redirect_secure_false_downgrade(self):
self._test_cookie_redirect_scheme_change(
secure=False,
from_scheme="https",
to_scheme="http",
cookies1=True,
cookies2=False,
cookies3=True,
)
def test_cookie_redirect_secure_false_upgrade(self):
self._test_cookie_redirect_scheme_change(
secure=False,
from_scheme="http",
to_scheme="https",
cookies1=True,
cookies2=True,
cookies3=True,
)
def test_cookie_redirect_secure_true_downgrade(self):
self._test_cookie_redirect_scheme_change(
secure=True,
from_scheme="https",
to_scheme="http",
cookies1=True,
cookies2=False,
cookies3=False,
)
def test_cookie_redirect_secure_true_upgrade(self):
self._test_cookie_redirect_scheme_change(
secure=True,
from_scheme="http",
to_scheme="https",
cookies1=False,
cookies2=False,
cookies3=True,
)
| TestCookiesMiddleware |
python | pyparsing__pyparsing | tests/test_unit.py | {
"start": 395747,
"end": 402077
} | class ____(ppt.TestParseResultsAsserts, TestCase):
"""
Tests for recursive parsing
"""
suite_context = None
save_suite_context = None
def setUp(self):
recursion_suite_context.restore()
def tearDown(self):
default_suite_context.restore()
def test_repeat_as_recurse(self):
"""repetition rules formulated with recursion"""
one_or_more = pp.Forward().set_name("one_or_more")
one_or_more <<= one_or_more + "a" | "a"
self.assertParseResultsEquals(
one_or_more.parse_string("a", parse_all=True), expected_list=["a"]
)
self.assertParseResultsEquals(
one_or_more.parse_string("aaa aa", parse_all=True),
expected_list=["a", "a", "a", "a", "a"],
)
DelimitedList = pp.Forward().set_name("DelimitedList")
DelimitedList <<= DelimitedList + pp.Suppress(",") + "b" | "b"
self.assertParseResultsEquals(
DelimitedList.parse_string("b", parse_all=True), expected_list=["b"]
)
self.assertParseResultsEquals(
DelimitedList.parse_string("b,b", parse_all=True), expected_list=["b", "b"]
)
self.assertParseResultsEquals(
DelimitedList.parse_string("b,b , b, b,b", parse_all=True),
expected_list=["b", "b", "b", "b", "b"],
)
def test_binary_recursive(self):
"""parsing of single left-recursive binary operator"""
expr = pp.Forward().set_name("expr")
num = pp.Word(pp.nums)
expr <<= expr + "+" - num | num
self.assertParseResultsEquals(
expr.parse_string("1+2", parse_all=True), expected_list=["1", "+", "2"]
)
self.assertParseResultsEquals(
expr.parse_string("1+2+3+4", parse_all=True),
expected_list=["1", "+", "2", "+", "3", "+", "4"],
)
def test_binary_associative(self):
"""associative is preserved for single left-recursive binary operator"""
expr = pp.Forward().set_name("expr")
num = pp.Word(pp.nums)
expr <<= pp.Group(expr) + "+" - num | num
self.assertParseResultsEquals(
expr.parse_string("1+2", parse_all=True), expected_list=[["1"], "+", "2"]
)
self.assertParseResultsEquals(
expr.parse_string("1+2+3+4", parse_all=True),
expected_list=[[[["1"], "+", "2"], "+", "3"], "+", "4"],
)
def test_add_sub(self):
"""indirectly left-recursive/associative add/sub calculator"""
expr = pp.Forward().set_name("expr")
num = pp.Word(pp.nums).set_parse_action(lambda t: int(t[0]))
expr <<= (
(expr + "+" - num).set_parse_action(lambda t: t[0] + t[2])
| (expr + "-" - num).set_parse_action(lambda t: t[0] - t[2])
| num
)
self.assertEqual(expr.parse_string("1+2", parse_all=True)[0], 3)
self.assertEqual(expr.parse_string("1+2+3", parse_all=True)[0], 6)
self.assertEqual(expr.parse_string("1+2-3", parse_all=True)[0], 0)
self.assertEqual(expr.parse_string("1-2+3", parse_all=True)[0], 2)
self.assertEqual(expr.parse_string("1-2-3", parse_all=True)[0], -4)
def test_math(self):
"""precedence climbing parser for math"""
# named references
expr = pp.Forward().set_name("expr")
add_sub = pp.Forward().set_name("add_sub")
mul_div = pp.Forward().set_name("mul_div")
power = pp.Forward().set_name("power")
terminal = pp.Forward().set_name("terminal")
# concrete rules
number = pp.Word(pp.nums).set_parse_action(lambda t: int(t[0]))
signed = ("+" - expr) | ("-" - expr).set_parse_action(lambda t: -t[1])
group = pp.Suppress("(") - expr - pp.Suppress(")")
add_sub <<= (
(add_sub + "+" - mul_div).set_parse_action(lambda t: t[0] + t[2])
| (add_sub + "-" - mul_div).set_parse_action(lambda t: t[0] - t[2])
| mul_div
)
mul_div <<= (
(mul_div + "*" - power).set_parse_action(lambda t: t[0] * t[2])
| (mul_div + "/" - power).set_parse_action(lambda t: t[0] / t[2])
| power
)
power <<= (terminal + "^" - power).set_parse_action(
lambda t: t[0] ** t[2]
) | terminal
terminal <<= number | signed | group
expr <<= add_sub
# simple add_sub expressions
self.assertEqual(expr.parse_string("1+2", parse_all=True)[0], 3)
self.assertEqual(expr.parse_string("1+2+3", parse_all=True)[0], 6)
self.assertEqual(expr.parse_string("1+2-3", parse_all=True)[0], 0)
self.assertEqual(expr.parse_string("1-2+3", parse_all=True)[0], 2)
self.assertEqual(expr.parse_string("1-2-3", parse_all=True)[0], -4)
# precedence overwriting via parentheses
self.assertEqual(expr.parse_string("1+(2+3)", parse_all=True)[0], 6)
self.assertEqual(expr.parse_string("1+(2-3)", parse_all=True)[0], 0)
self.assertEqual(expr.parse_string("1-(2+3)", parse_all=True)[0], -4)
self.assertEqual(expr.parse_string("1-(2-3)", parse_all=True)[0], 2)
# complicated math expressions – same as Python expressions
self.assertEqual(expr.parse_string("1----3", parse_all=True)[0], 1 - ---3)
self.assertEqual(expr.parse_string("1+2*3", parse_all=True)[0], 1 + 2 * 3)
self.assertEqual(expr.parse_string("1*2+3", parse_all=True)[0], 1 * 2 + 3)
self.assertEqual(expr.parse_string("1*2^3", parse_all=True)[0], 1 * 2**3)
self.assertEqual(expr.parse_string("4^3^2^1", parse_all=True)[0], 4**3**2**1)
def test_terminate_empty(self):
"""Recursion with ``Empty`` terminates"""
empty = pp.Forward().set_name("e")
empty <<= empty + pp.Empty() | pp.Empty()
self.assertParseResultsEquals(
empty.parse_string("", parse_all=True), expected_list=[]
)
def test_non_peg(self):
"""Recursion works for non-PEG operators"""
expr = pp.Forward().set_name("expr")
expr <<= expr + "a" ^ expr + "ab" ^ expr + "abc" ^ "."
self.assertParseResultsEquals(
expr.parse_string(".abcabaabc", parse_all=True),
expected_list=[".", "abc", "ab", "a", "abc"],
)
| Test11_LR1_Recursion |
python | crytic__slither | slither/solc_parsing/declarations/custom_error.py | {
"start": 898,
"end": 4473
} | class ____(CallerContextExpression):
def __init__(
self,
custom_error: CustomError,
custom_error_data: dict,
contract_parser: Optional["ContractSolc"],
slither_parser: "SlitherCompilationUnitSolc",
) -> None:
self._slither_parser: "SlitherCompilationUnitSolc" = slither_parser
self._custom_error = custom_error
custom_error.name = custom_error_data["name"]
self._contract_parser = contract_parser
self._params_was_analyzed = False
if not self._slither_parser.is_compact_ast:
custom_error_data = custom_error_data["attributes"]
self._custom_error_data = custom_error_data
def analyze_params(self) -> None:
# Can be re-analyzed due to inheritance
if self._params_was_analyzed:
return
self._params_was_analyzed = True
if self._slither_parser.is_compact_ast:
params = self._custom_error_data["parameters"]
else:
children = self._custom_error_data[self.get_children("children")]
# It uses to be
# params = children[0]
# returns = children[1]
# But from Solidity 0.6.3 to 0.6.10 (included)
# Comment above a function might be added in the children
child_iter = iter(
[child for child in children if child[self.get_key()] == "ParameterList"]
)
params = next(child_iter)
if params:
self._parse_params(params)
@property
def contract_parser(self) -> Optional["ContractSolc"]:
return self._contract_parser
@property
def is_compact_ast(self) -> bool:
return self._slither_parser.is_compact_ast
def get_key(self) -> str:
return self._slither_parser.get_key()
def get_children(self, key: str) -> str:
if self._slither_parser.is_compact_ast:
return key
return "children"
def _parse_params(self, params: Dict) -> None:
assert params[self.get_key()] == "ParameterList"
if self._slither_parser.is_compact_ast:
params = params["parameters"]
else:
params = params[self.get_children("children")]
for param in params:
assert param[self.get_key()] == "VariableDeclaration"
local_var = self._add_param(param)
self._custom_error.add_parameters(local_var.underlying_variable)
self._custom_error.set_solidity_sig()
def _add_param(self, param: Dict) -> LocalVariableSolc:
local_var = LocalVariable()
local_var.set_offset(param["src"], self._slither_parser.compilation_unit)
local_var_parser = LocalVariableSolc(local_var, param)
if isinstance(self._custom_error, CustomErrorTopLevel):
local_var_parser.analyze(self)
else:
assert isinstance(self._custom_error, CustomErrorContract)
local_var_parser.analyze(self)
# see https://solidity.readthedocs.io/en/v0.4.24/types.html?highlight=storage%20location#data-location
if local_var.location == "default":
local_var.set_location("memory")
return local_var_parser
@property
def underlying_custom_error(self) -> CustomError:
return self._custom_error
@property
def slither_parser(self) -> "SlitherCompilationUnitSolc":
return self._slither_parser
@property
def compilation_unit(self) -> "SlitherCompilationUnit":
return self._custom_error.compilation_unit
| CustomErrorSolc |
python | pytorch__pytorch | torch/_inductor/analysis/profile_analysis.py | {
"start": 12585,
"end": 24523
} | class ____:
_devices: DeviceMap
def __init__(
self,
path: str,
benchmark_name: Optional[str] = None,
dtype: Optional[Union[torch.dtype, str]] = None,
):
"""
Convenience class for running common operations on chrome/perfetto json traces.
"""
self.path = path
with open(path) as f:
self.data = json.load(f)
self.events = self.data["traceEvents"]
self.benchmark_name = benchmark_name
if dtype is None:
self.dtype = None
elif isinstance(dtype, torch.dtype):
# pyrefly: ignore [bad-assignment]
self.dtype = dtype
else:
# pyrefly: ignore [bad-assignment]
self.dtype = _dtype_map.get(dtype)
self._create_devices()
def convert_dtype(self, event: dict[str, Any]) -> Optional[torch.dtype]:
"""
Each op has a list of dtypes for each input arg. We need to convert these into a single dtype for flop estimation.
Issues:
- converting the strings to concrete torch.dtypes
- What if we have float32, float, float16 all in the inputs? Our choice is to use the largest buffer dtype.
"""
if (
"Input Dims" not in event["args"]
or "Input type" not in event["args"]
or "Concrete Inputs" not in event["args"]
):
if "bfloat16" in event["name"]:
return torch.bfloat16
elif "float16" in event["name"]:
return torch.float16
else:
return None
input_sizes = event["args"]["Input Dims"]
input_types = event["args"]["Input type"]
concrete_inputs = event["args"]["Concrete Inputs"]
assert len(input_sizes) == len(input_types)
assert len(input_types) == len(concrete_inputs)
if len(input_sizes) == 0:
raise RuntimeError("Empty input_sizes and input_types")
biggest_size = 0
biggest_index = 0
for i in range(len(input_sizes)):
if concrete_inputs[i] != "":
# concrete inputs are usually small tensors, so we can just skip
continue
my_size = input_sizes[i]
total_size = sum(parse_list(my_size))
if total_size > biggest_size:
biggest_size = total_size
biggest_index = i
ret_type = input_types[biggest_index]
if ret_type in _dtype_map:
return _dtype_map[ret_type]
raise RuntimeError(f"Unknown type: {ret_type}. Please add to _dtype_map.")
def _create_devices(self) -> None:
self._devices = {}
for dev in self.data["deviceProperties"]:
name = dev["name"]
device_info = lookup_device_info(name)
if device_info is None:
log.info(
"Unsupported device in profile: %s, please consider contributing to _device_mapping.",
name,
)
self._devices[dev["id"]] = Device(
name, dev["id"], device_info, defaultdict(OrderedSet)
)
def calculate_flops(self, event: dict[str, Any]) -> int:
return _calculate_flops(event)
def estimate_gb(self, event: dict[str, Any]) -> float:
return _estimate_gb(event)
def augment_trace(self) -> None:
self.data = _augment_trace_helper(self.data)
def _compute_stats(self) -> None:
"""populates the name -> stats map"""
for event in self.events:
if "cat" not in event or "args" not in event or event["cat"] != "kernel":
continue
if "device" not in event["args"]:
continue
dev_tmp = event["args"]["device"]
if dev_tmp not in self._devices:
continue
dev = self._devices[event["args"]["device"]]
dur = event["dur"] # us
if "kernel_flop" in event["args"]:
assert dur != 0
# 1,000,000us/s * flop / us
op_flops = event["args"]["kernel_flop"] / (dur / 1e6)
else:
op_flops = 0
if "kernel_num_gb" in event["args"]:
assert dur != 0
# 1,000,000us/s * gb = gb/s
op_gbps = event["args"]["kernel_num_gb"] / (dur / 1e6)
else:
op_gbps = 0
if dev.info is not None:
dtype = self.convert_dtype(event) or self.dtype
if dtype is None:
raise RuntimeError(
"dtype is not found on tensor and default dtype is not set"
)
achieved_flops = 100 * op_flops / (1e12 * dev.info.tops[dtype])
achieved_bandwidth = 100 * op_gbps / dev.info.dram_bw_gbs
else:
achieved_flops = 0
achieved_bandwidth = 0
if "name" not in event["args"]:
continue
dev.stats[event["name"]].add(
KernelStats(
flops=op_flops,
bw=op_gbps,
latency=dur,
achieved_bandwidth=achieved_bandwidth,
achieved_flops=achieved_flops,
)
)
def _create_single_table(self, dev: Device) -> Table:
"""Create a table with the devices mapped to indices."""
headers = [
"Kernel Name",
"Kernel Count",
"FLOPS",
"Kernel Reads (GB)",
"Dur (us)",
"Achieved FLOPS %",
"Achieved Bandwidth %",
]
rows: dict[str, list[str]] = {}
def safe_div_format(x: float, y: float) -> str:
if y == 0:
return "0.0"
return f"{x / y:.4f}"
for kernel_name, stats_set in dev.stats.items():
ker_count = 0
flops = 0
flops_count = 0
achieved_flops = 0.0
bw = 0.0
bw_count = 0
achieved_bandwidth = 0.0
latency = 0.0
for stats in stats_set:
if stats.flops != 0:
flops += stats.flops
achieved_flops += stats.achieved_flops
flops_count += 1
if stats.bw != 0:
bw += stats.bw
achieved_bandwidth += stats.achieved_bandwidth
bw_count += 1
latency += stats.latency
ker_count += 1
assert ker_count != 0
rows[kernel_name] = [
str(ker_count),
safe_div_format(flops, flops_count),
safe_div_format(bw, bw_count),
safe_div_format(latency, ker_count),
safe_div_format(achieved_flops, flops_count),
safe_div_format(achieved_bandwidth, bw_count),
]
return headers, rows
def _create_tables(self, devs: DeviceMap) -> dict[int, Table]:
return {idx: self._create_single_table(dev) for idx, dev in devs.items()}
def _combine_tables(
self, table1: Table, table1_name: str, table2: Table, table2_name: str
) -> Table:
new_headers = (
["Kernel Name"]
+ [f"{table1_name} {head}" for head in table1[0][1:]]
+ [f"{table2_name} {head}" for head in table2[0][1:]]
)
t1_length = len(table1[0][1:])
t2_length = len(table2[0][1:])
new_rows = {}
for key, row1, row2 in zip_dicts(
table1[1],
table2[1],
d1_default=["Empty"] * t1_length,
d2_default=["Empty"] * t2_length,
):
assert row1 is not None
assert row2 is not None
new_rows[key] = row1 + row2
return new_headers, new_rows
def report(
self, other: Optional["JsonProfile"] = None, name_limit: int = 40
) -> str:
def create_ret(
table_headers: list[str], table_rows: dict[str, list[str]]
) -> str:
table_flattened = [
[kernel_name[:name_limit], *kernel_vals]
for kernel_name, kernel_vals in table_rows.items()
]
return tabulate_2d(table_flattened, headers=table_headers)
if other is not None:
self._compute_stats()
other._compute_stats()
self_tables = self._create_tables(self._devices)
other_tables = self._create_tables(other._devices)
self_name = (
self.benchmark_name if self.benchmark_name is not None else "Table 1"
)
other_name = (
other.benchmark_name if other.benchmark_name is not None else "Table 2"
)
ret = []
assert self._devices.keys() == other._devices.keys()
for device_idx, t1, t2 in zip_dicts(
self_tables, other_tables, d1_default=None, d2_default=None
):
assert t1 is not None
assert t2 is not None
table_headers, table_rows = self._combine_tables(
t1, self_name, t2, other_name
)
tab_string = create_ret(table_headers, table_rows)
# pyrefly: ignore [bad-argument-type]
ret.append(f"{self._devices[device_idx]}:\n{tab_string}")
return "\n".join(ret)
self._compute_stats()
self_tables = self._create_tables(self._devices)
ret = []
for idx, table in self_tables.items():
table_headers, table_rows = table
tab_string = create_ret(table_headers, table_rows)
# pyrefly: ignore [bad-argument-type]
ret.append(f"{self._devices[idx]}:\n{tab_string}")
return "\n".join(ret)
def dump(self, out: str) -> None:
with open(out, "w") as f:
json.dump(self.data, f)
def combine_with(self, other: "JsonProfile") -> "JsonProfile":
"""
Combine this profile with another profile by merging their trace events.
Returns a new JsonProfile object with combined data.
"""
# Create a new combined data structure
combined_data = {
"traceEvents": self.data["traceEvents"] + other.data["traceEvents"],
"deviceProperties": self.data.get("deviceProperties", []),
}
# Merge device properties, avoiding duplicates
other_device_props = other.data.get("deviceProperties", [])
existing_device_ids = OrderedSet(
[dev["id"] for dev in combined_data["deviceProperties"]]
)
for device_prop in other_device_props:
if device_prop["id"] not in existing_device_ids:
combined_data["deviceProperties"].append(device_prop)
# Copy any other top-level properties from the first profile
for key, value in self.data.items():
if key not in combined_data:
combined_data[key] = value
import os
# Create a temporary file to write the combined data
import tempfile
with tempfile.NamedTemporaryFile(
mode="w", suffix=".json", delete=False
) as tmp_file:
json.dump(combined_data, tmp_file)
tmp_path = tmp_file.name
try:
# Create new JsonProfile from the combined data
combined_profile = JsonProfile(
tmp_path,
benchmark_name=f"{self.benchmark_name or 'Profile1'}_+_{other.benchmark_name or 'Profile2'}",
dtype=self.dtype or other.dtype,
)
return combined_profile
finally:
# Clean up temporary file
os.unlink(tmp_path)
| JsonProfile |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.