language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchValue1.py | {
"start": 3997,
"end": 4245
} | class ____:
a: Literal[False]
def test_bool_expansion(subj: bool):
match subj:
case DC2.a:
reveal_type(subj, expected_text="Literal[False]")
case x:
reveal_type(subj, expected_text="Literal[True]")
| DC2 |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 1868,
"end": 1992
} | class ____(A):
def bar(self):
super(__class__, self).foo()
# see: https://github.com/astral-sh/ruff/issues/18684
| B |
python | pytorch__pytorch | torch/utils/data/datapipes/iter/sharding.py | {
"start": 423,
"end": 701
} | class ____(IterDataPipe):
def apply_sharding(
self,
num_of_instances: int,
instance_id: int,
sharding_group: SHARDING_PRIORITIES,
) -> NoReturn:
raise NotImplementedError
@functional_datapipe("sharding_filter")
| _ShardingIterDataPipe |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/execution_tests/engine_tests/test_step_delegating_executor_retries.py | {
"start": 5223,
"end": 6913
} | class ____(ConfigurableResource):
parent_dir: str
def create_resource(self, context: InitResourceContext) -> None:
filepath = os.path.join(self.parent_dir, f"{context.run_id}_resource.txt")
if not os.path.exists(filepath):
open(filepath, "a", encoding="utf8").close()
raise ValueError("Resource error")
@op(retry_policy=RetryPolicy(max_retries=3))
def resource_op(my_resource: FailOnceResource):
pass
@dg.executor(
name="retry_resource_executor",
requirements=dg.multiple_process_executor_requirements(),
config_schema=dg.Permissive(),
)
def retry_resource_executor(exc_init):
return StepDelegatingExecutor(
TestStepHandler("resource_op"),
**(merge_dicts({"retries": RetryMode.ENABLED}, exc_init.executor_config)),
check_step_health_interval_seconds=0,
)
@job(
resource_defs={"my_resource": FailOnceResource(parent_dir="")},
executor_def=retry_resource_executor,
)
def resource_fail_once_job():
resource_op()
def test_resource_retries():
TestStepHandler.reset()
with tempfile.TemporaryDirectory() as tempdir:
with instance_for_test() as instance:
with execute_job(
reconstructable(resource_fail_once_job),
instance=instance,
run_config={"resources": {"my_resource": {"config": {"parent_dir": tempdir}}}},
) as result:
TestStepHandler.wait_for_processes()
assert result.success
step_events = result.events_for_node("resource_op")
assert len([event for event in step_events if event.is_step_up_for_retry]) == 1
| FailOnceResource |
python | pennersr__django-allauth | allauth/socialaccount/providers/salesforce/provider.py | {
"start": 331,
"end": 542
} | class ____(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get("link")
def get_avatar_url(self):
return self.account.extra_data.get("picture")
| SalesforceAccount |
python | pypa__pip | src/pip/_vendor/rich/highlighter.py | {
"start": 316,
"end": 1230
} | class ____(ABC):
"""Abstract base class for highlighters."""
def __call__(self, text: Union[str, Text]) -> Text:
"""Highlight a str or Text instance.
Args:
text (Union[str, ~Text]): Text to highlight.
Raises:
TypeError: If not called with text or str.
Returns:
Text: A test instance with highlighting applied.
"""
if isinstance(text, str):
highlight_text = Text(text)
elif isinstance(text, Text):
highlight_text = text.copy()
else:
raise TypeError(f"str or Text instance required, not {text!r}")
self.highlight(highlight_text)
return highlight_text
@abstractmethod
def highlight(self, text: Text) -> None:
"""Apply highlighting in place to text.
Args:
text (~Text): A text object highlight.
"""
| Highlighter |
python | great-expectations__great_expectations | contrib/cli/great_expectations_contrib/package.py | {
"start": 1680,
"end": 11942
} | class ____(SerializableDictDot):
# Core
package_name: Optional[str] = None
icon: Optional[str] = None
description: Optional[str] = None
expectations: Optional[Dict[str, ExpectationDiagnostics]] = None
expectation_count: Optional[int] = None
dependencies: Optional[List[Dependency]] = None
maturity: Optional[Maturity] = None
status: Optional[PackageCompletenessStatus] = None
# Users
owners: Optional[List[GitHubUser]] = None
contributors: Optional[List[GitHubUser]] = None
domain_experts: Optional[List[DomainExpert]] = None
# Metadata
version: Optional[str] = None
def to_json_dict(self) -> dict:
# Chetan - 20220511 - this is a TEMPORARY patch to pop non-serializable values from the result dict
json_dict = asdict(self)
for value in json_dict["expectations"].values():
for test in value["tests"]:
test.pop("validation_result")
return json_dict
def update_package_state(self) -> None:
"""
Parses diagnostic reports from package Expectations and uses them to update JSON state
"""
diagnostics = (
GreatExpectationsContribPackageManifest.retrieve_package_expectations_diagnostics()
)
self._update_attrs_with_diagnostics(diagnostics)
def _update_attrs_with_diagnostics(self, diagnostics: List[ExpectationDiagnostics]) -> None:
self._update_from_package_info("package_info.yml")
self._update_expectations(diagnostics)
self._update_dependencies("requirements.txt")
self._update_contributors(diagnostics)
def _update_from_package_info(self, path: str) -> None: # noqa: C901 - too complex
if not os.path.exists(path): # noqa: PTH110
logger.warning(f"Could not find package info file {path}")
return
with open(path) as f:
data: dict = yaml.load(f.read())
if not data:
logger.warning(f"{path} is empty so exiting early")
return
# Assign general attrs
general = data.get("general")
if general:
for attr in ("package_name", "icon", "description"):
if attr == "icon":
# If the user has provided an icon, we need to check if it is a relative URL.
# If it is, we need to convert to the HTTPS path that will show up when merged into `develop`.
icon: Optional[str] = general.get(attr)
if icon and os.path.exists(icon): # noqa: PTH110
package_name: str = os.path.basename( # noqa: PTH119
os.getcwd() # noqa: PTH109
)
url: str = os.path.join( # noqa: PTH118
"https://raw.githubusercontent.com/great-expectations/great_expectations/develop/contrib",
package_name,
icon,
)
self["icon"] = url
else:
self["icon"] = icon
else:
self[attr] = general.get(attr)
# Assign code owners
code_owners = data.get("code_owners")
if code_owners:
self.code_owners = []
for owner in code_owners:
code_owner = GitHubUser(**owner)
self.code_owners.append(code_owner)
# Assign domain experts
domain_experts = data.get("domain_experts")
if domain_experts:
self.domain_experts = []
for expert in domain_experts:
# If the user has provided a picture, we need to check if it is a relative URL.
# If it is, we need to convert to the HTTPS path that will show up when merged into `develop`.
picture_path: Optional[str] = expert.get("picture")
if picture_path and os.path.exists(picture_path): # noqa: PTH110
package_name: str = os.path.basename( # noqa: PTH119
os.getcwd() # noqa: PTH109
)
url: str = os.path.join( # noqa: PTH118
"https://raw.githubusercontent.com/great-expectations/great_expectations/develop/contrib",
package_name,
picture_path,
)
expert["picture"] = url
domain_expert = DomainExpert(**expert)
self.domain_experts.append(domain_expert)
def _update_expectations(self, diagnostics: List[ExpectationDiagnostics]) -> None:
expectations = {}
status = {maturity.name: 0 for maturity in Maturity}
for diagnostic in diagnostics:
name = diagnostic.description.snake_name
expectations[name] = diagnostic
expectation_maturity = diagnostic.library_metadata.maturity
status[expectation_maturity] += 1
self.expectations = expectations
self.expectation_count = len(expectations)
# Enum is all caps but status attributes are lowercase
lowercase_status = {k.lower(): v for k, v in status.items()}
lowercase_status["total"] = sum(status.values())
self.status = PackageCompletenessStatus(**lowercase_status)
maturity = max(status, key=status.get) # Get the key with the max value
self.maturity = Maturity[maturity]
def _update_dependencies(self, path: str) -> None:
if not os.path.exists(path): # noqa: PTH110
logger.warning(f"Could not find requirements file {path}")
return
session = PipSession()
requirements = [req for req in parse_requirements(path, session=session)]
def _convert_to_dependency(
requirement: InstallRequirement,
) -> Dependency:
name = requirement.project_name
pypi_url = f"https://pypi.org/project/{name}"
if requirement.specs:
# Stringify tuple of pins
version = ", ".join(
"".join(symbol for symbol in pin) for pin in sorted(requirement.specs)
)
else:
version = None
return Dependency(text=name, link=pypi_url, version=version)
dependencies = list(map(_convert_to_dependency, requirements))
self.dependencies = dependencies
def _update_contributors(self, diagnostics: List[ExpectationDiagnostics]) -> None:
contributors = []
for diagnostic in diagnostics:
for contributor in diagnostic.library_metadata.contributors:
github_user = GitHubUser(contributor)
if github_user not in contributors:
contributors.append(github_user)
self.contributors = contributors
@staticmethod
def retrieve_package_expectations_diagnostics() -> List[ExpectationDiagnostics]:
try:
package = GreatExpectationsContribPackageManifest._identify_user_package()
expectations_module = (
GreatExpectationsContribPackageManifest._import_expectations_module(package)
)
expectations = (
GreatExpectationsContribPackageManifest._retrieve_expectations_from_module(
expectations_module
)
)
diagnostics = GreatExpectationsContribPackageManifest._gather_diagnostics(expectations)
return diagnostics
except Exception as e:
# Exceptions should not break the CLI - this behavior should be working in the background
# without the user being concerned about the underlying functionality
logger.warning(
f"Something went wrong when modifying the contributor package JSON object: {e}"
)
return []
@staticmethod
def _identify_user_package() -> str:
# Guaranteed to have a dir named '<MY_PACKAGE>_expectations' through Cookiecutter validation
packages = [
d.name for d in Path().iterdir() if d.is_dir() and d.name.endswith("_expectations")
]
# A sanity check in case the user modifies the Cookiecutter template in unexpected ways
if len(packages) == 0:
raise FileNotFoundError("Could not find a user-defined package") # noqa: TRY003
elif len(packages) > 1:
raise ValueError("Found more than one user-defined package") # noqa: TRY003
return packages[0]
@staticmethod
def _import_expectations_module(package: str) -> Any:
# Need to add user's project to the PYTHONPATH
cwd = os.getcwd() # noqa: PTH109
sys.path.append(cwd)
try:
expectations_module = importlib.import_module(f"{package}.expectations")
return expectations_module
except ModuleNotFoundError: # noqa: TRY203
raise
@staticmethod
def _retrieve_expectations_from_module(
expectations_module: Any,
) -> List[Type[Expectation]]:
expectations: List[Type[Expectation]] = []
names: List[str] = []
for name, obj in inspect.getmembers(expectations_module):
# ProfileNumericColumnsDiffExpectation from capitalone_dataprofiler_expectations
# is a base class that the contrib Expectations in that package all inherit from
if inspect.isclass(obj) and issubclass(obj, Expectation) and not obj.is_abstract():
expectations.append(obj)
names.append(name)
logger.info(f"Found {len(names)} expectation(s): {names}")
return expectations
@staticmethod
def _gather_diagnostics(
expectations: List[Type[Expectation]],
) -> List[ExpectationDiagnostics]:
diagnostics_list = []
for expectation in expectations:
instance = expectation()
diagnostics = instance.run_diagnostics()
diagnostics_list.append(diagnostics)
logger.info(f"Successfully retrieved diagnostics from {expectation}")
return diagnostics_list
| GreatExpectationsContribPackageManifest |
python | modin-project__modin | modin/core/dataframe/algebra/default2pandas/struct.py | {
"start": 900,
"end": 1339
} | class ____(SeriesDefault):
"""Builder for default-to-pandas methods which is executed under struct accessor."""
@classmethod
def frame_wrapper(cls, df):
"""
Get struct accessor of the passed frame.
Parameters
----------
df : pandas.DataFrame
Returns
-------
pandas.core.arrays.arrow.StructAccessor
"""
return df.squeeze(axis=1).struct
| StructDefault |
python | gevent__gevent | src/greentest/3.10/test_asyncore.py | {
"start": 1118,
"end": 2517
} | class ____:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen()
conn, addr = serv.accept()
except TimeoutError:
pass
else:
n = 200
start = time.monotonic()
while n > 0 and time.monotonic() - start < 3.0:
r, w, e = select.select([conn], [], [], 0.1)
if r:
n -= 1
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
def bind_af_aware(sock, addr):
"""Helper function to bind a socket according to its family."""
if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
# Make sure the path doesn't exist.
os_helper.unlink(addr)
socket_helper.bind_unix_socket(sock, addr)
else:
sock.bind(addr)
| crashingdummy |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 118192,
"end": 118684
} | class ____(sgqlc.types.Enum):
"""Defines which types of team members are included in the returned
list. Can be one of IMMEDIATE, CHILD_TEAM or ALL.
Enumeration Choices:
* `ALL`: Includes immediate and child team members for the team.
* `CHILD_TEAM`: Includes only child team members for the team.
* `IMMEDIATE`: Includes only immediate members of the team.
"""
__schema__ = github_schema
__choices__ = ("ALL", "CHILD_TEAM", "IMMEDIATE")
| TeamMembershipType |
python | pytorch__pytorch | torch/utils/data/datapipes/_typing.py | {
"start": 8382,
"end": 12268
} | class ____(GenericMeta):
r"""
Metaclass for `DataPipe`.
Add `type` attribute and `__init_subclass__` based on the type, and validate the return hint of `__iter__`.
Note that there is subclass `_IterDataPipeMeta` specifically for `IterDataPipe`.
"""
type: _DataPipeType
def __new__(cls, name, bases, namespace, **kwargs):
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
# TODO: the statements below are not reachable by design as there is a bug and typing is low priority for now.
# pyrefly: ignore [no-access]
cls.__origin__ = None
if "type" in namespace:
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
namespace["__type_class__"] = False
# For plain derived class without annotation
for base in bases:
if isinstance(base, _DataPipeMeta):
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
namespace.update(
{"type": _DEFAULT_TYPE, "__init_subclass__": _dp_init_subclass}
)
return super().__new__(cls, name, bases, namespace, **kwargs) # type: ignore[call-overload]
def __init__(self, name, bases, namespace, **kwargs) -> None:
super().__init__(name, bases, namespace, **kwargs) # type: ignore[call-overload]
# TODO: Fix isinstance bug
@_tp_cache
def _getitem_(self, params):
if params is None:
raise TypeError(f"{self.__name__}[t]: t can not be None")
if isinstance(params, str):
params = ForwardRef(params)
if not isinstance(params, tuple):
params = (params,)
msg = f"{self.__name__}[t]: t must be a type"
params = tuple(_type_check(p, msg) for p in params)
if isinstance(self.type.param, _GenericAlias):
orig = getattr(self.type.param, "__origin__", None)
if isinstance(orig, type) and orig is not Generic:
p = self.type.param[params] # type: ignore[index]
t = _DataPipeType(p)
l = len(str(self.type)) + 2
name = self.__name__[:-l]
name = name + "[" + str(t) + "]"
bases = (self,) + self.__bases__
return self.__class__(
name,
bases,
{
"__init_subclass__": _dp_init_subclass,
"type": t,
"__type_class__": True,
},
)
if len(params) > 1:
raise TypeError(
f"Too many parameters for {self} actual {len(params)}, expected 1"
)
t = _DataPipeType(params[0])
if not t.issubtype(self.type):
raise TypeError(
f"Can not subclass a DataPipe[{t}] from DataPipe[{self.type}]"
)
# Types are equal, fast path for inheritance
if self.type == t:
return self
name = self.__name__ + "[" + str(t) + "]"
bases = (self,) + self.__bases__
return self.__class__(
name,
bases,
{"__init_subclass__": _dp_init_subclass, "__type_class__": True, "type": t},
)
# TODO: Fix isinstance bug
def _eq_(self, other):
if not isinstance(other, _DataPipeMeta):
return NotImplemented
if self.__origin__ is None or other.__origin__ is None: # type: ignore[has-type]
return self is other
return (
self.__origin__ == other.__origin__ # type: ignore[has-type]
and self.type == other.type
)
# TODO: Fix isinstance bug
def _hash_(self):
return hash((self.__name__, self.type))
| _DataPipeMeta |
python | pytorch__pytorch | tools/jit/gen_unboxing.py | {
"start": 978,
"end": 3587
} | class ____:
target: Literal[Target.DECLARATION, Target.DEFINITION]
selector: SelectiveBuilder
@method_with_native_function
def __call__(self, f: NativeFunction) -> str:
if not self.selector.is_root_operator(f"aten::{f.func.name}"):
return ""
if self.target is Target.DECLARATION:
# Note [The ATen Codegen Unboxing API]
# Similar to the ATen Operators API, ATen Codegen Unboxing API lives in the at::unboxing namespace, and
# will be used by codegen unboxing wrappers (CodegenUnboxingWrappers.cpp).
# The Wrappers will be registered into torch::jit::OperatorRegistry using RegisterOperators API.
#
# Important characteristics about the Codegen Unboxing API:
# (1) It follows the OperatorRegistry API.
# This is kind of necessary to avoid overhead.
# For example: if it followed the C++ API, then all of the faithful C++ factory functions
# would need to wrap their arguments into TensorOptions only to unwrap them again.
# (2) Under the hood it calls C++ API.
return f"""
// aten::{f.func}
TORCH_API void {f.func.name.unambiguous_name()}(Stack & stack);
"""
else:
sig_group = CppSignatureGroup.from_native_function(
f, method=(Variant.method in f.variants)
)
sig = sig_group.most_faithful_signature()
# parse arguments into C++ code
binding_list, code_list = convert_arguments(f)
# for each C++ argument, generate the conversion code
code_connector = "\n\t"
arg_connector = ", "
# function call and push back to stack
prefix = "self_base." if sig.method else "at::"
translated_args = translate(
binding_list, sig.arguments(), method=sig.method
)
args_str = f"{arg_connector.join(e.expr for e in translated_args)}"
if len(f.func.returns) == 0:
ret_str = ""
push_str = ""
else:
ret_str = "auto result_ = "
push_str = """
pack(stack, std::move(result_));
"""
return f"""
// aten::{f.func}
TORCH_API void {f.func.name.unambiguous_name()}(Stack & stack) {{
{code_connector.join(code_list)}
drop(stack, {len(binding_list)});
{ret_str}{prefix}{sig.name()}({args_str});
{push_str}
}}
"""
# Generates RegisterCodegenUnboxedKernels.cpp.
@dataclass(frozen=True)
| ComputeUnboxingFunctions |
python | encode__django-rest-framework | tests/test_validators.py | {
"start": 1293,
"end": 1430
} | class ____(serializers.ModelSerializer):
class Meta:
model = RelatedModel
fields = ('user',)
| RelatedModelUserSerializer |
python | sympy__sympy | sympy/codegen/numpy_nodes.py | {
"start": 861,
"end": 2051
} | class ____(Function):
""" Logarithm of the sum of exponentiations of the inputs.
Helper class for use with e.g. numpy.logaddexp
See Also
========
https://numpy.org/doc/stable/reference/generated/numpy.logaddexp.html
"""
nargs = 2
def __new__(cls, *args):
return Function.__new__(cls, *sorted(args, key=default_sort_key))
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
wrt, other = self.args
elif argindex == 2:
other, wrt = self.args
else:
raise ArgumentIndexError(self, argindex)
return S.One/(S.One + exp(other-wrt))
def _eval_rewrite_as_log(self, x1, x2, **kwargs):
return _logaddexp(x1, x2)
def _eval_evalf(self, *args, **kwargs):
return self.rewrite(log).evalf(*args, **kwargs)
def _eval_simplify(self, *args, **kwargs):
a, b = (x.simplify(**kwargs) for x in self.args)
candidate = _logaddexp(a, b)
if candidate != _logaddexp(a, b, evaluate=False):
return candidate
else:
return logaddexp(a, b)
| logaddexp |
python | walkccc__LeetCode | solutions/10. Regular Expression Matching/10.py | {
"start": 0,
"end": 748
} | class ____:
def isMatch(self, s: str, p: str) -> bool:
m = len(s)
n = len(p)
# dp[i][j] := True if s[0..i) matches p[0..j)
dp = [[False] * (n + 1) for _ in range(m + 1)]
dp[0][0] = True
def isMatch(i: int, j: int) -> bool:
return j >= 0 and p[j] == '.' or s[i] == p[j]
for j, c in enumerate(p):
if c == '*' and dp[0][j - 1]:
dp[0][j + 1] = True
for i in range(m):
for j in range(n):
if p[j] == '*':
# The minimum index of '*' is 1.
noRepeat = dp[i + 1][j - 1]
doRepeat = isMatch(i, j - 1) and dp[i][j + 1]
dp[i + 1][j + 1] = noRepeat or doRepeat
elif isMatch(i, j):
dp[i + 1][j + 1] = dp[i][j]
return dp[m][n]
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 1053076,
"end": 1053250
} | class ____(sgqlc.types.Union):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__types__ = (Bot, Mannequin, Organization, User)
| Assignee |
python | optuna__optuna | optuna/exceptions.py | {
"start": 2332,
"end": 2639
} | class ____(Warning):
"""Experimental Warning class.
This implementation exists here because the policy of `FutureWarning` has been changed
since Python 3.7 was released. See the details in
https://docs.python.org/3/library/warnings.html#warning-categories.
"""
pass
| ExperimentalWarning |
python | gevent__gevent | src/gevent/tests/test__real_greenlet.py | {
"start": 204,
"end": 693
} | class ____(greentest.TestCase):
def test(self):
import greenlet
print('Your greenlet version: %s' % (getattr(greenlet, '__version__', None), ))
result = []
def func():
result.append(repr(sys.exc_info()))
g = greenlet.greenlet(func)
try:
1 / 0
except ZeroDivisionError:
g.switch()
self.assertEqual(result, ['(None, None, None)'])
if __name__ == '__main__':
greentest.main()
| Test |
python | huggingface__transformers | tests/models/parakeet/test_processing_parakeet.py | {
"start": 837,
"end": 1020
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = ParakeetProcessor
text_input_name = "labels"
model_id = "nvidia/parakeet-ctc-1.1b"
| ParakeetProcessorTest |
python | ray-project__ray | rllib/utils/exploration/slate_epsilon_greedy.py | {
"start": 427,
"end": 3871
} | class ____(EpsilonGreedy):
@override(EpsilonGreedy)
def _get_tf_exploration_action_op(
self,
action_distribution: ActionDistribution,
explore: Union[bool, TensorType],
timestep: Union[int, TensorType],
) -> "tf.Tensor":
per_slate_q_values = action_distribution.inputs
all_slates = action_distribution.all_slates
exploit_action = action_distribution.deterministic_sample()
batch_size, num_slates = (
tf.shape(per_slate_q_values)[0],
tf.shape(per_slate_q_values)[1],
)
action_logp = tf.zeros(batch_size, dtype=tf.float32)
# Get the current epsilon.
epsilon = self.epsilon_schedule(
timestep if timestep is not None else self.last_timestep
)
# A random action.
random_indices = tf.random.uniform(
(batch_size,),
minval=0,
maxval=num_slates,
dtype=tf.dtypes.int32,
)
random_actions = tf.gather(all_slates, random_indices)
choose_random = (
tf.random.uniform(
tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32
)
< epsilon
)
# Pick either random or greedy.
action = tf.cond(
pred=tf.constant(explore, dtype=tf.bool)
if isinstance(explore, bool)
else explore,
true_fn=(lambda: tf.where(choose_random, random_actions, exploit_action)),
false_fn=lambda: exploit_action,
)
if self.framework == "tf2" and not self.policy_config["eager_tracing"]:
self.last_timestep = timestep
return action, action_logp
else:
assign_op = tf1.assign(self.last_timestep, tf.cast(timestep, tf.int64))
with tf1.control_dependencies([assign_op]):
return action, action_logp
@override(EpsilonGreedy)
def _get_torch_exploration_action(
self,
action_distribution: ActionDistribution,
explore: bool,
timestep: Union[int, TensorType],
) -> "torch.Tensor":
per_slate_q_values = action_distribution.inputs
all_slates = self.model.slates
device = all_slates.device
exploit_indices = action_distribution.deterministic_sample()
exploit_indices = exploit_indices.to(device)
exploit_action = all_slates[exploit_indices]
batch_size = per_slate_q_values.size()[0]
action_logp = torch.zeros(batch_size, dtype=torch.float)
self.last_timestep = timestep
# Explore.
if explore:
# Get the current epsilon.
epsilon = self.epsilon_schedule(self.last_timestep)
# A random action.
random_indices = torch.randint(
0,
per_slate_q_values.shape[1],
(per_slate_q_values.shape[0],),
device=device,
)
random_actions = all_slates[random_indices]
# Pick either random or greedy.
action = torch.where(
torch.empty((batch_size,)).uniform_() < epsilon,
random_actions,
exploit_action,
)
return action, action_logp
# Return the deterministic "sample" (argmax) over the logits.
else:
return exploit_action, action_logp
| SlateEpsilonGreedy |
python | django-crispy-forms__django-crispy-forms | tests/forms.py | {
"start": 6715,
"end": 7173
} | class ____(BaseForm):
inline_radios = forms.ChoiceField(
choices=(
("option_one", "Option one"),
("option_two", "Option two"),
),
widget=CustomRadioSelect,
initial="option_two",
)
checkboxes = forms.MultipleChoiceField(
choices=((1, "Option one"), (2, "Option two"), (3, "Option three")),
initial=(1,),
widget=CustomCheckboxSelectMultiple,
)
| SampleFormCustomWidgets |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_header_image09.py | {
"start": 315,
"end": 1239
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("header_image09.xlsx")
self.ignore_elements = {
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
"xl/worksheets/sheet2.xml": ["<pageMargins", "<pageSetup"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet1.write("A1", "Foo")
worksheet1.write_comment("B2", "Some text")
worksheet1.set_comments_author("John")
worksheet2.set_header("&L&G", {"image_left": self.image_dir + "red.jpg"})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | sympy__sympy | doc/ext/docscrape_sphinx.py | {
"start": 8728,
"end": 9548
} | class ____(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| SphinxObjDoc |
python | ansible__ansible | lib/ansible/playbook/role/__init__.py | {
"start": 4545,
"end": 29078
} | class ____(Base, Conditional, Taggable, CollectionSearch, Delegatable):
def __init__(self,
play: Play = None,
from_files: dict[str, list[str]] = None,
from_include: bool = False,
validate: bool = True,
public: bool = None,
static: bool = True,
rescuable: bool = True) -> None:
self._role_name: str = None
self._role_path: str = None
self._role_collection: str = None
self._role_params: dict[str, dict[str, str]] = dict()
self._loader = None
self.static: bool = static
self._rescuable: bool = rescuable
# includes (static=false) default to private, while imports (static=true) default to public
# but both can be overridden by global config if set
if public is None:
global_private, origin = C.config.get_config_value_and_origin('DEFAULT_PRIVATE_ROLE_VARS')
if origin == 'default':
self.public = static
else:
self.public = not global_private
else:
self.public = public
self._metadata: RoleMetadata = RoleMetadata()
self._play: Play = play
self._parents: list[Role] = []
self._dependencies: list[Role] = []
self._all_dependencies: list[Role] | None = None
self._task_blocks: list[Block] = []
self._handler_blocks: list[Block] = []
self._compiled_handler_blocks: list[Block] | None = None
self._default_vars: dict[str, str] | None = dict()
self._role_vars: dict[str, str] | None = dict()
self._had_task_run: dict[str, bool] = dict()
self._completed: dict[str, bool] = dict()
self._should_validate: bool = validate
if from_files is None:
from_files = {}
self._from_files: dict[str, list[str]] = from_files
# Indicates whether this role was included via include/import_role
self.from_include: bool = from_include
self._hash = None
super(Role, self).__init__()
def __repr__(self):
return self.get_name()
@_functools.cached_property
def _FAIL(self):
return AnsibleActionFail if self._rescuable else AnsibleParserError
def get_name(self, include_role_fqcn=True):
if include_role_fqcn:
return '.'.join(x for x in (self._role_collection, self._role_name) if x)
return self._role_name
def get_role_path(self):
# Purposefully using realpath for canonical path
return os.path.realpath(self._role_path)
def _get_hash_dict(self):
if self._hash:
return self._hash
self._hash = MappingProxyType(
{
'name': self.get_name(),
'path': self.get_role_path(),
'params': MappingProxyType(self.get_role_params()),
'when': self.when,
'tags': self.tags,
'from_files': MappingProxyType(self._from_files),
'vars': MappingProxyType(self.vars),
'from_include': self.from_include,
}
)
return self._hash
def __eq__(self, other):
if not isinstance(other, Role):
return False
return self._get_hash_dict() == other._get_hash_dict()
@staticmethod
def load(role_include, play, parent_role=None, from_files=None, from_include=False, validate=True, public=None, static=True, rescuable=True):
if from_files is None:
from_files = {}
try:
# TODO: need to fix cycle detection in role load (maybe use an empty dict
# for the in-flight in role cache as a sentinel that we're already trying to load
# that role?)
# see https://github.com/ansible/ansible/issues/61527
r = Role(play=play, from_files=from_files, from_include=from_include, validate=validate, public=public, static=static, rescuable=rescuable)
r._load_role_data(role_include, parent_role=parent_role)
role_path = r.get_role_path()
if role_path not in play.role_cache:
play.role_cache[role_path] = []
# Using the role path as a cache key is done to improve performance when a large number of roles
# are in use in the play
if r not in play.role_cache[role_path]:
play.role_cache[role_path].append(r)
return r
except RecursionError as ex:
raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles",
obj=role_include._ds) from ex
def _load_role_data(self, role_include, parent_role=None):
    """Populate this role from *role_include*: copy attributes, then load
    vars/defaults/meta/tasks/handlers from the role's directory.

    Order matters here: metadata (and thus dependencies) must be loaded
    before the collection search list is configured, and the collection
    list must be set before tasks/handlers are parsed into blocks.

    :raises AnsibleParserError: when vars/defaults are not mappings or when
        tasks/handlers files are not lists.
    """
    self._role_name = role_include.role
    self._role_path = role_include.get_role_path()
    self._role_collection = role_include._role_collection
    self._role_params = role_include.get_role_params()
    self._variable_manager = role_include.get_variable_manager()
    self._loader = role_include.get_loader()

    if parent_role:
        self.add_parent(parent_role)

    # copy over all field attributes from the RoleInclude
    # update self._attr directly, to avoid squashing
    for attr_name in self.fattributes:
        setattr(self, f'_{attr_name}', getattr(role_include, f'_{attr_name}', Sentinel))

    # vars and default vars are regular dictionaries
    self._role_vars = self._load_role_yaml('vars', main=self._from_files.get('vars'), allow_dir=True)
    if self._role_vars is None:
        self._role_vars = {}
    elif not isinstance(self._role_vars, Mapping):
        raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)

    self._default_vars = self._load_role_yaml('defaults', main=self._from_files.get('defaults'), allow_dir=True)
    if self._default_vars is None:
        self._default_vars = {}
    elif not isinstance(self._default_vars, Mapping):
        raise AnsibleParserError("The defaults/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)

    # load the role's other files, if they exist
    metadata = self._load_role_yaml('meta')
    if metadata:
        self._metadata = RoleMetadata.load(metadata, owner=self, variable_manager=self._variable_manager, loader=self._loader)
        self._dependencies = self._load_dependencies()

    # reset collections list; roles do not inherit collections from parents, just use the defaults
    # FUTURE: use a private config default for this so we can allow it to be overridden later
    self.collections = []

    # configure plugin/collection loading; either prepend the current role's collection or configure legacy plugin loading
    # FIXME: need exception for explicit ansible.legacy?
    if self._role_collection:  # this is a collection-hosted role
        self.collections.insert(0, self._role_collection)
    else:  # this is a legacy role, but set the default collection if there is one
        default_collection = AnsibleCollectionConfig.default_collection
        if default_collection:
            self.collections.insert(0, default_collection)
        # legacy role, ensure all plugin dirs under the role are added to plugin search path
        add_all_plugin_dirs(self._role_path)

    # collections can be specified in metadata for legacy or collection-hosted roles
    if self._metadata.collections:
        self.collections.extend((c for c in self._metadata.collections if c not in self.collections))

    # if any collections were specified, ensure that core or legacy synthetic collections are always included
    if self.collections:
        # default append collection is core for collection-hosted roles, legacy for others
        default_append_collection = 'ansible.builtin' if self._role_collection else 'ansible.legacy'
        if 'ansible.builtin' not in self.collections and 'ansible.legacy' not in self.collections:
            self.collections.append(default_append_collection)

    task_data = self._load_role_yaml('tasks', main=self._from_files.get('tasks'))

    if self._should_validate:
        role_argspecs = self._get_role_argspecs()
        task_data = self._prepend_validation_task(task_data, role_argspecs)

    if task_data:
        try:
            self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader, variable_manager=self._variable_manager)
        except AssertionError as ex:
            raise AnsibleParserError(f"The tasks/main.yml file for role {self._role_name!r} must contain a list of tasks.", obj=task_data) from ex

    handler_data = self._load_role_yaml('handlers', main=self._from_files.get('handlers'))
    if handler_data:
        try:
            self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader,
                                                       variable_manager=self._variable_manager)
        except AssertionError as ex:
            raise AnsibleParserError(f"The handlers/main.yml file for role {self._role_name!r} must contain a list of tasks.",
                                     obj=handler_data) from ex
def _get_role_argspecs(self):
    """Get the role argument spec data.

    Role arg specs can live in one of two files under the role's meta
    subdir: argument_specs.yml (any supported extension) or main.yml. The
    former takes precedence over the latter; data is never combined.

    :returns: the dict stored under the top-level ``argument_specs`` YAML
        key of the spec file, or an empty dict when no argspec data exists.
    """
    spec_path_base = os.path.join(self._role_path, 'meta', 'argument_specs')
    for extension in C.YAML_FILENAME_EXTENSIONS:
        if not self._loader.path_exists(spec_path_base + extension):
            continue
        # Note: _load_role_yaml() takes care of rebuilding the path.
        loaded = self._load_role_yaml('meta', main='argument_specs')
        try:
            return loaded.get('argument_specs') or {}
        except AttributeError:
            # file existed but parsed to a non-mapping (e.g. None)
            return {}

    # No meta/argument_specs.[yml|yaml] file found; fall back to the spec
    # dict from the role metadata, if any. Ansible 2.11+ metadata has the
    # 'argument_specs' attribute, earlier versions do not.
    return getattr(self._metadata, 'argument_specs', {})
def _prepend_validation_task(self, task_data, argspecs):
"""Insert a role validation task if we have a role argument spec.
This method will prepend a validation task to the front of the role task
list to perform argument spec validation before any other tasks, if an arg spec
exists for the entry point. Entry point defaults to `main`.
:param task_data: List of tasks loaded from the role.
:param argspecs: The role argument spec data dict.
:returns: The (possibly modified) task list.
"""
if argspecs:
# Determine the role entry point so we can retrieve the correct argument spec.
# This comes from the `tasks_from` value to include_role or import_role.
entrypoint = self._from_files.get('tasks', 'main')
entrypoint_arg_spec = argspecs.get(entrypoint)
if entrypoint_arg_spec:
validation_task = self._create_validation_task(entrypoint_arg_spec, entrypoint)
# Prepend our validate_argument_spec action to happen before any tasks provided by the role.
# 'any tasks' can and does include 0 or None tasks, in which cases we create a list of tasks and add our
# validate_argument_spec task
if not task_data:
task_data = []
task_data.insert(0, validation_task)
return task_data
def _create_validation_task(self, argument_spec, entrypoint_name):
"""Create a new task data structure that uses the validate_argument_spec action plugin.
:param argument_spec: The arg spec definition for a particular role entry point.
This will be the entire arg spec for the entry point as read from the input file.
:param entrypoint_name: The name of the role entry point associated with the
supplied `argument_spec`.
"""
# If the arg spec provides a short description, use it to flesh out the validation task name
task_name = "Validating arguments against arg spec '%s'" % entrypoint_name
if 'short_description' in argument_spec:
task_name = task_name + ' - ' + argument_spec['short_description']
return {
'action': 'ansible.builtin.validate_argument_spec',
'args': {
# Pass only the 'options' portion of the arg spec to the module.
'argument_spec': argument_spec.get('options', {}),
'provided_arguments': self._role_params,
'validate_args_context': {
'type': 'role',
'name': self._role_name,
'argument_spec_name': entrypoint_name,
'path': self._role_path
},
},
'name': task_name,
'tags': ['always'],
}
def _load_role_yaml(self, subdir, main=None, allow_dir=False):
    """
    Find and load role YAML files and return data found.

    :param subdir: subdir of role to search (vars, files, tasks, handlers, defaults)
    :type subdir: string
    :param main: filename to match, will default to 'main.<ext>' if not provided.
    :type main: string
    :param allow_dir: If true we combine results of multiple matching files found.
                      If false, highlander rules: first match wins. Only for
                      vars(dicts) and not tasks(lists).
    :type allow_dir: bool

    :returns: data from the matched file(s), type can be dict or list depending
        on vars or tasks; None when nothing was found and nothing was required.
    :raises: self._FAIL when an explicitly requested file is missing, the
        subdir is unusable, or a found file escapes the role path.
    """
    data = None
    file_path = os.path.join(self._role_path, subdir)
    if self._loader.path_exists(file_path) and self._loader.is_directory(file_path):
        # Valid extensions and ordering for roles is hard-coded to maintain portability
        extensions = ['.yml', '.yaml', '.json']  # same as default for YAML_FILENAME_EXTENSIONS

        # look for files w/o extensions before/after bare name depending on it being set or not
        # keep 'main' as original to figure out errors if no files found
        if main is None:
            _main = 'main'
            extensions.append('')
        else:
            _main = main
            extensions.insert(0, '')

        # not really 'find_vars_files' but find_files_with_extensions_default_to_yaml_filename_extensions
        found_files = self._loader.find_vars_files(file_path, _main, extensions, allow_dir)
        if found_files:
            for found in found_files:

                # refuse anything that resolves outside the role directory
                if not is_subpath(found, file_path):
                    raise self._FAIL(f"Failed loading '{found!r}' for role ({self._role_name}) "
                                     f"as it is not inside the expected role path: {file_path!r}")

                new_data = self._loader.load_from_file(found, trusted_as_template=True)
                if new_data:
                    if data is not None and isinstance(new_data, Mapping):
                        data = combine_vars(data, new_data)
                    else:
                        data = new_data

                    # found data so no need to continue unless we want to merge
                    if not allow_dir:
                        break

        elif main is not None:
            # only reachable when an explicit <subdir>_from file was requested
            raise self._FAIL(f"Could not find specified file in role: {subdir}/{main}")

    elif main is not None:
        # only reachable when an explicit <subdir>_from file was requested
        raise self._FAIL(f"Could not find specified file in role, its '{subdir}/' is not usable.")

    return data
def _load_dependencies(self):
"""
Recursively loads role dependencies from the metadata list of
dependencies, if it exists
"""
deps = []
for role_include in self._metadata.dependencies:
r = Role.load(role_include, play=self._play, parent_role=self, static=self.static)
deps.append(r)
return deps
# other functions
def add_parent(self, parent_role):
""" adds a role to the list of this roles parents """
if not isinstance(parent_role, Role):
raise AnsibleAssertionError()
if parent_role not in self._parents:
self._parents.append(parent_role)
def get_parents(self):
    """Return the live list of this role's parent roles (not a copy)."""
    return self._parents
def get_dep_chain(self):
dep_chain = []
for parent in self._parents:
dep_chain.extend(parent.get_dep_chain())
dep_chain.append(parent)
return dep_chain
def get_default_vars(self, dep_chain=None):
    """Merge default vars, lowest precedence first: all transitive
    dependencies, then parents along *dep_chain*, then this role's own
    defaults (which win)."""
    merged = dict()
    for dep in self.get_all_dependencies():
        merged = combine_vars(merged, dep.get_default_vars())
    for parent in (dep_chain or []):
        merged = combine_vars(merged, parent._default_vars)
    return combine_vars(merged, self._default_vars)
def get_inherited_vars(self, dep_chain=None, only_exports=False):
    """Collect vars inherited from parent roles along *dep_chain*.

    With ``only_exports=True`` only each parent's role vars (vars/ files)
    are taken; otherwise the parent's inline invocation vars are merged
    first, then its role vars on top.
    """
    inherited = dict()
    for parent in (dep_chain or []):
        if not only_exports:
            inherited = combine_vars(inherited, parent.vars)
        inherited = combine_vars(inherited, parent._role_vars)
    return inherited
def get_role_params(self, dep_chain=None):
    """Merge inline role params from parents along *dep_chain*, with this
    role's own params merged last (highest precedence)."""
    merged = {}
    for parent in (dep_chain or []):
        merged = combine_vars(merged, parent._role_params)
    return combine_vars(merged, self._role_params)
def get_vars(self, dep_chain=None, include_params=True, only_exports=False):
    """Return the merged variables this role contributes to the play.

    Merge order (lowest to highest precedence): inherited parent vars,
    exported dependency vars, this role's vars/ files, then — unless
    ``only_exports`` — inline role params and the ``vars:`` keyword of the
    role invocation.
    """
    dep_chain = [] if dep_chain is None else dep_chain

    all_vars = {}

    # get role_vars: from parent objects
    # TODO: is this right precedence for inherited role_vars?
    all_vars = self.get_inherited_vars(dep_chain, only_exports=only_exports)

    # get exported variables from meta/dependencies
    seen = []
    for dep in self.get_all_dependencies():
        # Avoid rerunning dupe deps since they can have vars from previous invocations and they accumulate in deps
        # TODO: re-examine dep loading to see if we are somehow improperly adding the same dep too many times
        if dep not in seen:
            # only take 'exportable' vars from deps
            all_vars = combine_vars(all_vars, dep.get_vars(include_params=False, only_exports=True))
            seen.append(dep)

    # role_vars come from vars/ in a role
    all_vars = combine_vars(all_vars, self._role_vars)
    if not only_exports:
        # include_params are 'inline variables' in role invocation. - {role: x, varname: value}
        if include_params:
            # TODO: add deprecation notice
            all_vars = combine_vars(all_vars, self.get_role_params(dep_chain=dep_chain))

        # these come from vars: keyword in role invocation. - {role: x, vars: {varname: value}}
        all_vars = combine_vars(all_vars, self.vars)

    return all_vars
def get_direct_dependencies(self):
return self._dependencies[:]
def get_all_dependencies(self):
"""
Returns a list of all deps, built recursively from all child dependencies,
in the proper order in which they should be executed or evaluated.
"""
if self._all_dependencies is None:
self._all_dependencies = []
for dep in self.get_direct_dependencies():
for child_dep in dep.get_all_dependencies():
self._all_dependencies.append(child_dep)
self._all_dependencies.append(dep)
return self._all_dependencies
def get_task_blocks(self):
return self._task_blocks[:]
def get_handler_blocks(self, play, dep_chain=None):
    """Return handler blocks for this role's dependencies and itself.

    Each returned block is a copy stamped with the dependency chain it was
    reached through and the play it was compiled for. The result is cached
    on the role so repeated calls do not duplicate/overwrite blocks.
    """
    # Do not recreate this list each time ``get_handler_blocks`` is called.
    # Cache the results so that we don't potentially overwrite with copied duplicates
    #
    # ``get_handler_blocks`` may be called when handling ``import_role`` during parsing
    # as well as with ``Play.compile_roles_handlers`` from ``TaskExecutor``
    if self._compiled_handler_blocks:
        return self._compiled_handler_blocks

    self._compiled_handler_blocks = block_list = []

    # update the dependency chain here
    if dep_chain is None:
        dep_chain = []
    new_dep_chain = dep_chain + [self]

    for dep in self.get_direct_dependencies():
        dep_blocks = dep.get_handler_blocks(play=play, dep_chain=new_dep_chain)
        block_list.extend(dep_blocks)

    for task_block in self._handler_blocks:
        new_task_block = task_block.copy()
        new_task_block._dep_chain = new_dep_chain
        new_task_block._play = play
        block_list.append(new_task_block)

    return block_list
def has_run(self, host):
    """
    Returns true if this role has been iterated over completely and
    at least one task was run for the given host (tracked by host name
    in ``self._completed``).
    """
    return host.name in self._completed
def compile(self, play, dep_chain=None):
    """
    Returns the task list for this role, which is created by first
    recursively compiling the tasks for all direct dependencies, and
    then adding on the tasks for this role.

    The role compile() also remembers and saves the dependency chain
    with each task, so tasks know by which route they were found, and
    can correctly take their parent's tags/conditionals into account.
    """
    # imported here to avoid a circular import at module load time
    from ansible.playbook.block import Block
    from ansible.playbook.task import Task

    block_list = []

    # update the dependency chain here
    if dep_chain is None:
        dep_chain = []
    new_dep_chain = dep_chain + [self]

    deps = self.get_direct_dependencies()
    for dep in deps:
        dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
        block_list.extend(dep_blocks)

    for task_block in self._task_blocks:
        new_task_block = task_block.copy()
        new_task_block._dep_chain = new_dep_chain
        new_task_block._play = play
        block_list.append(new_task_block)

    # append an implicit end-of-role ('eor') marker: a block holding a single
    # always-tagged 'meta: role_complete' task that signals the role is done
    eor_block = Block(play=play)
    eor_block._loader = self._loader
    eor_block._role = self
    eor_block._variable_manager = self._variable_manager
    eor_block.run_once = False

    eor_task = Task(block=eor_block)
    eor_task._role = self
    eor_task.action = 'meta'
    eor_task.args = {'_raw_params': 'role_complete'}
    eor_task.implicit = True
    eor_task.tags = ['always']
    eor_task.when = True

    eor_block.block = [eor_task]
    block_list.append(eor_block)

    return block_list
def set_loader(self, loader):
self._loader = loader
for parent in self._parents:
parent.set_loader(loader)
for dep in self.get_direct_dependencies():
dep.set_loader(loader)
| Role |
python | facebook__pyre-check | scripts/analyze_leaks.py | {
"start": 807,
"end": 1021
} | class ____:
error_message: str
bad_value: JSON
def to_json(self) -> JSON:
return {"error_message": self.error_message, "bad_value": self.bad_value}
@dataclass(frozen=True)
| LeakAnalysisScriptError |
python | fluentpython__example-code-2e | 03-dict-set/strkeydict0.py | {
"start": 609,
"end": 1054
} | class ____(dict): # <1>
def __missing__(self, key):
if isinstance(key, str): # <2>
raise KeyError(key)
return self[str(key)] # <3>
def get(self, key, default=None):
try:
return self[key] # <4>
except KeyError:
return default # <5>
def __contains__(self, key):
return key in self.keys() or str(key) in self.keys() # <6>
# end::STRKEYDICT0[]
| StrKeyDict0 |
python | mlflow__mlflow | dev/check_function_signatures.py | {
"start": 12530,
"end": 13094
} | class ____:
base_branch: str
def parse_args() -> Args:
parser = argparse.ArgumentParser(
description="Check for breaking changes in Python function signatures"
)
parser.add_argument("--base-branch", default=os.environ.get("GITHUB_BASE_REF", "master"))
args = parser.parse_args()
return Args(base_branch=args.base_branch)
def main():
args = parse_args()
errors = compare_signatures(args.base_branch)
for error in errors:
print(error.format(github=is_github_actions()))
if __name__ == "__main__":
main()
| Args |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/hooks/test_kubernetes.py | {
"start": 2417,
"end": 2978
} | class ____(AirflowException): ...
DEFAULT_CONN_ID = "kubernetes_default"
@pytest.fixture
def remove_default_conn(monkeypatch):
original_env_var = os.environ.get(f"AIRFLOW_CONN_{DEFAULT_CONN_ID.upper()}")
# remove the env variable to simulate no default connection
if original_env_var:
monkeypatch.delenv(f"AIRFLOW_CONN_{DEFAULT_CONN_ID.upper()}")
yield
# restore the original env variable
if original_env_var:
monkeypatch.setenv(f"AIRFLOW_CONN_{DEFAULT_CONN_ID.upper()}", original_env_var)
| DeprecationRemovalRequired |
python | getsentry__sentry | src/sentry/auth/providers/saml2/generic/provider.py | {
"start": 476,
"end": 825
} | class ____(SAML2Provider):
name = "SAML2"
key = "saml2"
def get_configure_view(
self,
) -> Callable[[HttpRequest, RpcOrganization, RpcAuthProvider], DeferredResponse]:
return saml2_configure_view
def get_saml_setup_pipeline(self) -> list[AuthView]:
return [SelectIdP(), MapAttributes()]
| GenericSAML2Provider |
python | scipy__scipy | scipy/integrate/_quadpack_py.py | {
"start": 52660,
"end": 52820
} | class ____:
def __init__(self, opt):
self.opt = opt
def __call__(self, *args):
"""Return stored dict."""
return self.opt
| _OptFunc |
python | tiangolo__fastapi | tests/test_custom_middleware_exception.py | {
"start": 239,
"end": 2892
} | class ____:
"""Content size limiting middleware for ASGI applications
Args:
app (ASGI application): ASGI application
max_content_size (optional): the maximum content size allowed in bytes, None for no limit
"""
def __init__(self, app: APIRouter, max_content_size: Optional[int] = None):
self.app = app
self.max_content_size = max_content_size
def receive_wrapper(self, receive):
received = 0
async def inner():
nonlocal received
message = await receive()
if message["type"] != "http.request":
return message # pragma: no cover
body_len = len(message.get("body", b""))
received += body_len
if received > self.max_content_size:
raise HTTPException(
422,
detail={
"name": "ContentSizeLimitExceeded",
"code": 999,
"message": "File limit exceeded",
},
)
return message
return inner
async def __call__(self, scope, receive, send):
if scope["type"] != "http" or self.max_content_size is None:
await self.app(scope, receive, send)
return
wrapper = self.receive_wrapper(receive)
await self.app(scope, wrapper, send)
@router.post("/middleware")
def run_middleware(file: UploadFile = File(..., description="Big File")):
return {"message": "OK"}
app.include_router(router)
app.add_middleware(ContentSizeLimitMiddleware, max_content_size=2**8)
client = TestClient(app)
def test_custom_middleware_exception(tmp_path: Path):
default_pydantic_max_size = 2**16
path = tmp_path / "test.txt"
path.write_bytes(b"x" * (default_pydantic_max_size + 1))
with client:
with open(path, "rb") as file:
response = client.post("/middleware", files={"file": file})
assert response.status_code == 422, response.text
assert response.json() == {
"detail": {
"name": "ContentSizeLimitExceeded",
"code": 999,
"message": "File limit exceeded",
}
}
def test_custom_middleware_exception_not_raised(tmp_path: Path):
path = tmp_path / "test.txt"
path.write_bytes(b"<file content>")
with client:
with open(path, "rb") as file:
response = client.post("/middleware", files={"file": file})
assert response.status_code == 200, response.text
assert response.json() == {"message": "OK"}
| ContentSizeLimitMiddleware |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/ext/orderinglist/orderinglist_one.py | {
"start": 998,
"end": 1366
} | class ____(Base):
__tablename__ = "bullet"
id: Mapped[int] = mapped_column(primary_key=True)
slide_id: Mapped[int] = mapped_column(ForeignKey("slide.id"))
position: Mapped[int]
text: Mapped[str]
slide = Slide()
if TYPE_CHECKING:
assert_type(pos_from_text, Callable[[], OrderingList[Bullet]])
assert_type(slide.bullets, list[Bullet])
| Bullet |
python | tiangolo__fastapi | tests/test_union_inherited_body.py | {
"start": 244,
"end": 5311
} | class ____(Item):
age: int
@app.post("/items/")
def save_union_different_body(item: Union[ExtendedItem, Item]):
return {"item": item}
client = TestClient(app)
def test_post_extended_item():
response = client.post("/items/", json={"name": "Foo", "age": 5})
assert response.status_code == 200, response.text
assert response.json() == {"item": {"name": "Foo", "age": 5}}
def test_post_item():
response = client.post("/items/", json={"name": "Foo"})
assert response.status_code == 200, response.text
assert response.json() == {"item": {"name": "Foo"}}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Save Union Different Body",
"operationId": "save_union_different_body_items__post",
"requestBody": {
"content": {
"application/json": {
"schema": {
"title": "Item",
"anyOf": [
{"$ref": "#/components/schemas/ExtendedItem"},
{"$ref": "#/components/schemas/Item"},
],
}
}
},
"required": True,
},
}
}
},
"components": {
"schemas": {
"Item": {
"title": "Item",
"type": "object",
"properties": {
"name": IsDict(
{
"title": "Name",
"anyOf": [{"type": "string"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Name", "type": "string"}
)
},
},
"ExtendedItem": {
"title": "ExtendedItem",
"required": ["age"],
"type": "object",
"properties": {
"name": IsDict(
{
"title": "Name",
"anyOf": [{"type": "string"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Name", "type": "string"}
),
"age": {"title": "Age", "type": "integer"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
}
| ExtendedItem |
python | coleifer__peewee | tests/fields.py | {
"start": 27220,
"end": 27636
} | class ____(ModelTestCase):
requires = [BigModel]
def test_big_auto_field(self):
b1 = BigModel.create(data='b1')
b2 = BigModel.create(data='b2')
b1_db = BigModel.get(BigModel.pk == b1.pk)
b2_db = BigModel.get(BigModel.pk == b2.pk)
self.assertTrue(b1_db.pk < b2_db.pk)
self.assertTrue(b1_db.data, 'b1')
self.assertTrue(b2_db.data, 'b2')
| TestBigAutoField |
python | huggingface__transformers | src/transformers/models/roformer/modeling_roformer.py | {
"start": 43475,
"end": 44361
} | class ____(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
self.config = config
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = ACT2FN[self.config.hidden_act](x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@auto_docstring(
custom_intro="""
RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
"""
)
| RoFormerClassificationHead |
python | django__django | tests/field_subclassing/fields.py | {
"start": 90,
"end": 199
} | class ____(models.TextField):
def db_type(self, connection):
return "custom_field"
| CustomTypedField |
python | pytransitions__transitions | tests/test_async.py | {
"start": 28883,
"end": 29173
} | class ____(TestAsync):
def setUp(self):
super(TestAsync, self).setUp()
self.machine_cls = AsyncGraphMachine # type: Type[AsyncGraphMachine]
self.machine = self.machine_cls(states=['A', 'B', 'C'], transitions=[['go', 'A', 'B']], initial='A')
| TestAsyncGraphMachine |
python | pytorch__pytorch | test/distributed/tensor/test_dtensor_export.py | {
"start": 6036,
"end": 19482
} | class ____(TestCase):
def tearDown(self):
super().tearDown()
dist.destroy_process_group()
def setUp(self):
super().setUp()
self.world_size = 8
store = FakeStore()
dist.init_process_group(
backend="fake", rank=0, world_size=self.world_size, store=store
)
self.device_type = "cuda"
def _run_test(self, export_fn, test_annotation=False):
dp_degree = 2
tp_degree = self.world_size // dp_degree
# 2-D mesh is [dp, tp]
mesh_2d = init_device_mesh(
self.device_type,
mesh_shape=(dp_degree, tp_degree),
mesh_dim_names=["dp", "tp"],
)
model = None
if test_annotation:
model = SimpleModelAnnotated(self.device_type)
else:
model = SimpleModel(self.device_type)
parallelize_plan = {
"mlp_0.net1": ColwiseParallel(),
"mlp_0.net2": RowwiseParallel(),
"mlp_1.net1": ColwiseParallel(),
"mlp_1.net2": RowwiseParallel(),
}
tp_model = parallelize_module(model, mesh_2d["tp"], parallelize_plan)
inp = torch.rand(20, 10, device=self.device_type)
inputs = (distribute_tensor(inp, mesh_2d["tp"], placements=[Replicate()]),)
joint_gm = export_fn(tp_model, inputs)
fw_gm, bw_gm = min_cut_rematerialization_partition(
joint_gm, None, num_fwd_outputs=1
)
self.assertTrue(
_count_op(joint_gm, torch.ops._c10d_functional.all_reduce.default),
3,
)
self.assertTrue(
_count_op(fw_gm, torch.ops._c10d_functional.all_reduce.default),
2,
)
self.assertTrue(
_count_op(bw_gm, torch.ops._c10d_functional.all_reduce.default),
1,
)
if test_annotation:
def has_tag(node):
return "custom" in node.meta and node.meta["custom"] == {"pp_stage": 0}
def marked_nodes(gm):
return [
node.name
for node in gm.graph.nodes
if has_tag(node) and node.op == "call_function"
]
def unmarked_nodes(gm):
return [
node.name
for node in gm.graph.nodes
if not has_tag(node) and node.op == "call_function"
]
marked_nodes_fw = [
"t",
"addmm",
"view",
"relu",
"view_1",
"t_1",
"div",
"addmm_1",
"all_reduce",
"wait_tensor",
"view_2",
"t_12",
]
unmarked_nodes_fw = [
"view_3",
"t_2",
"addmm_2",
"view_4",
"relu_1",
"view_5",
"t_3",
"div_1",
"addmm_3",
"all_reduce_1",
"wait_tensor_1",
"view_6",
"t_4",
"t_8",
]
marked_nodes_bw = [
"mm_4",
"t_13",
"view_1",
"mm_5",
"t_14",
"sum_3",
"view_9",
"t_15",
"detach",
"detach_3",
"threshold_backward_1",
"t_16",
"mm_6",
"t_17",
"sum_4",
"view_10",
"t_18",
]
unmarked_nodes_bw = [
"mm",
"t_5",
"view_5",
"mm_1",
"t_6",
"sum_1",
"view_7",
"t_7",
"detach_1",
"detach_2",
"threshold_backward",
"mm_2",
"t_9",
"mm_3",
"t_10",
"sum_2",
"view_8",
"t_11",
"all_reduce_2",
"wait_tensor_2",
]
self.assertEqual(marked_nodes(fw_gm), marked_nodes_fw)
self.assertEqual(unmarked_nodes(fw_gm), unmarked_nodes_fw)
self.assertEqual(marked_nodes(bw_gm), marked_nodes_bw)
self.assertEqual(unmarked_nodes(bw_gm), unmarked_nodes_bw)
self.assertEqual(
set(marked_nodes(joint_gm)), set(marked_nodes_fw + marked_nodes_bw)
)
self.assertEqual(
set(unmarked_nodes(joint_gm)),
set(unmarked_nodes_fw + unmarked_nodes_bw),
)
@parametrize(
"export_fn",
[
graph_capture_and_aot_export_joint_with_descriptors_v2,
aot_export_joint_with_descriptors_alone,
],
)
def test_export_parallelize_module_with_dtensor_input(
self,
export_fn,
):
self._run_test(export_fn)
# aot_export_joint_with_descriptors on strict-exported exported_program.module()
# is producing a joint graph with backward region missing
@unittest.expectedFailure
def test_strict_export_parallelize_module_with_dtensor_input(self):
self._run_test(strict_export_and_aot_export_joint_with_descriptors)
def test_annotate_aot_export_joint_with_descriptors_alone(self):
self._run_test(aot_export_joint_with_descriptors_alone, True)
@parametrize(
"export_fn_with_answer",
[
(
graph_capture_and_aot_export_joint_with_descriptors_v2,
"[[4, 10], [4], [10, 4], [10], [4, 10], [4], [10, 4], [10], [s64, 10], [s64, 10]]",
),
],
)
def test_dynamic_shapes(self, export_fn_with_answer):
export_fn, answer = export_fn_with_answer
dp_degree = 2
tp_degree = self.world_size // dp_degree
# 2-D mesh is [dp, tp]
mesh_2d = init_device_mesh(
self.device_type,
mesh_shape=(dp_degree, tp_degree),
mesh_dim_names=["dp", "tp"],
)
model = SimpleModelDynamicShapes(self.device_type)
parallelize_plan = {
"mlp_0.net1": ColwiseParallel(),
"mlp_0.net2": RowwiseParallel(),
"mlp_1.net1": ColwiseParallel(),
"mlp_1.net2": RowwiseParallel(),
}
tp_model = parallelize_module(model, mesh_2d["tp"], parallelize_plan)
inp = torch.rand(20, 10, device=self.device_type)
inp_dtensor = distribute_tensor(inp, mesh_2d["tp"], placements=[Replicate()])
torch._dynamo.mark_dynamic(inp_dtensor, 0, min=5, max=100)
inputs = (inp_dtensor,)
joint_gm = export_fn(tp_model, inputs)
res = []
for node in joint_gm.graph.nodes:
if node.op == "placeholder":
assert "val" in node.meta
fake_val = node.meta["val"]
if isinstance(fake_val, torch._subclasses.fake_tensor.FakeTensor):
res.append(list(fake_val.shape))
self.assertEqual(str(res), answer)
@parametrize(
"export_fn",
[
dynamo_graph_capture_for_export,
],
)
def test_einsum_dtensor_export(self, export_fn):
"""Test exporting a model with einsum that has DTensor inputs/outputs with side effects"""
world_size = 4
# Create device mesh
device_mesh = init_device_mesh(self.device_type, mesh_shape=(world_size,))
model = EinsumModel()
x = torch.randn(4, 8, 16)
x_dtensor = distribute_tensor(x, device_mesh, placements=[Shard(0)])
# y: [16, 16] replicated
y = torch.randn(16, 16)
z = torch.randn(16, 16)
y_dtensor = distribute_tensor(y, device_mesh, placements=[Replicate()])
z_dtensor = DTensor.from_local(z, device_mesh, placements=[Partial()])
inputs = (x_dtensor, y_dtensor, z_dtensor)
# Run model to verify it works
output = model(*inputs)
gm = export_fn(model)(*inputs)
output_gm = gm(*inputs)
self.assertEqual(output, output_gm)
@parametrize(
"export_fn",
[
graph_capture_and_aot_export_joint_with_descriptors_v2,
],
)
def test_flex_attention_dtensor_export(self, export_fn):
device_mesh = init_device_mesh(self.device_type, mesh_shape=(self.world_size,))
model = FlexAttentionModel(self.device_type)
# Parallelize the model: shard on head dimension
# proj_q, proj_k, proj_v are colwise parallel (output is sharded on head dimension)
# proj_out is rowwise parallel (input is sharded, output needs reduction)
parallelize_plan = {
"proj_q": ColwiseParallel(),
"proj_k": ColwiseParallel(),
"proj_v": ColwiseParallel(),
"proj_out": RowwiseParallel(),
}
tp_model = parallelize_module(model, device_mesh, parallelize_plan)
batch_size = 4
seq_len = 64
embed_dim = 16
num_heads = 8
# Input tensor replicated across all devices
inp = torch.randn(batch_size, seq_len, embed_dim, device=self.device_type)
inputs = (distribute_tensor(inp, device_mesh, placements=[Replicate()]),)
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
block_mask = create_block_mask(
causal_mask,
batch_size,
num_heads,
seq_len,
seq_len,
device=self.device_type,
)
flex_kwargs = {"block_mask": block_mask}
joint_gm = export_fn(tp_model, inputs, flex_kwargs)
self.assertTrue(
_count_op(joint_gm, torch.ops.higher_order.flex_attention),
1,
)
self.assertTrue(
_count_op(joint_gm, torch.ops.higher_order.flex_attention_backward),
2,
)
def test_union_typed_annotation(self):
def fn(leaf: torch.Tensor | DTensor):
def nest_fn(leaf: torch.Tensor | DTensor):
# def nest_fn(leaf: Union[torch.Tensor, DTensor]): # this works
if isinstance(leaf, DTensor):
leaf = leaf.to_local()
return leaf
return nest_fn(leaf) + 1
z = torch.randn(16, 16)
gm = graph_capture_and_aot_export_joint_with_descriptors_v2(fn, (z,))
self.assertEqual(fn(z), gm(z)[0])
def test_dtensor_data_dependent_index_and_slice(self):
device_mesh = init_device_mesh(self.device_type, mesh_shape=(self.world_size,))
class Foo(torch.nn.Module):
def forward(self, x, y):
return x[y]
x = torch.randn(10)
y = torch.randint(1, (10,)).bool()
x_dt = distribute_tensor(x, device_mesh, placements=[Replicate()])
y_dt = distribute_tensor(y, device_mesh, placements=[Replicate()])
dynamo_graph_capture_for_export(Foo())(x_dt, y_dt)
class Bar(torch.nn.Module):
def forward(self, x):
val = torch.clamp(x.max(), min=1).item()
torch._check(val >= 1)
return x[:val]
x = torch.randint(1000, (4, 64, 16))
x_dt = distribute_tensor(x, device_mesh, placements=[Replicate()])
gm = dynamo_graph_capture_for_export(Bar())(x_dt)
self.assertExpectedInline(
str(gm.graph).strip(),
"""\
graph():
%l_x_ : torch.distributed.tensor.DTensor [num_users=2] = placeholder[target=L_x_]
%max_1 : [num_users=1] = call_method[target=max](args = (%l_x_,), kwargs = {})
%clamp : [num_users=1] = call_function[target=torch.clamp](args = (%max_1,), kwargs = {min: 1})
%item : [num_users=2] = call_method[target=item](args = (%clamp,), kwargs = {})
%ge_1 : [num_users=1] = call_function[target=operator.ge](args = (%item, 1), kwargs = {})
%_assert_scalar_default : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge_1, Runtime assertion failed for expression u0 >= 1 on node 'ge_1'), kwargs = {})
%getitem : [num_users=2] = call_function[target=operator.getitem](args = (%l_x_, slice(None, item, None)), kwargs = {})
%getattr_1 : [num_users=1] = call_function[target=builtins.getattr](args = (%getitem, _local_tensor), kwargs = {})
%sym_size_int : [num_users=2] = call_function[target=torch.ops.aten.sym_size.int](args = (%getattr_1, 0), kwargs = {})
%ge_2 : [num_users=1] = call_function[target=operator.ge](args = (%sym_size_int, 0), kwargs = {})
%_assert_scalar_default_1 : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%ge_2, Runtime assertion failed for expression u2 >= 0 on node 'ge_2'), kwargs = {})
%le : [num_users=1] = call_function[target=operator.le](args = (%sym_size_int, 4), kwargs = {})
%_assert_scalar_default_2 : [num_users=0] = call_function[target=torch.ops.aten._assert_scalar.default](args = (%le, Runtime assertion failed for expression u2 <= 4 on node 'le'), kwargs = {})
return (getitem,)""", # noqa: B950
)
instantiate_parametrized_tests(DTensorExportTest)
if __name__ == "__main__":
run_tests()
| DTensorExportTest |
python | pytest-dev__pytest | testing/test_mark.py | {
"start": 369,
"end": 14690
} | class ____:
@pytest.mark.parametrize("attr", ["mark", "param"])
def test_pytest_exists_in_namespace_all(self, attr: str) -> None:
module = sys.modules["pytest"]
assert attr in module.__all__
def test_pytest_mark_notcallable(self) -> None:
mark = MarkGenerator(_ispytest=True)
with pytest.raises(TypeError):
mark() # type: ignore[operator]
def test_mark_with_param(self):
def some_function(abc):
pass
class SomeClass:
pass
assert pytest.mark.foo(some_function) is some_function
marked_with_args = pytest.mark.foo.with_args(some_function)
assert marked_with_args is not some_function
assert pytest.mark.foo(SomeClass) is SomeClass
assert pytest.mark.foo.with_args(SomeClass) is not SomeClass # type: ignore[comparison-overlap]
def test_pytest_mark_name_starts_with_underscore(self) -> None:
mark = MarkGenerator(_ispytest=True)
with pytest.raises(AttributeError):
_ = mark._some_name
def test_marked_class_run_twice(pytester: Pytester) -> None:
"""Test fails file is run twice that contains marked class.
See issue#683.
"""
py_file = pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize('abc', [1, 2, 3])
class Test1(object):
def test_1(self, abc):
assert abc in [1, 2, 3]
"""
)
file_name = os.path.basename(py_file)
rec = pytester.inline_run("--keep-duplicates", file_name, file_name)
rec.assertoutcome(passed=6)
def test_ini_markers(pytester: Pytester) -> None:
pytester.makeini(
"""
[pytest]
markers =
a1: this is a webtest marker
a2: this is a smoke marker
"""
)
pytester.makepyfile(
"""
def test_markers(pytestconfig):
markers = pytestconfig.getini("markers")
print(markers)
assert len(markers) >= 2
assert markers[0].startswith("a1:")
assert markers[1].startswith("a2:")
"""
)
rec = pytester.inline_run()
rec.assertoutcome(passed=1)
def test_markers_option(pytester: Pytester) -> None:
pytester.makeini(
"""
[pytest]
markers =
a1: this is a webtest marker
a1some: another marker
nodescription
"""
)
result = pytester.runpytest("--markers")
result.stdout.fnmatch_lines(
["*a1*this is a webtest*", "*a1some*another marker", "*nodescription*"]
)
def test_ini_markers_whitespace(pytester: Pytester) -> None:
pytester.makeini(
"""
[pytest]
markers =
a1 : this is a whitespace marker
"""
)
pytester.makepyfile(
"""
import pytest
@pytest.mark.a1
def test_markers():
assert True
"""
)
rec = pytester.inline_run("--strict-markers", "-m", "a1")
rec.assertoutcome(passed=1)
def test_marker_without_description(pytester: Pytester) -> None:
pytester.makefile(
".cfg",
setup="""
[tool:pytest]
markers=slow
""",
)
pytester.makeconftest(
"""
import pytest
pytest.mark.xfail('FAIL')
"""
)
ftdir = pytester.mkdir("ft1_dummy")
pytester.path.joinpath("conftest.py").replace(ftdir.joinpath("conftest.py"))
rec = pytester.runpytest("--strict-markers")
rec.assert_outcomes()
def test_markers_option_with_plugin_in_current_dir(pytester: Pytester) -> None:
pytester.makeconftest('pytest_plugins = "flip_flop"')
pytester.makepyfile(
flip_flop="""\
def pytest_configure(config):
config.addinivalue_line("markers", "flip:flop")
def pytest_generate_tests(metafunc):
try:
mark = metafunc.function.flipper
except AttributeError:
return
metafunc.parametrize("x", (10, 20))"""
)
pytester.makepyfile(
"""\
import pytest
@pytest.mark.flipper
def test_example(x):
assert x"""
)
result = pytester.runpytest("--markers")
result.stdout.fnmatch_lines(["*flip*flop*"])
def test_mark_on_pseudo_function(pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.r(lambda x: 0/0)
def test_hello():
pass
"""
)
reprec = pytester.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize(
"option_name", ["--strict-markers", "--strict", "strict_markers", "strict"]
)
def test_strict_prohibits_unregistered_markers(
pytester: Pytester, option_name: str
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.unregisteredmark
def test_hello():
pass
"""
)
if option_name in ("strict_markers", "strict"):
pytester.makeini(
f"""
[pytest]
{option_name} = true
"""
)
result = pytester.runpytest()
else:
result = pytester.runpytest(option_name)
assert result.ret != 0
result.stdout.fnmatch_lines(
["'unregisteredmark' not found in `markers` configuration option"]
)
@pytest.mark.parametrize(
("expr", "expected_passed"),
[
("xyz", ["test_one"]),
("((( xyz)) )", ["test_one"]),
("not not xyz", ["test_one"]),
("xyz and xyz2", []),
("xyz2", ["test_two"]),
("xyz or xyz2", ["test_one", "test_two"]),
],
)
def test_mark_option(
expr: str, expected_passed: list[str | None], pytester: Pytester
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.xyz
def test_one():
pass
@pytest.mark.xyz2
def test_two():
pass
"""
)
rec = pytester.inline_run("-m", expr)
passed, _skipped, _fail = rec.listoutcomes()
passed_str = [x.nodeid.split("::")[-1] for x in passed]
assert passed_str == expected_passed
@pytest.mark.parametrize(
("expr", "expected_passed"),
[
("car(color='red')", ["test_one"]),
("car(color='red') or car(color='blue')", ["test_one", "test_two"]),
("car and not car(temp=5)", ["test_one", "test_three"]),
("car(temp=4)", ["test_one"]),
("car(temp=4) or car(temp=5)", ["test_one", "test_two"]),
("car(temp=4) and car(temp=5)", []),
("car(temp=-5)", ["test_three"]),
("car(ac=True)", ["test_one"]),
("car(ac=False)", ["test_two"]),
("car(ac=None)", ["test_three"]), # test NOT_NONE_SENTINEL
],
ids=str,
)
def test_mark_option_with_kwargs(
expr: str, expected_passed: list[str | None], pytester: Pytester
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.car
@pytest.mark.car(ac=True)
@pytest.mark.car(temp=4)
@pytest.mark.car(color="red")
def test_one():
pass
@pytest.mark.car
@pytest.mark.car(ac=False)
@pytest.mark.car(temp=5)
@pytest.mark.car(color="blue")
def test_two():
pass
@pytest.mark.car
@pytest.mark.car(ac=None)
@pytest.mark.car(temp=-5)
def test_three():
pass
"""
)
rec = pytester.inline_run("-m", expr)
passed, _skipped, _fail = rec.listoutcomes()
passed_str = [x.nodeid.split("::")[-1] for x in passed]
assert passed_str == expected_passed
@pytest.mark.parametrize(
("expr", "expected_passed"),
[("interface", ["test_interface"]), ("not interface", ["test_nointer"])],
)
def test_mark_option_custom(
expr: str, expected_passed: list[str], pytester: Pytester
) -> None:
pytester.makeconftest(
"""
import pytest
def pytest_collection_modifyitems(items):
for item in items:
if "interface" in item.nodeid:
item.add_marker(pytest.mark.interface)
"""
)
pytester.makepyfile(
"""
def test_interface():
pass
def test_nointer():
pass
"""
)
rec = pytester.inline_run("-m", expr)
passed, _skipped, _fail = rec.listoutcomes()
passed_str = [x.nodeid.split("::")[-1] for x in passed]
assert passed_str == expected_passed
@pytest.mark.parametrize(
("expr", "expected_passed"),
[
("interface", ["test_interface"]),
("not interface", ["test_nointer", "test_pass", "test_1", "test_2"]),
("pass", ["test_pass"]),
("not pass", ["test_interface", "test_nointer", "test_1", "test_2"]),
("not not not (pass)", ["test_interface", "test_nointer", "test_1", "test_2"]),
("1 or 2", ["test_1", "test_2"]),
("not (1 or 2)", ["test_interface", "test_nointer", "test_pass"]),
],
)
def test_keyword_option_custom(
expr: str, expected_passed: list[str], pytester: Pytester
) -> None:
pytester.makepyfile(
"""
def test_interface():
pass
def test_nointer():
pass
def test_pass():
pass
def test_1():
pass
def test_2():
pass
"""
)
rec = pytester.inline_run("-k", expr)
passed, _skipped, _fail = rec.listoutcomes()
passed_str = [x.nodeid.split("::")[-1] for x in passed]
assert passed_str == expected_passed
def test_keyword_option_considers_mark(pytester: Pytester) -> None:
pytester.copy_example("marks/marks_considered_keywords")
rec = pytester.inline_run("-k", "foo")
passed = rec.listoutcomes()[0]
assert len(passed) == 1
@pytest.mark.parametrize(
("expr", "expected_passed"),
[
("None", ["test_func[None]"]),
("[1.3]", ["test_func[1.3]"]),
("2-3", ["test_func[2-3]"]),
],
)
def test_keyword_option_parametrize(
expr: str, expected_passed: list[str], pytester: Pytester
) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
def test_func(arg):
pass
"""
)
rec = pytester.inline_run("-k", expr)
passed, _skipped, _fail = rec.listoutcomes()
passed_str = [x.nodeid.split("::")[-1] for x in passed]
assert passed_str == expected_passed
def test_parametrize_with_module(pytester: Pytester) -> None:
pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize("arg", [pytest,])
def test_func(arg):
pass
"""
)
rec = pytester.inline_run()
passed, _skipped, _fail = rec.listoutcomes()
expected_id = "test_func[" + pytest.__name__ + "]"
assert passed[0].nodeid.split("::")[-1] == expected_id
@pytest.mark.parametrize(
("expr", "expected_error"),
[
(
"foo or",
"at column 7: expected not OR left parenthesis OR identifier; got end of input",
),
(
"foo or or",
"at column 8: expected not OR left parenthesis OR identifier; got or",
),
(
"(foo",
"at column 5: expected right parenthesis; got end of input",
),
(
"foo bar",
"at column 5: expected end of input; got identifier",
),
(
"or or",
"at column 1: expected not OR left parenthesis OR identifier; got or",
),
(
"not or",
"at column 5: expected not OR left parenthesis OR identifier; got or",
),
(
"nonexistent_mark(non_supported='kwarg')",
"Keyword expressions do not support call parameters",
),
],
)
def test_keyword_option_wrong_arguments(
expr: str, expected_error: str, pytester: Pytester, capsys
) -> None:
pytester.makepyfile(
"""
def test_func(arg):
pass
"""
)
pytester.inline_run("-k", expr)
err = capsys.readouterr().err
assert expected_error in err
def test_parametrized_collected_from_command_line(pytester: Pytester) -> None:
"""Parametrized test not collected if test named specified in command
line issue#649."""
py_file = pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
def test_func(arg):
pass
"""
)
file_name = os.path.basename(py_file)
rec = pytester.inline_run(file_name + "::" + "test_func")
rec.assertoutcome(passed=3)
def test_parametrized_collect_with_wrong_args(pytester: Pytester) -> None:
"""Test collect parametrized func with wrong number of args."""
py_file = pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize('foo, bar', [(1, 2, 3)])
def test_func(foo, bar):
pass
"""
)
result = pytester.runpytest(py_file)
result.stdout.fnmatch_lines(
[
'test_parametrized_collect_with_wrong_args.py::test_func: in "parametrize" the number of names (2):',
" ['foo', 'bar']",
"must be equal to the number of values (3):",
" (1, 2, 3)",
]
)
def test_parametrized_with_kwargs(pytester: Pytester) -> None:
"""Test collect parametrized func with wrong number of args."""
py_file = pytester.makepyfile(
"""
import pytest
@pytest.fixture(params=[1,2])
def a(request):
return request.param
@pytest.mark.parametrize(argnames='b', argvalues=[1, 2])
def test_func(a, b):
pass
"""
)
result = pytester.runpytest(py_file)
assert result.ret == 0
def test_parametrize_iterator(pytester: Pytester) -> None:
"""`parametrize` should work with generators (#5354)."""
py_file = pytester.makepyfile(
"""\
import pytest
def gen():
yield 1
yield 2
yield 3
@pytest.mark.parametrize('a', gen())
def test(a):
assert a >= 1
"""
)
result = pytester.runpytest(py_file)
assert result.ret == 0
# should not skip any tests
result.stdout.fnmatch_lines(["*3 passed*"])
| TestMark |
python | jazzband__tablib | tests/test_tablib_dbfpy_packages_utils.py | {
"start": 624,
"end": 2251
} | class ____(unittest.TestCase):
"""dbfpy.utils.getDate test cases."""
def test_getDate_none(self):
# Arrange
value = None
# Act
output = utils.getDate(value)
# Assert
self.assertIsInstance(output, dt.date)
def test_getDate_datetime_date(self):
# Arrange
value = dt.date(2019, 10, 19)
# Act
output = utils.getDate(value)
# Assert
self.assertIsInstance(output, dt.date)
self.assertEqual(output, value)
def test_getDate_datetime_datetime(self):
# Arrange
value = dt.datetime(2019, 10, 19, 12, 00, 00)
# Act
output = utils.getDate(value)
# Assert
self.assertIsInstance(output, dt.date)
self.assertEqual(output, value)
def test_getDate_datetime_timestamp(self):
# Arrange
value = 1571515306
# Act
output = utils.getDate(value)
# Assert
self.assertIsInstance(output, dt.date)
self.assertEqual(output, dt.date(2019, 10, 19))
def test_getDate_datetime_string_yyyy_mm_dd(self):
# Arrange
value = "20191019"
# Act
output = utils.getDate(value)
# Assert
self.assertIsInstance(output, dt.date)
self.assertEqual(output, dt.date(2019, 10, 19))
def test_getDate_datetime_string_yymmdd(self):
# Arrange
value = "191019"
# Act
output = utils.getDate(value)
# Assert
self.assertIsInstance(output, dt.date)
self.assertEqual(output, dt.date(2019, 10, 19))
| UtilsGetDateTestCase |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_capacity_request_policy_range.py | {
"start": 383,
"end": 6247
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'max': 'str',
'min': 'str',
'step': 'str'
}
attribute_map = {
'max': 'max',
'min': 'min',
'step': 'step'
}
def __init__(self, max=None, min=None, step=None, local_vars_configuration=None): # noqa: E501
"""V1beta1CapacityRequestPolicyRange - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._max = None
self._min = None
self._step = None
self.discriminator = None
if max is not None:
self.max = max
self.min = min
if step is not None:
self.step = step
@property
def max(self):
"""Gets the max of this V1beta1CapacityRequestPolicyRange. # noqa: E501
Max defines the upper limit for capacity that can be requested. Max must be less than or equal to the capacity value. Min and requestPolicy.default must be less than or equal to the maximum. # noqa: E501
:return: The max of this V1beta1CapacityRequestPolicyRange. # noqa: E501
:rtype: str
"""
return self._max
@max.setter
def max(self, max):
"""Sets the max of this V1beta1CapacityRequestPolicyRange.
Max defines the upper limit for capacity that can be requested. Max must be less than or equal to the capacity value. Min and requestPolicy.default must be less than or equal to the maximum. # noqa: E501
:param max: The max of this V1beta1CapacityRequestPolicyRange. # noqa: E501
:type: str
"""
self._max = max
@property
def min(self):
"""Gets the min of this V1beta1CapacityRequestPolicyRange. # noqa: E501
Min specifies the minimum capacity allowed for a consumption request. Min must be greater than or equal to zero, and less than or equal to the capacity value. requestPolicy.default must be more than or equal to the minimum. # noqa: E501
:return: The min of this V1beta1CapacityRequestPolicyRange. # noqa: E501
:rtype: str
"""
return self._min
@min.setter
def min(self, min):
"""Sets the min of this V1beta1CapacityRequestPolicyRange.
Min specifies the minimum capacity allowed for a consumption request. Min must be greater than or equal to zero, and less than or equal to the capacity value. requestPolicy.default must be more than or equal to the minimum. # noqa: E501
:param min: The min of this V1beta1CapacityRequestPolicyRange. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and min is None: # noqa: E501
raise ValueError("Invalid value for `min`, must not be `None`") # noqa: E501
self._min = min
@property
def step(self):
"""Gets the step of this V1beta1CapacityRequestPolicyRange. # noqa: E501
Step defines the step size between valid capacity amounts within the range. Max (if set) and requestPolicy.default must be a multiple of Step. Min + Step must be less than or equal to the capacity value. # noqa: E501
:return: The step of this V1beta1CapacityRequestPolicyRange. # noqa: E501
:rtype: str
"""
return self._step
@step.setter
def step(self, step):
"""Sets the step of this V1beta1CapacityRequestPolicyRange.
Step defines the step size between valid capacity amounts within the range. Max (if set) and requestPolicy.default must be a multiple of Step. Min + Step must be less than or equal to the capacity value. # noqa: E501
:param step: The step of this V1beta1CapacityRequestPolicyRange. # noqa: E501
:type: str
"""
self._step = step
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1CapacityRequestPolicyRange):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1CapacityRequestPolicyRange):
return True
return self.to_dict() != other.to_dict()
| V1beta1CapacityRequestPolicyRange |
python | dagster-io__dagster | python_modules/dagster-pipes/dagster_pipes/__init__.py | {
"start": 4041,
"end": 5904
} | class ____(TypedDict):
message: str
stack: Sequence[str]
# class name of Exception object in python, left as optional for flexibility
name: Optional[str]
# https://docs.python.org/3/library/exceptions.html#exception-context
# exception that explicitly led to this exception
cause: Optional["PipesException"]
# exception that being handled when this exception was raised
context: Optional["PipesException"]
# ########################
# ##### UTIL
# ########################
ESCAPE_CHARACTER = "\\"
def de_escape_asset_key(asset_key: str) -> str:
r"""Removes the backward slashes escape characters from the asset key.
Example: "foo\/bar" -> "foo/bar"
"""
# make sure to keep any standalone backslashes since they may be
# coming from the original (non-escaped) key
return asset_key.replace(ESCAPE_CHARACTER + "/", "/")
def to_assey_key_path(asset_key: str) -> list[str]:
"""Converts an asset key to a collection of key parts.
Forward slash (except escaped) is used as separator. De-escapes the key.
"""
parts = []
current_part = []
escape_next = False
for char in asset_key:
if escape_next:
# Include escaped character (including backslash itself) in the current part
current_part.append(ESCAPE_CHARACTER + char)
escape_next = False
elif char == ESCAPE_CHARACTER:
escape_next = True
elif char == "/":
parts.append("".join(current_part))
current_part = []
else:
current_part.append(char)
# Add the final part to parts
if current_part:
parts.append("".join(current_part))
# De-escape each part, ensuring standalone backslashes remain intact
return [de_escape_asset_key(part) for part in parts]
_T = TypeVar("_T")
| PipesException |
python | nedbat__coveragepy | tests/test_coverage.py | {
"start": 38936,
"end": 41025
} | class ____(CoverageTest):
"""Tests of new syntax in Python 2.4."""
def test_function_decorators(self) -> None:
self.check_coverage(
"""\
def require_int(func):
def wrapper(arg):
assert isinstance(arg, int)
return func(arg)
return wrapper
@require_int
def p1(arg):
return arg*2
assert p1(10) == 20
""",
lines=[1, 2, 3, 4, 6, 8, 9, 10, 12],
missing="",
)
def test_function_decorators_with_args(self) -> None:
self.check_coverage(
"""\
def boost_by(extra):
def decorator(func):
def wrapper(arg):
return extra*func(arg)
return wrapper
return decorator
@boost_by(10)
def boosted(arg):
return arg*2
assert boosted(10) == 200
""",
lines=[1, 2, 3, 4, 5, 6, 8, 9, 10, 12],
missing="",
)
def test_double_function_decorators(self) -> None:
self.check_coverage(
"""\
def require_int(func):
def wrapper(arg):
assert isinstance(arg, int)
return func(arg)
return wrapper
def boost_by(extra):
def decorator(func):
def wrapper(arg):
return extra*func(arg)
return wrapper
return decorator
@require_int
@boost_by(10)
def boosted1(arg):
return arg*2
assert boosted1(10) == 200
@boost_by(10)
@require_int
def boosted2(arg):
return arg*2
assert boosted2(10) == 200
""",
lines=[1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 14, 15, 16, 17, 19, 21, 22, 23, 24, 26],
missing="",
)
| Py24Test |
python | pallets__jinja | src/jinja2/environment.py | {
"start": 57030,
"end": 57767
} | class ____:
"""The :meth:`jinja2.Environment.compile_expression` method returns an
instance of this object. It encapsulates the expression-like access
to the template with an expression it wraps.
"""
def __init__(self, template: Template, undefined_to_none: bool) -> None:
self._template = template
self._undefined_to_none = undefined_to_none
def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Any | None:
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
rv = context.vars["result"]
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
| TemplateExpression |
python | Delgan__loguru | loguru/_simple_sinks.py | {
"start": 1283,
"end": 3244
} | class ____:
"""A sink that writes log messages using the standard logging module.
Parameters
----------
handler
A logging handler instance.
"""
def __init__(self, handler):
self._handler = handler
def write(self, message):
"""Write a message using the standard logging handler.
Parameters
----------
message
The message to write.
"""
if message.record["level"].no < self._handler.level:
return
raw_record = message.record
message = str(message)
exc = raw_record["exception"]
record = logging.getLogger().makeRecord(
raw_record["name"],
raw_record["level"].no,
raw_record["file"].path,
raw_record["line"],
message,
(),
(exc.type, exc.value, exc.traceback) if exc else None,
raw_record["function"],
{"extra": raw_record["extra"]},
)
# By default, the standard logging module will format the exception and assign it to the
# "exc_text" attribute. Then, the formatted exception will be automatically appended to the
# message when the record is formatted. This is a problem, because that would cause the
# exception to be duplicated in the log message, since it's also formatted by Loguru. To
# avoid this, we set "exc_text" to a simple newline character, which will end the message.
if exc:
record.exc_text = "\n"
record.levelname = raw_record["level"].name
self._handler.handle(record)
def stop(self):
"""Close the logging handler."""
self._handler.close()
def tasks_to_complete(self):
"""Return list of tasks that need to be completed.
Returns
-------
list
Empty list as standard sink has no async tasks.
"""
return []
| StandardSink |
python | pallets__werkzeug | src/werkzeug/local.py | {
"start": 3320,
"end": 5954
} | class ____(t.Generic[T]):
"""Create a stack of context-local data. This wraps a
:class:`ContextVar` containing a :class:`list` value.
This may incur a performance penalty compared to using individual
context vars, as it has to copy data to avoid mutating the list
between nested contexts.
:param context_var: The :class:`~contextvars.ContextVar` to use as
storage for this local. If not given, one will be created.
Context vars not created at the global scope may interfere with
garbage collection.
.. versionchanged:: 2.0
Uses ``ContextVar`` instead of a custom storage implementation.
.. versionadded:: 0.6.1
"""
__slots__ = ("_storage",)
def __init__(self, context_var: ContextVar[list[T]] | None = None) -> None:
if context_var is None:
# A ContextVar not created at global scope interferes with
# Python's garbage collection. However, a local only makes
# sense defined at the global scope as well, in which case
# the GC issue doesn't seem relevant.
context_var = ContextVar(f"werkzeug.LocalStack<{id(self)}>.storage")
self._storage = context_var
def __release_local__(self) -> None:
self._storage.set([])
def push(self, obj: T) -> list[T]:
"""Add a new item to the top of the stack."""
stack = self._storage.get([]).copy()
stack.append(obj)
self._storage.set(stack)
return stack
def pop(self) -> T | None:
"""Remove the top item from the stack and return it. If the
stack is empty, return ``None``.
"""
stack = self._storage.get([])
if len(stack) == 0:
return None
rv = stack[-1]
self._storage.set(stack[:-1])
return rv
@property
def top(self) -> T | None:
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
stack = self._storage.get([])
if len(stack) == 0:
return None
return stack[-1]
def __call__(
self, name: str | None = None, *, unbound_message: str | None = None
) -> LocalProxy[t.Any]:
"""Create a :class:`LocalProxy` that accesses the top of this
local stack.
:param name: If given, the proxy access this attribute of the
top item, rather than the item itself.
:param unbound_message: The error message that the proxy will
show if the stack is empty.
"""
return LocalProxy(self, name, unbound_message=unbound_message)
| LocalStack |
python | getsentry__sentry | tests/sentry/incidents/test_metric_issue_detector_handler.py | {
"start": 567,
"end": 5998
} | class ____(BaseMetricIssueTest):
def setUp(self) -> None:
super().setUp()
self.handler = MetricIssueDetectorHandler(self.detector)
def generate_evidence_data(
self,
value: int,
detector_trigger: DataCondition,
extra_trigger: DataCondition | None = None,
):
self.query_subscription.refresh_from_db()
conditions = [
{
"id": detector_trigger.id,
"type": detector_trigger.type,
"comparison": detector_trigger.comparison,
"condition_result": detector_trigger.condition_result.value,
},
]
if extra_trigger:
conditions.append(
{
"id": extra_trigger.id,
"type": extra_trigger.type,
"comparison": extra_trigger.comparison,
"condition_result": extra_trigger.condition_result.value,
}
)
evidence_data = {
"detector_id": self.detector.id,
"value": value,
"alert_id": self.alert_rule.id,
"data_packet_source_id": str(self.query_subscription.id),
"conditions": conditions,
"data_sources": [
{
"id": str(self.data_source.id),
"organization_id": str(self.organization.id),
"type": self.data_source.type,
"source_id": str(self.query_subscription.id),
"query_obj": {
"id": str(self.query_subscription.id),
"status": self.query_subscription.status,
"subscription": self.query_subscription.subscription_id,
"snuba_query": {
"id": str(self.snuba_query.id),
"dataset": self.snuba_query.dataset,
"query": self.snuba_query.query,
"aggregate": self.snuba_query.aggregate,
"time_window": self.snuba_query.time_window,
"environment": self.environment.name,
"event_types": ["error"],
"extrapolation_mode": "unknown",
},
},
}
],
}
return evidence_data
def verify_issue_occurrence(
self, occurrence: IssueOccurrence, evidence_data: dict, detector_trigger: DataCondition
) -> None:
assert occurrence is not None
assert occurrence.issue_title == self.detector.name
assert occurrence.subtitle == self.handler.construct_title(
snuba_query=self.snuba_query,
detector_trigger=detector_trigger,
priority=detector_trigger.condition_result,
)
assert occurrence.evidence_data == evidence_data
assert occurrence.level == "error"
assert occurrence.priority == detector_trigger.condition_result
assert occurrence.assignee
assert occurrence.assignee.id == self.detector.created_by_id
def test_metric_issue_occurrence(self) -> None:
value = self.critical_detector_trigger.comparison + 1
data_packet = self.create_subscription_packet(value)
evidence_data = self.generate_evidence_data(
value, self.critical_detector_trigger, self.warning_detector_trigger
)
occurrence = self.process_packet_and_return_result(data_packet)
assert isinstance(occurrence, IssueOccurrence)
self.verify_issue_occurrence(occurrence, evidence_data, self.critical_detector_trigger)
def test_warning_level(self) -> None:
value = self.warning_detector_trigger.comparison + 1
data_packet = self.create_subscription_packet(value)
evidence_data = self.generate_evidence_data(value, self.warning_detector_trigger)
occurrence = self.process_packet_and_return_result(data_packet)
assert isinstance(occurrence, IssueOccurrence)
self.verify_issue_occurrence(occurrence, evidence_data, self.warning_detector_trigger)
def test_does_not_trigger(self) -> None:
value = self.warning_detector_trigger.comparison - 1
data_packet = self.create_subscription_packet(value)
result = self.process_packet_and_return_result(data_packet)
assert result is None
def test_missing_detector_trigger(self) -> None:
value = self.critical_detector_trigger.comparison + 1
data_packet = self.create_subscription_packet(value)
DataCondition.objects.all().delete()
result = self.process_packet_and_return_result(data_packet)
assert result is None
def test_flipped_detector_trigger(self) -> None:
self.warning_detector_trigger.delete()
self.critical_detector_trigger.update(type=Condition.LESS)
value = self.critical_detector_trigger.comparison - 1
data_packet = self.create_subscription_packet(value)
evidence_data = self.generate_evidence_data(value, self.critical_detector_trigger)
occurrence = self.process_packet_and_return_result(data_packet)
assert isinstance(occurrence, IssueOccurrence)
self.verify_issue_occurrence(occurrence, evidence_data, self.critical_detector_trigger)
| TestEvaluateMetricDetector |
python | keon__algorithms | tests/test_tree.py | {
"start": 3980,
"end": 5053
} | class ____(unittest.TestCase):
def test_construct_tree_with_update_1(self):
freq = [2, 1, 1, 3, 2, 3, 4, 5, 6, 7, 8, 9]
ft = Fenwick_Tree(freq)
bit_tree = ft.construct()
self.assertEqual(12, ft.get_sum(bit_tree, 5))
freq[3] += 6
ft.update_bit(bit_tree, 3, 6)
self.assertEqual(18, ft.get_sum(bit_tree, 5))
def test_construct_tree_with_update_2(self):
freq = [1, 2, 3, 4, 5]
ft = Fenwick_Tree(freq)
bit_tree = ft.construct()
self.assertEqual(10, ft.get_sum(bit_tree, 3))
freq[3] -= 5
ft.update_bit(bit_tree, 3, -5)
self.assertEqual(5, ft.get_sum(bit_tree, 3))
def test_construct_tree_with_update_3(self):
freq = [2, 1, 4, 6, -1, 5, -32, 0, 1]
ft = Fenwick_Tree(freq)
bit_tree = ft.construct()
self.assertEqual(12, ft.get_sum(bit_tree, 4))
freq[2] += 11
ft.update_bit(bit_tree, 2, 11)
self.assertEqual(23, ft.get_sum(bit_tree, 4))
if __name__ == '__main__':
unittest.main()
| TestFenwickTree |
python | django__django | django/core/serializers/python.py | {
"start": 412,
"end": 3509
} | class ____(base.Serializer):
"""
Serialize a QuerySet to basic Python objects.
"""
internal_use_only = True
def start_serialization(self):
self._current = None
self.objects = []
def end_serialization(self):
pass
def start_object(self, obj):
self._current = {}
def end_object(self, obj):
self.objects.append(self.get_dump_object(obj))
self._current = None
def get_dump_object(self, obj):
data = {"model": str(obj._meta)}
if not self.use_natural_primary_keys or not hasattr(obj, "natural_key"):
data["pk"] = self._value_from_field(obj, obj._meta.pk)
data["fields"] = self._current
return data
def _value_from_field(self, obj, field):
if isinstance(field, CompositePrimaryKey):
return [self._value_from_field(obj, f) for f in field]
value = field.value_from_object(obj)
# Protected types (i.e., primitives like None, numbers, dates,
# and Decimals) are passed through as is. All other values are
# converted to string first.
return value if is_protected_type(value) else field.value_to_string(obj)
def handle_field(self, obj, field):
self._current[field.name] = self._value_from_field(obj, field)
def handle_fk_field(self, obj, field):
if self.use_natural_foreign_keys and hasattr(
field.remote_field.model, "natural_key"
):
related = getattr(obj, field.name)
if related:
value = related.natural_key()
else:
value = None
else:
value = self._value_from_field(obj, field)
self._current[field.name] = value
def handle_m2m_field(self, obj, field):
if field.remote_field.through._meta.auto_created:
if self.use_natural_foreign_keys and hasattr(
field.remote_field.model, "natural_key"
):
def m2m_value(value):
return value.natural_key()
def queryset_iterator(obj, field):
attr = getattr(obj, field.name)
chunk_size = (
2000 if getattr(attr, "prefetch_cache_name", None) else None
)
return attr.iterator(chunk_size)
else:
def m2m_value(value):
return self._value_from_field(value, value._meta.pk)
def queryset_iterator(obj, field):
query_set = getattr(obj, field.name).select_related(None).only("pk")
chunk_size = 2000 if query_set._prefetch_related_lookups else None
return query_set.iterator(chunk_size=chunk_size)
m2m_iter = getattr(obj, "_prefetched_objects_cache", {}).get(
field.name,
queryset_iterator(obj, field),
)
self._current[field.name] = [m2m_value(related) for related in m2m_iter]
def getvalue(self):
return self.objects
| Serializer |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_workers.py | {
"start": 68893,
"end": 70517
} | class ____:
async def test_delete_worker(self, client, work_pool, session, db):
work_pool_id = work_pool.id
deleted_worker_name = "worker1"
for i in range(2):
insert_stmt = (db.queries.insert(db.Worker)).values(
name=f"worker{i}",
work_pool_id=work_pool_id,
last_heartbeat_time=datetime.now(timezone.utc),
)
await session.execute(insert_stmt)
await session.commit()
response = await client.delete(
f"/work_pools/{work_pool.name}/workers/{deleted_worker_name}"
)
assert response.status_code == status.HTTP_204_NO_CONTENT, response.text
remaining_workers = await models.workers.read_workers(
session=session,
work_pool_id=work_pool_id,
)
assert deleted_worker_name not in map(lambda x: x.name, remaining_workers)
async def test_nonexistent_worker(self, client, session, db):
worker_name = "worker1"
wp = await models.workers.create_work_pool(
session=session,
work_pool=schemas.actions.WorkPoolCreate(name="A"),
)
insert_stmt = (db.queries.insert(db.Worker)).values(
name=worker_name,
work_pool_id=wp.id,
last_heartbeat_time=datetime.now(timezone.utc),
)
await session.execute(insert_stmt)
await session.commit()
response = await client.delete(f"/work_pools/{wp.name}/workers/does-not-exist")
assert response.status_code == status.HTTP_404_NOT_FOUND, response.text
| TestDeleteWorker |
python | redis__redis-py | redis/commands/search/querystring.py | {
"start": 6171,
"end": 6590
} | class ____(IntersectNode):
"""
Create a disjunct node. In order for this node to be true, all of its
children must evaluate to false
"""
def to_string(self, with_parens=None):
with_parens = self._should_use_paren(with_parens)
ret = super().to_string(with_parens=False)
if with_parens:
return "(-" + ret + ")"
else:
return "-" + ret
| DisjunctNode |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/auth/tokens.py | {
"start": 8717,
"end": 13234
} | class ____:
"""
Validate the claims and validitory of a JWT.
This will either validate the JWT is signed with the symmetric key if ``secret_key`` is passed, or else
that it is signed by one of the public keys in the keyset in ``jwks`` attribute.
"""
jwks: JWKS | None = None
secret_key: str | None = attrs.field(repr=False, default=None, converter=lambda v: None if v == "" else v)
issuer: str | list[str] | None = attrs.field(
factory=_conf_list_factory("api_auth", "jwt_issuer", fallback=None),
# Ensure we have None, instead of an empty list, else pyjwt will fail to validate it
converter=lambda v: None if v == [] else v,
)
# By default, we just validate these
required_claims: frozenset[str] = frozenset({"exp", "iat", "nbf"})
audience: str | Sequence[str]
algorithm: list[str] = attrs.field(
factory=_conf_list_factory("api_auth", "jwt_algorithm", fallback="GUESS"), converter=_to_list
)
leeway: float = attrs.field(factory=_conf_factory("api_auth", "jwt_leeway"), converter=int)
def __attrs_post_init__(self):
if not (self.jwks is None) ^ (self.secret_key is None):
raise ValueError("Exactly one of private_key and secret_key must be specified")
if self.algorithm == ["GUESS"]:
if self.jwks:
# TODO: We could probably populate this from the jwks document, but we don't have that at
# construction time.
raise ValueError(
"Cannot guess the algorithm when using JWKS - please specify it in the config option "
"[api_auth] jwt_algorithm"
)
self.algorithm = ["HS512"]
def _get_kid_from_header(self, unvalidated: str) -> str:
header = jwt.get_unverified_header(unvalidated)
if "kid" not in header:
raise jwt.InvalidTokenError("Missing 'kid' in token header")
return header["kid"]
async def _get_validation_key(self, unvalidated: str) -> str | jwt.PyJWK:
if self.secret_key:
return self.secret_key
if TYPE_CHECKING:
assert self.jwks is not None
kid = self._get_kid_from_header(unvalidated)
return await self.jwks.get_key(kid)
def validated_claims(
self, unvalidated: str, required_claims: dict[str, Any] | None = None
) -> dict[str, Any]:
return async_to_sync(self.avalidated_claims)(unvalidated, required_claims)
async def avalidated_claims(
self, unvalidated: str, required_claims: dict[str, Any] | None = None
) -> dict[str, Any]:
"""Decode the JWT token, returning the validated claims or raising an exception."""
key = await self._get_validation_key(unvalidated)
claims = jwt.decode(
unvalidated,
key,
audience=self.audience,
issuer=self.issuer,
options={"require": self.required_claims},
algorithms=self.algorithm,
leeway=self.leeway,
)
# Validate additional claims if provided
if required_claims:
for claim, expected_value in required_claims.items():
if expected_value["essential"] and (
claim not in claims or claims[claim] != expected_value["value"]
):
raise InvalidClaimError(claim)
return claims
def status(self):
if self.jwks:
self.jwks.status()
def _pem_to_key(pem_data: str | bytes | AllowedPrivateKeys) -> AllowedPrivateKeys:
if isinstance(pem_data, str):
pem_data = pem_data.encode()
elif not isinstance(pem_data, bytes):
# Assume it's already a key object
return pem_data
return load_pem_private_key(pem_data, password=None) # type: ignore[return-value]
def _load_key_from_configured_file() -> AllowedPrivateKeys | None:
from airflow.configuration import conf
path = conf.get("api_auth", "jwt_private_key_path", fallback=None)
if not path:
return None
with open(path, mode="rb") as fh:
return _pem_to_key(fh.read())
def _generate_kid(gen) -> str:
if not gen._private_key:
return "not-used"
if kid := _conf_factory("api_auth", "jwt_kid", fallback=None)():
return kid
# Generate it from the thumbprint of the private key
info = key_to_jwk_dict(gen._private_key)
return info["kid"]
@attrs.define(repr=False, kw_only=True)
| JWTValidator |
python | sphinx-doc__sphinx | sphinx/domains/changeset.py | {
"start": 1122,
"end": 1258
} | class ____(NamedTuple):
type: str
docname: str
lineno: int
module: str | None
descname: str
content: str
| ChangeSet |
python | huggingface__transformers | tests/models/grounding_dino/test_modeling_grounding_dino.py | {
"start": 9490,
"end": 25536
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (GroundingDinoModel, GroundingDinoForObjectDetection) if is_torch_available() else ()
is_encoder_decoder = True
test_missing_keys = False
pipeline_model_mapping = (
{"image-feature-extraction": GroundingDinoModel, "zero-shot-object-detection": GroundingDinoForObjectDetection}
if is_torch_available()
else {}
)
# special case for head models
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "GroundingDinoForObjectDetection":
labels = []
for i in range(self.model_tester.batch_size):
target = {}
target["class_labels"] = torch.ones(
size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
)
target["boxes"] = torch.ones(
self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
)
target["masks"] = torch.ones(
self.model_tester.n_targets,
self.model_tester.image_size,
self.model_tester.image_size,
device=torch_device,
dtype=torch.float,
)
labels.append(target)
inputs_dict["labels"] = labels
return inputs_dict
def setUp(self):
self.model_tester = GroundingDinoModelTester(self)
self.config_tester = ConfigTester(
self,
config_class=GroundingDinoConfig,
has_text_modality=False,
common_properties=["d_model", "encoder_attention_heads", "decoder_attention_heads"],
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_object_detection_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_object_detection_head_model(*config_and_inputs)
@unittest.skip(reason="Grounding DINO does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Grounding DINO does not have a get_input_embeddings method")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="Grounding DINO does not use token embeddings")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions[-1]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions[-1]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
self.model_tester.num_feature_levels,
self.model_tester.encoder_n_points,
],
)
out_len = len(outputs)
correct_outlen = 12
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
# Object Detection model returns pred_logits and pred_boxes and input_ids
if model_class.__name__ == "GroundingDinoForObjectDetection":
correct_outlen += 3
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions[0]
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, self.model_tester.num_queries, self.model_tester.num_queries],
)
# cross attentions
cross_attentions = outputs.decoder_attentions[-1]
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
self.model_tester.num_feature_levels,
self.model_tester.decoder_n_points,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 3, len(outputs))
self_attentions = outputs.encoder_attentions[-1]
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
self.model_tester.num_feature_levels,
self.model_tester.encoder_n_points,
],
)
# overwrite since hidden_states are called encoder_text_hidden_states
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_vision_hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
seq_len = self.model_tester.encoder_seq_length_vision
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_len, self.model_tester.hidden_size],
)
hidden_states = outputs.encoder_text_hidden_states
self.assertEqual(len(hidden_states), expected_num_layers)
seq_len = self.model_tester.encoder_seq_length_text
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_len, self.model_tester.hidden_size],
)
hidden_states = outputs.decoder_hidden_states
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[decoder_seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
# removed retain_grad and grad on decoder_hidden_states, as queries don't require grad
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
encoder_hidden_states = outputs.encoder_vision_hidden_states[0]
encoder_attentions = outputs.encoder_attentions[0][0]
encoder_hidden_states.retain_grad()
encoder_attentions.retain_grad()
cross_attentions = outputs.decoder_attentions[-1][0]
cross_attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(encoder_attentions.grad)
self.assertIsNotNone(cross_attentions.grad)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values", "input_ids"]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def test_different_timm_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# let's pick a random timm backbone
config.backbone = "tf_mobilenetv3_small_075"
config.use_timm_backbone = True
config.backbone_config = None
config.backbone_kwargs = {"in_chans": 3, "out_indices": (2, 3, 4)}
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "GroundingDinoForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
config.max_text_len,
)
self.assertEqual(outputs.logits.shape, expected_shape)
self.assertTrue(outputs)
@require_timm
def test_hf_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Load a pretrained HF checkpoint as backbone
config.backbone = "microsoft/resnet-18"
config.backbone_config = None
config.use_timm_backbone = False
config.use_pretrained_backbone = True
config.backbone_kwargs = {"out_indices": [2, 3, 4]}
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "GroundingDinoForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
config.max_text_len,
)
self.assertEqual(outputs.logits.shape, expected_shape)
self.assertTrue(outputs)
# Copied from tests.models.deformable_detr.test_modeling_deformable_detr.DeformableDetrModelTest.test_two_stage_training with DeformableDetr->GroundingDino
def test_two_stage_training(self):
model_class = GroundingDinoForObjectDetection
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
config.two_stage = True
config.auxiliary_loss = True
config.with_box_refine = True
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
loss = model(**inputs).loss
loss.backward()
def test_tied_weights_keys(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
config.tie_word_embeddings = True
for model_class in self.all_model_classes:
model_tied = model_class(config)
ptrs = collections.defaultdict(list)
for name, tensor in model_tied.state_dict().items():
ptrs[id_tensor_storage(tensor)].append(name)
# These are all the pointers of shared tensors.
tied_params = [names for _, names in ptrs.items() if len(names) > 1]
tied_weight_keys = model_tied._tied_weights_keys if model_tied._tied_weights_keys is not None else []
# Detect we get a hit for each key
for key in tied_weight_keys:
if not any(re.search(key, p) for group in tied_params for p in group):
raise ValueError(f"{key} is not a tied weight key for {model_class}.")
# Removed tied weights found from tied params -> there should only be one left after
for key in tied_weight_keys:
for i in range(len(tied_params)):
tied_params[i] = [p for p in tied_params[i] if re.search(key, p) is None]
# GroundingDino when sharing weights also uses the shared ones in GroundingDinoDecoder
# Therefore, differently from DeformableDetr, we expect the group lens to be 2
# one for self.bbox_embed in GroundingDinoForObjectDetection and another one
# in the decoder
tied_params = [group for group in tied_params if len(group) > 2]
self.assertListEqual(
tied_params,
[],
f"Missing `_tied_weights_keys` for {model_class}: add all of {tied_params} except one.",
)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
def prepare_text():
text = "a cat."
return text
@require_timm
@require_vision
@slow
| GroundingDinoModelTest |
python | huggingface__transformers | tests/quantization/spqr_integration/test_spqr.py | {
"start": 2517,
"end": 9585
} | class ____(unittest.TestCase):
model_name = "elvircrn/Llama-2-7b-SPQR-3Bit-16x16-red_pajama-hf"
input_text = "Hello my name is"
max_new_tokens = 32
EXPECTED_OUTPUT = (
"Hello my name is Jesse. (I'm also known as Jesse) I'm a 25 year old male from United States. I'm looking for"
)
EXPECTED_OUTPUT_COMPILE = "Hello my name is Jake and I am a 20 year old student at the University of North Texas. (Go Mean Green!) I am a huge fan of the Dallas"
# called only once for all test in this class
@classmethod
def setUpClass(cls):
"""
Setup quantized model
"""
cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
cls.quantized_model = AutoModelForCausalLM.from_pretrained(
cls.model_name,
device_map=torch_device,
)
def tearDown(self):
gc.collect()
backend_empty_cache(torch_device)
gc.collect()
def test_quantized_model_conversion(self):
"""
Simple test that checks if the quantized model has been converted properly
"""
from spqr_quant import QuantizedLinear
from transformers.integrations import replace_with_spqr_linear
model_id = "meta-llama/Llama-2-7b-hf"
config = AutoConfig.from_pretrained(model_id)
quantization_config = AutoConfig.from_pretrained(self.model_name, return_dict=False).quantization_config
quantization_config = SpQRConfig.from_dict(quantization_config)
with init_empty_weights():
model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_id, config=config)
nb_linears = 0
for module in model.modules():
if isinstance(module, torch.nn.Linear):
nb_linears += 1
model, _ = replace_with_spqr_linear(
model,
quantization_config=quantization_config,
modules_to_not_convert=quantization_config.modules_to_not_convert,
)
nb_spqr_linear = 0
for module in model.modules():
if isinstance(module, QuantizedLinear):
nb_spqr_linear += 1
self.assertEqual(nb_linears - 1, nb_spqr_linear)
def test_quantized_model(self):
"""
Simple test that checks if the quantized model is working properly
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
def test_raise_if_non_quantized(self):
model_id = "meta-llama/Llama-2-7b-hf"
quantization_config = SpQRConfig()
with self.assertRaises(ValueError):
_ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)
@unittest.skip
def test_save_pretrained(self):
"""
Simple test that checks if the quantized model is working properly after being saved and loaded
"""
with tempfile.TemporaryDirectory() as tmpdirname:
self.quantized_model.save_pretrained(tmpdirname)
model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=torch_device)
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
@require_torch_multi_gpu
def test_quantized_model_multi_gpu(self):
"""
Simple test that checks if the quantized model is working properly with multiple GPUs
"""
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)
quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto")
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
@pytest.mark.torch_compile_test
def test_quantized_model_compile(self):
"""
Simple test that checks if the quantized model is working properly
"""
# Sample tokens greedily
def decode_one_tokens(model, cur_token, input_pos, cache_position, past_key_values):
logits = model(
cur_token,
position_ids=input_pos,
cache_position=cache_position,
past_key_values=past_key_values,
return_dict=False,
use_cache=True,
)[0]
new_token = torch.argmax(logits[:, [-1]], dim=-1).to(torch.int)
return new_token
# Tokenize the test input
input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device)["input_ids"]
seq_length = input_ids.shape[1]
# Setup static KV cache for generation
past_key_values = StaticCache(
config=self.quantized_model.config, max_cache_len=seq_length + self.max_new_tokens + 1
)
# Allocate token ids to be generated and copy prefix ids
cache_position = torch.arange(seq_length, device=torch_device)
generated_ids = torch.zeros(1, seq_length + self.max_new_tokens, dtype=torch.int, device=torch_device)
generated_ids[:, cache_position] = input_ids.to(torch_device).to(torch.int)
# Do a forward pass to fill the prefix cache and compile the kernels if necessary
logits = self.quantized_model(
input_ids,
cache_position=cache_position,
past_key_values=past_key_values,
return_dict=False,
use_cache=True,
)[0]
next_token = torch.argmax(logits[:, [-1]], dim=-1).to(torch.int)
generated_ids[:, [seq_length]] = next_token
with torch.no_grad():
# Compile the CUDA graph
decode_one_tokens = torch.compile(decode_one_tokens, mode="default", backend="inductor", fullgraph=True)
# Generate tokens one by one
cache_position = torch.tensor([seq_length + 1], device=torch_device)
for _ in range(1, self.max_new_tokens):
with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True):
next_token = decode_one_tokens(
self.quantized_model, next_token.clone(), None, cache_position, past_key_values
)
generated_ids.index_copy_(1, cache_position, next_token)
cache_position += 1
# Check generated text
self.assertEqual(
self.tokenizer.decode(generated_ids[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_COMPILE
)
| SpQRTest |
python | walkccc__LeetCode | solutions/2453. Destroy Sequential Targets/2453.py | {
"start": 0,
"end": 248
} | class ____:
def destroyTargets(self, nums: list[int], space: int) -> int:
count = collections.Counter([num % space for num in nums])
maxCount = max(count.values())
return min(num for num in nums if count[num % space] == maxCount)
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-seller-partner/components.py | {
"start": 6925,
"end": 8216
} | class ____(Decoder):
parameters: InitVar[Mapping[str, Any]]
NORMALIZED_FIELD_NAMES = ["date", "rating", "comments", "response", "order_id", "rater_email"]
def is_stream_response(self) -> bool:
return False
def decode(self, response: requests.Response) -> Generator[MutableMapping[str, Any], None, None]:
# csv header field names for this report differ per marketplace (are localized to marketplace language)
# but columns come in the same order, so we set fieldnames to our custom ones
# and raise error if original and custom header field count does not match
try:
document = gzip.decompress(response.content).decode("iso-8859-1")
except gzip.BadGzipFile:
document = response.content.decode("iso-8859-1")
try:
parsed = xmltodict.parse(
document,
dict_constructor=dict,
attr_prefix="",
cdata_key="text",
force_list={"attribute", "id", "refinementField"},
)
except Exception as e:
logger.warning(f"Unable to parse the report for the stream, error: {str(e)}")
parsed = {}
yield from parsed.get("Result", {}).get("Node", [])
| GetXmlBrowseTreeDataDecoder |
python | qdrant__qdrant-client | qdrant_client/grpc/points_service_pb2_grpc.py | {
"start": 26972,
"end": 45219
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def Upsert(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/Upsert',
points__pb2.UpsertPoints.SerializeToString,
points__pb2.PointsOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/Delete',
points__pb2.DeletePoints.SerializeToString,
points__pb2.PointsOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/Get',
points__pb2.GetPoints.SerializeToString,
points__pb2.GetResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateVectors(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/UpdateVectors',
points__pb2.UpdatePointVectors.SerializeToString,
points__pb2.PointsOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteVectors(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/DeleteVectors',
points__pb2.DeletePointVectors.SerializeToString,
points__pb2.PointsOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetPayload(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/SetPayload',
points__pb2.SetPayloadPoints.SerializeToString,
points__pb2.PointsOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def OverwritePayload(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/OverwritePayload',
points__pb2.SetPayloadPoints.SerializeToString,
points__pb2.PointsOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeletePayload(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/DeletePayload',
points__pb2.DeletePayloadPoints.SerializeToString,
points__pb2.PointsOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ClearPayload(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/ClearPayload',
points__pb2.ClearPayloadPoints.SerializeToString,
points__pb2.PointsOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateFieldIndex(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/CreateFieldIndex',
points__pb2.CreateFieldIndexCollection.SerializeToString,
points__pb2.PointsOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteFieldIndex(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/DeleteFieldIndex',
points__pb2.DeleteFieldIndexCollection.SerializeToString,
points__pb2.PointsOperationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Search(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/Search',
points__pb2.SearchPoints.SerializeToString,
points__pb2.SearchResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SearchBatch(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/SearchBatch',
points__pb2.SearchBatchPoints.SerializeToString,
points__pb2.SearchBatchResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SearchGroups(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/SearchGroups',
points__pb2.SearchPointGroups.SerializeToString,
points__pb2.SearchGroupsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Scroll(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/Scroll',
points__pb2.ScrollPoints.SerializeToString,
points__pb2.ScrollResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Recommend(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/Recommend',
points__pb2.RecommendPoints.SerializeToString,
points__pb2.RecommendResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RecommendBatch(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/RecommendBatch',
points__pb2.RecommendBatchPoints.SerializeToString,
points__pb2.RecommendBatchResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def RecommendGroups(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/RecommendGroups',
points__pb2.RecommendPointGroups.SerializeToString,
points__pb2.RecommendGroupsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Discover(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/Discover',
points__pb2.DiscoverPoints.SerializeToString,
points__pb2.DiscoverResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DiscoverBatch(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/DiscoverBatch',
points__pb2.DiscoverBatchPoints.SerializeToString,
points__pb2.DiscoverBatchResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Count(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/Count',
points__pb2.CountPoints.SerializeToString,
points__pb2.CountResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateBatch(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/UpdateBatch',
points__pb2.UpdateBatchPoints.SerializeToString,
points__pb2.UpdateBatchResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Query(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/Query',
points__pb2.QueryPoints.SerializeToString,
points__pb2.QueryResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def QueryBatch(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/QueryBatch',
points__pb2.QueryBatchPoints.SerializeToString,
points__pb2.QueryBatchResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def QueryGroups(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/QueryGroups',
points__pb2.QueryPointGroups.SerializeToString,
points__pb2.QueryGroupsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Facet(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/Facet',
points__pb2.FacetCounts.SerializeToString,
points__pb2.FacetResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SearchMatrixPairs(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/SearchMatrixPairs',
points__pb2.SearchMatrixPoints.SerializeToString,
points__pb2.SearchMatrixPairsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SearchMatrixOffsets(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/qdrant.Points/SearchMatrixOffsets',
points__pb2.SearchMatrixPoints.SerializeToString,
points__pb2.SearchMatrixOffsetsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| Points |
python | run-llama__llama_index | llama-index-core/tests/memory/blocks/test_vector.py | {
"start": 2417,
"end": 9641
} | class ____(BaseNodePostprocessor):
"""Mock node postprocessor for testing."""
def _postprocess_nodes(
self, nodes: List[NodeWithScore], query: Any = None
) -> List[NodeWithScore]:
"""Add a prefix to each node's text."""
for node in nodes:
if isinstance(node.node, TextNode):
node.node.text = f"PROCESSED: {node.node.text}"
return nodes
@pytest.fixture
def mock_embedding():
"""Create a mock embedding model."""
return MockEmbedding(embed_dim=10)
@pytest.fixture
def mock_vector_store():
"""Create a mock vector store."""
return MockVectorStore()
@pytest.fixture
def vector_memory_block(
mock_vector_store: MockVectorStore, mock_embedding: MockEmbedding
):
"""Create a vector memory block."""
return VectorMemoryBlock(
vector_store=mock_vector_store,
embed_model=mock_embedding,
similarity_top_k=2,
)
@pytest.mark.asyncio
async def test_vector_memory_block_put(vector_memory_block: VectorMemoryBlock):
"""Test putting messages in the vector memory block."""
# Create messages
messages = [
ChatMessage(role="user", content="Hello, how are you?"),
ChatMessage(role="assistant", content="I'm doing well, thank you for asking!"),
]
# Put messages in memory
await vector_memory_block.aput(messages=messages)
# Check that messages were added to vector store
assert len(vector_memory_block.vector_store.nodes) == 1
# Check node content contains both messages
node = next(iter(vector_memory_block.vector_store.nodes.values()))
assert "<message role='user'>Hello, how are you?</message>" in node.text
assert (
"<message role='assistant'>I'm doing well, thank you for asking!</message>"
in node.text
)
@pytest.mark.asyncio
async def test_vector_memory_block_get(vector_memory_block: VectorMemoryBlock):
"""Test getting messages from the vector memory block."""
# Create and store some messages
history_messages = [
ChatMessage(role="user", content="What's the capital of France?"),
ChatMessage(role="assistant", content="The capital of France is Paris."),
ChatMessage(role="user", content="What about Germany?"),
ChatMessage(role="assistant", content="The capital of Germany is Berlin."),
]
await vector_memory_block.aput(messages=history_messages)
# Create a new query
query_messages = [ChatMessage(role="user", content="Tell me about Paris.")]
# Get relevant information
result = await vector_memory_block.aget(messages=query_messages)
# Check that we got a result
assert result != ""
assert "capital of France is Paris" in result
@pytest.mark.asyncio
async def test_empty_messages(vector_memory_block: VectorMemoryBlock):
"""Test with empty messages."""
# Test empty get
result = await vector_memory_block.aget(messages=[])
assert result == ""
# Test empty put
await vector_memory_block.aput(messages=[])
assert len(vector_memory_block.vector_store.nodes) == 0
@pytest.mark.asyncio
async def test_message_without_text(vector_memory_block: VectorMemoryBlock):
"""Test with a message that has no text blocks."""
# Create a message with no text blocks
message = ChatMessage(role="user", content=None, blocks=[])
# Put the message in memory
await vector_memory_block.aput(messages=[message])
# Check that nothing was added
assert len(vector_memory_block.vector_store.nodes) == 0
@pytest.mark.asyncio
async def test_retrieval_context_window(
mock_vector_store: MockVectorStore, mock_embedding: MockEmbedding
):
"""Test the retrieval_context_window parameter."""
# Create a memory block with a specific context window
memory_block = VectorMemoryBlock(
vector_store=mock_vector_store,
embed_model=mock_embedding,
retrieval_context_window=2,
similarity_top_k=2,
)
# Create and store some messages
history_messages = [
ChatMessage(role="user", content="What's your name?"),
ChatMessage(role="assistant", content="I'm an AI assistant."),
ChatMessage(role="user", content="What's the capital of France?"),
ChatMessage(role="assistant", content="The capital of France is Paris."),
]
await memory_block.aput(messages=history_messages)
# Create a query with multiple messages
query_messages = [
ChatMessage(role="user", content="What about the UK?"),
ChatMessage(role="assistant", content="The capital of the UK is London."),
ChatMessage(role="user", content="And Germany?"),
]
# The retrieval should only use the last 2 messages
result = await memory_block.aget(messages=query_messages)
# Check that we got a result
assert result != ""
# The result should be more related to UK/London than Paris
# In our mock implementation, it will just return all stored nodes
@pytest.mark.asyncio
async def test_node_postprocessors(
mock_vector_store: MockVectorStore, mock_embedding: MockEmbedding
):
"""Test node postprocessors."""
# Create a postprocessor
postprocessor = MockNodePostprocessor()
# Create a memory block with the postprocessor
memory_block = VectorMemoryBlock(
vector_store=mock_vector_store,
embed_model=mock_embedding,
similarity_top_k=2,
node_postprocessors=[postprocessor],
)
# Create and store some messages
history_messages = [
ChatMessage(role="user", content="What's the capital of France?"),
ChatMessage(role="assistant", content="The capital of France is Paris."),
]
await memory_block.aput(messages=history_messages)
# Create a query
query_messages = [ChatMessage(role="user", content="Tell me about Paris.")]
# Get relevant information - this should be processed
result = await memory_block.aget(messages=query_messages)
# Check that the result contains the processed prefix
assert "PROCESSED:" in result
@pytest.mark.asyncio
async def test_format_template(
mock_vector_store: MockVectorStore, mock_embedding: MockEmbedding
):
"""Test custom format template."""
# Create a memory block with a custom format template
custom_template = RichPromptTemplate("Relevant context: {{ text }}")
memory_block = VectorMemoryBlock(
vector_store=mock_vector_store,
embed_model=mock_embedding,
similarity_top_k=2,
format_template=custom_template,
)
# Create and store some messages
history_messages = [
ChatMessage(role="user", content="What's the capital of France?"),
ChatMessage(role="assistant", content="The capital of France is Paris."),
]
await memory_block.aput(messages=history_messages)
# Create a query
query_messages = [ChatMessage(role="user", content="Tell me about Paris.")]
# Get relevant information with custom format
result = await memory_block.aget(messages=query_messages)
# Check that the result contains our custom prefix
assert result.startswith("Relevant context:")
assert "capital of France is Paris" in result
| MockNodePostprocessor |
python | instagram__MonkeyType | monkeytype/typing.py | {
"start": 13391,
"end": 13925
} | class ____(TypeRewriter):
"""Union[Dict[K, V1], ..., Dict[K, VN]] -> Dict[K, Union[V1, ..., VN]]"""
def rewrite_Union(self, union):
key_type = None
value_types = []
for e in union.__args__:
if not is_generic_of(e, Dict):
return union
key_type = key_type or e.__args__[0]
if key_type != e.__args__[0]:
return union
value_types.extend(e.__args__[1:])
return Dict[key_type, Union[tuple(value_types)]]
| RewriteConfigDict |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 23923,
"end": 24040
} | class ____(oracle.NVARCHAR2):
def get_dbapi_type(self, dbapi):
return dbapi.NCHAR
| _OracleUnicodeStringNCHAR |
python | jazzband__django-formtools | tests/wizard/wizardtests/forms.py | {
"start": 1703,
"end": 1872
} | class ____(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'email')
UserFormSet = modelformset_factory(User, form=UserForm)
| UserForm |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 35592,
"end": 35907
} | class ____(NonStrictDataModel):
"""
Last metric events, one for each variant hash
"""
_schema = {
"additionalProperties": {"$ref": "#/definitions/last_metrics_event"},
"description": "Last metric events, one for each variant hash",
"type": "object",
}
| LastMetricsVariants |
python | numba__numba | numba/core/typing/templates.py | {
"start": 38790,
"end": 39480
} | class ____(object):
def __init__(self, context):
self.context = context
def resolve(self, value, attr):
return self._resolve(value, attr)
def _resolve(self, value, attr):
fn = getattr(self, "resolve_%s" % attr, None)
if fn is None:
fn = self.generic_resolve
if fn is NotImplemented:
if isinstance(value, types.Module):
return self.context.resolve_module_constants(value, attr)
else:
return None
else:
return fn(value, attr)
else:
return fn(value)
generic_resolve = NotImplemented
| AttributeTemplate |
python | justquick__django-activity-stream | actstream/apps.py | {
"start": 288,
"end": 1030
} | class ____(AppConfig):
name = 'actstream'
default_auto_field = 'django.db.models.AutoField'
verbose_name = 'Activity Streams'
def ready(self):
from actstream.actions import action_handler
action.connect(action_handler, dispatch_uid='actstream.models')
action_class = self.get_model('action')
if actstream_settings.USE_JSONFIELD:
if not hasattr(action_class, 'data'):
from actstream.jsonfield import DataField
DataField(blank=True, null=True).contribute_to_class(
action_class, 'data'
)
from actstream.follows import delete_orphaned_follows
pre_delete.connect(delete_orphaned_follows)
| ActstreamConfig |
python | allegroai__clearml | clearml/backend_api/services/v2_13/workers.py | {
"start": 70612,
"end": 74419
} | class ____(Request):
"""
Register a worker in the system. Called by the Worker Daemon.
:param worker: Worker id. Must be unique in company.
:type worker: str
:param timeout: Registration timeout in seconds. If timeout seconds have passed
since the worker's last call to register or status_report, the worker is
automatically removed from the list of registered workers.
:type timeout: int
:param queues: List of queue IDs on which the worker is listening.
:type queues: Sequence[str]
:param tags: User tags for the worker
:type tags: Sequence[str]
"""
_service = "workers"
_action = "register"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"queues": {
"description": "List of queue IDs on which the worker is listening.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User tags for the worker",
"items": {"type": "string"},
"type": "array",
},
"timeout": {
"default": 600,
"description": "Registration timeout in seconds. If timeout seconds have passed since the worker's last call to register or status_report, the worker is automatically removed from the list of registered workers.",
"type": "integer",
},
"worker": {
"description": "Worker id. Must be unique in company.",
"type": "string",
},
},
"required": ["worker"],
"type": "object",
}
def __init__(
self,
worker: str,
timeout: Optional[int] = 600,
queues: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
**kwargs: Any
) -> None:
super(RegisterRequest, self).__init__(**kwargs)
self.worker = worker
self.timeout = timeout
self.queues = queues
self.tags = tags
@schema_property("worker")
def worker(self) -> str:
return self._property_worker
@worker.setter
def worker(self, value: str) -> None:
if value is None:
self._property_worker = None
return
self.assert_isinstance(value, "worker", six.string_types)
self._property_worker = value
@schema_property("timeout")
def timeout(self) -> Optional[int]:
return self._property_timeout
@timeout.setter
def timeout(self, value: Optional[int]) -> None:
if value is None:
self._property_timeout = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "timeout", six.integer_types)
self._property_timeout = value
@schema_property("queues")
def queues(self) -> Optional[List[str]]:
return self._property_queues
@queues.setter
def queues(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_queues = None
return
self.assert_isinstance(value, "queues", (list, tuple))
self.assert_isinstance(value, "queues", six.string_types, is_array=True)
self._property_queues = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
| RegisterRequest |
python | kamyu104__LeetCode-Solutions | Python/maximum-hamming-distances.py | {
"start": 587,
"end": 1292
} | class ____(object):
def maxHammingDistances(self, nums, m):
"""
:type nums: List[int]
:type m: int
:rtype: List[int]
"""
q = []
dist = [-1]*(1<<m)
for x in nums:
if dist[x] != -1:
continue
dist[x] = 0
q.append(x)
d = 0
while q:
d += 1
new_q = []
for u in q:
for i in xrange(m):
if dist[u^(1<<i)] != -1:
continue
dist[u^(1<<i)] = d
new_q.append(u^(1<<i))
q = new_q
return [m-dist[((1<<m)-1)^x] for x in nums]
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/find-the-least-frequent-digit.py | {
"start": 51,
"end": 377
} | class ____(object):
def getLeastFrequentDigit(self, n):
"""
:type n: int
:rtype: int
"""
cnt = [0]*10
while n:
n, r = divmod(n, 10)
cnt[r] += 1
mn = min(x for x in cnt if x)
return next(i for i in range(len(cnt)) if cnt[i] == mn)
| Solution |
python | lxml__lxml | src/lxml/tests/test_xpathevaluator.py | {
"start": 18401,
"end": 19703
} | class ____(HelperTestCase):
"Tests for the XPath class"
def test_xpath_compile_doc(self):
x = self.parse('<a attr="true"/>')
expr = etree.XPath("/a[@attr != 'true']")
r = expr(x)
self.assertEqual(0, len(r))
expr = etree.XPath("/a[@attr = 'true']")
r = expr(x)
self.assertEqual(1, len(r))
expr = etree.XPath( expr.path )
r = expr(x)
self.assertEqual(1, len(r))
def test_xpath_compile_element(self):
x = self.parse('<a><b/><c/></a>')
root = x.getroot()
expr = etree.XPath("./b")
r = expr(root)
self.assertEqual(1, len(r))
self.assertEqual('b', r[0].tag)
expr = etree.XPath("./*")
r = expr(root)
self.assertEqual(2, len(r))
def test_xpath_compile_vars(self):
x = self.parse('<a attr="true"/>')
expr = etree.XPath("/a[@attr=$aval]")
r = expr(x, aval=False)
self.assertEqual(0, len(r))
r = expr(x, aval=True)
self.assertEqual(1, len(r))
def test_xpath_compile_error(self):
self.assertRaises(SyntaxError, etree.XPath, '\\fad')
def test_xpath_elementtree_error(self):
self.assertRaises(ValueError, etree.XPath('*'), etree.ElementTree())
| ETreeXPathClassTestCase |
python | ansible__ansible | test/integration/targets/callback-dispatch/callback_plugins/v1_only_methods.py | {
"start": 106,
"end": 1254
} | class ____(CallbackBase):
"""Test callback that implements exclusively deprecated v1 callback methods."""
CALLBACK_NEEDS_ENABLED = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.called_v1_method_names: set[str] = set()
def callback_impl(self, *args, name: str, **kwargs) -> None:
print(f"hi from callback {name!r} with {args=!r} {kwargs=!r}")
self.called_v1_method_names.add(name)
for v1_method in CallbackBase._v2_v1_method_map.values():
if not v1_method:
continue
locals()[v1_method.__name__] = functools.partialmethod(callback_impl, name=v1_method.__name__)
def playbook_on_stats(self, stats, *args, **kwargs):
if missed_v1_method_calls := (
{'on_any',
'runner_on_ok',
'playbook_on_task_start',
'runner_on_async_ok',
} - self.called_v1_method_names):
assert False, f"The following v1 callback methods were not invoked as expected: {', '.join(missed_v1_method_calls)}"
print("v1 callback test PASS")
| CallbackModule |
python | getsentry__sentry | src/sentry/users/api/endpoints/user_authenticator_enroll.py | {
"start": 2545,
"end": 4144
} | class ____(serializers.Serializer[Authenticator]):
deviceName = serializers.CharField(
label="Device name",
required=False,
allow_null=True,
allow_blank=True,
max_length=60,
trim_whitespace=False,
default=lambda: petname.generate(2, " ", letters=10).title(),
)
challenge = serializers.CharField(required=True, trim_whitespace=False)
response = serializers.CharField(required=True, trim_whitespace=False)
serializer_map = {"totp": TotpRestSerializer, "sms": SmsRestSerializer, "u2f": U2fRestSerializer}
def get_serializer_field_metadata(
serializer: serializers.Serializer[Authenticator], fields: list[str] | None = None
) -> list[dict[str, Any]]:
"""Returns field metadata for serializer"""
meta = []
for field_name, field in serializer.fields.items():
if (fields is None or field_name in fields) and field_name:
try:
default = field.get_default()
except SkipField:
default = None
serialized_field = {
"name": field_name,
"defaultValue": default,
"read_only": field.read_only,
"required": field.required,
"type": "string",
}
if hasattr(field, "max_length") and field.max_length:
serialized_field["max_length"] = field.max_length
if field.label:
serialized_field["label"] = field.label
meta.append(serialized_field)
return meta
@control_silo_endpoint
| U2fRestSerializer |
python | pytorch__pytorch | test/onnx/exporter/test_building.py | {
"start": 299,
"end": 6425
} | class ____(common_utils.TestCase):
def setUp(self):
super().setUp()
self.opset_version = 17
self.opset = onnxscript.values.Opset("", self.opset_version)
self.recorder = _building.OpRecorder(opset=self.opset, constant_farm={})
self.model = ir.Model(
graph=ir.Graph(
[],
[],
nodes=[],
opset_imports={
"": self.opset_version,
},
name="main_graph",
),
ir_version=9,
producer_name="pytorch",
producer_version=torch.__version__,
)
def test_skippable_castlike_is_ommited(self):
input_x = _tensors.SymbolicTensor(opset=self.opset, name="input_x")
input_x.dtype = ir.DataType.FLOAT
input_y = _tensors.SymbolicTensor(opset=self.opset, name="input_y")
input_y.dtype = ir.DataType.FLOAT
with onnxscript.evaluator.default_as(tracer := self.recorder):
cast = self.opset.CastLike(input_y, input_x)
_ = self.opset.Add(input_x, cast)
self.assertEqual(len(tracer.nodes), 1)
self.assertEqual(tracer.nodes[0].op_type, "Add")
def test_castlike_is_replaced_with_cast_when_it_is_traced(self):
input_x = _tensors.SymbolicTensor(opset=self.opset, name="input_x")
input_x.dtype = ir.DataType.FLOAT
input_y = _tensors.SymbolicTensor(opset=self.opset, name="input_y")
input_y.dtype = ir.DataType.INT64
with onnxscript.evaluator.default_as(tracer := self.recorder):
cast = self.opset.CastLike(input_y, input_x)
_ = self.opset.Add(input_x, cast)
self.assertEqual(len(tracer.nodes), 2)
self.assertEqual(tracer.nodes[0].op_type, "Cast")
self.assertEqual(tracer.nodes[1].op_type, "Add")
def test_python_constant_added_as_constant_nodes(self):
input_x = _tensors.SymbolicTensor(
opset=self.opset, name="input_x", shape=ir.Shape([2, 3, 4])
)
new_shape = [3, 2, 4]
with onnxscript.evaluator.default_as(tracer := self.recorder):
_ = self.opset.Reshape(input_x, new_shape)
self.assertEqual(len(tracer.nodes), 2)
self.assertEqual(tracer.nodes[0].op_type, "Constant")
self.assertEqual(
tracer.nodes[0].attributes["value"].value.numpy(), np.array(new_shape)
)
self.assertEqual(tracer.nodes[1].op_type, "Reshape")
def test_process_python_sequence_with_allowed_sequence_type(self):
input_x = _tensors.SymbolicTensor(
opset=self.opset, name="input_x", shape=ir.Shape([2, 3])
)
input_y = _tensors.SymbolicTensor(
opset=self.opset, name="input_y", shape=ir.Shape([2, 4])
)
input_z = _tensors.SymbolicTensor(
opset=self.opset, name="input_z", shape=ir.Shape([1, 3])
)
with onnxscript.evaluator.default_as(tracer := self.recorder):
_ = self.opset.SequenceAt([input_x, input_y, input_z], 1)
self.assertEqual(len(tracer.nodes), 3)
self.assertEqual(tracer.nodes[1].op_type, "SequenceConstruct")
def test_process_python_sequence_with_variadic_input(self):
input_x = _tensors.SymbolicTensor(
opset=self.opset, name="input_x", shape=ir.Shape([2, 3])
)
input_y = _tensors.SymbolicTensor(
opset=self.opset, name="input_y", shape=ir.Shape([2, 4])
)
input_z = _tensors.SymbolicTensor(
opset=self.opset, name="input_z", shape=ir.Shape([1, 3])
)
with onnxscript.evaluator.default_as(tracer := self.recorder):
_ = self.opset.Max(input_x, input_y, 0, input_z)
self.assertEqual(len(tracer.nodes), 2)
self.assertEqual(tracer.nodes[0].op_type, "Constant")
def test_process_python_sequence_creates_extra_concat(self):
# Elements in the list must be 0D tensors
input_x = _tensors.SymbolicTensor(
opset=self.opset, name="input_x", shape=ir.Shape([])
)
input_y = _tensors.SymbolicTensor(
opset=self.opset, name="input_y", shape=ir.Shape([])
)
input_z = _tensors.SymbolicTensor(
opset=self.opset, name="input_z", shape=ir.Shape([4, 3])
)
with onnxscript.evaluator.default_as(tracer := self.recorder):
_ = self.opset.Add([input_x, input_y], input_z)
self.assertEqual(len(tracer.nodes), 6)
self.assertEqual(tracer.nodes[-2].op_type, "Concat")
self.assertEqual(tracer.nodes[-2].attributes["axis"].value, 0)
def test_process_python_sequence_mix_symbolic_constant_creates_extra_concat(self):
# Elements in the list must be 0D tensors
input_x = _tensors.SymbolicTensor(
opset=self.opset, name="input_x", shape=ir.Shape([])
)
input_z = _tensors.SymbolicTensor(
opset=self.opset, name="input_z", shape=ir.Shape([4, 3])
)
with onnxscript.evaluator.default_as(tracer := self.recorder):
_ = self.opset.Add([input_x, 42], input_z)
self.assertEqual(len(tracer.nodes), 5)
self.assertEqual(tracer.nodes[-2].op_type, "Concat")
self.assertEqual(tracer.nodes[-2].attributes["axis"].value, 0)
def test_process_python_sequence_mix_constant_symbolic_creates_extra_concat(self):
# Elements in the list must be 0D tensors
input_x = _tensors.SymbolicTensor(
opset=self.opset, name="input_x", shape=ir.Shape([])
)
input_z = _tensors.SymbolicTensor(
opset=self.opset, name="input_z", shape=ir.Shape([4, 3])
)
with onnxscript.evaluator.default_as(tracer := self.recorder):
# Constant first
_ = self.opset.Add([42, input_x], input_z)
self.assertEqual(len(tracer.nodes), 5)
self.assertEqual(tracer.nodes[-2].op_type, "Concat")
self.assertEqual(tracer.nodes[-2].attributes["axis"].value, 0)
if __name__ == "__main__":
common_utils.run_tests()
| TestOpRecorder |
python | huggingface__transformers | src/transformers/models/tvp/modeling_tvp.py | {
"start": 12778,
"end": 14541
} | class ____(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
| TvpTextInputEmbeddings |
python | numba__numba | numba/tests/test_cffi.py | {
"start": 365,
"end": 6695
} | class ____(TestCase):
# Need to run the tests serially because of race conditions in
# cffi's OOL mode.
_numba_parallel_test_ = False
def setUp(self):
mod.init()
mod.init_ool()
def test_type_map(self):
signature = cffi_support.map_type(mod.ffi.typeof(mod.cffi_sin))
self.assertEqual(len(signature.args), 1)
self.assertEqual(signature.args[0], types.double)
def _test_function(self, pyfunc, flags=enable_pyobj_flags):
cfunc = jit((types.double,), **flags)(pyfunc)
for x in [-1.2, -1, 0, 0.1, 3.14]:
self.assertPreciseEqual(pyfunc(x), cfunc(x))
def test_sin_function(self):
self._test_function(mod.use_cffi_sin)
def test_bool_function_ool(self):
pyfunc = mod.use_cffi_boolean_true
cfunc = njit((),)(pyfunc)
self.assertEqual(pyfunc(), True)
self.assertEqual(cfunc(), True)
def test_sin_function_npm(self):
self._test_function(mod.use_cffi_sin, flags=no_pyobj_flags)
def test_sin_function_ool(self, flags=enable_pyobj_flags):
self._test_function(mod.use_cffi_sin_ool)
def test_sin_function_npm_ool(self):
self._test_function(mod.use_cffi_sin_ool, flags=no_pyobj_flags)
def test_two_funcs(self):
# Check that two constant functions don't get mixed up.
self._test_function(mod.use_two_funcs)
def test_two_funcs_ool(self):
self._test_function(mod.use_two_funcs_ool)
def test_function_pointer(self):
pyfunc = mod.use_func_pointer
cfunc = jit(nopython=True)(pyfunc)
for (fa, fb, x) in [
(mod.cffi_sin, mod.cffi_cos, 1.0),
(mod.cffi_sin, mod.cffi_cos, -1.0),
(mod.cffi_cos, mod.cffi_sin, 1.0),
(mod.cffi_cos, mod.cffi_sin, -1.0),
(mod.cffi_sin_ool, mod.cffi_cos_ool, 1.0),
(mod.cffi_sin_ool, mod.cffi_cos_ool, -1.0),
(mod.cffi_cos_ool, mod.cffi_sin_ool, 1.0),
(mod.cffi_cos_ool, mod.cffi_sin_ool, -1.0),
(mod.cffi_sin, mod.cffi_cos_ool, 1.0),
(mod.cffi_sin, mod.cffi_cos_ool, -1.0),
(mod.cffi_cos, mod.cffi_sin_ool, 1.0),
(mod.cffi_cos, mod.cffi_sin_ool, -1.0)]:
expected = pyfunc(fa, fb, x)
got = cfunc(fa, fb, x)
self.assertEqual(got, expected)
# A single specialization was compiled for all calls
self.assertEqual(len(cfunc.overloads), 1, cfunc.overloads)
def test_user_defined_symbols(self):
pyfunc = mod.use_user_defined_symbols
cfunc = jit(nopython=True)(pyfunc)
self.assertEqual(pyfunc(), cfunc())
def check_vector_sin(self, cfunc, x, y):
cfunc(x, y)
np.testing.assert_allclose(y, np.sin(x))
def _test_from_buffer_numpy_array(self, pyfunc, dtype):
x = np.arange(10).astype(dtype)
y = np.zeros_like(x)
cfunc = jit(nopython=True)(pyfunc)
self.check_vector_sin(cfunc, x, y)
def test_from_buffer_float32(self):
self._test_from_buffer_numpy_array(mod.vector_sin_float32, np.float32)
def test_from_buffer_float64(self):
self._test_from_buffer_numpy_array(mod.vector_sin_float64, np.float64)
def test_from_buffer_struct(self):
n = 10
x = np.arange(n) + np.arange(n * 2, n * 3) * 1j
y = np.zeros(n)
real_cfunc = jit(nopython=True)(mod.vector_extract_real)
real_cfunc(x, y)
np.testing.assert_equal(x.real, y)
imag_cfunc = jit(nopython=True)(mod.vector_extract_imag)
imag_cfunc(x, y)
np.testing.assert_equal(x.imag, y)
def test_from_buffer_pyarray(self):
pyfunc = mod.vector_sin_float32
cfunc = jit(nopython=True)(pyfunc)
x = array.array("f", range(10))
y = array.array("f", [0] * len(x))
self.check_vector_sin(cfunc, x, y)
def test_from_buffer_error(self):
pyfunc = mod.vector_sin_float32
cfunc = jit(nopython=True)(pyfunc)
# Non-contiguous array
x = np.arange(10).astype(np.float32)[::2]
y = np.zeros_like(x)
with self.assertRaises(errors.TypingError) as raises:
cfunc(x, y)
self.assertIn("from_buffer() unsupported on non-contiguous buffers",
str(raises.exception))
def test_from_buffer_numpy_multi_array(self):
c1 = np.array([1, 2], order='C', dtype=np.float32)
c1_zeros = np.zeros_like(c1)
c2 = np.array([[1, 2], [3, 4]], order='C', dtype=np.float32)
c2_zeros = np.zeros_like(c2)
f1 = np.array([1, 2], order='F', dtype=np.float32)
f1_zeros = np.zeros_like(f1)
f2 = np.array([[1, 2], [3, 4]], order='F', dtype=np.float32)
f2_zeros = np.zeros_like(f2)
f2_copy = f2.copy('K')
pyfunc = mod.vector_sin_float32
cfunc = jit(nopython=True)(pyfunc)
# No exception because of C layout and single dimension
self.check_vector_sin(cfunc, c1, c1_zeros)
# No exception because of C layout
cfunc(c2, c2_zeros)
sin_c2 = np.sin(c2)
sin_c2[1] = [0, 0] # Reset to zero, since cfunc only processes one row
np.testing.assert_allclose(c2_zeros, sin_c2)
# No exception because of single dimension
self.check_vector_sin(cfunc, f1, f1_zeros)
# Exception because multi-dimensional with F layout
with self.assertRaises(errors.TypingError) as raises:
cfunc(f2, f2_zeros)
np.testing.assert_allclose(f2, f2_copy)
self.assertIn("from_buffer() only supports multidimensional arrays with C layout",
str(raises.exception))
def test_indirect_multiple_use(self):
"""
Issue #2263
Linkage error due to multiple definition of global tracking symbol.
"""
my_sin = mod.cffi_sin
# Use two jit functions that references `my_sin` to ensure multiple
# modules
@jit(nopython=True)
def inner(x):
return my_sin(x)
@jit(nopython=True)
def foo(x):
return inner(x) + my_sin(x + 1)
# Error occurs when foo is being compiled
x = 1.123
self.assertEqual(foo(x), my_sin(x) + my_sin(x + 1))
if __name__ == '__main__':
unittest.main()
| TestCFFI |
python | getsentry__sentry | tests/sentry/notifications/notification_action/test_issue_alert_registry_handlers.py | {
"start": 25908,
"end": 26462
} | class ____(BaseWorkflowTest):
def setUp(self) -> None:
super().setUp()
self.handler = PluginIssueAlertHandler()
self.detector = self.create_detector(project=self.project)
self.action = self.create_action(
type=Action.Type.PLUGIN,
)
def test_build_rule_action_blob(self) -> None:
blob = self.handler.build_rule_action_blob(self.action, self.organization.id)
assert blob == {
"id": ACTION_FIELD_MAPPINGS[Action.Type.PLUGIN]["id"],
}
| TestPluginIssueAlertHandler |
python | pennersr__django-allauth | allauth/socialaccount/providers/base/constants.py | {
"start": 0,
"end": 91
} | class ____:
LOGIN = "login"
CONNECT = "connect"
REDIRECT = "redirect"
| AuthProcess |
python | PrefectHQ__prefect | src/prefect/_internal/concurrency/api.py | {
"start": 5302,
"end": 7352
} | class ____(_base):
@staticmethod
def wait_for_call_in_loop_thread(
__call: Union[
Callable[[], Awaitable[T]],
Call[T],
],
timeout: Optional[float] = None,
done_callbacks: Optional[Iterable[Call[T]]] = None,
contexts: Optional[Iterable[AbstractContextManager[Any]]] = None,
) -> T:
call = cast_to_call(__call)
waiter = SyncWaiter(call)
_base.call_soon_in_loop_thread(call, timeout=timeout)
for callback in done_callbacks or []:
waiter.add_done_callback(callback)
with contextlib.ExitStack() as stack:
for context in contexts or []:
stack.enter_context(context)
waiter.wait()
return call.result()
@staticmethod
def wait_for_call_in_new_thread(
__call: Union[Callable[[], T], Call[T]],
timeout: Optional[float] = None,
done_callbacks: Optional[Iterable[Call[T]]] = None,
) -> T:
call = cast_to_call(__call)
waiter = SyncWaiter(call=call)
for callback in done_callbacks or []:
waiter.add_done_callback(callback)
_base.call_soon_in_new_thread(call, timeout=timeout)
waiter.wait()
return call.result()
@staticmethod
def call_in_new_thread(
__call: Union["_SyncOrAsyncCallable[[], T]", Call[T]],
timeout: Optional[float] = None,
) -> T:
call = _base.call_soon_in_new_thread(__call, timeout=timeout)
return call.result()
@staticmethod
def call_in_loop_thread(
__call: Union["_SyncOrAsyncCallable[[], T]", Call[T]],
timeout: Optional[float] = None,
) -> Union[Awaitable[T], T]:
if in_global_loop():
# Avoid deadlock where the call is submitted to the loop then the loop is
# blocked waiting for the call
call = cast_to_call(__call)
return call()
call = _base.call_soon_in_loop_thread(__call, timeout=timeout)
return call.result()
| from_sync |
python | pyca__cryptography | src/cryptography/x509/extensions.py | {
"start": 73286,
"end": 75493
} | class ____:
def __init__(
self,
admission_authority: GeneralName | None,
naming_authority: NamingAuthority | None,
profession_infos: Iterable[ProfessionInfo],
) -> None:
if admission_authority is not None and not isinstance(
admission_authority, GeneralName
):
raise TypeError("admission_authority must be a GeneralName")
if naming_authority is not None and not isinstance(
naming_authority, NamingAuthority
):
raise TypeError("naming_authority must be a NamingAuthority")
profession_infos = list(profession_infos)
if not all(
isinstance(info, ProfessionInfo) for info in profession_infos
):
raise TypeError(
"Every item in the profession_infos list must be a "
"ProfessionInfo"
)
self._admission_authority = admission_authority
self._naming_authority = naming_authority
self._profession_infos = profession_infos
@property
def admission_authority(self) -> GeneralName | None:
return self._admission_authority
@property
def naming_authority(self) -> NamingAuthority | None:
return self._naming_authority
@property
def profession_infos(self) -> list[ProfessionInfo]:
return self._profession_infos
def __repr__(self) -> str:
return (
f"<Admission(admission_authority={self.admission_authority}, "
f"naming_authority={self.naming_authority}, "
f"profession_infos={self.profession_infos})>"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Admission):
return NotImplemented
return (
self.admission_authority == other.admission_authority
and self.naming_authority == other.naming_authority
and self.profession_infos == other.profession_infos
)
def __hash__(self) -> int:
return hash(
(
self.admission_authority,
self.naming_authority,
tuple(self.profession_infos),
)
)
| Admission |
python | google__jax | tests/pallas/fuser_block_spec_test.py | {
"start": 45154,
"end": 53233
} | class ____(parameterized.TestCase):
def setUp(self):
super().setUp()
if config.enable_x64.value:
self.skipTest('x64 not supported')
def test_binop(self):
def f(x):
return x + jnp.ones_like(x)
block_spec = pl.BlockSpec((128, 128), lambda i, j: (i, j))
x_type = jax.ShapeDtypeStruct((512, 512), jnp.float32)
out_block_spec = block_spec_lib.push_block_spec(f, block_spec)(x_type)
self.assertEqual(out_block_spec.block_shape, block_spec.block_shape)
def f(x, y):
return x + y
x_block_spec = pl.BlockSpec((128, 128), lambda i, j: (i, j))
y_block_spec = pl.BlockSpec((128, 1), lambda i, j: (i, 0))
x_type = jax.ShapeDtypeStruct((512, 512), jnp.float32)
y_type = jax.ShapeDtypeStruct((512, 1), jnp.float32)
with self.assertRaisesRegex(
ValueError, 'Cannot propagate block spec through RHS broadcast.'
):
block_spec_lib.push_block_spec(f, pl.no_block_spec, y_block_spec)(
x_type, y_type
)
out_block_spec = block_spec_lib.push_block_spec(
f, x_block_spec, pl.no_block_spec
)(x_type, y_type)
self.assertIs(x_block_spec, out_block_spec)
x_block_spec = pl.BlockSpec((1, 128), lambda i, j: (0, j))
y_block_spec = pl.BlockSpec((128, 128), lambda i, j: (i, j))
x_type = jax.ShapeDtypeStruct((1, 512), jnp.float32)
y_type = jax.ShapeDtypeStruct((512, 512), jnp.float32)
with self.assertRaisesRegex(
ValueError, 'Cannot propagate block spec through LHS broadcast.'
):
block_spec_lib.push_block_spec(f, x_block_spec, pl.no_block_spec)(
x_type, y_type
)
out_block_spec = block_spec_lib.push_block_spec(
f, pl.no_block_spec, y_block_spec
)(x_type, y_type)
self.assertIs(out_block_spec, y_block_spec)
def test_jit(self):
def f(x):
return jax.jit(jnp.sin)(x)
block_spec = pl.BlockSpec(
(None, 1, 128, 128), lambda i, j, k, l, _: (i, l, k, j)
)
x_type = jax.ShapeDtypeStruct((1, 1, 512, 512), jnp.float32)
out_block_spec = block_spec_lib.push_block_spec(f, block_spec)(x_type)
self.assertEqual(out_block_spec.block_shape, block_spec.block_shape)
def test_custom_jvp(self):
def f(x):
return jax.nn.relu(x)
x_type = jax.ShapeDtypeStruct((1, 1, 512, 512), jnp.float32)
block_spec = pl.BlockSpec(
(None, 1, 128, 128), lambda i, j, k, l, _: (i, l, k, j)
)
out_block_spec = block_spec_lib.push_block_spec(f, block_spec)(x_type)
self.assertEqual(out_block_spec.block_shape, block_spec.block_shape)
def test_push_reshape_lanes_to_sublanes(self):
def f(x):
return x.reshape((512, 32, 128))
x_type = jax.ShapeDtypeStruct((512, 4096), jnp.float32)
block_spec = pl.BlockSpec((256, 1024), lambda i, j, k: (i, k))
out_block_spec = block_spec_lib.push_block_spec(f, block_spec)(x_type)
self.assertEqual(out_block_spec.block_shape, (256, 8, 128))
self.assertTupleEqual(out_block_spec.index_map(0, 1, 2), (0, 2, 0))
self.assertEqual(out_block_spec.index_map(3, 2, 1), (3, 1, 0))
def f(x):
return x.reshape((512, 16, 256))
x_type = jax.ShapeDtypeStruct((512, 4096), jnp.float32)
block_spec = pl.BlockSpec((256, 1024), lambda i, j, k: (i, k))
out_block_spec = block_spec_lib.push_block_spec(f, block_spec)(x_type)
self.assertEqual(out_block_spec.block_shape, (256, 4, 256))
self.assertTupleEqual(out_block_spec.index_map(0, 1, 2), (0, 2, 0))
self.assertEqual(out_block_spec.index_map(3, 2, 1), (3, 1, 0))
def test_custom_vjp(self):
@jax.custom_vjp
def act(x):
return jax.nn.relu(x) * x
def act_fwd(x):
return jax.nn.relu(x) * x, (x,)
def act_bwd(res, dy):
(x,) = res
return (dy * x * 2.34,)
act.defvjp(act_fwd, act_bwd)
def f(x):
return act(x)
x_type = jax.ShapeDtypeStruct((1, 1, 512, 512), jnp.float32)
block_spec = pl.BlockSpec(
(None, 1, 128, 128), lambda i, j, k, l, _: (i, l, k, j)
)
out_block_spec = block_spec_lib.push_block_spec(f, block_spec)(x_type)
self.assertEqual(out_block_spec.block_shape, block_spec.block_shape)
def test_reduce_sum_push(self):
def f(x):
return x.sum(axis=0)
x_type = jax.ShapeDtypeStruct((256, 512), jnp.float32)
block_spec = pl.BlockSpec((256, 256), lambda i, j: (i, j))
out_block_spec = block_spec_lib.push_block_spec(f, block_spec)(x_type)
self.assertEqual(out_block_spec.block_shape, (256,))
self.assertEqual(out_block_spec.index_map(2, 3), (3,))
def f(x):
return x.sum(axis=1)
x_type = jax.ShapeDtypeStruct((128, 512), jnp.float32)
block_spec = pl.BlockSpec((64, 512), lambda i, j: (i, j))
out_block_spec = block_spec_lib.push_block_spec(f, block_spec)(x_type)
self.assertEqual(out_block_spec.block_shape, (64,))
self.assertEqual(out_block_spec.index_map(2, 3), (2,))
def test_broadcast_in_dim_push(self):
def f(x):
return jnp.broadcast_to(x, (128, 512))
x_type = jax.ShapeDtypeStruct((512,), jnp.float32)
block_spec = pl.BlockSpec((128,), lambda i: (i,))
out_block_spec = block_spec_lib.push_block_spec(f, block_spec)(x_type)
self.assertEqual(out_block_spec.block_shape, (128, 128))
self.assertEqual(out_block_spec.index_map(3), (0, 3))
def f(x):
return jnp.broadcast_to(x, (128, 512))
x_type = jax.ShapeDtypeStruct((1, 512), jnp.float32)
block_spec = pl.BlockSpec((1, 128), lambda i, j: (i, j))
out_block_spec = block_spec_lib.push_block_spec(f, block_spec)(x_type)
self.assertEqual(out_block_spec.block_shape, (128, 128))
self.assertEqual(out_block_spec.index_map(0, 3), (0, 3))
def f(x):
x = jnp.expand_dims(x, axis=1)
return jnp.broadcast_to(x, (128, 512))
x_type = jax.ShapeDtypeStruct((128,), jnp.float32)
block_spec = pl.BlockSpec((64,), lambda i: (i,))
out_block_spec = block_spec_lib.push_block_spec(f, block_spec)(x_type)
self.assertEqual(out_block_spec.block_shape, (64, 512))
self.assertEqual(out_block_spec.index_map(1), (1, 0))
def f(x):
x = jnp.expand_dims(x, axis=0)
return jnp.broadcast_to(x, (128, 512))
x_type = jax.ShapeDtypeStruct((512,), jnp.float32)
block_spec = pl.BlockSpec((256,), lambda i: (i,))
out_block_spec = block_spec_lib.push_block_spec(f, block_spec)(x_type)
self.assertEqual(out_block_spec.block_shape, (128, 256))
self.assertEqual(out_block_spec.index_map(1), (0, 1))
def test_concatenate_push(self):
def f(x1, x2):
return jnp.concatenate((x1, x2), axis=0)
x_type = jax.ShapeDtypeStruct((512,), jnp.float32)
block_spec = pl.BlockSpec((128,), lambda i: (i,))
with self.assertRaisesRegex(
NotImplementedError, 'concatenate not supported yet'
):
block_spec_lib.push_block_spec(f, block_spec, block_spec)(x_type, x_type)
x_type = jax.ShapeDtypeStruct((512,), jnp.float32)
block_spec = pl.BlockSpec((512,), lambda i: (i,))
out_block_spec = block_spec_lib.push_block_spec(f, block_spec, block_spec)(
x_type, x_type
)
self.assertEqual(out_block_spec.block_shape, (1024,))
self.assertEqual(out_block_spec.index_map(0), (0,))
def f(x1, x2):
return jnp.stack([x1, x2], axis=0)
x_type = jax.ShapeDtypeStruct((512,), jnp.float32)
block_spec = pl.BlockSpec((128,), lambda i: (i,))
out_block_spec = block_spec_lib.push_block_spec(f, block_spec, block_spec)(
x_type, x_type
)
self.assertEqual(out_block_spec.block_shape, (2, 128))
self.assertEqual(out_block_spec.index_map(3), (0, 3))
def f(x1, x2):
return jnp.stack([x1, x2], axis=1)
x_type = jax.ShapeDtypeStruct((512,), jnp.float32)
block_spec = pl.BlockSpec((128,), lambda i: (i,))
out_block_spec = block_spec_lib.push_block_spec(f, block_spec, block_spec)(
x_type, x_type
)
self.assertEqual(out_block_spec.block_shape, (128, 2))
self.assertEqual(out_block_spec.index_map(3), (3, 0))
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| PushBlockSpecTest |
python | openai__gym | gym/envs/classic_control/continuous_mountain_car.py | {
"start": 595,
"end": 10546
} | class ____(gym.Env):
"""
### Description
The Mountain Car MDP is a deterministic MDP that consists of a car placed stochastically
at the bottom of a sinusoidal valley, with the only possible actions being the accelerations
that can be applied to the car in either direction. The goal of the MDP is to strategically
accelerate the car to reach the goal state on top of the right hill. There are two versions
of the mountain car domain in gym: one with discrete actions and one with continuous.
This version is the one with continuous actions.
This MDP first appeared in [Andrew Moore's PhD Thesis (1990)](https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-209.pdf)
```
@TECHREPORT{Moore90efficientmemory-based,
author = {Andrew William Moore},
title = {Efficient Memory-based Learning for Robot Control},
institution = {University of Cambridge},
year = {1990}
}
```
### Observation Space
The observation is a `ndarray` with shape `(2,)` where the elements correspond to the following:
| Num | Observation | Min | Max | Unit |
|-----|--------------------------------------|------|-----|--------------|
| 0 | position of the car along the x-axis | -Inf | Inf | position (m) |
| 1 | velocity of the car | -Inf | Inf | position (m) |
### Action Space
The action is a `ndarray` with shape `(1,)`, representing the directional force applied on the car.
The action is clipped in the range `[-1,1]` and multiplied by a power of 0.0015.
### Transition Dynamics:
Given an action, the mountain car follows the following transition dynamics:
*velocity<sub>t+1</sub> = velocity<sub>t+1</sub> + force * self.power - 0.0025 * cos(3 * position<sub>t</sub>)*
*position<sub>t+1</sub> = position<sub>t</sub> + velocity<sub>t+1</sub>*
where force is the action clipped to the range `[-1,1]` and power is a constant 0.0015.
The collisions at either end are inelastic with the velocity set to 0 upon collision with the wall.
The position is clipped to the range [-1.2, 0.6] and velocity is clipped to the range [-0.07, 0.07].
### Reward
A negative reward of *-0.1 * action<sup>2</sup>* is received at each timestep to penalise for
taking actions of large magnitude. If the mountain car reaches the goal then a positive reward of +100
is added to the negative reward for that timestep.
### Starting State
The position of the car is assigned a uniform random value in `[-0.6 , -0.4]`.
The starting velocity of the car is always assigned to 0.
### Episode End
The episode ends if either of the following happens:
1. Termination: The position of the car is greater than or equal to 0.45 (the goal position on top of the right hill)
2. Truncation: The length of the episode is 999.
### Arguments
```
gym.make('MountainCarContinuous-v0')
```
### Version History
* v0: Initial versions release (1.0.0)
"""
metadata = {
"render_modes": ["human", "rgb_array"],
"render_fps": 30,
}
def __init__(self, render_mode: Optional[str] = None, goal_velocity=0):
self.min_action = -1.0
self.max_action = 1.0
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = (
0.45 # was 0.5 in gym, 0.45 in Arnaud de Broissia's version
)
self.goal_velocity = goal_velocity
self.power = 0.0015
self.low_state = np.array(
[self.min_position, -self.max_speed], dtype=np.float32
)
self.high_state = np.array(
[self.max_position, self.max_speed], dtype=np.float32
)
self.render_mode = render_mode
self.screen_width = 600
self.screen_height = 400
self.screen = None
self.clock = None
self.isopen = True
self.action_space = spaces.Box(
low=self.min_action, high=self.max_action, shape=(1,), dtype=np.float32
)
self.observation_space = spaces.Box(
low=self.low_state, high=self.high_state, dtype=np.float32
)
def step(self, action: np.ndarray):
position = self.state[0]
velocity = self.state[1]
force = min(max(action[0], self.min_action), self.max_action)
velocity += force * self.power - 0.0025 * math.cos(3 * position)
if velocity > self.max_speed:
velocity = self.max_speed
if velocity < -self.max_speed:
velocity = -self.max_speed
position += velocity
if position > self.max_position:
position = self.max_position
if position < self.min_position:
position = self.min_position
if position == self.min_position and velocity < 0:
velocity = 0
# Convert a possible numpy bool to a Python bool.
terminated = bool(
position >= self.goal_position and velocity >= self.goal_velocity
)
reward = 0
if terminated:
reward = 100.0
reward -= math.pow(action[0], 2) * 0.1
self.state = np.array([position, velocity], dtype=np.float32)
if self.render_mode == "human":
self.render()
return self.state, reward, terminated, False, {}
def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
super().reset(seed=seed)
# Note that if you use custom reset bounds, it may lead to out-of-bound
# state/observations.
low, high = utils.maybe_parse_reset_bounds(options, -0.6, -0.4)
self.state = np.array([self.np_random.uniform(low=low, high=high), 0])
if self.render_mode == "human":
self.render()
return np.array(self.state, dtype=np.float32), {}
def _height(self, xs):
return np.sin(3 * xs) * 0.45 + 0.55
def render(self):
if self.render_mode is None:
gym.logger.warn(
"You are calling render method without specifying any render mode. "
"You can specify the render_mode at initialization, "
f'e.g. gym("{self.spec.id}", render_mode="rgb_array")'
)
return
try:
import pygame
from pygame import gfxdraw
except ImportError:
raise DependencyNotInstalled(
"pygame is not installed, run `pip install gym[classic_control]`"
)
if self.screen is None:
pygame.init()
if self.render_mode == "human":
pygame.display.init()
self.screen = pygame.display.set_mode(
(self.screen_width, self.screen_height)
)
else: # mode == "rgb_array":
self.screen = pygame.Surface((self.screen_width, self.screen_height))
if self.clock is None:
self.clock = pygame.time.Clock()
world_width = self.max_position - self.min_position
scale = self.screen_width / world_width
carwidth = 40
carheight = 20
self.surf = pygame.Surface((self.screen_width, self.screen_height))
self.surf.fill((255, 255, 255))
pos = self.state[0]
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip((xs - self.min_position) * scale, ys * scale))
pygame.draw.aalines(self.surf, points=xys, closed=False, color=(0, 0, 0))
clearance = 10
l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
coords = []
for c in [(l, b), (l, t), (r, t), (r, b)]:
c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos))
coords.append(
(
c[0] + (pos - self.min_position) * scale,
c[1] + clearance + self._height(pos) * scale,
)
)
gfxdraw.aapolygon(self.surf, coords, (0, 0, 0))
gfxdraw.filled_polygon(self.surf, coords, (0, 0, 0))
for c in [(carwidth / 4, 0), (-carwidth / 4, 0)]:
c = pygame.math.Vector2(c).rotate_rad(math.cos(3 * pos))
wheel = (
int(c[0] + (pos - self.min_position) * scale),
int(c[1] + clearance + self._height(pos) * scale),
)
gfxdraw.aacircle(
self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128)
)
gfxdraw.filled_circle(
self.surf, wheel[0], wheel[1], int(carheight / 2.5), (128, 128, 128)
)
flagx = int((self.goal_position - self.min_position) * scale)
flagy1 = int(self._height(self.goal_position) * scale)
flagy2 = flagy1 + 50
gfxdraw.vline(self.surf, flagx, flagy1, flagy2, (0, 0, 0))
gfxdraw.aapolygon(
self.surf,
[(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)],
(204, 204, 0),
)
gfxdraw.filled_polygon(
self.surf,
[(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)],
(204, 204, 0),
)
self.surf = pygame.transform.flip(self.surf, False, True)
self.screen.blit(self.surf, (0, 0))
if self.render_mode == "human":
pygame.event.pump()
self.clock.tick(self.metadata["render_fps"])
pygame.display.flip()
elif self.render_mode == "rgb_array":
return np.transpose(
np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2)
)
def close(self):
if self.screen is not None:
import pygame
pygame.display.quit()
pygame.quit()
self.isopen = False
| Continuous_MountainCarEnv |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 18437,
"end": 18998
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
sum = np.nansum(helper_functions.get_value("NumSymbols"))
return sum if np.isfinite(sum) else 0
################################################################################
# Statistical meta features
# Only use third and fourth statistical moment because it is common to
# standardize for the other two
# see Engels & Theusinger, 1998 - Using a Data Metric for Preprocessing Advice
# for Data Mining Applications.
@helper_functions.define("Kurtosisses")
| SymbolsSum |
python | openai__openai-python | src/openai/resources/beta/threads/messages.py | {
"start": 28524,
"end": 29640
} | class ____:
def __init__(self, messages: Messages) -> None:
self._messages = messages
self.create = ( # pyright: ignore[reportDeprecated]
to_streamed_response_wrapper(
messages.create, # pyright: ignore[reportDeprecated],
)
)
self.retrieve = ( # pyright: ignore[reportDeprecated]
to_streamed_response_wrapper(
messages.retrieve, # pyright: ignore[reportDeprecated],
)
)
self.update = ( # pyright: ignore[reportDeprecated]
to_streamed_response_wrapper(
messages.update, # pyright: ignore[reportDeprecated],
)
)
self.list = ( # pyright: ignore[reportDeprecated]
to_streamed_response_wrapper(
messages.list, # pyright: ignore[reportDeprecated],
)
)
self.delete = ( # pyright: ignore[reportDeprecated]
to_streamed_response_wrapper(
messages.delete, # pyright: ignore[reportDeprecated],
)
)
| MessagesWithStreamingResponse |
python | falconry__falcon | tests/test_httperror.py | {
"start": 4580,
"end": 4698
} | class ____:
def on_get(self, req, resp):
raise falcon.HTTPMethodNotAllowed(['PUT'])
| MethodNotAllowedResource |
python | huggingface__transformers | src/transformers/models/biogpt/modeling_biogpt.py | {
"start": 2139,
"end": 3230
} | class ____(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
# BIOGPT is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(
self,
attention_mask: torch.LongTensor,
past_key_values_length: int = 0,
position_ids: Optional[torch.LongTensor] = None,
):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
if position_ids is None:
position_ids = torch.cumsum(attention_mask, dim=1)
position_ids = (position_ids * attention_mask - 1).long()
# cut positions if `past_key_values_length` is > 0
position_ids = position_ids[:, past_key_values_length:]
return super().forward(position_ids + self.offset)
| BioGptLearnedPositionalEmbedding |
python | getsentry__sentry | src/sentry/workflow_engine/typings/notification_action.py | {
"start": 15662,
"end": 15817
} | class ____(TicketActionTranslator):
@property
def action_type(self) -> ActionType:
return ActionType.AZURE_DEVOPS
| AzureDevopsActionTranslator |
python | huggingface__transformers | src/transformers/models/seamless_m4t/modeling_seamless_m4t.py | {
"start": 94162,
"end": 95562
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
embed_dim = config.unit_embed_dim
kernel_size = config.variance_predictor_kernel_size
var_pred_dropout = config.var_pred_dropout
self.conv1 = nn.Conv1d(
embed_dim,
embed_dim,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
)
self.activation_function = nn.ReLU()
self.ln1 = nn.LayerNorm(embed_dim)
self.dropout_module = nn.Dropout(p=var_pred_dropout)
self.conv2 = nn.Conv1d(
embed_dim,
embed_dim,
kernel_size=kernel_size,
padding=1,
)
self.ln2 = nn.LayerNorm(embed_dim)
self.proj = nn.Linear(embed_dim, 1)
def forward(self, hidden_states: Tensor) -> Tensor:
# Input: B x T x C; Output: B x T
hidden_states = self.conv1(hidden_states.transpose(1, 2))
hidden_states = self.activation_function(hidden_states).transpose(1, 2)
hidden_states = self.dropout_module(self.ln1(hidden_states))
hidden_states = self.conv2(hidden_states.transpose(1, 2))
hidden_states = self.activation_function(hidden_states).transpose(1, 2)
hidden_states = self.dropout_module(self.ln2(hidden_states))
return self.proj(hidden_states).squeeze(dim=2)
| SeamlessM4TVariancePredictor |
python | astropy__astropy | astropy/cosmology/_src/tests/parameter/test_descriptors.py | {
"start": 345,
"end": 1339
} | class ____:
"""Example class with a ParametersAttribute."""
# Attributes that will be accessed by ParametersAttribute when called an instance
# of this class. On a Cosmology these would be the Parameter objects.
a: ClassVar[int] = 1
b: ClassVar[int] = 2
c: ClassVar[int] = 3
# The class attribute that is accessed by ParametersAttribute when called on the
# class. On a Cosmology this would be the mapping of Parameter objects.
# Here it is just the names of the attributes that will be accessed by the
# ParametersAttribute to better distinguish between the class and instance
# attributes.
_attr_map: ClassVar[tuple[str, ...]] = ("a", "b", "c")
# The ParametersAttribute descriptor. This will return a mapping of the values of
# the attributes listed in ``_attr_map`` when called on an instance of this class.
# When called on the class, it will return ``_attr_map`` itself.
attr = ParametersAttribute(attr_name="_attr_map")
| Obj |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 687556,
"end": 689817
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"created_at",
"database_id",
"description",
"ip_allow_list_entries",
"logo_background_color",
"logo_url",
"name",
"slug",
"updated_at",
"url",
)
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
description = sgqlc.types.Field(String, graphql_name="description")
ip_allow_list_entries = sgqlc.types.Field(
sgqlc.types.non_null(IpAllowListEntryConnection),
graphql_name="ipAllowListEntries",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
IpAllowListEntryOrder,
graphql_name="orderBy",
default={"field": "ALLOW_LIST_VALUE", "direction": "ASC"},
),
),
)
),
)
logo_background_color = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="logoBackgroundColor"
)
logo_url = sgqlc.types.Field(
sgqlc.types.non_null(URI),
graphql_name="logoUrl",
args=sgqlc.types.ArgDict(
(("size", sgqlc.types.Arg(Int, graphql_name="size", default=None)),)
),
)
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
slug = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="slug")
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
| App |
python | ray-project__ray | rllib/algorithms/cql/cql.py | {
"start": 1725,
"end": 10152
} | class ____(SACConfig):
"""Defines a configuration class from which a CQL can be built.
.. testcode::
:skipif: True
from ray.rllib.algorithms.cql import CQLConfig
config = CQLConfig().training(gamma=0.9, lr=0.01)
config = config.resources(num_gpus=0)
config = config.env_runners(num_env_runners=4)
print(config.to_dict())
# Build a Algorithm object from the config and run 1 training iteration.
algo = config.build(env="CartPole-v1")
algo.train()
"""
def __init__(self, algo_class=None):
super().__init__(algo_class=algo_class or CQL)
# fmt: off
# __sphinx_doc_begin__
# CQL-specific config settings:
self.bc_iters = 20000
self.temperature = 1.0
self.num_actions = 10
self.lagrangian = False
self.lagrangian_thresh = 5.0
self.min_q_weight = 5.0
self.deterministic_backup = True
self.lr = 3e-4
# Note, the new stack defines learning rates for each component.
# The base learning rate `lr` has to be set to `None`, if using
# the new stack.
self.actor_lr = 1e-4
self.critic_lr = 1e-3
self.alpha_lr = 1e-3
self.replay_buffer_config = {
"_enable_replay_buffer_api": True,
"type": "MultiAgentPrioritizedReplayBuffer",
"capacity": int(1e6),
# If True prioritized replay buffer will be used.
"prioritized_replay": False,
"prioritized_replay_alpha": 0.6,
"prioritized_replay_beta": 0.4,
"prioritized_replay_eps": 1e-6,
# Whether to compute priorities already on the remote worker side.
"worker_side_prioritization": False,
}
# Changes to Algorithm's/SACConfig's default:
# .reporting()
self.min_sample_timesteps_per_iteration = 0
self.min_train_timesteps_per_iteration = 100
# fmt: on
# __sphinx_doc_end__
self.timesteps_per_iteration = DEPRECATED_VALUE
@override(SACConfig)
def training(
self,
*,
bc_iters: Optional[int] = NotProvided,
temperature: Optional[float] = NotProvided,
num_actions: Optional[int] = NotProvided,
lagrangian: Optional[bool] = NotProvided,
lagrangian_thresh: Optional[float] = NotProvided,
min_q_weight: Optional[float] = NotProvided,
deterministic_backup: Optional[bool] = NotProvided,
**kwargs,
) -> Self:
"""Sets the training-related configuration.
Args:
bc_iters: Number of iterations with Behavior Cloning pretraining.
temperature: CQL loss temperature.
num_actions: Number of actions to sample for CQL loss
lagrangian: Whether to use the Lagrangian for Alpha Prime (in CQL loss).
lagrangian_thresh: Lagrangian threshold.
min_q_weight: in Q weight multiplier.
deterministic_backup: If the target in the Bellman update should have an
entropy backup. Defaults to `True`.
Returns:
This updated AlgorithmConfig object.
"""
# Pass kwargs onto super's `training()` method.
super().training(**kwargs)
if bc_iters is not NotProvided:
self.bc_iters = bc_iters
if temperature is not NotProvided:
self.temperature = temperature
if num_actions is not NotProvided:
self.num_actions = num_actions
if lagrangian is not NotProvided:
self.lagrangian = lagrangian
if lagrangian_thresh is not NotProvided:
self.lagrangian_thresh = lagrangian_thresh
if min_q_weight is not NotProvided:
self.min_q_weight = min_q_weight
if deterministic_backup is not NotProvided:
self.deterministic_backup = deterministic_backup
return self
@override(AlgorithmConfig)
def offline_data(self, **kwargs) -> Self:
super().offline_data(**kwargs)
# Check, if the passed in class incorporates the `OfflinePreLearner`
# interface.
if "prelearner_class" in kwargs:
from ray.rllib.offline.offline_data import OfflinePreLearner
if not issubclass(kwargs.get("prelearner_class"), OfflinePreLearner):
raise ValueError(
f"`prelearner_class` {kwargs.get('prelearner_class')} is not a "
"subclass of `OfflinePreLearner`. Any class passed to "
"`prelearner_class` needs to implement the interface given by "
"`OfflinePreLearner`."
)
return self
@override(SACConfig)
def get_default_learner_class(self) -> Union[Type["Learner"], str]:
if self.framework_str == "torch":
from ray.rllib.algorithms.cql.torch.cql_torch_learner import CQLTorchLearner
return CQLTorchLearner
else:
raise ValueError(
f"The framework {self.framework_str} is not supported. "
"Use `'torch'` instead."
)
@override(AlgorithmConfig)
def build_learner_connector(
self,
input_observation_space,
input_action_space,
device=None,
):
pipeline = super().build_learner_connector(
input_observation_space=input_observation_space,
input_action_space=input_action_space,
device=device,
)
# Prepend the "add-NEXT_OBS-from-episodes-to-train-batch" connector piece (right
# after the corresponding "add-OBS-..." default piece).
pipeline.insert_after(
AddObservationsFromEpisodesToBatch,
AddNextObservationsFromEpisodesToTrainBatch(),
)
return pipeline
@override(SACConfig)
def validate(self) -> None:
# First check, whether old `timesteps_per_iteration` is used.
if self.timesteps_per_iteration != DEPRECATED_VALUE:
deprecation_warning(
old="timesteps_per_iteration",
new="min_train_timesteps_per_iteration",
error=True,
)
# Call super's validation method.
super().validate()
# CQL-torch performs the optimizer steps inside the loss function.
# Using the multi-GPU optimizer will therefore not work (see multi-GPU
# check above) and we must use the simple optimizer for now.
if self.simple_optimizer is not True and self.framework_str == "torch":
self.simple_optimizer = True
if self.framework_str in ["tf", "tf2"] and tfp is None:
logger.warning(
"You need `tensorflow_probability` in order to run CQL! "
"Install it via `pip install tensorflow_probability`. Your "
f"tf.__version__={tf.__version__ if tf else None}."
"Trying to import tfp results in the following error:"
)
try_import_tfp(error=True)
# Assert that for a local learner the number of iterations is 1. Note,
# this is needed because we have no iterators, but instead a single
# batch returned directly from the `OfflineData.sample` method.
if (
self.num_learners == 0
and not self.dataset_num_iters_per_learner
and self.enable_rl_module_and_learner
):
self._value_error(
"When using a single local learner the number of iterations "
"per learner, `dataset_num_iters_per_learner` has to be defined. "
"Set this hyperparameter in the `AlgorithmConfig.offline_data`."
)
@override(SACConfig)
def get_default_rl_module_spec(self) -> RLModuleSpecType:
if self.framework_str == "torch":
from ray.rllib.algorithms.cql.torch.default_cql_torch_rl_module import (
DefaultCQLTorchRLModule,
)
return RLModuleSpec(module_class=DefaultCQLTorchRLModule)
else:
raise ValueError(
f"The framework {self.framework_str} is not supported. Use `torch`."
)
@property
def _model_config_auto_includes(self):
return super()._model_config_auto_includes | {
"num_actions": self.num_actions,
}
| CQLConfig |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_vision.py | {
"start": 16049,
"end": 16983
} | class ____(nn.Module):
"""
The residual connection is defined in Data2VecVisionLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: Data2VecVisionConfig) -> None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor, gamma=None) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
DATA2VEC_VISION_SELF_ATTENTION_CLASSES = {
"eager": Data2VecVisionSelfAttention,
"sdpa": Data2VecVisionSdpaSelfAttention,
}
# Copied from tests.models.beit.modeling_beit.BeitAttention with Beit->Data2VecVision, BEIT->DATA2VEC_VISION
| Data2VecVisionSelfOutput |
python | django__django | tests/generic_views/views.py | {
"start": 3868,
"end": 3974
} | class ____(generic.UpdateView):
queryset = Author.objects.all()
fields = "__all__"
| NaiveAuthorUpdate |
python | google__jax | tests/jax_to_ir_test.py | {
"start": 984,
"end": 5615
} | class ____(absltest.TestCase):
def test_jax_to_hlo_axpy(self):
hlo_proto, hlo_text = jax_to_ir.jax_to_hlo(axpy, [
('y', jax_to_ir.parse_shape_str('f32[128]')),
('a', jax_to_ir.parse_shape_str('f32[]')),
('x', jax_to_ir.parse_shape_str('f32[128,2]')),
])
# Check that hlo_text contains a broadcast, add, and multiply.
self.assertIn('broadcast', hlo_text)
self.assertIn('add', hlo_text)
self.assertIn('multiply', hlo_text)
# Check that the HLO parameters are in the order we specified in the
# jax_to_hlo call.
self.assertIn('f32[128]{0} parameter(0)', hlo_text)
self.assertIn('f32[] parameter(1)', hlo_text)
self.assertIn('f32[128,2]{1,0} parameter(2)', hlo_text)
# Check that the parameters are in the expected order.
# TODO(jlebar): Ideally we'd check that hlo_proto can be deserialized to a
# valid HLO proto, but we don't seem to have access to hlo_pb2 at the
# moment, so the best we seem to be able to do is check that it's nonempty.
assert hlo_proto
def test_jax_to_hlo_with_constants(self):
def fn(a, b, x, y):
return a / b * x + y
_, hlo_text = jax_to_ir.jax_to_hlo(
fn,
input_shapes=[
('x', jax_to_ir.parse_shape_str('f32[128]')),
('y', jax_to_ir.parse_shape_str('f32[128]')),
],
constants={
'a': 123456,
'b': 4,
})
# Because we passed `a` and `b` as constants, they get constant-folded away
# by Python/JAX to a/b = 30864.
self.assertIn('constant(30864)', hlo_text)
self.assertNotIn('123456', hlo_text)
def test_parse_shape_str_invalid(self):
with self.assertRaisesRegex(ValueError, 'Invalid shape.*foo'):
jax_to_ir.parse_shape_str('foo[]')
@unittest.skipIf(tf is None, 'TensorFlow not installed.')
# TODO(dsuo): Remove this once we bump tensorflow version.
@unittest.skipIf(
tfxla is None or tfxla.call_module_maximum_supported_version() < 10,
'TensorFlow version too old.',
)
def test_jax_to_tf_axpy(self):
tf_proto, tf_text = jax_to_ir.jax_to_tf(axpy, [
('y', jax_to_ir.parse_shape_str('f32[128]')),
('a', jax_to_ir.parse_shape_str('f32[]')),
('x', jax_to_ir.parse_shape_str('f32[128,2]')),
])
# Check that we can re-import our graphdef.
gdef = tf.compat.v1.GraphDef()
gdef.ParseFromString(tf_proto)
g = tf.Graph()
with g.as_default():
tf.import_graph_def(gdef, name='')
# Check that the HLO parameters are named as we specified.
ops = {o.name: o for o in g.get_operations()
if o.name in ('y', 'a', 'x', 'jax2tf_out')}
self.assertLen(ops, 4)
self.assertIdentityOp(ops['y'], [128], jnp.float32)
self.assertIdentityOp(ops['a'], [], jnp.float32)
self.assertIdentityOp(ops['x'], [128, 2], jnp.float32)
self.assertIdentityOp(ops['jax2tf_out'], [128, 2], jnp.float32)
def assertIdentityOp(self, op, expected_shape, expected_dtype):
self.assertEqual(op.type, 'Identity')
output, = op.outputs
self.assertEqual(output.shape, expected_shape)
self.assertEqual(output.dtype, expected_dtype)
def test_parse_shape_str(self):
self.assertParsedShape('f32[]', [], jnp.float32)
self.assertParsedShape('f32[1,2,3]', [1, 2, 3], jnp.float32)
self.assertParsedShape('pred[1]', [1], jnp.bool_)
self.assertParsedShape('s2[1]', [1], jnp.int2)
self.assertParsedShape('s4[1]', [1], jnp.int4)
self.assertParsedShape('s8[1]', [1], jnp.int8)
self.assertParsedShape('s16[1]', [1], jnp.int16)
self.assertParsedShape('s32[1]', [1], jnp.int32)
self.assertParsedShape('s64[1]', [1], jnp.int64)
self.assertParsedShape('u2[1]', [1], jnp.uint2)
self.assertParsedShape('u4[1]', [1], jnp.uint4)
self.assertParsedShape('u8[1]', [1], jnp.uint8)
self.assertParsedShape('u16[1]', [1], jnp.uint16)
self.assertParsedShape('u32[1]', [1], jnp.uint32)
self.assertParsedShape('u64[1]', [1], jnp.uint64)
self.assertParsedShape('f16[1]', [1], jnp.float16)
self.assertParsedShape('f32[1]', [1], jnp.float32)
self.assertParsedShape('f64[1]', [1], jnp.float64)
self.assertParsedShape('bf16[1]', [1], jnp.bfloat16)
self.assertParsedShape('c64[1]', [1], jnp.complex64)
self.assertParsedShape('c128[1]', [1], jnp.complex128)
def assertParsedShape(self, s: str, expected_shape, expected_dtype):
p = jax_to_ir.parse_shape_str(s)
self.assertEqual(p.shape, tuple(expected_shape))
self.assertEqual(p.dtype, expected_dtype)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| JaxToIRTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 203737,
"end": 204078
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "starrable")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
starrable = sgqlc.types.Field("Starrable", graphql_name="starrable")
| AddStarPayload |
python | realpython__materials | contact-book-python-textual/source_code_step_4/rpcontacts/tui.py | {
"start": 199,
"end": 1934
} | class ____(App):
CSS_PATH = "rpcontacts.tcss"
BINDINGS = [
("m", "toggle_dark", "Toggle dark mode"),
("a", "add", "Add"),
("d", "delete", "Delete"),
("c", "clear_all", "Clear All"),
("q", "request_quit", "Quit"),
]
def __init__(self, db):
super().__init__()
self.db = db
def compose(self):
yield Header()
contacts_list = DataTable(classes="contacts-list")
contacts_list.focus()
contacts_list.add_columns("Name", "Phone", "Email")
contacts_list.cursor_type = "row"
contacts_list.zebra_stripes = True
add_button = Button("Add", variant="success", id="add")
add_button.focus()
buttons_panel = Vertical(
add_button,
Button("Delete", variant="warning", id="delete"),
Static(classes="separator"),
Button("Clear All", variant="error", id="clear"),
classes="buttons-panel",
)
yield Horizontal(contacts_list, buttons_panel)
yield Footer()
def on_mount(self):
self.title = "RP Contacts"
self.sub_title = "A Contacts Book App With Textual & Python"
self._load_contacts()
def _load_contacts(self):
contacts_list = self.query_one(DataTable)
for contact_data in self.db.get_all_contacts():
id, *contact = contact_data
contacts_list.add_row(*contact, key=id)
def action_toggle_dark(self):
self.dark = not self.dark
def action_request_quit(self):
def check_answer(accepted):
if accepted:
self.exit()
self.push_screen(QuestionDialog("Do you want to quit?"), check_answer)
| ContactsApp |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datafusion.py | {
"start": 18520,
"end": 19485
} | class ____:
@mock.patch(HOOK_STR)
def test_execute_check_hook_call_should_execute_successfully(self, mock_hook):
mock_hook.return_value.get_instance.return_value = {
"apiEndpoint": INSTANCE_URL,
"serviceEndpoint": INSTANCE_URL,
}
op = CloudDataFusionStopPipelineOperator(
task_id="test_tasks",
pipeline_name=PIPELINE_NAME,
instance_name=INSTANCE_NAME,
namespace=NAMESPACE,
location=LOCATION,
project_id=PROJECT_ID,
)
op.execute(context=mock.MagicMock())
mock_hook.return_value.get_instance.assert_called_once_with(
instance_name=INSTANCE_NAME, location=LOCATION, project_id=PROJECT_ID
)
mock_hook.return_value.stop_pipeline.assert_called_once_with(
instance_url=INSTANCE_URL, pipeline_name=PIPELINE_NAME, namespace=NAMESPACE
)
| TestCloudDataFusionStopPipelineOperator |
python | huggingface__transformers | tests/models/blip/test_modeling_blip.py | {
"start": 38657,
"end": 42861
} | class ____(unittest.TestCase):
def test_inference_image_captioning(self):
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(torch_device)
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = prepare_img()
# image only
inputs = processor(images=image, return_tensors="pt").to(torch_device)
predictions = model.generate(**inputs)
# Test output
self.assertEqual(predictions[0].tolist(), [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102])
# image and context
context = ["a picture of"]
inputs = processor(images=image, text=context, return_tensors="pt").to(torch_device)
predictions = model.generate(**inputs)
# Test output
self.assertEqual(
predictions[0].tolist(),
[30522, 1037, 3861, 1997, 1037, 2450, 1998, 2014, 3899, 2006, 1996, 3509, 102],
)
@require_torch_accelerator
@require_torch_fp16
def test_inference_image_captioning_fp16(self):
model = BlipForConditionalGeneration.from_pretrained(
"Salesforce/blip-image-captioning-base", dtype=torch.float16
).to(torch_device)
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = prepare_img()
# image only
inputs = processor(images=image, return_tensors="pt").to(torch_device, torch.float16)
predictions = model.generate(**inputs)
# Test output
self.assertEqual(predictions[0].tolist(), [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102])
# image and context
context = ["a picture of"]
inputs = processor(images=image, text=context, return_tensors="pt").to(torch_device, torch.float16)
predictions = model.generate(**inputs)
# Test output
self.assertEqual(
predictions[0].tolist(),
[30522, 1037, 3861, 1997, 1037, 2450, 1998, 2014, 3899, 2006, 1996, 3509, 102],
)
def test_inference_interpolate_pos_encoding(self):
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(torch_device)
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
processor.image_processor.size = {"height": 500, "width": 500}
image = prepare_img()
inputs = processor(images=image, return_tensors="pt").to(torch_device)
predictions = model.generate(**inputs, interpolate_pos_encoding=True)
generated_text = processor.batch_decode(predictions, skip_special_tokens=True)[0].strip()
self.assertEqual(predictions[0].tolist(), [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 1037, 3899, 102])
self.assertEqual(generated_text, "a woman sitting on the beach with a dog")
def test_inference_vqa(self):
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base").to(torch_device)
processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
image = prepare_img()
text = "how many dogs are in the picture?"
inputs = processor(image, text=text, return_tensors="pt").to(torch_device)
out = model.generate(**inputs)
# Test output
self.assertEqual(out[0].tolist(), [30522, 1015, 102])
def test_inference_itm(self):
model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco").to(torch_device)
processor = BlipProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
image = prepare_img()
text = "A woman and her dog sitting in a beach"
inputs = processor(image, text, return_tensors="pt").to(torch_device)
out_itm = model(**inputs)
out = model(**inputs, use_itm_head=False)
expected_scores = torch.Tensor([[0.0029, 0.9971]])
torch.testing.assert_close(torch.nn.Softmax()(out_itm[0].cpu()), expected_scores, rtol=1e-3, atol=1e-3)
torch.testing.assert_close(out[0].cpu(), torch.Tensor([[0.5162]]), rtol=1e-3, atol=1e-3)
| BlipModelIntegrationTest |
python | ansible__ansible | test/lib/ansible_test/_internal/containers.py | {
"start": 12676,
"end": 19251
} | class ____:
"""Database of running containers used to support tests."""
def __init__(self, data: dict[str, dict[str, dict[str, ContainerAccess]]]) -> None:
self.data = data
@staticmethod
def from_dict(data: dict[str, t.Any]) -> ContainerDatabase:
"""Return a ContainerDatabase instance from the given dict."""
return ContainerDatabase(dict((access_name,
dict((context_name,
dict((container_name, ContainerAccess.from_dict(container))
for container_name, container in containers.items()))
for context_name, containers in contexts.items()))
for access_name, contexts in data.items()))
def to_dict(self) -> dict[str, t.Any]:
"""Return a dict of the current instance."""
return dict((access_name,
dict((context_name,
dict((container_name, container.to_dict())
for container_name, container in containers.items()))
for context_name, containers in contexts.items()))
for access_name, contexts in self.data.items())
def local_ssh(args: EnvironmentConfig, python: PythonConfig) -> SshConnectionDetail:
"""Return SSH connection details for localhost, connecting as root to the default SSH port."""
return SshConnectionDetail('localhost', 'localhost', None, 'root', SshKey(args).key, python.path)
def root_ssh(ssh: SshConnection) -> SshConnectionDetail:
"""Return the SSH connection details from the given SSH connection. If become was specified, the user will be changed to `root`."""
settings = ssh.settings.__dict__.copy()
if ssh.become:
settings.update(
user='root',
)
return SshConnectionDetail(**settings)
def create_container_database(args: EnvironmentConfig) -> ContainerDatabase:
"""Create and return a container database with information necessary for all test hosts to make use of relevant support containers."""
origin: dict[str, dict[str, ContainerAccess]] = {}
control: dict[str, dict[str, ContainerAccess]] = {}
managed: dict[str, dict[str, ContainerAccess]] = {}
for name, container in support_containers.items():
if container.details.published_ports:
if require_docker().command == 'podman':
host_ip_func = get_podman_host_ip
else:
host_ip_func = get_docker_host_ip
published_access = ContainerAccess(
host_ip=host_ip_func(),
names=container.aliases,
ports=None,
forwards=dict((port, published_port) for port, published_port in container.details.published_ports.items()),
)
else:
published_access = None # no published access without published ports (ports are only published if needed)
if container.details.container_ip:
# docker containers, and rootfull podman containers should have a container IP address
container_access = ContainerAccess(
host_ip=container.details.container_ip,
names=container.aliases,
ports=container.ports,
forwards=None,
)
elif require_docker().command == 'podman':
# published ports for rootless podman containers should be accessible from the host's IP
container_access = ContainerAccess(
host_ip=get_podman_host_ip(),
names=container.aliases,
ports=None,
forwards=dict((port, published_port) for port, published_port in container.details.published_ports.items()),
)
else:
container_access = None # no container access without an IP address
if get_docker_container_id():
if not container_access:
raise Exception('Missing IP address for container: %s' % name)
origin_context = origin.setdefault(container.context, {})
origin_context[name] = container_access
elif not published_access:
pass # origin does not have network access to the containers
else:
origin_context = origin.setdefault(container.context, {})
origin_context[name] = published_access
if isinstance(args.controller, RemoteConfig):
pass # SSH forwarding required
elif '-controller-' in name:
pass # hack to avoid exposing the controller container to the controller
elif isinstance(args.controller, DockerConfig) or (isinstance(args.controller, OriginConfig) and get_docker_container_id()):
if container_access:
control_context = control.setdefault(container.context, {})
control_context[name] = container_access
else:
raise Exception('Missing IP address for container: %s' % name)
else:
if not published_access:
raise Exception('Missing published ports for container: %s' % name)
control_context = control.setdefault(container.context, {})
control_context[name] = published_access
if issubclass(args.target_type, (RemoteConfig, WindowsInventoryConfig, PosixSshConfig)):
pass # SSH forwarding required
elif '-controller-' in name or '-target-' in name:
pass # hack to avoid exposing the controller and target containers to the target
elif issubclass(args.target_type, DockerConfig) or (issubclass(args.target_type, OriginConfig) and get_docker_container_id()):
if container_access:
managed_context = managed.setdefault(container.context, {})
managed_context[name] = container_access
else:
raise Exception('Missing IP address for container: %s' % name)
else:
if not published_access:
raise Exception('Missing published ports for container: %s' % name)
managed_context = managed.setdefault(container.context, {})
managed_context[name] = published_access
data = {
HostType.origin: origin,
HostType.control: control,
HostType.managed: managed,
}
data = dict((key, value) for key, value in data.items() if value)
return ContainerDatabase(data)
| ContainerDatabase |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 156818,
"end": 161214
} | class ____(ExprNode):
# {}-delimited portions of an f-string
#
# value ExprNode The expression itself
# conversion_char str or None Type conversion (!s, !r, !a, none, or 'd' for integer conversion)
# format_spec JoinedStrNode or None Format string passed to __format__
# c_format_spec str or None If not None, formatting can be done at the C level
subexprs = ['value', 'format_spec']
type = unicode_type
is_temp = True
c_format_spec = None
gil_message = "String formatting"
find_conversion_func = {
's': 'PyObject_Str',
'r': 'PyObject_Repr',
'a': 'PyObject_ASCII',
'd': '__Pyx_PyNumber_Long', # NOTE: internal mapping for '%d' formatting
}.get
def may_be_none(self):
# PyObject_Format() always returns a Unicode string or raises an exception
return False
def analyse_types(self, env):
self.value = self.value.analyse_types(env)
resolved_type = self.value.type.resolve()
if not self.format_spec or self.format_spec.is_string_literal:
c_format_spec = self.format_spec.value if self.format_spec else resolved_type.default_format_spec
if self.value.type.can_coerce_to_pystring(env, format_spec=c_format_spec):
self.c_format_spec = c_format_spec
if self.format_spec:
self.format_spec = self.format_spec.analyse_types(env).coerce_to_pyobject(env)
if self.c_format_spec is None:
self.value = self.value.coerce_to_pyobject(env)
if not self.format_spec and (not self.conversion_char or self.conversion_char == 's'):
if resolved_type is unicode_type and not self.value.may_be_none():
# value is definitely a unicode string and we don't format it any special
return self.value
return self
def generate_result_code(self, code):
if self.c_format_spec is not None and not self.value.type.is_pyobject:
convert_func_call = self.value.type.convert_to_pystring(
self.value.result(), code, self.c_format_spec)
code.putln("%s = %s; %s" % (
self.result(),
convert_func_call,
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
return
value_result = self.value.py_result()
value_is_unicode = self.value.type is unicode_type and not self.value.may_be_none()
if self.format_spec:
format_func = '__Pyx_PyObject_Format'
format_spec = self.format_spec.py_result()
else:
# common case: expect simple Unicode pass-through if no format spec
format_func = '__Pyx_PyObject_FormatSimple'
# passing a Unicode format string in Py2 forces PyObject_Format() to also return a Unicode string
format_spec = code.name_in_module_state(Naming.empty_unicode)
conversion_char = self.conversion_char
if conversion_char == 's' and value_is_unicode:
# no need to pipe unicode strings through str()
conversion_char = None
if conversion_char:
fn = self.find_conversion_func(conversion_char)
assert fn is not None, "invalid conversion character found: '%s'" % conversion_char
value_result = '%s(%s)' % (fn, value_result)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectFormatAndDecref", "StringTools.c"))
format_func += 'AndDecref'
elif self.format_spec:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectFormat", "StringTools.c"))
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectFormatSimple", "StringTools.c"))
code.putln("%s = %s(%s, %s); %s" % (
self.result(),
format_func,
value_result,
format_spec,
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
#-------------------------------------------------------------------
#
# Parallel nodes (cython.parallel.thread(savailable|id))
#
#-------------------------------------------------------------------
| FormattedValueNode |
python | mlflow__mlflow | tests/resources/mlflow-test-plugin/mlflow_test_plugin/dummy_evaluator.py | {
"start": 797,
"end": 4626
} | class ____(ModelEvaluator):
@classmethod
def can_evaluate(cls, *, model_type, evaluator_config, **kwargs):
return model_type in ["classifier", "regressor"]
def _log_metrics(self, run_id, metrics):
"""
Helper method to log metrics into specified run.
"""
client = MlflowClient()
timestamp = get_current_time_millis()
client.log_batch(
run_id,
metrics=[
Metric(key=key, value=value, timestamp=timestamp, step=0)
for key, value in metrics.items()
],
)
def _evaluate(self, y_pred):
if self.model_type == "classifier":
accuracy_score = sk_metrics.accuracy_score(self.y, y_pred)
metrics = {"accuracy_score": accuracy_score}
artifacts = {}
self._log_metrics(self.run_id, metrics)
confusion_matrix = sk_metrics.confusion_matrix(self.y, y_pred)
confusion_matrix_artifact_name = "confusion_matrix"
confusion_matrix_artifact = Array2DEvaluationArtifact(
uri=get_artifact_uri(self.run_id, confusion_matrix_artifact_name + ".csv"),
content=confusion_matrix,
)
confusion_matrix_csv_buff = io.StringIO()
confusion_matrix_artifact._save(confusion_matrix_csv_buff)
self.client.log_text(
self.run_id,
confusion_matrix_csv_buff.getvalue(),
confusion_matrix_artifact_name + ".csv",
)
confusion_matrix_figure = sk_metrics.ConfusionMatrixDisplay.from_predictions(
self.y, y_pred
).figure_
img_buf = io.BytesIO()
confusion_matrix_figure.savefig(img_buf)
img_buf.seek(0)
confusion_matrix_image = Image.open(img_buf)
confusion_matrix_image_artifact_name = "confusion_matrix_image"
confusion_matrix_image_artifact = ImageEvaluationArtifact(
uri=get_artifact_uri(self.run_id, confusion_matrix_image_artifact_name + ".png"),
content=confusion_matrix_image,
)
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, confusion_matrix_image_artifact_name + ".png")
confusion_matrix_image_artifact._save(path)
self.client.log_image(
self.run_id,
confusion_matrix_image,
confusion_matrix_image_artifact_name + ".png",
)
artifacts = {
confusion_matrix_artifact_name: confusion_matrix_artifact,
confusion_matrix_image_artifact_name: confusion_matrix_image_artifact,
}
elif self.model_type == "regressor":
mean_absolute_error = sk_metrics.mean_absolute_error(self.y, y_pred)
mean_squared_error = sk_metrics.mean_squared_error(self.y, y_pred)
metrics = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
}
self._log_metrics(self.run_id, metrics)
artifacts = {}
else:
raise ValueError(f"Unsupported model type {self.model_type}")
return EvaluationResult(metrics=metrics, artifacts=artifacts)
def evaluate(self, *, model, model_type, dataset, run_id, evaluator_config, **kwargs):
self.model_type = model_type
self.client = MlflowClient()
self.dataset = dataset
self.run_id = run_id
self.X = dataset.features_data
self.y = dataset.labels_data
y_pred = model.predict(self.X) if model is not None else self.dataset.predictions_data
return self._evaluate(y_pred)
| DummyEvaluator |
python | mlflow__mlflow | mlflow/types/responses_helpers.py | {
"start": 9747,
"end": 10991
} | class ____(Status):
content: str | list[ResponseInputTextParam | dict[str, Any]]
role: str
status: str | None = None
type: str = "message"
@model_validator(mode="after")
def check_content(self) -> "Message":
if self.content is None:
raise ValueError("content must not be None")
if isinstance(self.content, list):
for item in self.content:
if isinstance(item, dict):
if "type" not in item:
raise ValueError(
"dictionary type content field values must have key 'type'"
)
if item["type"] == "input_text":
ResponseInputTextParam(**item)
elif item["type"] not in {"input_image", "input_file"}:
raise ValueError(f"Invalid type: {item['type']}.")
return self
@model_validator(mode="after")
def check_role(self) -> "Message":
if self.role not in {"user", "assistant", "system", "developer"}:
raise ValueError(
f"Invalid role: {self.role}. Must be 'user', 'assistant', 'system', or 'developer'."
)
return self
| Message |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.