language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_tasks.py | {
"start": 1458,
"end": 3480
} | class ____:
dag_id = "test_dag"
mapped_dag_id = "test_mapped_task"
unscheduled_dag_id = "test_unscheduled_dag"
task_id = "op1"
task_id2 = "op2"
task_id3 = "op3"
mapped_task_id = "mapped_task"
unscheduled_task_id1 = "unscheduled_task_1"
unscheduled_task_id2 = "unscheduled_task_2"
task1_start_date = datetime(2020, 6, 15)
task2_start_date = datetime(2020, 6, 16)
api_prefix = "/dags"
def create_dags(self, test_client):
with DAG(self.dag_id, schedule=None, start_date=self.task1_start_date, doc_md="details") as dag:
task1 = EmptyOperator(task_id=self.task_id, params={"foo": "bar"})
task2 = EmptyOperator(task_id=self.task_id2, start_date=self.task2_start_date)
task1 >> task2
with DAG(self.mapped_dag_id, schedule=None, start_date=self.task1_start_date) as mapped_dag:
EmptyOperator(task_id=self.task_id3)
# Use the private _expand() method to avoid the empty kwargs check.
# We don't care about how the operator runs here, only its presence.
EmptyOperator.partial(task_id=self.mapped_task_id)._expand(EXPAND_INPUT_EMPTY, strict=False)
with DAG(self.unscheduled_dag_id, start_date=None, schedule=None) as unscheduled_dag:
task4 = EmptyOperator(task_id=self.unscheduled_task_id1, params={"is_unscheduled": True})
task5 = EmptyOperator(task_id=self.unscheduled_task_id2, params={"is_unscheduled": True})
task4 >> task5
sync_dags_to_db([dag, mapped_dag, unscheduled_dag])
test_client.app.dependency_overrides[dag_bag_from_app] = DBDagBag
@staticmethod
def clear_db():
clear_db_runs()
clear_db_dags()
clear_db_serialized_dags()
clear_db_dag_bundles()
@pytest.fixture(autouse=True)
def setup(self, test_client) -> None:
self.clear_db()
self.create_dags(test_client)
def teardown_method(self) -> None:
self.clear_db()
| TestTaskEndpoint |
python | vyperlang__vyper | vyper/ast/pre_parser.py | {
"start": 8085,
"end": 13791
} | class ____:
# Compilation settings based on the directives in the source code
settings: Settings
# A mapping of offsets to new class names
keyword_translations: dict[tuple[int, int], str]
# Map from offsets in the original vyper source code to offsets
# in the new ("reformatted", i.e. python-compatible) source code
adjustments: dict[tuple[int, int], int]
# A mapping of line/column offsets of `For` nodes to the annotation of the for loop target
for_loop_annotations: dict[tuple[int, int], list[TokenInfo]]
# A list of line/column offsets of hex string literals
hex_string_locations: list[tuple[int, int]]
# Reformatted python source string.
reformatted_code: str
def __init__(self, is_interface):
self._is_interface = is_interface
def parse(self, code: str):
"""
Re-formats a vyper source string into a python source string and performs
some validation. More specifically,
* Translates "interface", "struct", "flag", and "event" keywords into python "class" keyword
* Validates "@version" pragma against current compiler version
* Prevents direct use of python "class" keyword
* Prevents use of python semi-colon statement separator
* Extracts type annotation of for loop iterators into a separate dictionary
Stores a mapping of detected interface and struct names to their
respective vyper class types ("interface" or "struct"), and a mapping of line numbers
of for loops to the type annotation of their iterators.
Parameters
----------
code : str
The vyper source code to be re-formatted.
"""
try:
self._parse(code)
except TokenError as e:
raise SyntaxException(e.args[0], code, e.args[1][0], e.args[1][1]) from e
def _parse(self, code: str):
adjustments: dict = {}
result: list[TokenInfo] = []
keyword_translations: dict[tuple[int, int], str] = {}
settings = Settings()
for_parser = ForParser(code)
hex_string_parser = HexStringParser()
_col_adjustments: dict[int, int] = defaultdict(lambda: 0)
code_bytes = code.encode("utf-8")
token_list = list(tokenize(io.BytesIO(code_bytes).readline))
for token in token_list:
toks = [token]
typ = token.type
string = token.string
start = token.start
end = token.end
line = token.line
# handle adjustments
lineno, col = token.start
adj = _col_adjustments[lineno]
newstart = lineno, col - adj
adjustments[lineno, col - adj] = adj
if typ == COMMENT:
contents = string[1:].strip()
if contents.startswith("@version"):
if settings.compiler_version is not None:
raise PragmaException("compiler version specified twice!", code, *start)
compiler_version = contents.removeprefix("@version ").strip()
validate_version_pragma(compiler_version, (code, *start))
settings.compiler_version = compiler_version
if contents.startswith("pragma "):
_parse_pragma(contents, settings, self._is_interface, code, start)
if typ == NAME and string in ("class", "yield"):
raise SyntaxException(
f"The `{string}` keyword is not allowed. ", code, start[0], start[1]
)
if typ == NAME:
# see if it's a keyword we need to replace
new_keyword = None
if string in VYPER_CLASS_TYPES and start[1] == 0:
new_keyword = "class"
vyper_type = VYPER_CLASS_TYPES[string]
elif string in CUSTOM_STATEMENT_TYPES:
new_keyword = "yield"
vyper_type = CUSTOM_STATEMENT_TYPES[string]
elif string in CUSTOM_EXPRESSION_TYPES:
new_keyword = "await"
vyper_type = CUSTOM_EXPRESSION_TYPES[string]
if new_keyword is not None:
keyword_translations[newstart] = vyper_type
adjustment = len(string) - len(new_keyword)
# adjustments for following tokens
lineno, col = start
_col_adjustments[lineno] += adjustment
# a bit cursed technique to get untokenize to put
# the new tokens in the right place so that
# `keyword_translations` will work correctly.
# (recommend comparing the result of parse with the
# source code side by side to visualize the whitespace)
toks = [TokenInfo(NAME, new_keyword, start, end, line)]
if (typ, string) == (OP, ";"):
raise SyntaxException("Semi-colon statements not allowed", code, start[0], start[1])
if not for_parser.consume(token) and not hex_string_parser.consume(token, result):
result.extend(toks)
for_loop_annotations = {}
for k, v in for_parser.annotations.items():
for_loop_annotations[k] = v.copy()
self.adjustments = adjustments
self.settings = settings
self.keyword_translations = keyword_translations
self.for_loop_annotations = for_loop_annotations
self.hex_string_locations = hex_string_parser.locations
self.reformatted_code = untokenize(result).decode("utf-8")
| PreParser |
python | wandb__wandb | wandb/filesync/stats.py | {
"start": 427,
"end": 2941
} | class ____:
def __init__(self) -> None:
self._stats: MutableMapping[str, FileStats] = {}
self._lock = threading.Lock()
def init_file(
self, save_name: str, size: int, is_artifact_file: bool = False
) -> None:
with self._lock:
self._stats[save_name] = FileStats(
deduped=False,
total=size,
uploaded=0,
failed=False,
artifact_file=is_artifact_file,
)
def set_file_deduped(self, save_name: str) -> None:
with self._lock:
orig = self._stats[save_name]
self._stats[save_name] = orig._replace(
deduped=True,
uploaded=orig.total,
)
def update_uploaded_file(self, save_name: str, total_uploaded: int) -> None:
with self._lock:
self._stats[save_name] = self._stats[save_name]._replace(
uploaded=total_uploaded,
)
def update_failed_file(self, save_name: str) -> None:
with self._lock:
self._stats[save_name] = self._stats[save_name]._replace(
uploaded=0,
failed=True,
)
def summary(self) -> Summary:
# Need to use list to ensure we get a copy, since other threads may
# modify this while we iterate
with self._lock:
stats = list(self._stats.values())
return Summary(
uploaded_bytes=sum(f.uploaded for f in stats),
total_bytes=sum(f.total for f in stats),
deduped_bytes=sum(f.total for f in stats if f.deduped),
)
def file_counts_by_category(self) -> FileCountsByCategory:
artifact_files = 0
wandb_files = 0
media_files = 0
other_files = 0
# Need to use list to ensure we get a copy, since other threads may
# modify this while we iterate
with self._lock:
file_stats = list(self._stats.items())
for save_name, stats in file_stats:
if stats.artifact_file:
artifact_files += 1
elif filenames.is_wandb_file(save_name):
wandb_files += 1
elif save_name.startswith("media"):
media_files += 1
else:
other_files += 1
return FileCountsByCategory(
artifact=artifact_files,
wandb=wandb_files,
media=media_files,
other=other_files,
)
| Stats |
python | marshmallow-code__apispec | tests/test_core.py | {
"start": 4710,
"end": 6343
} | class ____:
def test_openapi_metadata(self, spec):
metadata = spec.to_dict()
assert metadata["info"]["title"] == "Swagger Petstore"
assert metadata["info"]["version"] == "1.0.0"
assert metadata["info"]["description"] == description
if spec.openapi_version.major < 3:
assert metadata["swagger"] == str(spec.openapi_version)
assert metadata["security"] == [{"apiKey": []}]
else:
assert metadata["openapi"] == str(spec.openapi_version)
security_schemes = {
"bearerAuth": dict(type="http", scheme="bearer", bearerFormat="JWT")
}
assert metadata["components"]["securitySchemes"] == security_schemes
assert metadata["components"]["schemas"].get("ErrorResponse", False)
assert metadata["info"]["title"] == "Swagger Petstore"
assert metadata["info"]["version"] == "1.0.0"
assert metadata["info"]["description"] == description
@pytest.mark.parametrize("spec", ("3.0.0",), indirect=True)
def test_openapi_metadata_merge_v3(self, spec):
properties = {
"ok": {
"type": "boolean",
"description": "property description",
"example": True,
}
}
spec.components.schema(
"definition", {"properties": properties, "description": "description"}
)
metadata = spec.to_dict()
assert metadata["components"]["schemas"].get("ErrorResponse", False)
assert metadata["components"]["schemas"].get("definition", False)
| TestMetadata |
python | ray-project__ray | rllib/examples/multi_agent/utils/self_play_league_based_callback_old_api_stack.py | {
"start": 268,
"end": 9232
} | class ____(RLlibCallback):
def __init__(self, win_rate_threshold):
super().__init__()
# All policies in the league.
self.main_policies = {"main", "main_0"}
self.main_exploiters = {"main_exploiter_0", "main_exploiter_1"}
self.league_exploiters = {"league_exploiter_0", "league_exploiter_1"}
# Set of currently trainable policies in the league.
self.trainable_policies = {"main"}
# Set of currently non-trainable (frozen) policies in the league.
self.non_trainable_policies = {
"main_0",
"league_exploiter_0",
"main_exploiter_0",
}
# The win-rate value reaching of which leads to a new module being added
# to the leage (frozen copy of main).
self.win_rate_threshold = win_rate_threshold
# Store the win rates for league overview printouts.
self.win_rates = {}
def on_train_result(self, *, algorithm, result, **kwargs):
# Avoid `self` being pickled into the remote function below.
_trainable_policies = self.trainable_policies
# Get the win rate for the train batch.
# Note that normally, you should set up a proper evaluation config,
# such that evaluation always happens on the already updated policy,
# instead of on the already used train_batch.
for policy_id, rew in result[ENV_RUNNER_RESULTS]["hist_stats"].items():
mo = re.match("^policy_(.+)_reward$", policy_id)
if mo is None:
continue
policy_id = mo.group(1)
# Calculate this policy's win rate.
won = 0
for r in rew:
if r > 0.0: # win = 1.0; loss = -1.0
won += 1
win_rate = won / len(rew)
self.win_rates[policy_id] = win_rate
# Policy is frozen; ignore.
if policy_id in self.non_trainable_policies:
continue
print(
f"Iter={algorithm.iteration} {policy_id}'s " f"win-rate={win_rate} -> ",
end="",
)
# If win rate is good -> Snapshot current policy and decide,
# whether to freeze the copy or not.
if win_rate > self.win_rate_threshold:
is_main = re.match("^main(_\\d+)?$", policy_id)
initializing_exploiters = False
# First time, main manages a decent win-rate against random:
# Add league_exploiter_0 and main_exploiter_0 to the mix.
if is_main and len(self.trainable_policies) == 1:
initializing_exploiters = True
self.trainable_policies.add("league_exploiter_0")
self.trainable_policies.add("main_exploiter_0")
else:
keep_training = (
False
if is_main
else np.random.choice([True, False], p=[0.3, 0.7])
)
if policy_id in self.main_policies:
new_pol_id = re.sub(
"_\\d+$", f"_{len(self.main_policies) - 1}", policy_id
)
self.main_policies.add(new_pol_id)
elif policy_id in self.main_exploiters:
new_pol_id = re.sub(
"_\\d+$", f"_{len(self.main_exploiters)}", policy_id
)
self.main_exploiters.add(new_pol_id)
else:
new_pol_id = re.sub(
"_\\d+$", f"_{len(self.league_exploiters)}", policy_id
)
self.league_exploiters.add(new_pol_id)
if keep_training:
self.trainable_policies.add(new_pol_id)
else:
self.non_trainable_policies.add(new_pol_id)
print(f"adding new opponents to the mix ({new_pol_id}).")
# Update our mapping function accordingly.
def policy_mapping_fn(agent_id, episode, worker=None, **kwargs):
# Pick, whether this is ...
type_ = np.random.choice([1, 2])
# 1) League exploiter vs any other.
if type_ == 1:
league_exploiter = "league_exploiter_" + str(
np.random.choice(list(range(len(self.league_exploiters))))
)
# This league exploiter is frozen: Play against a
# trainable policy.
if league_exploiter not in self.trainable_policies:
opponent = np.random.choice(list(self.trainable_policies))
# League exploiter is trainable: Play against any other
# non-trainable policy.
else:
opponent = np.random.choice(
list(self.non_trainable_policies)
)
print(f"{league_exploiter} vs {opponent}")
return (
league_exploiter
if episode.episode_id % 2 == agent_id
else opponent
)
# 2) Main exploiter vs main.
else:
main_exploiter = "main_exploiter_" + str(
np.random.choice(list(range(len(self.main_exploiters))))
)
# Main exploiter is frozen: Play against the main
# policy.
if main_exploiter not in self.trainable_policies:
main = "main"
# Main exploiter is trainable: Play against any
# frozen main.
else:
main = np.random.choice(list(self.main_policies - {"main"}))
# print(f"{main_exploiter} vs {main}")
return (
main_exploiter
if episode.episode_id % 2 == agent_id
else main
)
# Set the weights of the new polic(y/ies).
if initializing_exploiters:
main_state = algorithm.get_policy("main").get_state()
pol_map = algorithm.env_runner.policy_map
pol_map["main_0"].set_state(main_state)
pol_map["league_exploiter_1"].set_state(main_state)
pol_map["main_exploiter_1"].set_state(main_state)
# We need to sync the just copied local weights to all the
# remote workers as well.
algorithm.env_runner_group.sync_weights(
policies=["main_0", "league_exploiter_1", "main_exploiter_1"]
)
def _set(worker):
worker.set_policy_mapping_fn(policy_mapping_fn)
worker.set_is_policy_to_train(_trainable_policies)
algorithm.env_runner_group.foreach_env_runner(_set)
else:
base_pol = algorithm.get_policy(policy_id)
new_policy = algorithm.add_policy(
policy_id=new_pol_id,
policy_cls=type(base_pol),
policy_mapping_fn=policy_mapping_fn,
policies_to_train=self.trainable_policies,
config=base_pol.config,
observation_space=base_pol.observation_space,
action_space=base_pol.action_space,
)
main_state = base_pol.get_state()
new_policy.set_state(main_state)
# We need to sync the just copied local weights to all the
# remote workers as well.
algorithm.env_runner_group.sync_weights(policies=[new_pol_id])
self._print_league()
else:
print("not good enough; will keep learning ...")
def _print_league(self):
print("--- League ---")
print("Trainable policies (win-rates):")
for p in sorted(self.trainable_policies):
wr = self.win_rates[p] if p in self.win_rates else 0.0
print(f"\t{p}: {wr}")
print("Frozen policies:")
for p in sorted(self.non_trainable_policies):
wr = self.win_rates[p] if p in self.win_rates else 0.0
print(f"\t{p}: {wr}")
print()
| SelfPlayLeagueBasedCallbackOldAPIStack |
python | pallets__jinja | tests/test_ext.py | {
"start": 18102,
"end": 22563
} | class ____:
def test_trans(self):
tmpl = newstyle_i18n_env.get_template("child.html")
assert tmpl.render(LANGUAGE="de") == "<title>fehlend</title>pass auf"
def test_trans_plural(self):
tmpl = newstyle_i18n_env.get_template("plural.html")
assert tmpl.render(LANGUAGE="de", user_count=1) == "Ein Benutzer online"
assert tmpl.render(LANGUAGE="de", user_count=2) == "2 Benutzer online"
def test_complex_plural(self):
tmpl = newstyle_i18n_env.from_string(
"{% trans foo=42, count=2 %}{{ count }} item{% "
"pluralize count %}{{ count }} items{% endtrans %}"
)
assert tmpl.render() == "2 items"
pytest.raises(
TemplateAssertionError,
i18n_env.from_string,
"{% trans foo %}...{% pluralize bar %}...{% endtrans %}",
)
def test_trans_stringformatting(self):
tmpl = newstyle_i18n_env.get_template("stringformat.html")
assert tmpl.render(LANGUAGE="de", user_count=5) == "Benutzer: 5"
def test_newstyle_plural(self):
tmpl = newstyle_i18n_env.get_template("ngettext.html")
assert tmpl.render(LANGUAGE="de", apples=1) == "1 Apfel"
assert tmpl.render(LANGUAGE="de", apples=5) == "5 Äpfel"
def test_autoescape_support(self):
env = Environment(extensions=["jinja2.ext.i18n"])
env.install_gettext_callables(
lambda x: "<strong>Wert: %(name)s</strong>",
lambda s, p, n: s,
newstyle=True,
)
t = env.from_string(
'{% autoescape ae %}{{ gettext("foo", name="<test>") }}{% endautoescape %}'
)
assert t.render(ae=True) == "<strong>Wert: <test></strong>"
assert t.render(ae=False) == "<strong>Wert: <test></strong>"
def test_autoescape_macros(self):
env = Environment(autoescape=False)
template = (
"{% macro m() %}<html>{% endmacro %}"
"{% autoescape true %}{{ m() }}{% endautoescape %}"
)
assert env.from_string(template).render() == "<html>"
def test_num_used_twice(self):
tmpl = newstyle_i18n_env.get_template("ngettext_long.html")
assert tmpl.render(apples=5, LANGUAGE="de") == "5 Äpfel"
def test_num_called_num(self):
source = newstyle_i18n_env.compile(
"""
{% trans num=3 %}{{ num }} apple{% pluralize
%}{{ num }} apples{% endtrans %}
""",
raw=True,
)
# quite hacky, but the only way to properly test that. The idea is
# that the generated code does not pass num twice (although that
# would work) for better performance. This only works on the
# newstyle gettext of course
assert (
re.search(r"u?'%\(num\)s apple', u?'%\(num\)s apples', 3", source)
is not None
)
def test_trans_vars(self):
t1 = newstyle_i18n_env.get_template("transvars1.html")
t2 = newstyle_i18n_env.get_template("transvars2.html")
t3 = newstyle_i18n_env.get_template("transvars3.html")
assert t1.render(num=1, LANGUAGE="de") == "Benutzer: 1"
assert t2.render(count=23, LANGUAGE="de") == "Benutzer: 23"
assert t3.render(num=42, LANGUAGE="de") == "Benutzer: 42"
def test_novars_vars_escaping(self):
t = newstyle_i18n_env.get_template("novars.html")
assert t.render() == "%(hello)s"
t = newstyle_i18n_env.get_template("vars.html")
assert t.render(foo="42") == "42%(foo)s"
t = newstyle_i18n_env.get_template("explicitvars.html")
assert t.render() == "%(foo)s"
def test_context(self):
tmpl = newstyle_i18n_env.get_template("pgettext.html")
assert tmpl.render(LANGUAGE="de") == "Apple"
def test_context_plural(self):
tmpl = newstyle_i18n_env.get_template("npgettext.html")
assert tmpl.render(LANGUAGE="de", apples=1) == "1 Apple"
assert tmpl.render(LANGUAGE="de", apples=5) == "5 Apples"
def test_context_block(self):
tmpl = newstyle_i18n_env.get_template("pgettext_block")
assert tmpl.render(LANGUAGE="de") == "Apple"
def test_context_plural_block(self):
tmpl = newstyle_i18n_env.get_template("npgettext_block")
assert tmpl.render(LANGUAGE="de", apples=1) == "1 Apple"
assert tmpl.render(LANGUAGE="de", apples=5) == "5 Apples"
| TestNewstyleInternationalization |
python | mwaskom__seaborn | seaborn/_base.py | {
"start": 574,
"end": 2821
} | class ____:
"""Base class for mapping data values to plot attributes."""
# -- Default attributes that all SemanticMapping subclasses must set
# Whether the mapping is numeric, categorical, or datetime
map_type: str | None = None
# Ordered list of unique values in the input data
levels = None
# A mapping from the data values to corresponding plot attributes
lookup_table = None
def __init__(self, plotter):
# TODO Putting this here so we can continue to use a lot of the
# logic that's built into the library, but the idea of this class
# is to move towards semantic mappings that are agnostic about the
# kind of plot they're going to be used to draw.
# Fully achieving that is going to take some thinking.
self.plotter = plotter
def _check_list_length(self, levels, values, variable):
"""Input check when values are provided as a list."""
# Copied from _core/properties; eventually will be replaced for that.
message = ""
if len(levels) > len(values):
message = " ".join([
f"\nThe {variable} list has fewer values ({len(values)})",
f"than needed ({len(levels)}) and will cycle, which may",
"produce an uninterpretable plot."
])
values = [x for _, x in zip(levels, itertools.cycle(values))]
elif len(values) > len(levels):
message = " ".join([
f"The {variable} list has more values ({len(values)})",
f"than needed ({len(levels)}), which may not be intended.",
])
values = values[:len(levels)]
if message:
warnings.warn(message, UserWarning, stacklevel=6)
return values
def _lookup_single(self, key):
"""Apply the mapping to a single data value."""
return self.lookup_table[key]
def __call__(self, key, *args, **kwargs):
"""Get the attribute(s) values for the data key."""
if isinstance(key, (list, np.ndarray, pd.Series)):
return [self._lookup_single(k, *args, **kwargs) for k in key]
else:
return self._lookup_single(key, *args, **kwargs)
| SemanticMapping |
python | django__django | django/contrib/gis/management/commands/inspectdb.py | {
"start": 84,
"end": 760
} | class ____(InspectDBCommand):
db_module = "django.contrib.gis.db"
def get_field_type(self, connection, table_name, row):
field_type, field_params, field_notes = super().get_field_type(
connection, table_name, row
)
if field_type == "GeometryField":
# Getting a more specific field type and any additional parameters
# from the `get_geometry_type` routine for the spatial backend.
field_type, geo_params = connection.introspection.get_geometry_type(
table_name, row
)
field_params.update(geo_params)
return field_type, field_params, field_notes
| Command |
python | pypa__pipenv | pipenv/vendor/click/core.py | {
"start": 74702,
"end": 76398
} | class ____(MultiCommand):
"""A command collection is a multi command that merges multiple multi
commands together into one. This is a straightforward implementation
that accepts a list of different multi commands as sources and
provides all the commands for each of them.
See :class:`MultiCommand` and :class:`Command` for the description of
``name`` and ``attrs``.
"""
def __init__(
self,
name: t.Optional[str] = None,
sources: t.Optional[t.List[MultiCommand]] = None,
**attrs: t.Any,
) -> None:
super().__init__(name, **attrs)
#: The list of registered multi commands.
self.sources: t.List[MultiCommand] = sources or []
def add_source(self, multi_cmd: MultiCommand) -> None:
"""Adds a new multi command to the chain dispatcher."""
self.sources.append(multi_cmd)
def get_command(self, ctx: Context, cmd_name: str) -> t.Optional[Command]:
for source in self.sources:
rv = source.get_command(ctx, cmd_name)
if rv is not None:
if self.chain:
_check_multicommand(self, cmd_name, rv)
return rv
return None
def list_commands(self, ctx: Context) -> t.List[str]:
rv: t.Set[str] = set()
for source in self.sources:
rv.update(source.list_commands(ctx))
return sorted(rv)
def _check_iter(value: t.Any) -> t.Iterator[t.Any]:
"""Check if the value is iterable but not a string. Raises a type
error, or return an iterator over the value.
"""
if isinstance(value, str):
raise TypeError
return iter(value)
| CommandCollection |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 35354,
"end": 35485
} | class ____(BaseModel, extra="forbid"):
drop_sharding_key: "DropShardingKey" = Field(..., description="")
| DropShardingKeyOperation |
python | scipy__scipy | benchmarks/benchmarks/cluster.py | {
"start": 2520,
"end": 3077
} | class ____(Benchmark):
params = [[2, 10, 50], ['random', 'points', '++']]
param_names = ['k', 'init']
def __init__(self):
rnd = np.random.RandomState(0)
self.obs = rnd.rand(1000, 5)
def time_kmeans2(self, k, init):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
("One of the clusters is empty. Re-run kmeans with a "
"different initialization"),
UserWarning)
kmeans2(self.obs, k, minit=init, iter=10)
| KMeans2 |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ008.py | {
"start": 4564,
"end": 4892
} | class ____(models.Model):
"""Model with type-annotated abstract = True and __str__ method - should not trigger DJ008"""
new_field = models.CharField(max_length=10)
class Meta(TypedModelMeta):
abstract: ClassVar[bool] = True
def __str__(self):
return self.new_field
| TypeAnnotatedAbstractModelWithStr |
python | coleifer__peewee | tests/fields.py | {
"start": 45631,
"end": 45676
} | class ____(TestModel):
name = TextField()
| NQ |
python | keras-team__keras | keras/src/layers/pooling/global_max_pooling_test.py | {
"start": 2788,
"end": 5475
} | class ____(testing.TestCase):
@parameterized.parameters(
("channels_last", False),
("channels_last", True),
("channels_first", False),
("channels_first", True),
)
def test_global_max_pooling1d(self, data_format, keepdims):
def np_global_max_pool1d(x, data_format, keepdims):
steps_axis = [1] if data_format == "channels_last" else [2]
res = np.apply_over_axes(np.max, x, steps_axis)
if not keepdims:
res = res.squeeze()
return res
inputs = np.arange(24, dtype="float32").reshape((2, 3, 4))
layer = layers.GlobalMaxPooling1D(
data_format=data_format,
keepdims=keepdims,
)
outputs = layer(inputs)
expected = np_global_max_pool1d(inputs, data_format, keepdims)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
("channels_last", False),
("channels_last", True),
("channels_first", False),
("channels_first", True),
)
def test_global_max_pooling2d(self, data_format, keepdims):
def np_global_max_pool2d(x, data_format, keepdims):
steps_axis = [1, 2] if data_format == "channels_last" else [2, 3]
res = np.apply_over_axes(np.max, x, steps_axis)
if not keepdims:
res = res.squeeze()
return res
inputs = np.arange(96, dtype="float32").reshape((2, 3, 4, 4))
layer = layers.GlobalMaxPooling2D(
data_format=data_format,
keepdims=keepdims,
)
outputs = layer(inputs)
expected = np_global_max_pool2d(inputs, data_format, keepdims)
self.assertAllClose(outputs, expected)
@parameterized.parameters(
("channels_last", False),
("channels_last", True),
("channels_first", False),
("channels_first", True),
)
def test_global_max_pooling3d(self, data_format, keepdims):
def np_global_max_pool3d(x, data_format, keepdims):
steps_axis = (
[1, 2, 3] if data_format == "channels_last" else [2, 3, 4]
)
res = np.apply_over_axes(np.max, x, steps_axis)
if not keepdims:
res = res.squeeze()
return res
inputs = np.arange(360, dtype="float32").reshape((2, 3, 3, 5, 4))
layer = layers.GlobalMaxPooling3D(
data_format=data_format,
keepdims=keepdims,
)
outputs = layer(inputs)
expected = np_global_max_pool3d(inputs, data_format, keepdims)
self.assertAllClose(outputs, expected)
| GlobalMaxPoolingCorrectnessTest |
python | ApeWorX__ape | tests/functional/test_exceptions.py | {
"start": 1328,
"end": 5694
} | class ____:
def test_receipt_is_subclass(self, vyper_contract_instance, owner):
"""
Ensure TransactionError knows subclass Receipts are still receipts.
(There was a bug once when it didn't, and that caused internal AttributeErrors).
"""
class SubclassReceipt(Receipt):
pass
receipt = vyper_contract_instance.setNumber(123, sender=owner)
receipt_data = {**receipt.model_dump(), "transaction": receipt.transaction}
sub_receipt = SubclassReceipt.model_validate(receipt_data)
err = TransactionError(txn=sub_receipt)
assert isinstance(err.txn, ReceiptAPI) # Same check used.
def test_address(self, owner):
err = TransactionError(contract_address=owner.address)
assert err.address == owner.address
def test_receiver_as_address(self, owner):
tx = owner.transfer(owner, "1 wei")
err = TransactionError(txn=tx)
assert err.address == owner.address
def test_deploy_address_as_address(self, owner, ethereum, project, zero_address):
contract = project.VyperContract.deploy(629, sender=owner)
receipt = contract.creation_metadata.receipt
data = receipt.model_dump(exclude={"transaction"})
# Show when receiver is zero_address, it still picks contract address.
data["transaction"] = ethereum.create_transaction(receiver=zero_address)
tx = Receipt.model_validate(data)
assert tx.receiver == zero_address, "setup failed"
err = TransactionError(txn=tx)
assert err.address == contract.address
def test_call_with_txn_and_not_source_tb(self, failing_call):
"""
Simulating a failing-call, making sure it doesn't
blow up if it doesn't get a source-tb.
"""
err = TransactionError(txn=failing_call)
assert err.source_traceback is None
def test_call_with_source_tb_and_not_txn(self, mocker, project_with_contract):
"""
Simulating a failing call, making sure the source-tb lines
show up when a txn is NOT given.
"""
# Using mocks for simplicity. Otherwise have to use a bunch of models from ethpm-types;
# most of the stuff being mocked seems simple but is calculated from AST-Nodes and such.
src_path = "path/to/VyperFile.vy"
mock_tb = mocker.MagicMock()
mock_exec = mocker.MagicMock()
mock_exec.depth = 1
mock_exec.source_path = src_path
mock_exec.begin_lineno = 5
mock_exec.end_lineno = 5
mock_closure = mocker.MagicMock()
mock_closure.name = "setNumber"
mock_exec.closure = mock_closure
mock_tb.__getitem__.return_value = mock_exec
mock_tb.__len__.return_value = 1
mock_tb.return_value = mock_tb
err = TransactionError(
source_traceback=mock_tb, project=project_with_contract, set_ape_traceback=True
)
# Have to raise for sys.exc_info() to be available.
try:
raise err
except Exception:
pass
def assert_ape_traceback(err_arg):
assert err_arg.__traceback__ is not None
# The Vyper-frame gets injected at tb_next.
assert err_arg.__traceback__.tb_next is not None
actual = str(err_arg.__traceback__.tb_next.tb_frame)
assert src_path in actual
assert_ape_traceback(err)
err2 = TransactionError(
source_traceback=mock_tb,
project=project_with_contract,
set_ape_traceback=False,
)
try:
raise err2
except Exception:
pass
# No Ape frames are here.
if err2.__traceback__:
assert err2.__traceback__.tb_next is None
err3 = ContractLogicError(source_traceback=mock_tb, project=project_with_contract)
try:
raise err3
except Exception:
pass
assert_ape_traceback(err3)
def test_source_traceback_from_txn(self, owner):
"""
Was not given a source-traceback but showing we can deduce one from
the given transaction.
"""
tx = owner.transfer(owner, 0)
err = TransactionError(txn=tx)
_ = err.source_traceback
assert err._attempted_source_traceback
| TestTransactionError |
python | milvus-io__pymilvus | tests/test_client_entity_helper.py | {
"start": 966,
"end": 14287
} | class ____:
"""Test entity_helper module functions"""
@pytest.mark.parametrize("valid_sparse_matrix", [
[{0: 1.0, 5: 2.5, 10: 3.0}], # list of one dict
[{0: 1.0, 5: 2.5}, {10: 3.0, 15: 4.0}], # list of dicts
[{}, {10: 3.0, 15: 4.0}], # list of dicts partial empty is allowed
[[(1, 0.5), (10, 0.3)], [(2, 0.7), (20, 0.1)]], # list of list
[[("1", "0.5"), (10, 0.3)]], # str representation of int
csr_matrix(([1, 2, 3], [0, 2, 3], [0, 2, 3, 3]), shape=(3, 4)), # scipy sparse matrix
[csr_matrix([[1, 0, 2]]), csr_matrix([[0, 0, 3]])], # list of scipy sparse matrices
])
def test_entity_is_sparse_matrix(self, valid_sparse_matrix: list):
assert entity_helper.entity_is_sparse_matrix(valid_sparse_matrix) is True
@pytest.mark.parametrize("not_sparse_matrix", [
[{"a": 1.0, "b": 2.0}], # invalid dict for non-numeric keys
[], # empty
[{0: 1.0}, "not a dict", {5: 2.0}], # mixed lists
None,
123,
"string",
[1, 2, 3],
[[1, 2, 3]],
[[(1, 0.5, 0.2)]],
[[(1, "invalid")]],
[csr_matrix([[1, 0], [0, 1]])], # list of multi-row is not sparse
])
def test_entity_isnot_sparse_matrix(self, not_sparse_matrix: any):
assert entity_helper.entity_is_sparse_matrix(not_sparse_matrix) is False
def test_get_input_num_rows_list(self):
"""Test getting number of rows from list input"""
# Regular list
data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert entity_helper.get_input_num_rows(data) == 3
assert entity_helper.get_input_num_rows([1, 2, 3]) == 3
assert entity_helper.get_input_num_rows({"a": 1, "b": 2}) == 2
data = [[1, 2, 3]]
assert entity_helper.get_input_num_rows(data) == 1
data = []
assert entity_helper.get_input_num_rows(data) == 0
matrix = csr_matrix([[1, 0], [0, 1], [1, 1]])
assert entity_helper.get_input_num_rows(matrix) == 3
sparse_list = [
{0: 1.0},
{5: 2.5},
{10: 3.0}
]
assert entity_helper.get_input_num_rows(sparse_list) == 3
data = np.array([[1, 2, 3], [4, 5, 6]])
assert entity_helper.get_input_num_rows(data) == 2
@pytest.mark.parametrize("sparse_list", [
[{0: 1.0, 2: 2.0}, {2: 3.0}],
csr_matrix([[1, 0, 2], [0, 0, 3]])
])
def test_sparse_rows_to_proto_dict(self, sparse_list: any):
"""Test converting sparse rows to protobuf format"""
proto = entity_helper.sparse_rows_to_proto(sparse_list)
assert isinstance(proto, schema_pb2.SparseFloatArray)
assert len(proto.contents) == 2
assert proto.dim == 3
def test_sparse_proto_to_rows(self):
"""Test converting protobuf sparse vectors back to rows"""
# Create a mock sparse proto
proto = schema_pb2.SparseFloatArray(dim=100)
# Add some sparse vectors in binary format
# Format: pairs of (uint32 index, float32 value)
vec1_data = b''
for idx, val in [(0, 1.0), (5, 2.5), (10, 3.0)]:
vec1_data += struct.pack('I', idx) + struct.pack('f', val)
proto.contents.append(vec1_data)
vec2_data = b''
for idx, val in [(15, 4.0), (20, 5.0)]:
vec2_data += struct.pack('I', idx) + struct.pack('f', val)
proto.contents.append(vec2_data)
# Convert back to rows
rows = entity_helper.sparse_proto_to_rows(proto, 0, 2)
assert len(rows) == 2
assert rows[0] == {0: 1.0, 5: 2.5, 10: 3.0}
assert rows[1] == {15: 4.0, 20: 5.0}
def test_sparse_proto_to_rows_with_range(self):
"""Test converting specific range of sparse vectors"""
proto = schema_pb2.SparseFloatArray(dim=100)
# Add multiple sparse vectors in binary format
for i in range(5):
vec_data = struct.pack('I', i) + struct.pack('f', float(i))
proto.contents.append(vec_data)
# Get middle range
rows = entity_helper.sparse_proto_to_rows(proto, 1, 4)
assert len(rows) == 3
assert rows[0] == {1: 1.0}
assert rows[1] == {2: 2.0}
assert rows[2] == {3: 3.0}
def test_convert_to_json_nested(self):
"""Test JSON conversion with nested structures"""
obj = {
"level1": {
"level2": {
"array": np.array([[1, 2], [3, 4]]),
"list": [np.int32(1), np.float64(2.5)]
}
}
}
result = entity_helper.convert_to_json(obj)
parsed = json.loads(result)
assert parsed["level1"]["level2"]["array"] == [[1, 2], [3, 4]]
@pytest.mark.parametrize("data", [
{"key": "value", "number": 42}, # dict
{"outer": {"inner": "value"}}, # nested dict
[1, 2, 3, "four"], # list
[{"a": 1}, {"b": 2}], # list of dict
None,
pytest.param({"array": np.array([1, 2, 3])}, marks=pytest.mark.xfail(reason="fix me")),
{ "int": np.int64(42), "float": np.float32(3.14), "bool": np.bool_(True) },
[{"val": np.int64(10)}, {"val": np.float32(3.14)}],
])
def test_convert_to_json_dict(self, data: dict):
"""Test JSON conversion for dict input"""
result = entity_helper.convert_to_json(data)
assert isinstance(result, bytes)
assert json.loads(result.decode()) == data
@pytest.mark.parametrize("json_string,expected", [
('{"key": "value", "number": 42}', {"key": "value", "number": 42}),
('{"nested": {"inner": "value"}}', {"nested": {"inner": "value"}}),
('[1, 2, 3, "four"]', [1, 2, 3, "four"]),
('{"name": "Alice", "age": 30}', {"name": "Alice", "age": 30}),
('null', None),
('true', True),
('false', False),
('123', 123),
('"simple string"', "simple string"),
])
def test_convert_to_json_string_valid(self, json_string: str, expected):
"""Test JSON conversion for valid JSON string input"""
result = entity_helper.convert_to_json(json_string)
assert isinstance(result, bytes)
# Verify the result is valid JSON
parsed = json.loads(result.decode())
assert parsed == expected
def test_convert_to_json_from_json_dumps(self):
"""Test JSON conversion from json.dumps() output"""
original_dict = {"key": "value", "count": 100, "nested": {"inner": "data"}}
json_string = json.dumps(original_dict)
result = entity_helper.convert_to_json(json_string)
assert isinstance(result, bytes)
parsed = json.loads(result.decode())
assert parsed == original_dict
@pytest.mark.parametrize("invalid_json_string", [
"not a json string",
'{"invalid": }',
'{"key": "value"', # missing closing brace
"{'key': 'value'}", # single quotes not valid in JSON
"{key: value}", # unquoted keys
"undefined",
"{,}",
])
def test_convert_to_json_string_invalid(self, invalid_json_string: str):
"""Test JSON conversion rejects invalid JSON strings"""
with pytest.raises(DataNotMatchException) as exc_info:
entity_helper.convert_to_json(invalid_json_string)
# Verify error message contains the invalid JSON string
error_message = str(exc_info.value)
assert "Invalid JSON string" in error_message
# Verify the original input string is in the error message
assert invalid_json_string in error_message or invalid_json_string[:50] in error_message
def test_convert_to_json_string_with_non_string_keys(self):
"""Test JSON conversion rejects JSON strings with non-string keys in dict"""
# This is actually not possible in standard JSON, as JSON object keys are always strings
# But we can test that dict validation still works
invalid_dict = {1: "value", 2: "another"}
with pytest.raises(DataNotMatchException) as exc_info:
entity_helper.convert_to_json(invalid_dict)
error_message = str(exc_info.value)
assert "JSON" in error_message
def test_convert_to_json_long_invalid_string_truncated(self):
"""Test that long invalid JSON strings are truncated in error messages"""
# Create a long invalid JSON string
long_invalid_json = "invalid json " * 50 # > 200 characters
with pytest.raises(DataNotMatchException) as exc_info:
entity_helper.convert_to_json(long_invalid_json)
error_message = str(exc_info.value)
assert "Invalid JSON string" in error_message
# Should contain truncated version with "..."
assert "..." in error_message
def test_pack_field_value_to_field_data(self):
"""Test packing field values into field data protobuf"""
# Test with scalar field
field_data = schema_pb2.FieldData()
field_data.type = DataType.INT64
field_info = {"name": "test_field"}
value = 42
entity_helper.pack_field_value_to_field_data(value, field_data, field_info)
assert len(field_data.scalars.long_data.data) == 1
assert field_data.scalars.long_data.data[0] == value
def test_pack_field_value_to_field_data_vectors(self):
"""Test packing vector field values"""
field_data = schema_pb2.FieldData()
field_data.type = DataType.FLOAT_VECTOR
field_info = {"name": "vector_field"}
value = [1.0, 2.0, 3.0, 4.0]
entity_helper.pack_field_value_to_field_data(value, field_data, field_info)
assert field_data.vectors.dim == 4
assert list(field_data.vectors.float_vector.data) == value
def test_extract_array_row_data(self):
"""Test extracting array data from protobuf"""
# Create field data with array
field_data = schema_pb2.FieldData()
field_data.scalars.array_data.element_type = DataType.INT64
# Add array data for index 0
scalar_field = schema_pb2.ScalarField()
scalar_field.long_data.data.extend([0, 1, 2])
field_data.scalars.array_data.data.append(scalar_field)
result = entity_helper.extract_array_row_data(field_data, 0)
assert result == [0, 1, 2]
def test_extract_array_row_data_string(self):
"""Test extracting string array data"""
# Create field data with array
field_data = schema_pb2.FieldData()
field_data.scalars.array_data.element_type = DataType.VARCHAR
# Add array data for index 0
scalar_field = schema_pb2.ScalarField()
scalar_field.string_data.data.extend(["str_0", "str_1"])
field_data.scalars.array_data.data.append(scalar_field)
result = entity_helper.extract_array_row_data(field_data, 0)
assert result == ["str_0", "str_1"]
def test_extract_array_row_data_bool(self):
"""Test extracting boolean array data"""
# Create field data with array
field_data = schema_pb2.FieldData()
field_data.scalars.array_data.element_type = DataType.BOOL
# Add array data for index 0
scalar_field = schema_pb2.ScalarField()
scalar_field.bool_data.data.extend([True, False, True])
field_data.scalars.array_data.data.append(scalar_field)
result = entity_helper.extract_array_row_data(field_data, 0)
assert result == [True, False, True]
def test_extract_array_row_data_float(self):
"""Test extracting float array data"""
# Create field data with array
field_data = schema_pb2.FieldData()
field_data.scalars.array_data.element_type = DataType.FLOAT
# Add array data for index 0
scalar_field = schema_pb2.ScalarField()
scalar_field.float_data.data.extend([1.1, 2.2, 3.3])
field_data.scalars.array_data.data.append(scalar_field)
result = entity_helper.extract_array_row_data(field_data, 0)
assert result == pytest.approx([1.1, 2.2, 3.3])
def test_extract_array_row_data_double(self):
"""Test extracting double array data"""
# Create field data with array
field_data = schema_pb2.FieldData()
field_data.scalars.array_data.element_type = DataType.DOUBLE
# Add array data for index 0
scalar_field = schema_pb2.ScalarField()
scalar_field.double_data.data.extend([1.11111, 2.22222, 3.33333])
field_data.scalars.array_data.data.append(scalar_field)
result = entity_helper.extract_array_row_data(field_data, 0)
assert result == pytest.approx([1.11111, 2.22222, 3.33333])
def test_extract_array_row_data_invalid_type(self):
"""Test extracting array data with invalid type"""
# Create field data with array
field_data = schema_pb2.FieldData()
field_data.scalars.array_data.element_type = 999 # Invalid type
# Add array data for index 0
scalar_field = schema_pb2.ScalarField()
field_data.scalars.array_data.data.append(scalar_field)
result = entity_helper.extract_array_row_data(field_data, 0)
assert result is None # Returns None for unknown types
| TestEntityHelperSparse |
python | google__pytype | pytype/datatypes.py | {
"start": 10587,
"end": 10729
} | class ____(AliasingDict[_K, _V], MonitorDict[_K, _V]):
"""The dictionary that supports aliasing, lazy dict and monitor."""
| AliasingMonitorDict |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/self2.py | {
"start": 1824,
"end": 1969
} | class ____(Generic[_T]):
value: _T
next: Self | None = None
LinkedList[int](value=1, next=LinkedList[int](value=2))
@dataclass
| LinkedList |
python | ansible__ansible | test/lib/ansible_test/_util/controller/sanity/pylint/plugins/unwanted.py | {
"start": 1935,
"end": 8225
} | class ____(BaseChecker):
"""Checker for unwanted imports and functions."""
name = 'unwanted'
BAD_IMPORT = 'ansible-bad-import'
BAD_IMPORT_FROM = 'ansible-bad-import-from'
BAD_FUNCTION = 'ansible-bad-function'
BAD_MODULE_IMPORT = 'ansible-bad-module-import'
msgs = dict(
E5101=('Import %s instead of %s',
BAD_IMPORT,
'Identifies imports which should not be used.'),
E5102=('Import %s from %s instead of %s',
BAD_IMPORT_FROM,
'Identifies imports which should not be used.'),
E5103=('Call %s instead of %s',
BAD_FUNCTION,
'Identifies functions which should not be used.'),
E5104=('Import external package or ansible.module_utils not %s',
BAD_MODULE_IMPORT,
'Identifies imports which should not be used.'),
)
unwanted_imports = {
# see https://docs.python.org/3/library/collections.abc.html
# deprecated: description='remove collections check now that Python 3.9 is no longer supported' core_version='2.23'
'collections': UnwantedEntry(
'collections.abc',
names=(
'MappingView',
'ItemsView',
'KeysView',
'ValuesView',
'Mapping', 'MutableMapping',
'Sequence', 'MutableSequence',
'Set', 'MutableSet',
'Container',
'Hashable',
'Sized',
'Callable',
'Iterable',
'Iterator',
)
),
'ansible.module_utils.six': UnwantedEntry(
'the Python standard library equivalent'
),
}
unwanted_functions = {
# see https://docs.python.org/3/library/tempfile.html#tempfile.mktemp
'tempfile.mktemp': UnwantedEntry('tempfile.mkstemp'),
# os.chmod resolves as posix.chmod
'posix.chmod': UnwantedEntry('verified_chmod',
ansible_test_only=True),
'sys.exit': UnwantedEntry('exit_json or fail_json',
ignore_paths=(
'/lib/ansible/module_utils/basic.py',
'/lib/ansible/modules/async_wrapper.py',
),
modules_only=True),
'builtins.print': UnwantedEntry('module.log or module.debug',
ignore_paths=(
'/lib/ansible/module_utils/basic.py',
),
modules_only=True),
}
@functools.cached_property
def is_ansible_core(self) -> bool:
"""True if ansible-core is being tested."""
return not self.linter.config.collection_name
def visit_import(self, node: astroid.nodes.Import) -> None:
"""Visit an import node."""
for name in node.names:
self._check_import(node, name[0])
def visit_importfrom(self, node: astroid.nodes.ImportFrom) -> None:
"""Visit an import from node."""
self._check_importfrom(node, node.modname, node.names)
def visit_attribute(self, node: astroid.nodes.Attribute) -> None:
"""Visit an attribute node."""
last_child = node.last_child()
# this is faster than using type inference and will catch the most common cases
if not isinstance(last_child, astroid.nodes.Name):
return
module = last_child.name
entry = self.unwanted_imports.get(module)
if entry and entry.names:
if entry.applies_to(self.linter.current_file, node.attrname):
self.add_message(self.BAD_IMPORT_FROM, args=(node.attrname, entry.alternative, module), node=node)
def visit_call(self, node: astroid.nodes.Call) -> None:
"""Visit a call node."""
try:
for i in node.func.inferred():
func = None
if isinstance(i, astroid.nodes.FunctionDef) and isinstance(i.parent, astroid.nodes.Module):
func = '%s.%s' % (i.parent.name, i.name)
if not func:
continue
entry = self.unwanted_functions.get(func)
if entry and entry.applies_to(self.linter.current_file):
self.add_message(self.BAD_FUNCTION, args=(entry.alternative, func), node=node)
except astroid.exceptions.InferenceError:
pass
def _check_import(self, node: astroid.nodes.Import, modname: str) -> None:
"""Check the imports on the specified import node."""
self._check_module_import(node, modname)
entry = self.unwanted_imports.get(modname)
if not entry:
return
if entry.applies_to(self.linter.current_file):
self.add_message(self.BAD_IMPORT, args=(entry.alternative, modname), node=node)
def _check_importfrom(self, node: astroid.nodes.ImportFrom, modname: str, names: list[tuple[str, str | None]]) -> None:
"""Check the imports on the specified import from node."""
self._check_module_import(node, modname)
entry = self.unwanted_imports.get(modname)
if not entry:
return
for name in names:
if entry.applies_to(self.linter.current_file, name[0]):
self.add_message(self.BAD_IMPORT_FROM, args=(name[0], entry.alternative, modname), node=node)
def _check_module_import(self, node: astroid.nodes.Import | astroid.nodes.ImportFrom, modname: str) -> None:
"""Check the module import on the given import or import from node."""
if not is_module_path(self.linter.current_file):
return
if modname == 'ansible.module_utils' or modname.startswith('ansible.module_utils.'):
return
if modname == 'ansible' or modname.startswith('ansible.'):
self.add_message(self.BAD_MODULE_IMPORT, args=(modname,), node=node)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(AnsibleUnwantedChecker(linter))
| AnsibleUnwantedChecker |
python | sympy__sympy | sympy/assumptions/predicates/matrices.py | {
"start": 9910,
"end": 10477
} | class ____(Predicate):
"""
Singular matrix predicate.
A matrix is singular iff the value of its determinant is 0.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.singular(X), Q.invertible(X))
False
>>> ask(Q.singular(X), ~Q.invertible(X))
True
References
==========
.. [1] https://mathworld.wolfram.com/SingularMatrix.html
"""
name = "singular"
handler = Dispatcher("SingularHandler", doc="Predicate fore key 'singular'.")
| SingularPredicate |
python | ipython__ipython | IPython/testing/plugin/pytest_ipdoctest.py | {
"start": 5068,
"end": 5492
} | class ____(TerminalRepr):
def __init__(
self, reprlocation_lines: Sequence[Tuple[ReprFileLocation, Sequence[str]]]
) -> None:
self.reprlocation_lines = reprlocation_lines
def toterminal(self, tw: TerminalWriter) -> None:
for reprlocation, lines in self.reprlocation_lines:
for line in lines:
tw.line(line)
reprlocation.toterminal(tw)
| ReprFailDoctest |
python | ApeWorX__ape | src/ape/types/private_mempool.py | {
"start": 1888,
"end": 2296
} | class ____(BaseModel):
"""
Preferences on what data should be shared about the bundle and its transactions
"""
hints: Optional[list[PrivacyHint]] = None
"""
Hints on what data should be shared about the bundle and its transactions.
"""
builders: Optional[list[str]] = None
"""
Names of the builders that should be allowed to see the bundle/transaction.
"""
| Privacy |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/dumper.py | {
"start": 589,
"end": 2084
} | class ____(Emitter, Serializer, BaseRepresenter, BaseResolver):
def __init__(
self,
stream,
default_style=None,
default_flow_style=None,
canonical=None,
indent=None,
width=None,
allow_unicode=None,
line_break=None,
encoding=None,
explicit_start=None,
explicit_end=None,
version=None,
tags=None,
block_seq_indent=None,
top_level_colon_align=None,
prefix_colon=None,
):
# type: (Any, StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA
Emitter.__init__(
self,
stream,
canonical=canonical,
indent=indent,
width=width,
allow_unicode=allow_unicode,
line_break=line_break,
block_seq_indent=block_seq_indent,
dumper=self,
)
Serializer.__init__(
self,
encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version,
tags=tags,
dumper=self,
)
BaseRepresenter.__init__(
self,
default_style=default_style,
default_flow_style=default_flow_style,
dumper=self,
)
BaseResolver.__init__(self, loadumper=self)
| BaseDumper |
python | agronholm__apscheduler | src/apscheduler/_events.py | {
"start": 1134,
"end": 1346
} | class ____(DataStoreEvent):
"""
Signals that a new task was added to the store.
:ivar task_id: ID of the task that was added
"""
task_id: str
@attrs.define(kw_only=True, frozen=True)
| TaskAdded |
python | allegroai__clearml | examples/hyperdatasets/finetune_qa_lora.py | {
"start": 1266,
"end": 1593
} | class ____(DataSubEntry):
"""Simple text sub-entry carrying a role label for each snippet."""
def __init__(self, name: str, text: str, role: str):
super().__init__(
name=name,
source=f"text://{uuid.uuid4().hex}",
metadata={"text": text, "role": role},
)
| QADataSubEntry |
python | python-pillow__Pillow | src/PIL/XbmImagePlugin.py | {
"start": 1170,
"end": 2669
} | class ____(ImageFile.ImageFile):
format = "XBM"
format_description = "X11 Bitmap"
def _open(self) -> None:
assert self.fp is not None
m = xbm_head.match(self.fp.read(512))
if not m:
msg = "not a XBM file"
raise SyntaxError(msg)
xsize = int(m.group("width"))
ysize = int(m.group("height"))
if m.group("hotspot"):
self.info["hotspot"] = (int(m.group("xhot")), int(m.group("yhot")))
self._mode = "1"
self._size = xsize, ysize
self.tile = [ImageFile._Tile("xbm", (0, 0) + self.size, m.end())]
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
if im.mode != "1":
msg = f"cannot write mode {im.mode} as XBM"
raise OSError(msg)
fp.write(f"#define im_width {im.size[0]}\n".encode("ascii"))
fp.write(f"#define im_height {im.size[1]}\n".encode("ascii"))
hotspot = im.encoderinfo.get("hotspot")
if hotspot:
fp.write(f"#define im_x_hot {hotspot[0]}\n".encode("ascii"))
fp.write(f"#define im_y_hot {hotspot[1]}\n".encode("ascii"))
fp.write(b"static char im_bits[] = {\n")
ImageFile._save(im, fp, [ImageFile._Tile("xbm", (0, 0) + im.size)])
fp.write(b"};\n")
Image.register_open(XbmImageFile.format, XbmImageFile, _accept)
Image.register_save(XbmImageFile.format, _save)
Image.register_extension(XbmImageFile.format, ".xbm")
Image.register_mime(XbmImageFile.format, "image/xbm")
| XbmImageFile |
python | great-expectations__great_expectations | great_expectations/expectations/registry.py | {
"start": 1354,
"end": 18239
} | class ____(NamedTuple):
expectation: str
renderer: Callable[
..., Union[RenderedAtomicContent, list[RenderedAtomicContent], RenderedContent]
]
def register_renderer(
object_name: str,
parent_class: Union[Type[Expectation], Type[MetricProvider]],
renderer_fn: Callable[
..., Union[RenderedAtomicContent, list[RenderedAtomicContent], RenderedContent]
],
):
# noinspection PyUnresolvedReferences
renderer_name = renderer_fn._renderer_type # type: ignore[attr-defined] # FIXME CoP
if object_name not in _registered_renderers:
logger.debug(f"Registering {renderer_name} for expectation_type {object_name}.")
_registered_renderers[object_name] = {renderer_name: (parent_class, renderer_fn)}
return
if renderer_name in _registered_renderers[object_name]:
if _registered_renderers[object_name][renderer_name] == (
parent_class,
renderer_fn,
):
logger.info(
f"Multiple declarations of {renderer_name} renderer for expectation_type {object_name} " # noqa: E501 # FIXME CoP
f"found."
)
return
else:
logger.warning(
f"Overwriting declaration of {renderer_name} renderer for expectation_type "
f"{object_name}."
)
_registered_renderers[object_name][renderer_name] = (
parent_class,
renderer_fn,
)
return
else:
logger.debug(f"Registering {renderer_name} for expectation_type {object_name}.")
_registered_renderers[object_name][renderer_name] = (parent_class, renderer_fn)
return
def get_renderer_names(expectation_or_metric_type: str) -> List[str]:
"""Gets renderer names for a given Expectation or Metric.
Args:
expectation_or_metric_type: The type of an Expectation or Metric for which to get renderer names.
Returns:
A list of renderer names for the Expectation or Metric.
""" # noqa: E501 # FIXME CoP
return list(_registered_renderers.get(expectation_or_metric_type, {}).keys())
def get_renderer_names_with_renderer_types(
expectation_or_metric_type: str,
renderer_types: List[AtomicRendererType],
) -> List[Union[str, AtomicDiagnosticRendererType, AtomicPrescriptiveRendererType]]:
"""Gets renderer names of a given type, for a given Expectation or Metric.
Args:
expectation_or_metric_type: The type of an Expectation or Metric for which to get renderer names.
renderer_types: The type of the renderers for which to return names.
Returns:
A list of renderer names for the given prefixes and Expectation or Metric.
""" # noqa: E501 # FIXME CoP
return [
renderer_name
for renderer_name in get_renderer_names(
expectation_or_metric_type=expectation_or_metric_type
)
if any(renderer_name.startswith(renderer_type) for renderer_type in renderer_types)
]
def get_renderer_impls(object_name: str) -> List[str]:
return list(_registered_renderers.get(object_name, {}).values())
def get_renderer_impl(object_name: str, renderer_type: str) -> Optional[RendererImpl]:
renderer_tuple: Optional[tuple] = _registered_renderers.get(object_name, {}).get(renderer_type)
renderer_impl: Optional[RendererImpl] = None
if renderer_tuple:
renderer_impl = RendererImpl(expectation=renderer_tuple[0], renderer=renderer_tuple[1])
return renderer_impl
def register_expectation(expectation: Type[Expectation]) -> None:
expectation_type = expectation.expectation_type
# TODO: add version to key
if expectation_type in _registered_expectations:
if _registered_expectations[expectation_type] == expectation:
logger.info(f"Multiple declarations of expectation {expectation_type} found.")
return
else:
logger.warning(f"Overwriting declaration of expectation {expectation_type}.")
logger.debug(f"Registering expectation: {expectation_type}")
_registered_expectations[expectation_type] = expectation
def register_core_metrics() -> None:
"""As Metric registration is the responsibility of MetaMetricProvider.__new__,
simply importing a given class will ensure that it is added to the Metric
registry.
We use this to grab metrics by name within our workflows.
Without this function, we need to hope that core Metrics are imported somewhere
in our import graph - if not, our registry will be empty and we'll see
MetricResolutionErrors.
"""
before_count = len(_registered_metrics)
# Implicitly calls MetaMetricProvider.__new__ as Metrics are loaded from metrics.__init__.py
# As __new__ calls upon register_metric this import builds our core registry
from great_expectations.expectations import metrics # noqa: F401 # FIXME CoP
after_count = len(_registered_metrics)
if before_count == after_count:
logger.debug("Already registered core metrics; no updates to registry")
else:
logger.debug(f"Registered {after_count - before_count} core metrics")
def register_core_expectations() -> None:
"""As Expectation registration is the responsibility of MetaExpectation.__new__,
simply importing a given class will ensure that it is added to the Expectation
registry.
We use this JIT in the Validator to ensure that core Expectations are available
for usage when called upon.
Without this function, we need to hope that core Expectations are imported somewhere
in our import graph - if not, our registry will be empty and Validator workflows
will fail.
"""
before_count = len(_registered_expectations)
# Implicitly calls MetaExpectation.__new__ as Expectations are loaded from core.__init__.py
# As __new__ calls upon register_expectation, this import builds our core registry
from great_expectations.expectations import core # noqa: F401 # FIXME CoP
after_count = len(_registered_expectations)
if before_count == after_count:
logger.debug("Already registered core expectations; no updates to registry")
else:
logger.debug(f"Registered {after_count - before_count} core expectations")
def _add_response_key(res, key, value):
if key in res:
res[key].append(value)
else:
res[key] = [value]
return res
def register_metric( # noqa: PLR0913 # FIXME CoP
metric_name: str,
metric_domain_keys: Tuple[str, ...],
metric_value_keys: Tuple[str, ...],
execution_engine: Type[ExecutionEngine],
metric_class: Type[MetricProvider],
metric_provider: Optional[Callable],
metric_fn_type: Optional[Union[MetricFunctionTypes, MetricPartialFunctionTypes]] = None,
) -> dict:
"""Register a Metric class for use as a callable metric within Expectations.
Args:
metric_name: A name identifying the metric. Metric Name must be globally unique in
a great_expectations installation.
metric_domain_keys: A tuple of the keys used to determine the domain of the metric.
metric_value_keys: A tuple of the keys used to determine the value of the metric.
execution_engine: The execution_engine used to execute the metric.
metric_class: A valid Metric class containing logic to compute attributes of data.
metric_provider: The MetricProvider class from which the metric_class inherits.
metric_fn_type: The MetricFunctionType or MetricPartialFunctionType used to define the Metric class.
Returns:
A dictionary containing warnings thrown during registration if applicable, and the success status of registration.
""" # noqa: E501 # FIXME CoP
res: dict = {}
execution_engine_name = execution_engine.__name__
logger.debug(f"Registering metric: {metric_name}")
if metric_provider is not None and metric_fn_type is not None:
metric_provider.metric_fn_type = metric_fn_type # type: ignore[attr-defined] # FIXME CoP
if metric_name in _registered_metrics:
metric_definition = _registered_metrics[metric_name]
current_domain_keys = metric_definition.get("metric_domain_keys", set())
if set(current_domain_keys) != set(metric_domain_keys):
logger.warning(
f"metric {metric_name} is being registered with different metric_domain_keys; overwriting metric_domain_keys" # noqa: E501 # FIXME CoP
)
_add_response_key(
res,
"warning",
f"metric {metric_name} is being registered with different metric_domain_keys; overwriting metric_domain_keys", # noqa: E501 # FIXME CoP
)
current_value_keys = metric_definition.get("metric_value_keys", set())
if set(current_value_keys) != set(metric_value_keys):
logger.warning(
f"metric {metric_name} is being registered with different metric_value_keys; overwriting metric_value_keys" # noqa: E501 # FIXME CoP
)
_add_response_key(
res,
"warning",
f"metric {metric_name} is being registered with different metric_value_keys; overwriting metric_value_keys", # noqa: E501 # FIXME CoP
)
providers = metric_definition.get("providers", {})
if execution_engine_name in providers:
_current_provider_cls, current_provider_fn = providers[execution_engine_name]
if current_provider_fn != metric_provider:
logger.warning(
f"metric {metric_name} is being registered with different metric_provider; overwriting metric_provider" # noqa: E501 # FIXME CoP
)
_add_response_key(
res,
"warning",
f"metric {metric_name} is being registered with different metric_provider; overwriting metric_provider", # noqa: E501 # FIXME CoP
)
providers[execution_engine_name] = metric_class, metric_provider
else:
logger.info(
f"Multiple declarations of metric {metric_name} for engine {execution_engine_name}." # noqa: E501 # FIXME CoP
)
_add_response_key(
res,
"info",
f"Multiple declarations of metric {metric_name} for engine {execution_engine_name}.", # noqa: E501 # FIXME CoP
)
else:
providers[execution_engine_name] = metric_class, metric_provider
else:
metric_definition = {
"metric_domain_keys": metric_domain_keys,
"metric_value_keys": metric_value_keys,
"default_kwarg_values": metric_class.default_kwarg_values,
"providers": {execution_engine_name: (metric_class, metric_provider)},
}
_registered_metrics[metric_name] = metric_definition
res["success"] = True
return res
def _get_metric_definition(metric_name: str) -> dict:
try:
return _registered_metrics[metric_name]
except KeyError:
raise gx_exceptions.MetricProviderError(f"No metric named {metric_name} found.") # noqa: TRY003 # FIXME CoP
def get_sqlalchemy_metric_provider(
metric_name: str,
) -> Tuple[MetricProvider, Callable]:
"""The default SqlAlchemy metric provider for a given metric."""
metric_definition = _get_metric_definition(metric_name)
try:
return metric_definition["providers"]["SqlAlchemyExecutionEngine"]
except KeyError:
raise gx_exceptions.MetricProviderError( # noqa: TRY003 # FIXME CoP
f"No provider found for {metric_name} using SqlAlchemyExecutionEngine"
)
def get_metric_provider(
metric_name: str, execution_engine: ExecutionEngine
) -> Tuple[MetricProvider, Callable]:
metric_definition = _get_metric_definition(metric_name)
try:
return metric_definition["providers"][type(execution_engine).__name__]
except KeyError:
# Search up class hierarchy for a match. We skip the first entry since that's the
# execution engine type itself, type(execution_engine), which we just checked and
# resulted in the KeyError we're handling here.
for cls in type(execution_engine).mro()[1:]:
possible_key = cls.__name__
if metric_definition["providers"].get(possible_key) is not None:
metric_provider = metric_definition["providers"][possible_key]
# Register the metric provider for this engine so we don't have to search again
metric_definition["providers"][type(execution_engine).__name__] = metric_provider
return metric_provider
# no matches when search hierarchy so we raise
raise gx_exceptions.MetricProviderError( # noqa: TRY003 # FIXME CoP
f"No provider found for {metric_name} using {type(execution_engine).__name__}"
)
def get_metric_function_type(
metric_name: str, execution_engine: ExecutionEngine
) -> Optional[Union[MetricPartialFunctionTypes, MetricFunctionTypes]]:
try:
provider_fn, _provider_class = get_metric_provider(metric_name, execution_engine)
return getattr(provider_fn, "metric_fn_type", None)
except KeyError:
raise gx_exceptions.MetricProviderError( # noqa: TRY003 # FIXME CoP
f"No provider found for {metric_name} using {type(execution_engine).__name__}"
)
def get_metric_kwargs(
    metric_name: str,
    configuration: Optional[ExpectationConfiguration] = None,
    runtime_configuration: Optional[dict] = None,
) -> dict:
    """Assemble the domain/value kwargs needed to compute ``metric_name``.

    Args:
        metric_name: registered metric to look up.
        configuration: optional expectation configuration whose runtime kwargs
            override the metric's registered default kwarg values.
        runtime_configuration: optional runtime overrides forwarded when
            extracting kwargs from the expectation.

    Returns:
        Dict containing ``metric_domain_keys`` and ``metric_value_keys`` and,
        when a configuration is given, the resolved ``metric_domain_kwargs``
        and ``metric_value_kwargs`` as IDDicts.

    Raises:
        MetricProviderError: if the metric is unknown or its registry entry is
            missing required fields.
    """
    try:
        metric_definition = _registered_metrics.get(metric_name)
        if metric_definition is None:
            raise gx_exceptions.MetricProviderError(f"No definition found for {metric_name}")  # noqa: TRY003 # FIXME CoP
        default_kwarg_values = metric_definition["default_kwarg_values"]
        metric_kwargs = {
            "metric_domain_keys": metric_definition["metric_domain_keys"],
            "metric_value_keys": metric_definition["metric_value_keys"],
        }
        if configuration:
            expectation = configuration.to_domain_obj()
            configuration_kwargs = expectation._get_runtime_kwargs(
                runtime_configuration=runtime_configuration
            )
            if len(metric_kwargs["metric_domain_keys"]) > 0:
                # NOTE(review): ``or`` treats falsy configured values (0, "",
                # False) as missing and falls back to the default — this
                # differs from the explicit ``is not None`` check used for the
                # value kwargs below; confirm whether that asymmetry is
                # intentional before changing either.
                metric_domain_kwargs = IDDict(
                    {
                        k: configuration_kwargs.get(k) or default_kwarg_values.get(k)
                        for k in metric_kwargs["metric_domain_keys"]
                    }
                )
            else:
                metric_domain_kwargs = IDDict()
            if len(metric_kwargs["metric_value_keys"]) > 0:
                metric_value_kwargs = IDDict(
                    {
                        k: configuration_kwargs.get(k)
                        if configuration_kwargs.get(k) is not None
                        else default_kwarg_values.get(k)
                        for k in metric_kwargs["metric_value_keys"]
                    }
                )
            else:
                metric_value_kwargs = IDDict()
            metric_kwargs["metric_domain_kwargs"] = metric_domain_kwargs
            metric_kwargs["metric_value_kwargs"] = metric_value_kwargs
        return metric_kwargs
    except KeyError:
        # A missing registry field (e.g. "default_kwarg_values") lands here.
        raise gx_exceptions.MetricProviderError(f"Incomplete definition found for {metric_name}")  # noqa: TRY003 # FIXME CoP
def get_domain_metrics_dict_by_name(
    metrics: Dict[Tuple[str, str, str], MetricValue], metric_domain_kwargs: IDDict
):
    """Filter ``metrics`` to entries matching this domain, keyed by metric name.

    Metric keys are (name, domain_id, value_id) tuples; an entry is kept when
    its domain_id equals ``metric_domain_kwargs.to_id()``.
    """
    target_domain_id = metric_domain_kwargs.to_id()
    filtered = {}
    for metric_edge_key_id_tuple, metric_value in metrics.items():
        if metric_edge_key_id_tuple[1] == target_domain_id:
            filtered[metric_edge_key_id_tuple[0]] = metric_value
    return filtered
def get_expectation_impl(expectation_name: str) -> Type[Expectation]:
    """Return the registered Expectation class for ``expectation_name``.

    Raises:
        ExpectationNotFoundError: if the name is not in the registry.
    """
    impl: Type[Expectation] | None = _registered_expectations.get(expectation_name)
    if impl:
        return impl
    raise gx_exceptions.ExpectationNotFoundError(f"{expectation_name} not found")  # noqa: TRY003 # FIXME CoP
def list_registered_expectation_implementations(
    expectation_root: Optional[Type[Expectation]] = None,
) -> List[str]:
    """List the names of registered expectations.

    Args:
        expectation_root: when given, only expectations whose implementation
            is a subclass of this class are included.

    Returns:
        Registered expectation names, in registration order.
    """
    # The original guard ``expectation_root is None or (expectation_root and
    # issubclass(...))`` re-tested expectation_root redundantly: after the
    # ``or`` it is already known to be non-None (and a class is truthy).
    return [
        expectation_name
        for expectation_name, expectation_implementation in _registered_expectations.items()
        if expectation_root is None
        or issubclass(expectation_implementation, expectation_root)
    ]
| RendererImpl |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/hybrid.py | {
"start": 42544,
"end": 43181
} | class ____(InspectionAttrExtensionType):
HYBRID_METHOD = "HYBRID_METHOD"
"""Symbol indicating an :class:`InspectionAttr` that's
of type :class:`.hybrid_method`.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.
.. seealso::
:attr:`_orm.Mapper.all_orm_attributes`
"""
HYBRID_PROPERTY = "HYBRID_PROPERTY"
"""Symbol indicating an :class:`InspectionAttr` that's
of type :class:`.hybrid_method`.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attribute.
.. seealso::
:attr:`_orm.Mapper.all_orm_attributes`
"""
| HybridExtensionType |
python | huggingface__transformers | src/transformers/models/megatron_bert/modeling_megatron_bert.py | {
"start": 48475,
"end": 52019
} | class ____(MegatronBertPreTrainedModel):
    def __init__(self, config):
        """Build the sequence-classification head: encoder + dropout + linear classifier."""
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = MegatronBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # Projects the pooled representation to one logit per label.
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        # Initialize weights and apply final processing
        self.post_init()
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Index 1 of the encoder output is used as the pooled representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from label count/dtype and cache it
            # on the config so subsequent calls reuse the same loss.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            # Tuple output mirrors the dataclass: (loss?, logits, *extras).
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@auto_docstring
| MegatronBertForSequenceClassification |
python | pytorch__pytorch | test/onnx/exporter/test_ir_passes.py | {
"start": 338,
"end": 3158
} | class ____(common_utils.TestCase):
    @common_utils.parametrize(
        "shape_expr, expected_shape_expr",
        [
            ("2*s1", "batch_size*sequence_length"),
            ("s11/s1", "past_sequence_length/sequence_length"),
            ("(s1 + s11)*2", "(masked_sequence_length)*batch_size"),
        ],
    )
    def test__replace_names_in_rename_axis(self, shape_expr, expected_shape_expr):
        """_replace_names rewrites symbolic shape expressions via the rename mapping."""
        # Mapping deliberately contains overlapping keys ("s1 + s11", "s11",
        # "s1") — the expected values pin down which substitution wins.
        rename_mapping = {
            "s1 + s11": "masked_sequence_length",
            "s11": "past_sequence_length",
            "s1": "sequence_length",
            "2": "batch_size",
        }
        new_shape_expr = _ir_passes._replace_names(shape_expr, rename_mapping)
        self.assertEqual(new_shape_expr, expected_shape_expr)
    def test_rename_axis_succeeds_when_mapping_is_not_sorted_and_contains_the_str_not_in_the_model(
        self,
    ):
        """rename_axis applies an unsorted mapping (with unused keys) to every input shape."""
        model = ir.Model(
            ir.Graph(
                inputs=[
                    ir.Value(
                        name="input_0",
                        type=ir.DataType.FLOAT,
                        shape=ir.Shape(["s0", "s1"]),
                    ),
                    ir.Value(
                        name="input_1",
                        type=ir.DataType.FLOAT,
                        shape=ir.Shape(["s0 + s2", "s1 + s2"]),
                    ),
                    ir.Value(
                        name="input_2",
                        type=ir.DataType.FLOAT,
                        shape=ir.Shape(["s1/(s1 + s2)*2", "(s1 + s2)*2"]),
                    ),
                ],
                outputs=[
                    ir.Value(
                        name="output", type=ir.DataType.FLOAT, shape=ir.Shape("s99")
                    )
                ],
                nodes=[],
            ),
            ir_version=9,
            producer_name="pytorch",
            producer_version=torch.__version__,
        )
        # "s3" is absent from the model; the composite key "s1 + s2" must win
        # over its components even though the mapping is not pre-sorted.
        mapping = {
            "s1": "sequence_length",
            "s2": "past_sequence_length",
            "s0": "batch_size",
            "s1 + s2": "masked_sequence_length",
            "s3": "extra_sequence_length",
        }
        _ir_passes.rename_axis(model, mapping)
        self.assertEqual(
            model.graph.inputs[0].shape, ir.Shape(["batch_size", "sequence_length"])
        )
        self.assertEqual(
            model.graph.inputs[1].shape,
            ir.Shape(["batch_size + past_sequence_length", "masked_sequence_length"]),
        )
        self.assertEqual(
            model.graph.inputs[2].shape,
            ir.Shape(
                [
                    "sequence_length/(masked_sequence_length)*2",
                    "(masked_sequence_length)*2",
                ]
            ),
        )
if __name__ == "__main__":
common_utils.run_tests()
| ONNXIRPassesTest |
python | falconry__falcon | falcon/_typing.py | {
"start": 4097,
"end": 4298
} | class ____(Protocol):
async def __call__(
self,
resource: Resource,
req: AsgiRequest,
resp: AsgiResponse,
**kwargs: Any,
) -> None: ...
| AsgiResponderMethod |
python | pypa__warehouse | warehouse/macaroons/security_policy.py | {
"start": 2348,
"end": 7087
} | class ____:
    def __init__(self):
        # ACLHelper performs the final principal-vs-ACL check in permits().
        self._acl = ACLHelper()
    def identity(self, request):
        """Resolve the request's macaroon to an identity context.

        Returns a UserContext or PublisherTokenContext, or None when no
        macaroon is present, it is invalid, or the user account is disabled.
        """
        # If we're calling into this API on a request, then we want to register
        # a callback which will ensure that the response varies based on the
        # Authorization header.
        request.add_response_callback(add_vary_callback("Authorization"))
        request.authentication_method = AuthenticationMethod.MACAROON
        # We need to extract our Macaroon from the request.
        macaroon = _extract_http_macaroon(request)
        if macaroon is None:
            return None
        # Check to see if our Macaroon exists in the database, and if so
        # fetch the user that is associated with it.
        macaroon_service = request.find_service(IMacaroonService, context=None)
        try:
            dm = macaroon_service.find_from_raw(macaroon)
            # OIDC claims are only meaningful for publisher-issued macaroons.
            oidc_claims = (
                dm.additional.get("oidc")
                if dm.oidc_publisher and dm.additional
                else None
            )
        except InvalidMacaroonError:
            return None
        login_service = request.find_service(IUserService, context=None)
        # Every Macaroon is either associated with a user or an OIDC publisher.
        if dm.user is not None:
            is_disabled, _ = login_service.is_disabled(dm.user.id)
            if is_disabled:
                return None
            return UserContext(dm.user, dm)
        return PublisherTokenContext(dm.oidc_publisher, oidc_claims)
    def remember(self, request, userid, **kw):
        """Return no headers: macaroon auth keeps no server-side session."""
        # This is a NO-OP because our Macaroon header policy doesn't allow
        # the ability for authentication to "remember" the user id. This
        # assumes it has been configured in clients somewhere out of band.
        return []
    def forget(self, request, **kw):
        """Return no headers: there is no session state to clear."""
        # This is a NO-OP because our Macaroon header policy doesn't allow
        # the ability for authentication to "forget" the user id. This
        # assumes it has been configured in clients somewhere out of band.
        return []
    def authenticated_userid(self, request):
        """Unsupported here; the enclosing MultiSecurityPolicy resolves the user id."""
        # Handled by MultiSecurityPolicy
        raise NotImplementedError
    def permits(self, request, context, permission):
        """Decide whether the request's macaroon authorizes ``permission`` on ``context``.

        Restricts macaroons to an allow-list of permissions, verifies the
        macaroon's caveats against this request, then delegates the final
        ACL check to ACLHelper. Returns an allow result or WarehouseDenied.
        """
        # Re-extract our Macaroon from the request, it sucks to have to do this work
        # twice, but I believe it is inevitable unless we pass the Macaroon back as
        # a principal-- which doesn't seem to be the right fit for it.
        macaroon = _extract_http_macaroon(request)
        # It should not be possible to *not* have a macaroon at this point, because we
        # can't call this function without an identity that came from a macaroon
        assert isinstance(macaroon, str), "no valid macaroon"
        # Check to make sure that the permission we're attempting to permit is one that
        # is allowed to be used for macaroons.
        # TODO: This should be moved out of there and into the macaroons themselves, it
        #       doesn't really make a lot of sense here and it makes things more
        #       complicated if we want to allow the use of macaroons for actions other
        #       than uploading.
        if permission not in [
            Permissions.ProjectsUpload,
            # TODO: Adding API-specific routes here is not sustainable. However,
            #       removing this guard would allow Macaroons to be used for
            #       Session-based operations, bypassing any 2FA requirements.
            Permissions.APIEcho,
            Permissions.APIObservationsAdd,
        ]:
            return WarehouseDenied(
                f"API tokens are not valid for permission: {permission}!",
                reason="invalid_permission",
            )
        # Check if our macaroon itself is valid. This does not actually check if the
        # identity bound to that macaroon has permission to do what it's trying to do
        # but rather that the caveats embedded into the macaroon are valid for the given
        # request, context, and permission.
        macaroon_service = request.find_service(IMacaroonService, context=None)
        try:
            macaroon_service.verify(macaroon, request, context, permission)
        except InvalidMacaroonError as exc:
            return WarehouseDenied(
                f"Invalid API Token: {exc}", reason="invalid_api_token"
            )
        # The macaroon is valid, so we can actually see if request.identity is
        # authorized now or not.
        # NOTE: These parameters are in a different order than the signature of this
        # method.
        return self._acl.permits(context, principals_for(request.identity), permission)
| MacaroonSecurityPolicy |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_sparkline06.py | {
"start": 345,
"end": 4800
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with no cell data."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.name = "Sheet1"
worksheet.excel_version = 2010
data = [-2, 2, 3, -1, 0]
worksheet.write_row("A1", data)
worksheet.write_row("A2", data)
# Set up sparklines.
worksheet.add_sparkline(
"A1",
{
"location": ["F1", "F2"],
"range": ["A1:E1", "A2:E2"],
},
)
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1:E2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData>
<row r="1" spans="1:5" x14ac:dyDescent="0.25">
<c r="A1">
<v>-2</v>
</c>
<c r="B1">
<v>2</v>
</c>
<c r="C1">
<v>3</v>
</c>
<c r="D1">
<v>-1</v>
</c>
<c r="E1">
<v>0</v>
</c>
</row>
<row r="2" spans="1:5" x14ac:dyDescent="0.25">
<c r="A2">
<v>-2</v>
</c>
<c r="B2">
<v>2</v>
</c>
<c r="C2">
<v>3</v>
</c>
<c r="D2">
<v>-1</v>
</c>
<c r="E2">
<v>0</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{05C60535-1F16-4fd2-B633-F4F36F0B64E0}">
<x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:sparklineGroup displayEmptyCellsAs="gap">
<x14:colorSeries theme="4" tint="-0.499984740745262"/>
<x14:colorNegative theme="5"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers theme="4" tint="-0.499984740745262"/>
<x14:colorFirst theme="4" tint="0.39997558519241921"/>
<x14:colorLast theme="4" tint="0.39997558519241921"/>
<x14:colorHigh theme="4"/>
<x14:colorLow theme="4"/>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!A1:E1</xm:f>
<xm:sqref>F1</xm:sqref>
</x14:sparkline>
<x14:sparkline>
<xm:f>Sheet1!A2:E2</xm:f>
<xm:sqref>F2</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
</x14:sparklineGroups>
</ext>
</extLst>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | getsentry__sentry | tests/sentry/releases/endpoints/test_project_release_files.py | {
"start": 9601,
"end": 16899
} | class ____(APITestCase):
    def test_simple(self) -> None:
        """A valid multipart upload creates a ReleaseFile with merged headers."""
        project = self.create_project(name="foo")
        release = Release.objects.create(organization_id=project.organization_id, version="1")
        release.add_project(project)
        assert release.count_artifacts() == 0
        url = reverse(
            "sentry-api-0-project-release-files",
            kwargs={
                "organization_id_or_slug": project.organization.slug,
                "project_id_or_slug": project.slug,
                "version": release.version,
            },
        )
        self.login_as(user=self.user)
        response = self.client.post(
            url,
            {
                "name": "http://example.com/application.js",
                "header": "X-SourceMap: http://example.com",
                "file": SimpleUploadedFile(
                    "application.js", b"function() { }", content_type="application/javascript"
                ),
            },
            format="multipart",
        )
        assert release.count_artifacts() == 1
        assert response.status_code == 201, response.content
        releasefile = ReleaseFile.objects.get(release_id=release.id)
        assert releasefile.name == "http://example.com/application.js"
        assert releasefile.ident == ReleaseFile.get_ident("http://example.com/application.js")
        # Content-Type from the upload plus the user-supplied header line.
        assert releasefile.file.headers == {
            "Content-Type": "application/javascript",
            "X-SourceMap": "http://example.com",
        }
    def test_no_file(self) -> None:
        """Posting without a file part is rejected with 400."""
        project = self.create_project(name="foo")
        release = Release.objects.create(organization_id=project.organization_id, version="1")
        release.add_project(project)
        url = reverse(
            "sentry-api-0-project-release-files",
            kwargs={
                "organization_id_or_slug": project.organization.slug,
                "project_id_or_slug": project.slug,
                "version": release.version,
            },
        )
        self.login_as(user=self.user)
        response = self.client.post(
            url, {"header": "X-SourceMap: http://example.com"}, format="multipart"
        )
        assert response.status_code == 400, response.content
    def test_missing_name(self) -> None:
        """An empty file name is rejected with 400."""
        project = self.create_project(name="foo")
        release = Release.objects.create(organization_id=project.organization_id, version="1")
        release.add_project(project)
        url = reverse(
            "sentry-api-0-project-release-files",
            kwargs={
                "organization_id_or_slug": project.organization.slug,
                "project_id_or_slug": project.slug,
                "version": release.version,
            },
        )
        self.login_as(user=self.user)
        response = self.client.post(
            url,
            {
                "header": "X-SourceMap: http://example.com",
                # We can't use SimpleUploadedFile here, because it validates file names
                # and doesn't allow for empty strings.
                "file": ContentFile(
                    content=b"function() { }",
                    name="",
                ),
            },
            format="multipart",
        )
        assert response.status_code == 400, response.content
    def test_invalid_name(self) -> None:
        """A name containing control characters (tab/newline/CR) is rejected with 400."""
        project = self.create_project(name="foo")
        release = Release.objects.create(organization_id=project.organization_id, version="1")
        release.add_project(project)
        url = reverse(
            "sentry-api-0-project-release-files",
            kwargs={
                "organization_id_or_slug": project.organization.slug,
                "project_id_or_slug": project.slug,
                "version": release.version,
            },
        )
        self.login_as(user=self.user)
        response = self.client.post(
            url,
            {
                "name": "http://exa\tmple.com/applic\nati\ron.js\n",
                "header": "X-SourceMap: http://example.com/test.map.js",
                "file": SimpleUploadedFile(
                    "application.js", b"function() { }", content_type="application/javascript"
                ),
            },
            format="multipart",
        )
        assert response.status_code == 400, response.content
    def test_bad_headers(self) -> None:
        """Malformed header lines (no colon, embedded CR/LF) are rejected with 400."""
        project = self.create_project(name="foo")
        release = Release.objects.create(organization_id=project.organization_id, version="1")
        release.add_project(project)
        url = reverse(
            "sentry-api-0-project-release-files",
            kwargs={
                "organization_id_or_slug": project.organization.slug,
                "project_id_or_slug": project.slug,
                "version": release.version,
            },
        )
        self.login_as(user=self.user)
        # Header line without a "Name: value" separator.
        response = self.client.post(
            url,
            {
                "name": "http://example.com/application.js",
                "header": "lol",
                "file": SimpleUploadedFile(
                    "application.js", b"function() { }", content_type="application/javascript"
                ),
            },
            format="multipart",
        )
        assert response.status_code == 400, response.content
        # Header value containing CR/LF (header-injection attempt).
        response = self.client.post(
            url,
            {
                "name": "http://example.com/application.js",
                "header": "X-SourceMap: http://example.com/\r\n\ntest.map.js\n",
                "file": SimpleUploadedFile(
                    "application.js", b"function() { }", content_type="application/javascript"
                ),
            },
            format="multipart",
        )
        assert response.status_code == 400, response.content
    def test_duplicate_file(self) -> None:
        """Uploading the same file name twice yields 201 then 409 (conflict)."""
        project = self.create_project(name="foo")
        release = Release.objects.create(organization_id=project.organization_id, version="1")
        release.add_project(project)
        url = reverse(
            "sentry-api-0-project-release-files",
            kwargs={
                "organization_id_or_slug": project.organization.slug,
                "project_id_or_slug": project.slug,
                "version": release.version,
            },
        )
        self.login_as(user=self.user)
        data = {
            "name": "http://example.com/application.js",
            "header": "X-SourceMap: http://example.com",
            "file": SimpleUploadedFile(
                "application.js", b"function() { }", content_type="application/javascript"
            ),
        }
        response = self.client.post(url, data, format="multipart")
        assert response.status_code == 201, response.content
        releasefile = ReleaseFile.objects.get(release_id=release.id)
        assert releasefile.name == "http://example.com/application.js"
        assert releasefile.file.headers == {
            "Content-Type": "application/javascript",
            "X-SourceMap": "http://example.com",
        }
        # Now upload it again!
        response = self.client.post(url, data, format="multipart")
        assert response.status_code == 409, response.content
| ReleaseFileCreateTest |
python | zarr-developers__zarr-python | src/zarr/core/dtype/npy/bytes.py | {
"start": 2779,
"end": 3218
} | class ____(NamedConfig[Literal["raw_bytes"], FixedLengthBytesConfig]):
"""
The JSON representation of the ``RawBytes`` data type in Zarr V3.
References
----------
This representation is not currently defined in an external specification.
Examples
--------
```python
{
"name": "raw_bytes",
"configuration": {
"length_bytes": 12
}
}
```
"""
| RawBytesJSON_V3 |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 4749,
"end": 5104
} | class ____(PrefectBaseModel):
"""Filter by `FlowRun.state_type`."""
any_: Optional[List[StateType]] = Field(
default=None, description="A list of flow run state types to include"
)
not_any_: Optional[List[StateType]] = Field(
default=None, description="A list of flow run state types to exclude"
)
| FlowRunFilterStateType |
python | kamyu104__LeetCode-Solutions | Python/smallest-subsequence-of-distinct-characters.py | {
"start": 50,
"end": 529
} | class ____(object):
def smallestSubsequence(self, text):
"""
:type text: str
:rtype: str
"""
count = collections.Counter(text)
lookup, stk = set(), []
for c in text:
if c not in lookup:
while stk and stk[-1] > c and count[stk[-1]]:
lookup.remove(stk.pop())
stk += c
lookup.add(c)
count[c] -= 1
return "".join(stk)
| Solution |
python | EpistasisLab__tpot | tpot/search_spaces/nodes/estimator_node_gradual.py | {
"start": 6628,
"end": 7042
} | class ____(SearchSpace):
    def __init__(self, method, space, hyperparameter_parser=default_hyperparameter_parser):
        # method: estimator class this node wraps; space: its hyperparameter search space.
        self.method = method
        self.space = space
        # Callable that converts sampled space values into estimator kwargs.
        self.hyperparameter_parser = hyperparameter_parser
    def generate(self, rng=None):
        """Create a new individual for this node's search space, seeded by ``rng`` when given."""
        return EstimatorNodeIndividual_gradual(self.method, self.space, hyperparameter_parser=self.hyperparameter_parser, rng=rng)
python | joke2k__faker | faker/providers/color/ru_RU/__init__.py | {
"start": 98,
"end": 2745
} | class ____(ColorProvider):
"""Implement color provider for ``ru_RU`` locale."""
all_colors = OrderedDict(
(
("Античный Белый", "#FAEBD7"),
("Аквамарин", "#7FFFD4"),
("Лазурный", "#F0FFFF"),
("Бежевый", "#F5F5DC"),
("Черный", "#000000"),
("Синий", "#0000FF"),
("Сине-фиолетовый", "#8A2BE2"),
("Коричневый", "#A52A2A"),
("Шоколадный", "#D2691E"),
("Коралловый", "#FF7F50"),
("Васильковый", "#6495ED"),
("Малиновый", "#DC143C"),
("Темно-синий", "#00008B"),
("Темно-голубой", "#008B8B"),
("Темно-серый", "#A9A9A9"),
("Темно-зеленый", "#006400"),
("Темный хаки", "#BDB76B"),
("Темно-оранжевый", "#FF8C00"),
("Темно-красный", "#8B0000"),
("Темно-бирюзовый", "#00CED1"),
("Темно-фиолетовый", "#9400D3"),
("Темно-розовый", "#FF1493"),
("Тусклый серый", "#696969"),
("Фуксия", "#FF00FF"),
("Золотой", "#FFD700"),
("Серый", "#808080"),
("Зеленый", "#008000"),
("Желто-зеленый", "#ADFF2F"),
("Ярко-розовый", "#FF69B4"),
("Индиго", "#4B0082"),
("Слоновая кость", "#FFFFF0"),
("Хаки", "#F0E68C"),
("Розовато-лавандовый", "#FFF0F5"),
("Светло-синий", "#ADD8E6"),
("Светло-голубой", "#E0FFFF"),
("Светло-серый", "#D3D3D3"),
("Светло-зеленый", "#90EE90"),
("Светло-розовый", "#FFB6C1"),
("Светло-голубой", "#87CEFA"),
("Светло-желтый", "#FFFFE0"),
("Каштановый", "#800000"),
("Оранжевый", "#FFA500"),
("Оранжево-красный", "#FF4500"),
("Бледно-зеленый", "#98FB98"),
("Бледно-Бирюзовый", "#AFEEEE"),
("Розовый", "#FFC0CB"),
("Сливовый", "#DDA0DD"),
("Пурпурный", "#800080"),
("Красный", "#FF0000"),
("Цвет морской волны", "#2E8B57"),
("Серебряный", "#C0C0C0"),
("Бирюзовый", "#40E0D0"),
("Фиолетовый", "#EE82EE"),
("Белый", "#FFFFFF"),
("Желтый", "#FFFF00"),
("Желто-зеленый", "#9ACD32"),
)
)
safe_colors = (
"черный",
"бордовый",
"зеленый",
"оливковый",
"пурпурный",
"teal",
"lime",
"синий",
"серебряный",
"серый",
"желтый",
"фуксия",
"белый",
)
| Provider |
python | astropy__astropy | astropy/extern/configobj/configobj.py | {
"start": 6018,
"end": 6297
} | class ____(InterpolationError):
"""Maximum interpolation depth exceeded in string interpolation."""
    def __init__(self, option):
        # ``option`` is the name of the config value whose interpolation cycled.
        InterpolationError.__init__(
            self,
            'interpolation loop detected in value "%s".' % option)
| InterpolationLoopError |
python | walkccc__LeetCode | solutions/1781. Sum of Beauty of All Substrings/1781.py | {
"start": 0,
"end": 262
} | class ____:
def beautySum(self, s: str) -> int:
ans = 0
for i in range(len(s)):
count = collections.Counter()
for j in range(i, len(s)):
count[s[j]] += 1
ans += max(count.values()) - min(count.values())
return ans
| Solution |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 155829,
"end": 173323
} | class ____(OperationBuffer):
"""
Represents a buffer that is computed during kernel execution rather than being an input.
"""
data: Loops
_force_realize: ClassVar[bool] = False
# fields for split reduction
_split_size: Optional[int] = None
_original_inner_fn: Optional[Callable[..., Any]] = None
_original_ranges: Optional[Sequence[_IntLike]] = None
_original_reduction_ranges: Optional[Sequence[_IntLike]] = None
@contextlib.contextmanager
def with_original_inner_fn(self) -> Iterator[None]:
assert self._split_size is not None
assert self._original_inner_fn is not None
assert self._original_ranges is not None
assert self._original_reduction_ranges is not None
assert isinstance(self.data, Reduction), f"{type(self.data)}"
old_data = self.data
old_layout = self.layout
try:
new_data = Reduction(
device=old_data.device,
dtype=old_data.dtype,
inner_fn=self._original_inner_fn,
ranges=self._original_ranges,
reduction_ranges=self._original_reduction_ranges,
reduction_type=old_data.reduction_type,
src_dtype=old_data.src_dtype,
reduction_hint=old_data.reduction_hint,
)
self.data = new_data
# this layout does not matter since we skip tl.store
# later
self.layout = FixedLayout(
old_data.device,
old_data.dtype,
self._original_ranges,
)
self.get_default_sizes_body.clear_cache(self)
yield
finally:
self.data = old_data
self.layout = old_layout
@staticmethod
@contextlib.contextmanager
def force_realize() -> Iterator[None]:
old_value = ComputedBuffer._force_realize
try:
ComputedBuffer._force_realize = True
yield
finally:
ComputedBuffer._force_realize = old_value
def get_computed_buffer_name(self) -> Optional[str]:
"""
Returns self.name if it exists, otherwise returns the name of the data node if that exists.
If neither exist, returns None.
"""
if self.name is not None:
return self.name
if hasattr(self.data, "name"):
return self.data.name
return None
def num_reads(self) -> int:
return self.data.num_reads()
def get_reads(self) -> OrderedSet[Dep]:
return self.data.get_reads()
def get_read_names(self) -> OrderedSet[str]:
return self.data.get_read_names()
def get_read_writes(self) -> dependencies.ReadWrites:
if not isinstance(self.data, (Reduction, Scan, Sort, Pointwise)):
return dependencies.ReadWrites(
reads=OrderedSet(),
writes=OrderedSet(),
index_exprs=OrderedSet(),
)
with patch.object(FlexibleLayout, "allow_indexing", True):
if self.data.get_reduction_type():
return extract_read_writes(
self.get_store_function(),
self.data.get_pointwise_size(),
self.data.get_reduction_size(),
)
else:
return extract_read_writes(
self.get_store_function(),
self.data.get_size(),
)
@cache_on_self_and_args("ComputedBuffer")
def get_free_symbol_uses(
self, unbacked_only: bool = False
) -> OrderedSet[sympy.Symbol]:
# Ordinarily, we'd like to just peek at the arguments list,
# but ComputedBuffers have no argument list.
#
# Morally, this logic needs to be synchronized with the
# KernelArgs.size calls, which are responsible for making symbols make
# there way as kernel arguments (and it is precisely passing in one of
# those symbols that establishes a dependency). However, we haven't
# started codegen yet so we can't directly reuse that logic.
#
# One thing you might wonder is if this is enough for a ComputedBuffer
# denoting a reduction over i0. Empirically, it is enough, but for an
# unusual reason: we only need accurate dependencies for item() call,
# but it's impossible to end up with a reduction over i0 from an
# item() call without a regular non-reduction buffer first.
result = self.layout.get_free_symbol_uses(
unbacked_only
) | self.data.get_free_symbol_uses(unbacked_only)
if self.has_store_function():
result |= self.get_read_writes().get_free_symbol_uses(unbacked_only)
return result
def make_loader(self) -> Callable[[Sequence[Expr]], OpsValue]:
if (
not self.get_reduction_type()
and self.name not in V.graph.mutated_buffers
and self.num_reads() == 0
and not self._force_realize
):
# inline this op rather than generating ops.load()
return self.data.make_loader()
return super().make_loader()
def has_store_function(self) -> bool:
return isinstance(self.data, (Reduction, Scan, Sort, Pointwise))
def get_store_function(self) -> Callable[..., None]:
indexer = self.get_layout().as_fixed().make_indexer()
if isinstance(self.data, (Reduction, Scan, Sort)):
return partial(self.data.store_reduction, self.name, indexer)
else:
assert isinstance(self.data, Pointwise), type(self.data)
return partial(self.data.store_output, self.name, indexer)
def get_fill_order(self) -> Optional[list[int]]:
"""
If our layout is still flexible, try to determine the stride order based on stride orders of reads.
TODO(jansel): A better algorithm here would look at downstream consumers of this
value and try to do global graph-level layout optimization.
This is also something just begging to be autotuned.
"""
if isinstance(self.layout, FlexibleLayout):
(index_vars, reduction_vars), _ = dependencies.index_vars_squeeze(
self.data.get_pointwise_size(), self.data.get_reduction_size()
)
reads = self.get_read_writes().reads
# only consider reads to buffer of same size
# ignore StarDeps because they don't contribute stride information
assert all(
isinstance(r, (dependencies.StarDep, dependencies.MemoryDep))
for r in reads
)
reads = [
sympy_subs(r.index, {v: sympy.S.Zero for v in reduction_vars if v != 0})
for r in reads
if isinstance(r, dependencies.MemoryDep)
]
if reads:
if isinstance(self.data, (Scan, Sort)):
indices = self.data.reindex(index_vars, reduction_vars)
else:
indices = index_vars
stride_lengths = [
V.graph.sizevars.stride_hints(expr, indices) for expr in reads
]
from .scheduler import pick_loop_order
return pick_loop_order(stride_lengths, self.get_size())
return None
def decide_layout(self) -> None:
if isinstance(self.layout, FlexibleLayout):
order = self.get_fill_order()
if order:
self.freeze_layout_with_fill_order(order)
else:
self.freeze_layout()
@cache_on_self
def get_default_sizes_body(
self,
) -> tuple[
tuple[list[Expr], list[Expr]],
LoopBody,
tuple[list[Expr], list[Expr]],
]:
args, var_ranges = dependencies.index_vars_squeeze(
self.get_pointwise_size(), self.get_reduction_size(), prefix="q"
)
with patch.object(ConstantBuffer, "override_device", self.get_device()):
body = LoopBody(
self.get_store_function(),
(args if self.get_reduction_type() else args[:1]),
var_ranges,
*args,
)
index_vars = []
reduce_vars: list[Any] = []
index_size = []
reduce_size = []
for v, s in var_ranges.items():
if v in args[0]:
assert not reduce_vars
index_vars.append(v)
index_size.append(s)
else:
assert v in args[1]
reduce_vars.append(v)
reduce_size.append(s)
return (index_size, reduce_size), body, (index_vars, reduce_vars)
def simplify_and_reorder(
self,
extra_indexing_constraints: Optional[tuple[dict[Any, Any], list[Any]]] = None,
recompute_sizes_body_func: Optional[Callable[..., Any]] = None,
) -> tuple[tuple[list[Expr], list[Expr]], Optional[LoopBody]]:
"""
This is a main place where we do loop transformations in a
backend-agnostic way.
Here we:
1) Remove any 1 dimensions
2) Fuse contiguous dimensions together
3) Reorder dimensions based on stride orders
Optional argument extra_indexing_constraints can be used to append additional
indexing expressions to existing ones derived from buffer's body. This can be useful
to fuse scheduler nodes with compatible ranges, e.g. (s0*s1*...,) and (s0, s1, s2, ...)
on CPU by preventing indexing simplifications and obtaining index/reduce ranges for
the scheduler node compatible with other nodes.
Optional argument recompute_sizes_body_func can be used to recompute sizes and body
on the default body. This can be useful to append additional loop transformations.
"""
(
(index_size, reduce_size),
body,
(index_vars, reduce_vars),
) = self.get_default_sizes_body()
if recompute_sizes_body_func:
(
(index_size, reduce_size),
body,
(index_vars, reduce_vars),
) = recompute_sizes_body_func(
(index_size, reduce_size), body, (index_vars, reduce_vars)
)
index_formulas = [*body.indexing_exprs.values()]
if extra_indexing_constraints is not None:
assert (
isinstance(extra_indexing_constraints, tuple)
and len(extra_indexing_constraints) == 2
)
extra_indexing_ranges, extra_indexing_expr = extra_indexing_constraints
assert isinstance(extra_indexing_ranges, dict), type(extra_indexing_ranges)
assert isinstance(extra_indexing_expr, list), type(extra_indexing_expr)
assert all(isinstance(f, Expr) for f in extra_indexing_expr)
expected_var_ranges = body.var_ranges
assert expected_var_ranges == extra_indexing_ranges, (
expected_var_ranges,
extra_indexing_ranges,
)
# remove already existing expressions
extra_indexing_expr = [
e for e in extra_indexing_expr if e not in index_formulas
]
index_formulas += extra_indexing_expr
memory_addrs = [*body.get_write_exprs()]
if not V.graph.has_feature(self, BackendFeature.PREFER_STORE_LOOP_ORDER):
memory_addrs.extend(body.get_read_exprs())
def simplify_and_reorder(
x_vars: Sequence[sympy.Symbol],
support_vars: Sequence[sympy.Symbol],
sizes: Sequence[int],
simplify_loops: bool,
) -> tuple[
list[int],
Callable[[Sequence[int]], Sequence[int]],
Callable[[Sequence[int]], Sequence[int]],
]:
newsizes, reindex0, reindex1 = self._apply_loop_reordering(
x_vars, support_vars, sizes, memory_addrs
)
# When using native matmul, the codegen assumes the following loop order,
# regardless of the stride of A and B:
#
# for z -> y -> x -> r: C[z, y, x] += A[z, y, r] * B[z, r, x]
# or
# for z -> x -> y -> r: C[z, y, x] += A[z, y, r] * B[z, r, x]
#
# The critical point is the position of the "z" (batch) axis in bmm.
# It is fine to swap the y and x axes (e.g., (z, y, x, r) or (z, x, y, r)),
# but reordering the z axis (e.g., (y, x, z, r)) breaks codegen.
#
# Therefore, if loop reordering changes the "z" location in bmm,
# it should be reverted to the default.
# This may not always produce the optimal loop order when strides
# do not align with the default assumption.
#
# TODO: Consider extending tl.dot codegen to support arbitrary loop orders.
if self.get_reduction_type() == "dot" and len(sizes) == 3:
order = list(range(len(sizes))) # default order
# if z axis is not the outermost, use the default reorder.
if reindex0(order)[0] != 0:
newsizes = [sizes[i] for i in order]
reindex0 = same_reorder(order)
reindex1 = inverse_reorder(order)
# for NHWC: reindex0([0,1,2,3]) = [0,2,3,1], reindex1([0,1,2,3]) = [0,3,2,1]
x_vars = reindex0(x_vars)
if simplify_loops:
newsizes, reindex2, _prune = V.graph.sizevars._simplify_loops(
x_vars,
newsizes,
index_prevent_reordering(index_formulas, x_vars, newsizes),
)
reindex = fuse_reindexing(reindex1, reindex2)
else:
reindex = reindex1
return newsizes, reindex, reindex1
support_vars = index_vars + reduce_vars
should_merge_loops = (
not is_gpu(get_device_type(self)) or not config.loop_ordering_after_fusion
)
iter_ranges, iter_reindex, _ = simplify_and_reorder(
index_vars,
support_vars,
index_size,
should_merge_loops,
)
# Like iteration dimensions, we may also want to delay merging reduction dimensions.
# E.g., if we reduce a tensor [M, N, K] for its M and N dimensions followed by a pointwise
# kernel, merging M and N dimension too early makes it hard to decide what loop order
# we should pick for the piontwise kernel so that it is fusible with the reduction.
reduce_ranges, reduce_reindex, _ = simplify_and_reorder(
reduce_vars, support_vars, reduce_size, should_merge_loops
)
# retrace the loop body with simplification and reordering applied
(iter_vars, reduce_vars), var_ranges = dependencies.index_vars_no_squeeze(
iter_ranges,
reduce_ranges,
prefix="p",
)
body = LoopBody(
body,
[iter_reindex(iter_vars), reduce_reindex(reduce_vars)],
var_ranges,
iter_vars,
reduce_vars,
)
return (iter_ranges, reduce_ranges), body
@staticmethod
def _apply_loop_reordering(
index_vars: Sequence[sympy.Symbol],
support_vars: Sequence[sympy.Symbol],
sizes: Sequence[int],
memory_addrs: list[sympy.Expr],
priority_idx: Optional[list[int]] = None,
) -> tuple[
list[int],
Callable[[Sequence[int]], Sequence[int]],
Callable[[Sequence[int]], Sequence[int]],
]:
"""
Shuffle the order of loops around to hopefully improve performance.
"""
from .scheduler import pick_loop_order
if priority_idx is None:
priority_idx = []
try:
strides = [
V.graph.sizevars.stride_hints(expr, index_vars, support_vars)
for expr in memory_addrs
]
assert len(strides) == len(memory_addrs) and len(strides[0]) == len(
index_vars
)
order = list(reversed(pick_loop_order(strides, sizes, priority_idx)))
except Exception:
if config.debug:
log.warning(
"Did not simplify complex index:\n%s\n%s",
dict(zip(index_vars, sizes)),
memory_addrs,
)
order = list(range(len(sizes)))
sizes = [sizes[i] for i in order]
return sizes, same_reorder(order), inverse_reorder(order)
def get_pointwise_size(self) -> Sequence[Expr]:
return self.data.get_pointwise_size()
def get_reduction_size(self) -> Sequence[Expr]:
return self.data.get_reduction_size()
def get_reduction_type(self) -> Optional[str]:
return self.data.get_reduction_type()
def is_no_op(self) -> bool:
return self.data.is_zero_elements()
def should_allocate(self) -> bool:
return True
def constant_to_device(self, device: torch.device) -> IRNode:
"""Move this to a given device. Requires that all reads are to constants."""
return self.data.constant_to_device(device)
| ComputedBuffer |
python | TheAlgorithms__Python | sorts/external_sort.py | {
"start": 1350,
"end": 2182
} | class ____:
def __init__(self, files):
self.files = files
self.empty = set()
self.num_buffers = len(files)
self.buffers = dict.fromkeys(range(self.num_buffers))
def get_dict(self):
return {
i: self.buffers[i] for i in range(self.num_buffers) if i not in self.empty
}
def refresh(self):
for i in range(self.num_buffers):
if self.buffers[i] is None and i not in self.empty:
self.buffers[i] = self.files[i].readline()
if self.buffers[i] == "":
self.empty.add(i)
self.files[i].close()
return len(self.empty) != self.num_buffers
def unshift(self, index):
value = self.buffers[index]
self.buffers[index] = None
return value
| FilesArray |
python | huggingface__transformers | src/transformers/models/speecht5/modeling_speecht5.py | {
"start": 70350,
"end": 72296
} | class ____(SpeechT5PreTrainedModel):
"""
Wrapper around SpeechT5Decoder that applies SpeechT5TextDecoderPrenet to convert input tokens to hidden features.
"""
def __init__(self, config: SpeechT5Config):
super().__init__(config)
self.prenet = SpeechT5TextDecoderPrenet(config)
self.wrapped_decoder = SpeechT5Decoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.prenet.get_input_embeddings()
def set_input_embeddings(self, value):
self.prenet.set_input_embeddings(value)
def forward(
self,
input_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
decoder_hidden_states, attention_mask = self.prenet(input_values, attention_mask, past_key_values)
outputs = self.wrapped_decoder(
hidden_states=decoder_hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
return outputs
| SpeechT5DecoderWithTextPrenet |
python | django__django | tests/template_tests/filter_tests/test_truncatewords.py | {
"start": 171,
"end": 1032
} | class ____(SimpleTestCase):
@setup(
{
"truncatewords01": (
'{% autoescape off %}{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}'
"{% endautoescape %}"
)
}
)
def test_truncatewords01(self):
output = self.engine.render_to_string(
"truncatewords01",
{"a": "alpha & bravo", "b": mark_safe("alpha & bravo")},
)
self.assertEqual(output, "alpha & … alpha & …")
@setup({"truncatewords02": '{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}'})
def test_truncatewords02(self):
output = self.engine.render_to_string(
"truncatewords02",
{"a": "alpha & bravo", "b": mark_safe("alpha & bravo")},
)
self.assertEqual(output, "alpha & … alpha & …")
| TruncatewordsTests |
python | sqlalchemy__sqlalchemy | test/ext/test_hybrid.py | {
"start": 38564,
"end": 47524
} | class ____(fixtures.DeclarativeMappedTest, AssertsCompiledSQL):
"""Original DML test suite when we first added the ability for ORM
UPDATE to handle hybrid values.
"""
__dialect__ = "default"
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Person(Base):
__tablename__ = "person"
id = Column(Integer, primary_key=True)
first_name = Column(String(10))
last_name = Column(String(10))
@hybrid.hybrid_property
def name(self):
return self.first_name + " " + self.last_name
@name.setter
def name(self, value):
self.first_name, self.last_name = value.split(" ", 1)
@name.expression
def name(cls):
return func.concat(cls.first_name, " ", cls.last_name)
@name.update_expression
def name(cls, value):
f, l = value.split(" ", 1)
return [(cls.first_name, f), (cls.last_name, l)]
@hybrid.hybrid_property
def uname(self):
return self.name
@hybrid.hybrid_property
def fname(self):
return self.first_name
@hybrid.hybrid_property
def fname2(self):
return self.fname
@classmethod
def insert_data(cls, connection):
s = Session(connection)
jill = cls.classes.Person(id=3, first_name="jill")
s.add(jill)
s.commit()
def test_update_plain(self):
Person = self.classes.Person
statement = update(Person).values({Person.fname: "Dr."})
self.assert_compile(
statement,
"UPDATE person SET first_name=:first_name",
params={"first_name": "Dr."},
)
@testing.combinations("attr", "str", "kwarg", argnames="keytype")
def test_update_expr(self, keytype):
Person = self.classes.Person
if keytype == "attr":
statement = update(Person).values({Person.name: "Dr. No"})
elif keytype == "str":
statement = update(Person).values({"name": "Dr. No"})
elif keytype == "kwarg":
statement = update(Person).values(name="Dr. No")
else:
assert False
self.assert_compile(
statement,
"UPDATE person SET first_name=:first_name, last_name=:last_name",
checkparams={"first_name": "Dr.", "last_name": "No"},
)
@testing.combinations("attr", "str", "kwarg", argnames="keytype")
def test_insert_expr(self, keytype):
Person = self.classes.Person
if keytype == "attr":
statement = insert(Person).values({Person.name: "Dr. No"})
elif keytype == "str":
statement = insert(Person).values({"name": "Dr. No"})
elif keytype == "kwarg":
statement = insert(Person).values(name="Dr. No")
else:
assert False
self.assert_compile(
statement,
"INSERT INTO person (first_name, last_name) VALUES "
"(:first_name, :last_name)",
checkparams={"first_name": "Dr.", "last_name": "No"},
)
# these tests all run two UPDATES to assert that caching is not
# interfering. this is #7209
def test_evaluate_non_hybrid_attr(self):
# this is a control case
Person = self.classes.Person
s = fixture_session()
jill = s.get(Person, 3)
s.query(Person).update(
{Person.first_name: "moonbeam"}, synchronize_session="evaluate"
)
eq_(jill.first_name, "moonbeam")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"moonbeam",
)
s.query(Person).update(
{Person.first_name: "sunshine"}, synchronize_session="evaluate"
)
eq_(jill.first_name, "sunshine")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"sunshine",
)
def test_evaluate_hybrid_attr_indirect(self):
Person = self.classes.Person
s = fixture_session()
jill = s.get(Person, 3)
s.query(Person).update(
{Person.fname2: "moonbeam"}, synchronize_session="evaluate"
)
eq_(jill.fname2, "moonbeam")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"moonbeam",
)
s.query(Person).update(
{Person.fname2: "sunshine"}, synchronize_session="evaluate"
)
eq_(jill.fname2, "sunshine")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"sunshine",
)
def test_evaluate_hybrid_attr_plain(self):
Person = self.classes.Person
s = fixture_session()
jill = s.get(Person, 3)
s.query(Person).update(
{Person.fname: "moonbeam"}, synchronize_session="evaluate"
)
eq_(jill.fname, "moonbeam")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"moonbeam",
)
s.query(Person).update(
{Person.fname: "sunshine"}, synchronize_session="evaluate"
)
eq_(jill.fname, "sunshine")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"sunshine",
)
def test_fetch_hybrid_attr_indirect(self):
Person = self.classes.Person
s = fixture_session()
jill = s.get(Person, 3)
s.query(Person).update(
{Person.fname2: "moonbeam"}, synchronize_session="fetch"
)
eq_(jill.fname2, "moonbeam")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"moonbeam",
)
s.query(Person).update(
{Person.fname2: "sunshine"}, synchronize_session="fetch"
)
eq_(jill.fname2, "sunshine")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"sunshine",
)
def test_fetch_hybrid_attr_plain(self):
Person = self.classes.Person
s = fixture_session()
jill = s.get(Person, 3)
s.query(Person).update(
{Person.fname: "moonbeam"}, synchronize_session="fetch"
)
eq_(jill.fname, "moonbeam")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"moonbeam",
)
s.query(Person).update(
{Person.fname: "sunshine"}, synchronize_session="fetch"
)
eq_(jill.fname, "sunshine")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"sunshine",
)
def test_evaluate_hybrid_attr_w_update_expr(self):
Person = self.classes.Person
s = fixture_session()
jill = s.get(Person, 3)
s.query(Person).update(
{Person.name: "moonbeam sunshine"}, synchronize_session="evaluate"
)
eq_(jill.name, "moonbeam sunshine")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"moonbeam",
)
s.query(Person).update(
{Person.name: "first last"}, synchronize_session="evaluate"
)
eq_(jill.name, "first last")
eq_(s.scalar(select(Person.first_name).where(Person.id == 3)), "first")
def test_fetch_hybrid_attr_w_update_expr(self):
Person = self.classes.Person
s = fixture_session()
jill = s.get(Person, 3)
s.query(Person).update(
{Person.name: "moonbeam sunshine"}, synchronize_session="fetch"
)
eq_(jill.name, "moonbeam sunshine")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"moonbeam",
)
s.query(Person).update(
{Person.name: "first last"}, synchronize_session="fetch"
)
eq_(jill.name, "first last")
eq_(s.scalar(select(Person.first_name).where(Person.id == 3)), "first")
def test_evaluate_hybrid_attr_indirect_w_update_expr(self):
Person = self.classes.Person
s = fixture_session()
jill = s.get(Person, 3)
s.query(Person).update(
{Person.uname: "moonbeam sunshine"}, synchronize_session="evaluate"
)
eq_(jill.uname, "moonbeam sunshine")
eq_(
s.scalar(select(Person.first_name).where(Person.id == 3)),
"moonbeam",
)
s.query(Person).update(
{Person.uname: "first last"}, synchronize_session="evaluate"
)
eq_(jill.uname, "first last")
eq_(s.scalar(select(Person.first_name).where(Person.id == 3)), "first")
if TYPE_CHECKING:
from sqlalchemy.sql import SQLColumnExpression
@dataclasses.dataclass(eq=False)
| BulkUpdateTest |
python | celery__celery | celery/schedules.py | {
"start": 27233,
"end": 32860
} | class ____(BaseSchedule):
"""Solar event.
A solar event can be used as the ``run_every`` value of a
periodic task entry to schedule based on certain solar events.
Notes:
Available event values are:
- ``dawn_astronomical``
- ``dawn_nautical``
- ``dawn_civil``
- ``sunrise``
- ``solar_noon``
- ``sunset``
- ``dusk_civil``
- ``dusk_nautical``
- ``dusk_astronomical``
Arguments:
event (str): Solar event that triggers this task.
See note for available values.
lat (float): The latitude of the observer.
lon (float): The longitude of the observer.
nowfun (Callable): Function returning the current date and time
as a class:`~datetime.datetime`.
app (Celery): Celery app instance.
"""
_all_events = {
'dawn_astronomical',
'dawn_nautical',
'dawn_civil',
'sunrise',
'solar_noon',
'sunset',
'dusk_civil',
'dusk_nautical',
'dusk_astronomical',
}
_horizons = {
'dawn_astronomical': '-18',
'dawn_nautical': '-12',
'dawn_civil': '-6',
'sunrise': '-0:34',
'solar_noon': '0',
'sunset': '-0:34',
'dusk_civil': '-6',
'dusk_nautical': '-12',
'dusk_astronomical': '18',
}
_methods = {
'dawn_astronomical': 'next_rising',
'dawn_nautical': 'next_rising',
'dawn_civil': 'next_rising',
'sunrise': 'next_rising',
'solar_noon': 'next_transit',
'sunset': 'next_setting',
'dusk_civil': 'next_setting',
'dusk_nautical': 'next_setting',
'dusk_astronomical': 'next_setting',
}
_use_center_l = {
'dawn_astronomical': True,
'dawn_nautical': True,
'dawn_civil': True,
'sunrise': False,
'solar_noon': False,
'sunset': False,
'dusk_civil': True,
'dusk_nautical': True,
'dusk_astronomical': True,
}
def __init__(self, event: str, lat: int | float, lon: int | float, **
kwargs: Any) -> None:
self.ephem = __import__('ephem')
self.event = event
self.lat = lat
self.lon = lon
super().__init__(**kwargs)
if event not in self._all_events:
raise ValueError(SOLAR_INVALID_EVENT.format(
event=event, all_events=', '.join(sorted(self._all_events)),
))
if lat < -90 or lat > 90:
raise ValueError(SOLAR_INVALID_LATITUDE.format(lat=lat))
if lon < -180 or lon > 180:
raise ValueError(SOLAR_INVALID_LONGITUDE.format(lon=lon))
cal = self.ephem.Observer()
cal.lat = str(lat)
cal.lon = str(lon)
cal.elev = 0
cal.horizon = self._horizons[event]
cal.pressure = 0
self.cal = cal
self.method = self._methods[event]
self.use_center = self._use_center_l[event]
def __reduce__(self) -> tuple[type, tuple[str, int | float, int | float]]:
return self.__class__, (self.event, self.lat, self.lon)
def __repr__(self) -> str:
return '<solar: {} at latitude {}, longitude: {}>'.format(
self.event, self.lat, self.lon,
)
def remaining_estimate(self, last_run_at: datetime) -> timedelta:
"""Return estimate of next time to run.
Returns:
~datetime.timedelta: when the periodic task should
run next, or if it shouldn't run today (e.g., the sun does
not rise today), returns the time when the next check
should take place.
"""
last_run_at = self.maybe_make_aware(last_run_at)
last_run_at_utc = localize(last_run_at, timezone.utc)
self.cal.date = last_run_at_utc
try:
if self.use_center:
next_utc = getattr(self.cal, self.method)(
self.ephem.Sun(),
start=last_run_at_utc, use_center=self.use_center
)
else:
next_utc = getattr(self.cal, self.method)(
self.ephem.Sun(), start=last_run_at_utc
)
except self.ephem.CircumpolarError: # pragma: no cover
# Sun won't rise/set today. Check again tomorrow
# (specifically, after the next anti-transit).
next_utc = (
self.cal.next_antitransit(self.ephem.Sun()) +
timedelta(minutes=1)
)
next = self.maybe_make_aware(next_utc.datetime())
now = self.maybe_make_aware(self.now())
delta = next - now
return delta
def is_due(self, last_run_at: datetime) -> tuple[bool, datetime]:
"""Return tuple of ``(is_due, next_time_to_run)``.
Note:
next time to run is in seconds.
See Also:
:meth:`celery.schedules.schedule.is_due` for more information.
"""
rem_delta = self.remaining_estimate(last_run_at)
rem = max(rem_delta.total_seconds(), 0)
due = rem == 0
if due:
rem_delta = self.remaining_estimate(self.now())
rem = max(rem_delta.total_seconds(), 0)
return schedstate(due, rem)
def __eq__(self, other: Any) -> bool:
if isinstance(other, solar):
return (
other.event == self.event and
other.lat == self.lat and
other.lon == self.lon
)
return NotImplemented
| solar |
python | django__django | django/contrib/postgres/lookups.py | {
"start": 735,
"end": 855
} | class ____(PostgresOperatorLookup):
lookup_name = "has_key"
postgres_operator = "?"
prepare_rhs = False
| HasKey |
python | mlflow__mlflow | dev/clint/tests/rules/test_temp_dir_in_test.py | {
"start": 2653,
"end": 4038
} | class ____:
@staticmethod
def TemporaryDirectory():
pass
fake_tempfile = FakeTempfile()
def test_func():
# Should not trigger since it's not tempfile.TemporaryDirectory
fake_tempfile.TemporaryDirectory()
"""
config = Config(select={TempDirInTest.name})
violations = lint_file(Path("test_file.py"), code, config, index_path)
assert len(violations) == 0
def test_temp_dir_in_test_with_context_manager(index_path: Path) -> None:
code = """
import tempfile
# Bad - using with statement
def test_func():
with tempfile.TemporaryDirectory() as tmpdir:
pass
"""
config = Config(select={TempDirInTest.name})
violations = lint_file(Path("test_file.py"), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, TempDirInTest) for v in violations)
assert violations[0].range == Range(Position(5, 9))
def test_temp_dir_in_test_assigned_to_variable(index_path: Path) -> None:
code = """
import tempfile
# Bad - assigned to variable
def test_func():
tmpdir = tempfile.TemporaryDirectory()
"""
config = Config(select={TempDirInTest.name})
violations = lint_file(Path("test_file.py"), code, config, index_path)
assert len(violations) == 1
assert all(isinstance(v.rule, TempDirInTest) for v in violations)
assert violations[0].range == Range(Position(5, 13))
| FakeTempfile |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_drypython_returns.py | {
"start": 2424,
"end": 2496
} | class ____(_FirstBase[C, D], _SecondBase[A, B]):
pass
| TwoGenericBases2 |
python | keon__algorithms | tests/test_maths.py | {
"start": 16289,
"end": 17561
} | class ____(unittest.TestCase):
"""[summary]
Test for the file fft.py
Arguments:
unittest {[type]} -- [description]
"""
def test_real_numbers(self):
x = [1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0]
y = [4.000, 2.613, 0.000, 1.082, 0.000, 1.082, 0.000, 2.613]
# abs(complex) returns the magnitude
result = [float("%.3f" % abs(f)) for f in fft.fft(x)]
self.assertEqual(result, y)
def test_all_zero(self):
x = [0.0, 0.0, 0.0, 0.0]
y = [0.0, 0.0, 0.0, 0.0]
result = [float("%.1f" % abs(f)) for f in fft.fft(x)]
self.assertEqual(result, y)
def test_all_ones(self):
x = [1.0, 1.0, 1.0, 1.0]
y = [4.0, 0.0, 0.0, 0.0]
result = [float("%.1f" % abs(f)) for f in fft.fft(x)]
self.assertEqual(result, y)
def test_complex_numbers(self):
x = [2.0+2j, 1.0+3j, 3.0+1j, 2.0+2j]
real = [8.0, 0.0, 2.0, -2.0]
imag = [8.0, 2.0, -2.0, 0.0]
realResult = [float("%.1f" % f.real) for f in fft.fft(x)]
imagResult = [float("%.1f" % f.imag) for f in fft.fft(x)]
self.assertEqual(real, realResult)
self.assertEqual(imag, imagResult)
if __name__ == "__main__":
unittest.main()
| TestFFT |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/foundry.py | {
"start": 10181,
"end": 17537
} | class ____(BaseFoundryClient[httpx.AsyncClient, AsyncStream[Any]], AsyncAnthropic):
@overload
def __init__(
self,
*,
resource: str | None = None,
api_key: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
) -> None: ...
@overload
def __init__(
self,
*,
base_url: str,
api_key: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
) -> None: ...
def __init__(
self,
*,
resource: str | None = None,
api_key: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
base_url: str | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
http_client: httpx.AsyncClient | None = None,
_strict_response_validation: bool = False,
) -> None:
"""Construct a new asynchronous Anthropic Foundry client instance.
This automatically infers the following arguments from their corresponding environment variables if they are not provided:
- `api_key` from `ANTHROPIC_FOUNDRY_API_KEY`
- `resource` from `ANTHROPIC_FOUNDRY_RESOURCE`
- `base_url` from `ANTHROPIC_FOUNDRY_BASE_URL`
Args:
resource: Your Foundry resource name, e.g. `example-resource` for `https://example-resource.services.ai.azure.com/anthropic/`
azure_ad_token_provider: A function that returns an Azure Active Directory token, will be invoked on every request.
"""
api_key = api_key if api_key is not None else os.environ.get("ANTHROPIC_FOUNDRY_API_KEY")
resource = resource if resource is not None else os.environ.get("ANTHROPIC_FOUNDRY_RESOURCE")
base_url = base_url if base_url is not None else os.environ.get("ANTHROPIC_FOUNDRY_BASE_URL")
if api_key is None and azure_ad_token_provider is None:
raise AnthropicError(
"Missing credentials. Please pass one of `api_key`, `azure_ad_token_provider`, or the `ANTHROPIC_FOUNDRY_API_KEY` environment variable."
)
if base_url is None:
if resource is None:
raise ValueError(
"Must provide one of the `base_url` or `resource` arguments, or the `ANTHROPIC_FOUNDRY_RESOURCE` environment variable"
)
base_url = f"https://{resource}.services.ai.azure.com/anthropic/"
elif resource is not None:
raise ValueError("base_url and resource are mutually exclusive")
super().__init__(
api_key=api_key,
base_url=base_url,
timeout=timeout,
max_retries=max_retries,
default_headers=default_headers,
default_query=default_query,
http_client=http_client,
_strict_response_validation=_strict_response_validation,
)
self._azure_ad_token_provider = azure_ad_token_provider
@cached_property
@override
def models(self) -> None: # type: ignore[override]
"""Models endpoint is not supported for Azure Anthropic client."""
return None
@cached_property
@override
def messages(self) -> AsyncMessagesFoundry: # type: ignore[override]
"""Return messages resource instance with excluded unsupported endpoints."""
return AsyncMessagesFoundry(client=self)
@cached_property
@override
def beta(self) -> AsyncBetaFoundry: # type: ignore[override]
"""Return beta resource instance with excluded unsupported endpoints."""
return AsyncBetaFoundry(client=self)
@override
def copy(
self,
*,
api_key: str | None = None,
azure_ad_token_provider: AsyncAzureADTokenProvider | None = None,
auth_token: str | None = None,
base_url: str | httpx.URL | None = None,
timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
http_client: httpx.AsyncClient | None = None,
max_retries: int | NotGiven = NOT_GIVEN,
default_headers: Mapping[str, str] | None = None,
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
set_default_query: Mapping[str, object] | None = None,
_extra_kwargs: Mapping[str, Any] = {},
) -> Self:
"""
Create a new client instance re-using the same options given to the current client with optional overriding.
"""
return super().copy(
api_key=api_key,
auth_token=auth_token,
base_url=base_url,
timeout=timeout,
http_client=http_client,
max_retries=max_retries,
default_headers=default_headers,
set_default_headers=set_default_headers,
default_query=default_query,
set_default_query=set_default_query,
_extra_kwargs={
"azure_ad_token_provider": azure_ad_token_provider or self._azure_ad_token_provider,
**_extra_kwargs,
},
)
with_options = copy
async def _get_azure_ad_token(self) -> str | None:
provider = self._azure_ad_token_provider
if provider is not None:
token = provider()
if inspect.isawaitable(token):
token = await token
if not token or not isinstance(cast(Any, token), str):
raise ValueError(
f"Expected `azure_ad_token_provider` argument to return a string but it returned {token}",
)
return str(token)
return None
@override
async def _prepare_options(self, options: FinalRequestOptions) -> FinalRequestOptions:
headers: dict[str, str | Omit] = {**options.headers} if is_given(options.headers) else {}
options = model_copy(options)
options.headers = headers
azure_ad_token = await self._get_azure_ad_token()
if azure_ad_token is not None:
if headers.get("Authorization") is None:
headers["Authorization"] = f"Bearer {azure_ad_token}"
elif self.api_key is not None:
assert self.api_key is not None
if headers.get("api-key") is None:
headers["api-key"] = self.api_key
else:
# should never be hit
raise ValueError("Unable to handle auth")
return options
| AsyncAnthropicFoundry |
python | facebook__pyre-check | client/json_rpc.py | {
"start": 3735,
"end": 6678
} | class ____(JSONRPC):
method: str
id: Union[int, str, None] = None
activity_key: Optional[JSON] = None
parameters: Optional[Parameters] = None
def json(self) -> JSON:
parameters = self.parameters
return {
"jsonrpc": JSONRPC_VERSION,
"method": self.method,
**({"id": self.id} if self.id is not None else {}),
**(
{"activityKey": self.activity_key}
if self.activity_key is not None
else {}
),
**({"params": parameters.values} if parameters is not None else {}),
}
def extract_parameters(self) -> Parameters:
parameters = self.parameters
if parameters is None:
raise InvalidRequestError(
f"No parameters to extract for JSON-RPC {self.method} method"
)
return parameters
@staticmethod
def from_json(request_json: JSON) -> "Request":
"""
Parse a given JSON into a JSON-RPC request.
Raises `InvalidRequestError` and `InvalidParameterError` if the JSON
body is malformed.
"""
_verify_json_rpc_version(request_json)
method = request_json.get("method")
if method is None:
raise MissingMethodFieldInRequestError(
f"Required field `method` is missing: {request_json}"
)
if not isinstance(method, str):
raise InvalidRequestError(
f"`method` is expected to be a string but got {method}"
)
raw_parameters = request_json.get("params")
if raw_parameters is None:
parameters = None
elif isinstance(raw_parameters, list):
parameters = ByPositionParameters(raw_parameters)
elif isinstance(raw_parameters, dict):
parameters = ByNameParameters(raw_parameters)
else:
raise InvalidParameterError(
f"Cannot parse request parameter JSON: {raw_parameters}"
)
id = _parse_json_rpc_id(request_json)
activity_key = _parse_json_rpc_activity_key(request_json)
return Request(
method=method, id=id, activity_key=activity_key, parameters=parameters
)
@staticmethod
def from_string(request_string: str) -> "Request":
"""
Parse a given string into a JSON-RPC request.
- Raises `ParseError` if json parsing fails.
- Raises `InvalidRequestError` and `InvalidParameterError` if the
JSON body is malformed (in any way other than a missing `method`)
"""
try:
request_json = json.loads(request_string)
return Request.from_json(request_json)
except JSONDecodeError as error:
message = f"Cannot parse string into JSON: {error}"
raise ParseError(message) from error
@dataclasses.dataclass(frozen=True)
| Request |
python | mlflow__mlflow | mlflow/models/evaluation/utils/metric.py | {
"start": 258,
"end": 4532
} | class ____:
"""
A dataclass representing a metric definition used in model evaluation.
Attributes:
function: The metric function to be called for evaluation.
name: The name of the metric.
index: The index of the metric in the ``extra_metrics`` argument of ``mlflow.evaluate``.
version: (Optional) The metric version. For example v1.
genai_metric_args: (Optional) A dictionary containing arguments specified by users when
calling make_genai_metric or make_genai_metric_from_prompt.
Those args are persisted so that we can deserialize the same metric object later.
"""
function: Callable[..., Any]
name: str
index: int
version: str | None = None
genai_metric_args: dict[str, Any] | None = None
@classmethod
def from_index_and_metric(cls, index: int, metric: EvaluationMetric):
return cls(
function=metric.eval_fn,
index=index,
name=metric.name,
version=metric.version,
genai_metric_args=metric.genai_metric_args,
)
def evaluate(self, eval_fn_args) -> MetricValue | None:
"""
This function calls the metric function and performs validations on the returned
result to ensure that they are in the expected format. It will warn and will not log metrics
that are in the wrong format.
Args:
eval_fn_args: A dictionary of args needed to compute the eval metrics.
Returns:
MetricValue
"""
if self.index < 0:
exception_header = f"Did not log builtin metric '{self.name}' because it"
else:
exception_header = (
f"Did not log metric '{self.name}' at index "
f"{self.index} in the `extra_metrics` parameter because it"
)
metric: MetricValue = self.function(*eval_fn_args)
def _is_numeric(value):
return isinstance(value, (int, float, np.number))
def _is_string(value):
return isinstance(value, str)
if metric is None:
_logger.warning(f"{exception_header} returned None.")
return
if _is_numeric(metric):
return MetricValue(aggregate_results={self.name: metric})
if not isinstance(metric, MetricValue):
_logger.warning(f"{exception_header} did not return a MetricValue.")
return
scores = metric.scores
justifications = metric.justifications
aggregates = metric.aggregate_results
if scores is not None:
if not isinstance(scores, list):
_logger.warning(
f"{exception_header} must return MetricValue with scores as a list."
)
return
if any(not (_is_numeric(s) or _is_string(s) or s is None) for s in scores):
_logger.warning(
f"{exception_header} must return MetricValue with numeric or string scores."
)
return
if justifications is not None:
if not isinstance(justifications, list):
_logger.warning(
f"{exception_header} must return MetricValue with justifications as a list."
)
return
if any(not (_is_string(just) or just is None) for just in justifications):
_logger.warning(
f"{exception_header} must return MetricValue with string justifications."
)
return
if aggregates is not None:
if not isinstance(aggregates, dict):
_logger.warning(
f"{exception_header} must return MetricValue with aggregate_results as a dict."
)
return
if any(
not (isinstance(k, str) and (_is_numeric(v) or v is None))
for k, v in aggregates.items()
):
_logger.warning(
f"{exception_header} must return MetricValue with aggregate_results with "
"str keys and numeric values."
)
return
return metric
| MetricDefinition |
python | OmkarPathak__pygorithm | pygorithm/pathfinding/astar.py | {
"start": 11557,
"end": 24762
} | class ____(object):
"""BiDirectionalAStar object
Finds the optimal path between two nodes on a graph while taking
account weights. Expands from the start node and the end node
simultaneously
"""
class NodeSource(Enum):
"""NodeSource enum
Used to distinguish how a node was located
"""
BY_START = 1,
BY_END = 2
def __init__(self):
pass
@staticmethod
def reverse_path(node_from_start, node_from_end):
"""
Reconstructs the path formed by walking from
node_from_start backward to start and combining
it with the path formed by walking from
node_from_end to end. Both the start and end are
detected where 'parent' is None.
:param node_from_start: dict containing { 'vertex': any hashable, 'parent': dict or None }
:param node_from_end: dict containing { 'vertex' any hashable, 'parent': dict or None }
:return: list of vertices starting at the start and ending at the end
"""
list_from_start = []
current = node_from_start
while current is not None:
list_from_start.append(current['vertex'])
current = current['parent']
list_from_start.reverse()
list_from_end = []
current = node_from_end
while current is not None:
list_from_end.append(current['vertex'])
current = current['parent']
return list_from_start + list_from_end
def find_path(self, graph, start, end, heuristic_fn):
"""
Calculates the optimal path from the start to the end. The
search occurs from both the start and end at the same rate,
which makes this algorithm have more consistent performance
if you regularly are trying to find paths where the destination
is unreachable and in a small room.
The heuristic requirements are the same as in unidirectional A*
(it must be admissable).
:param graph: the graph with 'graph' and 'get_edge_weight' (see WeightedUndirectedGraph)
:param start: the start vertex (must be hashable and same type as the graph)
:param end: the end vertex (must be hashable and same type as the graph)
:param heuristic_fn: an admissable heuristic. signature: function(graph, start, end) returns numeric
:return: a list of vertices starting at start ending at end or None
"""
# This algorithm is really just repeating unidirectional A* twice,
# but unfortunately it's just different enough that it requires
# even more work to try to make a single function that can be called
# twice.
# Note: The nodes in by_start will have heuristic distance to the end,
# whereas the nodes in by_end will have heuristic distance to the start.
# This means that the total predicted distance for the exact same node
# might not match depending on which side we found it from. However,
# it won't make a difference since as soon as we evaluate the same node
# on both sides we've finished.
#
# This also means that we can use the same lookup table for both.
open_by_start = []
open_by_end = []
open_lookup = {}
closed = set()
# used to avoid hashing the dict.
counter_arr = [0]
total_heur_distance = heuristic_fn(graph, start, end)
heapq.heappush(open_by_start, (total_heur_distance, counter_arr[0], start))
counter_arr[0] += 1
open_lookup[start] = { 'vertex': start,
'parent': None,
'source': self.NodeSource.BY_START,
'dist_start_to_here': 0,
'pred_dist_here_to_end': total_heur_distance,
'pred_total_dist': total_heur_distance }
heapq.heappush(open_by_end, (total_heur_distance, counter_arr, end))
counter_arr[0] += 1
open_lookup[end] = { 'vertex': end,
'parent': None,
'source': self.NodeSource.BY_END,
'dist_end_to_here': 0,
'pred_dist_here_to_start': total_heur_distance,
'pred_total_dist': total_heur_distance }
# If the start runs out then the start is in a closed room,
# if the end runs out then the end is in a closed room,
# either way there is no path from start to end.
while len(open_by_start) > 0 and len(open_by_end) > 0:
result = self._evaluate_from_start(graph, start, end, heuristic_fn, open_by_start, open_by_end, open_lookup, closed, counter_arr)
if result is not None:
return result
result = self._evaluate_from_end(graph, start, end, heuristic_fn, open_by_start, open_by_end, open_lookup, closed, counter_arr)
if result is not None:
return result
return None
def _evaluate_from_start(self, graph, start, end, heuristic_fn, open_by_start, open_by_end, open_lookup, closed, counter_arr):
"""
Intended for internal use only. Expands one node from the open_by_start list.
:param graph: the graph (see WeightedUndirectedGraph)
:param start: the start node
:param end: the end node
:heuristic_fn: the heuristic function (signature function(graph, start, end) returns numeric)
:open_by_start: the open vertices from the start
:open_by_end: the open vertices from the end
:open_lookup: dictionary of vertices -> dicts
:closed: the already expanded vertices (set)
:counter_arr: arr of one integer (counter)
"""
current = heapq.heappop(open_by_start)
current_vertex = current[2]
current_dict = open_lookup[current_vertex]
del open_lookup[current_vertex]
closed.update(current_vertex)
neighbors = graph.graph[current_vertex]
for neighbor in neighbors:
if neighbor in closed:
continue
neighbor_dict = open_lookup.get(neighbor, None)
if neighbor_dict is not None and neighbor_dict['source'] is self.NodeSource.BY_END:
return self.reverse_path(current_dict, neighbor_dict)
dist_to_neighb_through_curr_from_start = current_dict['dist_start_to_here'] \
+ graph.get_edge_weight(current_vertex, neighbor)
if neighbor_dict is not None:
assert(neighbor_dict['source'] is self.NodeSource.BY_START)
if neighbor_dict['dist_start_to_here'] <= dist_to_neighb_through_curr_from_start:
continue
pred_dist_neighbor_to_end = neighbor_dict['pred_dist_here_to_end']
pred_total_dist_through_neighbor = dist_to_neighb_through_curr_from_start + pred_dist_neighbor_to_end
open_lookup[neighbor] = { 'vertex': neighbor,
'parent': current_dict,
'source': self.NodeSource.BY_START,
'dist_start_to_here': dist_to_neighb_through_curr_from_start,
'pred_dist_here_to_end': pred_dist_neighbor_to_end,
'pred_total_dist': pred_total_dist_through_neighbor }
# TODO: I'm pretty sure theres a faster way to do this
found = None
for i in range(0, len(open_by_start)):
if open_by_start[i][2] == neighbor:
found = i
break
assert(found is not None)
open_by_start[found] = (pred_total_dist_through_neighbor, counter_arr[0], neighbor)
counter_arr[0] += 1
heapq.heapify(open_by_start)
continue
pred_dist_neighbor_to_end = heuristic_fn(graph, neighbor, end)
pred_total_dist_through_neighbor = dist_to_neighb_through_curr_from_start + pred_dist_neighbor_to_end
open_lookup[neighbor] = { 'vertex': neighbor,
'parent': current_dict,
'source': self.NodeSource.BY_START,
'dist_start_to_here': dist_to_neighb_through_curr_from_start,
'pred_dist_here_to_end': pred_dist_neighbor_to_end,
'pred_total_dist': pred_total_dist_through_neighbor }
heapq.heappush(open_by_start, (pred_total_dist_through_neighbor, counter_arr[0], neighbor))
counter_arr[0] += 1
def _evaluate_from_end(self, graph, start, end, heuristic_fn, open_by_start, open_by_end, open_lookup, closed, counter_arr):
"""
Intended for internal use only. Expands one node from the open_by_end list.
:param graph: the graph (see WeightedUndirectedGraph)
:param start: the start node
:param end: the end node
:heuristic_fn: the heuristic function (signature function(graph, start, end) returns numeric)
:open_by_start: the open vertices from the start
:open_by_end: the open vertices from the end
:open_lookup: dictionary of vertices -> dicts
:closed: the already expanded vertices (set)
:counter_arr: arr of one integer (counter)
"""
current = heapq.heappop(open_by_end)
current_vertex = current[2]
current_dict = open_lookup[current_vertex]
del open_lookup[current_vertex]
closed.update(current_vertex)
neighbors = graph.graph[current_vertex]
for neighbor in neighbors:
if neighbor in closed:
continue
neighbor_dict = open_lookup.get(neighbor, None)
if neighbor_dict is not None and neighbor_dict['source'] is self.NodeSource.BY_START:
return self.reverse_path(neighbor_dict, current_dict)
dist_to_neighb_through_curr_from_end = current_dict['dist_end_to_here'] \
+ graph.get_edge_weight(current_vertex, neighbor)
if neighbor_dict is not None:
assert(neighbor_dict['source'] is self.NodeSource.BY_END)
if neighbor_dict['dist_end_to_here'] <= dist_to_neighb_through_curr_from_end:
continue
pred_dist_neighbor_to_start = neighbor_dict['pred_dist_here_to_start']
pred_total_dist_through_neighbor = dist_to_neighb_through_curr_from_end + pred_dist_neighbor_to_start
open_lookup[neighbor] = { 'vertex': neighbor,
'parent': current_dict,
'source': self.NodeSource.BY_END,
'dist_end_to_here': dist_to_neighb_through_curr_from_end,
'pred_dist_here_to_start': pred_dist_neighbor_to_start,
'pred_total_dist': pred_total_dist_through_neighbor }
# TODO: I'm pretty sure theres a faster way to do this
found = None
for i in range(0, len(open_by_end)):
if open_by_end[i][2] == neighbor:
found = i
break
assert(found is not None)
open_by_end[found] = (pred_total_dist_through_neighbor, counter_arr[0], neighbor)
counter_arr[0] += 1
heapq.heapify(open_by_end)
continue
pred_dist_neighbor_to_start = heuristic_fn(graph, neighbor, start)
pred_total_dist_through_neighbor = dist_to_neighb_through_curr_from_end + pred_dist_neighbor_to_start
open_lookup[neighbor] = { 'vertex': neighbor,
'parent': current_dict,
'source': self.NodeSource.BY_END,
'dist_end_to_here': dist_to_neighb_through_curr_from_end,
'pred_dist_here_to_start': pred_dist_neighbor_to_start,
'pred_total_dist': pred_total_dist_through_neighbor }
heapq.heappush(open_by_end, (pred_total_dist_through_neighbor, counter_arr[0], neighbor))
counter_arr[0] += 1
@staticmethod
def get_code():
"""
returns the code for the current class
"""
return inspect.getsource(BiDirectionalAStar) | BiDirectionalAStar |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 65343,
"end": 65765
} | class ____(GenericFunction[decimal.Decimal]):
"""Implement the ``cume_dist`` hypothetical-set aggregate function.
This function must be used with the :meth:`.FunctionElement.within_group`
modifier to supply a sort expression to operate upon.
The return type of this function is :class:`.Numeric`.
"""
type: sqltypes.Numeric[decimal.Decimal] = sqltypes.Numeric()
inherit_cache = True
| cume_dist |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_health/asset_materialization_health.py | {
"start": 14835,
"end": 21597
} | class ____:
failed_run_id: Optional[str]
AssetHealthMaterializationMetadata = Union[
AssetHealthMaterializationDegradedPartitionedMeta,
AssetHealthMaterializationHealthyPartitionedMeta,
AssetHealthMaterializationDegradedNotPartitionedMeta,
]
async def get_materialization_status_and_metadata(
context: "BaseWorkspaceRequestContext", asset_key: AssetKey
) -> tuple[AssetHealthStatus, Optional["AssetHealthMaterializationMetadata"]]:
"""Gets an AssetMaterializationHealthState object for an asset, either via streamline or by computing
it based on the state of the DB. Then converts it to a AssetHealthStatus and the metadata
needed to power the UIs. Metadata is fetched from the AssetLatestMaterializationState object, again
either via streamline or by computing it based on the state of the DB.
"""
asset_materialization_health_state = await MinimalAssetMaterializationHealthState.gen(
context, asset_key
)
if asset_materialization_health_state is None:
# if the minimal health stat does not exist, try fetching the full health state. It's possible that
# deserializing the full health state is non-performant since it contains a serialized entity subset, which
# is why we only fetch it if the minimal health state does not exist.
slow_deserialize_asset_materialization_health_state = (
await AssetMaterializationHealthState.gen(context, asset_key)
)
if slow_deserialize_asset_materialization_health_state is not None:
asset_materialization_health_state = (
MinimalAssetMaterializationHealthState.from_asset_materialization_health_state(
slow_deserialize_asset_materialization_health_state
)
)
# captures streamline disabled or consumer state doesn't exist
if asset_materialization_health_state is None:
if context.instance.streamline_read_asset_health_required("asset-materialization-health"):
return AssetHealthStatus.UNKNOWN, None
if not context.asset_graph.has(asset_key):
# if the asset is not in the asset graph, it could be because materializations are reported by
# an external system, determine the status as best we can based on the asset record
asset_record = await AssetRecord.gen(context, asset_key)
if asset_record is None:
return AssetHealthStatus.UNKNOWN, None
has_ever_materialized = asset_record.asset_entry.last_materialization is not None
is_currently_failed, run_id = await _get_is_currently_failed_and_latest_terminal_run_id(
context, asset_record
)
if is_currently_failed:
meta = AssetHealthMaterializationDegradedNotPartitionedMeta(
failed_run_id=run_id,
)
return AssetHealthStatus.DEGRADED, meta
if has_ever_materialized:
return AssetHealthStatus.HEALTHY, None
else:
if asset_record.asset_entry.last_observation is not None:
return AssetHealthStatus.HEALTHY, None
return AssetHealthStatus.UNKNOWN, None
node_snap = context.asset_graph.get(asset_key)
if node_snap.is_observable and not node_snap.is_materializable: # observable source asset
# get the asset record to see if there is an observation event
asset_record = await AssetRecord.gen(context, asset_key)
if asset_record and asset_record.asset_entry.last_observation is not None:
return AssetHealthStatus.HEALTHY, None
return AssetHealthStatus.UNKNOWN, None
asset_materialization_health_state = (
MinimalAssetMaterializationHealthState.from_asset_materialization_health_state(
await AssetMaterializationHealthState.compute_for_asset(
asset_key,
node_snap.partitions_def,
context,
)
)
)
if asset_materialization_health_state.health_status == AssetHealthStatus.HEALTHY:
num_missing = 0
total_num_partitions = 0
if asset_materialization_health_state.partitions_def is not None:
with partition_loading_context(dynamic_partitions_store=context.instance):
total_num_partitions = (
asset_materialization_health_state.partitions_def.get_num_partitions()
)
# asset is healthy, so no partitions are failed
num_missing = (
total_num_partitions
- asset_materialization_health_state.num_currently_materialized_partitions
)
if num_missing > 0 and total_num_partitions > 0:
meta = AssetHealthMaterializationHealthyPartitionedMeta(
num_missing_partitions=num_missing,
total_num_partitions=total_num_partitions,
)
else:
# captures the case when asset is not partitioned, or the asset is partitioned and all partitions are materialized
meta = None
return AssetHealthStatus.HEALTHY, meta
elif asset_materialization_health_state.health_status == AssetHealthStatus.DEGRADED:
if asset_materialization_health_state.partitions_def is not None:
with partition_loading_context(dynamic_partitions_store=context.instance):
total_num_partitions = (
asset_materialization_health_state.partitions_def.get_num_partitions()
)
num_missing = (
total_num_partitions
- asset_materialization_health_state.num_currently_materialized_partitions
- asset_materialization_health_state.num_failed_partitions
)
meta = AssetHealthMaterializationDegradedPartitionedMeta(
num_failed_partitions=asset_materialization_health_state.num_failed_partitions,
num_missing_partitions=num_missing,
total_num_partitions=total_num_partitions,
)
else:
meta = AssetHealthMaterializationDegradedNotPartitionedMeta(
failed_run_id=asset_materialization_health_state.latest_terminal_run_id,
)
return AssetHealthStatus.DEGRADED, meta
elif asset_materialization_health_state.health_status == AssetHealthStatus.UNKNOWN:
return AssetHealthStatus.UNKNOWN, None
else:
check.failed(
f"Unexpected materialization health status: {asset_materialization_health_state.health_status}"
)
| AssetHealthMaterializationDegradedNotPartitionedMeta |
python | tensorflow__tensorflow | tensorflow/python/framework/convert_to_constants.py | {
"start": 18304,
"end": 22410
} | class ____(_Node):
"""A base class for Convertibles that reference functions."""
def __init__(self, node, function, enclosing_graph, first_function_input,
type_attribute, function_attributes):
"""Initializes a _FunctionCaller.
Args:
node: As in _Node.
function: As in _Node.
enclosing_graph: As in _Node.
first_function_input: The index of the first NodeDef input that is tied to
the function inputs. It is assumed that the rest of the NodeDef inputs
map one to one to function inputs.
type_attribute: The name of the NodeDef attribute that defines the input
types. It is assumed that the types listed here map one-to-one with the
function inputs (that is, they do _not_ specify types for inputs that
are not passed to functions).
function_attributes: The names of the NodeDef attributes containing
references to functions.
"""
super(_FunctionCaller, self).__init__(node, function, enclosing_graph)
self._first_function_input = first_function_input
self._type_attribute = type_attribute
self._function_attributes = function_attributes
def converted_self(self):
if self._converted_self is None:
node = super(_FunctionCaller, self).converted_self().node
converted_names = self._enclosing_graph.converted_function_names
for attr_name in self._function_attributes:
attr = node.attr[attr_name]
if attr.HasField(
"func") and self._enclosing_graph.is_converted_function(
attr.func.name):
attr.func.name = converted_names[attr.func.name]
elif attr.HasField("list"):
for func in attr.list.func:
if self._enclosing_graph.is_converted_function(func.name):
func.name = converted_names[func.name]
return self._converted_self
def convert_variable_to_constant(self, incoming_edge, tensor_data):
index = incoming_edge.destination.index
# The loop below is reasonable but not correct in general:
# The outgoing edges going into the functions are correct, because the
# inputs map to the function inputs. But the edges going into other nodes do
# not take into account the logic of the body function, which may do
# arbitrary things to the node's output:
#
# while x < 0:
# return y
#
# In this case, the node's ":0" output may map to its ":1 input". For the
# time being, then, we only process edges into functions.
for edge in self.outgoing_edges:
dest = edge.destination.convertible
if edge.source.index == index and isinstance(dest, _Function):
dest.convert_variable_to_constant(edge, tensor_data)
node = self.converted_self()
if index >= self._first_function_input:
node.update_dtype(self._type_attribute,
index - self._first_function_input, tensor_data.dtype)
def create_edges(self):
"""Creates edges related to a function caller.
Edges from a function caller to its called functions are always edges from
_inputs_ to _inputs_: a FunctionDef input is given by the caller, based on
its own inputs.
"""
super(_FunctionCaller, self).create_edges()
for attr_name in self._function_attributes:
attr = self._node.attr[attr_name]
if attr.HasField("func"):
function = self._enclosing_graph.functions[attr.func.name]
for index in range(len(self._node.input) - self._first_function_input):
self.add_outgoing_edge(
_Edge(
_EndPoint(self, index + self._first_function_input),
_EndPoint(function, index)))
elif attr.HasField("list"):
for func in attr.list.func:
function = self._enclosing_graph.functions[func.name]
for index in range(
len(self._node.input) - self._first_function_input):
self.add_outgoing_edge(
_Edge(
_EndPoint(self, index + self._first_function_input),
_EndPoint(function, index)))
| _FunctionCaller |
python | pypa__setuptools | setuptools/_vendor/importlib_metadata/__init__.py | {
"start": 20157,
"end": 22286
} | class ____(MetaPathFinder):
"""
A MetaPathFinder capable of discovering installed distributions.
Custom providers should implement this interface in order to
supply metadata.
"""
class Context:
"""
Keyword arguments presented by the caller to
``distributions()`` or ``Distribution.discover()``
to narrow the scope of a search for distributions
in all DistributionFinders.
Each DistributionFinder may expect any parameters
and should attempt to honor the canonical
parameters defined below when appropriate.
This mechanism gives a custom provider a means to
solicit additional details from the caller beyond
"name" and "path" when searching distributions.
For example, imagine a provider that exposes suites
of packages in either a "public" or "private" ``realm``.
A caller may wish to query only for distributions in
a particular realm and could call
``distributions(realm="private")`` to signal to the
custom provider to only include distributions from that
realm.
"""
name = None
"""
Specific name for which a distribution finder should match.
A name of ``None`` matches all distributions.
"""
def __init__(self, **kwargs):
vars(self).update(kwargs)
@property
def path(self) -> List[str]:
"""
The sequence of directory path that a distribution finder
should search.
Typically refers to Python installed package paths such as
"site-packages" directories and defaults to ``sys.path``.
"""
return vars(self).get('path', sys.path)
@abc.abstractmethod
def find_distributions(self, context=Context()) -> Iterable[Distribution]:
"""
Find distributions.
Return an iterable of all Distribution instances capable of
loading the metadata for packages matching the ``context``,
a DistributionFinder.Context instance.
"""
| DistributionFinder |
python | matplotlib__matplotlib | lib/matplotlib/legend_handler.py | {
"start": 18228,
"end": 18613
} | class ____(HandlerRegularPolyCollection):
r"""Handler for `.PathCollection`\s, which are used by `~.Axes.scatter`."""
def create_collection(self, orig_handle, sizes, offsets, offset_transform):
return type(orig_handle)(
[orig_handle.get_paths()[0]], sizes=sizes,
offsets=offsets, offset_transform=offset_transform,
)
| HandlerPathCollection |
python | ray-project__ray | python/ray/data/tests/test_predicate_pushdown.py | {
"start": 15103,
"end": 18447
} | class ____:
"""Tests for PASSTHROUGH behavior operators.
Operators: Sort, Repartition, RandomShuffle, Limit
Predicates pass through unchanged - operators don't affect filtering.
"""
@pytest.fixture
def base_ds(self, ray_start_regular_shared):
return ray.data.range(100)
@pytest.mark.parametrize(
"transform,expected_op_type",
[
(lambda ds: ds.sort("id"), "Sort"),
(lambda ds: ds.repartition(10), "Repartition"),
(lambda ds: ds.random_shuffle(), "RandomShuffle"),
(lambda ds: ds.limit(50), "Limit"),
],
ids=["sort", "repartition", "random_shuffle", "limit"],
)
def test_filter_pushes_through_operator(self, base_ds, transform, expected_op_type):
"""Filter should push through passthrough operators."""
ds = transform(base_ds).filter(expr=col("id") < 10)
# Verify correctness against expected result
expected = base_ds.filter(expr=col("id") < 10)
assert rows_same(ds.to_pandas(), expected.to_pandas())
# Filter pushed down, operator remains
optimized_plan = LogicalOptimizer().optimize(ds._plan._logical_plan)
assert plan_has_operator(
optimized_plan, Filter
), "Filter should exist after pushdown"
# Verify the passthrough operator is still present
op_types = get_operator_types(optimized_plan)
assert expected_op_type in op_types, f"{expected_op_type} should remain in plan"
def test_filter_pushes_through_multiple_ops(self, base_ds):
"""Filter should push through multiple passthrough operators."""
ds = base_ds.sort("id").repartition(5).limit(50).filter(expr=col("id") < 10)
# Verify correctness against expected result
expected = base_ds.filter(expr=col("id") < 10)
assert rows_same(ds.to_pandas(), expected.to_pandas())
# Verify plan: filter pushed down, all operators remain
optimized_plan = LogicalOptimizer().optimize(ds._plan._logical_plan)
assert plan_has_operator(optimized_plan, Filter), "Filter should exist"
assert plan_has_operator(optimized_plan, Sort), "Sort should remain"
assert plan_has_operator(
optimized_plan, Repartition
), "Repartition should remain"
assert plan_has_operator(optimized_plan, Limit), "Limit should remain"
def test_multiple_filters_fuse_and_push_through(self, base_ds):
"""Multiple filters should fuse and push through passthrough operators."""
ds = base_ds.filter(expr=col("id") > 5).sort("id").filter(expr=col("id") < 20)
# Verify correctness against expected result
expected = base_ds.filter(expr=(col("id") > 5) & (col("id") < 20))
assert rows_same(ds.to_pandas(), expected.to_pandas())
# Verify plan: filters fused and pushed, Sort remains
optimized_plan = LogicalOptimizer().optimize(ds._plan._logical_plan)
filters = get_operators_of_type(optimized_plan, Filter)
assert len(filters) == 1, "Multiple filters should be fused into one"
assert plan_has_operator(optimized_plan, Sort), "Sort should remain"
assert plan_operator_comes_before(
optimized_plan, Filter, Sort
), "Fused filter should come before Sort"
| TestPassthroughBehavior |
python | dagster-io__dagster | python_modules/libraries/dagster-airlift/dagster_airlift/core/serialization/serialized_data.py | {
"start": 3963,
"end": 4239
} | class ____:
"""A record containing pre-computed data about a given airflow dag."""
dag_id: str
dag_info: DagInfo
source_code: Optional[str]
leaf_asset_keys: set[AssetKey]
task_infos: Mapping[str, TaskInfo]
@whitelist_for_serdes
@record
| SerializedDagData |
python | numba__numba | numba/cuda/stubs.py | {
"start": 1217,
"end": 1507
} | class ____(Dim3):
'''
The thread indices in the current thread block. Each index is an integer
spanning the range from 0 inclusive to the corresponding value of the
attribute in :attr:`numba.cuda.blockDim` exclusive.
'''
_description_ = '<threadIdx.{x,y,z}>'
| threadIdx |
python | django__django | tests/test_client_regress/tests.py | {
"start": 33206,
"end": 33634
} | class ____(SimpleTestCase):
def test_urlconf_was_reverted(self):
"""URLconf is reverted to original value after modification in a
TestCase
This will not find a match as the default ROOT_URLCONF is empty.
"""
with self.assertRaises(NoReverseMatch):
reverse("arg_view", args=["somename"])
@override_settings(ROOT_URLCONF="test_client_regress.urls")
| zzUrlconfSubstitutionTests |
python | openai__openai-python | src/openai/types/vector_stores/file_batch_create_params.py | {
"start": 378,
"end": 1808
} | class ____(TypedDict, total=False):
attributes: Optional[Dict[str, Union[str, float, bool]]]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard. Keys are
strings with a maximum length of 64 characters. Values are strings with a
maximum length of 512 characters, booleans, or numbers.
"""
chunking_strategy: FileChunkingStrategyParam
"""The chunking strategy used to chunk the file(s).
If not set, will use the `auto` strategy. Only applicable if `file_ids` is
non-empty.
"""
file_ids: SequenceNotStr[str]
"""
A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
the vector store should use. Useful for tools like `file_search` that can access
files. If `attributes` or `chunking_strategy` are provided, they will be applied
to all files in the batch. Mutually exclusive with `files`.
"""
files: Iterable[File]
"""
A list of objects that each include a `file_id` plus optional `attributes` or
`chunking_strategy`. Use this when you need to override metadata for specific
files. The global `attributes` or `chunking_strategy` will be ignored and must
be specified for each file. Mutually exclusive with `file_ids`.
"""
| FileBatchCreateParams |
python | django__django | tests/backends/sqlite/tests.py | {
"start": 9593,
"end": 10221
} | class ____(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
def test_parameter_escaping(self):
# '%s' escaping support for sqlite3 (#13648).
with connection.cursor() as cursor:
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
# response should be an non-zero integer
self.assertTrue(int(response))
@unittest.skipUnless(connection.vendor == "sqlite", "SQLite tests")
@override_settings(DEBUG=True)
| EscapingChecks |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-outbrain-amplify/source_outbrain_amplify/source.py | {
"start": 26913,
"end": 29683
} | class ____(OutbrainAmplifyStream, HttpSubStream):
primary_key = None
def __init__(self, authenticator, config, parent: Marketers, **kwargs):
super().__init__(parent=parent, **kwargs)
self.config = config
self._authenticator = authenticator
self._session = requests.sessions.Session()
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
return {}
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def stream_slices(
self, sync_mode: SyncMode.full_refresh, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
parent_stream_slices = self.parent.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state
)
for stream_slice in parent_stream_slices:
parent_records = self.parent.read_records(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
)
for record in parent_records:
yield {"marketer_id": record.get("id")}
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
if response.json():
for x in response.json().get("results"):
x["marketer_id"] = stream_slice["marketer_id"]
yield x
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
stream_start, stream_end = self._get_time_interval(self.config.get("start_date"), self.config.get("end_date"))
stream_conversion_count = self._get_bool_conversion_count_by_click_date(
self.config.get("conversion_count", DEFAULT_REPORT_CONVERSION_COUNT_BY_CLICK_DATE)
)
return (
f"reports/marketers/{stream_slice['marketer_id']}/publishers?from="
+ str(stream_start.date())
+ "&to="
+ str(stream_end.date())
+ "&limit=500"
+ "&includeVideoStats=true"
+ "&conversionsByClickDate="
+ str(stream_conversion_count)
)
# Retrieve performance statistics for all marketer campaigns by publisher
# A special endpoint for retrieving publishers data by campaign breakdown.
| PerformanceReportMarketersByPublisher |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-gcs/source_gcs/config_migrations.py | {
"start": 359,
"end": 2328
} | class ____:
message_repository: MessageRepository = InMemoryMessageRepository()
migrate_from_path = "service_account"
migrate_to_path = "credentials"
@classmethod
def should_migrate(cls, config: Mapping[str, Any]) -> bool:
return config.get(cls.migrate_from_path) and not config.get(cls.migrate_to_path)
@classmethod
def transform(cls, config: Mapping[str, Any]) -> Mapping[str, Any]:
config[cls.migrate_to_path] = {"service_account": config[cls.migrate_from_path], "auth_type": "Service"}
return config
@classmethod
def modify_and_save(cls, config_path: str, source: Source, config: Mapping[str, Any]) -> Mapping[str, Any]:
migrated_config = cls.transform(config)
source.write_config(migrated_config, config_path)
return migrated_config
@classmethod
def emit_control_message(cls, migrated_config: Mapping[str, Any]) -> None:
# add the Airbyte Control Message to message repo
cls.message_repository.emit_message(create_connector_config_control_message(migrated_config))
# emit the Airbyte Control Message from message queue to stdout
for message in cls.message_repository._message_queue:
print(message)
@classmethod
def migrate(cls, args: List[str], source: Source) -> None:
"""
This method checks the input args, should the config be migrated,
transform if necessary and emit the CONTROL message.
"""
# get config path
config_path = AirbyteEntrypoint(source).extract_config(args)
# proceed only if `--config` arg is provided
if config_path:
# read the existing config
config = source.read_config(config_path)
# migration check
if cls.should_migrate(config):
cls.emit_control_message(
cls.modify_and_save(config_path, source, config),
)
| MigrateServiceAccount |
python | mlflow__mlflow | mlflow/utils/gorilla.py | {
"start": 2709,
"end": 3270
} | class ____:
"""Decorator data.
Attributes
----------
patches : list of gorilla.Patch
Patches created through the decorators.
override : dict
Any overriding value defined by the :func:`destination`, :func:`name`,
and :func:`settings` decorators.
filter : bool or None
Value defined by the :func:`filter` decorator, if any, or ``None``
otherwise.
"""
def __init__(self):
"""Constructor."""
self.patches = []
self.override = {}
self.filter = None
| DecoratorData |
python | huggingface__transformers | tests/models/codegen/test_modeling_codegen.py | {
"start": 12867,
"end": 17813
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (CodeGenModel, CodeGenForCausalLM) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": CodeGenModel, "text-generation": CodeGenForCausalLM} if is_torch_available() else {}
)
test_missing_keys = False
# special case for DoubleHeads model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
return inputs_dict
def setUp(self):
self.model_tester = CodeGenModelTester(self)
self.config_tester = ConfigTester(self, config_class=CodeGenConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_codegen_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_codegen_model(*config_and_inputs)
def test_codegen_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_codegen_model_past(*config_and_inputs)
def test_codegen_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_codegen_model_attention_mask_past(*config_and_inputs)
def test_codegen_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_codegen_model_past_large_inputs(*config_and_inputs)
def test_codegen_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_codegen_gradient_checkpointing(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
@slow
def test_batch_generation(self):
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
model = CodeGenForCausalLM.from_pretrained("Salesforce/codegen-350M-mono")
model.to(torch_device)
tokenizer.padding_side = "left"
# Define PAD Token = EOS Token = 50256
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id
# use different length sentences to test batching
sentences = ["def hello_world():", "def greet(name):"]
inputs = tokenizer(sentences, return_tensors="pt", padding=True)
input_ids = inputs["input_ids"].to(torch_device)
token_type_ids = torch.cat(
[
input_ids.new_full((input_ids.shape[0], input_ids.shape[1] - 1), 0),
input_ids.new_full((input_ids.shape[0], 1), 500),
],
dim=-1,
)
outputs = model.generate(
input_ids=input_ids,
attention_mask=inputs["attention_mask"].to(torch_device),
)
outputs_tt = model.generate(
input_ids=input_ids,
attention_mask=inputs["attention_mask"].to(torch_device),
token_type_ids=token_type_ids,
)
inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
output_non_padded = model.generate(input_ids=inputs_non_padded)
num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().item()
inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
batch_out_sentence_tt = tokenizer.batch_decode(outputs_tt, skip_special_tokens=True)
non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
expected_output_sentence = [
'def hello_world():\n print("Hello World")\n\nhellow_world()',
'def greet(name):\n print(f"Hello {name}")\n\ng',
]
self.assertListEqual(expected_output_sentence, batch_out_sentence)
self.assertTrue(batch_out_sentence_tt != batch_out_sentence) # token_type_ids should change output
self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
@slow
def test_model_from_pretrained(self):
model_name = "Salesforce/codegen-350M-nl"
model = CodeGenModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
| CodeGenModelTest |
python | zarr-developers__zarr-python | src/zarr/core/group.py | {
"start": 16988,
"end": 67622
} | class ____:
"""
Asynchronous Group object.
"""
metadata: GroupMetadata
store_path: StorePath
# TODO: make this correct and work
# TODO: ensure that this can be bound properly to subclass of AsyncGroup
@classmethod
async def from_store(
cls,
store: StoreLike,
*,
attributes: dict[str, Any] | None = None,
overwrite: bool = False,
zarr_format: ZarrFormat = 3,
) -> AsyncGroup:
store_path = await make_store_path(store)
if overwrite:
if store_path.store.supports_deletes:
await store_path.delete_dir()
else:
await ensure_no_existing_node(store_path, zarr_format=zarr_format)
else:
await ensure_no_existing_node(store_path, zarr_format=zarr_format)
attributes = attributes or {}
group = cls(
metadata=GroupMetadata(attributes=attributes, zarr_format=zarr_format),
store_path=store_path,
)
await group._save_metadata(ensure_parents=True)
return group
@classmethod
async def open(
cls,
store: StoreLike,
zarr_format: ZarrFormat | None = 3,
use_consolidated: bool | str | None = None,
) -> AsyncGroup:
"""Open a new AsyncGroup
Parameters
----------
store : StoreLike
zarr_format : {2, 3}, optional
use_consolidated : bool or str, default None
Whether to use consolidated metadata.
By default, consolidated metadata is used if it's present in the
store (in the ``zarr.json`` for Zarr format 3 and in the ``.zmetadata`` file
for Zarr format 2) and the Store supports it.
To explicitly require consolidated metadata, set ``use_consolidated=True``.
In this case, if the Store doesn't support consolidation or consolidated metadata is
not found, a ``ValueError`` exception is raised.
To explicitly *not* use consolidated metadata, set ``use_consolidated=False``,
which will fall back to using the regular, non consolidated metadata.
Zarr format 2 allowed configuring the key storing the consolidated metadata
(``.zmetadata`` by default). Specify the custom key as ``use_consolidated``
to load consolidated metadata from a non-default key.
"""
store_path = await make_store_path(store)
if not store_path.store.supports_consolidated_metadata:
# Fail if consolidated metadata was requested but the Store doesn't support it
if use_consolidated:
store_name = type(store_path.store).__name__
raise ValueError(
f"The Zarr store in use ({store_name}) doesn't support consolidated metadata."
)
# if use_consolidated was None (optional), the Store dictates it doesn't want consolidation
use_consolidated = False
consolidated_key = ZMETADATA_V2_JSON
if (zarr_format == 2 or zarr_format is None) and isinstance(use_consolidated, str):
consolidated_key = use_consolidated
if zarr_format == 2:
paths = [store_path / ZGROUP_JSON, store_path / ZATTRS_JSON]
if use_consolidated or use_consolidated is None:
paths.append(store_path / consolidated_key)
zgroup_bytes, zattrs_bytes, *rest = await asyncio.gather(
*[path.get() for path in paths]
)
if zgroup_bytes is None:
raise FileNotFoundError(store_path)
if use_consolidated or use_consolidated is None:
maybe_consolidated_metadata_bytes = rest[0]
else:
maybe_consolidated_metadata_bytes = None
elif zarr_format == 3:
zarr_json_bytes = await (store_path / ZARR_JSON).get()
if zarr_json_bytes is None:
raise FileNotFoundError(store_path)
elif zarr_format is None:
(
zarr_json_bytes,
zgroup_bytes,
zattrs_bytes,
maybe_consolidated_metadata_bytes,
) = await asyncio.gather(
(store_path / ZARR_JSON).get(),
(store_path / ZGROUP_JSON).get(),
(store_path / ZATTRS_JSON).get(),
(store_path / str(consolidated_key)).get(),
)
if zarr_json_bytes is not None and zgroup_bytes is not None:
# warn and favor v3
msg = f"Both zarr.json (Zarr format 3) and .zgroup (Zarr format 2) metadata objects exist at {store_path}. Zarr format 3 will be used."
warnings.warn(msg, category=ZarrUserWarning, stacklevel=1)
if zarr_json_bytes is None and zgroup_bytes is None:
raise FileNotFoundError(
f"could not find zarr.json or .zgroup objects in {store_path}"
)
# set zarr_format based on which keys were found
if zarr_json_bytes is not None:
zarr_format = 3
else:
zarr_format = 2
else:
msg = f"Invalid value for 'zarr_format'. Expected 2, 3, or None. Got '{zarr_format}'." # type: ignore[unreachable]
raise MetadataValidationError(msg)
if zarr_format == 2:
# this is checked above, asserting here for mypy
assert zgroup_bytes is not None
if use_consolidated and maybe_consolidated_metadata_bytes is None:
# the user requested consolidated metadata, but it was missing
raise ValueError(consolidated_key)
elif use_consolidated is False:
# the user explicitly opted out of consolidated_metadata.
# Discard anything we might have read.
maybe_consolidated_metadata_bytes = None
return cls._from_bytes_v2(
store_path, zgroup_bytes, zattrs_bytes, maybe_consolidated_metadata_bytes
)
else:
# V3 groups are comprised of a zarr.json object
assert zarr_json_bytes is not None
if not isinstance(use_consolidated, bool | None):
raise TypeError("use_consolidated must be a bool or None for Zarr format 3.")
return cls._from_bytes_v3(
store_path,
zarr_json_bytes,
use_consolidated=use_consolidated,
)
@classmethod
def _from_bytes_v2(
cls,
store_path: StorePath,
zgroup_bytes: Buffer,
zattrs_bytes: Buffer | None,
consolidated_metadata_bytes: Buffer | None,
) -> AsyncGroup:
# V2 groups are comprised of a .zgroup and .zattrs objects
zgroup = json.loads(zgroup_bytes.to_bytes())
zattrs = json.loads(zattrs_bytes.to_bytes()) if zattrs_bytes is not None else {}
group_metadata = {**zgroup, "attributes": zattrs}
if consolidated_metadata_bytes is not None:
v2_consolidated_metadata = json.loads(consolidated_metadata_bytes.to_bytes())
v2_consolidated_metadata = v2_consolidated_metadata["metadata"]
# We already read zattrs and zgroup. Should we ignore these?
v2_consolidated_metadata.pop(".zattrs", None)
v2_consolidated_metadata.pop(".zgroup", None)
consolidated_metadata: defaultdict[str, dict[str, Any]] = defaultdict(dict)
# keys like air/.zarray, air/.zattrs
for k, v in v2_consolidated_metadata.items():
path, kind = k.rsplit("/.", 1)
if kind == "zarray":
consolidated_metadata[path].update(v)
elif kind == "zattrs":
consolidated_metadata[path]["attributes"] = v
elif kind == "zgroup":
consolidated_metadata[path].update(v)
else:
raise ValueError(f"Invalid file type '{kind}' at path '{path}")
group_metadata["consolidated_metadata"] = {
"metadata": dict(consolidated_metadata),
"kind": "inline",
"must_understand": False,
}
return cls.from_dict(store_path, group_metadata)
@classmethod
def _from_bytes_v3(
cls,
store_path: StorePath,
zarr_json_bytes: Buffer,
use_consolidated: bool | None,
) -> AsyncGroup:
group_metadata = json.loads(zarr_json_bytes.to_bytes())
if use_consolidated and group_metadata.get("consolidated_metadata") is None:
msg = f"Consolidated metadata requested with 'use_consolidated=True' but not found in '{store_path.path}'."
raise ValueError(msg)
elif use_consolidated is False:
# Drop consolidated metadata if it's there.
group_metadata.pop("consolidated_metadata", None)
return cls.from_dict(store_path, group_metadata)
@classmethod
def from_dict(
cls,
store_path: StorePath,
data: dict[str, Any],
) -> AsyncGroup:
node_type = data.pop("node_type", None)
if node_type == "array":
msg = f"An array already exists in store {store_path.store} at path {store_path.path}."
raise ContainsArrayError(msg)
elif node_type not in ("group", None):
msg = f"Node type in metadata ({node_type}) is not 'group'"
raise GroupNotFoundError(msg)
return cls(
metadata=GroupMetadata.from_dict(data),
store_path=store_path,
)
async def setitem(self, key: str, value: Any) -> None:
"""
Fastpath for creating a new array
New arrays will be created with default array settings for the array type.
Parameters
----------
key : str
Array name
value : array-like
Array data
"""
path = self.store_path / key
await async_api.save_array(
store=path, arr=value, zarr_format=self.metadata.zarr_format, overwrite=True
)
async def getitem(
self,
key: str,
) -> AnyAsyncArray | AsyncGroup:
"""
Get a subarray or subgroup from the group.
Parameters
----------
key : str
Array or group name
Returns
-------
AsyncArray or AsyncGroup
"""
store_path = self.store_path / key
logger.debug("key=%s, store_path=%s", key, store_path)
# Consolidated metadata lets us avoid some I/O operations so try that first.
if self.metadata.consolidated_metadata is not None:
return self._getitem_consolidated(store_path, key, prefix=self.name)
try:
return await get_node(
store=store_path.store, path=store_path.path, zarr_format=self.metadata.zarr_format
)
except FileNotFoundError as e:
raise KeyError(key) from e
def _getitem_consolidated(
self, store_path: StorePath, key: str, prefix: str
) -> AnyAsyncArray | AsyncGroup:
# getitem, in the special case where we have consolidated metadata.
# Note that this is a regular def (non async) function.
# This shouldn't do any additional I/O.
# the caller needs to verify this!
assert self.metadata.consolidated_metadata is not None
# we support nested getitems like group/subgroup/array
indexers = normalize_path(key).split("/")
indexers.reverse()
metadata: ArrayV2Metadata | ArrayV3Metadata | GroupMetadata = self.metadata
while indexers:
indexer = indexers.pop()
if isinstance(metadata, ArrayV2Metadata | ArrayV3Metadata):
# we've indexed into an array with group["array/subarray"]. Invalid.
raise KeyError(key)
if metadata.consolidated_metadata is None:
# we've indexed into a group without consolidated metadata.
# This isn't normal; typically, consolidated metadata
# will include explicit markers for when there are no child
# nodes as metadata={}.
# We have some freedom in exactly how we interpret this case.
# For now, we treat None as the same as {}, i.e. we don't
# have any children.
raise KeyError(key)
try:
metadata = metadata.consolidated_metadata.metadata[indexer]
except KeyError as e:
# The Group Metadata has consolidated metadata, but the key
# isn't present. We trust this to mean that the key isn't in
# the hierarchy, and *don't* fall back to checking the store.
msg = f"'{key}' not found in consolidated metadata."
raise KeyError(msg) from e
# update store_path to ensure that AsyncArray/Group.name is correct
if prefix != "/":
key = "/".join([prefix.lstrip("/"), key])
store_path = StorePath(store=store_path.store, path=key)
if isinstance(metadata, GroupMetadata):
return AsyncGroup(metadata=metadata, store_path=store_path)
else:
return AsyncArray(metadata=metadata, store_path=store_path)
async def delitem(self, key: str) -> None:
"""Delete a group member.
Parameters
----------
key : str
Array or group name
"""
store_path = self.store_path / key
await store_path.delete_dir()
if self.metadata.consolidated_metadata:
self.metadata.consolidated_metadata.metadata.pop(key, None)
await self._save_metadata()
async def get(
self, key: str, default: DefaultT | None = None
) -> AnyAsyncArray | AsyncGroup | DefaultT | None:
"""Obtain a group member, returning default if not found.
Parameters
----------
key : str
Group member name.
default : object
Default value to return if key is not found (default: None).
Returns
-------
object
Group member (AsyncArray or AsyncGroup) or default if not found.
"""
try:
return await self.getitem(key)
except KeyError:
return default
async def _save_metadata(self, ensure_parents: bool = False) -> None:
await save_metadata(self.store_path, self.metadata, ensure_parents=ensure_parents)
@property
def path(self) -> str:
"""Storage path."""
return self.store_path.path
@property
def name(self) -> str:
"""Group name following h5py convention."""
if self.path:
# follow h5py convention: add leading slash
name = self.path
if name[0] != "/":
name = "/" + name
return name
return "/"
@property
def basename(self) -> str:
"""Final component of name."""
return self.name.split("/")[-1]
@property
def attrs(self) -> dict[str, Any]:
return self.metadata.attributes
@property
def info(self) -> Any:
"""
Return a visual representation of the statically known information about a group.
Note that this doesn't include dynamic information, like the number of child
Groups or Arrays.
Returns
-------
GroupInfo
Related
-------
[zarr.AsyncGroup.info_complete][]
All information about a group, including dynamic information
"""
if self.metadata.consolidated_metadata:
members = list(self.metadata.consolidated_metadata.flattened_metadata.values())
else:
members = None
return self._info(members=members)
async def info_complete(self) -> Any:
"""
Return all the information for a group.
This includes dynamic information like the number
of child Groups or Arrays. If this group doesn't contain consolidated
metadata then this will need to read from the backing Store.
Returns
-------
GroupInfo
Related
-------
[zarr.AsyncGroup.info][]
"""
members = [x[1].metadata async for x in self.members(max_depth=None)]
return self._info(members=members)
def _info(
self, members: list[ArrayV2Metadata | ArrayV3Metadata | GroupMetadata] | None = None
) -> Any:
kwargs = {}
if members is not None:
kwargs["_count_members"] = len(members)
count_arrays = 0
count_groups = 0
for member in members:
if isinstance(member, GroupMetadata):
count_groups += 1
else:
count_arrays += 1
kwargs["_count_arrays"] = count_arrays
kwargs["_count_groups"] = count_groups
return GroupInfo(
_name=self.store_path.path,
_read_only=self.read_only,
_store_type=type(self.store_path.store).__name__,
_zarr_format=self.metadata.zarr_format,
# maybe do a typeddict
**kwargs, # type: ignore[arg-type]
)
@property
def store(self) -> Store:
return self.store_path.store
@property
def read_only(self) -> bool:
# Backwards compatibility for 2.x
return self.store_path.read_only
@property
def synchronizer(self) -> None:
# Backwards compatibility for 2.x
# Not implemented in 3.x yet.
return None
async def create_group(
self,
name: str,
*,
overwrite: bool = False,
attributes: dict[str, Any] | None = None,
) -> AsyncGroup:
"""Create a sub-group.
Parameters
----------
name : str
Group name.
overwrite : bool, optional
If True, do not raise an error if the group already exists.
attributes : dict, optional
Group attributes.
Returns
-------
g : AsyncGroup
"""
attributes = attributes or {}
return await type(self).from_store(
self.store_path / name,
attributes=attributes,
overwrite=overwrite,
zarr_format=self.metadata.zarr_format,
)
async def require_group(self, name: str, overwrite: bool = False) -> AsyncGroup:
"""Obtain a sub-group, creating one if it doesn't exist.
Parameters
----------
name : str
Group name.
overwrite : bool, optional
Overwrite any existing group with given `name` if present.
Returns
-------
g : AsyncGroup
"""
if overwrite:
# TODO: check that overwrite=True errors if an array exists where the group is being created
grp = await self.create_group(name, overwrite=True)
else:
try:
item: AsyncGroup | AnyAsyncArray = await self.getitem(name)
if not isinstance(item, AsyncGroup):
raise TypeError(
f"Incompatible object ({item.__class__.__name__}) already exists"
)
assert isinstance(item, AsyncGroup) # make mypy happy
grp = item
except KeyError:
grp = await self.create_group(name)
return grp
async def require_groups(self, *names: str) -> tuple[AsyncGroup, ...]:
"""Convenience method to require multiple groups in a single call.
Parameters
----------
*names : str
Group names.
Returns
-------
Tuple[AsyncGroup, ...]
"""
if not names:
return ()
return tuple(await asyncio.gather(*(self.require_group(name) for name in names)))
async def create_array(
self,
name: str,
*,
shape: ShapeLike | None = None,
dtype: ZDTypeLike | None = None,
data: np.ndarray[Any, np.dtype[Any]] | None = None,
chunks: tuple[int, ...] | Literal["auto"] = "auto",
shards: ShardsLike | None = None,
filters: FiltersLike = "auto",
compressors: CompressorsLike = "auto",
compressor: CompressorLike = "auto",
serializer: SerializerLike = "auto",
fill_value: Any | None = DEFAULT_FILL_VALUE,
order: MemoryOrder | None = None,
attributes: dict[str, JSON] | None = None,
chunk_key_encoding: ChunkKeyEncodingLike | None = None,
dimension_names: DimensionNames = None,
storage_options: dict[str, Any] | None = None,
overwrite: bool = False,
config: ArrayConfigLike | None = None,
write_data: bool = True,
) -> AnyAsyncArray:
"""Create an array within this group.
This method lightly wraps [zarr.core.array.create_array][].
Parameters
----------
name : str
The name of the array relative to the group. If ``path`` is ``None``, the array will be located
at the root of the store.
shape : tuple[int, ...]
Shape of the array.
dtype : npt.DTypeLike
Data type of the array.
chunks : tuple[int, ...], optional
Chunk shape of the array.
If not specified, default are guessed based on the shape and dtype.
shards : tuple[int, ...], optional
Shard shape of the array. The default value of ``None`` results in no sharding at all.
filters : Iterable[Codec] | Literal["auto"], optional
Iterable of filters to apply to each chunk of the array, in order, before serializing that
chunk to bytes.
For Zarr format 3, a "filter" is a codec that takes an array and returns an array,
and these values must be instances of [`zarr.abc.codec.ArrayArrayCodec`][], or a
dict representations of [`zarr.abc.codec.ArrayArrayCodec`][].
For Zarr format 2, a "filter" can be any numcodecs codec; you should ensure that the
the order if your filters is consistent with the behavior of each filter.
The default value of ``"auto"`` instructs Zarr to use a default used based on the data
type of the array and the Zarr format specified. For all data types in Zarr V3, and most
data types in Zarr V2, the default filters are empty. The only cases where default filters
are not empty is when the Zarr format is 2, and the data type is a variable-length data type like
[`zarr.dtype.VariableLengthUTF8`][] or [`zarr.dtype.VariableLengthUTF8`][]. In these cases,
the default filters contains a single element which is a codec specific to that particular data type.
To create an array with no filters, provide an empty iterable or the value ``None``.
compressors : Iterable[Codec], optional
List of compressors to apply to the array. Compressors are applied in order, and after any
filters are applied (if any are specified) and the data is serialized into bytes.
For Zarr format 3, a "compressor" is a codec that takes a bytestream, and
returns another bytestream. Multiple compressors my be provided for Zarr format 3.
If no ``compressors`` are provided, a default set of compressors will be used.
These defaults can be changed by modifying the value of ``array.v3_default_compressors``
in [`zarr.config`][zarr.config].
Use ``None`` to omit default compressors.
For Zarr format 2, a "compressor" can be any numcodecs codec. Only a single compressor may
be provided for Zarr format 2.
If no ``compressor`` is provided, a default compressor will be used.
in [`zarr.config`][zarr.config].
Use ``None`` to omit the default compressor.
compressor : Codec, optional
Deprecated in favor of ``compressors``.
serializer : dict[str, JSON] | ArrayBytesCodec, optional
Array-to-bytes codec to use for encoding the array data.
Zarr format 3 only. Zarr format 2 arrays use implicit array-to-bytes conversion.
If no ``serializer`` is provided, a default serializer will be used.
These defaults can be changed by modifying the value of ``array.v3_default_serializer``
in [`zarr.config`][zarr.config].
fill_value : Any, optional
Fill value for the array.
order : {"C", "F"}, optional
The memory of the array (default is "C").
For Zarr format 2, this parameter sets the memory order of the array.
For Zarr format 3, this parameter is deprecated, because memory order
is a runtime parameter for Zarr format 3 arrays. The recommended way to specify the memory
order for Zarr format 3 arrays is via the ``config`` parameter, e.g. ``{'config': 'C'}``.
If no ``order`` is provided, a default order will be used.
This default can be changed by modifying the value of ``array.order`` in [`zarr.config`][zarr.config].
attributes : dict, optional
Attributes for the array.
chunk_key_encoding : ChunkKeyEncoding, optional
A specification of how the chunk keys are represented in storage.
For Zarr format 3, the default is ``{"name": "default", "separator": "/"}}``.
For Zarr format 2, the default is ``{"name": "v2", "separator": "."}}``.
dimension_names : Iterable[str], optional
The names of the dimensions (default is None).
Zarr format 3 only. Zarr format 2 arrays should not use this parameter.
storage_options : dict, optional
If using an fsspec URL to create the store, these will be passed to the backend implementation.
Ignored otherwise.
overwrite : bool, default False
Whether to overwrite an array with the same name in the store, if one exists.
config : ArrayConfig or ArrayConfigLike, optional
Runtime configuration for the array.
write_data : bool
If a pre-existing array-like object was provided to this function via the ``data`` parameter
then ``write_data`` determines whether the values in that array-like object should be
written to the Zarr array created by this function. If ``write_data`` is ``False``, then the
array will be left empty.
Returns
-------
AsyncArray
"""
compressors = _parse_deprecated_compressor(
compressor, compressors, zarr_format=self.metadata.zarr_format
)
return await create_array(
store=self.store_path,
name=name,
shape=shape,
dtype=dtype,
data=data,
chunks=chunks,
shards=shards,
filters=filters,
compressors=compressors,
serializer=serializer,
fill_value=fill_value,
order=order,
zarr_format=self.metadata.zarr_format,
attributes=attributes,
chunk_key_encoding=chunk_key_encoding,
dimension_names=dimension_names,
storage_options=storage_options,
overwrite=overwrite,
config=config,
write_data=write_data,
)
@deprecated("Use AsyncGroup.create_array instead.", category=ZarrDeprecationWarning)
async def create_dataset(self, name: str, *, shape: ShapeLike, **kwargs: Any) -> AnyAsyncArray:
"""Create an array.
!!! warning "Deprecated"
`AsyncGroup.create_dataset()` is deprecated since v3.0.0 and will be removed in v3.1.0.
Use `AsyncGroup.create_array` instead.
Arrays are known as "datasets" in HDF5 terminology. For compatibility
with h5py, Zarr groups also implement the [zarr.AsyncGroup.require_dataset][] method.
Parameters
----------
name : str
Array name.
**kwargs : dict
Additional arguments passed to [zarr.AsyncGroup.create_array][].
Returns
-------
a : AsyncArray
"""
data = kwargs.pop("data", None)
# create_dataset in zarr 2.x requires shape but not dtype if data is
# provided. Allow this configuration by inferring dtype from data if
# necessary and passing it to create_array
if "dtype" not in kwargs and data is not None:
kwargs["dtype"] = data.dtype
array = await self.create_array(name, shape=shape, **kwargs)
if data is not None:
await array.setitem(slice(None), data)
return array
@deprecated("Use AsyncGroup.require_array instead.", category=ZarrDeprecationWarning)
async def require_dataset(
self,
name: str,
*,
shape: tuple[int, ...],
dtype: npt.DTypeLike = None,
exact: bool = False,
**kwargs: Any,
) -> AnyAsyncArray:
"""Obtain an array, creating if it doesn't exist.
!!! warning "Deprecated"
`AsyncGroup.require_dataset()` is deprecated since v3.0.0 and will be removed in v3.1.0.
Use `AsyncGroup.require_dataset` instead.
Arrays are known as "datasets" in HDF5 terminology. For compatibility
with h5py, Zarr groups also implement the [zarr.AsyncGroup.create_dataset][] method.
Other `kwargs` are as per [zarr.AsyncGroup.create_dataset][].
Parameters
----------
name : str
Array name.
shape : int or tuple of ints
Array shape.
dtype : str or dtype, optional
NumPy dtype.
exact : bool, optional
If True, require `dtype` to match exactly. If false, require
`dtype` can be cast from array dtype.
Returns
-------
a : AsyncArray
"""
return await self.require_array(name, shape=shape, dtype=dtype, exact=exact, **kwargs)
async def require_array(
self,
name: str,
*,
shape: ShapeLike,
dtype: npt.DTypeLike = None,
exact: bool = False,
**kwargs: Any,
) -> AnyAsyncArray:
"""Obtain an array, creating if it doesn't exist.
Other `kwargs` are as per [zarr.AsyncGroup.create_dataset][].
Parameters
----------
name : str
Array name.
shape : int or tuple of ints
Array shape.
dtype : str or dtype, optional
NumPy dtype.
exact : bool, optional
If True, require `dtype` to match exactly. If false, require
`dtype` can be cast from array dtype.
Returns
-------
a : AsyncArray
"""
try:
ds = await self.getitem(name)
if not isinstance(ds, AsyncArray):
raise TypeError(f"Incompatible object ({ds.__class__.__name__}) already exists")
shape = parse_shapelike(shape)
if shape != ds.shape:
raise TypeError(f"Incompatible shape ({ds.shape} vs {shape})")
dtype = np.dtype(dtype)
if exact:
if ds.dtype != dtype:
raise TypeError(f"Incompatible dtype ({ds.dtype} vs {dtype})")
else:
if not np.can_cast(ds.dtype, dtype):
raise TypeError(f"Incompatible dtype ({ds.dtype} vs {dtype})")
except KeyError:
ds = await self.create_array(name, shape=shape, dtype=dtype, **kwargs)
return ds
async def update_attributes(self, new_attributes: dict[str, Any]) -> AsyncGroup:
"""Update group attributes.
Parameters
----------
new_attributes : dict
New attributes to set on the group.
Returns
-------
self : AsyncGroup
"""
self.metadata.attributes.update(new_attributes)
# Write new metadata
await self._save_metadata()
return self
def __repr__(self) -> str:
return f"<AsyncGroup {self.store_path}>"
async def nmembers(
self,
max_depth: int | None = 0,
) -> int:
"""Count the number of members in this group.
Parameters
----------
max_depth : int, default 0
The maximum number of levels of the hierarchy to include. By
default, (``max_depth=0``) only immediate children are included. Set
``max_depth=None`` to include all nodes, and some positive integer
to consider children within that many levels of the root Group.
Returns
-------
count : int
"""
# check if we can use consolidated metadata, which requires that we have non-None
# consolidated metadata at all points in the hierarchy.
if self.metadata.consolidated_metadata is not None:
if max_depth is not None and max_depth < 0:
raise ValueError(f"max_depth must be None or >= 0. Got '{max_depth}' instead")
if max_depth is None:
return len(self.metadata.consolidated_metadata.flattened_metadata)
else:
return len(
[
x
for x in self.metadata.consolidated_metadata.flattened_metadata
if x.count("/") <= max_depth
]
)
# TODO: consider using aioitertools.builtins.sum for this
# return await aioitertools.builtins.sum((1 async for _ in self.members()), start=0)
n = 0
async for _ in self.members(max_depth=max_depth):
n += 1
return n
async def members(
self,
max_depth: int | None = 0,
*,
use_consolidated_for_children: bool = True,
) -> AsyncGenerator[
tuple[str, AnyAsyncArray | AsyncGroup],
None,
]:
"""
Returns an AsyncGenerator over the arrays and groups contained in this group.
This method requires that `store_path.store` supports directory listing.
The results are not guaranteed to be ordered.
Parameters
----------
max_depth : int, default 0
The maximum number of levels of the hierarchy to include. By
default, (``max_depth=0``) only immediate children are included. Set
``max_depth=None`` to include all nodes, and some positive integer
to consider children within that many levels of the root Group.
use_consolidated_for_children : bool, default True
Whether to use the consolidated metadata of child groups loaded
from the store. Note that this only affects groups loaded from the
store. If the current Group already has consolidated metadata, it
will always be used.
Returns
-------
path:
A string giving the path to the target, relative to the Group ``self``.
value: AsyncArray or AsyncGroup
The AsyncArray or AsyncGroup that is a child of ``self``.
"""
if max_depth is not None and max_depth < 0:
raise ValueError(f"max_depth must be None or >= 0. Got '{max_depth}' instead")
async for item in self._members(
max_depth=max_depth, use_consolidated_for_children=use_consolidated_for_children
):
yield item
def _members_consolidated(
self, max_depth: int | None, prefix: str = ""
) -> Generator[
tuple[str, AnyAsyncArray | AsyncGroup],
None,
]:
consolidated_metadata = self.metadata.consolidated_metadata
do_recursion = max_depth is None or max_depth > 0
# we kind of just want the top-level keys.
if consolidated_metadata is not None:
for key in consolidated_metadata.metadata:
obj = self._getitem_consolidated(
self.store_path, key, prefix=self.name
) # Metadata -> Group/Array
key = f"{prefix}/{key}".lstrip("/")
yield key, obj
if do_recursion and isinstance(obj, AsyncGroup):
if max_depth is None:
new_depth = None
else:
new_depth = max_depth - 1
yield from obj._members_consolidated(new_depth, prefix=key)
async def _members(
self, max_depth: int | None, *, use_consolidated_for_children: bool = True
) -> AsyncGenerator[tuple[str, AnyAsyncArray | AsyncGroup], None]:
skip_keys: tuple[str, ...]
if self.metadata.zarr_format == 2:
skip_keys = (".zattrs", ".zgroup", ".zarray", ".zmetadata")
elif self.metadata.zarr_format == 3:
skip_keys = ("zarr.json",)
else:
raise ValueError(f"Unknown Zarr format: {self.metadata.zarr_format}")
if self.metadata.consolidated_metadata is not None:
members = self._members_consolidated(max_depth=max_depth)
for member in members:
yield member
return
if not self.store_path.store.supports_listing:
msg = (
f"The store associated with this group ({type(self.store_path.store)}) "
"does not support listing, "
"specifically via the `list_dir` method. "
"This function requires a store that supports listing."
)
raise ValueError(msg)
# enforce a concurrency limit by passing a semaphore to all the recursive functions
semaphore = asyncio.Semaphore(config.get("async.concurrency"))
async for member in _iter_members_deep(
self,
max_depth=max_depth,
skip_keys=skip_keys,
semaphore=semaphore,
use_consolidated_for_children=use_consolidated_for_children,
):
yield member
async def create_hierarchy(
self,
nodes: dict[str, ArrayV2Metadata | ArrayV3Metadata | GroupMetadata],
*,
overwrite: bool = False,
) -> AsyncIterator[tuple[str, AsyncGroup | AnyAsyncArray]]:
"""
Create a hierarchy of arrays or groups rooted at this group.
This function will parse its input to ensure that the hierarchy is complete. Any implicit groups
will be inserted as needed. For example, an input like
```{'a/b': GroupMetadata}``` will be parsed to
```{'': GroupMetadata, 'a': GroupMetadata, 'b': Groupmetadata}```.
Explicitly specifying a root group, e.g. with ``nodes = {'': GroupMetadata()}`` is an error
because this group instance is the root group.
After input parsing, this function then creates all the nodes in the hierarchy concurrently.
Arrays and Groups are yielded in the order they are created. This order is not stable and
should not be relied on.
Parameters
----------
nodes : dict[str, GroupMetadata | ArrayV3Metadata | ArrayV2Metadata]
A dictionary defining the hierarchy. The keys are the paths of the nodes in the hierarchy,
relative to the path of the group. The values are instances of ``GroupMetadata`` or ``ArrayMetadata``. Note that
all values must have the same ``zarr_format`` as the parent group -- it is an error to mix zarr versions in the
same hierarchy.
Leading "/" characters from keys will be removed.
overwrite : bool
Whether to overwrite existing nodes. Defaults to ``False``, in which case an error is
raised instead of overwriting an existing array or group.
This function will not erase an existing group unless that group is explicitly named in
``nodes``. If ``nodes`` defines implicit groups, e.g. ``{`'a/b/c': GroupMetadata}``, and a
group already exists at path ``a``, then this function will leave the group at ``a`` as-is.
Yields
------
tuple[str, AsyncArray | AsyncGroup].
"""
# check that all the nodes have the same zarr_format as Self
prefix = self.path
nodes_parsed = {}
for key, value in nodes.items():
if value.zarr_format != self.metadata.zarr_format:
msg = (
"The zarr_format of the nodes must be the same as the parent group. "
f"The node at {key} has zarr_format {value.zarr_format}, but the parent group"
f" has zarr_format {self.metadata.zarr_format}."
)
raise ValueError(msg)
if normalize_path(key) == "":
msg = (
"The input defines a root node, but a root node already exists, namely this Group instance."
"It is an error to use this method to create a root node. "
"Remove the root node from the input dict, or use a function like "
"create_rooted_hierarchy to create a rooted hierarchy."
)
raise ValueError(msg)
else:
nodes_parsed[_join_paths([prefix, key])] = value
async for key, node in create_hierarchy(
store=self.store,
nodes=nodes_parsed,
overwrite=overwrite,
):
if prefix == "":
out_key = key
else:
out_key = key.removeprefix(prefix + "/")
yield out_key, node
async def keys(self) -> AsyncGenerator[str, None]:
"""Iterate over member names."""
async for key, _ in self.members():
yield key
async def contains(self, member: str) -> bool:
"""Check if a member exists in the group.
Parameters
----------
member : str
Member name.
Returns
-------
bool
"""
# TODO: this can be made more efficient.
try:
await self.getitem(member)
except KeyError:
return False
else:
return True
async def groups(self) -> AsyncGenerator[tuple[str, AsyncGroup], None]:
"""Iterate over subgroups."""
async for name, value in self.members():
if isinstance(value, AsyncGroup):
yield name, value
async def group_keys(self) -> AsyncGenerator[str, None]:
"""Iterate over group names."""
async for key, _ in self.groups():
yield key
async def group_values(self) -> AsyncGenerator[AsyncGroup, None]:
"""Iterate over group values."""
async for _, group in self.groups():
yield group
async def arrays(
self,
) -> AsyncGenerator[tuple[str, AnyAsyncArray], None]:
"""Iterate over arrays."""
async for key, value in self.members():
if isinstance(value, AsyncArray):
yield key, value
async def array_keys(self) -> AsyncGenerator[str, None]:
"""Iterate over array names."""
async for key, _ in self.arrays():
yield key
async def array_values(
self,
) -> AsyncGenerator[AnyAsyncArray, None]:
"""Iterate over array values."""
async for _, array in self.arrays():
yield array
async def tree(self, expand: bool | None = None, level: int | None = None) -> Any:
"""
Return a tree-like representation of a hierarchy.
This requires the optional ``rich`` dependency.
Parameters
----------
expand : bool, optional
This keyword is not yet supported. A NotImplementedError is raised if
it's used.
level : int, optional
The maximum depth below this Group to display in the tree.
Returns
-------
TreeRepr
A pretty-printable object displaying the hierarchy.
"""
from zarr.core._tree import group_tree_async
if expand is not None:
raise NotImplementedError("'expand' is not yet implemented.")
return await group_tree_async(self, max_depth=level)
async def empty(self, *, name: str, shape: tuple[int, ...], **kwargs: Any) -> AnyAsyncArray:
"""Create an empty array with the specified shape in this Group. The contents will
be filled with the array's fill value or zeros if no fill value is provided.
Parameters
----------
name : str
Name of the array.
shape : int or tuple of int
Shape of the empty array.
**kwargs
Keyword arguments passed to [zarr.api.asynchronous.create][].
Notes
-----
The contents of an empty Zarr array are not defined. On attempting to
retrieve data from an empty Zarr array, any values may be returned,
and these are not guaranteed to be stable from one access to the next.
"""
return await async_api.empty(shape=shape, store=self.store_path, path=name, **kwargs)
async def zeros(self, *, name: str, shape: tuple[int, ...], **kwargs: Any) -> AnyAsyncArray:
"""Create an array, with zero being used as the default value for uninitialized portions of the array.
Parameters
----------
name : str
Name of the array.
shape : int or tuple of int
Shape of the empty array.
**kwargs
Keyword arguments passed to [zarr.api.asynchronous.create][].
Returns
-------
AsyncArray
The new array.
"""
return await async_api.zeros(shape=shape, store=self.store_path, path=name, **kwargs)
async def ones(self, *, name: str, shape: tuple[int, ...], **kwargs: Any) -> AnyAsyncArray:
"""Create an array, with one being used as the default value for uninitialized portions of the array.
Parameters
----------
name : str
Name of the array.
shape : int or tuple of int
Shape of the empty array.
**kwargs
Keyword arguments passed to [zarr.api.asynchronous.create][].
Returns
-------
AsyncArray
The new array.
"""
return await async_api.ones(shape=shape, store=self.store_path, path=name, **kwargs)
async def full(
self, *, name: str, shape: tuple[int, ...], fill_value: Any | None, **kwargs: Any
) -> AnyAsyncArray:
"""Create an array, with "fill_value" being used as the default value for uninitialized portions of the array.
Parameters
----------
name : str
Name of the array.
shape : int or tuple of int
Shape of the empty array.
fill_value : scalar
Value to fill the array with.
**kwargs
Keyword arguments passed to [zarr.api.asynchronous.create][].
Returns
-------
AsyncArray
The new array.
"""
return await async_api.full(
shape=shape,
fill_value=fill_value,
store=self.store_path,
path=name,
**kwargs,
)
async def empty_like(
self, *, name: str, data: async_api.ArrayLike, **kwargs: Any
) -> AnyAsyncArray:
"""Create an empty sub-array like `data`. The contents will be filled with
the array's fill value or zeros if no fill value is provided.
Parameters
----------
name : str
Name of the array.
data : array-like
The array to create an empty array like.
**kwargs
Keyword arguments passed to [zarr.api.asynchronous.create][].
Returns
-------
AsyncArray
The new array.
"""
return await async_api.empty_like(a=data, store=self.store_path, path=name, **kwargs)
async def zeros_like(
self, *, name: str, data: async_api.ArrayLike, **kwargs: Any
) -> AnyAsyncArray:
"""Create a sub-array of zeros like `data`.
Parameters
----------
name : str
Name of the array.
data : array-like
The array to create the new array like.
**kwargs
Keyword arguments passed to [zarr.api.asynchronous.create][].
Returns
-------
AsyncArray
The new array.
"""
return await async_api.zeros_like(a=data, store=self.store_path, path=name, **kwargs)
async def ones_like(
self, *, name: str, data: async_api.ArrayLike, **kwargs: Any
) -> AnyAsyncArray:
"""Create a sub-array of ones like `data`.
Parameters
----------
name : str
Name of the array.
data : array-like
The array to create the new array like.
**kwargs
Keyword arguments passed to [zarr.api.asynchronous.create][].
Returns
-------
AsyncArray
The new array.
"""
return await async_api.ones_like(a=data, store=self.store_path, path=name, **kwargs)
async def full_like(
self, *, name: str, data: async_api.ArrayLike, **kwargs: Any
) -> AnyAsyncArray:
"""Create a sub-array like `data` filled with the `fill_value` of `data` .
Parameters
----------
name : str
Name of the array.
data : array-like
The array to create the new array like.
**kwargs
Keyword arguments passed to [zarr.api.asynchronous.create][].
Returns
-------
AsyncArray
The new array.
"""
return await async_api.full_like(a=data, store=self.store_path, path=name, **kwargs)
async def move(self, source: str, dest: str) -> None:
"""Move a sub-group or sub-array from one path to another.
Notes
-----
Not implemented
"""
raise NotImplementedError
@dataclass(frozen=True)
| AsyncGroup |
python | encode__django-rest-framework | tests/test_relations_pk.py | {
"start": 21772,
"end": 22474
} | class ____(TestCase):
def setUp(self):
target = OneToOneTarget(name='target-1')
target.save()
new_target = OneToOneTarget(name='target-2')
new_target.save()
source = NullableOneToOneSource(name='source-1', target=new_target)
source.save()
def test_reverse_foreign_key_retrieve_with_null(self):
queryset = OneToOneTarget.objects.all()
serializer = NullableOneToOneTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'nullable_source': None},
{'id': 2, 'name': 'target-2', 'nullable_source': 1},
]
assert serializer.data == expected
| PKNullableOneToOneTests |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_artifact_bundle_file_details.py | {
"start": 351,
"end": 8121
} | class ____(APITestCase):
@staticmethod
def get_compressed_zip_file(artifact_name, files, type="artifact.bundle"):
def remove_and_return(dictionary, key):
dictionary.pop(key)
return dictionary
compressed = io.BytesIO()
with zipfile.ZipFile(compressed, mode="w") as zip_file:
for file_path, info in files.items():
zip_file.writestr(file_path, bytes(info["content"]))
zip_file.writestr(
"manifest.json",
orjson.dumps(
{
# We remove the "content" key in the original dict, thus no subsequent calls should be made.
"files": {
file_path: remove_and_return(info, "content")
for file_path, info in files.items()
}
}
).decode(),
)
compressed.seek(0)
file = File.objects.create(name=artifact_name, type=type)
file.putfile(compressed)
return file
def test_archive_download(self) -> None:
project = self.create_project(name="foo")
file = self.get_compressed_zip_file(
"bundle.zip",
{
"files/_/_/index.js.map": {
"url": "~/index.js.map",
"type": "source_map",
"content": b"foo",
"headers": {
"content-type": "application/json",
},
},
"files/_/_/index.js": {
"url": "~/index.js",
"type": "minified_source",
"content": b"bar",
"headers": {
"content-type": "application/json",
"sourcemap": "index.js.map",
},
},
},
)
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id, bundle_id=uuid4(), file=file, artifact_count=2
)
ProjectArtifactBundle.objects.create(
organization_id=self.organization.id,
project_id=project.id,
artifact_bundle=artifact_bundle,
)
# Download as a user with sufficient role
url = reverse(
"sentry-api-0-project-artifact-bundle-file-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"bundle_id": artifact_bundle.bundle_id,
"file_id": base64.urlsafe_b64encode(b"files/_/_/index.js.map").decode("utf-8"),
},
)
self.organization.update_option("sentry:debug_files_role", "admin")
user = self.create_user("baz@localhost")
self.create_member(user=user, organization=project.organization, role="owner")
self.login_as(user=user)
response = self.client.get(url)
assert response.status_code == 200, response.content
assert response.get("Content-Disposition") == 'attachment; filename="index.js.map"'
assert response.get("Content-Length") == str(3)
assert response.get("Content-Type") == "application/json"
assert b"foo" == close_streaming_response(response)
# Download as a superuser
url = reverse(
"sentry-api-0-project-artifact-bundle-file-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"bundle_id": artifact_bundle.bundle_id,
"file_id": base64.urlsafe_b64encode(b"files/_/_/index.js").decode("utf-8"),
},
)
self.login_as(user=self.user)
response = self.client.get(url)
assert response.status_code == 200, response.content
assert response.get("Content-Disposition") == 'attachment; filename="index.js"'
assert response.get("Content-Length") == str(3)
assert response.get("Content-Type") == "application/json"
assert b"bar" == close_streaming_response(response)
# Download as a superuser with non-existing file
url = reverse(
"sentry-api-0-project-artifact-bundle-file-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"bundle_id": artifact_bundle.bundle_id,
"file_id": base64.urlsafe_b64encode(b"files/_/_/bundle.js").decode("utf-8"),
},
)
self.login_as(user=self.user)
response = self.client.get(url)
assert response.status_code == 404, response.content
# Download as a superuser with invalid base64 file_id
url = reverse(
"sentry-api-0-project-artifact-bundle-file-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"bundle_id": artifact_bundle.bundle_id,
"file_id": 1234,
},
)
self.login_as(user=self.user)
response = self.client.get(url)
assert response.status_code == 400, response.content
# Download as a user without sufficient role
self.organization.update_option("sentry:debug_files_role", "owner")
user_no_role = self.create_user("bar@localhost")
self.create_member(user=user_no_role, organization=project.organization, role="member")
self.login_as(user=user_no_role)
response = self.client.get(url)
assert response.status_code == 403, response.content
# Download as a user with no permissions
user_no_permission = self.create_user("baz@localhost", username="baz")
self.login_as(user=user_no_permission)
response = self.client.get(url)
assert response.status_code == 403, response.content
def test_archive_download_with_invalid_project(self) -> None:
project = self.create_project(name="foo")
file = self.get_compressed_zip_file(
"bundle.zip",
{
"files/_/_/index.js.map": {
"url": "~/index.js.map",
"type": "source_map",
"content": b"foo",
"headers": {
"content-type": "application/json",
},
},
"files/_/_/index.js": {
"url": "~/index.js",
"type": "minified_source",
"content": b"bar",
"headers": {
"content-type": "application/json",
"sourcemap": "index.js.map",
},
},
},
)
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id, bundle_id=uuid4(), file=file, artifact_count=2
)
# Download as a superuser
url = reverse(
"sentry-api-0-project-artifact-bundle-file-details",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
"bundle_id": artifact_bundle.bundle_id,
"file_id": base64.urlsafe_b64encode(b"files/_/_/bundle.js").decode("utf-8"),
},
)
self.login_as(user=self.user)
response = self.client.get(url)
assert response.status_code == 400, response.content
| ProjectArtifactBundleFileDetailsEndpointTest |
python | django-extensions__django-extensions | django_extensions/management/commands/shell_plus.py | {
"start": 1248,
"end": 25031
} | class ____(BaseCommand):
help = "Like the 'shell' command but autoloads the models of all installed Django apps." # noqa: E501
extra_args = None
tests_mode = False
def __init__(self):
super().__init__()
self.runners = [
member
for name, member in inspect.getmembers(self)
if hasattr(member, "runner_flags")
]
def add_arguments(self, parser):
super().add_arguments(parser)
group = parser.add_mutually_exclusive_group()
for runner in self.runners:
if runner.runner_help:
help = runner.runner_help
else:
help = "Tells Django to use %s." % runner.runner_name
group.add_argument(
*runner.runner_flags,
action="store_const",
dest="runner",
const=runner,
help=help,
)
parser.add_argument(
"--connection-file",
action="store",
dest="connection_file",
help="Specifies the connection file to use if using the --kernel option",
)
parser.add_argument(
"--no-startup",
action="store_true",
dest="no_startup",
default=False,
help=(
"When using plain Python, ignore the PYTHONSTARTUP environment "
"variable and ~/.pythonrc.py script."
),
)
parser.add_argument(
"--use-pythonrc",
action="store_true",
dest="use_pythonrc",
default=False,
help=(
"When using plain Python, load the PYTHONSTARTUP environment variable "
"and ~/.pythonrc.py script."
),
)
parser.add_argument(
"--print-sql",
action="store_true",
default=False,
help="Print SQL queries as they're executed",
)
parser.add_argument(
"--truncate-sql",
action="store",
type=int,
help="Truncate SQL queries to a number of characters.",
)
parser.add_argument(
"--print-sql-location",
action="store_true",
default=False,
help="Show location in code where SQL query generated from",
)
parser.add_argument(
"--dont-load",
action="append",
dest="dont_load",
default=[],
help="Ignore autoloading of some apps/models. Can be used several times.",
)
parser.add_argument(
"--quiet-load",
action="store_true",
default=False,
dest="quiet_load",
help="Do not display loaded models messages",
)
parser.add_argument(
"--vi",
action="store_true",
default=use_vi_mode(),
dest="vi_mode",
help="Load Vi key bindings (for --ptpython and --ptipython)",
)
parser.add_argument(
"--no-browser",
action="store_true",
default=False,
dest="no_browser",
help="Don't open the notebook in a browser after startup.",
)
parser.add_argument(
"-c",
"--command",
help=(
"Instead of opening an interactive shell, "
"run a command as Django and exit."
),
)
def run_from_argv(self, argv):
if "--" in argv[2:]:
idx = argv.index("--")
self.extra_args = argv[idx + 1 :]
argv = argv[:idx]
return super().run_from_argv(argv)
def get_ipython_arguments(self, options):
ipython_args = "IPYTHON_ARGUMENTS"
arguments = getattr(settings, ipython_args, [])
if not arguments:
arguments = os.environ.get(ipython_args, "").split()
return arguments
def get_notebook_arguments(self, options):
notebook_args = "NOTEBOOK_ARGUMENTS"
arguments = getattr(settings, notebook_args, [])
if not arguments:
arguments = os.environ.get(notebook_args, "").split()
return arguments
def get_imported_objects(self, options):
imported_objects = import_objects(options, self.style)
if self.tests_mode:
# save imported objects so we can run tests against it later
self.tests_imported_objects = imported_objects
return imported_objects
@shell_runner(flags=["--kernel"], name="IPython Kernel")
def get_kernel(self, options):
try:
from IPython import release
if release.version_info[0] < 2:
print(
self.style.ERROR("--kernel requires at least IPython version 2.0")
)
return
from IPython import start_kernel
except ImportError:
return traceback.format_exc()
def run_kernel():
imported_objects = self.get_imported_objects(options)
kwargs = dict(
argv=[],
user_ns=imported_objects,
)
connection_file = options["connection_file"]
if connection_file:
kwargs["connection_file"] = connection_file
start_kernel(**kwargs)
return run_kernel
def load_base_kernel_spec(self, app):
"""Finds and returns the base Python kernelspec to extend from."""
ksm = app.kernel_spec_manager
try_spec_names = getattr(
settings,
"NOTEBOOK_KERNEL_SPEC_NAMES",
[
"python3",
"python",
],
)
if isinstance(try_spec_names, str):
try_spec_names = [try_spec_names]
ks = None
for spec_name in try_spec_names:
try:
ks = ksm.get_kernel_spec(spec_name)
break
except Exception:
continue
if not ks:
raise CommandError(
"No notebook (Python) kernel specs found. Tried %r" % try_spec_names
)
return ks
def generate_kernel_specs(self, app, ipython_arguments):
"""Generate an IPython >= 3.0 kernelspec that loads django extensions"""
ks = self.load_base_kernel_spec(app)
ks.argv.extend(ipython_arguments)
ks.display_name = getattr(
settings, "IPYTHON_KERNEL_DISPLAY_NAME", "Django Shell-Plus"
)
manage_py_dir, manage_py = os.path.split(os.path.realpath(sys.argv[0]))
if manage_py == "manage.py" and os.path.isdir(manage_py_dir):
pythonpath = ks.env.get("PYTHONPATH", os.environ.get("PYTHONPATH", ""))
pythonpath = pythonpath.split(os.pathsep)
if manage_py_dir not in pythonpath:
pythonpath.append(manage_py_dir)
ks.env["PYTHONPATH"] = os.pathsep.join(filter(None, pythonpath))
return {"django_extensions": ks}
def run_notebookapp(self, app_init, options, use_kernel_specs=True, history=True):
no_browser = options["no_browser"]
if self.extra_args:
# if another '--' is found split the arguments notebook, ipython
if "--" in self.extra_args:
idx = self.extra_args.index("--")
notebook_arguments = self.extra_args[:idx]
ipython_arguments = self.extra_args[idx + 1 :]
# otherwise pass the arguments to the notebook
else:
notebook_arguments = self.extra_args
ipython_arguments = []
else:
notebook_arguments = self.get_notebook_arguments(options)
ipython_arguments = self.get_ipython_arguments(options)
# Treat IPYTHON_ARGUMENTS from settings
if "django_extensions.management.notebook_extension" not in ipython_arguments:
ipython_arguments.extend(
["--ext", "django_extensions.management.notebook_extension"]
)
# Treat NOTEBOOK_ARGUMENTS from settings
if no_browser and "--no-browser" not in notebook_arguments:
notebook_arguments.append("--no-browser")
if "--notebook-dir" not in notebook_arguments and not any(
e.startswith("--notebook-dir=") for e in notebook_arguments
):
notebook_arguments.extend(["--notebook-dir", "."])
# IPython < 3 passes through kernel args from notebook CLI
if not use_kernel_specs:
notebook_arguments.extend(ipython_arguments)
# disable history if not already configured in some other way
if not history and not any(
arg.startswith("--HistoryManager") for arg in ipython_arguments
):
ipython_arguments.append("--HistoryManager.enabled=False")
if not callable(app_init):
app = app_init
warnings.warn(
"Initialize should be a callable not an app instance",
DeprecationWarning,
)
app.initialize(notebook_arguments)
else:
app = app_init(notebook_arguments)
# IPython >= 3 uses kernelspecs to specify kernel CLI args
if use_kernel_specs:
ksm = app.kernel_spec_manager
for kid, ks in self.generate_kernel_specs(app, ipython_arguments).items():
roots = [os.path.dirname(ks.resource_dir), ksm.user_kernel_dir]
for root in roots:
kernel_dir = os.path.join(root, kid)
try:
if not os.path.exists(kernel_dir):
os.makedirs(kernel_dir)
with open(os.path.join(kernel_dir, "kernel.json"), "w") as f:
f.write(ks.to_json())
break
except OSError:
continue
else:
raise CommandError(
"Could not write kernel %r in directories %r" % (kid, roots)
)
app.start()
@shell_runner(flags=["--notebook"], name="IPython Notebook")
def get_notebook(self, options):
try:
from IPython import release
except ImportError:
return traceback.format_exc()
try:
from notebook.notebookapp import NotebookApp
except ImportError:
if release.version_info[0] >= 7:
return traceback.format_exc()
try:
from IPython.html.notebookapp import NotebookApp
except ImportError:
if release.version_info[0] >= 3:
return traceback.format_exc()
try:
from IPython.frontend.html.notebook import notebookapp
NotebookApp = notebookapp.NotebookApp
except ImportError:
return traceback.format_exc()
use_kernel_specs = release.version_info[0] >= 3
def app_init(*args, **kwargs):
app = NotebookApp.instance()
app.initialize(*args, **kwargs)
return app
def run_notebook():
self.run_notebookapp(app_init, options, use_kernel_specs)
return run_notebook
@shell_runner(flags=["--lab"], name="JupyterLab Notebook")
def get_jupyterlab(self, options):
try:
from jupyterlab.labapp import LabApp
except ImportError:
return traceback.format_exc()
# check for JupyterLab 3.0
try:
from notebook.notebookapp import NotebookApp
except ImportError:
NotebookApp = None
if not NotebookApp or not issubclass(LabApp, NotebookApp):
app_init = LabApp.initialize_server
else:
def app_init(*args, **kwargs):
app = LabApp.instance()
app.initialize(*args, **kwargs)
return app
def run_jupyterlab():
self.run_notebookapp(app_init, options, history=False)
return run_jupyterlab
@shell_runner(flags=["--plain"], name="plain Python")
def get_plain(self, options):
# Using normal Python shell
import code
# Set up a dictionary to serve as the environment for the shell.
imported_objects = self.get_imported_objects(options)
use_pythonrc = options["use_pythonrc"]
no_startup = options["no_startup"]
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then .pythonrc.py.
if use_pythonrc or not no_startup:
for pythonrc in OrderedSet(
[os.environ.get("PYTHONSTARTUP"), os.path.expanduser("~/.pythonrc.py")]
):
if not pythonrc:
continue
if not os.path.isfile(pythonrc):
continue
with open(pythonrc) as handle:
pythonrc_code = handle.read()
# Match the behavior of the cpython shell where an error in
# PYTHONSTARTUP prints an exception and continues.
try:
exec(compile(pythonrc_code, pythonrc, "exec"), imported_objects)
except Exception:
traceback.print_exc()
if self.tests_mode:
raise
# By default, this will set up readline to do tab completion and to read and
# write history to the .python_history file, but this can be overridden by
# $PYTHONSTARTUP or ~/.pythonrc.py.
try:
hook = sys.__interactivehook__
except AttributeError:
# Match the behavior of the cpython shell where a missing
# sys.__interactivehook__ is ignored.
pass
else:
try:
hook()
except Exception:
# Match the behavior of the cpython shell where an error in
# sys.__interactivehook__ prints a warning and the exception
# and continues.
print("Failed calling sys.__interactivehook__")
traceback.print_exc()
try:
# Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
# Enable tab completion on systems using libedit (e.g. macOS).
# These lines are copied from Lib/site.py on Python 3.4.
readline_doc = getattr(readline, "__doc__", "")
if readline_doc is not None and "libedit" in readline_doc:
readline.parse_and_bind("bind ^I rl_complete")
else:
readline.parse_and_bind("tab:complete")
def run_plain():
code.interact(local=imported_objects)
return run_plain
@shell_runner(flags=["--bpython"], name="BPython")
def get_bpython(self, options):
try:
from bpython import embed
except ImportError:
return traceback.format_exc()
def run_bpython():
imported_objects = self.get_imported_objects(options)
kwargs = {}
if self.extra_args:
kwargs["args"] = self.extra_args
embed(imported_objects, **kwargs)
return run_bpython
@shell_runner(flags=["--ipython"], name="IPython")
def get_ipython(self, options):
try:
from IPython import start_ipython
def run_ipython():
imported_objects = self.get_imported_objects(options)
ipython_arguments = self.extra_args or self.get_ipython_arguments(
options
)
start_ipython(argv=ipython_arguments, user_ns=imported_objects)
return run_ipython
except ImportError:
str_exc = traceback.format_exc()
# IPython < 0.11
# Explicitly pass an empty list as arguments, because otherwise
# IPython would use sys.argv from this script.
# Notebook not supported for IPython < 0.11.
try:
from IPython.Shell import IPShell
except ImportError:
return str_exc + "\n" + traceback.format_exc()
def run_ipython():
imported_objects = self.get_imported_objects(options)
shell = IPShell(argv=[], user_ns=imported_objects)
shell.mainloop()
return run_ipython
@shell_runner(flags=["--ptpython"], name="PTPython")
def get_ptpython(self, options):
try:
from ptpython.repl import embed, run_config
except ImportError:
tb = traceback.format_exc()
try: # prompt_toolkit < v0.27
from prompt_toolkit.contrib.repl import embed, run_config
except ImportError:
return tb
def run_ptpython():
imported_objects = self.get_imported_objects(options)
history_filename = os.path.expanduser("~/.ptpython_history")
embed(
globals=imported_objects,
history_filename=history_filename,
vi_mode=options["vi_mode"],
configure=run_config,
)
return run_ptpython
@shell_runner(flags=["--ptipython"], name="PT-IPython")
def get_ptipython(self, options):
try:
from ptpython.repl import run_config
from ptpython.ipython import embed
except ImportError:
tb = traceback.format_exc()
try: # prompt_toolkit < v0.27
from prompt_toolkit.contrib.repl import run_config
from prompt_toolkit.contrib.ipython import embed
except ImportError:
return tb
def run_ptipython():
imported_objects = self.get_imported_objects(options)
history_filename = os.path.expanduser("~/.ptpython_history")
embed(
user_ns=imported_objects,
history_filename=history_filename,
vi_mode=options["vi_mode"],
configure=run_config,
)
return run_ptipython
@shell_runner(flags=["--idle"], name="Idle")
def get_idle(self, options):
from idlelib.pyshell import main
def run_idle():
sys.argv = [
sys.argv[0],
"-c",
"""
from django_extensions.management import shells
from django.core.management.color import no_style
for k, m in shells.import_objects({}, no_style()).items():
globals()[k] = m
""",
]
main()
return run_idle
def set_application_name(self, options):
"""
Set the application_name on PostgreSQL connection
Use the fallback_application_name to let the user override
it with PGAPPNAME env variable
https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS
""" # noqa: E501
supported_backends = (
"django.db.backends.postgresql",
"django.db.backends.postgresql_psycopg2",
)
opt_name = "fallback_application_name"
default_app_name = "django_shell"
dbs = getattr(settings, "DATABASES", [])
for connection in connections.all():
alias = connection.alias
mro = inspect.getmro(connection.__class__)
if any(klass.__module__.startswith(supported_backends) for klass in mro):
if "OPTIONS" not in dbs[alias] or opt_name not in dbs[alias]["OPTIONS"]:
dbs[alias].setdefault("OPTIONS", {}).update(
{opt_name: default_app_name}
)
@signalcommand
def handle(self, *args, **options):
verbosity = options["verbosity"]
get_runner = options["runner"]
print_sql = getattr(settings, "SHELL_PLUS_PRINT_SQL", False)
runner = None
runner_name = None
truncate = None if options["truncate_sql"] == 0 else options["truncate_sql"]
with monkey_patch_cursordebugwrapper(
print_sql=options["print_sql"] or print_sql,
truncate=truncate,
print_sql_location=options["print_sql_location"],
confprefix="SHELL_PLUS",
):
SETTINGS_SHELL_PLUS = getattr(settings, "SHELL_PLUS", None)
def get_runner_by_flag(flag):
for runner in self.runners:
if flag in runner.runner_flags:
return runner
return None
self.set_application_name(options)
if not get_runner and SETTINGS_SHELL_PLUS:
get_runner = get_runner_by_flag("--%s" % SETTINGS_SHELL_PLUS)
if not get_runner:
runner = None
runner_name = SETTINGS_SHELL_PLUS
if get_runner:
runner = get_runner(options)
runner_name = get_runner.runner_name
else:
def try_runner(get_runner):
runner_name = get_runner.runner_name
if verbosity > 2:
print(self.style.NOTICE("Trying: %s" % runner_name))
runner = get_runner(options)
if callable(runner):
if verbosity > 1:
print(self.style.NOTICE("Using: %s" % runner_name))
return runner
return None
tried_runners = set()
# try the runners that are least unexpected (normal shell runners)
preferred_runners = [
"ptipython",
"ptpython",
"bpython",
"ipython",
"plain",
]
for flag_suffix in preferred_runners:
get_runner = get_runner_by_flag("--%s" % flag_suffix)
tried_runners.add(get_runner)
runner = try_runner(get_runner)
if runner:
runner_name = get_runner.runner_name
break
# try any remaining runners if needed
if not runner:
for get_runner in self.runners:
if get_runner not in tried_runners:
runner = try_runner(get_runner)
if runner:
runner_name = get_runner.runner_name
break
if not callable(runner):
if runner:
print(runner)
if not runner_name:
raise CommandError("No shell runner could be found.")
raise CommandError("Could not load shell runner: '%s'." % runner_name)
if self.tests_mode:
return 130
if options["command"]:
imported_objects = self.get_imported_objects(options)
exec(options["command"], imported_objects)
return None
runner()
| Command |
python | numba__numba | setup.py | {
"start": 1259,
"end": 1938
} | class ____(Command):
description = "build documentation"
def run(self):
subprocess.run(['make', '-C', 'docs', 'html'])
cmdclass = versioneer.get_cmdclass()
cmdclass['build_doc'] = build_doc
extra_link_args = []
install_name_tool_fixer = []
if sys.platform == 'darwin':
install_name_tool_fixer += ['-headerpad_max_install_names']
if platform.machine() == 'ppc64le':
extra_link_args += ['-pthread']
build_ext = cmdclass.get('build_ext', build_ext)
numba_be_user_options = [
('werror', None, 'Build extensions with -Werror'),
('wall', None, 'Build extensions with -Wall'),
('noopt', None, 'Build extensions without optimization'),
]
| build_doc |
python | ray-project__ray | python/ray/_private/state_api_test_utils.py | {
"start": 1079,
"end": 5425
} | class ____:
pending_calls: int = 0
total_calls: int = 0
calls: Dict = field(default_factory=lambda: defaultdict(list))
GLOBAL_STATE_STATS = StateAPIStats()
STATE_LIST_LIMIT = int(1e6) # 1m
STATE_LIST_TIMEOUT = 600 # 10min
def invoke_state_api(
verify_cb: Callable,
state_api_fn: Callable,
state_stats: StateAPIStats = GLOBAL_STATE_STATS,
key_suffix: Optional[str] = None,
print_result: Optional[bool] = False,
err_msg: Optional[str] = None,
**kwargs,
):
"""Invoke a State API
Args:
- verify_cb: Callback that takes in the response from `state_api_fn` and
returns a boolean, indicating the correctness of the results.
- state_api_fn: Function of the state API
- state_stats: Stats
- kwargs: Keyword arguments to be forwarded to the `state_api_fn`
"""
if "timeout" not in kwargs:
kwargs["timeout"] = STATE_LIST_TIMEOUT
# Suppress missing output warning
kwargs["raise_on_missing_output"] = False
res = None
try:
state_stats.total_calls += 1
state_stats.pending_calls += 1
t_start = time.perf_counter()
res = state_api_fn(**kwargs)
t_end = time.perf_counter()
if print_result:
pprint.pprint(res)
metric = StateAPIMetric(t_end - t_start, len(res))
if key_suffix:
key = f"{state_api_fn.__name__}_{key_suffix}"
else:
key = state_api_fn.__name__
state_stats.calls[key].append(metric)
assert verify_cb(
res
), f"Calling State API failed. len(res)=({len(res)}): {err_msg}"
except Exception as e:
traceback.print_exc()
assert (
False
), f"Calling {state_api_fn.__name__}({kwargs}) failed with {repr(e)}."
finally:
state_stats.pending_calls -= 1
return res
def invoke_state_api_n(*args, **kwargs):
def verify():
NUM_API_CALL_SAMPLES = 10
for _ in range(NUM_API_CALL_SAMPLES):
invoke_state_api(*args, **kwargs)
return True
test_utils.wait_for_condition(verify, retry_interval_ms=2000, timeout=30)
def aggregate_perf_results(state_stats: StateAPIStats = GLOBAL_STATE_STATS):
"""Aggregate stats of state API calls
Return:
This returns a dict of below fields:
- max_{api_key_name}_latency_sec:
Max latency of call to {api_key_name}
- {api_key_name}_result_size_with_max_latency:
The size of the result (or the number of bytes for get_log API)
for the max latency invocation
- avg/p99/p95/p50_{api_key_name}_latency_sec:
The percentile latency stats
- avg_state_api_latency_sec:
The average latency of all the state apis tracked
"""
# Prevent iteration when modifying error
state_stats = deepcopy(state_stats)
perf_result = {}
for api_key_name, metrics in state_stats.calls.items():
# Per api aggregation
# Max latency
latency_key = f"max_{api_key_name}_latency_sec"
size_key = f"{api_key_name}_result_size_with_max_latency"
metric = max(metrics, key=lambda metric: metric.latency_sec)
perf_result[latency_key] = metric.latency_sec
perf_result[size_key] = metric.result_size
latency_list = np.array([metric.latency_sec for metric in metrics])
# avg latency
key = f"avg_{api_key_name}_latency_sec"
perf_result[key] = np.average(latency_list)
# p99 latency
key = f"p99_{api_key_name}_latency_sec"
perf_result[key] = np.percentile(latency_list, 99)
# p95 latency
key = f"p95_{api_key_name}_latency_sec"
perf_result[key] = np.percentile(latency_list, 95)
# p50 latency
key = f"p50_{api_key_name}_latency_sec"
perf_result[key] = np.percentile(latency_list, 50)
all_state_api_latency = sum(
metric.latency_sec
for metric_samples in state_stats.calls.values()
for metric in metric_samples
)
perf_result["avg_state_api_latency_sec"] = (
(all_state_api_latency / state_stats.total_calls)
if state_stats.total_calls != 0
else -1
)
return perf_result
@ray.remote(num_cpus=0)
| StateAPIStats |
python | Farama-Foundation__Gymnasium | gymnasium/envs/box2d/car_dynamics.py | {
"start": 1323,
"end": 12259
} | class ____:
def __init__(self, world, init_angle, init_x, init_y):
self.world: Box2D.b2World = world
self.hull: Box2D.b2Body = self.world.CreateDynamicBody(
position=(init_x, init_y),
angle=init_angle,
fixtures=[
fixtureDef(
shape=polygonShape(
vertices=[(x * SIZE, y * SIZE) for x, y in HULL_POLY1]
),
density=1.0,
),
fixtureDef(
shape=polygonShape(
vertices=[(x * SIZE, y * SIZE) for x, y in HULL_POLY2]
),
density=1.0,
),
fixtureDef(
shape=polygonShape(
vertices=[(x * SIZE, y * SIZE) for x, y in HULL_POLY3]
),
density=1.0,
),
fixtureDef(
shape=polygonShape(
vertices=[(x * SIZE, y * SIZE) for x, y in HULL_POLY4]
),
density=1.0,
),
],
)
self.hull.color = (0.8, 0.0, 0.0)
self.wheels = []
self.fuel_spent = 0.0
WHEEL_POLY = [
(-WHEEL_W, +WHEEL_R),
(+WHEEL_W, +WHEEL_R),
(+WHEEL_W, -WHEEL_R),
(-WHEEL_W, -WHEEL_R),
]
for wx, wy in WHEELPOS:
front_k = 1.0 if wy > 0 else 1.0
w = self.world.CreateDynamicBody(
position=(init_x + wx * SIZE, init_y + wy * SIZE),
angle=init_angle,
fixtures=fixtureDef(
shape=polygonShape(
vertices=[
(x * front_k * SIZE, y * front_k * SIZE)
for x, y in WHEEL_POLY
]
),
density=0.1,
categoryBits=0x0020,
maskBits=0x001,
restitution=0.0,
),
)
w.wheel_rad = front_k * WHEEL_R * SIZE
w.color = WHEEL_COLOR
w.gas = 0.0
w.brake = 0.0
w.steer = 0.0
w.phase = 0.0 # wheel angle
w.omega = 0.0 # angular velocity
w.skid_start = None
w.skid_particle = None
rjd = revoluteJointDef(
bodyA=self.hull,
bodyB=w,
localAnchorA=(wx * SIZE, wy * SIZE),
localAnchorB=(0, 0),
enableMotor=True,
enableLimit=True,
maxMotorTorque=180 * 900 * SIZE * SIZE,
motorSpeed=0,
lowerAngle=-0.4,
upperAngle=+0.4,
)
w.joint = self.world.CreateJoint(rjd)
w.tiles = set()
w.userData = w
self.wheels.append(w)
self.drawlist = self.wheels + [self.hull]
self.particles = []
def gas(self, gas):
"""control: rear wheel drive
Args:
gas (float): How much gas gets applied. Gets clipped between 0 and 1.
"""
gas = np.clip(gas, 0, 1)
for w in self.wheels[2:4]:
diff = gas - w.gas
if diff > 0.1:
diff = 0.1 # gradually increase, but stop immediately
w.gas += diff
def brake(self, b):
"""control: brake
Args:
b (0..1): Degree to which the brakes are applied. More than 0.9 blocks the wheels to zero rotation
"""
for w in self.wheels:
w.brake = b
def steer(self, s):
"""control: steer
Args:
s (-1..1): target position, it takes time to rotate steering wheel from side-to-side
"""
self.wheels[0].steer = s
self.wheels[1].steer = s
def step(self, dt):
for w in self.wheels:
# Steer each wheel
dir = np.sign(w.steer - w.joint.angle)
val = abs(w.steer - w.joint.angle)
w.joint.motorSpeed = dir * min(50.0 * val, 3.0)
# Position => friction_limit
grass = True
friction_limit = FRICTION_LIMIT * 0.6 # Grass friction if no tile
for tile in w.tiles:
friction_limit = max(
friction_limit, FRICTION_LIMIT * tile.road_friction
)
grass = False
# Force
forw = w.GetWorldVector((0, 1))
side = w.GetWorldVector((1, 0))
v = w.linearVelocity
vf = forw[0] * v[0] + forw[1] * v[1] # forward speed
vs = side[0] * v[0] + side[1] * v[1] # side speed
# WHEEL_MOMENT_OF_INERTIA*np.square(w.omega)/2 = E -- energy
# WHEEL_MOMENT_OF_INERTIA*w.omega * domega/dt = dE/dt = W -- power
# domega = dt*W/WHEEL_MOMENT_OF_INERTIA/w.omega
# add small coef not to divide by zero
w.omega += (
dt
* ENGINE_POWER
* w.gas
/ WHEEL_MOMENT_OF_INERTIA
/ (abs(w.omega) + 5.0)
)
self.fuel_spent += dt * ENGINE_POWER * w.gas
if w.brake >= 0.9:
w.omega = 0
elif w.brake > 0:
BRAKE_FORCE = 15 # radians per second
dir = -np.sign(w.omega)
val = BRAKE_FORCE * w.brake
if abs(val) > abs(w.omega):
val = abs(w.omega) # low speed => same as = 0
w.omega += dir * val
w.phase += w.omega * dt
vr = w.omega * w.wheel_rad # rotating wheel speed
f_force = -vf + vr # force direction is direction of speed difference
p_force = -vs
# Physically correct is to always apply friction_limit until speed is equal.
# But dt is finite, that will lead to oscillations if difference is already near zero.
# Random coefficient to cut oscillations in few steps (have no effect on friction_limit)
f_force *= 205000 * SIZE * SIZE
p_force *= 205000 * SIZE * SIZE
force = np.sqrt(np.square(f_force) + np.square(p_force))
# Skid trace
if abs(force) > 2.0 * friction_limit:
if (
w.skid_particle
and w.skid_particle.grass == grass
and len(w.skid_particle.poly) < 30
):
w.skid_particle.poly.append((w.position[0], w.position[1]))
elif w.skid_start is None:
w.skid_start = w.position
else:
w.skid_particle = self._create_particle(
w.skid_start, w.position, grass
)
w.skid_start = None
else:
w.skid_start = None
w.skid_particle = None
if abs(force) > friction_limit:
f_force /= force
p_force /= force
force = friction_limit # Correct physics here
f_force *= force
p_force *= force
w.omega -= dt * f_force * w.wheel_rad / WHEEL_MOMENT_OF_INERTIA
w.ApplyForceToCenter(
(
p_force * side[0] + f_force * forw[0],
p_force * side[1] + f_force * forw[1],
),
True,
)
def draw(self, surface, zoom, translation, angle, draw_particles=True):
import pygame.draw
if draw_particles:
for p in self.particles:
poly = [pygame.math.Vector2(c).rotate_rad(angle) for c in p.poly]
poly = [
(
coords[0] * zoom + translation[0],
coords[1] * zoom + translation[1],
)
for coords in poly
]
pygame.draw.lines(
surface, color=p.color, points=poly, width=2, closed=False
)
for obj in self.drawlist:
for f in obj.fixtures:
trans = f.body.transform
path = [trans * v for v in f.shape.vertices]
path = [(coords[0], coords[1]) for coords in path]
path = [pygame.math.Vector2(c).rotate_rad(angle) for c in path]
path = [
(
coords[0] * zoom + translation[0],
coords[1] * zoom + translation[1],
)
for coords in path
]
color = [int(c * 255) for c in obj.color]
pygame.draw.polygon(surface, color=color, points=path)
if "phase" not in obj.__dict__:
continue
a1 = obj.phase
a2 = obj.phase + 1.2 # radians
s1 = math.sin(a1)
s2 = math.sin(a2)
c1 = math.cos(a1)
c2 = math.cos(a2)
if s1 > 0 and s2 > 0:
continue
if s1 > 0:
c1 = np.sign(c1)
if s2 > 0:
c2 = np.sign(c2)
white_poly = [
(-WHEEL_W * SIZE, +WHEEL_R * c1 * SIZE),
(+WHEEL_W * SIZE, +WHEEL_R * c1 * SIZE),
(+WHEEL_W * SIZE, +WHEEL_R * c2 * SIZE),
(-WHEEL_W * SIZE, +WHEEL_R * c2 * SIZE),
]
white_poly = [trans * v for v in white_poly]
white_poly = [(coords[0], coords[1]) for coords in white_poly]
white_poly = [
pygame.math.Vector2(c).rotate_rad(angle) for c in white_poly
]
white_poly = [
(
coords[0] * zoom + translation[0],
coords[1] * zoom + translation[1],
)
for coords in white_poly
]
pygame.draw.polygon(surface, color=WHEEL_WHITE, points=white_poly)
def _create_particle(self, point1, point2, grass):
class Particle:
pass
p = Particle()
p.color = WHEEL_COLOR if not grass else MUD_COLOR
p.ttl = 1
p.poly = [(point1[0], point1[1]), (point2[0], point2[1])]
p.grass = grass
self.particles.append(p)
while len(self.particles) > 30:
self.particles.pop(0)
return p
def destroy(self):
self.world.DestroyBody(self.hull)
self.hull = None
for w in self.wheels:
self.world.DestroyBody(w)
self.wheels = []
| Car |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query_any_decorator.py | {
"start": 1616,
"end": 1996
} | class ____:
def decorator(self, f, *args, **kwargs):
pass
app = Application()
@app.decorator
def test_local_variable_method_decorator():
return None
# Test that we can still match on ignored decorators (`@IgnoreDecorator`) with DSL queries.
def decorator_ignored(f):
return f
@decorator_ignored
def source_on_decorator_ignored(x):
return x
| Application |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 963613,
"end": 964056
} | class ____(sgqlc.types.Type):
"""An individual package"""
__schema__ = github_schema
__field_names__ = ("ecosystem", "name")
ecosystem = sgqlc.types.Field(sgqlc.types.non_null(SecurityAdvisoryEcosystem), graphql_name="ecosystem")
"""The ecosystem the package belongs to, e.g. RUBYGEMS, NPM"""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The package name"""
| SecurityAdvisoryPackage |
python | pydantic__pydantic | pydantic-core/tests/validators/test_dataclasses.py | {
"start": 8396,
"end": 41263
} | class ____:
a: str
b: bool
@pytest.mark.parametrize(
'revalidate_instances,input_value,expected',
[
('always', {'a': 'hello', 'b': True}, {'a': 'hello', 'b': True}),
('always', FooDataclass(a='hello', b=True), {'a': 'hello', 'b': True}),
('always', FooDataclassSame(a='hello', b=True), {'a': 'hello', 'b': True}),
# no error because we only look for fields in schema['fields']
('always', FooDataclassMore(a='hello', b=True, c='more'), {'a': 'hello', 'b': True}),
('always', FooDataclassSame(a='hello', b='wrong'), Err(r'b\s+Input should be a valid boolean,')),
('always', DuplicateDifferent(a='hello', b=True), Err('should be a dictionary or an instance of FooDataclass')),
# revalidate_instances='subclass-instances'
('subclass-instances', {'a': 'hello', 'b': True}, {'a': 'hello', 'b': True}),
('subclass-instances', FooDataclass(a='hello', b=True), {'a': 'hello', 'b': True}),
('subclass-instances', FooDataclass(a=b'hello', b='true'), {'a': b'hello', 'b': 'true'}),
('subclass-instances', FooDataclassSame(a='hello', b=True), {'a': 'hello', 'b': True}),
('subclass-instances', FooDataclassSame(a=b'hello', b='true'), {'a': 'hello', 'b': True}),
# no error because we only look for fields in schema['fields']
('subclass-instances', FooDataclassMore(a='hello', b=True, c='more'), {'a': 'hello', 'b': True}),
('subclass-instances', FooDataclassSame(a='hello', b='wrong'), Err(r'b\s+Input should be a valid boolean,')),
('subclass-instances', DuplicateDifferent(a='hello', b=True), Err('dictionary or an instance of FooDataclass')),
# revalidate_instances='never'
('never', {'a': 'hello', 'b': True}, {'a': 'hello', 'b': True}),
('never', FooDataclass(a='hello', b=True), {'a': 'hello', 'b': True}),
('never', FooDataclassSame(a='hello', b=True), {'a': 'hello', 'b': True}),
('never', FooDataclassMore(a='hello', b=True, c='more'), {'a': 'hello', 'b': True, 'c': 'more'}),
('never', FooDataclassMore(a='hello', b='wrong', c='more'), {'a': 'hello', 'b': 'wrong', 'c': 'more'}),
('never', DuplicateDifferent(a='hello', b=True), Err('should be a dictionary or an instance of FooDataclass')),
],
)
def test_dataclass_subclass(revalidate_instances, input_value, expected):
schema = core_schema.dataclass_schema(
FooDataclass,
core_schema.dataclass_args_schema(
'FooDataclass',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema()),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema()),
],
extra_behavior='forbid',
),
['a', 'b'],
revalidate_instances=revalidate_instances,
)
v = SchemaValidator(schema)
if isinstance(expected, Err):
with pytest.raises(ValidationError, match=expected.message) as exc_info:
print(v.validate_python(input_value))
# debug(exc_info.value.errors(include_url=False))
if expected.errors is not None:
assert exc_info.value.errors(include_url=False) == expected.errors
else:
dc = v.validate_python(input_value)
assert dataclasses.is_dataclass(dc)
assert dataclasses.asdict(dc) == expected
def test_dataclass_subclass_strict_never_revalidate():
v = SchemaValidator(
core_schema.dataclass_schema(
FooDataclass,
core_schema.dataclass_args_schema(
'FooDataclass',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema()),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema()),
],
),
['a', 'b'],
revalidate_instances='never',
strict=True,
)
)
foo = FooDataclass(a='hello', b=True)
assert v.validate_python(foo) is foo
sub_foo = FooDataclassSame(a='hello', b=True)
assert v.validate_python(sub_foo) is sub_foo
# this fails but that's fine, in realty `ArgsKwargs` should only be used via validate_init
with pytest.raises(ValidationError, match='Input should be an instance of FooDataclass'):
v.validate_python(ArgsKwargs((), {'a': 'hello', 'b': True}))
def test_dataclass_subclass_subclass_revalidate():
v = SchemaValidator(
core_schema.dataclass_schema(
FooDataclass,
core_schema.dataclass_args_schema(
'FooDataclass',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema()),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema()),
],
),
['a', 'b'],
revalidate_instances='subclass-instances',
strict=True,
)
)
foo = FooDataclass(a='hello', b=True)
assert v.validate_python(foo) is foo
sub_foo = FooDataclassSame(a='hello', b='True')
sub_foo2 = v.validate_python(sub_foo)
assert sub_foo2 is not sub_foo
assert type(sub_foo2) is FooDataclass
assert dataclasses.asdict(sub_foo2) == dict(a='hello', b=True)
def test_dataclass_post_init():
@dataclasses.dataclass
class Foo:
a: str
b: bool
def __post_init__(self):
self.a = self.a.upper()
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema()),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema()),
],
),
['a', 'b'],
post_init=True,
)
v = SchemaValidator(schema)
foo = v.validate_python({'a': 'hello', 'b': True})
assert foo.a == 'HELLO'
assert foo.b is True
def test_dataclass_post_init_args():
c_value = None
@dataclasses.dataclass
class Foo:
a: str
b: bool
c: dataclasses.InitVar[int]
def __post_init__(self, c: int):
nonlocal c_value
c_value = c
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema()),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema()),
core_schema.dataclass_field(name='c', schema=core_schema.int_schema(), init_only=True),
],
collect_init_only=True,
),
['a', 'b'],
post_init=True,
)
v = SchemaValidator(schema)
foo = v.validate_python({'a': b'hello', 'b': 'true', 'c': '42'})
assert foo.a == 'hello'
assert foo.b is True
assert not hasattr(foo, 'c')
assert c_value == 42
def test_dataclass_post_init_args_multiple():
dc_args = None
@dataclasses.dataclass
class Foo:
a: str
b: dataclasses.InitVar[bool]
c: dataclasses.InitVar[int]
def __post_init__(self, *args):
nonlocal dc_args
dc_args = args
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema()),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema(), init_only=True),
core_schema.dataclass_field(name='c', schema=core_schema.int_schema(), init_only=True),
],
collect_init_only=True,
),
['a', 'b'],
post_init=True,
)
v = SchemaValidator(schema)
foo = v.validate_python({'a': b'hello', 'b': 'true', 'c': '42'})
assert dataclasses.asdict(foo) == {'a': 'hello'}
assert dc_args == (True, 42)
@pytest.mark.parametrize(
'revalidate_instances,input_value,expected',
[
('always', {'a': b'hello', 'b': 'true'}, {'a': 'hello', 'b': True}),
('always', FooDataclass(a='hello', b=True), {'a': 'hello', 'b': True}),
('always', FooDataclass(a=b'hello', b='true'), {'a': 'hello', 'b': True}),
('never', {'a': b'hello', 'b': 'true'}, {'a': 'hello', 'b': True}),
('never', FooDataclass(a='hello', b=True), {'a': 'hello', 'b': True}),
('never', FooDataclass(a=b'hello', b='true'), {'a': b'hello', 'b': 'true'}),
],
)
def test_dataclass_exact_validation(revalidate_instances, input_value, expected):
schema = core_schema.dataclass_schema(
FooDataclass,
core_schema.dataclass_args_schema(
'FooDataclass',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema()),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema()),
],
),
['a', 'b'],
revalidate_instances=revalidate_instances,
)
v = SchemaValidator(schema)
foo = v.validate_python(input_value)
assert dataclasses.asdict(foo) == expected
def test_dataclass_field_after_validator():
@dataclasses.dataclass
class Foo:
a: int
b: str
@classmethod
def validate_b(cls, v: str, info: core_schema.ValidationInfo) -> str:
assert v == 'hello'
assert info.field_name == 'b'
assert info.data == {'a': 1}
return 'hello world!'
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.int_schema()),
core_schema.dataclass_field(
name='b',
schema=core_schema.with_info_after_validator_function(Foo.validate_b, core_schema.str_schema()),
),
],
),
['a', 'b'],
)
v = SchemaValidator(schema)
foo = v.validate_python({'a': 1, 'b': b'hello'})
assert dataclasses.asdict(foo) == {'a': 1, 'b': 'hello world!'}
def test_dataclass_field_plain_validator():
@dataclasses.dataclass
class Foo:
a: int
b: str
@classmethod
def validate_b(cls, v: bytes, info: core_schema.ValidationInfo) -> str:
assert v == b'hello'
assert info.field_name == 'b'
assert info.data == {'a': 1}
return 'hello world!'
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.int_schema()),
core_schema.dataclass_field(
name='b', schema=core_schema.with_info_plain_validator_function(Foo.validate_b)
),
],
),
['a', 'b'],
)
v = SchemaValidator(schema)
foo = v.validate_python({'a': 1, 'b': b'hello'})
assert dataclasses.asdict(foo) == {'a': 1, 'b': 'hello world!'}
def test_dataclass_field_before_validator():
@dataclasses.dataclass
class Foo:
a: int
b: str
@classmethod
def validate_b(cls, v: bytes, info: core_schema.ValidationInfo) -> bytes:
assert v == b'hello'
assert info.field_name == 'b'
assert info.data == {'a': 1}
return b'hello world!'
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.int_schema()),
core_schema.dataclass_field(
name='b',
schema=core_schema.with_info_before_validator_function(Foo.validate_b, core_schema.str_schema()),
),
],
),
['a', 'b'],
)
v = SchemaValidator(schema)
foo = v.validate_python({'a': 1, 'b': b'hello'})
assert dataclasses.asdict(foo) == {'a': 1, 'b': 'hello world!'}
def test_dataclass_field_wrap_validator1():
@dataclasses.dataclass
class Foo:
a: int
b: str
@classmethod
def validate_b(
cls, v: bytes, nxt: core_schema.ValidatorFunctionWrapHandler, info: core_schema.ValidationInfo
) -> str:
assert v == b'hello'
v = nxt(v)
assert v == 'hello'
assert info.field_name == 'b'
assert info.data == {'a': 1}
return 'hello world!'
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.int_schema()),
core_schema.dataclass_field(
name='b',
schema=core_schema.with_info_wrap_validator_function(Foo.validate_b, core_schema.str_schema()),
),
],
),
['a', 'b'],
)
v = SchemaValidator(schema)
foo = v.validate_python({'a': 1, 'b': b'hello'})
assert dataclasses.asdict(foo) == {'a': 1, 'b': 'hello world!'}
def test_dataclass_field_wrap_validator2():
@dataclasses.dataclass
class Foo:
a: int
b: str
@classmethod
def validate_b(
cls, v: bytes, nxt: core_schema.ValidatorFunctionWrapHandler, info: core_schema.ValidationInfo
) -> bytes:
assert v == b'hello'
assert info.field_name == 'b'
assert info.data == {'a': 1}
return nxt(b'hello world!')
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.int_schema()),
core_schema.dataclass_field(
name='b',
schema=core_schema.with_info_wrap_validator_function(Foo.validate_b, core_schema.str_schema()),
),
],
),
['a', 'b'],
)
v = SchemaValidator(schema)
foo = v.validate_python({'a': 1, 'b': b'hello'})
assert dataclasses.asdict(foo) == {'a': 1, 'b': 'hello world!'}
def test_dataclass_self_init():
@dataclasses.dataclass(init=False)
class Foo:
a: str
b: bool
def __init__(self, *args, **kwargs):
v.validate_python(ArgsKwargs(args, kwargs), self_instance=self)
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema(), kw_only=False),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema(), kw_only=False),
],
),
['a', 'b'],
)
v = SchemaValidator(schema)
foo = Foo(b'hello', 'True')
assert dataclasses.is_dataclass(foo)
assert dataclasses.asdict(foo) == {'a': 'hello', 'b': True}
def test_dataclass_self_init_alias():
@dataclasses.dataclass(init=False)
class Foo:
a: str
b: bool
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema(), validation_alias='aAlias'),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema(), validation_alias=['bAlias', 0]),
],
),
['a', 'b'],
)
v = SchemaValidator(schema)
def __init__(self, *args, **kwargs):
v.validate_python(ArgsKwargs(args, kwargs), self_instance=self)
Foo.__init__ = __init__
foo = Foo(aAlias=b'hello', bAlias=['True'])
assert dataclasses.is_dataclass(foo)
assert dataclasses.asdict(foo) == {'a': 'hello', 'b': True}
with pytest.raises(ValidationError) as exc_info:
Foo(aAlias=b'hello', bAlias=['wrong'])
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'bool_parsing',
'loc': ('bAlias', 0),
'msg': 'Input should be a valid boolean, unable to interpret input',
'input': 'wrong',
}
]
def test_dataclass_self_init_alias_field_name():
@dataclasses.dataclass(init=False)
class Foo:
a: str
b: bool
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema(), validation_alias='aAlias'),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema(), validation_alias=['bAlias', 0]),
],
),
['a', 'b'],
config={'loc_by_alias': False},
)
v = SchemaValidator(schema)
def __init__(self, *args, **kwargs):
v.validate_python(ArgsKwargs(args, kwargs), self_instance=self)
Foo.__init__ = __init__
foo = Foo(aAlias=b'hello', bAlias=['True'])
assert dataclasses.asdict(foo) == {'a': 'hello', 'b': True}
with pytest.raises(ValidationError) as exc_info:
Foo(aAlias=b'hello', bAlias=['wrong'])
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{
'type': 'bool_parsing',
'loc': ('b',),
'msg': 'Input should be a valid boolean, unable to interpret input',
'input': 'wrong',
}
]
def test_dataclass_self_init_post_init():
calls = []
@dataclasses.dataclass(init=False)
class Foo:
a: str
b: bool
# _: dataclasses.KW_ONLY
c: dataclasses.InitVar[int]
def __init__(self, *args, **kwargs):
v.validate_python(ArgsKwargs(args, kwargs), self_instance=self)
def __post_init__(self, c):
calls.append(c)
schema = core_schema.dataclass_schema(
Foo,
core_schema.dataclass_args_schema(
'Foo',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema(), kw_only=False),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema(), kw_only=False),
core_schema.dataclass_field(name='c', schema=core_schema.int_schema(), init_only=True),
],
collect_init_only=True,
),
['a', 'b', 'c'],
post_init=True,
)
v = SchemaValidator(schema)
foo = Foo(b'hello', 'True', c='123')
assert dataclasses.is_dataclass(foo)
assert dataclasses.asdict(foo) == {'a': 'hello', 'b': True}
assert calls == [123]
def test_dataclass_validate_assignment():
schema = core_schema.dataclass_schema(
FooDataclass,
core_schema.dataclass_args_schema(
'FooDataclass',
[
core_schema.dataclass_field(name='a', schema=core_schema.str_schema(), kw_only=False),
core_schema.dataclass_field(name='b', schema=core_schema.bool_schema(), kw_only=False),
],
),
['a', 'b'],
)
v = SchemaValidator(schema)
foo = v.validate_python({'a': 'hello', 'b': 'True'})
assert dataclasses.asdict(foo) == {'a': 'hello', 'b': True}
v.validate_assignment(foo, 'a', b'world')
assert dataclasses.asdict(foo) == {'a': 'world', 'b': True}
with pytest.raises(ValidationError) as exc_info:
v.validate_assignment(foo, 'a', 123)
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{'type': 'string_type', 'loc': ('a',), 'msg': 'Input should be a valid string', 'input': 123}
]
with pytest.raises(ValidationError) as exc_info:
v.validate_assignment(foo, 'c', '123')
assert exc_info.value.errors(include_url=False) == [
{
'type': 'no_such_attribute',
'loc': ('c',),
'msg': "Object has no attribute 'c'",
'input': '123',
'ctx': {'attribute': 'c'},
}
]
assert not hasattr(foo, 'c')
# wrong arguments
with pytest.raises(AttributeError, match="'str' object has no attribute 'a'"):
v.validate_assignment('field_a', 'c', 123)
def test_validate_assignment_function():
@dataclasses.dataclass
class MyDataclass:
field_a: str
field_b: int
field_c: int
calls = []
def func(x, info):
calls.append(str(info))
return x * 2
v = SchemaValidator(
core_schema.dataclass_schema(
MyDataclass,
core_schema.dataclass_args_schema(
'MyDataclass',
[
core_schema.dataclass_field('field_a', core_schema.str_schema()),
core_schema.dataclass_field(
'field_b',
core_schema.with_info_after_validator_function(func, core_schema.int_schema()),
),
core_schema.dataclass_field('field_c', core_schema.int_schema()),
],
),
['field_a', 'field_b', 'field_c'],
)
)
m = v.validate_python({'field_a': 'x', 'field_b': 123, 'field_c': 456})
assert m.field_a == 'x'
assert m.field_b == 246
assert m.field_c == 456
assert calls == ["ValidationInfo(config=None, context=None, data={'field_a': 'x'}, field_name='field_b')"]
v.validate_assignment(m, 'field_b', '111')
assert m.field_b == 222
assert calls == [
"ValidationInfo(config=None, context=None, data={'field_a': 'x'}, field_name='field_b')",
"ValidationInfo(config=None, context=None, data={'field_a': 'x', 'field_c': 456}, field_name='field_b')",
]
def test_frozen():
@dataclasses.dataclass
class MyModel:
f: str
v = SchemaValidator(
core_schema.dataclass_schema(
MyModel,
core_schema.dataclass_args_schema('MyModel', [core_schema.dataclass_field('f', core_schema.str_schema())]),
['f'],
frozen=True,
)
)
m = v.validate_python({'f': 'x'})
assert m.f == 'x'
with pytest.raises(ValidationError) as exc_info:
v.validate_assignment(m, 'f', 'y')
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{'type': 'frozen_instance', 'loc': (), 'msg': 'Instance is frozen', 'input': 'y'}
]
def test_frozen_field():
@dataclasses.dataclass
class MyModel:
f: str
v = SchemaValidator(
core_schema.dataclass_schema(
MyModel,
core_schema.dataclass_args_schema(
'MyModel', [core_schema.dataclass_field('f', core_schema.str_schema(), frozen=True)]
),
['f'],
)
)
m = v.validate_python({'f': 'x'})
assert m.f == 'x'
with pytest.raises(ValidationError) as exc_info:
v.validate_assignment(m, 'f', 'y')
# insert_assert(exc_info.value.errors(include_url=False))
assert exc_info.value.errors(include_url=False) == [
{'type': 'frozen_field', 'loc': ('f',), 'msg': 'Field is frozen', 'input': 'y'}
]
@pytest.mark.parametrize(
'config,schema_extra_behavior_kw,validate_fn_extra_kw',
[
(core_schema.CoreConfig(extra_fields_behavior='ignore'), {}, None),
(core_schema.CoreConfig(extra_fields_behavior='ignore'), {'extra_behavior': None}, None),
(core_schema.CoreConfig(), {'extra_behavior': 'ignore'}, None),
(None, {'extra_behavior': 'ignore'}, None),
(core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': 'ignore'}, None),
(core_schema.CoreConfig(extra_fields_behavior='allow'), {}, 'ignore'),
(core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': None}, 'ignore'),
(core_schema.CoreConfig(), {'extra_behavior': 'allow'}, 'ignore'),
(None, {'extra_behavior': 'allow'}, 'ignore'),
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': 'allow'}, 'ignore'),
],
)
def test_extra_behavior_ignore(
config: Union[core_schema.CoreConfig, None],
schema_extra_behavior_kw: dict[str, Any],
validate_fn_extra_kw: Union[ExtraBehavior, None],
):
@dataclasses.dataclass
class MyModel:
f: str
v = SchemaValidator(
core_schema.dataclass_schema(
MyModel,
core_schema.dataclass_args_schema(
'MyModel', [core_schema.dataclass_field('f', core_schema.str_schema())], **schema_extra_behavior_kw
),
['f'],
),
config=config,
)
m: MyModel = v.validate_python({'f': 'x', 'extra_field': 123}, extra=validate_fn_extra_kw)
assert m.f == 'x'
assert not hasattr(m, 'extra_field')
v.validate_assignment(m, 'f', 'y', extra=validate_fn_extra_kw)
assert m.f == 'y'
with pytest.raises(ValidationError) as exc_info:
v.validate_assignment(m, 'not_f', 'xyz', extra=validate_fn_extra_kw)
assert exc_info.value.errors(include_url=False) == [
{
'type': 'no_such_attribute',
'loc': ('not_f',),
'msg': "Object has no attribute 'not_f'",
'input': 'xyz',
'ctx': {'attribute': 'not_f'},
}
]
assert not hasattr(m, 'not_f')
@pytest.mark.parametrize(
'config,schema_extra_behavior_kw,validate_fn_extra_kw',
[
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {}, None),
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': None}, None),
(core_schema.CoreConfig(), {'extra_behavior': 'forbid'}, None),
(None, {'extra_behavior': 'forbid'}, None),
(core_schema.CoreConfig(extra_fields_behavior='ignore'), {'extra_behavior': 'forbid'}, None),
(core_schema.CoreConfig(extra_fields_behavior='ignore'), {}, 'forbid'),
(core_schema.CoreConfig(extra_fields_behavior='ignore'), {'extra_behavior': None}, 'forbid'),
(core_schema.CoreConfig(), {'extra_behavior': 'ignore'}, 'forbid'),
(None, {'extra_behavior': 'ignore'}, 'forbid'),
(core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': 'ignore'}, 'forbid'),
(core_schema.CoreConfig(), {}, 'forbid'),
(core_schema.CoreConfig(), {'extra_behavior': None}, 'forbid'),
(None, {'extra_behavior': None}, 'forbid'),
],
)
def test_extra_behavior_forbid(
config: Union[core_schema.CoreConfig, None],
schema_extra_behavior_kw: dict[str, Any],
validate_fn_extra_kw: Union[ExtraBehavior, None],
):
@dataclasses.dataclass
class MyModel:
f: str
v = SchemaValidator(
core_schema.dataclass_schema(
MyModel,
core_schema.dataclass_args_schema(
'MyModel', [core_schema.dataclass_field('f', core_schema.str_schema())], **schema_extra_behavior_kw
),
['f'],
),
config=config,
)
m: MyModel = v.validate_python({'f': 'x'}, extra=validate_fn_extra_kw)
assert m.f == 'x'
v.validate_assignment(m, 'f', 'y', extra=validate_fn_extra_kw)
assert m.f == 'y'
with pytest.raises(ValidationError) as exc_info:
v.validate_assignment(m, 'not_f', 'xyz', extra=validate_fn_extra_kw)
assert exc_info.value.errors(include_url=False) == [
{
'type': 'no_such_attribute',
'loc': ('not_f',),
'msg': "Object has no attribute 'not_f'",
'input': 'xyz',
'ctx': {'attribute': 'not_f'},
}
]
assert not hasattr(m, 'not_f')
@pytest.mark.parametrize(
'config,schema_extra_behavior_kw,validate_fn_extra_kw',
[
(core_schema.CoreConfig(extra_fields_behavior='allow'), {}, None),
(core_schema.CoreConfig(extra_fields_behavior='allow'), {'extra_behavior': None}, None),
(core_schema.CoreConfig(), {'extra_behavior': 'allow'}, None),
(None, {'extra_behavior': 'allow'}, None),
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': 'allow'}, None),
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {}, 'allow'),
(core_schema.CoreConfig(extra_fields_behavior='forbid'), {'extra_behavior': None}, 'allow'),
(core_schema.CoreConfig(), {'extra_behavior': 'forbid'}, 'allow'),
(None, {'extra_behavior': 'forbid'}, 'allow'),
(core_schema.CoreConfig(extra_fields_behavior='ignore'), {'extra_behavior': 'forbid'}, 'allow'),
(core_schema.CoreConfig(), {}, 'allow'),
(core_schema.CoreConfig(), {'extra_behavior': None}, 'allow'),
(None, {'extra_behavior': None}, 'allow'),
],
)
def test_extra_behavior_allow(
config: Union[core_schema.CoreConfig, None],
schema_extra_behavior_kw: dict[str, Any],
validate_fn_extra_kw: Union[ExtraBehavior, None],
):
@dataclasses.dataclass
class MyModel:
f: str
v = SchemaValidator(
core_schema.dataclass_schema(
MyModel,
core_schema.dataclass_args_schema(
'MyModel', [core_schema.dataclass_field('f', core_schema.str_schema())], **schema_extra_behavior_kw
),
['f'],
config=config,
)
)
m: MyModel = v.validate_python({'f': 'x', 'extra_field': '123'}, extra=validate_fn_extra_kw)
assert m.f == 'x'
assert getattr(m, 'extra_field') == '123'
v.validate_assignment(m, 'f', 'y', extra=validate_fn_extra_kw)
assert m.f == 'y'
v.validate_assignment(m, 'not_f', '123', extra=validate_fn_extra_kw)
assert getattr(m, 'not_f') == '123'
def test_function_validator_wrapping_args_schema_after() -> None:
calls: list[Any] = []
def func(*args: Any) -> Any:
calls.append(args)
return args[0]
@dataclasses.dataclass
class Model:
number: int = 1
cs = core_schema.dataclass_schema(
Model,
core_schema.no_info_after_validator_function(
func,
core_schema.dataclass_args_schema(
'Model', [core_schema.dataclass_field('number', core_schema.int_schema())]
),
),
['number'],
)
v = SchemaValidator(cs)
instance: Model = v.validate_python({'number': 1})
assert instance.number == 1
assert calls == [(({'number': 1}, None),)]
v.validate_assignment(instance, 'number', 2)
assert instance.number == 2
assert calls == [(({'number': 1}, None),), (({'number': 2}, None),)]
def test_function_validator_wrapping_args_schema_before() -> None:
calls: list[Any] = []
def func(*args: Any) -> Any:
calls.append(args)
return args[0]
@dataclasses.dataclass
class Model:
number: int = 1
cs = core_schema.dataclass_schema(
Model,
core_schema.no_info_before_validator_function(
func,
core_schema.dataclass_args_schema(
'Model', [core_schema.dataclass_field('number', core_schema.int_schema())]
),
),
['number'],
)
v = SchemaValidator(cs)
instance: Model = v.validate_python({'number': 1})
assert instance.number == 1
assert calls == [({'number': 1},)]
v.validate_assignment(instance, 'number', 2)
assert instance.number == 2
assert calls == [({'number': 1},), ({'number': 2},)]
def test_function_validator_wrapping_args_schema_wrap() -> None:
calls: list[Any] = []
def func(*args: Any) -> Any:
assert len(args) == 2
input, handler = args
output = handler(input)
calls.append((input, output))
return output
@dataclasses.dataclass
class Model:
number: int = 1
cs = core_schema.dataclass_schema(
Model,
core_schema.no_info_wrap_validator_function(
func,
core_schema.dataclass_args_schema(
'Model', [core_schema.dataclass_field('number', core_schema.int_schema())]
),
),
['number'],
)
v = SchemaValidator(cs)
instance: Model = v.validate_python({'number': 1})
assert instance.number == 1
assert calls == [({'number': 1}, ({'number': 1}, None))]
v.validate_assignment(instance, 'number', 2)
assert instance.number == 2
assert calls == [({'number': 1}, ({'number': 1}, None)), ({'number': 2}, ({'number': 2}, None))]
@dataclasses.dataclass
| DuplicateDifferent |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 93507,
"end": 95948
} | class ____(fixtures.TestBase):
def _test(self, string, expected):
eq_(langhelpers.quoted_token_parser(string), expected)
def test_single(self):
self._test("name", ["name"])
def test_dotted(self):
self._test("schema.name", ["schema", "name"])
def test_dotted_quoted_left(self):
self._test('"Schema".name', ["Schema", "name"])
def test_dotted_quoted_left_w_quote_left_edge(self):
self._test('"""Schema".name', ['"Schema', "name"])
def test_dotted_quoted_left_w_quote_right_edge(self):
self._test('"Schema""".name', ['Schema"', "name"])
def test_dotted_quoted_left_w_quote_middle(self):
self._test('"Sch""ema".name', ['Sch"ema', "name"])
def test_dotted_quoted_right(self):
self._test('schema."SomeName"', ["schema", "SomeName"])
def test_dotted_quoted_right_w_quote_left_edge(self):
self._test('schema."""name"', ["schema", '"name'])
def test_dotted_quoted_right_w_quote_right_edge(self):
self._test('schema."name"""', ["schema", 'name"'])
def test_dotted_quoted_right_w_quote_middle(self):
self._test('schema."na""me"', ["schema", 'na"me'])
def test_quoted_single_w_quote_left_edge(self):
self._test('"""name"', ['"name'])
def test_quoted_single_w_quote_right_edge(self):
self._test('"name"""', ['name"'])
def test_quoted_single_w_quote_middle(self):
self._test('"na""me"', ['na"me'])
def test_dotted_quoted_left_w_dot_left_edge(self):
self._test('".Schema".name', [".Schema", "name"])
def test_dotted_quoted_left_w_dot_right_edge(self):
self._test('"Schema.".name', ["Schema.", "name"])
def test_dotted_quoted_left_w_dot_middle(self):
self._test('"Sch.ema".name', ["Sch.ema", "name"])
def test_dotted_quoted_right_w_dot_left_edge(self):
self._test('schema.".name"', ["schema", ".name"])
def test_dotted_quoted_right_w_dot_right_edge(self):
self._test('schema."name."', ["schema", "name."])
def test_dotted_quoted_right_w_dot_middle(self):
self._test('schema."na.me"', ["schema", "na.me"])
def test_quoted_single_w_dot_left_edge(self):
self._test('".name"', [".name"])
def test_quoted_single_w_dot_right_edge(self):
self._test('"name."', ["name."])
def test_quoted_single_w_dot_middle(self):
self._test('"na.me"', ["na.me"])
| QuotedTokenParserTest |
python | huggingface__transformers | src/transformers/models/mobilevit/modeling_mobilevit.py | {
"start": 12605,
"end": 18821
} | class ____(GradientCheckpointingLayer):
"""
MobileViT block: https://huggingface.co/papers/2110.02178
"""
def __init__(
self,
config: MobileViTConfig,
in_channels: int,
out_channels: int,
stride: int,
hidden_size: int,
num_stages: int,
dilation: int = 1,
) -> None:
super().__init__()
self.patch_width = config.patch_size
self.patch_height = config.patch_size
if stride == 2:
self.downsampling_layer = MobileViTInvertedResidual(
config,
in_channels=in_channels,
out_channels=out_channels,
stride=stride if dilation == 1 else 1,
dilation=dilation // 2 if dilation > 1 else 1,
)
in_channels = out_channels
else:
self.downsampling_layer = None
self.conv_kxk = MobileViTConvLayer(
config,
in_channels=in_channels,
out_channels=in_channels,
kernel_size=config.conv_kernel_size,
)
self.conv_1x1 = MobileViTConvLayer(
config,
in_channels=in_channels,
out_channels=hidden_size,
kernel_size=1,
use_normalization=False,
use_activation=False,
)
self.transformer = MobileViTTransformer(
config,
hidden_size=hidden_size,
num_stages=num_stages,
)
self.layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_eps)
self.conv_projection = MobileViTConvLayer(
config, in_channels=hidden_size, out_channels=in_channels, kernel_size=1
)
self.fusion = MobileViTConvLayer(
config, in_channels=2 * in_channels, out_channels=in_channels, kernel_size=config.conv_kernel_size
)
def unfolding(self, features: torch.Tensor) -> tuple[torch.Tensor, dict]:
patch_width, patch_height = self.patch_width, self.patch_height
patch_area = int(patch_width * patch_height)
batch_size, channels, orig_height, orig_width = features.shape
new_height = (
torch_int(torch.ceil(orig_height / patch_height) * patch_height)
if torch.jit.is_tracing()
else int(math.ceil(orig_height / patch_height) * patch_height)
)
new_width = (
torch_int(torch.ceil(orig_width / patch_width) * patch_width)
if torch.jit.is_tracing()
else int(math.ceil(orig_width / patch_width) * patch_width)
)
interpolate = False
if new_width != orig_width or new_height != orig_height:
# Note: Padding can be done, but then it needs to be handled in attention function.
features = nn.functional.interpolate(
features, size=(new_height, new_width), mode="bilinear", align_corners=False
)
interpolate = True
# number of patches along width and height
num_patch_width = new_width // patch_width
num_patch_height = new_height // patch_height
num_patches = num_patch_height * num_patch_width
# convert from shape (batch_size, channels, orig_height, orig_width)
# to the shape (batch_size * patch_area, num_patches, channels)
patches = features.reshape(
batch_size * channels * num_patch_height, patch_height, num_patch_width, patch_width
)
patches = patches.transpose(1, 2)
patches = patches.reshape(batch_size, channels, num_patches, patch_area)
patches = patches.transpose(1, 3)
patches = patches.reshape(batch_size * patch_area, num_patches, -1)
info_dict = {
"orig_size": (orig_height, orig_width),
"batch_size": batch_size,
"channels": channels,
"interpolate": interpolate,
"num_patches": num_patches,
"num_patches_width": num_patch_width,
"num_patches_height": num_patch_height,
}
return patches, info_dict
def folding(self, patches: torch.Tensor, info_dict: dict) -> torch.Tensor:
patch_width, patch_height = self.patch_width, self.patch_height
patch_area = int(patch_width * patch_height)
batch_size = info_dict["batch_size"]
channels = info_dict["channels"]
num_patches = info_dict["num_patches"]
num_patch_height = info_dict["num_patches_height"]
num_patch_width = info_dict["num_patches_width"]
# convert from shape (batch_size * patch_area, num_patches, channels)
# back to shape (batch_size, channels, orig_height, orig_width)
features = patches.contiguous().view(batch_size, patch_area, num_patches, -1)
features = features.transpose(1, 3)
features = features.reshape(
batch_size * channels * num_patch_height, num_patch_width, patch_height, patch_width
)
features = features.transpose(1, 2)
features = features.reshape(
batch_size, channels, num_patch_height * patch_height, num_patch_width * patch_width
)
if info_dict["interpolate"]:
features = nn.functional.interpolate(
features, size=info_dict["orig_size"], mode="bilinear", align_corners=False
)
return features
def forward(self, features: torch.Tensor) -> torch.Tensor:
# reduce spatial dimensions if needed
if self.downsampling_layer:
features = self.downsampling_layer(features)
residual = features
# local representation
features = self.conv_kxk(features)
features = self.conv_1x1(features)
# convert feature map to patches
patches, info_dict = self.unfolding(features)
# learn global representations
patches = self.transformer(patches)
patches = self.layernorm(patches)
# convert patches back to feature maps
features = self.folding(patches, info_dict)
features = self.conv_projection(features)
features = self.fusion(torch.cat((residual, features), dim=1))
return features
| MobileViTLayer |
python | ansible__ansible | lib/ansible/plugins/doc_fragments/connection_pipelining.py | {
"start": 167,
"end": 1182
} | class ____(object):
# common shelldocumentation fragment
DOCUMENTATION = """
options:
pipelining:
default: false
description:
- Pipelining reduces the number of connection operations required to execute a module on the remote server,
by executing many Ansible modules without actual file transfers.
- This can result in a very significant performance improvement when enabled.
- However this can conflict with privilege escalation (C(become)).
For example, when using sudo operations you must first disable C(requiretty) in the sudoers file for the target hosts,
which is why this feature is disabled by default.
env:
- name: ANSIBLE_PIPELINING
ini:
- section: defaults
key: pipelining
- section: connection
key: pipelining
type: boolean
vars:
- name: ansible_pipelining
"""
| ModuleDocFragment |
python | scipy__scipy | scipy/optimize/tests/test_optimize.py | {
"start": 39754,
"end": 72756
} | class ____(CheckOptimize):
def test_bfgs_nan(self):
# Test corner case where nan is fed to optimizer. See gh-2067.
def func(x):
return x
def fprime(x):
return np.ones_like(x)
x0 = [np.nan]
with np.errstate(over='ignore', invalid='ignore'):
x = optimize.fmin_bfgs(func, x0, fprime, disp=False)
assert np.isnan(func(x))
def test_bfgs_nan_return(self):
# Test corner cases where fun returns NaN. See gh-4793.
# First case: NaN from first call.
def func(x):
return np.nan
with np.errstate(invalid='ignore'):
result = optimize.minimize(func, 0)
assert np.isnan(result['fun'])
assert result['success'] is False
# Second case: NaN from second call.
def func(x):
return 0 if x == 0 else np.nan
def fprime(x):
return np.ones_like(x) # Steer away from zero.
with np.errstate(invalid='ignore'):
result = optimize.minimize(func, 0, jac=fprime)
assert np.isnan(result['fun'])
assert result['success'] is False
def test_bfgs_numerical_jacobian(self):
# BFGS with numerical Jacobian and a vector epsilon parameter.
# define the epsilon parameter using a random vector
rng = np.random.default_rng(1234)
epsilon = np.sqrt(np.spacing(1.)) * rng.random(len(self.solution))
params = optimize.fmin_bfgs(self.func, self.startparams,
epsilon=epsilon, args=(),
maxiter=self.maxiter, disp=False)
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_finite_differences_jac(self):
methods = ['BFGS', 'CG', 'TNC']
jacs = ['2-point', '3-point', None]
for method, jac in itertools.product(methods, jacs):
result = optimize.minimize(self.func, self.startparams,
method=method, jac=jac)
assert_allclose(self.func(result.x), self.func(self.solution),
atol=1e-6)
def test_finite_differences_hess(self):
# test that all the methods that require hess can use finite-difference
# For Newton-CG, trust-ncg, trust-krylov the FD estimated hessian is
# wrapped in a hessp function
# dogleg, trust-exact actually require true hessians at the moment, so
# they're excluded.
methods = ['trust-constr', 'Newton-CG', 'trust-ncg', 'trust-krylov']
hesses = FD_METHODS + (optimize.BFGS,)
for method, hess in itertools.product(methods, hesses):
if hess is optimize.BFGS:
hess = hess()
result = optimize.minimize(self.func, self.startparams,
method=method, jac=self.grad,
hess=hess)
assert result.success
# check that the methods demand some sort of Hessian specification
# Newton-CG creates its own hessp, and trust-constr doesn't need a hess
# specified either
methods = ['trust-ncg', 'trust-krylov', 'dogleg', 'trust-exact']
for method in methods:
with pytest.raises(ValueError):
optimize.minimize(self.func, self.startparams,
method=method, jac=self.grad,
hess=None)
def test_bfgs_gh_2169(self):
def f(x):
if x < 0:
return 1.79769313e+308
else:
return x + 1./x
xs = optimize.fmin_bfgs(f, [10.], disp=False)
assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4)
def test_bfgs_double_evaluations(self):
# check BFGS does not evaluate twice in a row at same point
def f(x):
xp = x[0]
assert xp not in seen
seen.add(xp)
return 10*x**2, 20*x
seen = set()
optimize.minimize(f, -100, method='bfgs', jac=True, tol=1e-7)
def test_l_bfgs_b(self):
# limited-memory bound-constrained BFGS algorithm
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
self.grad, args=(),
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
# Ensure that function call counts are 'known good'; these are from
# SciPy 0.7.0. Don't allow them to increase.
assert self.funccalls.c == 7, self.funccalls.c
assert self.gradcalls.c == 5, self.gradcalls.c
# Ensure that the function behaves the same; this is from SciPy 0.7.0
# test fixed in gh10673
assert_allclose(self.trace.t[3:5],
[[8.117083e-16, -5.196198e-01, 4.897617e-01],
[0., -0.52489628, 0.48753042]],
atol=1e-14, rtol=1e-7)
def test_l_bfgs_b_numjac(self):
# L-BFGS-B with numerical Jacobian
retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,
approx_grad=True,
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_funjac(self):
# L-BFGS-B with combined objective function and Jacobian
def fun(x):
return self.func(x), self.grad(x)
retval = optimize.fmin_l_bfgs_b(fun, self.startparams,
maxiter=self.maxiter)
(params, fopt, d) = retval
assert_allclose(self.func(params), self.func(self.solution),
atol=1e-6)
def test_l_bfgs_b_maxiter(self):
# gh7854
# Ensure that not more than maxiters are ever run.
class Callback:
def __init__(self):
self.nit = 0
self.fun = None
self.x = None
def __call__(self, x):
self.x = x
self.fun = optimize.rosen(x)
self.nit += 1
c = Callback()
res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b',
callback=c, options={'maxiter': 5})
assert_equal(res.nit, 5)
assert_almost_equal(res.x, c.x)
assert_almost_equal(res.fun, c.fun)
assert_equal(res.status, 1)
assert res.success is False
assert_equal(res.message,
'STOP: TOTAL NO. OF ITERATIONS REACHED LIMIT')
def test_minimize_l_bfgs_b(self):
# Minimize with L-BFGS-B method
opts = {'maxiter': self.maxiter}
r = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
options=opts)
assert_allclose(self.func(r.x), self.func(self.solution),
atol=1e-6)
assert self.gradcalls.c == r.njev
self.funccalls.c = self.gradcalls.c = 0
# approximate jacobian
ra = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', options=opts)
# check that function evaluations in approximate jacobian are counted
# assert_(ra.nfev > r.nfev)
assert self.funccalls.c == ra.nfev
assert_allclose(self.func(ra.x), self.func(self.solution),
atol=1e-6)
self.funccalls.c = self.gradcalls.c = 0
# approximate jacobian
ra = optimize.minimize(self.func, self.startparams, jac='3-point',
method='L-BFGS-B', options=opts)
assert self.funccalls.c == ra.nfev
assert_allclose(self.func(ra.x), self.func(self.solution),
atol=1e-6)
def test_minimize_l_bfgs_b_ftol(self):
# Check that the `ftol` parameter in l_bfgs_b works as expected
v0 = None
for tol in [1e-1, 1e-4, 1e-7, 1e-10]:
opts = {'maxiter': self.maxiter, 'ftol': tol}
sol = optimize.minimize(self.func, self.startparams,
method='L-BFGS-B', jac=self.grad,
options=opts)
v = self.func(sol.x)
if v0 is None:
v0 = v
else:
assert v < v0
assert_allclose(v, self.func(self.solution), rtol=tol)
def test_minimize_l_bfgs_maxls(self):
# check that the maxls is passed down to the Fortran routine
sol = optimize.minimize(optimize.rosen, np.array([-1.2, 1.0]),
method='L-BFGS-B', jac=optimize.rosen_der,
options={'maxls': 1})
assert not sol.success
def test_minimize_l_bfgs_b_maxfun_interruption(self):
# gh-6162
f = optimize.rosen
g = optimize.rosen_der
values = []
x0 = np.full(7, 1000)
def objfun(x):
value = f(x)
values.append(value)
return value
# Look for an interesting test case.
# Request a maxfun that stops at a particularly bad function
# evaluation somewhere between 100 and 300 evaluations.
low, medium, high = 30, 100, 300
optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)
v, k = max((y, i) for i, y in enumerate(values[medium:]))
maxfun = medium + k
# If the minimization strategy is reasonable,
# the minimize() result should not be worse than the best
# of the first 30 function evaluations.
target = min(values[:low])
xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)
assert_array_less(fmin, target)
def test_custom(self):
# This function comes from the documentation example.
def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,
maxiter=100, callback=None, **options):
bestx = x0
besty = fun(x0)
funcalls = 1
niter = 0
improved = True
stop = False
while improved and not stop and niter < maxiter:
improved = False
niter += 1
for dim in range(np.size(x0)):
for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:
testx = np.copy(bestx)
testx[dim] = s
testy = fun(testx, *args)
funcalls += 1
if testy < besty:
besty = testy
bestx = testx
improved = True
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
stop = True
break
return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,
nfev=funcalls, success=(niter > 1))
x0 = [1.35, 0.9, 0.8, 1.1, 1.2]
res = optimize.minimize(optimize.rosen, x0, method=custmin,
options=dict(stepsize=0.05))
assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)
def test_gh10771(self):
# check that minimize passes bounds and constraints to a custom
# minimizer without altering them.
bounds = [(-2, 2), (0, 3)]
constraints = 'constraints'
def custmin(fun, x0, **options):
assert options['bounds'] is bounds
assert options['constraints'] is constraints
return optimize.OptimizeResult()
x0 = [1, 1]
optimize.minimize(optimize.rosen, x0, method=custmin,
bounds=bounds, constraints=constraints)
def test_minimize_tol_parameter(self):
# Check that the minimize() tol= argument does something
def func(z):
x, y = z
return x**2*y**2 + x**4 + 1
def dfunc(z):
x, y = z
return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])
for method in ['nelder-mead', 'powell', 'cg', 'bfgs',
'newton-cg', 'l-bfgs-b', 'tnc',
'cobyla', 'cobyqa', 'slsqp']:
if method in ('nelder-mead', 'powell', 'cobyla', 'cobyqa'):
jac = None
else:
jac = dfunc
sol1 = optimize.minimize(func, [2, 2], jac=jac, tol=1e-10,
method=method)
sol2 = optimize.minimize(func, [2, 2], jac=jac, tol=1.0,
method=method)
assert func(sol1.x) < func(sol2.x), \
f"{method}: {func(sol1.x)} vs. {func(sol2.x)}"
@pytest.mark.fail_slow(10)
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.filterwarnings('ignore::RuntimeWarning') # See gh-18547
@pytest.mark.parametrize('method',
['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs',
'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc',
'fmin_slsqp'] + MINIMIZE_METHODS)
def test_minimize_callback_copies_array(self, method):
# Check that arrays passed to callbacks are not modified
# inplace by the optimizer afterward
if method in ('fmin_tnc', 'fmin_l_bfgs_b'):
def func(x):
return optimize.rosen(x), optimize.rosen_der(x)
else:
func = optimize.rosen
jac = optimize.rosen_der
hess = optimize.rosen_hess
x0 = np.zeros(10)
# Set options
kwargs = {}
if method.startswith('fmin'):
routine = getattr(optimize, method)
if method == 'fmin_slsqp':
kwargs['iter'] = 5
elif method == 'fmin_tnc':
kwargs['maxfun'] = 100
elif method in ('fmin', 'fmin_powell'):
kwargs['maxiter'] = 3500
else:
kwargs['maxiter'] = 5
else:
def routine(*a, **kw):
kw['method'] = method
return optimize.minimize(*a, **kw)
if method == 'tnc':
kwargs['options'] = dict(maxfun=100)
elif method == 'cobyla':
kwargs['options'] = dict(maxiter=100)
else:
kwargs['options'] = dict(maxiter=5)
if method in ('fmin_ncg',):
kwargs['fprime'] = jac
elif method in ('newton-cg',):
kwargs['jac'] = jac
elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
'trust-constr'):
kwargs['jac'] = jac
kwargs['hess'] = hess
# Run with callback
results = []
def callback(x, *args, **kwargs):
assert not isinstance(x, optimize.OptimizeResult)
results.append((x, np.copy(x)))
routine(func, x0, callback=callback, **kwargs)
# Check returned arrays coincide with their copies
# and have no memory overlap
assert len(results) > 2
assert all(np.all(x == y) for x, y in results)
combinations = itertools.combinations(results, 2)
assert not any(np.may_share_memory(x[0], y[0]) for x, y in combinations)
@pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg',
'bfgs', 'newton-cg', 'l-bfgs-b',
'tnc', 'cobyla', 'cobyqa', 'slsqp'])
def test_no_increase(self, method):
# Check that the solver doesn't return a value worse than the
# initial point.
def func(x):
return (x - 1)**2
def bad_grad(x):
# purposefully invalid gradient function, simulates a case
# where line searches start failing
return 2*(x - 1) * (-1) - 2
x0 = np.array([2.0])
f0 = func(x0)
jac = bad_grad
options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20)
if method in ['nelder-mead', 'powell', 'cobyla', 'cobyqa']:
jac = None
sol = optimize.minimize(func, x0, jac=jac, method=method,
options=options)
assert_equal(func(sol.x), sol.fun)
if method == 'slsqp':
pytest.xfail("SLSQP returns slightly worse")
assert func(sol.x) <= f0
def test_slsqp_respect_bounds(self):
# Regression test for gh-3108
def f(x):
return sum((x - np.array([1., 2., 3., 4.]))**2)
def cons(x):
a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])
return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])
x0 = np.array([0.5, 1., 1.5, 2.])
res = optimize.minimize(f, x0, method='slsqp',
constraints={'type': 'ineq', 'fun': cons})
assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)
@pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', 'CG', 'BFGS',
'Newton-CG', 'L-BFGS-B', 'SLSQP',
'trust-constr', 'dogleg', 'trust-ncg',
'trust-exact', 'trust-krylov',
'cobyqa'])
def test_respect_maxiter(self, method):
# Check that the number of iterations equals max_iter, assuming
# convergence doesn't establish before
MAXITER = 4
x0 = np.zeros(10)
sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,
optimize.rosen_hess, None, None)
# Set options
kwargs = {'method': method, 'options': dict(maxiter=MAXITER)}
if method in ('Newton-CG',):
kwargs['jac'] = sf.grad
elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',
'trust-constr'):
kwargs['jac'] = sf.grad
kwargs['hess'] = sf.hess
sol = optimize.minimize(sf.fun, x0, **kwargs)
assert sol.nit == MAXITER
assert sol.nfev >= sf.nfev
if hasattr(sol, 'njev'):
assert sol.njev >= sf.ngev
# method specific tests
if method == 'SLSQP':
assert sol.status == 9 # Iteration limit reached
elif method == 'cobyqa':
assert sol.status == 6 # Iteration limit reached
@pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell',
'fmin', 'fmin_powell'])
def test_runtime_warning(self, method):
x0 = np.zeros(10)
sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,
optimize.rosen_hess, None, None)
options = {"maxiter": 1, "disp": True}
with pytest.warns(RuntimeWarning,
match=r'Maximum number of iterations'):
if method.startswith('fmin'):
routine = getattr(optimize, method)
routine(sf.fun, x0, **options)
else:
optimize.minimize(sf.fun, x0, method=method, options=options)
def test_respect_maxiter_trust_constr_ineq_constraints(self):
# special case of minimization with trust-constr and inequality
# constraints to check maxiter limit is obeyed when using internal
# method 'tr_interior_point'
MAXITER = 4
f = optimize.rosen
jac = optimize.rosen_der
hess = optimize.rosen_hess
def fun(x):
return np.array([0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])
cons = ({'type': 'ineq',
'fun': fun},)
x0 = np.zeros(10)
sol = optimize.minimize(f, x0, constraints=cons, jac=jac, hess=hess,
method='trust-constr',
options=dict(maxiter=MAXITER))
assert sol.nit == MAXITER
def test_minimize_automethod(self):
def f(x):
return x**2
def cons(x):
return x - 2
x0 = np.array([10.])
sol_0 = optimize.minimize(f, x0)
sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq',
'fun': cons}])
sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])
sol_3 = optimize.minimize(f, x0,
constraints=[{'type': 'ineq', 'fun': cons}],
bounds=[(5, 10)])
sol_4 = optimize.minimize(f, x0,
constraints=[{'type': 'ineq', 'fun': cons}],
bounds=[(1, 10)])
for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:
assert sol.success
assert_allclose(sol_0.x, 0, atol=1e-7)
assert_allclose(sol_1.x, 2, atol=1e-7)
assert_allclose(sol_2.x, 5, atol=1e-7)
assert_allclose(sol_3.x, 5, atol=1e-7)
assert_allclose(sol_4.x, 2, atol=1e-7)
def test_minimize_coerce_args_param(self):
# Regression test for gh-3503
def Y(x, c):
return np.sum((x-c)**2)
def dY_dx(x, c=None):
return 2*(x-c)
c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])
rng = np.random.default_rng(1234)
xinit = rng.standard_normal(len(c))
optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method="BFGS")
def test_initial_step_scaling(self):
# Check that optimizer initial step is not huge even if the
# function and gradients are
scales = [1e-50, 1, 1e50]
methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG']
def f(x):
if first_step_size[0] is None and x[0] != x0[0]:
first_step_size[0] = abs(x[0] - x0[0])
if abs(x).max() > 1e4:
raise AssertionError("Optimization stepped far away!")
return scale*(x[0] - 1)**2
def g(x):
return np.array([scale*(x[0] - 1)])
for scale, method in itertools.product(scales, methods):
if method in ('CG', 'BFGS'):
options = dict(gtol=scale*1e-8)
else:
options = dict()
if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'):
# XXX: return initial point if they see small gradient
continue
x0 = [-1.0]
first_step_size = [None]
res = optimize.minimize(f, x0, jac=g, method=method,
options=options)
err_msg = f"{method} {scale}: {first_step_size}: {res}"
assert res.success, err_msg
assert_allclose(res.x, [1.0], err_msg=err_msg)
assert res.nit <= 3, err_msg
if scale > 1e-10:
if method in ('CG', 'BFGS'):
assert_allclose(first_step_size[0], 1.01, err_msg=err_msg)
else:
# Newton-CG and L-BFGS-B use different logic for the first
# step, but are both scaling invariant with step sizes ~ 1
assert first_step_size[0] > 0.5 and first_step_size[0] < 3, err_msg
else:
# step size has upper bound of ||grad||, so line
# search makes many small steps
pass
@pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs',
'newton-cg', 'l-bfgs-b', 'tnc',
'cobyqa', 'slsqp',
'trust-constr', 'dogleg', 'trust-ncg',
'trust-exact', 'trust-krylov'])
def test_nan_values(self, method):
# Check nan values result to failed exit status
# test is dependent on exact seed
rng = np.random.default_rng(123122)
count = [0]
def func(x):
return np.nan
def func2(x):
count[0] += 1
if count[0] > 2:
return np.nan
else:
return rng.random()
def grad(x):
return np.array([1.0])
def hess(x):
return np.array([[1.0]])
x0 = np.array([1.0])
needs_grad = method in ('newton-cg', 'trust-krylov', 'trust-exact',
'trust-ncg', 'dogleg')
needs_hess = method in ('trust-krylov', 'trust-exact', 'trust-ncg',
'dogleg')
funcs = [func, func2]
grads = [grad] if needs_grad else [grad, None]
hesss = [hess] if needs_hess else [hess, None]
options = dict(maxfun=20) if method == 'tnc' else dict(maxiter=20)
with np.errstate(invalid='ignore'), warnings.catch_warnings():
warnings.filterwarnings("ignore", "delta_grad == 0.*", UserWarning)
warnings.filterwarnings(
"ignore", ".*does not use Hessian.*", RuntimeWarning)
warnings.filterwarnings(
"ignore", ".*does not use gradient.*", RuntimeWarning)
for f, g, h in itertools.product(funcs, grads, hesss):
count = [0]
sol = optimize.minimize(f, x0, jac=g, hess=h, method=method,
options=options)
assert_equal(sol.success, False)
@pytest.mark.parametrize('method', ['nelder-mead', 'cg', 'bfgs',
'l-bfgs-b', 'tnc',
'cobyla', 'cobyqa', 'slsqp',
'trust-constr', 'dogleg', 'trust-ncg',
'trust-exact', 'trust-krylov'])
def test_duplicate_evaluations(self, method):
# check that there are no duplicate evaluations for any methods
jac = hess = None
if method in ('newton-cg', 'trust-krylov', 'trust-exact',
'trust-ncg', 'dogleg'):
jac = self.grad
if method in ('trust-krylov', 'trust-exact', 'trust-ncg',
'dogleg'):
hess = self.hess
with np.errstate(invalid='ignore'), warnings.catch_warnings():
# for trust-constr
warnings.filterwarnings("ignore", "delta_grad == 0.*", UserWarning)
optimize.minimize(self.func, self.startparams,
method=method, jac=jac, hess=hess)
for i in range(1, len(self.trace.t)):
if np.array_equal(self.trace.t[i - 1], self.trace.t[i]):
raise RuntimeError(
f"Duplicate evaluations made by {method}")
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.parametrize('method', MINIMIZE_METHODS_NEW_CB)
@pytest.mark.parametrize('new_cb_interface', [0, 1, 2])
def test_callback_stopiteration(self, method, new_cb_interface):
# Check that if callback raises StopIteration, optimization
# terminates with the same result as if iterations were limited
def f(x):
f.flag = False # check that f isn't called after StopIteration
return optimize.rosen(x)
f.flag = False
def g(x):
f.flag = False
return optimize.rosen_der(x)
def h(x):
f.flag = False
return optimize.rosen_hess(x)
maxiter = 5
if new_cb_interface == 1:
def callback_interface(*, intermediate_result):
assert intermediate_result.fun == f(intermediate_result.x)
callback()
elif new_cb_interface == 2:
class Callback:
def __call__(self, intermediate_result: OptimizeResult):
assert intermediate_result.fun == f(intermediate_result.x)
callback()
callback_interface = Callback()
else:
def callback_interface(xk, *args): # type: ignore[misc]
callback()
def callback():
callback.i += 1
callback.flag = False
if callback.i == maxiter:
callback.flag = True
raise StopIteration()
callback.i = 0
callback.flag = False
kwargs = {'x0': [1.1]*5, 'method': method,
'fun': f, 'jac': g, 'hess': h}
res = optimize.minimize(**kwargs, callback=callback_interface)
if method == 'nelder-mead':
maxiter = maxiter + 1 # nelder-mead counts differently
if method == 'cobyqa':
ref = optimize.minimize(**kwargs, options={'maxfev': maxiter})
assert res.nfev == ref.nfev == maxiter
elif method == 'cobyla':
# COBYLA calls the callback once per iteration, not once per function
# evaluation, so this test is not applicable. However we can test
# the COBYLA status to verify that res stopped back on the callback
# and ref stopped based on the iteration limit.
# COBYLA requires at least n+2 function evaluations
maxiter = max(maxiter, len(kwargs['x0'])+2)
ref = optimize.minimize(**kwargs, options={'maxiter': maxiter})
assert res.status == 30
assert res.message == ("Return from COBYLA because the callback function "
"requested termination")
assert ref.status == 3
assert ref.message == ("Return from COBYLA because the objective function "
"has been evaluated MAXFUN times.")
# Return early because res/ref will be unequal for COBYLA for the reasons
# mentioned above.
return
else:
ref = optimize.minimize(**kwargs, options={'maxiter': maxiter})
assert res.message.startswith("`callback` raised `StopIteration`")
assert res.nit == ref.nit == maxiter
if method != 'slsqp':
# Unlike all other methods, apparently SLSQP updates x/fun after the last
# call to the callback
assert res.fun == ref.fun
assert_equal(res.x, ref.x)
assert res.status == 3 if method in {'trust-constr', 'cobyqa'} else 99
if method != 'cobyqa':
assert not res.success
def test_ndim_error(self):
msg = "'x0' must only have one dimension."
with assert_raises(ValueError, match=msg):
optimize.minimize(lambda x: x, np.ones((2, 1)))
@pytest.mark.parametrize('method', ('nelder-mead', 'l-bfgs-b', 'tnc',
'powell', 'cobyla', 'cobyqa',
'trust-constr'))
def test_minimize_invalid_bounds(self, method):
def f(x):
return np.sum(x**2)
bounds = Bounds([1, 2], [3, 4])
msg = 'The number of bounds is not compatible with the length of `x0`.'
with pytest.raises(ValueError, match=msg):
optimize.minimize(f, x0=[1, 2, 3], method=method, bounds=bounds)
bounds = Bounds([1, 6, 1], [3, 4, 2])
msg = 'An upper bound is less than the corresponding lower bound.'
with pytest.raises(ValueError, match=msg):
optimize.minimize(f, x0=[1, 2, 3], method=method, bounds=bounds)
@pytest.mark.parametrize('method', ['bfgs', 'cg', 'newton-cg', 'powell'])
def test_minimize_warnings_gh1953(self, method):
# test that minimize methods produce warnings rather than just using
# `print`; see gh-1953.
kwargs = {} if method=='powell' else {'jac': optimize.rosen_der}
warning_type = (RuntimeWarning if method=='powell'
else optimize.OptimizeWarning)
options = {'disp': True, 'maxiter': 10}
with pytest.warns(warning_type, match='Maximum number'):
optimize.minimize(lambda x: optimize.rosen(x), [0, 0],
method=method, options=options, **kwargs)
options['disp'] = False
optimize.minimize(lambda x: optimize.rosen(x), [0, 0],
method=method, options=options, **kwargs)
@pytest.mark.parametrize(
'method',
['l-bfgs-b', 'tnc', 'Powell', 'Nelder-Mead', 'cobyqa']
)
def test_minimize_with_scalar(method):
# checks that minimize works with a scalar being provided to it.
def f(x):
return np.sum(x ** 2)
res = optimize.minimize(f, 17, bounds=[(-100, 100)], method=method)
assert res.success
assert_allclose(res.x, [0.0], atol=1e-5)
| TestOptimizeSimple |
python | ray-project__ray | python/ray/train/_internal/state/schema.py | {
"start": 1869,
"end": 1994
} | class ____(BaseModel):
rss: int
vms: int
pfaults: Optional[int]
pageins: Optional[int]
@DeveloperAPI
| MemoryInfo |
python | python-excel__xlwt | xlwt/antlr.py | {
"start": 67194,
"end": 68675
} | class ____(object):
def __init__(self):
pass
def addChild(self, c):
pass
def equals(self, t):
return False
def equalsList(self, t):
return False
def equalsListPartial(self, t):
return False
def equalsTree(self, t):
return False
def equalsTreePartial(self, t):
return False
def findAll(self, tree):
return None
def findAllPartial(self, subtree):
return None
def getFirstChild(self):
return self
def getNextSibling(self):
return self
def getText(self):
return ""
def getType(self):
return INVALID_TYPE
def getLine(self):
return 0
def getColumn(self):
return 0
def getNumberOfChildren(self):
return 0
def initialize(self, t):
pass
def setFirstChild(self, c):
pass
def setNextSibling(self, n):
pass
def setText(self, text):
pass
def setType(self, ttype):
pass
def toString(self):
self.getText()
__str__ = toString
def toStringList(self):
return self.getText()
def toStringTree(self):
return self.getText()
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### ASTNULLType ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### There is only one instance of this class **/
| AST |
python | Farama-Foundation__Gymnasium | gymnasium/error.py | {
"start": 1909,
"end": 2049
} | class ____(ImportError):
"""Error message for importing an old version of a wrapper."""
# Vectorized environments errors
| DeprecatedWrapper |
python | Textualize__textual | tests/toggles/test_checkbox.py | {
"start": 118,
"end": 1707
} | class ____(App[None]):
def __init__(self):
super().__init__()
self.events_received = []
def compose(self) -> ComposeResult:
yield Checkbox("Test", id="cb1")
yield Checkbox(id="cb2")
yield Checkbox(value=True, id="cb3")
def on_checkbox_changed(self, event: Checkbox.Changed) -> None:
self.events_received.append(
(event.checkbox.id, event.checkbox.value, event.checkbox == event.control)
)
async def test_checkbox_initial_state() -> None:
"""The initial states of the check boxes should be as we specified."""
async with CheckboxApp().run_test() as pilot:
assert [box.value for box in pilot.app.query(Checkbox)] == [False, False, True]
assert [box.has_class("-on") for box in pilot.app.query(Checkbox)] == [
False,
False,
True,
]
assert pilot.app.events_received == []
async def test_checkbox_toggle() -> None:
"""Test the status of the check boxes after they've been toggled."""
async with CheckboxApp().run_test() as pilot:
for box in pilot.app.query(Checkbox):
box.toggle()
assert [box.value for box in pilot.app.query(Checkbox)] == [True, True, False]
assert [box.has_class("-on") for box in pilot.app.query(Checkbox)] == [
True,
True,
False,
]
await pilot.pause()
assert pilot.app.events_received == [
("cb1", True, True),
("cb2", True, True),
("cb3", False, True),
]
| CheckboxApp |
python | huggingface__transformers | src/transformers/models/convbert/modeling_convbert.py | {
"start": 33777,
"end": 34847
} | class ____(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
self.config = config
def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = ACT2FN[self.config.hidden_act](x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@auto_docstring(
custom_intro="""
ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
"""
)
| ConvBertClassificationHead |
python | gevent__gevent | src/gevent/_config.py | {
"start": 9942,
"end": 10096
} | class ____(ImportableSetting, Setting):
desc = """\
The kind of threadpool we use.
"""
default = 'gevent.threadpool.ThreadPool'
| Threadpool |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_collection_membership_files.py | {
"start": 435,
"end": 640
} | class ____(GQLResult):
artifact_collection: Optional[
ArtifactCollectionMembershipFilesProjectArtifactCollection
] = Field(alias="artifactCollection")
| ArtifactCollectionMembershipFilesProject |
python | django__django | tests/gis_tests/test_data.py | {
"start": 686,
"end": 895
} | class ____:
"""
Base testing object, turns keyword args into attributes.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
| TestObj |
python | gevent__gevent | src/gevent/tests/test__core_watcher.py | {
"start": 297,
"end": 3573
} | class ____(greentest.TestCase):
__timeout__ = None
def setUp(self):
super(Test, self).setUp()
self.loop = config.loop(default=False)
self.timer = self.loop.timer(0.01)
def tearDown(self):
if self.timer is not None:
self.timer.close()
if self.loop is not None:
self.loop.destroy()
self.loop = self.timer = None
super(Test, self).tearDown()
def test_non_callable_to_start(self):
# test that cannot pass non-callable thing to start()
self.assertRaises(TypeError, self.timer.start, None)
self.assertRaises(TypeError, self.timer.start, 5)
def test_non_callable_after_start(self):
# test that cannot set 'callback' to non-callable thing later either
lst = []
timer = self.timer
timer.start(lst.append)
with self.assertRaises(TypeError):
timer.callback = False
with self.assertRaises(TypeError):
timer.callback = 5
def test_args_can_be_changed_after_start(self):
lst = []
timer = self.timer
self.timer.start(lst.append)
self.assertEqual(timer.args, ())
timer.args = (1, 2, 3)
self.assertEqual(timer.args, (1, 2, 3))
# Only tuple can be args
with self.assertRaises(TypeError):
timer.args = 5
with self.assertRaises(TypeError):
timer.args = [4, 5]
self.assertEqual(timer.args, (1, 2, 3))
# None also works, means empty tuple
# XXX why?
timer.args = None
self.assertEqual(timer.args, None)
def test_run(self):
loop = self.loop
lst = []
self.timer.start(lambda *args: lst.append(args))
loop.run()
loop.update_now()
self.assertEqual(lst, [()])
# Even if we lose all references to it, the ref in the callback
# keeps it alive
self.timer.start(reset, self.timer, lst)
self.timer = None
loop.run()
self.assertEqual(lst, [(), 25])
def test_invalid_fd(self):
loop = self.loop
# Negative case caught everywhere. ValueError
# on POSIX, OSError on Windows Py3, IOError on Windows Py2
with self.assertRaises((ValueError, OSError, IOError)):
loop.io(-1, READ)
@greentest.skipOnWindows("Stdout can't be watched on Win32")
def test_reuse_io(self):
loop = self.loop
# Watchers aren't reused once all outstanding
# refs go away BUT THEY MUST BE CLOSED
tty_watcher = loop.io(1, WRITE)
watcher_handle = tty_watcher._watcher if CFFI_BACKEND else tty_watcher
tty_watcher.close()
del tty_watcher
# XXX: Note there is a cycle in the CFFI code
# from watcher_handle._handle -> watcher_handle.
# So it doesn't go away until a GC runs.
import gc
gc.collect()
tty_watcher = loop.io(1, WRITE)
self.assertIsNot(tty_watcher._watcher if CFFI_BACKEND else tty_watcher, watcher_handle)
tty_watcher.close()
def reset(watcher, lst):
watcher.args = None
watcher.callback = lambda: None
lst.append(25)
watcher.close()
if __name__ == '__main__':
greentest.main()
| Test |
python | falconry__falcon | falcon/testing/resource.py | {
"start": 7852,
"end": 9732
} | class ____(SimpleTestResource):
"""Mock resource for functional testing of ASGI framework components.
This class implements a simple test resource that can be extended
as needed to test middleware, hooks, and the Falcon framework
itself. It is identical to SimpleTestResource, except that it implements
asynchronous responders for use with the ASGI interface.
Only noop ``on_get()`` and ``on_post()`` responders are implemented;
when overriding these, or adding additional responders in child
classes, they can be decorated with the
:meth:`falcon.testing.capture_responder_args` hook in
order to capture the *req*, *resp*, and *params* arguments that
are passed to the responder. Responders may also be decorated with
the :meth:`falcon.testing.set_resp_defaults` hook in order to
set *resp* properties to default *status*, *body*, and *header*
values.
Keyword Arguments:
status (str): Default status string to use in responses
body (str): Default body string to use in responses
json (JSON serializable): Default JSON document to use in responses.
Will be serialized to a string and encoded as UTF-8. Either
*json* or *body* may be specified, but not both.
headers (dict): Default set of additional headers to include in
responses
"""
@falcon.before(capture_responder_args_async)
@falcon.before(set_resp_defaults_async)
async def on_get( # type: ignore[override]
self, req: asgi.Request, resp: asgi.Response, **kwargs: typing.Any
) -> None:
pass
@falcon.before(capture_responder_args_async)
@falcon.before(set_resp_defaults_async)
async def on_post( # type: ignore[override]
self, req: asgi.Request, resp: asgi.Response, **kwargs: typing.Any
) -> None:
pass
| SimpleTestResourceAsync |
python | huggingface__transformers | tests/models/kosmos2_5/test_processor_kosmos2_5.py | {
"start": 1228,
"end": 13128
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = Kosmos2_5Processor
images_input_name = "flattened_patches"
model_id = "microsoft/kosmos-2.5"
@unittest.skip("Kosmos2_5Processor removes 'rows' and 'cols' from the output")
def test_image_processor_defaults(self):
pass
def test_image_procesor_load_save_reload(self):
# make sure load from Hub repo. -> save -> reload locally work
image_processor = Kosmos2_5ImageProcessor.from_pretrained("microsoft/kosmos-2.5")
with TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(tmp_dir)
reloaded_image_processor = Kosmos2_5ImageProcessor.from_pretrained(tmp_dir)
assert image_processor.to_dict() == reloaded_image_processor.to_dict()
assert image_processor.to_json_string() == reloaded_image_processor.to_json_string()
def test_can_load_various_tokenizers(self):
for checkpoint in ["microsoft/kosmos-2.5"]:
processor = AutoProcessor.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
self.assertEqual(processor.tokenizer.__class__, tokenizer.__class__)
@require_torch
def test_model_input_names(self):
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer")
processor = Kosmos2_5Processor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "This is a test"
image_input = self.prepare_image_inputs()
# both image and text
inputs = processor(text=input_str, images=image_input)
self.assertListEqual(
list(inputs.keys()),
[
"flattened_patches",
"attention_mask",
"width",
"height",
"input_ids",
"image_embeds_position_mask",
],
)
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
@require_torch
@require_vision
def test_image_processor_defaults_preserved_by_image_kwargs(self):
# Rewrite as KOSMOS-2.5 processor return "flattened_patches" and not "pixel_values"
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor", max_patches=1024, patch_size={"height": 8, "width": 8})
tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertEqual(len(inputs["flattened_patches"][0][0]), 194)
@require_torch
@require_vision
def test_kwargs_overrides_default_image_processor_kwargs(self):
# Rewrite as KOSMOS-2.5 processor return "flattened_patches" and not "pixel_values"
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor", max_patches=4096)
tokenizer = self.get_component("tokenizer", max_length=117, padding="max_length")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input, max_patches=1024)
self.assertEqual(len(inputs["flattened_patches"][0]), 1024)
@require_torch
@require_vision
def test_unstructured_kwargs(self):
# Rewrite as KOSMOS-2.5 processor doesn't use `rescale_factor`
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
inputs = processor(
text=input_str,
images=image_input,
return_tensors="pt",
max_patches=1024,
padding="max_length",
max_length=76,
)
self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
self.assertEqual(len(inputs["input_ids"][0]), 76)
@require_torch
@require_vision
def test_unstructured_kwargs_batched(self):
# Rewrite as KOSMOS-2.5 processor doesn't use `rescale_factor`
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=2)
image_input = self.prepare_image_inputs(batch_size=2)
inputs = processor(
text=input_str,
images=image_input,
return_tensors="pt",
max_patches=1024,
padding="longest",
max_length=76,
)
self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
self.assertEqual(len(inputs["input_ids"][0]), 76)
@require_torch
@require_vision
def test_structured_kwargs_nested(self):
# Rewrite as KOSMOS-2.5 processor doesn't use `rescale_factor`
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"images_kwargs": {"max_patches": 1024},
"text_kwargs": {"padding": "max_length", "max_length": 76},
}
inputs = processor(text=input_str, images=image_input, **all_kwargs)
self.skip_processor_without_typed_kwargs(processor)
self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
self.assertEqual(len(inputs["input_ids"][0]), 76)
@require_torch
@require_vision
def test_structured_kwargs_nested_from_dict(self):
# Rewrite as KOSMOS-2.5 processor doesn't use `rescale_factor`
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
image_processor = self.get_component("image_processor")
tokenizer = self.get_component("tokenizer")
processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs()
image_input = self.prepare_image_inputs()
# Define the kwargs for each modality
all_kwargs = {
"common_kwargs": {"return_tensors": "pt"},
"images_kwargs": {"max_patches": 1024},
"text_kwargs": {"padding": "max_length", "max_length": 76},
}
inputs = processor(text=input_str, images=image_input, **all_kwargs)
self.assertEqual(inputs["flattened_patches"].shape[1], 1024)
self.assertEqual(len(inputs["input_ids"][0]), 76)
@require_torch
def test_full_processor(self):
url = url_to_local_path("https://huggingface.co/microsoft/kosmos-2.5/resolve/main/receipt_00008.png")
processor = AutoProcessor.from_pretrained("microsoft/kosmos-2.5")
texts = ["<md>", "<ocr>"]
expected_input_ids = [
[100288],
[100282],
]
expected_attention_mask = [[1], [1]]
image = load_image(url)
# To match the official (microsoft) Kosmos-2 demo from which the expected values here are grabbed
image_path = os.path.join(self.tmpdirname, "image.png")
image.save(image_path)
image = Image.open(image_path)
# test single image
outputs = processor(images=image, text=texts[0])
self.assertListEqual(
outputs.input_ids[0].numpy().tolist(),
[0, 100283] + [0] * 2048 + [100284] + expected_input_ids[0],
)
self.assertListEqual(
outputs.image_embeds_position_mask[0].numpy().tolist(),
[0, -1] + [1] * 2048 + [-1] + [0] * (len(expected_input_ids[0])),
)
self.assertListEqual(
outputs.attention_mask[0].numpy().tolist(),
[1, 1] + [1] * 2048 + [1] + expected_attention_mask[0],
)
EXPECTED_FP_1 = [
1.0,
2.0,
-2.9527735710144043,
-2.672085762023926,
-2.9933173656463623,
-2.905944585800171,
-2.5891761779785156,
-2.8751866817474365,
-2.962153434753418,
-2.588062047958374,
]
EXPECTED_FP_200 = [
4.0,
45.0,
1.5713728666305542,
1.584628939628601,
1.3589054346084595,
1.6515952348709106,
1.7014952898025513,
1.3731343746185303,
1.6010395288467407,
1.6607422828674316,
]
self.assertTupleEqual(outputs.flattened_patches.shape, (1, 4096, 770))
np.testing.assert_allclose(
outputs.flattened_patches[0][1][:10].numpy().tolist(),
EXPECTED_FP_1,
atol=1e-9,
)
np.testing.assert_allclose(
outputs.flattened_patches[0][200][:10].numpy().tolist(),
EXPECTED_FP_200,
atol=1e-9,
)
# test a batch of images and texts, right padding
outputs = processor(images=[image, image], text=texts)
self.assertListEqual(
outputs.input_ids[1].numpy().tolist(),
[0, 100283] + [0] * 2048 + [100284] + expected_input_ids[1],
)
self.assertListEqual(
outputs.image_embeds_position_mask[1].numpy().tolist(),
[0, -1] + [1] * 2048 + [-1] + [0] * (len(expected_input_ids[1])),
)
self.assertListEqual(
outputs.attention_mask[1].numpy().tolist(),
[1, 1] + [1] * 2048 + [1] + expected_attention_mask[1],
)
self.assertTupleEqual(outputs.flattened_patches.shape, (2, 4096, 770))
np.testing.assert_allclose(
outputs.flattened_patches[1][1][:10].numpy().tolist(),
EXPECTED_FP_1,
atol=1e-9,
)
np.testing.assert_allclose(
outputs.flattened_patches[1][200][:10].numpy().tolist(),
EXPECTED_FP_200,
atol=1e-9,
)
| Kosmos2_5ProcessorTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-pgvector/destination_pgvector/common/state/state_writers.py | {
"start": 267,
"end": 524
} | class ____(abc.ABC):
"""A class to write state artifacts."""
@abc.abstractmethod
def write_state(
self,
state_message: AirbyteStateMessage,
) -> None:
"""Save or 'write' a state artifact."""
...
| StateWriterBase |
python | bokeh__bokeh | src/bokeh/models/text.py | {
"start": 4073,
"end": 4843
} | class ____(BaseText):
""" Represents plain text in contexts where text parsing is allowed.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| PlainText |
python | kamyu104__LeetCode-Solutions | Python/maximum-value-sum-by-placing-three-rooks-i.py | {
"start": 1425,
"end": 2169
} | class ____(object):
def maximumValueSum(self, board):
"""
:type board: List[List[int]]
:rtype: int
"""
k = 3
rows = [heapq.nlargest(k, [(board[i][j], i, j) for j in xrange(len(board[0]))]) for i in xrange(len(board))]
cols = [heapq.nlargest(k, [(board[i][j], i, j) for i in xrange(len(board))]) for j in xrange(len(board[0]))]
min_heap = heapq.nlargest((k-1)*(2*k-1)+1, set(itertools.chain(*rows)) & set(itertools.chain(*cols))) # each choice excludes at most 2k-1 candidates, we should have at least (k-1)*(2k-1)+1 candidates
return max(sum(x[0] for x in c) for c in itertools.combinations(min_heap, k) if len({x[1] for x in c}) == k == len({x[2] for x in c}))
| Solution2 |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1047258,
"end": 1047450
} | class ____(VegaLiteSchema):
"""ResolveMode schema wrapper."""
_schema = {"$ref": "#/definitions/ResolveMode"}
def __init__(self, *args):
super().__init__(*args)
| ResolveMode |
python | Farama-Foundation__Gymnasium | gymnasium/envs/box2d/lunar_lander.py | {
"start": 1996,
"end": 33295
} | class ____(gym.Env, EzPickle):
r"""
## Description
This environment is a classic rocket trajectory optimization problem.
According to Pontryagin's maximum principle, it is optimal to fire the
engine at full throttle or turn it off. This is the reason why this
environment has discrete actions: engine on or off.
There are two environment versions: discrete or continuous.
The landing pad is always at coordinates (0,0). The coordinates are the
first two numbers in the state vector.
Landing outside of the landing pad is possible. Fuel is infinite, so an agent
can learn to fly and then land on its first attempt.
To see a heuristic landing, run:
```shell
python gymnasium/envs/box2d/lunar_lander.py
```
## Action Space
There are four discrete actions available:
- 0: do nothing
- 1: fire left orientation engine
- 2: fire main engine
- 3: fire right orientation engine
## Observation Space
The state is an 8-dimensional vector: the coordinates of the lander in `x` & `y`, its linear
velocities in `x` & `y`, its angle, its angular velocity, and two booleans
that represent whether each leg is in contact with the ground or not.
## Rewards
After every step a reward is granted. The total reward of an episode is the
sum of the rewards for all the steps within that episode.
For each step, the reward:
- is increased/decreased the closer/further the lander is to the landing pad.
- is increased/decreased the slower/faster the lander is moving.
- is decreased the more the lander is tilted (angle not horizontal).
- is increased by 10 points for each leg that is in contact with the ground.
- is decreased by 0.03 points each frame a side engine is firing.
- is decreased by 0.3 points each frame the main engine is firing.
The episode receive an additional reward of -100 or +100 points for crashing or landing safely respectively.
An episode is considered a solution if it scores at least 200 points.
## Starting State
The lander starts at the top center of the viewport with a random initial
force applied to its center of mass.
## Episode Termination
The episode finishes if:
1) the lander crashes (the lander body gets in contact with the moon);
2) the lander gets outside of the viewport (`x` coordinate is greater than 1);
3) the lander is not awake. From the [Box2D docs](https://box2d.org/documentation/md__d_1__git_hub_box2d_docs_dynamics.html#autotoc_md61),
a body which is not awake is a body which doesn't move and doesn't
collide with any other body:
> When Box2D determines that a body (or group of bodies) has come to rest,
> the body enters a sleep state which has very little CPU overhead. If a
> body is awake and collides with a sleeping body, then the sleeping body
> wakes up. Bodies will also wake up if a joint or contact attached to
> them is destroyed.
## Arguments
Lunar Lander has a large number of arguments
```python
>>> import gymnasium as gym
>>> env = gym.make("LunarLander-v3", continuous=False, gravity=-10.0,
... enable_wind=False, wind_power=15.0, turbulence_power=1.5)
>>> env
<TimeLimit<OrderEnforcing<PassiveEnvChecker<LunarLander<LunarLander-v3>>>>>
```
* `continuous` determines if discrete or continuous actions (corresponding to the throttle of the engines) will be used with the
action space being `Discrete(4)` or `Box(-1, +1, (2,), dtype=np.float32)` respectively.
For continuous actions, the first coordinate of an action determines the throttle of the main engine, while the second
coordinate specifies the throttle of the lateral boosters. Given an action `np.array([main, lateral])`, the main
engine will be turned off completely if `main < 0` and the throttle scales affinely from 50% to 100% for
`0 <= main <= 1` (in particular, the main engine doesn't work with less than 50% power).
Similarly, if `-0.5 < lateral < 0.5`, the lateral boosters will not fire at all. If `lateral < -0.5`, the left
booster will fire, and if `lateral > 0.5`, the right booster will fire. Again, the throttle scales affinely
from 50% to 100% between -1 and -0.5 (and 0.5 and 1, respectively).
* `gravity` dictates the gravitational constant, this is bounded to be within 0 and -12. Default is -10.0
* `enable_wind` determines if there will be wind effects applied to the lander. The wind is generated using
the function `tanh(sin(2 k (t+C)) + sin(pi k (t+C)))` where `k` is set to 0.01 and `C` is sampled randomly between -9999 and 9999.
* `wind_power` dictates the maximum magnitude of linear wind applied to the craft. The recommended value for
`wind_power` is between 0.0 and 20.0.
* `turbulence_power` dictates the maximum magnitude of rotational wind applied to the craft.
The recommended value for `turbulence_power` is between 0.0 and 2.0.
## Version History
- v3:
- Reset wind and turbulence offset (`C`) whenever the environment is reset to ensure statistical independence between consecutive episodes (related [GitHub issue](https://github.com/Farama-Foundation/Gymnasium/issues/954)).
- Fix non-deterministic behaviour due to not fully destroying the world (related [GitHub issue](https://github.com/Farama-Foundation/Gymnasium/issues/728)).
- Changed observation space for `x`, `y` coordinates from $\pm 1.5$ to $\pm 2.5$, velocities from $\pm 5$ to $\pm 10$ and angles from $\pm \pi$ to $\pm 2\pi$ (related [GitHub issue](https://github.com/Farama-Foundation/Gymnasium/issues/752)).
- v2: Count energy spent and in v0.24, added turbulence with wind power and turbulence_power parameters
- v1: Legs contact with ground added in state vector; contact with ground give +10 reward points, and -10 if then lose contact; reward renormalized to 200; harder initial random push.
- v0: Initial version
## Notes
There are several unexpected bugs with the implementation of the environment.
1. The position of the side thrusters on the body of the lander changes, depending on the orientation of the lander.
This in turn results in an orientation dependent torque being applied to the lander.
2. The units of the state are not consistent. I.e.
* The angular velocity is in units of 0.4 radians per second. In order to convert to radians per second, the value needs to be multiplied by a factor of 2.5.
For the default values of VIEWPORT_W, VIEWPORT_H, SCALE, and FPS, the scale factors equal:
'x': 10, 'y': 6.666, 'vx': 5, 'vy': 7.5, 'angle': 1, 'angular velocity': 2.5
After the correction has been made, the units of the state are as follows:
'x': (units), 'y': (units), 'vx': (units/second), 'vy': (units/second), 'angle': (radians), 'angular velocity': (radians/second)
<!-- ## References -->
## Credits
Created by Oleg Klimov
"""
metadata = {
"render_modes": ["human", "rgb_array"],
"render_fps": FPS,
}
def __init__(
self,
render_mode: str | None = None,
continuous: bool = False,
gravity: float = -10.0,
enable_wind: bool = False,
wind_power: float = 15.0,
turbulence_power: float = 1.5,
):
EzPickle.__init__(
self,
render_mode,
continuous,
gravity,
enable_wind,
wind_power,
turbulence_power,
)
assert (
-12.0 < gravity and gravity < 0.0
), f"gravity (current value: {gravity}) must be between -12 and 0"
self.gravity = gravity
if 0.0 > wind_power or wind_power > 20.0:
gym.logger.warn(
f"wind_power value is recommended to be between 0.0 and 20.0, (current value: {wind_power})"
)
self.wind_power = wind_power
if 0.0 > turbulence_power or turbulence_power > 2.0:
gym.logger.warn(
f"turbulence_power value is recommended to be between 0.0 and 2.0, (current value: {turbulence_power})"
)
self.turbulence_power = turbulence_power
self.enable_wind = enable_wind
self.screen: pygame.Surface = None
self.clock = None
self.isopen = True
self.world = Box2D.b2World(gravity=(0, gravity))
self.moon = None
self.lander: Box2D.b2Body | None = None
self.particles = []
self.prev_reward = None
self.continuous = continuous
low = np.array(
[
# these are bounds for position
# realistically the environment should have ended
# long before we reach more than 50% outside
-2.5, # x coordinate
-2.5, # y coordinate
# velocity bounds is 5x rated speed
-10.0,
-10.0,
-2 * math.pi,
-10.0,
-0.0,
-0.0,
]
).astype(np.float32)
high = np.array(
[
# these are bounds for position
# realistically the environment should have ended
# long before we reach more than 50% outside
2.5, # x coordinate
2.5, # y coordinate
# velocity bounds is 5x rated speed
10.0,
10.0,
2 * math.pi,
10.0,
1.0,
1.0,
]
).astype(np.float32)
# useful range is -1 .. +1, but spikes can be higher
self.observation_space = spaces.Box(low, high)
if self.continuous:
# Action is two floats [main engine, left-right engines].
# Main engine: -1..0 off, 0..+1 throttle from 50% to 100% power. Engine can't work with less than 50% power.
# Left-right: -1.0..-0.5 fire left engine, +0.5..+1.0 fire right engine, -0.5..0.5 off
self.action_space = spaces.Box(-1, +1, (2,), dtype=np.float32)
else:
# Nop, fire left engine, main engine, right engine
self.action_space = spaces.Discrete(4)
self.render_mode = render_mode
def _destroy(self):
if not self.moon:
return
self.world.contactListener = None
self._clean_particles(True)
self.world.DestroyBody(self.moon)
self.moon = None
self.world.DestroyBody(self.lander)
self.lander = None
self.world.DestroyBody(self.legs[0])
self.world.DestroyBody(self.legs[1])
def reset(
self,
*,
seed: int | None = None,
options: dict | None = None,
):
super().reset(seed=seed)
self._destroy()
# Bug's workaround for: https://github.com/Farama-Foundation/Gymnasium/issues/728
# Not sure why the self._destroy() is not enough to clean(reset) the total world environment elements, need more investigation on the root cause,
# we must create a totally new world for self.reset(), or the bug#728 will happen
self.world = Box2D.b2World(gravity=(0, self.gravity))
self.world.contactListener_keepref = ContactDetector(self)
self.world.contactListener = self.world.contactListener_keepref
self.game_over = False
self.prev_shaping = None
W = VIEWPORT_W / SCALE
H = VIEWPORT_H / SCALE
# Create Terrain
CHUNKS = 11
height = self.np_random.uniform(0, H / 2, size=(CHUNKS + 1,))
chunk_x = [W / (CHUNKS - 1) * i for i in range(CHUNKS)]
self.helipad_x1 = chunk_x[CHUNKS // 2 - 1]
self.helipad_x2 = chunk_x[CHUNKS // 2 + 1]
self.helipad_y = H / 4
height[CHUNKS // 2 - 2] = self.helipad_y
height[CHUNKS // 2 - 1] = self.helipad_y
height[CHUNKS // 2 + 0] = self.helipad_y
height[CHUNKS // 2 + 1] = self.helipad_y
height[CHUNKS // 2 + 2] = self.helipad_y
smooth_y = [
0.33 * (height[i - 1] + height[i + 0] + height[i + 1])
for i in range(CHUNKS)
]
self.moon = self.world.CreateStaticBody(
shapes=edgeShape(vertices=[(0, 0), (W, 0)])
)
self.sky_polys = []
for i in range(CHUNKS - 1):
p1 = (chunk_x[i], smooth_y[i])
p2 = (chunk_x[i + 1], smooth_y[i + 1])
self.moon.CreateEdgeFixture(vertices=[p1, p2], density=0, friction=0.1)
self.sky_polys.append([p1, p2, (p2[0], H), (p1[0], H)])
self.moon.color1 = (0.0, 0.0, 0.0)
self.moon.color2 = (0.0, 0.0, 0.0)
# Create Lander body
initial_y = VIEWPORT_H / SCALE
initial_x = VIEWPORT_W / SCALE / 2
self.lander = self.world.CreateDynamicBody(
position=(initial_x, initial_y),
angle=0.0,
fixtures=fixtureDef(
shape=polygonShape(
vertices=[(x / SCALE, y / SCALE) for x, y in LANDER_POLY]
),
density=5.0,
friction=0.1,
categoryBits=0x0010,
maskBits=0x001, # collide only with ground
restitution=0.0,
), # 0.99 bouncy
)
self.lander.color1 = (128, 102, 230)
self.lander.color2 = (77, 77, 128)
# Apply the initial random impulse to the lander
self.lander.ApplyForceToCenter(
(
self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM),
self.np_random.uniform(-INITIAL_RANDOM, INITIAL_RANDOM),
),
True,
)
if self.enable_wind: # Initialize wind pattern based on index
self.wind_idx = self.np_random.integers(-9999, 9999)
self.torque_idx = self.np_random.integers(-9999, 9999)
# Create Lander Legs
self.legs = []
for i in [-1, +1]:
leg = self.world.CreateDynamicBody(
position=(initial_x - i * LEG_AWAY / SCALE, initial_y),
angle=(i * 0.05),
fixtures=fixtureDef(
shape=polygonShape(box=(LEG_W / SCALE, LEG_H / SCALE)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001,
),
)
leg.ground_contact = False
leg.color1 = (128, 102, 230)
leg.color2 = (77, 77, 128)
rjd = revoluteJointDef(
bodyA=self.lander,
bodyB=leg,
localAnchorA=(0, 0),
localAnchorB=(i * LEG_AWAY / SCALE, LEG_DOWN / SCALE),
enableMotor=True,
enableLimit=True,
maxMotorTorque=LEG_SPRING_TORQUE,
motorSpeed=+0.3 * i, # low enough not to jump back into the sky
)
if i == -1:
rjd.lowerAngle = (
+0.9 - 0.5
) # The most esoteric numbers here, angled legs have freedom to travel within
rjd.upperAngle = +0.9
else:
rjd.lowerAngle = -0.9
rjd.upperAngle = -0.9 + 0.5
leg.joint = self.world.CreateJoint(rjd)
self.legs.append(leg)
self.drawlist = [self.lander] + self.legs
if self.render_mode == "human":
self.render()
return self.step(np.array([0, 0]) if self.continuous else 0)[0], {}
def _create_particle(self, mass, x, y, ttl):
p = self.world.CreateDynamicBody(
position=(x, y),
angle=0.0,
fixtures=fixtureDef(
shape=circleShape(radius=2 / SCALE, pos=(0, 0)),
density=mass,
friction=0.1,
categoryBits=0x0100,
maskBits=0x001, # collide only with ground
restitution=0.3,
),
)
p.ttl = ttl
self.particles.append(p)
self._clean_particles(False)
return p
def _clean_particles(self, all_particle):
while self.particles and (all_particle or self.particles[0].ttl < 0):
self.world.DestroyBody(self.particles.pop(0))
def step(self, action):
assert self.lander is not None
# Update wind and apply to the lander
assert self.lander is not None, "You forgot to call reset()"
if self.enable_wind and not (
self.legs[0].ground_contact or self.legs[1].ground_contact
):
# the function used for wind is tanh(sin(2 k x) + sin(pi k x)),
# which is proven to never be periodic, k = 0.01
wind_mag = (
math.tanh(
math.sin(0.02 * self.wind_idx)
+ (math.sin(math.pi * 0.01 * self.wind_idx))
)
* self.wind_power
)
self.wind_idx += 1
self.lander.ApplyForceToCenter(
(wind_mag, 0.0),
True,
)
# the function used for torque is tanh(sin(2 k x) + sin(pi k x)),
# which is proven to never be periodic, k = 0.01
torque_mag = (
math.tanh(
math.sin(0.02 * self.torque_idx)
+ (math.sin(math.pi * 0.01 * self.torque_idx))
)
* self.turbulence_power
)
self.torque_idx += 1
self.lander.ApplyTorque(
torque_mag,
True,
)
if self.continuous:
action = np.clip(action, -1, +1).astype(np.float64)
else:
assert self.action_space.contains(
action
), f"{action!r} ({type(action)}) invalid "
# Apply Engine Impulses
# Tip is the (X and Y) components of the rotation of the lander.
tip = (math.sin(self.lander.angle), math.cos(self.lander.angle))
# Side is the (-Y and X) components of the rotation of the lander.
side = (-tip[1], tip[0])
# Generate two random numbers between -1/SCALE and 1/SCALE.
dispersion = [self.np_random.uniform(-1.0, +1.0) / SCALE for _ in range(2)]
m_power = 0.0
if (self.continuous and action[0] > 0.0) or (
not self.continuous and action == 2
):
# Main engine
if self.continuous:
m_power = (np.clip(action[0], 0.0, 1.0) + 1.0) * 0.5 # 0.5..1.0
assert m_power >= 0.5 and m_power <= 1.0
else:
m_power = 1.0
# 4 is move a bit downwards, +-2 for randomness
# The components of the impulse to be applied by the main engine.
ox = (
tip[0] * (MAIN_ENGINE_Y_LOCATION / SCALE + 2 * dispersion[0])
+ side[0] * dispersion[1]
)
oy = (
-tip[1] * (MAIN_ENGINE_Y_LOCATION / SCALE + 2 * dispersion[0])
- side[1] * dispersion[1]
)
impulse_pos = (self.lander.position[0] + ox, self.lander.position[1] + oy)
if self.render_mode is not None:
# particles are just a decoration, with no impact on the physics, so don't add them when not rendering
p = self._create_particle(
3.5, # 3.5 is here to make particle speed adequate
impulse_pos[0],
impulse_pos[1],
m_power,
)
p.ApplyLinearImpulse(
(
ox * MAIN_ENGINE_POWER * m_power,
oy * MAIN_ENGINE_POWER * m_power,
),
impulse_pos,
True,
)
self.lander.ApplyLinearImpulse(
(-ox * MAIN_ENGINE_POWER * m_power, -oy * MAIN_ENGINE_POWER * m_power),
impulse_pos,
True,
)
s_power = 0.0
if (self.continuous and np.abs(action[1]) > 0.5) or (
not self.continuous and action in [1, 3]
):
# Orientation/Side engines
if self.continuous:
direction = np.sign(action[1])
s_power = np.clip(np.abs(action[1]), 0.5, 1.0)
assert s_power >= 0.5 and s_power <= 1.0
else:
# action = 1 is left, action = 3 is right
direction = action - 2
s_power = 1.0
# The components of the impulse to be applied by the side engines.
ox = tip[0] * dispersion[0] + side[0] * (
3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE
)
oy = -tip[1] * dispersion[0] - side[1] * (
3 * dispersion[1] + direction * SIDE_ENGINE_AWAY / SCALE
)
# The constant 17 is a constant, that is presumably meant to be SIDE_ENGINE_HEIGHT.
# However, SIDE_ENGINE_HEIGHT is defined as 14
# This causes the position of the thrust on the body of the lander to change, depending on the orientation of the lander.
# This in turn results in an orientation dependent torque being applied to the lander.
impulse_pos = (
self.lander.position[0] + ox - tip[0] * 17 / SCALE,
self.lander.position[1] + oy + tip[1] * SIDE_ENGINE_HEIGHT / SCALE,
)
if self.render_mode is not None:
# particles are just a decoration, with no impact on the physics, so don't add them when not rendering
p = self._create_particle(0.7, impulse_pos[0], impulse_pos[1], s_power)
p.ApplyLinearImpulse(
(
ox * SIDE_ENGINE_POWER * s_power,
oy * SIDE_ENGINE_POWER * s_power,
),
impulse_pos,
True,
)
self.lander.ApplyLinearImpulse(
(-ox * SIDE_ENGINE_POWER * s_power, -oy * SIDE_ENGINE_POWER * s_power),
impulse_pos,
True,
)
self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
pos = self.lander.position
vel = self.lander.linearVelocity
state = [
(pos.x - VIEWPORT_W / SCALE / 2) / (VIEWPORT_W / SCALE / 2),
(pos.y - (self.helipad_y + LEG_DOWN / SCALE)) / (VIEWPORT_H / SCALE / 2),
vel.x * (VIEWPORT_W / SCALE / 2) / FPS,
vel.y * (VIEWPORT_H / SCALE / 2) / FPS,
self.lander.angle,
20.0 * self.lander.angularVelocity / FPS,
1.0 if self.legs[0].ground_contact else 0.0,
1.0 if self.legs[1].ground_contact else 0.0,
]
assert len(state) == 8
reward = 0
shaping = (
-100 * np.sqrt(state[0] * state[0] + state[1] * state[1])
- 100 * np.sqrt(state[2] * state[2] + state[3] * state[3])
- 100 * abs(state[4])
+ 10 * state[6]
+ 10 * state[7]
) # And ten points for legs contact, the idea is if you
# lose contact again after landing, you get negative reward
if self.prev_shaping is not None:
reward = shaping - self.prev_shaping
self.prev_shaping = shaping
reward -= (
m_power * 0.30
) # less fuel spent is better, about -30 for heuristic landing
reward -= s_power * 0.03
terminated = False
if self.game_over or abs(state[0]) >= 1.0:
terminated = True
reward = -100
if not self.lander.awake:
terminated = True
reward = +100
if self.render_mode == "human":
self.render()
# truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
return np.array(state, dtype=np.float32), reward, terminated, False, {}
def render(self):
if self.render_mode is None:
assert self.spec is not None
gym.logger.warn(
"You are calling render method without specifying any render mode. "
"You can specify the render_mode at initialization, "
f'e.g. gym.make("{self.spec.id}", render_mode="rgb_array")'
)
return
try:
import pygame
from pygame import gfxdraw
except ImportError as e:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[box2d]"`'
) from e
if self.screen is None and self.render_mode == "human":
pygame.init()
pygame.display.init()
self.screen = pygame.display.set_mode((VIEWPORT_W, VIEWPORT_H))
if self.clock is None:
self.clock = pygame.time.Clock()
self.surf = pygame.Surface((VIEWPORT_W, VIEWPORT_H))
pygame.transform.scale(self.surf, (SCALE, SCALE))
pygame.draw.rect(self.surf, (255, 255, 255), self.surf.get_rect())
for obj in self.particles:
obj.ttl -= 0.15
obj.color1 = (
int(max(0.2, 0.15 + obj.ttl) * 255),
int(max(0.2, 0.5 * obj.ttl) * 255),
int(max(0.2, 0.5 * obj.ttl) * 255),
)
obj.color2 = (
int(max(0.2, 0.15 + obj.ttl) * 255),
int(max(0.2, 0.5 * obj.ttl) * 255),
int(max(0.2, 0.5 * obj.ttl) * 255),
)
self._clean_particles(False)
for p in self.sky_polys:
scaled_poly = []
for coord in p:
scaled_poly.append((coord[0] * SCALE, coord[1] * SCALE))
pygame.draw.polygon(self.surf, (0, 0, 0), scaled_poly)
gfxdraw.aapolygon(self.surf, scaled_poly, (0, 0, 0))
for obj in self.particles + self.drawlist:
for f in obj.fixtures:
trans = f.body.transform
if type(f.shape) is circleShape:
pygame.draw.circle(
self.surf,
color=obj.color1,
center=trans * f.shape.pos * SCALE,
radius=f.shape.radius * SCALE,
)
pygame.draw.circle(
self.surf,
color=obj.color2,
center=trans * f.shape.pos * SCALE,
radius=f.shape.radius * SCALE,
)
else:
path = [trans * v * SCALE for v in f.shape.vertices]
pygame.draw.polygon(self.surf, color=obj.color1, points=path)
gfxdraw.aapolygon(self.surf, path, obj.color1)
pygame.draw.aalines(
self.surf, color=obj.color2, points=path, closed=True
)
for x in [self.helipad_x1, self.helipad_x2]:
x = x * SCALE
flagy1 = self.helipad_y * SCALE
flagy2 = flagy1 + 50
pygame.draw.line(
self.surf,
color=(255, 255, 255),
start_pos=(x, flagy1),
end_pos=(x, flagy2),
width=1,
)
pygame.draw.polygon(
self.surf,
color=(204, 204, 0),
points=[
(x, flagy2),
(x, flagy2 - 10),
(x + 25, flagy2 - 5),
],
)
gfxdraw.aapolygon(
self.surf,
[(x, flagy2), (x, flagy2 - 10), (x + 25, flagy2 - 5)],
(204, 204, 0),
)
self.surf = pygame.transform.flip(self.surf, False, True)
if self.render_mode == "human":
assert self.screen is not None
self.screen.blit(self.surf, (0, 0))
pygame.event.pump()
self.clock.tick(self.metadata["render_fps"])
pygame.display.flip()
elif self.render_mode == "rgb_array":
return np.transpose(
np.array(pygame.surfarray.pixels3d(self.surf)), axes=(1, 0, 2)
)
def close(self):
if self.screen is not None:
import pygame
pygame.display.quit()
pygame.quit()
self.isopen = False
def heuristic(env, s):
"""
The heuristic for
1. Testing
2. Demonstration rollout.
Args:
env: The environment
s (list): The state. Attributes:
s[0] is the horizontal coordinate
s[1] is the vertical coordinate
s[2] is the horizontal speed
s[3] is the vertical speed
s[4] is the angle
s[5] is the angular speed
s[6] 1 if first leg has contact, else 0
s[7] 1 if second leg has contact, else 0
Returns:
a: The heuristic to be fed into the step function defined above to determine the next step and reward.
"""
angle_targ = s[0] * 0.5 + s[2] * 1.0 # angle should point towards center
if angle_targ > 0.4:
angle_targ = 0.4 # more than 0.4 radians (22 degrees) is bad
if angle_targ < -0.4:
angle_targ = -0.4
hover_targ = 0.55 * np.abs(
s[0]
) # target y should be proportional to horizontal offset
angle_todo = (angle_targ - s[4]) * 0.5 - (s[5]) * 1.0
hover_todo = (hover_targ - s[1]) * 0.5 - (s[3]) * 0.5
if s[6] or s[7]: # legs have contact
angle_todo = 0
hover_todo = (
-(s[3]) * 0.5
) # override to reduce fall speed, that's all we need after contact
if env.unwrapped.continuous:
a = np.array([hover_todo * 20 - 1, -angle_todo * 20])
a = np.clip(a, -1, +1)
else:
a = 0
if hover_todo > np.abs(angle_todo) and hover_todo > 0.05:
a = 2
elif angle_todo < -0.05:
a = 3
elif angle_todo > +0.05:
a = 1
return a
def demo_heuristic_lander(env, seed=None, render=False):
total_reward = 0
steps = 0
s, info = env.reset(seed=seed)
while True:
a = heuristic(env, s)
s, r, terminated, truncated, info = step_api_compatibility(env.step(a), True)
total_reward += r
if render:
still_open = env.render()
if still_open is False:
break
if steps % 20 == 0 or terminated or truncated:
print("observations:", " ".join([f"{x:+0.2f}" for x in s]))
print(f"step {steps} total_reward {total_reward:+0.2f}")
steps += 1
if terminated or truncated:
break
if render:
env.close()
return total_reward
| LunarLander |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.