language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | lazyprogrammer__machine_learning_examples | rl3/a2c/neural_network.py | {
"start": 723,
"end": 1824
} | class ____:
def __init__(self, sess, ob_space, ac_space, nenv, nsteps, nstack, reuse=False):
gain = np.sqrt(2)
nbatch = nenv * nsteps
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc * nstack)
X = tf.placeholder(tf.uint8, ob_shape) # obs
X_normal = tf.cast(X, tf.float32) / 255.0
with tf.variable_scope("model", reuse=reuse):
h1 = conv(X_normal, 32, 8, 4, gain)
h2 = conv(h1, 64, 4, 2, gain)
h3 = conv(h2, 64, 3, 1, gain)
h3 = tf.layers.flatten(h3)
h4 = dense(h3, 512, gain=gain)
pi = dense(h4, ac_space.n, act=None)
vf = dense(h4, 1, act=None)
v0 = vf[:, 0]
a0 = sample(pi)
# self.initial_state = [] # State reserved for LSTM
def step(ob):
a, v = sess.run([a0, v0], {X: ob})
return a, v#, [] # dummy state
def value(ob):
return sess.run(v0, {X: ob})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
| CNN |
python | python-poetry__poetry | tests/types.py | {
"start": 3327,
"end": 3429
} | class ____(Protocol):
def __call__(self, name: str) -> NormalizedName: ...
| NormalizedNameTransformer |
python | django__django | tests/backends/base/test_features.py | {
"start": 74,
"end": 228
} | class ____(SimpleTestCase):
def test_nonexistent_feature(self):
self.assertFalse(hasattr(connection.features, "nonexistent"))
| TestDatabaseFeatures |
python | run-llama__llama_index | docs/examples/output_parsing/directory.py | {
"start": 247,
"end": 1112
} | class ____(BaseModel):
"""
Class representing a single node in a filesystem. Can be either a file or a folder.
Note that a file cannot have children, but a folder can.
Args:
name (str): The name of the node.
children (List[Node]): The list of child nodes (if any).
node_type (NodeType): The type of the node, either a file or a folder.
"""
name: str = Field(..., description="Name of the folder")
children: List["Node"] = Field(
default_factory=list,
description=(
"List of children nodes, only applicable for folders, files cannot"
" have children"
),
)
node_type: NodeType = Field(
default=NodeType.FILE,
description=(
"Either a file or folder, use the name to determine which it"
" could be"
),
)
| Node |
python | eventlet__eventlet | tests/db_pool_test.py | {
"start": 15040,
"end": 15151
} | class ____(MysqlConnectionPool, TpoolConnectionPool, tests.LimitedTestCase):
__test__ = True
| Test01MysqlTpool |
python | kamyu104__LeetCode-Solutions | Python/bulb-switcher-iv.py | {
"start": 29,
"end": 328
} | class ____(object):
def minFlips(self, target):
"""
:type target: str
:rtype: int
"""
result, curr = 0, '0'
for c in target:
if c == curr:
continue
curr = c
result += 1
return result
| Solution |
python | pypa__setuptools | setuptools/_static.py | {
"start": 138,
"end": 2332
} | class ____:
"""
Wrapper for built-in object types that are allow setuptools to identify
static core metadata (in opposition to ``Dynamic``, as defined :pep:`643`).
The trick is to mark values with :class:`Static` when they come from
``pyproject.toml`` or ``setup.cfg``, so if any plugin overwrite the value
with a built-in, setuptools will be able to recognise the change.
We inherit from built-in classes, so that we don't need to change the existing
code base to deal with the new types.
We also should strive for immutability objects to avoid changes after the
initial parsing.
"""
_mutated_: bool = False # TODO: Remove after deprecation warning is solved
def _prevent_modification(target: type, method: str, copying: str) -> None:
"""
Because setuptools is very flexible we cannot fully prevent
plugins and user customizations from modifying static values that were
parsed from config files.
But we can attempt to block "in-place" mutations and identify when they
were done.
"""
fn = getattr(target, method, None)
if fn is None:
return
@wraps(fn)
def _replacement(self: Static, *args, **kwargs):
# TODO: After deprecation period raise NotImplementedError instead of warning
# which obviated the existence and checks of the `_mutated_` attribute.
self._mutated_ = True
SetuptoolsDeprecationWarning.emit(
"Direct modification of value will be disallowed",
f"""
In an effort to implement PEP 643, direct/in-place changes of static values
that come from configuration files are deprecated.
If you need to modify this value, please first create a copy with {copying}
and make sure conform to all relevant standards when overriding setuptools
functionality (https://packaging.python.org/en/latest/specifications/).
""",
due_date=(2025, 10, 10), # Initially introduced in 2024-09-06
)
return fn(self, *args, **kwargs)
_replacement.__doc__ = "" # otherwise doctest may fail.
setattr(target, method, _replacement)
| Static |
python | Textualize__textual | docs/examples/guide/reactivity/dynamic_watch.py | {
"start": 172,
"end": 544
} | class ____(Widget):
DEFAULT_CSS = "Counter { height: auto; }"
counter = reactive(0) # (1)!
def compose(self) -> ComposeResult:
yield Label()
yield Button("+10")
def on_button_pressed(self) -> None:
self.counter += 10
def watch_counter(self, counter_value: int):
self.query_one(Label).update(str(counter_value))
| Counter |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 13690,
"end": 14018
} | class ____(LocalizableStreamlitException):
"""Exception raised when a page_link is created without a label."""
def __init__(self) -> None:
super().__init__(
"The `label` param is required for external links used with `st.page_link` - please provide a `label`."
)
| StreamlitMissingPageLabelError |
python | getsentry__sentry | tests/sentry/api/serializers/test_project.py | {
"start": 11688,
"end": 12381
} | class ____(TestCase):
def test_simple(self) -> None:
user = self.create_user(username="foo")
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(teams=[team], organization=organization, name="foo")
result = serialize(project, user, ProjectWithTeamSerializer())
assert result["slug"] == project.slug
assert result["name"] == project.name
assert result["id"] == str(project.id)
assert result["team"] == {
"id": str(team.id),
"slug": team.slug,
"name": team.name,
}
| ProjectWithTeamSerializerTest |
python | agronholm__apscheduler | tests/triggers/test_combining.py | {
"start": 447,
"end": 6431
} | class ____:
@pytest.mark.parametrize("threshold", [1, 0])
def test_two_datetriggers(self, timezone, serializer, threshold):
date1 = datetime(2020, 5, 16, 14, 17, 30, 254212, tzinfo=timezone)
date2 = datetime(2020, 5, 16, 14, 17, 31, 254212, tzinfo=timezone)
trigger = AndTrigger(
[DateTrigger(date1), DateTrigger(date2)], threshold=threshold
)
if serializer:
trigger = serializer.deserialize(serializer.serialize(trigger))
if threshold:
# date2 was within the threshold so it will not be used
assert trigger.next() == date1
assert trigger.next() is None
def test_max_iterations(self, timezone, serializer):
start_time = datetime(2020, 5, 16, 14, 17, 30, 254212, tzinfo=timezone)
trigger = AndTrigger(
[
IntervalTrigger(seconds=4, start_time=start_time),
IntervalTrigger(
seconds=4, start_time=start_time + timedelta(seconds=2)
),
]
)
if serializer:
trigger = serializer.deserialize(serializer.serialize(trigger))
pytest.raises(MaxIterationsReached, trigger.next)
def test_repr(self, timezone, serializer):
start_time = datetime(2020, 5, 16, 14, 17, 30, 254212, tzinfo=timezone)
trigger = AndTrigger(
[
IntervalTrigger(seconds=4, start_time=start_time),
IntervalTrigger(
seconds=4, start_time=start_time + timedelta(seconds=2)
),
]
)
if serializer:
trigger = serializer.deserialize(serializer.serialize(trigger))
assert repr(trigger) == (
"AndTrigger([IntervalTrigger(seconds=4, "
"start_time='2020-05-16 14:17:30.254212+02:00'), IntervalTrigger("
"seconds=4, start_time='2020-05-16 14:17:32.254212+02:00')], "
"threshold=1.0, max_iterations=10000)"
)
@pytest.mark.parametrize(
"left_trigger,right_trigger,expected_datetimes",
[
(
IntervalTrigger(
hours=6, start_time=datetime(2024, 5, 1, tzinfo=timezone.utc)
),
IntervalTrigger(
hours=12, start_time=datetime(2024, 5, 1, tzinfo=timezone.utc)
),
[
datetime(2024, 5, 1, 0, tzinfo=timezone.utc),
datetime(2024, 5, 1, 12, tzinfo=timezone.utc),
datetime(2024, 5, 2, 0, tzinfo=timezone.utc),
],
),
(
IntervalTrigger(
days=1, start_time=datetime(2024, 5, 1, tzinfo=timezone.utc)
),
IntervalTrigger(
weeks=1, start_time=datetime(2024, 5, 1, tzinfo=timezone.utc)
),
[
datetime(2024, 5, 1, tzinfo=timezone.utc),
datetime(2024, 5, 8, tzinfo=timezone.utc),
datetime(2024, 5, 15, tzinfo=timezone.utc),
],
),
(
CronTrigger(
day_of_week="mon-fri",
hour="*",
timezone=timezone.utc,
start_time=datetime(2024, 5, 3, tzinfo=timezone.utc),
),
IntervalTrigger(
hours=12, start_time=datetime(2024, 5, 3, tzinfo=timezone.utc)
),
[
datetime(2024, 5, 3, 0, tzinfo=timezone.utc),
datetime(2024, 5, 3, 12, tzinfo=timezone.utc),
datetime(2024, 5, 6, 0, tzinfo=timezone.utc),
],
),
(
CronTrigger(
day_of_week="mon-fri",
timezone=timezone.utc,
start_time=datetime(2024, 5, 13, tzinfo=timezone.utc),
),
IntervalTrigger(
days=4, start_time=datetime(2024, 5, 13, tzinfo=timezone.utc)
),
[
datetime(2024, 5, 13, tzinfo=timezone.utc),
datetime(2024, 5, 17, tzinfo=timezone.utc),
datetime(2024, 5, 21, tzinfo=timezone.utc),
datetime(2024, 5, 29, tzinfo=timezone.utc),
],
),
(
CalendarIntervalTrigger(
months=1,
timezone=timezone.utc,
start_date=datetime(2024, 1, 1, tzinfo=timezone.utc),
),
CronTrigger(
day_of_week="mon-fri",
timezone=timezone.utc,
start_time=datetime(2024, 1, 1, tzinfo=timezone.utc),
),
[
datetime(2024, 1, 1, tzinfo=timezone.utc),
datetime(2024, 2, 1, tzinfo=timezone.utc),
datetime(2024, 3, 1, tzinfo=timezone.utc),
datetime(2024, 4, 1, tzinfo=timezone.utc),
datetime(2024, 5, 1, tzinfo=timezone.utc),
datetime(2024, 7, 1, tzinfo=timezone.utc),
datetime(2024, 8, 1, tzinfo=timezone.utc),
datetime(2024, 10, 1, tzinfo=timezone.utc),
datetime(2024, 11, 1, tzinfo=timezone.utc),
],
),
],
)
def test_overlapping_triggers(
self, left_trigger, right_trigger, expected_datetimes
):
"""
Verify that the `AndTrigger` fires at the intersection of two triggers.
"""
and_trigger = AndTrigger([left_trigger, right_trigger])
for expected_datetime in expected_datetimes:
next_datetime = and_trigger.next()
assert next_datetime == expected_datetime
| TestAndTrigger |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_task_instances.py | {
"start": 52000,
"end": 57660
} | class ____:
def setup_method(self):
clear_db_runs()
def teardown_method(self):
clear_db_runs()
@pytest.mark.parametrize(
("hostname", "pid", "expected_status_code", "expected_detail"),
[
# Case: Successful heartbeat
("random-hostname", 1789, 204, None),
# Case: Conflict due to hostname mismatch
(
"wrong-hostname",
1789,
409,
{
"reason": "running_elsewhere",
"message": "TI is already running elsewhere",
"current_hostname": "random-hostname",
"current_pid": 1789,
},
),
# Case: Conflict due to pid mismatch
(
"random-hostname",
1054,
409,
{
"reason": "running_elsewhere",
"message": "TI is already running elsewhere",
"current_hostname": "random-hostname",
"current_pid": 1789,
},
),
],
)
def test_ti_heartbeat(
self,
client,
session,
create_task_instance,
hostname,
pid,
expected_status_code,
expected_detail,
time_machine,
):
"""Test the TI heartbeat endpoint for various scenarios including conflicts."""
time_now = timezone.parse("2024-10-31T12:00:00Z")
# Freeze time to a specific time
time_machine.move_to(time_now, tick=False)
ti = create_task_instance(
task_id="test_ti_heartbeat",
state=State.RUNNING,
hostname="random-hostname",
pid=1789,
session=session,
)
session.commit()
task_instance_id = ti.id
# Pre-condition: TI heartbeat is NONE
assert ti.last_heartbeat_at is None
response = client.put(
f"/execution/task-instances/{task_instance_id}/heartbeat",
json={"hostname": hostname, "pid": pid},
)
assert response.status_code == expected_status_code
if expected_status_code == 204:
# If successful, ensure last_heartbeat_at is updated
session.refresh(ti)
assert ti.last_heartbeat_at == time_now
assert response.text == ""
else:
# If there's an error, check the error detail
assert response.json()["detail"] == expected_detail
def test_ti_heartbeat_non_existent_task(self, client, session, create_task_instance):
"""Test that a 404 error is returned when the Task Instance does not exist."""
task_instance_id = "0182e924-0f1e-77e6-ab50-e977118bc139"
# Pre-condition: the Task Instance does not exist
assert session.get(TaskInstance, task_instance_id) is None
response = client.put(
f"/execution/task-instances/{task_instance_id}/heartbeat",
json={"hostname": "random-hostname", "pid": 1547},
)
assert response.status_code == 404
assert response.json()["detail"] == {
"reason": "not_found",
"message": "Task Instance not found",
}
@pytest.mark.parametrize(
"ti_state",
[State.SUCCESS, State.FAILED],
)
def test_ti_heartbeat_when_task_not_running(self, client, session, create_task_instance, ti_state):
"""Test that a 409 error is returned when the Task Instance is not in RUNNING state."""
ti = create_task_instance(
task_id="test_ti_heartbeat_when_task_not_running",
state=ti_state,
hostname="random-hostname",
pid=1547,
session=session,
)
session.commit()
task_instance_id = ti.id
response = client.put(
f"/execution/task-instances/{task_instance_id}/heartbeat",
json={"hostname": "random-hostname", "pid": 1547},
)
assert response.status_code == 409
assert response.json()["detail"] == {
"reason": "not_running",
"message": "TI is no longer in the running state and task should terminate",
"current_state": ti_state,
}
def test_ti_heartbeat_update(self, client, session, create_task_instance, time_machine):
"""Test that the Task Instance heartbeat is updated when the Task Instance is running."""
# Set initial time for the test
time_now = timezone.parse("2024-10-31T12:00:00Z")
time_machine.move_to(time_now, tick=False)
ti = create_task_instance(
task_id="test_ti_heartbeat_update",
state=State.RUNNING,
hostname="random-hostname",
pid=1547,
last_heartbeat_at=time_now,
session=session,
)
session.commit()
task_instance_id = ti.id
# Pre-condition: TI heartbeat is set
assert ti.last_heartbeat_at == time_now, "Initial last_heartbeat_at should match time_now"
# Move time forward by 10 minutes
new_time = time_now.add(minutes=10)
time_machine.move_to(new_time, tick=False)
response = client.put(
f"/execution/task-instances/{task_instance_id}/heartbeat",
json={"hostname": "random-hostname", "pid": 1547},
)
assert response.status_code == 204
# If successful, ensure last_heartbeat_at is updated
session.refresh(ti)
assert ti.last_heartbeat_at == time_now.add(minutes=10)
| TestTIHealthEndpoint |
python | numba__numba | numba/tests/test_heapq.py | {
"start": 14343,
"end": 14456
} | class ____(_TestHeapq, TestCase):
"""Test heapq with typed lists"""
listimpl = typed.List
| TestHeapqTypedList |
python | python-markdown__markdown | tests/test_syntax/extensions/test_md_in_html.py | {
"start": 961,
"end": 1545
} | class ____(TestCase):
""" Ensure any remaining elements in HTML stash are properly serialized. """
def test_stash_to_string(self):
# There should be no known cases where this actually happens so we need to
# forcefully pass an `etree` `Element` to the method to ensure proper behavior.
element = Element('div')
element.text = 'Foo bar.'
md = Markdown(extensions=['md_in_html'])
result = md.postprocessors['raw_html'].stash_to_string(element)
self.assertEqual(result, '<div>Foo bar.</div>')
| TestMarkdownInHTMLPostProcessor |
python | allegroai__clearml | examples/distributed/pytorch_distributed_example.py | {
"start": 521,
"end": 1209
} | class ____(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = th.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
| Net |
python | graphql-python__graphene | graphene/types/tests/test_json.py | {
"start": 96,
"end": 2347
} | class ____(ObjectType):
json = JSONString(input=JSONString())
def resolve_json(self, info, input):
return input
schema = Schema(query=Query)
def test_jsonstring_query():
json_value = '{"key": "value"}'
json_value_quoted = json_value.replace('"', '\\"')
result = schema.execute("""{ json(input: "%s") }""" % json_value_quoted)
assert not result.errors
assert result.data == {"json": json_value}
result = schema.execute("""{ json(input: "{}") }""")
assert not result.errors
assert result.data == {"json": "{}"}
def test_jsonstring_query_variable():
json_value = '{"key": "value"}'
result = schema.execute(
"""query Test($json: JSONString){ json(input: $json) }""",
variables={"json": json_value},
)
assert not result.errors
assert result.data == {"json": json_value}
def test_jsonstring_optional_uuid_input():
"""
Test that we can provide a null value to an optional input
"""
result = schema.execute("{ json(input: null) }")
assert not result.errors
assert result.data == {"json": None}
def test_jsonstring_invalid_query():
"""
Test that if an invalid type is provided we get an error
"""
result = schema.execute("{ json(input: 1) }")
assert result.errors == [
{"message": "Expected value of type 'JSONString', found 1."},
]
result = schema.execute("{ json(input: {}) }")
assert result.errors == [
{"message": "Expected value of type 'JSONString', found {}."},
]
result = schema.execute('{ json(input: "a") }')
assert result.errors == [
{
"message": "Expected value of type 'JSONString', found \"a\"; "
"Badly formed JSONString: Expecting value: line 1 column 1 (char 0)",
},
]
result = schema.execute("""{ json(input: "{\\'key\\': 0}") }""")
assert result.errors == [
{"message": "Syntax Error: Invalid character escape sequence: '\\''."},
]
result = schema.execute("""{ json(input: "{\\"key\\": 0,}") }""")
assert len(result.errors) == 1
assert result.errors[0].message.startswith(
'Expected value of type \'JSONString\', found "{\\"key\\": 0,}"; Badly formed JSONString:'
)
| Query |
python | astropy__astropy | astropy/io/fits/card.py | {
"start": 916,
"end": 50087
} | class ____(_Verify):
length = CARD_LENGTH
"""The length of a Card image; should always be 80 for valid FITS files."""
# String for a FITS standard compliant (FSC) keyword.
_keywd_FSC_RE = re.compile(r"^[A-Z0-9_-]{0,%d}$" % KEYWORD_LENGTH) # noqa: UP031, RUF100
# This will match any printable ASCII character excluding '='
_keywd_hierarch_RE = re.compile(
r"^(?:HIERARCH +)?(?:^[ -<>-~]+ ?)+$", re.IGNORECASE
)
# A number sub-string, either an integer or a float in fixed or
# scientific notation. One for FSC and one for non-FSC (NFSC) format:
# NFSC allows lower case of DE for exponent, allows space between sign,
# digits, exponent sign, and exponents
_digits_FSC = r"(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?"
_digits_NFSC = r"(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?"
_numr_FSC = r"[+-]?" + _digits_FSC
_numr_NFSC = r"[+-]? *" + _digits_NFSC
# This regex helps delete leading zeros from numbers, otherwise
# Python might evaluate them as octal values (this is not-greedy, however,
# so it may not strip leading zeros from a float, which is fine)
_number_FSC_RE = re.compile(rf"(?P<sign>[+-])?0*?(?P<digt>{_digits_FSC})")
_number_NFSC_RE = re.compile(rf"(?P<sign>[+-])? *0*?(?P<digt>{_digits_NFSC})")
# Used in cards using the CONTINUE convention which expect a string
# followed by an optional comment
_strg = r"\'(?P<strg>([ -~]+?|\'\'|) *?)\'(?=$|/| )"
_comm_field = r"(?P<comm_field>(?P<sepr>/ *)(?P<comm>(.|\n)*))"
_strg_comment_RE = re.compile(f"({_strg})? *{_comm_field}?$")
# FSC commentary card string which must contain printable ASCII characters.
# Note: \Z matches the end of the string without allowing newlines
_ascii_text_re = re.compile(r"[ -~]*\Z")
# Checks for a valid value/comment string. It returns a match object
# for a valid value/comment string.
# The value group will return a match if a FITS string, boolean,
# number, or complex value is found, otherwise it will return
# None, meaning the keyword is undefined. The comment field will
# return a match if the comment separator is found, though the
# comment maybe an empty string.
# fmt: off
_value_FSC_RE = re.compile(
r'(?P<value_field> *'
r'(?P<value>'
# The <strg> regex is not correct for all cases, but
# it comes pretty darn close. It appears to find the
# end of a string rather well, but will accept
# strings with an odd number of single quotes,
# instead of issuing an error. The FITS standard
# appears vague on this issue and only states that a
# string should not end with two single quotes,
# whereas it should not end with an even number of
# quotes to be precise.
#
# Note that a non-greedy match is done for a string,
# since a greedy match will find a single-quote after
# the comment separator resulting in an incorrect
# match.
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_FSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_FSC + r') *, *'
r'(?P<imag>' + _numr_FSC + r') *\))'
r')? *)'
r'(?P<comm_field>'
r'(?P<sepr>/ *)'
r'(?P<comm>[!-~][ -~]*)?'
r')?$'
)
# fmt: on
# fmt: off
_value_NFSC_RE = re.compile(
r'(?P<value_field> *'
r'(?P<value>'
rf'{_strg}|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_NFSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_NFSC + r') *, *'
r'(?P<imag>' + _numr_NFSC + r') *\))'
fr')? *){_comm_field}?$'
)
# fmt: on
_rvkc_identifier = r"[a-zA-Z_]\w*"
_rvkc_field = _rvkc_identifier + r"(\.\d+)?"
_rvkc_field_specifier_s = rf"{_rvkc_field}(\.{_rvkc_field})*"
_rvkc_field_specifier_val = (
rf"(?P<keyword>{_rvkc_field_specifier_s}): +(?P<val>{_numr_FSC})"
)
_rvkc_keyword_val = rf"\'(?P<rawval>{_rvkc_field_specifier_val})\'"
_rvkc_keyword_val_comm = rf" *{_rvkc_keyword_val} *(/ *(?P<comm>[ -~]*))?$"
_rvkc_field_specifier_val_RE = re.compile(_rvkc_field_specifier_val + "$")
# regular expression to extract the key and the field specifier from a
# string that is being used to index into a card list that contains
# record value keyword cards (ex. 'DP1.AXIS.1')
_rvkc_keyword_name_RE = re.compile(
rf"(?P<keyword>{_rvkc_identifier})\.(?P<field_specifier>{_rvkc_field_specifier_s})$"
)
# regular expression to extract the field specifier and value and comment
# from the string value of a record value keyword card
# (ex "'AXIS.1: 1' / a comment")
_rvkc_keyword_val_comm_RE = re.compile(_rvkc_keyword_val_comm)
_commentary_keywords = {"", "COMMENT", "HISTORY", "END"}
_special_keywords = _commentary_keywords.union(["CONTINUE"])
# The default value indicator; may be changed if required by a convention
# (namely HIERARCH cards)
_value_indicator = VALUE_INDICATOR
def __init__(self, keyword=None, value=None, comment=None, **kwargs):
# For backwards compatibility, support the 'key' keyword argument:
if keyword is None and "key" in kwargs:
keyword = kwargs["key"]
self._keyword = None
self._value = None
self._comment = None
self._valuestring = None
self._image = None
# This attribute is set to False when creating the card from a card
# image to ensure that the contents of the image get verified at some
# point
self._verified = True
# A flag to conveniently mark whether or not this was a valid HIERARCH
# card
self._hierarch = False
# If the card could not be parsed according the FITS standard or
# any recognized non-standard conventions, this will be True
self._invalid = False
self._field_specifier = None
# These are used primarily only by RVKCs
self._rawkeyword = None
self._rawvalue = None
if not (
keyword is not None
and value is not None
and self._check_if_rvkc(keyword, value)
):
# If _check_if_rvkc passes, it will handle setting the keyword and
# value
if keyword is not None:
self.keyword = keyword
if value is not None:
self.value = value
if comment is not None:
self.comment = comment
self._modified = False
self._valuemodified = False
def __repr__(self):
return repr((self.keyword, self.value, self.comment))
def __str__(self):
return self.image
def __len__(self):
return 3
def __getitem__(self, index):
return (self.keyword, self.value, self.comment)[index]
@property
def keyword(self):
"""Returns the keyword name parsed from the card image."""
if self._keyword is not None:
return self._keyword
elif self._image:
self._keyword = self._parse_keyword()
return self._keyword
else:
self.keyword = ""
return ""
@keyword.setter
def keyword(self, keyword):
"""Set the key attribute; once set it cannot be modified."""
if self._keyword is not None:
raise AttributeError("Once set, the Card keyword may not be modified")
elif isinstance(keyword, str):
# Be nice and remove trailing whitespace--some FITS code always
# pads keywords out with spaces; leading whitespace, however,
# should be strictly disallowed.
keyword = keyword.rstrip()
keyword_upper = keyword.upper()
if len(keyword) <= KEYWORD_LENGTH and self._keywd_FSC_RE.match(
keyword_upper
):
# For keywords with length > 8 they will be HIERARCH cards,
# and can have arbitrary case keywords
if keyword_upper == "END":
raise ValueError("Keyword 'END' not allowed.")
keyword = keyword_upper
elif self._keywd_hierarch_RE.match(keyword):
# In prior versions of PyFITS (*) HIERARCH cards would only be
# created if the user-supplied keyword explicitly started with
# 'HIERARCH '. Now we will create them automatically for long
# keywords, but we still want to support the old behavior too;
# the old behavior makes it possible to create HIERARCH cards
# that would otherwise be recognized as RVKCs
# (*) This has never affected Astropy, because it was changed
# before PyFITS was merged into Astropy!
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
if keyword_upper[:9] == "HIERARCH ":
# The user explicitly asked for a HIERARCH card, so don't
# bug them about it...
keyword = keyword[9:].strip()
else:
# We'll gladly create a HIERARCH card, but a warning is
# also displayed
warnings.warn(
f"Keyword name {keyword!r} is greater than 8 characters or "
"contains characters not allowed by the FITS "
"standard; a HIERARCH card will be created.",
VerifyWarning,
)
else:
raise ValueError(f"Illegal keyword name: {keyword!r}.")
self._keyword = keyword
self._modified = True
else:
raise ValueError(f"Keyword name {keyword!r} is not a string.")
@property
def value(self):
"""The value associated with the keyword stored in this card."""
if self.field_specifier:
return float(self._value)
if self._value is not None:
value = self._value
elif self._valuestring is not None or self._image:
value = self._value = self._parse_value()
else:
if self._keyword == "":
self._value = value = ""
else:
self._value = value = UNDEFINED
if conf.strip_header_whitespace and isinstance(value, str):
value = value.rstrip()
return value
@value.setter
def value(self, value):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if value is None:
value = UNDEFINED
try:
oldvalue = self.value
except VerifyError:
# probably a parsing error, falling back to the internal _value
# which should be None. This may happen while calling _fix_value.
oldvalue = self._value
if oldvalue is None:
oldvalue = UNDEFINED
if not isinstance(
value,
(
str,
int,
float,
complex,
bool,
Undefined,
np.floating,
np.integer,
np.complexfloating,
np.bool_,
),
):
raise ValueError(f"Illegal value: {value!r}.")
if isinstance(value, (float, np.float32)) and (
np.isnan(value) or np.isinf(value)
):
# value is checked for both float and np.float32 instances
# since np.float32 is not considered a Python float.
raise ValueError(
f"Floating point {value!r} values are not allowed in FITS headers."
)
elif isinstance(value, str):
m = self._ascii_text_re.match(value)
if not m:
raise ValueError(
"FITS header values must contain standard printable ASCII "
f"characters; {value!r} contains characters not representable in "
"ASCII or non-printable characters."
)
elif isinstance(value, np.bool_):
value = bool(value)
if conf.strip_header_whitespace and (
isinstance(oldvalue, str) and isinstance(value, str)
):
# Ignore extra whitespace when comparing the new value to the old
different = oldvalue.rstrip() != value.rstrip()
elif isinstance(oldvalue, bool) or isinstance(value, bool):
different = oldvalue is not value
else:
different = oldvalue != value or not isinstance(value, type(oldvalue))
if different:
self._value = value
self._rawvalue = None
self._modified = True
self._valuestring = None
self._valuemodified = True
if self.field_specifier:
try:
self._value = _int_or_float(self._value)
except ValueError:
raise ValueError(f"value {self._value} is not a float")
@value.deleter
def value(self):
if self._invalid:
raise ValueError(
"The value of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
if not self.field_specifier:
self.value = ""
else:
raise AttributeError(
"Values cannot be deleted from record-valued keyword cards"
)
@property
def rawkeyword(self):
"""On record-valued keyword cards this is the name of the standard <= 8
character FITS keyword that this RVKC is stored in. Otherwise it is
the card's normal keyword.
"""
if self._rawkeyword is not None:
return self._rawkeyword
elif self.field_specifier is not None:
self._rawkeyword = self.keyword.split(".", 1)[0]
return self._rawkeyword
else:
return self.keyword
@property
def rawvalue(self):
"""On record-valued keyword cards this is the raw string value in
the ``<field-specifier>: <value>`` format stored in the card in order
to represent a RVKC. Otherwise it is the card's normal value.
"""
if self._rawvalue is not None:
return self._rawvalue
elif self.field_specifier is not None:
self._rawvalue = f"{self.field_specifier}: {self.value}"
return self._rawvalue
else:
return self.value
@property
def comment(self):
"""Get the comment attribute from the card image if not already set."""
if self._comment is None:
self._comment = self._parse_comment() if self._image else ""
if conf.strip_header_whitespace and isinstance(self._comment, str):
return self._comment.rstrip()
else:
return self._comment
@comment.setter
def comment(self, comment):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot set. Either "
"delete this card from the header or replace it."
)
if comment is None:
comment = ""
if isinstance(comment, str):
m = self._ascii_text_re.match(comment)
if not m:
raise ValueError(
"FITS header comments must contain standard printable "
f"ASCII characters; {comment!r} contains characters not "
"representable in ASCII or non-printable characters."
)
try:
oldcomment = self.comment
except VerifyError:
# probably a parsing error, falling back to the internal _comment
# which should be None.
oldcomment = self._comment
if oldcomment is None:
oldcomment = ""
if comment != oldcomment:
self._comment = comment
self._modified = True
@comment.deleter
def comment(self):
if self._invalid:
raise ValueError(
"The comment of invalid/unparsable cards cannot deleted. "
"Either delete this card from the header or replace it."
)
self.comment = ""
@property
def field_specifier(self):
"""
The field-specifier of record-valued keyword cards; always `None` on
normal cards.
"""
# Ensure that the keyword exists and has been parsed--the will set the
# internal _field_specifier attribute if this is a RVKC.
if self.keyword:
return self._field_specifier
else:
return None
    @field_specifier.setter
    def field_specifier(self, field_specifier):
        """Replace the field-specifier of an existing RVKC; rejects non-RVKCs."""
        # A blank field-specifier is meaningless for a record-valued card.
        if not field_specifier:
            raise ValueError(
                "The field-specifier may not be blank in record-valued keyword cards."
            )
        # A card that is not already a RVKC cannot be turned into one here.
        elif not self.field_specifier:
            raise AttributeError(
                "Cannot coerce cards to be record-valued keyword cards by "
                "setting the field_specifier attribute"
            )
        elif field_specifier != self.field_specifier:
            self._field_specifier = field_specifier
            # The keyword need also be updated
            keyword = self._keyword.split(".", 1)[0]
            self._keyword = f"{keyword}.{field_specifier}"
            self._modified = True
@field_specifier.deleter
def field_specifier(self):
raise AttributeError(
"The field_specifier attribute may not be "
"deleted from record-valued keyword cards."
)
@property
def image(self):
"""
The card "image", that is, the 80 byte character string that represents
this card in an actual FITS header.
"""
if self._image and not self._verified:
self.verify("fix+warn")
if self._image is None or self._modified:
self._image = self._format_image()
return self._image
@property
def is_blank(self):
"""
`True` if the card is completely blank--that is, it has no keyword,
value, or comment. It appears in the header as 80 spaces.
Returns `False` otherwise.
"""
if not self._verified:
# The card image has not been parsed yet; compare directly with the
# string representation of a blank card
return self._image == BLANK_CARD
# If the keyword, value, and comment are all empty (for self.value
# explicitly check that it is a string value, since a blank value is
# returned as '')
return (
not self.keyword
and (isinstance(self.value, str) and not self.value)
and not self.comment
)
    @classmethod
    def fromstring(cls, image):
        """
        Construct a `Card` object from a (raw) string. It will pad the string
        if it is not the length of a card image (80 columns). If the card
        image is longer than 80 columns, assume it contains ``CONTINUE``
        card(s).
        """
        card = cls()
        if isinstance(image, bytes):
            # FITS supports only ASCII, but decode as latin1 and just take all
            # bytes for now; if it results in mojibake due to e.g. UTF-8
            # encoded data in a FITS header that's OK because it shouldn't be
            # there in the first place
            image = image.decode("latin1")

        card._image = _pad(image)
        # Parsing/verification is deferred until the card is actually accessed.
        card._verified = False
        return card
    @classmethod
    def normalize_keyword(cls, keyword):
        """
        `classmethod` to convert a keyword value that may contain a
        field-specifier to uppercase. The effect is to raise the key to
        uppercase and leave the field specifier in its original case.

        Parameters
        ----------
        keyword : str
            A keyword value or a ``keyword.field-specifier`` value
        """
        # Test first for the most common case: a standard FITS keyword provided
        # in standard all-caps
        if len(keyword) <= KEYWORD_LENGTH and cls._keywd_FSC_RE.match(keyword):
            return keyword

        # Test if this is a record-valued keyword
        match = cls._rvkc_keyword_name_RE.match(keyword)

        if match:
            # Uppercase only the base keyword; the field-specifier keeps its
            # original case.
            return ".".join(
                (match.group("keyword").strip().upper(), match.group("field_specifier"))
            )
        elif len(keyword) > 9 and keyword[:9].upper() == "HIERARCH ":
            # Remove 'HIERARCH' from HIERARCH keywords; this could lead to
            # ambiguity if there is actually a keyword card containing
            # "HIERARCH HIERARCH", but shame on you if you do that.
            return keyword[9:].strip().upper()
        else:
            # A normal FITS keyword, but provided in non-standard case
            return keyword.strip().upper()
    def _check_if_rvkc(self, *args):
        """
        Determine whether or not the card is a record-valued keyword card.

        If one argument is given, that argument is treated as a full card image
        and parsed as such. If two arguments are given, the first is treated
        as the card keyword (including the field-specifier if the card is
        intended as a RVKC), and the second as the card value OR the first value
        can be the base keyword, and the second value the 'field-specifier:
        value' string.

        If the check passes the ._keyword, ._value, and .field_specifier
        keywords are set.

        Examples
        --------
        ::

            self._check_if_rvkc('DP1', 'AXIS.1: 2')
            self._check_if_rvkc('DP1.AXIS.1', 2)
            self._check_if_rvkc('DP1 = AXIS.1: 2')
        """
        # RVKC support can be globally disabled via configuration.
        if not conf.enable_record_valued_keyword_cards:
            return False

        if len(args) == 1:
            return self._check_if_rvkc_image(*args)
        elif len(args) == 2:
            keyword, value = args
            if not isinstance(keyword, str):
                return False
            # Commentary cards (COMMENT/HISTORY/...) are never RVKCs.
            if keyword in self._commentary_keywords:
                return False
            match = self._rvkc_keyword_name_RE.match(keyword)
            if match and isinstance(value, (int, float)):
                self._init_rvkc(
                    match.group("keyword"), match.group("field_specifier"), None, value
                )
                return True

            # Testing for ': ' is a quick way to avoid running the full regular
            # expression, speeding this up for the majority of cases
            if isinstance(value, str) and value.find(": ") > 0:
                match = self._rvkc_field_specifier_val_RE.match(value)
                if match and self._keywd_FSC_RE.match(keyword):
                    self._init_rvkc(
                        keyword, match.group("keyword"), value, match.group("val")
                    )
                    return True
        # NOTE(review): when no RVKC pattern matches, control falls off the end
        # and implicitly returns None (falsy) rather than an explicit False.
    def _check_if_rvkc_image(self, *args):
        """
        Implements `Card._check_if_rvkc` for the case of an unparsed card
        image. If given one argument this is the full intact image. If given
        two arguments the card has already been split between keyword and
        value+comment at the standard value indicator '= '.
        """
        if len(args) == 1:
            image = args[0]
            eq_idx = image.find(VALUE_INDICATOR)
            # The '= ' must fall within the keyword area; otherwise this
            # cannot be a standard-keyword RVKC.
            if eq_idx < 0 or eq_idx > 9:
                return False
            keyword = image[:eq_idx]
            rest = image[eq_idx + VALUE_INDICATOR_LEN :]
        else:
            keyword, rest = args

        rest = rest.lstrip()

        # This test allows us to skip running the full regular expression for
        # the majority of cards that do not contain strings or that definitely
        # do not contain RVKC field-specifiers; it's very much a
        # micro-optimization but it does make a measurable difference
        if not rest or rest[0] != "'" or rest.find(": ") < 2:
            return False

        match = self._rvkc_keyword_val_comm_RE.match(rest)
        if match:
            self._init_rvkc(
                keyword,
                match.group("keyword"),
                match.group("rawval"),
                match.group("val"),
            )
            return True
def _init_rvkc(self, keyword, field_specifier, field, value):
"""
Sort of addendum to Card.__init__ to set the appropriate internal
attributes if the card was determined to be a RVKC.
"""
keyword_upper = keyword.upper()
self._keyword = f"{keyword_upper}.{field_specifier}"
self._rawkeyword = keyword_upper
self._field_specifier = field_specifier
self._value = _int_or_float(value)
self._rawvalue = field
    def _parse_keyword(self):
        """Parse the keyword out of the raw card image; may flag the card invalid."""
        keyword = self._image[:KEYWORD_LENGTH].strip()
        keyword_upper = keyword.upper()

        if keyword_upper in self._special_keywords:
            return keyword_upper
        elif (
            keyword_upper == "HIERARCH"
            and self._image[8] == " "
            and HIERARCH_VALUE_INDICATOR in self._image
        ):
            # This is valid HIERARCH card as described by the HIERARCH keyword
            # convention:
            # http://fits.gsfc.nasa.gov/registry/hierarch_keyword.html
            self._hierarch = True
            self._value_indicator = HIERARCH_VALUE_INDICATOR
            keyword = self._image.split(HIERARCH_VALUE_INDICATOR, 1)[0][9:]
            return keyword.strip()
        else:
            val_ind_idx = self._image.find(VALUE_INDICATOR)
            if 0 <= val_ind_idx <= KEYWORD_LENGTH:
                # The value indicator should appear in byte 8, but we are
                # flexible and allow this to be fixed
                if val_ind_idx < KEYWORD_LENGTH:
                    keyword = keyword[:val_ind_idx]
                    keyword_upper = keyword_upper[:val_ind_idx]

                rest = self._image[val_ind_idx + VALUE_INDICATOR_LEN :]

                # So far this looks like a standard FITS keyword; check whether
                # the value represents a RVKC; if so then we pass things off to
                # the RVKC parser
                if self._check_if_rvkc_image(keyword, rest):
                    return self._keyword

                return keyword_upper
            else:
                # No recognizable value indicator: mark the whole card invalid
                # but keep the raw keyword for display purposes.
                warnings.warn(
                    "The following header keyword is invalid or follows an "
                    f"unrecognized non-standard convention:\n{self._image}",
                    AstropyUserWarning,
                )
                self._invalid = True
                return keyword
    def _parse_value(self):
        """Extract the keyword value from the card image."""
        # for commentary cards, no need to parse further
        # Likewise for invalid cards
        if self.keyword.upper() in self._commentary_keywords or self._invalid:
            return self._image[KEYWORD_LENGTH:].rstrip()

        if self._check_if_rvkc(self._image):
            return self._value

        m = self._value_NFSC_RE.match(self._split()[1])

        if m is None:
            raise VerifyError(
                f"Unparsable card ({self.keyword}), fix it first with .verify('fix')."
            )

        if m.group("bool") is not None:
            # FITS booleans are the single characters 'T' / 'F'.
            value = m.group("bool") == "T"
        elif m.group("strg") is not None:
            # Undo the FITS convention of doubling single quotes in strings.
            value = re.sub("''", "'", m.group("strg"))
        elif m.group("numr") is not None:
            # Check for numbers with leading 0s.
            numr = self._number_NFSC_RE.match(m.group("numr"))
            digt = translate(numr.group("digt"), FIX_FP_TABLE2, " ")
            if numr.group("sign") is None:
                sign = ""
            else:
                sign = numr.group("sign")
            value = _str_to_num(sign + digt)

        elif m.group("cplx") is not None:
            # Check for numbers with leading 0s.
            real = self._number_NFSC_RE.match(m.group("real"))
            rdigt = translate(real.group("digt"), FIX_FP_TABLE2, " ")
            if real.group("sign") is None:
                rsign = ""
            else:
                rsign = real.group("sign")
            value = _str_to_num(rsign + rdigt)
            imag = self._number_NFSC_RE.match(m.group("imag"))
            idigt = translate(imag.group("digt"), FIX_FP_TABLE2, " ")
            if imag.group("sign") is None:
                isign = ""
            else:
                isign = imag.group("sign")
            value += _str_to_num(isign + idigt) * 1j
        else:
            # No value group matched at all: the value is undefined.
            value = UNDEFINED

        if not self._valuestring:
            # Remember the raw serialized form so later re-rendering can
            # preserve the original formatting.
            self._valuestring = m.group("value")
        return value
    def _parse_comment(self):
        """Extract the comment from the card image."""
        # for commentary cards, no need to parse further
        # likewise for invalid/unparsable cards
        if self.keyword in Card._commentary_keywords or self._invalid:
            return ""

        valuecomment = self._split()[1]
        m = self._value_NFSC_RE.match(valuecomment)
        comment = ""
        if m is not None:
            # Don't combine this if statement with the one above, because
            # we only want the elif case to run if this was not a valid
            # card at all
            if m.group("comm"):
                comment = m.group("comm").rstrip()
        elif "/" in valuecomment:
            # The value in this FITS file was not in a valid/known format. In
            # this case the best we can do is guess that everything after the
            # first / was meant to be the comment
            comment = valuecomment.split("/", 1)[1].strip()

        return comment
    def _split(self):
        """
        Split the card image between the keyword and the rest of the card.
        """
        if self._image is not None:
            # If we already have a card image, don't try to rebuild a new card
            # image, which self.image would do
            image = self._image
        else:
            # Accessing self.image also populates self._image as a side effect,
            # so the self._image uses below are safe afterwards.
            image = self.image

        # Split cards with CONTINUE cards or commentary keywords with long
        # values
        if len(self._image) > self.length:
            values = []
            comments = []
            keyword = None
            for card in self._itersubcards():
                kw, vc = card._split()
                if keyword is None:
                    # The first subcard carries the logical card's keyword.
                    keyword = kw

                if keyword in self._commentary_keywords:
                    values.append(vc)
                    continue

                # Should match a string followed by a comment; if not it
                # might be an invalid Card, so we just take it verbatim
                m = self._strg_comment_RE.match(vc)
                if not m:
                    return kw, vc

                value = m.group("strg") or ""
                value = value.rstrip()
                if value and value[-1] == "&":
                    # Trailing '&' is the CONTINUE continuation marker.
                    value = value[:-1]
                values.append(value)
                comment = m.group("comm")
                if comment:
                    comments.append(comment.rstrip())

            if keyword in self._commentary_keywords:
                valuecomment = "".join(values)
            else:
                # CONTINUE card
                valuecomment = f"'{''.join(values)}' / {' '.join(comments)}"
            return keyword, valuecomment

        if self.keyword in self._special_keywords:
            keyword, valuecomment = image.split(" ", 1)
        else:
            try:
                delim_index = image.index(self._value_indicator)
            except ValueError:
                delim_index = None

            # The equal sign may not be any higher than column 10; anything
            # past that must be considered part of the card value
            if delim_index is None:
                keyword = image[:KEYWORD_LENGTH]
                valuecomment = image[KEYWORD_LENGTH:]
            elif delim_index > 10 and image[:9] != "HIERARCH ":
                keyword = image[:8]
                valuecomment = image[8:]
            else:
                keyword, valuecomment = image.split(self._value_indicator, 1)
        return keyword.strip(), valuecomment.strip()
def _fix_keyword(self):
if self.field_specifier:
keyword, field_specifier = self._keyword.split(".", 1)
self._keyword = f"{keyword.upper()}.{field_specifier}"
else:
self._keyword = self._keyword.upper()
self._modified = True
    def _fix_value(self):
        """Fix the card image for fixable non-standard compliance."""
        value = None
        keyword, valuecomment = self._split()
        m = self._value_NFSC_RE.match(valuecomment)

        # for the unparsable case
        if m is None:
            try:
                # Best effort: treat everything after the first '/' as comment.
                value, comment = valuecomment.split("/", 1)
                self.value = value.strip()
                self.comment = comment.strip()
            except (ValueError, IndexError):
                self.value = valuecomment
            self._valuestring = self._value
            return
        elif m.group("numr") is not None:
            # Normalize non-standard float exponent characters (e.g. 'D', 'd').
            numr = self._number_NFSC_RE.match(m.group("numr"))
            value = translate(numr.group("digt"), FIX_FP_TABLE, " ")
            if numr.group("sign") is not None:
                value = numr.group("sign") + value

        elif m.group("cplx") is not None:
            # Fix both components of a complex value independently.
            real = self._number_NFSC_RE.match(m.group("real"))
            rdigt = translate(real.group("digt"), FIX_FP_TABLE, " ")
            if real.group("sign") is not None:
                rdigt = real.group("sign") + rdigt

            imag = self._number_NFSC_RE.match(m.group("imag"))
            idigt = translate(imag.group("digt"), FIX_FP_TABLE, " ")
            if imag.group("sign") is not None:
                idigt = imag.group("sign") + idigt
            value = f"({rdigt}, {idigt})"
        self._valuestring = value
        # The value itself has not been modified, but its serialized
        # representation (as stored in self._valuestring) has been changed, so
        # still set this card as having been modified (see ticket #137)
        self._modified = True
def _format_keyword(self):
if self.keyword:
if self.field_specifier:
keyword = self.keyword.split(".", 1)[0]
return "{:{len}}".format(keyword, len=KEYWORD_LENGTH)
elif self._hierarch:
return f"HIERARCH {self.keyword} "
else:
return "{:{len}}".format(self.keyword, len=KEYWORD_LENGTH)
else:
return " " * KEYWORD_LENGTH
    def _format_value(self):
        """Render the value portion of the card image as a string."""
        # value string
        float_types = (float, np.floating, complex, np.complexfloating)

        # Force the value to be parsed out first
        value = self.value
        # But work with the underlying raw value instead (to preserve
        # whitespace, for now...)
        value = self._value

        if self.keyword in self._commentary_keywords:
            # The value of a commentary card must be just a raw unprocessed
            # string
            value = str(value)
        elif (
            self._valuestring
            and not self._valuemodified
            and isinstance(self.value, float_types)
        ):
            # Keep the existing formatting for float/complex numbers
            value = f"{self._valuestring:>20}"
        elif self.field_specifier:
            # RVKC: render as "'<field-specifier>: <value>'".
            value = _format_value(self._value).strip()
            value = f"'{self.field_specifier}: {value}'"
        else:
            value = _format_value(value)

        # For HIERARCH cards the value should be shortened to conserve space
        if not self.field_specifier and len(self.keyword) > KEYWORD_LENGTH:
            value = value.strip()

        return value
def _format_comment(self):
if not self.comment:
return ""
else:
return f" / {self._comment}"
    def _format_image(self):
        """Assemble keyword, value, and comment into a full 80-column card image."""
        keyword = self._format_keyword()

        value = self._format_value()
        is_commentary = keyword.strip() in self._commentary_keywords
        if is_commentary:
            comment = ""
        else:
            comment = self._format_comment()

        # equal sign string
        # by default use the standard value indicator even for HIERARCH cards;
        # later we may abbreviate it if necessary
        delimiter = VALUE_INDICATOR
        if is_commentary:
            delimiter = ""

        # put all parts together
        output = f"{keyword}{delimiter}{value}{comment}"

        # For HIERARCH cards we can save a bit of space if necessary by
        # removing the space between the keyword and the equals sign; I'm
        # guessing this is part of the HIEARCH card specification
        keywordvalue_length = len(keyword) + len(delimiter) + len(value)
        if (
            keywordvalue_length == self.length + 1
            and keyword.startswith("HIERARCH")
            and keyword[-1] == " "
        ):
            output = "".join([keyword[:-1], delimiter, value, comment])

        if len(output) <= self.length:
            # Pad the card out to exactly 80 columns.
            output = f"{output:80}"
        else:
            # longstring case (CONTINUE card)
            # try not to use CONTINUE if the string value can fit in one line.
            # Instead, just truncate the comment
            if isinstance(self.value, str) and len(value) > (
                self.length - len(keyword) - 2
            ):
                output = self._format_long_image()
            else:
                warnings.warn(
                    "Card is too long, comment will be truncated.", VerifyWarning
                )
                output = output[: Card.length]
        return output
    def _format_long_image(self):
        """
        Break up long string value/comment into ``CONTINUE`` cards.

        This is a primitive implementation: it will put the value
        string in one block and the comment string in another. Also,
        it does not break at the blank space between words. So it may
        not look pretty.
        """
        if self.keyword in Card._commentary_keywords:
            return self._format_long_commentary_image()

        # Maximum payload widths for value and comment segments on each card.
        value_length = 67
        comment_length = 64
        # We have to be careful that the first line may be able to hold less
        # of the value, if it is a HIERARCH keyword.
        headstr = self._format_keyword() + VALUE_INDICATOR
        first_value_length = value_length + KEYWORD_LENGTH + 2 - len(headstr)

        # do the value string
        value = self._value.replace("'", "''")
        words = _words_group(value, value_length, first_value_length)
        output = []
        for idx, word in enumerate(words):
            if idx > 0:
                headstr = "CONTINUE  "

            # If this is the final CONTINUE remove the '&'
            if not self.comment and idx == len(words) - 1:
                value_format = "'{}'"
            else:
                # '&' marks that the string continues on the next card.
                value_format = "'{}&'"

            value = value_format.format(word)

            output.append(f"{headstr + value:80}")

        # do the comment string
        comment_format = "{}"

        if self.comment:
            words = _words_group(self.comment, comment_length)
            for idx, word in enumerate(words):
                # If this is the final CONTINUE remove the '&'
                if idx == len(words) - 1:
                    headstr = "CONTINUE  '' / "
                else:
                    headstr = "CONTINUE  '&' / "

                comment = headstr + comment_format.format(word)

                output.append(f"{comment:80}")

        return "".join(output)
def _format_long_commentary_image(self):
"""
If a commentary card's value is too long to fit on a single card, this
will render the card as multiple consecutive commentary card of the
same type.
"""
maxlen = Card.length - KEYWORD_LENGTH
value = self._format_value()
output = []
idx = 0
while idx < len(value):
output.append(str(Card(self.keyword, value[idx : idx + maxlen])))
idx += maxlen
return "".join(output)
    def _verify(self, option="warn"):
        """Collect (and optionally fix) FITS-standard violations for this card."""
        errs = []
        fix_text = f"Fixed {self.keyword!r} card to meet the FITS standard."

        # Don't try to verify cards that already don't meet any recognizable
        # standard
        if self._invalid:
            return _ErrList(errs)

        # verify the equal sign position
        if self.keyword not in self._commentary_keywords and (
            self._image
            and self._image[:9].upper() != "HIERARCH "
            and self._image.find("=") != 8
        ):
            errs.append(
                {
                    "err_text": (
                        f"Card {self.keyword!r} is not FITS standard (equal sign not "
                        "at column 8)."
                    ),
                    "fix_text": fix_text,
                    "fix": self._fix_value,
                }
            )

        # verify the key, it is never fixable
        # always fix silently the case where "=" is before column 9,
        # since there is no way to communicate back to the _keys.
        if (self._image and self._image[:8].upper() == "HIERARCH") or self._hierarch:
            # HIERARCH keywords are exempt from the standard keyword checks.
            pass
        else:
            if self._image:
                # PyFITS will auto-uppercase any standard keyword, so lowercase
                # keywords can only occur if they came from the wild
                keyword = self._split()[0]
                if keyword != keyword.upper():
                    # Keyword should be uppercase unless it's a HIERARCH card
                    errs.append(
                        {
                            "err_text": f"Card keyword {keyword!r} is not upper case.",
                            "fix_text": fix_text,
                            "fix": self._fix_keyword,
                        }
                    )

            keyword = self.keyword
            if self.field_specifier:
                keyword = keyword.split(".", 1)[0]

            if not self._keywd_FSC_RE.match(keyword):
                errs.append(
                    {"err_text": f"Illegal keyword name {keyword!r}", "fixable": False}
                )

        # verify the value, it may be fixable
        keyword, valuecomment = self._split()
        if self.keyword in self._commentary_keywords:
            # For commentary keywords all that needs to be ensured is that it
            # contains only printable ASCII characters
            if not self._ascii_text_re.match(valuecomment):
                errs.append(
                    {
                        "err_text": (
                            f"Unprintable string {valuecomment!r}; commentary "
                            "cards may only contain printable ASCII characters"
                        ),
                        "fixable": False,
                    }
                )
        else:
            if not self._valuemodified:
                m = self._value_FSC_RE.match(valuecomment)
                # If the value of a card was replaced before the card was ever
                # even verified, the new value can be considered valid, so we
                # don't bother verifying the old value. See
                # https://github.com/astropy/astropy/issues/5408
                if m is None:
                    errs.append(
                        {
                            "err_text": (
                                f"Card {self.keyword!r} is not FITS standard "
                                f"(invalid value string: {valuecomment!r})."
                            ),
                            "fix_text": fix_text,
                            "fix": self._fix_value,
                        }
                    )

            # verify the comment (string), it is never fixable
            m = self._value_NFSC_RE.match(valuecomment)
            if m is not None:
                comment = m.group("comm")
                if comment is not None:
                    if not self._ascii_text_re.match(comment):
                        errs.append(
                            {
                                "err_text": (
                                    f"Unprintable string {comment!r}; header comments "
                                    "may only contain printable ASCII characters"
                                ),
                                "fixable": False,
                            }
                        )

        errs = _ErrList([self.run_option(option, **err) for err in errs])
        self._verified = True
        return errs
    def _itersubcards(self):
        """
        If the card image is greater than 80 characters, it should consist of a
        normal card followed by one or more CONTINUE card. This method returns
        the subcards that make up this logical card.

        This can also support the case where a HISTORY or COMMENT card has a
        long value that is stored internally as multiple concatenated card
        images.
        """
        ncards = len(self._image) // Card.length

        # Walk the image in 80-column slices, yielding one Card per slice.
        for idx in range(0, Card.length * ncards, Card.length):
            card = Card.fromstring(self._image[idx : idx + Card.length])
            if idx > 0 and card.keyword.upper() not in self._special_keywords:
                raise VerifyError(
                    "Long card images must have CONTINUE cards after "
                    "the first card or have commentary keywords like "
                    "HISTORY or COMMENT."
                )

            if not isinstance(card.value, str):
                raise VerifyError("CONTINUE cards must have string values.")

            yield card
def _int_or_float(s):
"""
Converts an a string to an int if possible, or to a float.
If the string is neither a string or a float a value error is raised.
"""
if isinstance(s, float):
# Already a float so just pass through
return s
try:
return int(s)
except (ValueError, TypeError):
try:
return float(s)
except (ValueError, TypeError) as e:
raise ValueError(str(e))
def _format_value(value):
    """
    Converts a card value to its appropriate string representation as
    defined by the FITS format.
    """
    # string value should occupies at least 8 columns, unless it is
    # a null string
    if isinstance(value, str):
        if value == "":
            return "''"
        else:
            # Double embedded single quotes per the FITS convention.
            exp_val_str = value.replace("'", "''")
            val_str = f"'{exp_val_str:8}'"
            return f"{val_str:20}"

    # must be before int checking since bool is also int
    elif isinstance(value, (bool, np.bool_)):
        return f"{repr(value)[0]:>20}"  # T or F

    elif _is_int(value):
        return f"{value:>20d}"

    elif isinstance(value, (float, np.floating)):
        return f"{_format_float(value):>20}"

    elif isinstance(value, (complex, np.complexfloating)):
        val_str = f"({_format_float(value.real)}, {_format_float(value.imag)})"
        return f"{val_str:>20}"

    elif isinstance(value, Undefined):
        return ""
    else:
        # NOTE(review): any unrecognized type also renders as an empty string,
        # same as an Undefined value.
        return ""
def _format_float(value):
"""Format a floating number to make sure it is at most 20 characters."""
value_str = str(value).replace("e", "E")
# Limit the value string to at most 20 characters.
if (str_len := len(value_str)) > 20:
idx = value_str.find("E")
if idx < 0:
# No scientific notation, truncate decimal places
value_str = value_str[:20]
else:
# Scientific notation, truncate significand (mantissa)
value_str = value_str[: 20 - (str_len - idx)] + value_str[idx:]
return value_str
def _pad(input):
    """Pad the input string with blanks out to a multiple of 80 characters."""
    _len = len(input)
    if _len == Card.length:
        return input

    strlen = _len % Card.length
    # A nonzero exact multiple of the card length needs no padding; everything
    # else -- including the empty string, which pads out to a full blank
    # card -- is padded up to the next multiple of 80.
    if _len and strlen == 0:
        return input
    return input + " " * (Card.length - strlen)
| Card |
python | python-poetry__poetry | src/poetry/inspection/info.py | {
"start": 1264,
"end": 1544
} | class ____(ValueError):
def __init__(self, path: Path, *reasons: BaseException | str) -> None:
reasons = (f"Unable to determine package info for path: {path!s}", *reasons)
super().__init__("\n\n".join(str(msg).strip() for msg in reasons if msg))
| PackageInfoError |
python | conda__conda | tests/plugins/test_reporter_backends.py | {
"start": 410,
"end": 696
} | class ____(ReporterRendererBase):
"""Dummy reporter backend class only for tests"""
def detail_view(self, data: dict[str, str | int | bool], **kwargs) -> str:
return str(data)
def envs_list(self, data, **kwargs) -> str:
return str(data)
| DummyReporterRenderer |
python | django-extensions__django-extensions | django_extensions/management/commands/create_template_tags.py | {
"start": 178,
"end": 2957
} | class ____(AppCommand):
help = "Creates a Django template tags directory structure for the given app name "
"in the apps's directory"
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--name",
"-n",
action="store",
dest="tag_library_name",
default="appname_tags",
help="The name to use for the template tag base name. "
"Defaults to `appname`_tags.",
)
requires_system_checks: List[str] = []
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = True
@signalcommand
def handle_app_config(self, app_config, **options):
app_dir = app_config.path
tag_library_name = options["tag_library_name"]
if tag_library_name == "appname_tags":
tag_library_name = "%s_tags" % os.path.basename(app_dir)
copy_template("template_tags_template", app_dir, tag_library_name)
def copy_template(template_name, copy_to, tag_library_name):
"""Copy the specified template directory to the copy_to location"""
import django_extensions
import shutil
template_dir = os.path.join(django_extensions.__path__[0], "conf", template_name)
# walk the template structure and copies it
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir) + 1 :]
if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
os.mkdir(os.path.join(copy_to, relative_dir))
for i, subdir in enumerate(subdirs):
if subdir.startswith("."):
del subdirs[i]
for f in files:
if f.endswith(".pyc") or f.startswith(".DS_Store"):
continue
path_old = os.path.join(d, f)
path_new = os.path.join(
copy_to, relative_dir, f.replace("sample", tag_library_name)
)
if os.path.exists(path_new):
path_new = os.path.join(copy_to, relative_dir, f)
if os.path.exists(path_new):
continue
path_new = path_new.rstrip(".tmpl")
fp_old = open(path_old, "r")
fp_new = open(path_new, "w")
fp_new.write(fp_old.read())
fp_old.close()
fp_new.close()
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write(
"Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" # noqa: E501
% path_new
)
| Command |
python | joke2k__faker | faker/providers/automotive/el_GR/__init__.py | {
"start": 59,
"end": 555
} | class ____(AutomotiveProvider):
"""Implement automotive provider for ``el_GR`` locale."""
uppercase_letters = "ABEZHIKMNOPTYX"
license_formats = (
"??? ####",
"?? ####",
)
def license_plate(self) -> str:
"""Generate a license plate."""
temp = re.sub(
r"\?",
lambda x: self.random_element(self.uppercase_letters),
self.random_element(self.license_formats),
)
return self.numerify(temp)
| Provider |
python | spyder-ide__spyder | spyder/widgets/collectionseditor.py | {
"start": 24939,
"end": 25998
} | class ____(QHeaderView):
"""
A header view for the BaseTableView that emits a signal when the width of
one of its sections is resized by the user.
"""
sig_user_resized_section = Signal(int, int, int)
def __init__(self, parent=None):
super().__init__(Qt.Horizontal, parent)
self._handle_section_is_pressed = False
self.sectionResized.connect(self.sectionResizeEvent)
# Needed to enable sorting by column
# See spyder-ide/spyder#9835
self.setSectionsClickable(True)
def mousePressEvent(self, e):
super().mousePressEvent(e)
self._handle_section_is_pressed = (self.cursor().shape() ==
Qt.SplitHCursor)
def mouseReleaseEvent(self, e):
super().mouseReleaseEvent(e)
self._handle_section_is_pressed = False
def sectionResizeEvent(self, logicalIndex, oldSize, newSize):
if self._handle_section_is_pressed:
self.sig_user_resized_section.emit(logicalIndex, oldSize, newSize)
| BaseHeaderView |
python | walkccc__LeetCode | solutions/3098. Find the Sum of Subsequence Powers/3098.py | {
"start": 0,
"end": 920
} | class ____:
def sumOfPowers(self, nums: list[int], k: int) -> int:
MOD = 1_000_000_007
nums.sort()
@functools.lru_cache(None)
def dp(
i: int,
k: int,
lastPickedIndex: int,
firstIndex: int,
secondIndex: int
) -> int:
if k == 0:
return nums[secondIndex] - nums[firstIndex]
if i == len(nums):
return 0
newFirstIndex = firstIndex
newSecondIndex = secondIndex
if firstIndex == -1:
newFirstIndex = i
elif secondIndex == -1:
newSecondIndex = i
elif nums[i] - nums[lastPickedIndex] < nums[secondIndex] - nums[firstIndex]:
newFirstIndex = lastPickedIndex
newSecondIndex = i
pick = dp(i + 1, k - 1, i, newFirstIndex, newSecondIndex)
skip = dp(i + 1, k, lastPickedIndex, firstIndex, secondIndex)
return (pick + skip) % MOD
return dp(0, k, -1, -1, -1)
| Solution |
python | pytorch__pytorch | test/jit/test_freezing.py | {
"start": 1148,
"end": 66969
} | class ____(JitTestCase):
def test_freeze_module(self):
class M(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = 1 # folded
self.b = 1.2 # folded
self.c = "hello" # folded
self.c2 = "hi\xa1" # not folded
self.d = [1, 1] # folded
self.e = [1.0, 1.1] # folded
self.f = ["hello", "world"] # folded
self.f2 = [(1, "Over \u0e55\u0e57 57")]
self.g = (
[1, 2],
3.2,
"4.4",
torch.tensor([5.5], requires_grad=True),
) # folded
self.h = {"layer": [torch.tensor([7.7], requires_grad=True)]}
self.h2 = {"layer\xb1": [torch.tensor([8.8], requires_grad=True)]}
self.t = torch.tensor([1.2, 2.4], requires_grad=True) # folded
self.ts = [
torch.tensor([1.0, 2.0], requires_grad=True),
torch.tensor([3.0, 4.0], requires_grad=True),
] # folded
self.tt = [[torch.tensor([3.3, 2.3], requires_grad=True), None]]
def forward(self, x):
return (
str(self.a)
+ str(self.b)
+ self.c
+ self.c2
+ str(self.d)
+ str(self.e)
+ str(self.f)
+ str(self.f2)
+ str(self.g)
+ str(self.h)
+ str(self.h2)
+ str(self.t)
+ str(self.ts)
+ str(self.tt)
)
m = torch.jit.script(M())
m.eval()
input = torch.randn(2, 2)
output_s = m.forward(input)
m._c = torch._C._freeze_module(m._c)
buffer = io.BytesIO()
torch.jit.save(m._c, buffer)
buffer.seek(0)
m2 = torch.jit.load(buffer)
# Check if frozen module looks as below:
# module m {
# attributes {
# tt = ...
# }
# ...
# }
self.assertFalse(m2._c.hasattr("a"))
self.assertFalse(m2._c.hasattr("b"))
self.assertFalse(m2._c.hasattr("c"))
self.assertFalse(m2._c.hasattr("c2"))
self.assertFalse(m2._c.hasattr("d"))
self.assertFalse(m2._c.hasattr("e"))
self.assertFalse(m2._c.hasattr("f"))
self.assertFalse(m2._c.hasattr("f2"))
self.assertFalse(m2._c.hasattr("g"))
self.assertFalse(m2._c.hasattr("h"))
self.assertFalse(m2._c.hasattr("h2"))
self.assertFalse(m2._c.hasattr("t"))
self.assertFalse(m2._c.hasattr("ts"))
self.assertFalse(m2._c.hasattr("tt"))
output_f = m2.forward(input)
self.assertEqual(output_s, output_f)
def test_freeze_module_with_submodule(self):
class SubModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = 11
self.b = 2
def forward(self, x):
return self.a + self.b
class SubModule2(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = 12
self.b = 2
def forward(self, x):
self.b = 30
return self.a + self.b
class TestModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.sub1 = SubModule()
self.sub2 = SubModule2()
self.a = 3
self.b = 4
def forward(self, x):
self.b = 20
return self.sub1(x) + self.a + self.b + self.sub2(x)
m = torch.jit.script(TestModule())
m.eval()
input = torch.randn(2, 2)
output_s = m.forward(input)
mf = torch.jit.freeze(m)
# Check if frozen module looks as below:
# module m {
# attributes {
# sub2 = ...
# b =
# }
# ...
# submodule {
# module m {
# attributes {
# sub2 = ...
# b =
# }
# ...
# }
# }
# }
mf = mf._c
self.assertFalse(mf.hasattr("sub1"))
self.assertFalse(mf.hasattr("a"))
self.assertTrue(mf.hasattr("b"))
self.assertTrue(mf.hasattr("sub2"))
self.assertTrue(mf.sub2.hasattr("b")) # verify b is preserved in sub2
self.assertFalse(mf.sub2.hasattr("a")) # verify a is removed in sub2
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
    def test_freeze_module_with_fork(self):
        """Freezing a module whose forward forks a submodule call.

        The submodule only reads its attributes, so both 'a' and 'b' are
        folded into constants and the frozen module matches the scripted
        module's output.
        """
        class SubModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.ones(20, 20)
                self.b = torch.ones(20, 20)
            def forward(self, x):
                return self.a * self.b + x
        class TestModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub = SubModule()
            def forward(self, x):
                # Run the submodule both asynchronously (fork) and inline.
                fut = torch.jit._fork(self.sub.forward, x)
                y_hat = self.sub(x)
                y = torch.jit._wait(fut)
                return y_hat + y
        m = torch.jit.script(TestModule())
        m.eval()
        input = torch.randn(20, 20)
        output_s = m.forward(input)
        mf = torch._C._freeze_module(m._c)
        # Check if frozen module looks as below:
        # module m {
        #   attributes {
        #   }
        #   ...
        #   submodule {
        #   }
        # }
        self.assertFalse(mf.hasattr("a"))
        self.assertFalse(mf.hasattr("b"))
        output_f = mf.forward(input)
        self.assertEqual(output_s, output_f)
    def test_freeze_module_with_nested_fork(self):
        """Freezing with a fork whose subgraph itself forks another submodule.

        All read-only tensor attributes (a, b, c) are folded; only 'd', which
        is assigned in the top-level forward, is preserved.
        """
        class SubModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.ones(20, 20)
                self.b = torch.ones(20, 20)
            def forward(self, x):
                return self.a * self.b + x
        class SubModule2(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub = SubModule()
                self.c = torch.ones(20, 20)
            def forward(self, x):
                # Inner fork: nested asynchronous call inside the outer fork's graph.
                fut = torch.jit._fork(self.sub.forward, x)
                y_hat = self.sub(x)
                y = torch.jit._wait(fut)
                return y_hat + y + self.c
        class TestModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub = SubModule2()
                self.d = 1
            def forward(self, x):
                fut = torch.jit._fork(self.sub.forward, x)
                y_hat = self.sub(x)
                y = torch.jit._wait(fut)
                # Mutation keeps 'd' alive through freezing.
                self.d = 2
                return y_hat * y + self.d
        m = torch.jit.script(TestModule())
        m.eval()
        input = torch.randn(20, 20)
        output_s = m.forward(input)
        mf = torch._C._freeze_module(m._c)
        # Check if frozen module looks as below:
        # module m {
        #   attributes {
        #   }
        #   ...
        #   submodule {
        #   }
        # }
        self.assertFalse(mf.hasattr("a"))
        self.assertFalse(mf.hasattr("b"))
        self.assertFalse(mf.hasattr("c"))
        self.assertTrue(mf.hasattr("d"))
        output_f = mf.forward(input)
        self.assertEqual(output_s, output_f)
    def test_freeze_module_with_fork2(self):
        """Attributes passed into a fork subgraph are conservatively preserved.

        'a' flows into the fork call and survives freezing; 'b' is only used
        by the inline call and is folded.
        """
        @torch.jit.script
        def foo(x):
            return x * 2
        class TestModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.ones(20, 20)
                self.b = torch.ones(20, 20)
            def forward(self, x):
                fut = torch.jit._fork(foo, self.a)
                y_hat = foo(self.b)
                y = torch.jit._wait(fut)
                return y_hat + y
        m = torch.jit.script(TestModule())
        m.eval()
        input = torch.randn(2, 2)
        output_s = m.forward(input)
        mf = torch._C._freeze_module(m._c)
        # Check if frozen module looks as below:
        # module m {
        #   attributes {
        #     self.a = ...
        #   }
        #   ...
        #   submodule {
        #   }
        # }
        # TODO: Although there is no mutation, alias analysis conservatively
        # assumes a mutation because 'a' is passed to the fork subgraph, so
        # 'a' is preserved while 'b' (used only inline) is folded.
        self.assertTrue(mf.hasattr("a"))
        self.assertFalse(mf.hasattr("b"))
        output_f = mf.forward(input)
        self.assertEqual(output_s, output_f)
    def test_freeze_module_with_fork_calling_module_method(self):
        """Forking a module *method* preserves attributes fed to the fork.

        'b' is passed into the forked call and is conservatively kept; 'a',
        consumed only by the inline ``bar`` call, is folded.
        """
        @torch.jit.script
        def foo(x, y):
            return x * y
        class TestModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.ones(20, 20)
                self.b = torch.ones(20, 20)
            @torch.jit.export
            def foo(self, x):
                return x * self.a
            @torch.jit.export
            def bar(self, x):
                return x * self.b
            def forward(self, x):
                fut = torch.jit._fork(self.foo, self.b)
                y_hat = self.bar(self.a)
                y = torch.jit._wait(fut)
                return y_hat + y
        m = torch.jit.script(TestModule())
        m.eval()
        input = torch.randn(2, 2)
        output_s = m.forward(input)
        mf = torch._C._freeze_module(m._c)
        # Check if frozen module looks as below:
        # module m {
        #   attributes {
        #     self.b = ..
        #   }
        #   ...
        # TODO: Although there are no mutation, the alias analysis
        # conservatively assumes there is a mutation because attributes are
        # passed to fork subgraph. 'b' is preserved.
        self.assertFalse(mf.hasattr("a"))
        self.assertTrue(mf.hasattr("b"))
        output_f = mf.forward(input)
        self.assertEqual(output_s, output_f)
    def test_freeze_module_with_sharedclasstype(self):
        """Submodules sharing one class type keep their mutated attributes.

        ``sub1`` and ``sub2.sub`` share the SubModule class type; because the
        exported mutators touch both 'a' and 'b', each instance preserves both
        tensor attributes after freezing, while the purely-read attributes
        (top-level 'a', sub2's 'b') are folded.
        """
        class SubModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor([1.1])
                self.b = torch.tensor([2.2])
            def forward(self, x):
                return self.a + self.b
            @torch.jit.export
            def modify_a(self, x):
                # In-place mutation of 'a'.
                self.a[0] += 10
                return self.b
            @torch.jit.export
            def modify_b(self, x):
                # In-place mutation of 'b'.
                self.b[0] += 20
                return self.a
        class SubModule2(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub = SubModule()
                self.b = torch.tensor([3.3])
            def forward(self, x):
                y = self.sub.modify_b(x)
                return y + self.b
        class TestModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub1 = SubModule()  # sub1 and sub2.sub shared same class type.
                self.sub2 = SubModule2()
                self.a = torch.tensor([4.4])
            def forward(self, x):
                z = self.sub1.modify_a(x)
                return self.sub2(x) + z + self.a
        m = torch.jit.script(TestModule())
        m.eval()
        input = torch.randn(2, 2)
        output_s = m.forward(input)
        mf = torch._C._freeze_module(m._c)
        # Checking if Frozen module looks as below
        # module mf {
        #   attributes {
        #     sub1 = ...
        #     sub2 = ...
        #   }
        #   ...
        #   submodules {
        #     module sub1 {
        #       attributes {
        #         a = ...
        #         b = ...
        #       }
        #       ...
        #     }
        #     module sub2 {
        #       attributes {
        #         sub = ...
        #       }
        #       ...
        #       submodule {
        #         module sub {
        #           attributes {
        #             a = ...
        #             b = ...
        #           }
        #           ...
        #         }
        #       }
        #     }
        #   }
        # }
        self.assertTrue(mf.hasattr("sub1"))
        self.assertTrue(mf.sub1.hasattr("a"))
        self.assertTrue(mf.sub1.hasattr("b"))
        self.assertFalse(mf.hasattr("a"))
        self.assertTrue(mf.hasattr("sub2"))
        self.assertTrue(mf.sub2.hasattr("sub"))
        self.assertFalse(mf.sub2.hasattr("b"))
        self.assertTrue(mf.sub2.sub.hasattr("a"))
        self.assertTrue(mf.sub2.sub.hasattr("b"))
        output_f = mf.forward(input)
        self.assertEqual(output_s, output_f)
    def test_freeze_module_with_nestedaliasing(self):
        """One SubModule instance aliased as both sub1 and sub2.sub.

        Freezing must detect that sub1.a and sub2.sub.a alias the same
        tensor: 'a' (mutated) is preserved in both places, 'b' is folded.
        """
        class SubModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor([1.1])
                self.b = torch.tensor([2.2])
            def forward(self, x):
                return self.a + self.b
            @torch.jit.export
            def modify_a(self, x):
                self.a[0] = 10
                return self.b
            @torch.jit.export
            def modify_b(self, x):
                self.b[0] = 20
                return self.a
        Sub = SubModule()
        class SubModule2(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub = Sub  # aliasing
            def forward(self, x):
                return self.sub.a
        class TestModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub1 = Sub  # aliasing
                self.sub2 = SubModule2()
            def forward(self, x):
                z = self.sub1.modify_a(x)
                return self.sub2(x) + z
        m = torch.jit.script(TestModule())
        m.eval()
        mf = torch._C._freeze_module(m._c)
        self.assertTrue(mf.hasattr("sub1"))
        self.assertTrue(mf.sub1.hasattr("a"))
        self.assertFalse(mf.sub1.hasattr("b"))
        self.assertTrue(mf.hasattr("sub2"))
        self.assertTrue(mf.sub2.hasattr("sub"))
        self.assertTrue(
            mf.sub2.sub.hasattr("a")
        )  # Freezing detects that self.sub2.sub.a and self.sub1.a are alias
        self.assertFalse(mf.sub2.sub.hasattr("b"))
        input = torch.randn(2, 2)
        output_s = m.forward(input)
        output_f = mf.forward(input)
        self.assertEqual(output_s, output_f)
# FIXME: JIT is not honoring aliasing. 'Sub' module is copied. As a result
# Eager and Script modules produce different output.
    def test_freeze_module_with_nestedaliasingscalar(self):
        """Scalar attributes do not alias across scripting (known JIT bug).

        Scripting copies the shared SubModule for scalar attrs, so eager and
        scripted outputs differ (asserted via assertNotEqual) and sub2 is
        fully folded; frozen output still matches the scripted output.
        """
        class SubModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = 1.1
                self.b = 2.2
            def forward(self, x):
                return self.a + self.b
            @torch.jit.export
            def modify_a(self, x):
                self.a = 10.0
                return self.b
            @torch.jit.export
            def modify_b(self, x):
                self.b = 20.0
                return self.a
        Sub = SubModule()
        class SubModule2(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub = Sub  # aliasing
            def forward(self, x):
                return self.sub.a
        class TestModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub1 = Sub  # aliasing
                self.sub2 = SubModule2()
            def forward(self, x):
                z = self.sub1.modify_a(x)
                return self.sub2(x) + z
        m = TestModule()
        ms = torch.jit.script(m)
        ms.eval()
        mf = torch._C._freeze_module(ms._c)
        self.assertTrue(mf.hasattr("sub1"))
        self.assertTrue(mf.sub1.hasattr("a"))
        self.assertFalse(mf.sub1.hasattr("b"))
        # sub2 is fully folded because self.sub1 and self.sub2.sub are not alias (Scripting bug)
        self.assertFalse(mf.hasattr("sub2"))
        input = torch.randn(2, 2)
        output = m.forward(input)
        output_s = ms.forward(input)
        output_f = mf.forward(input)
        # Should be equal
        self.assertNotEqual(output, output_s)
        self.assertEqual(output_s, output_f)
    def test_freeze_module_with_preserve_sub_module(self):
        """User-requested 'sub1' is preserved whole; 'sub2' is fully folded."""
        class SubModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor([1.1])
                self.b = 2.2
            def forward(self, x):
                return self.a
        class TestModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub1 = SubModule()  # aliasing
                self.sub2 = SubModule()
            def forward(self, x):
                return self.sub2(x) + self.sub1(x)
        m = TestModule()
        ms = torch.jit.script(m)
        ms.eval()
        mf = torch._C._freeze_module(ms._c, ["sub1"])
        # Test that 'sub1' is preserved entirely and 'sub2' is completely folded
        self.assertTrue(mf.hasattr("sub1"))
        self.assertTrue(mf.sub1.hasattr("a"))
        self.assertTrue(mf.sub1.hasattr("b"))
        self.assertFalse(mf.hasattr("sub2"))
        input = torch.randn(2, 2)
        output_s = ms.forward(input)
        output_f = mf.forward(input)
        self.assertEqual(output_s, output_f)
    def test_freeze_module_with_preserve_sub_module_and_mutation(self):
        """Preserved submodule plus mutation keeps both submodules intact.

        'sub1' is user-preserved (even its unused 'b'); the in-place write to
        'a' in forward keeps 'sub2' and its attributes from being folded.
        """
        class SubModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor([1.1])
                self.b = 2.2
            def forward(self, x):
                # In-place mutation prevents folding of 'a'.
                self.a[0] = 3.3
                return self.a
        class TestModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub1 = SubModule()  # aliasing
                self.sub2 = SubModule()
            def forward(self, x):
                return self.sub2(x) + self.sub1(x)
        m = TestModule()
        ms = torch.jit.script(m)
        ms.eval()
        mf = torch._C._freeze_module(ms._c, ["sub1"])
        # Test that be both sub1 and sub1 are preserved and 'b' is preserved
        # even if it is not used. To fulfill user request to preserve 'sub1'
        self.assertTrue(mf.hasattr("sub1"))
        self.assertTrue(mf.sub1.hasattr("a"))
        self.assertTrue(mf.sub1.hasattr("b"))
        self.assertTrue(mf.hasattr("sub2"))
        self.assertTrue(mf.sub2.hasattr("a"))
        self.assertTrue(mf.sub2.hasattr("b"))
        input = torch.randn(2, 2)
        output_s = ms.forward(input)
        output_f = mf.forward(input)
        self.assertEqual(output_s, output_f)
def test_freeze_module_with_helperfunction(self):
class SubModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = 11
self.b = 2
def forward(self, x):
return self.a + self.b
class TestModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.sub = SubModule()
self.a = 3
self.b = 4
def forward(self, x):
self.b = 20
return self._forward(x) + self.a + self.b
def _forward(self, x):
return self.sub(x)
m = torch.jit.script(TestModule())
m.eval()
input = torch.randn(2, 2)
mf = torch._C._freeze_module(m._c)
self.assertFalse(mf.hasattr("sub"))
self.assertFalse(mf.hasattr("a"))
self.assertTrue(mf.hasattr("b"))
with self.assertRaisesRegex(
AttributeError, "TestModule (.*) does not have a field with name '_forward'"
):
mf._forward(x) # noqa: F821
    def test_freeze_module_with_inplace_mutable(self):
        """A list attribute mutated in forward survives freezing and is shared.

        The pre-freeze run on the original module appends to the same list the
        frozen module holds, so the frozen run sees [11, 22, 0, 1, 2] and then
        appends 0..2 again.
        """
        class FreezeMe(torch.jit.ScriptModule):
            def __init__(self) -> None:
                super().__init__()
                self.a = [11, 22]
            @torch.jit.script_method
            def forward(self, x):
                for i in range(3):
                    self.a.append(i)
                return self.a
        m = FreezeMe()
        m.eval()
        m_f = torch._C._freeze_module(m._c)
        self.assertTrue(m_f.hasattr("a"))
        m.forward(torch.tensor([3]))
        out = m_f.forward(torch.tensor([5]))
        expected = [11, 22, 0, 1, 2, 0, 1, 2]
        self.assertEqual(out, expected)
# Mutable attributes
    def test_freeze_module_with_mutable_list(self):
        """A folded list attribute is copied into the frozen module.

        Mutations of m_s.a after freezing must not be visible in m_f's
        folded constant, which keeps the pre-freeze value [1, 2, 3, 4].
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = [1, 2]
            def forward(self, x):
                return self.a
        m = FreezeMe()
        m.eval()
        m.a.append(3)
        m_s = torch.jit.script(m)
        v = m_s.a
        v.append(4)
        m_s.a = v
        m_s.eval()
        m_f = torch._C._freeze_module(m_s._c)
        # Post-freezing mutating m_s.a does not affect m_f (m_f has its own copy).
        v = m_s.a
        v.append(5)
        m_s.a = v
        self.assertFalse(m_f.hasattr("a"))
        out = m_f.forward(torch.tensor([5]))
        expected = [1, 2, 3, 4]
        self.assertEqual(out, expected)
    def test_freeze_module_with_mutable_dict(self):
        """A folded dict attribute snapshots its value at freeze time.

        Post-freeze mutations through m.a / modify_a are not visible in the
        frozen module's output ({"layer": "411", "layer2": "3"}).
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = {"layer": "4"}
            def forward(self, x):
                return self.a
            @torch.jit.export
            def modify_a(self, x):
                self.a["layer"] = self.a["layer"] + "1"
                return self.a
        m = FreezeMe()
        m.eval()
        m.a["layer2"] = "3"
        m_s = torch.jit.script(m)
        t = torch.tensor(5)
        m_s.modify_a(t)
        m_s.eval()
        m_f = torch._C._freeze_module(m_s._c)
        # Mutations after freezing; the frozen copy must be unaffected.
        m.a["layer2"] += "2"
        m_s.modify_a(t)
        self.assertFalse(m_f.hasattr("a"))
        out = m_f.forward(t)
        expected = {"layer": "411", "layer2": "3"}
        self.assertEqual(out, expected)
    def test_freeze_module_with_mutable_tensor(self):
        """Folded tensor constants currently share storage with the source.

        Documents existing (undesirable) behavior: mutating m_s.a after
        freezing changes the frozen module's output, because the folded
        constant is not deep-copied.
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor([1.0, 2.0, 3.0])
            def forward(self, x):
                return self.a
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.a[1] += 3.0
        m_s.eval()
        m_f = torch._C._freeze_module(m_s._c)
        # Post-freezing tensor attribute mutations affect m_f.
        # FIXME: deep copy all folded attributes so that m_f has full ownership.
        m_s.a[0] += 5.0
        self.assertFalse(m_f.hasattr("a"))
        out = m_f.forward(torch.tensor([5]))
        expected = [6.0, 5.0, 3.0]
        self.assertEqual(out, expected)
    def test_freeze_module_with_tuple(self):
        """A tuple attribute containing a tensor is folded; output matches.

        The tensor inside the tuple is reset to its original value before
        freezing so the folded constant reproduces the scripted output.
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = (torch.tensor([1, 2, 3, 4, 5, 6]), "hi")
            def forward(self, x):
                if x[0] == 2.0:
                    self.a[0][0] = 10
                return self.a[0].sum()
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        inp = torch.tensor([2.0])
        expected = m_s.forward(inp)
        m_s.a[0][0] = 1
        m_f = torch._C._freeze_module(m_s._c)
        self.assertFalse(m_f.hasattr("a"))
        out = m_f.forward(inp)
        self.assertEqual(out, expected)
    def test_freeze_module_with_tensor(self):
        """A tensor mutated through a view in forward is preserved, not folded.

        forward writes through self.a.view(2, 3), so 'a' must survive
        freezing; the test undoes the mutation on the frozen attribute to
        recover the expected sum.
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor([1, 2, 3, 4, 5, 6])
            def forward(self, x):
                x = self.a.view(2, 3)
                x[0][0] += 10
                return self.a.sum()
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        inp = torch.tensor([5])
        expected = m_s.forward(inp)
        m_f = torch._C._freeze_module(m_s._c)
        self.assertTrue(m_f.hasattr("a"))
        m_f.a[0] -= 10
        out = m_f.forward(inp)
        self.assertEqual(out, expected)
    def test_freeze_module_with_list(self):
        """A list of tensors mutated in forward is folded after undoing the write.

        The mutation is reversed before freezing so the folded constant
        reproduces the pre-freeze expected output.
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = [torch.tensor([1, 2, 3, 4, 5, 6])]
            def forward(self, x):
                self.a[0][1] += 10
                return self.a[0].sum()
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        inp = torch.tensor([5])
        expected = m_s.forward(inp)
        m_s.a[0][1] -= 10
        m_f = torch._C._freeze_module(m_s._c)
        self.assertFalse(m_f.hasattr("a"))
        out = m_f.forward(inp)
        self.assertEqual(out, expected)
    def test_freeze_module_with_aliased_tensor_attr(self):
        """'b' is a view of 'a'; writing through 'b' keeps 'a' preserved.

        The frozen forward mutates row 1 of the view, so the sum reflects
        the +10 writes: 1+2+3+14+15+16 = 51.
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor([1, 2, 3, 4, 5, 6])
                self.b = self.a.view(2, 3)
            def forward(self, x):
                self.b[1] += 10
                return self.a.sum()
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        m_f = torch._C._freeze_module(m_s._c)
        self.assertTrue(m_f.hasattr("a"))
        inp = torch.tensor([5])
        out = m_f.forward(inp)
        expected = torch.tensor(51)  # 1+2+3+14+15+16
        self.assertEqual(out, expected)
    def test_freeze_module_with_aliased_tensor_attr2(self):
        """Overlapping views nested inside containers make freezing fail.

        'a' overlaps views stored inside dict/tuple/list attributes; freezing
        cannot safely fold these and must raise.
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor([1, 2, 3, 4, 5, 6])
                self.b = {"layer": ([self.a.view(2, 3), torch.tensor([10])], 20)}
                self.c = ([self.a.view(2, 3), torch.tensor([10])], 20)
                self.d = (self.a.view(2, 3), 20)
            def forward(self, x):
                self.d[0][0] += 10
                return self.a.sum()
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        inp = torch.tensor([5])
        expected = m_s.forward(inp)
        with self.assertRaisesRegex(
            RuntimeError, "module contains attributes values that overlaps"
        ):
            m_f = torch._C._freeze_module(m_s._c)
    def test_freeze_module_with_aliased_tensor_attr3(self):
        """'b' contains 'a' directly (no overlapping views): freezing succeeds.

        Both attributes are preserved because of the aliasing, and the
        frozen run's in-place += 10 is reflected in the expected value.
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor([1, 2, 3, 4, 5, 6])
                self.b = [self.a, torch.tensor([10])]
            def forward(self, x):
                self.a[1] += 10
                return self.b[0].sum()
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        inp = torch.tensor([5])
        expected = m_s.forward(inp)
        m_f = torch._C._freeze_module(m_s._c)
        self.assertTrue(m_f.hasattr("a"))
        self.assertTrue(m_f.hasattr("b"))
        out = m_f.forward(inp)
        expected += 10  # account for self.a += 10.
        self.assertEqual(out, expected)
    def test_freeze_module_with_aliased_tensor_attr4(self):
        """Mutating 'a' through container 'b' while folding 'a' must raise.

        forward writes via self.b[0] (which is self.a); freezing detects the
        overlap and refuses to fold.
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor([1, 2, 3, 4, 5, 6])
                self.b = [self.a, torch.tensor([10])]
            def forward(self, x):
                self.b[0][0] += 10
                return self.a.sum()
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        inp = torch.tensor([5])
        expected = m_s.forward(inp)
        m_s.a[0] -= 10
        with self.assertRaisesRegex(
            RuntimeError, "module contains attributes values that overlaps"
        ):
            m_f = torch._C._freeze_module(m_s._c)
    def test_freeze_module_with_overlapping_attrs(self):
        """Two attributes holding different views of one external tensor.

        'b' and 'c' both view the shared tensor 'a'; freezing must detect
        the storage overlap and raise.
        """
        a = torch.tensor([1, 2, 3, 4, 5, 6])
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.b = [a.view(3, 2), torch.tensor([10])]
                self.c = (20, a.view(2, 3))
            def forward(self, x):
                self.b[0][0] += 10
                return self.c[1].sum()
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        inp = torch.tensor([5])
        expected = m_s.forward(inp)
        a[0] -= 10
        with self.assertRaisesRegex(
            RuntimeError, "module contains attributes values that overlaps"
        ):
            m_f = torch._C._freeze_module(m_s._c)
    def test_freeze_module_with_aliased_attr(self):
        """Aliased list attributes ('b' = 'a', 'c' contains 'a') get folded.

        Ideally 'a' would be preserved because 'b' (its alias) is mutated,
        but scripting currently copies on attribute assignment (see #33034),
        so all three are folded; outputs still match.
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = [1, 2, 3, 4, 5, 6]
                self.b = self.a
                self.c = (self.a, 10)
            def forward(self, x):
                self.b[1] += 10
                return str(self.a) + str(self.c)
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        m_f = torch._C._freeze_module(m_s._c)
        # FIXME: It should be assertTrue. Currently scripting is making a copy for setting self.b (see #33034)
        self.assertFalse(m_f.hasattr("a"))
        self.assertFalse(m_f.hasattr("c"))
        inp = torch.tensor([5])
        out = m_f.forward(inp)
        expected = m_s.forward(inp)
        self.assertEqual(out, expected)
# Check attribute a is preserved. Alias analysis detects that 'a' has output writers.
# In this example, 'a' is not mutated. However, we do not track which sub
# values of a composite ivalue is mutated.
    def test_freeze_module_with_aliased_attr2(self):
        """'a' escapes into a freshly built tuple in forward: preserved.

        Alias analysis sees 'a' flow into a mutable value with writers, so
        'a' survives freezing even though 'a' itself is never written.
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = [1, 2, 3, 4, 5, 6]
                self.b = ([11], [10])
            def forward(self, x):
                v = self.a
                self.b = (v, [12])
                v2 = self.b[1]
                v2.append(7)
                return str(v) + str(v2)
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        m_f = torch._C._freeze_module(m_s._c)
        self.assertTrue(m_f.hasattr("a"))
        inp = torch.tensor([5])
        out = m_f.forward(inp)
        expected = m.forward(inp)
        self.assertEqual(out, expected)
    def test_freeze_module_with_aliased_attr3(self):
        """'a' aliased into a local tuple which is then mutated: preserved.

        The append through the local alias counts as a write to 'a', so
        freezing keeps the attribute.
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = [1, 2, 3, 4, 5, 6]
                self.b = ([11], [10])
            def forward(self, x):
                v = self.a
                v2 = (v, [12])
                v3 = v2[0]
                v3.append(7)
                return str(self.a)
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        m_f = torch._C._freeze_module(m_s._c)
        self.assertTrue(m_f.hasattr("a"))
        inp = torch.tensor([5])
        out = m_f.forward(inp)
        expected = m.forward(inp)
        self.assertEqual(out, expected)
def test_freeze_module_return_self(self):
class FreezeMe(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = torch.tensor([1.0, 2.0, 3.0])
def forward(self, x):
return self
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
with self.assertRaisesRegex(
RuntimeError, "attempted to freeze a module that return itself"
):
m_f = torch._C._freeze_module(m_s._c)
def test_freeze_module_inlining(self):
@torch.jit.script # noqa: B903
class Obj: # noqa: B903
def __init__(self, x: int, y: int):
self.x = x
self.y = y
class Mod(nn.Module):
def __init__(self) -> None:
super().__init__()
self.obj = Obj(2, 3)
def forward(self, i: int):
print(self.obj)
return i
mod = torch.jit.freeze(torch.jit.script(Mod().eval()))
obj = mod.graph.findNode("prim::Constant")
self.assertTrue(torch._C._jit_object_is_non_holding(obj))
buffer = io.BytesIO()
torch.jit.save(mod, buffer)
buffer.seek(0)
loaded = torch.jit.load(buffer)
obj = mod.graph.findNode("prim::Constant")
self.assertTrue(torch._C._jit_object_is_non_holding(obj))
def test_freeze_module_return_sub_module(self):
class FreezeMe(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
def forward(self, x):
return self.conv1
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
self.assertTrue(m_f.hasattr("conv1"))
    def test_freeze_module_no_forward(self):
        """_freeze_module with preservedAttrs works without a forward method."""
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.lin = nn.Linear(10, 1)
            @torch.jit.export
            def foo(self, x):
                return self.lin(x)
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        m_f = torch._C._freeze_module(m_s._c, preservedAttrs=["foo"])
        input = torch.ones(10)
        self.assertEqual(m_s.foo(input), m_f.foo(input))
    def test_freeze_no_forward(self):
        """torch.jit.freeze with preserved_attrs works without forward.

        Same scenario as test_freeze_module_no_forward but through the
        public torch.jit.freeze API.
        """
        class FreezeMe(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.lin = nn.Linear(10, 1)
            @torch.jit.export
            def foo(self, x):
                return self.lin(x)
        m = FreezeMe()
        m_s = torch.jit.script(m)
        m_s.eval()
        m_f = torch.jit.freeze(m_s, preserved_attrs=["foo"])
        input = torch.ones(10)
        self.assertEqual(m_s.foo(input), m_f.foo(input))
    def test_freeze_module_in_training_mode(self):
        """Freezing behavior in train mode vs eval mode vs preserveParameters.

        - train mode: parameters and dropout 'training' flags are preserved;
        - eval mode: everything is folded (no attrs, no state_dict);
        - eval + preserveParameters=True: parameterized layers are kept but
          dropout modules are still folded.
        Also checks the eval-frozen module survives a save/load round trip
        with no GetAttr nodes left in forward.
        """
        class Net(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv1 = nn.Conv2d(1, 32, 3, 1)
                self.conv2 = nn.Conv2d(32, 64, 3, 1)
                self.dropout1 = nn.Dropout2d(0.25)
                self.dropout2 = nn.Dropout2d(0.5)
                self.fc1 = nn.Linear(9216, 128)
                self.fc2 = nn.Linear(128, 10)
            def forward(self, x):
                x = self.conv1(x)
                x = nn.functional.relu(x)
                x = self.conv2(x)
                x = nn.functional.max_pool2d(x, 2)
                x = self.dropout1(x)
                x = torch.flatten(x, 1)
                x = self.fc1(x)
                x = nn.functional.relu(x)
                x = self.dropout2(x)
                x = self.fc2(x)
                output = nn.functional.log_softmax(x, dim=1)
                return output
        model = torch.jit.script(Net())
        model.train()
        mTrain_freezed = torch._C._freeze_module(model._c)
        # verify mTrain_freezed looks exactly as:
        # module {
        #   attributes {
        #     conv1 = ...
        #     conv2 = ...
        #     dropout1 = ...
        #     dropout2 = ...
        #     fc1 = ...
        #     fc2 = ...
        #   }
        #   ...
        #   submodules {
        #     module conv1 {
        #       attributes {
        #          weight = ...
        #          bias = ...
        #       }
        #       ...
        #     }
        #     module conv2 {
        #       attributes {
        #          weight = ...
        #          bias = ...
        #       }
        #       ...
        #     }
        #     module dropout1 {
        #       attributes {
        #          training = ...
        #       }
        #       ...
        #     }
        #     module dropout2 {
        #       attributes {
        #          training = ...
        #       }
        #       ...
        #     }
        #     module fc1 {
        #       attributes {
        #          weight = ...
        #          bias = ...
        #       }
        #       ...
        #     }
        #     module fc2 {
        #       attributes {
        #          weight = ...
        #          bias = ...
        #       }
        #       ...
        #     }
        self.assertFalse(mTrain_freezed.hasattr("training"))
        self.assertTrue(mTrain_freezed.hasattr("conv1"))
        self.assertFalse(mTrain_freezed.conv1.hasattr("training"))
        self.assertTrue(mTrain_freezed.conv1.hasattr("weight"))
        self.assertTrue(mTrain_freezed.conv1.hasattr("bias"))
        self.assertTrue(mTrain_freezed.hasattr("conv2"))
        self.assertFalse(mTrain_freezed.conv2.hasattr("training"))
        self.assertTrue(mTrain_freezed.conv2.hasattr("weight"))
        self.assertTrue(mTrain_freezed.conv2.hasattr("bias"))
        self.assertTrue(mTrain_freezed.hasattr("dropout1"))
        self.assertTrue(mTrain_freezed.dropout1.hasattr("training"))
        self.assertTrue(mTrain_freezed.hasattr("dropout2"))
        self.assertTrue(mTrain_freezed.dropout2.hasattr("training"))
        self.assertTrue(mTrain_freezed.hasattr("fc1"))
        self.assertTrue(mTrain_freezed.fc1.hasattr("weight"))
        self.assertTrue(mTrain_freezed.fc1.hasattr("bias"))
        self.assertTrue(mTrain_freezed.hasattr("fc2"))
        self.assertTrue(mTrain_freezed.fc2.hasattr("weight"))
        self.assertTrue(mTrain_freezed.fc2.hasattr("bias"))
        model.eval()
        mEval_freezed = torch._C._freeze_module(model._c)
        self.assertFalse(mEval_freezed.hasattr("conv1"))
        self.assertFalse(mEval_freezed.hasattr("conv2"))
        self.assertFalse(mEval_freezed.hasattr("dropout1"))
        self.assertFalse(mEval_freezed.hasattr("training"))
        self.assertFalse(mEval_freezed.hasattr("fc1"))
        self.assertFalse(mEval_freezed.hasattr("dropout2"))
        self.assertFalse(mEval_freezed.hasattr("fc2"))
        with self.assertRaisesRegex(
            AttributeError, "does not have a field with name 'state_dict'"
        ):
            print(mEval_freezed.state_dict())
        buffer = io.BytesIO()
        torch.jit.save(mEval_freezed, buffer)
        buffer.seek(0)
        m = torch.jit.load(buffer)
        FileCheck().check_not("GetAttr[name=").run(m._c._get_method("forward").graph)
        m2 = torch._C._freeze_module(model._c, preserveParameters=True)
        self.assertTrue(m2.hasattr("conv1"))
        self.assertTrue(m2.hasattr("conv2"))
        self.assertFalse(m2.hasattr("dropout1"))
        self.assertFalse(m2.hasattr("training"))
        self.assertTrue(m2.hasattr("fc1"))
        self.assertFalse(m2.hasattr("dropout2"))
        self.assertTrue(m2.hasattr("fc2"))
    def test_freeze_module_detach_gradient(self):
        """Freezing folds the weight without detaching the originals' grads.

        The source modules keep requires_grad; the folded constant currently
        shares storage with the scripted module's weight (FIXME below), so
        mutating smod.weight changes the frozen output.
        """
        mod = nn.Conv2d(8, 3, 4, 2, 1)
        self.assertTrue(mod.weight.requires_grad)
        smod = torch.jit.script(mod)
        smod.eval()
        fmod = torch._C._freeze_module(smod._c)
        self.assertTrue(mod.weight.requires_grad)
        self.assertTrue(smod.weight.requires_grad)
        self.assertFalse(fmod.hasattr("weight"))
        inp = torch.ones(1, 8, 32, 32)
        out1 = fmod.forward(inp)
        # FIXME: frozen module mutated from outside (original module).
        with torch.no_grad():
            smod.weight[0, 0, 0, 0] += 100.0
        out2 = fmod.forward(inp)
        out3 = smod(inp)
        self.assertNotEqual(out1, out2)
        self.assertEqual(out2, out3)
def test_freeze_module_with_user_preserved_attr(self):
class Module(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = torch.tensor([1.1])
self.b = torch.tensor([2.2])
def forward(self, x):
return self.a + self.b
m = torch.jit.script(Module())
m.eval()
fm = torch._C._freeze_module(m._c, ["a"])
# Attribute "a" is preserved
self.assertTrue(fm.hasattr("a"))
self.assertFalse(fm.hasattr("b"))
    def test_freeze_module_with_user_preserved_method(self):
        """Preserving a method also preserves the attributes it mutates.

        Keeping 'modify_a' forces 'a' to be preserved; 'b' and 'modify_b'
        are still folded, and forward output is unchanged.
        """
        class Module(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor([1.1])
                self.b = torch.tensor([2.2])
            def forward(self, x):
                return self.a + self.b
            @torch.jit.export
            def modify_a(self, x):
                self.a[0] += 10
                return self.b
            @torch.jit.export
            def modify_b(self, x):
                self.b[0] += 20
                return self.a
        m = torch.jit.script(Module())
        m.eval()
        fm = torch._C._freeze_module(m._c, ["modify_a"])
        # Both attribute "a" and method "modify_a" are preserved
        self.assertTrue(fm.hasattr("a"))
        self.assertFalse(fm.hasattr("b"))
        input = torch.randn(2, 2)
        expected = m.forward(input)
        out = fm.forward(input)
        self.assertEqual(out, expected)
    def test_freeze_module_with_user_preserved_method2(self):
        """Attributes mutated anywhere stay as GetAttr in all preserved graphs.

        forward mutates 'b' and modify_a mutates 'a', so both remain runtime
        attribute reads (GetAttr) rather than folded constants.
        """
        class Module(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = torch.tensor([1.1])
                self.b = torch.tensor([2.2])
            def forward(self, x):
                self.b += 10
                return self.a + self.b
            @torch.jit.export
            def modify_a(self, x):
                self.a[0] += 10
                return self.b + self.a
        m = torch.jit.script(Module())
        m.eval()
        fm = torch._C._freeze_module(m._c, ["modify_a"])
        FileCheck().check('prim::GetAttr[name="a"]').run(fm.forward.graph)
        FileCheck().check('prim::GetAttr[name="b"]').run(fm.modify_a.graph)
    def test_freeze_module_with_user_preserved_attribute_on_submodule(self):
        """Dotted preserved_attrs ('sub1.a') keep submodule attrs mutable.

        Preserved attributes remain live: bumping m.sub1.a after freezing
        changes the frozen module's output from 6 to 7.
        """
        class SubModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = 1
                self.b = 2
            def forward(self):
                return self.a + self.b
        class Module(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub1 = SubModule()
                self.sub2 = SubModule()
            def forward(self):
                return self.sub1() + self.sub2()
        m = torch.jit.script(Module())
        m.eval()
        m = torch.jit.freeze(m, preserved_attrs=["sub1.a", "sub2.a"])
        fm = m._c
        self.assertTrue(fm.hasattr("sub1"))
        self.assertTrue(fm.sub1.hasattr("a"))
        self.assertFalse(fm.sub1.hasattr("b"))
        self.assertTrue(fm.hasattr("sub2"))
        self.assertTrue(fm.sub2.hasattr("a"))
        self.assertFalse(fm.sub2.hasattr("b"))
        self.assertEqual(m(), 6)
        m.sub1.a += 1
        self.assertEqual(m(), 7)
    def test_freeze_module_with_user_preserved_attribute_on_unused_submodule(self):
        """Preserving attrs/methods on a submodule forward never touches.

        'sub' is unused by forward but 'sub.a' and 'sub.method_a' are
        explicitly requested, so they must survive freezing.
        """
        class SubModule(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.a = 1
                self.b = 2
            def forward(self):
                return self.a + self.b
            @torch.jit.export
            def method_a(self):
                return 42
        class Module(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub = SubModule()
            def forward(self):
                return 1
        m = torch.jit.script(Module())
        m.eval()
        fm = torch.jit.freeze(m, preserved_attrs=["sub.a", "sub.method_a"])._c
        self.assertTrue(fm.hasattr("sub"))
        self.assertTrue(fm.sub.hasattr("a"))
        self.assertFalse(fm.sub.hasattr("b"))
        self.assertTrue(fm.sub._has_method("method_a"))
    def test_freeze_module_with_user_preserved_method_on_submodule(self):
        """Preserving a submodule method keeps it; sibling methods are dropped."""
        class SubModule(nn.Module):
            def forward(self, x):
                return self.method_a(x) + self.method_b(x)
            def method_a(self, x):
                return x * x
            def method_b(self, x):
                return x + x
        class Module(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.sub = SubModule()
            def forward(self, x):
                return self.sub(x)
        m = torch.jit.script(Module())
        m.eval()
        fm = torch.jit.freeze(m, preserved_attrs=["sub.method_a"])._c
        self.assertTrue(fm.hasattr("sub"))
        self.assertTrue(fm.sub._has_method("method_a"))
        self.assertFalse(fm.sub._has_method("method_b"))
    @skipIfNoFBGEMM
    def test_module_with_shared_type_instances(self):
        """Freezing a statically-quantized model with shared Child types.

        Regression test: an earlier bug set _packed_params to False when two
        submodules shared a class type, and running the frozen module
        segfaulted. Requires the fbgemm quantized engine.
        """
        class Child(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv1 = nn.Conv2d(1, 1, 1).to(dtype=torch.float32)
            def forward(self, x):
                x = self.conv1(x)
                return x
        class Parent(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.quant = torch.ao.quantization.QuantStub()
                self.conv1 = nn.Conv2d(1, 1, 1).to(dtype=torch.float32)
                # child and child2 deliberately share the same class type.
                self.child = Child()
                self.child2 = Child()
                self.dequant = torch.ao.quantization.DeQuantStub()
            def forward(self, x):
                x = self.quant(x)
                x = self.conv1(x)
                x = self.child(x)
                x = self.child2(x)
                x = self.dequant(x)
                return x
        def _static_quant(model):
            # Standard eager-mode static quantization: prepare, calibrate, convert.
            qModel = torch.ao.quantization.QuantWrapper(model)
            qModel.qconfig = torch.ao.quantization.default_qconfig
            torch.ao.quantization.prepare(qModel, inplace=True)
            qModel(torch.rand(4, 1, 4, 4, dtype=torch.float32))
            torch.ao.quantization.convert(qModel, inplace=True)
            return model
        with override_quantized_engine("fbgemm"):
            data = torch.randn(4, 1, 4, 4, dtype=torch.float32)
            m = Parent().to(torch.float32)
            m = _static_quant(m)
            m = torch.jit.script(m)
            m.eval()
            torch._C._jit_pass_inline(m.graph)
            m_frozen = wrap_cpp_module(torch._C._freeze_module(m._c))
            # Earlier bug resulted in _packed_params set to false.
            FileCheck().check_not("_packed_params = False").run(
                m_frozen._c.dump_to_str(True, True, False)
            )
            m_res = m(data)
            # It used to segfault while running frozen module.
            m_frozen_res = m_frozen(data)
            self.assertEqual(m_res, m_frozen_res)
    def test_module_getattr_indirection(self):
        """Freezing handles data-dependent attribute selection.

        forward picks one of two script-class attributes at runtime; frozen
        results must match eager for both branches.
        """
        @torch.jit.script
        class ValHolder:
            def __init__(self, val: int):
                self.val: int = val
        class Mod(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.mod1 = ValHolder(1)
                self.mod2 = ValHolder(2)
            def forward(self, cond: bool):
                if cond:
                    mod = self.mod1
                else:
                    mod = self.mod2
                return mod.val
        mod = Mod()
        mod.eval()
        frozen_mod = torch.jit.freeze(torch.jit.script(mod))
        mod_eager = Mod()
        self.assertEqual(mod_eager(True), frozen_mod(True))
        self.assertEqual(mod_eager(False), frozen_mod(False))
    def test_freeze_module_with_non_static_module_container_index(self):
        """
        Test that Modules containing non-static ModuleDict or ModuleList
        indexing cannot be frozen.

        Indexing a ModuleDict by a runtime string key or a ModuleList by a
        runtime int produces prim::ModuleContainerIndex, which freezing
        rejects with a RuntimeError.
        """
        @torch.jit.interface
        class ModuleInterface(torch.nn.Module):
            def forward(self, inp: Any) -> Any:
                pass
        class ImplementsInterface(torch.nn.Module):
            def forward(self, inp: Any) -> Any:
                if isinstance(inp, torch.Tensor):
                    return torch.max(inp, dim=0)
                return inp
        class ModWithDict(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.d = torch.nn.ModuleDict({"module": ImplementsInterface()})
            def forward(self, x: torch.Tensor, key: str) -> Any:
                # Dynamic key: cannot be resolved statically.
                value: ModuleInterface = self.d[key]
                return value.forward(x)
        m = torch.jit.script(ModWithDict())
        m.eval()
        with self.assertRaisesRegex(
            RuntimeError,
            "Freezing modules containing prim::ModuleContainerIndex is not supported",
        ):
            mf = torch._C._freeze_module(m._c)
        class ModWithList(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.l = torch.nn.ModuleList([ImplementsInterface()])
            def forward(self, x: torch.Tensor, idx: int) -> Any:
                # Dynamic index: cannot be resolved statically.
                value: ModuleInterface = self.l[idx]
                return value.forward(x)
        m = torch.jit.script(ModWithList())
        m.eval()
        with self.assertRaisesRegex(
            RuntimeError,
            "Freezing modules containing prim::ModuleContainerIndex is not supported",
        ):
            mf = torch._C._freeze_module(m._c)
def test_freeze_with_interface_mutable(self):
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
pass
class ImplementsInterface(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.sum = torch.zeros((2, 2))
def forward(self, inp: torch.Tensor) -> torch.Tensor:
self.sum += inp.relu()
return self.sum
class WrapperModule(torch.nn.Module):
impl: ModuleInterface
def __init__(self) -> None:
super().__init__()
self.impl = ImplementsInterface()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.impl.forward(x)
m = torch.jit.script(WrapperModule())
m.eval()
m_frozen = torch.jit.freeze(m)
x = torch.rand((2, 2))
m_frozen(x)
self.assertEqual(m_frozen.impl.sum, x.relu())
def test_freeze_with_swapping_interfaces(self):
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
pass
class Implementation1(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
return inp.relu()
class Implementation2(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
return inp.sin()
class WrapperModule(torch.nn.Module):
impl: ModuleInterface
def __init__(self) -> None:
super().__init__()
self.option1 = Implementation1()
self.option2 = Implementation2()
self.impl = self.option1
self.idx = 0
def forward(self, x: torch.Tensor) -> torch.Tensor:
self.idx += 1
if self.idx % 2 == 1:
self.impl = self.option1
else:
self.impl = self.option2
return self.impl(x)
m = torch.jit.script(WrapperModule())
m.eval()
with self.assertRaisesRegex(
RuntimeError, "Freezing does not support SetAttr on an interface type"
):
m_frozen = torch.jit.freeze(m)
def test_freeze_recursive_interfaces(self):
@torch.jit.interface
class InnerInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
pass
@torch.jit.interface
class OuterInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
pass
class InnerImpl(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.x = torch.ones((2, 2))
def forward(self, inp):
return inp.cos() * self.x
class OuterImpl(torch.nn.Module):
inner_impl: InnerInterface
def __init__(self) -> None:
super().__init__()
self.inner_impl = InnerImpl()
def forward(self, inp):
return inp.relu() + self.inner_impl(inp.sin())
class WrapperModule(torch.nn.Module):
outer_impl: OuterInterface
def __init__(self) -> None:
super().__init__()
self.outer_impl = OuterImpl()
def forward(self, inp):
return self.outer_impl(inp) + inp
m = WrapperModule()
x = torch.rand((2, 2))
expected = m(x)
m_s = torch.jit.script(m)
m_s.eval()
m_s = torch.jit.freeze(m_s)
actual = m_s(x)
self.assertEqual(expected, actual)
def test_freeze_recursive_interfaces_with_reassignment(self):
@torch.jit.interface
class InnerInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
pass
@torch.jit.interface
class OuterInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
pass
class InnerImpl1(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.x = torch.ones((2, 2))
def forward(self, inp):
return inp.cos() * self.x
class InnerImpl2(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.x = torch.ones((2, 2)) * 2
def forward(self, inp):
return inp.sin() / self.x
class OuterImpl(torch.nn.Module):
inner_impl: InnerInterface
def __init__(self) -> None:
super().__init__()
self.inner_impl = InnerImpl1()
self.impl1 = InnerImpl1()
self.impl2 = InnerImpl1()
self.idx = 0
def forward(self, inp):
self.idx += 1
if self.idx % 2 == 0:
self.inner_impl = self.impl1
else:
self.inner_impl = self.impl2
return inp.relu() + self.inner_impl(inp.sin())
class WrapperModule(torch.nn.Module):
outer_impl: OuterInterface
def __init__(self) -> None:
super().__init__()
self.outer_impl = OuterImpl()
def forward(self, inp):
return self.outer_impl(inp) + inp
m = WrapperModule()
m_s = torch.jit.script(m)
m_s.eval()
with self.assertRaisesRegex(
RuntimeError, "Freezing does not support SetAttr on an interface type"
):
m_s = torch.jit.freeze(m_s)
def test_freeze_interface_swapping_two_methods(self):
@torch.jit.interface
class MyInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
pass
class Impl1(torch.nn.Module):
def forward(self, inp):
return inp.cos()
class Impl2(torch.nn.Module):
def forward(self, inp):
return inp.sin()
class WrapperModule1(torch.nn.Module):
interface_impl: MyInterface
def __init__(self) -> None:
super().__init__()
self.interface_impl = Impl1()
self.impl1 = Impl1()
self.impl2 = Impl2()
self.idx = 0
def forward(self, x):
return self.interface_impl(x)
@torch.jit.export
def other_method(self, x):
self.idx += 1
if self.idx % 2 == 0:
self.interface_impl = self.impl1
else:
self.interface_impl = self.impl2
return self.interface_impl(x)
class WrapperModule2(torch.nn.Module):
interface_impl: MyInterface
def __init__(self) -> None:
super().__init__()
self.interface_impl = Impl1()
self.impl1 = Impl1()
self.impl2 = Impl2()
self.idx = 0
def forward(self, x):
self.idx += 1
if self.idx % 2 == 0:
self.interface_impl = self.impl1
else:
self.interface_impl = self.impl2
return self.interface_impl(x)
@torch.jit.export
def other_method(self, x):
return self.interface_impl(x)
m1 = torch.jit.script(WrapperModule1())
m2 = torch.jit.script(WrapperModule2())
m1.eval()
m2.eval()
with self.assertRaisesRegex(
RuntimeError, "Freezing does not support SetAttr on an interface type"
):
torch.jit.freeze(m1, preserved_attrs=["other_method"])
with self.assertRaisesRegex(
RuntimeError, "Freezing does not support SetAttr on an interface type"
):
torch.jit.freeze(m2, preserved_attrs=["other_method"])
def test_freeze_recursive_interfaces_same_name(self):
@torch.jit.interface
class InnerInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
pass
@torch.jit.interface
class OuterInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
pass
class InnerImpl(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.x = torch.ones((2, 2))
def forward(self, inp):
return inp.cos() * self.x
class OuterImpl(torch.nn.Module):
impl: InnerInterface
def __init__(self) -> None:
super().__init__()
self.impl = InnerImpl()
self.x = torch.ones((2, 2)) * 5
def forward(self, inp):
return self.other_method(inp)
def other_method(self, inp):
return inp.relu() + self.impl(inp.sin()) + self.x
class WrapperModule(torch.nn.Module):
impl: OuterInterface
def __init__(self) -> None:
super().__init__()
self.impl = OuterImpl()
def forward(self, inp):
return self.impl(inp) + inp
m = WrapperModule()
x = torch.rand((2, 2))
expected = m(x)
m_s = torch.jit.script(m)
m_s.eval()
m_s = torch.jit.freeze(m_s)
actual = m_s(x)
self.assertEqual(expected, actual)
def test_freeze_non_interface_module_swap(self):
class InnerModule(torch.nn.Module):
def __init__(self, x):
super().__init__()
self.x = x
def forward(self, inp: torch.Tensor) -> torch.Tensor:
return inp.relu() + self.x
class WrapperModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.option1 = InnerModule(torch.rand((2, 2)))
self.option2 = InnerModule(torch.rand((2, 2)))
self.impl = self.option1
self.idx = 0
def forward(self, x: torch.Tensor) -> torch.Tensor:
self.idx += 1
if self.idx % 2 == 1:
self.impl = self.option1
else:
self.impl = self.option2
return self.impl(x)
unfrozen = WrapperModule()
m = torch.jit.script(unfrozen)
m.eval()
m_frozen = torch.jit.freeze(m)
x = torch.rand((2, 2))
expected = unfrozen(x)
actual = m_frozen(x)
self.assertEqual(expected, actual)
@unittest.expectedFailure
def test_freeze_interface_within_object(self):
# I don't think there's any way to create a plain python object that
# contains a torch.nn.Module inside it, but just in case... I'm not
# sure freezing would handle this case correctly, so marking as xfail
# so that if this ever _does_ start working someone will need to
# investigate to make sure this is handled correctly.
class MyIface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
pass
class MyImpl(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> torch.Tensor:
return inp.sin()
class MyObject:
impl: MyIface
def run(self, x):
return self.impl(x)
class WrapperModule(torch.nn.Module):
impl: MyObject
def __init__(self) -> None:
super().__init__()
self.impl = MyObject()
self.impl.impl = MyImpl()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.impl(x)
unfrozen = WrapperModule()
m = torch.jit.script(unfrozen)
m.eval()
m_frozen = torch.jit.freeze(m)
x = torch.rand((2, 2))
expected = unfrozen(x)
actual = m_frozen(x)
self.expectEqual(expected, actual)
def test_freeze_non_module_class_getattr(self):
class BoxCoder:
def __init__(self, bbox_xform_clip):
# type: (float) -> None
self.bbox_xform_clip = bbox_xform_clip
def decode(self, input):
return input * self.bbox_xform_clip
class MyModule(torch.nn.Module):
__annotations__ = {
"box_coder": BoxCoder,
}
def __init__(self) -> None:
super().__init__()
self.box_coder = BoxCoder(50.0)
def forward(self, input):
return self.box_coder.decode(input)
model = MyModule()
model.eval()
script_model = torch.jit.freeze(torch.jit.script(model))
inp = torch.randn([4, 4])
output_eager = model(inp)
self.assertEqual(model(inp), script_model(inp))
FileCheck().check_not("GetAttr").run(script_model.graph)
def test_freeze_module_with_tupleoutput_submodule(self):
class SubModule(nn.Module):
def forward(self, x):
return (x + 1, x + 2)
class TestModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.sub = SubModule()
def forward(self, x):
y1, y2 = self.sub(x)
return y1 + y2
m = torch.jit.script(TestModule())
m = m.eval()
mf = torch.jit.freeze(m)
inp = torch.randn(2, 2)
expected = m.forward(inp)
output = mf.forward(inp)
# Check if prim::TupleConstruct and prim::TupleUnpack
# Don't exist in frozen graph
FileCheck().check_not("prim::TupleConstruct").run(mf.graph)
FileCheck().check_not("prim::TupleUnpack").run(mf.graph)
self.assertEqual(output, expected)
def test_freeze_module_with_call_method(self):
class Mod(nn.Module):
def __init__(self, val):
super().__init__()
self.param = nn.Parameter(val)
def forward(self, x):
# this method will change during freezing
return x + self.param
@torch.jit.export
def make_prediction(self, x):
y = x + x
return self.forward(y)
param = torch.rand([2, 2])
x = torch.rand([2, 2])
unscripted_mod = Mod(param)
mod = torch.jit.script(unscripted_mod)
mod.eval()
mod = torch.jit.freeze(mod, preserved_attrs=["make_prediction"])
self.assertEqual(
mod.forward(x), unscripted_mod.forward(x), atol=1e-5, rtol=1e-5
)
@skipIfTorchDynamo("somehow causing hanging during python shutdown")
| TestFreezing |
python | arrow-py__arrow | arrow/locales.py | {
"start": 18155,
"end": 19477
} | class ____(Locale):
names = ["sv", "sv-se"]
past = "för {0} sen"
future = "om {0}"
and_word = "och"
timeframes = {
"now": "just nu",
"second": "en sekund",
"seconds": "{0} sekunder",
"minute": "en minut",
"minutes": "{0} minuter",
"hour": "en timme",
"hours": "{0} timmar",
"day": "en dag",
"days": "{0} dagar",
"week": "en vecka",
"weeks": "{0} veckor",
"month": "en månad",
"months": "{0} månader",
"year": "ett år",
"years": "{0} år",
}
month_names = [
"",
"januari",
"februari",
"mars",
"april",
"maj",
"juni",
"juli",
"augusti",
"september",
"oktober",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"feb",
"mar",
"apr",
"maj",
"jun",
"jul",
"aug",
"sep",
"okt",
"nov",
"dec",
]
day_names = [
"",
"måndag",
"tisdag",
"onsdag",
"torsdag",
"fredag",
"lördag",
"söndag",
]
day_abbreviations = ["", "mån", "tis", "ons", "tor", "fre", "lör", "sön"]
| SwedishLocale |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/ext.py | {
"start": 1327,
"end": 4050
} | class ____(expression.ColumnElement[_T]):
"""Represent a PostgreSQL aggregate order by expression.
E.g.::
from sqlalchemy.dialects.postgresql import aggregate_order_by
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
stmt = select(expr)
would represent the expression:
.. sourcecode:: sql
SELECT array_agg(a ORDER BY b DESC) FROM table;
.. legacy:: An improved dialect-agnostic form of this function is now
available in Core by calling the
:meth:`_functions.Function.aggregate_order_by` method on any function
defined by the backend as an aggregate function.
.. seealso::
:func:`_sql.aggregate_order_by` - Core level function
:class:`_functions.array_agg`
"""
__visit_name__ = "aggregate_order_by"
stringify_dialect = "postgresql"
_traverse_internals: _TraverseInternalsType = [
("target", InternalTraversal.dp_clauseelement),
("type", InternalTraversal.dp_type),
("order_by", InternalTraversal.dp_clauseelement),
]
@overload
def __init__(
self,
target: ColumnElement[_T],
*order_by: _ColumnExpressionArgument[Any],
): ...
@overload
def __init__(
self,
target: _ColumnExpressionArgument[_T],
*order_by: _ColumnExpressionArgument[Any],
): ...
def __init__(
self,
target: _ColumnExpressionArgument[_T],
*order_by: _ColumnExpressionArgument[Any],
):
self.target: ClauseElement = coercions.expect(
roles.ExpressionElementRole, target
)
self.type = self.target.type
_lob = len(order_by)
self.order_by: ClauseElement
if _lob == 0:
raise TypeError("at least one ORDER BY element is required")
elif _lob == 1:
self.order_by = coercions.expect(
roles.ExpressionElementRole, order_by[0]
)
else:
self.order_by = elements.ClauseList(
*order_by, _literal_as_text_role=roles.ExpressionElementRole
)
def self_group(
self, against: Optional[OperatorType] = None
) -> ClauseElement:
return self
def get_children(self, **kwargs: Any) -> Iterable[ClauseElement]:
return self.target, self.order_by
def _copy_internals(
self, clone: _CloneCallableType = elements._clone, **kw: Any
) -> None:
self.target = clone(self.target, **kw)
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self) -> List[FromClause]:
return self.target._from_objects + self.order_by._from_objects
| aggregate_order_by |
python | tensorflow__tensorflow | tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py | {
"start": 121691,
"end": 127697
} | class ____(test.TestCase):
def _npBias(self, inputs, bias):
assert len(bias.shape) == 1
assert inputs.shape[-1] == bias.shape[0]
return inputs + bias.reshape(
([1] * (len(inputs.shape) - 1)) + [bias.shape[0]]
)
def testNpBias(self):
self.assertAllClose(
np.array([[11, 22, 33], [41, 52, 63]]),
self._npBias(
np.array([[10, 20, 30], [40, 50, 60]]), np.array([1, 2, 3])
),
)
def _testBias(self, np_inputs, np_bias, use_gpu=False):
np_val = self._npBias(np_inputs, np_bias)
tf_val = nn_ops.bias_add(np_inputs, np_bias)
self.assertAllCloseAccordingToType(np_val, tf_val)
def _AtLeast3d(self, np_value):
# fill the input value to at least 3-dimension
if np_value.ndim < 3:
return np.reshape(np_value, (1,) * (3 - np_value.ndim) + np_value.shape)
return np_value
def _NHWCToNCHW(self, np_value):
# fill the input value to at least 3-dimension
np_value = self._AtLeast3d(np_value)
# move the last dimension to second
np_dim = list(range(np_value.ndim))
np_dim_new = list(np_dim[0:1]) + list(np_dim[-1:]) + list(np_dim[1:-1])
return np.transpose(np_value, np_dim_new)
def _NCHWToNHWC(self, np_value):
assert len(np_value.shape) >= 3
np_dim = list(range(np_value.ndim))
# move the second dimension to the last
np_dim_new = list(np_dim[0:1]) + list(np_dim[2:]) + list(np_dim[1:2])
return np.transpose(np_value, np_dim_new)
def _testBiasNCHW(self, np_inputs, np_bias, use_gpu):
np_val = self._npBias(np_inputs, np_bias)
np_inputs = self._NHWCToNCHW(np_inputs)
tf_val = nn_ops.bias_add(np_inputs, np_bias, data_format="NCHW")
tf_val = self._NCHWToNHWC(tf_val)
self.assertAllCloseAccordingToType(self._AtLeast3d(np_val), tf_val)
def _testAll(self, np_inputs, np_bias):
if np_inputs.dtype in [np.float32, np.float16]:
self._testBias(np_inputs, np_bias, use_gpu=True)
self._testBiasNCHW(np_inputs, np_bias, use_gpu=True)
def testFloatTypes(self):
for t in [np.float32, np.float16]:
self._testAll(
np.random.rand(4, 3, 3).astype(t), np.random.rand(3).astype(t)
)
self._testAll(
np.random.rand(7, 5, 13).astype(t), np.random.rand(13).astype(t)
)
self._testAll(np.random.rand(9, 9).astype(t), np.random.rand(9).astype(t))
def _testGradient(self, np_input, bias, dtype, data_format, use_gpu):
with self.cached_session(use_gpu=use_gpu):
if data_format == "NCHW":
np_input = self._NHWCToNCHW(np_input)
input_tensor = constant_op.constant(
np_input, shape=np_input.shape, dtype=dtype
)
bias_tensor = constant_op.constant(bias, shape=bias.shape, dtype=dtype)
if dtype == dtypes.float16:
delta = 4.0 / 1024
else:
delta = 1.0 / 1024
output_tensor = nn_ops.bias_add(
input_tensor, bias_tensor, data_format=data_format
)
tensor_jacob_t, tensor_jacob_n = gradient_checker.compute_gradient(
input_tensor,
np_input.shape,
output_tensor,
np_input.shape,
delta=delta,
)
bias_jacob_t, bias_jacob_n = gradient_checker.compute_gradient(
bias_tensor, bias.shape, output_tensor, np_input.shape, delta=delta
)
# Test gradient of BiasAddGrad
bias_add_grad = gradients_impl.gradients(
nn_ops.l2_loss(output_tensor), bias_tensor
)[0]
grad_jacob_t, grad_jacob_n = gradient_checker.compute_gradient(
output_tensor, np_input.shape, bias_add_grad, bias.shape, delta=delta
)
threshold = 5e-3
if dtype == dtypes.float64:
threshold = 1e-10
if dtype == dtypes.float16:
threshold = 2e-2
# threshold for fp16 < threshold for fp32 since precision is lower.
self.assertAllClose(tensor_jacob_t, tensor_jacob_n, threshold, threshold)
self.assertAllClose(bias_jacob_t, bias_jacob_n, threshold, threshold)
self.assertAllClose(grad_jacob_t, grad_jacob_n, threshold, threshold)
@test_util.run_deprecated_v1
def testGradientTensor2D(self):
for data_format, use_gpu in [("NHWC", True)]:
for dtype in [dtypes.float32, dtypes.float16]:
np_input = np.array(
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=dtype.as_numpy_dtype
).reshape(3, 2)
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
@test_util.run_deprecated_v1
def testGradientTensor3D(self):
for data_format, use_gpu in [("NHWC", True)]:
for dtype in (dtypes.float32, dtypes.float64, dtypes.float16):
print(data_format)
np_input = np.array(
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0],
dtype=dtype.as_numpy_dtype,
).reshape((2, 3, 2))
bias = np.array([1.3, 2.4], dtype=dtype.as_numpy_dtype)
self._testGradient(np_input, bias, dtype, data_format, use_gpu)
@test_util.run_deprecated_v1
def testEmpty(self):
np.random.seed(7)
for shape in (0, 0), (2, 0), (0, 2), (4, 3, 0), (4, 0, 3), (0, 4, 3):
self._testAll(np.random.randn(*shape), np.random.randn(shape[-1]))
@test_util.run_deprecated_v1
def testEmptyGradient(self):
for data_format, use_gpu in ("NHWC", False), ("NHWC", True):
for shape in (0, 0), (2, 0), (0, 2):
self._testGradient(
np.random.randn(*shape),
np.random.randn(shape[-1]),
dtypes.float64,
data_format,
use_gpu,
)
for data_format, use_gpu in [
("NHWC", False),
("NHWC", True),
("NCHW", False),
("NCHW", True),
]:
for shape in (4, 3, 0), (4, 0, 3), (0, 4, 3):
self._testGradient(
np.random.randn(*shape),
np.random.randn(shape[-1]),
dtypes.float64,
data_format,
use_gpu,
)
| BiasAddTestBase |
python | django__django | django/contrib/auth/backends.py | {
"start": 1700,
"end": 9259
} | class ____(BaseBackend):
"""
Authenticates against settings.AUTH_USER_MODEL.
"""
def authenticate(self, request, username=None, password=None, **kwargs):
if username is None:
username = kwargs.get(UserModel.USERNAME_FIELD)
if username is None or password is None:
return
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
# Run the default password hasher once to reduce the timing
# difference between an existing and a nonexistent user (#20760).
UserModel().set_password(password)
else:
if user.check_password(password) and self.user_can_authenticate(user):
return user
async def aauthenticate(self, request, username=None, password=None, **kwargs):
if username is None:
username = kwargs.get(UserModel.USERNAME_FIELD)
if username is None or password is None:
return
try:
user = await UserModel._default_manager.aget_by_natural_key(username)
except UserModel.DoesNotExist:
# Run the default password hasher once to reduce the timing
# difference between an existing and a nonexistent user (#20760).
UserModel().set_password(password)
else:
if await user.acheck_password(password) and self.user_can_authenticate(
user
):
return user
def user_can_authenticate(self, user):
"""
Reject users with is_active=False. Custom user models that don't have
that attribute are allowed.
"""
return getattr(user, "is_active", True)
def _get_user_permissions(self, user_obj):
return user_obj.user_permissions.all()
def _get_group_permissions(self, user_obj):
return Permission.objects.filter(group__in=user_obj.groups.all())
def _get_permissions(self, user_obj, obj, from_name):
"""
Return the permissions of `user_obj` from `from_name`. `from_name` can
be either "group" or "user" to return permissions from
`_get_group_permissions` or `_get_user_permissions` respectively.
"""
if not user_obj.is_active or user_obj.is_anonymous or obj is not None:
return set()
perm_cache_name = "_%s_perm_cache" % from_name
if not hasattr(user_obj, perm_cache_name):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
perms = getattr(self, "_get_%s_permissions" % from_name)(user_obj)
perms = perms.values_list("content_type__app_label", "codename").order_by()
setattr(
user_obj, perm_cache_name, {"%s.%s" % (ct, name) for ct, name in perms}
)
return getattr(user_obj, perm_cache_name)
async def _aget_permissions(self, user_obj, obj, from_name):
"""See _get_permissions()."""
if not user_obj.is_active or user_obj.is_anonymous or obj is not None:
return set()
perm_cache_name = "_%s_perm_cache" % from_name
if not hasattr(user_obj, perm_cache_name):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
perms = getattr(self, "_get_%s_permissions" % from_name)(user_obj)
perms = perms.values_list("content_type__app_label", "codename").order_by()
setattr(
user_obj,
perm_cache_name,
{"%s.%s" % (ct, name) async for ct, name in perms},
)
return getattr(user_obj, perm_cache_name)
def get_user_permissions(self, user_obj, obj=None):
"""
Return a set of permission strings the user `user_obj` has from their
`user_permissions`.
"""
return self._get_permissions(user_obj, obj, "user")
async def aget_user_permissions(self, user_obj, obj=None):
"""See get_user_permissions()."""
return await self._aget_permissions(user_obj, obj, "user")
def get_group_permissions(self, user_obj, obj=None):
"""
Return a set of permission strings the user `user_obj` has from the
groups they belong.
"""
return self._get_permissions(user_obj, obj, "group")
async def aget_group_permissions(self, user_obj, obj=None):
"""See get_group_permissions()."""
return await self._aget_permissions(user_obj, obj, "group")
def get_all_permissions(self, user_obj, obj=None):
if not user_obj.is_active or user_obj.is_anonymous or obj is not None:
return set()
if not hasattr(user_obj, "_perm_cache"):
user_obj._perm_cache = super().get_all_permissions(user_obj)
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
return user_obj.is_active and super().has_perm(user_obj, perm, obj=obj)
async def ahas_perm(self, user_obj, perm, obj=None):
return user_obj.is_active and await super().ahas_perm(user_obj, perm, obj=obj)
def has_module_perms(self, user_obj, app_label):
"""
Return True if user_obj has any permissions in the given app_label.
"""
return user_obj.is_active and any(
perm[: perm.index(".")] == app_label
for perm in self.get_all_permissions(user_obj)
)
async def ahas_module_perms(self, user_obj, app_label):
"""See has_module_perms()"""
return user_obj.is_active and any(
perm[: perm.index(".")] == app_label
for perm in await self.aget_all_permissions(user_obj)
)
def with_perm(self, perm, is_active=True, include_superusers=True, obj=None):
"""
Return users that have permission "perm". By default, filter out
inactive users and include superusers.
"""
if isinstance(perm, str):
try:
app_label, codename = perm.split(".")
except ValueError:
raise ValueError(
"Permission name should be in the form "
"app_label.permission_codename."
)
elif not isinstance(perm, Permission):
raise TypeError(
"The `perm` argument must be a string or a permission instance."
)
if obj is not None:
return UserModel._default_manager.none()
permission_q = Q(group__user=OuterRef("pk")) | Q(user=OuterRef("pk"))
if isinstance(perm, Permission):
permission_q &= Q(pk=perm.pk)
else:
permission_q &= Q(codename=codename, content_type__app_label=app_label)
user_q = Exists(Permission.objects.filter(permission_q))
if include_superusers:
user_q |= Q(is_superuser=True)
if is_active is not None:
user_q &= Q(is_active=is_active)
return UserModel._default_manager.filter(user_q)
def get_user(self, user_id):
try:
user = UserModel._default_manager.get(pk=user_id)
except UserModel.DoesNotExist:
return None
return user if self.user_can_authenticate(user) else None
async def aget_user(self, user_id):
try:
user = await UserModel._default_manager.aget(pk=user_id)
except UserModel.DoesNotExist:
return None
return user if self.user_can_authenticate(user) else None
| ModelBackend |
python | great-expectations__great_expectations | docs/docusaurus/versioned_docs/version-0.18/snippets/expect_column_values_to_equal_three.py | {
"start": 4997,
"end": 15950
} | class ____(ColumnMapExpectation):
# </snippet>
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py docstring">
"""Expect values in this column to equal 3."""
# </snippet>
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py examples">
examples = [
{
"data": {
"all_threes": [3, 3, 3, 3, 3],
"some_zeroes": [3, 3, 3, 0, 0],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_threes"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_zeroes", "mostly": 0.8},
"out": {
"success": False,
},
},
],
}
]
# </snippet>
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py map_metric">
map_metric = "column_values.equal_three"
# </snippet>
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see https://docs.greatexpectations.io/en/latest/reference/core_concepts/expectations/expectations.html#expectation-concepts-domain-and-success-keys
# for more information about domain and success keys, and other arguments to Expectations
success_keys = ("mostly",)
@renderer(renderer_type="renderer.diagnostic.observed_value")
@render_evaluation_parameter_string
def _diagnostic_observed_value_renderer(
cls,
configuration: ExpectationConfiguration = None,
result: ExpectationValidationResult = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
assert result, "Must provide a result object."
result_dict = result.result
if result_dict is None:
return "--"
if result_dict.get("observed_value"):
observed_value = result_dict.get("observed_value")
if isinstance(observed_value, (int, float)) and not isinstance(
observed_value, bool
):
return num_to_str(observed_value, precision=10, use_locale=True)
return str(observed_value)
elif result_dict.get("unexpected_percent") is not None:
return (
num_to_str(result_dict.get("unexpected_percent"), precision=5)
+ "% unexpected"
)
else:
return "--"
@renderer(renderer_type="renderer.diagnostic.unexpected_statement")
@render_evaluation_parameter_string
def _diagnostic_unexpected_statement_renderer(
cls,
configuration: ExpectationConfiguration = None,
result: ExpectationValidationResult = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
assert result, "Must provide a result object."
success = result.success
result = result.result
if result.exception_info["raised_exception"]:
exception_message_template_str = (
"\n\n$expectation_type raised an exception:\n$exception_message"
)
exception_message = RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": exception_message_template_str,
"params": {
"expectation_type": result.expectation_config.expectation_type,
"exception_message": result.exception_info[
"exception_message"
],
},
"tag": "strong",
"styling": {
"classes": ["text-danger"],
"params": {
"exception_message": {"tag": "code"},
"expectation_type": {
"classes": ["badge", "badge-danger", "mb-2"]
},
},
},
},
}
)
exception_traceback_collapse = CollapseContent(
**{
"collapse_toggle_link": "Show exception traceback...",
"collapse": [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": result.exception_info[
"exception_traceback"
],
"tag": "code",
},
}
)
],
}
)
return [exception_message, exception_traceback_collapse]
if success or not result_dict.get("unexpected_count"):
return []
else:
unexpected_count = num_to_str(
result_dict["unexpected_count"], use_locale=True, precision=20
)
unexpected_percent = (
num_to_str(result_dict["unexpected_percent"], precision=4) + "%"
)
element_count = num_to_str(
result_dict["element_count"], use_locale=True, precision=20
)
template_str = (
"\n\n$unexpected_count unexpected values found. "
"$unexpected_percent of $element_count total rows."
)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": {
"unexpected_count": unexpected_count,
"unexpected_percent": unexpected_percent,
"element_count": element_count,
},
"tag": "strong",
"styling": {"classes": ["text-danger"]},
},
}
)
]
@renderer(renderer_type="renderer.diagnostic.unexpected_table")
@render_evaluation_parameter_string
def _diagnostic_unexpected_table_renderer( # noqa: PLR0912 # too complex
cls,
configuration: ExpectationConfiguration = None,
result: ExpectationValidationResult = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
try:
result_dict = result.result
except KeyError:
return None
if result_dict is None:
return None
if not result_dict.get("partial_unexpected_list") and not result_dict.get(
"partial_unexpected_counts"
):
return None
table_rows = []
if result_dict.get("partial_unexpected_counts"):
total_count = 0
for unexpected_count_dict in result_dict.get("partial_unexpected_counts"):
value = unexpected_count_dict.get("value")
count = unexpected_count_dict.get("count")
total_count += count
if value is not None and value != "":
table_rows.append([value, count])
elif value == "":
table_rows.append(["EMPTY", count])
else:
table_rows.append(["null", count])
if total_count == result_dict.get("unexpected_count"):
header_row = ["Unexpected Value", "Count"]
else:
header_row = ["Sampled Unexpected Values"]
table_rows = [[row[0]] for row in table_rows]
else:
header_row = ["Sampled Unexpected Values"]
sampled_values_set = set()
for unexpected_value in result_dict.get("partial_unexpected_list"):
if unexpected_value:
string_unexpected_value = str(unexpected_value)
elif unexpected_value == "":
string_unexpected_value = "EMPTY"
else:
string_unexpected_value = "null"
if string_unexpected_value not in sampled_values_set:
table_rows.append([unexpected_value])
sampled_values_set.add(string_unexpected_value)
unexpected_table_content_block = RenderedTableContent(
**{
"content_block_type": "table",
"table": table_rows,
"header_row": header_row,
"styling": {
"body": {"classes": ["table-bordered", "table-sm", "mt-3"]}
},
}
)
return unexpected_table_content_block
# This dictionary contains metadata for display in the public gallery
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py library_metadata">
library_metadata = {
"tags": ["extremely basic math"],
"contributors": ["@joegargery"],
}
# </snippet>
if __name__ == "__main__":
# <snippet name="docs/docusaurus/docs/snippets/expect_column_values_to_equal_three.py diagnostics">
ExpectColumnValuesToEqualThree().print_diagnostic_checklist()
# </snippet>
# Note to users: code below this line is only for integration testing -- ignore!
diagnostics = ExpectColumnValuesToEqualThree().run_diagnostics()
for check in diagnostics["tests"]:
assert check["test_passed"] is True
assert check["error_diagnostics"] is None
for check in diagnostics["errors"]:
assert check is None
for check in diagnostics["maturity_checklist"]["experimental"]:
if check["message"] == "Passes all linting checks":
continue
assert check["passed"] is True
| ExpectColumnValuesToEqualThree |
python | dagster-io__dagster | python_modules/dagster/dagster/_cli/workspace/cli_target.py | {
"start": 12098,
"end": 25631
} | class ____:
repository: Optional[str] = None
location: Optional[str] = None
@classmethod
def extract_from_cli_options(cls, cli_options: dict[str, object]) -> Self:
return cls(
repository=check.opt_inst(cli_options.pop("repository"), str),
location=check.opt_inst(cli_options.pop("location"), str),
)
# ########################
# ##### CLICK DECORATORS
# ########################
# These are named as *_options and can be directly applied to click commands/groups as decorators.
# They contain various subsets from the generate_*
def run_config_option(*, name: str, command_name: str) -> Callable[[T_Callable], T_Callable]:
def wrap(f: T_Callable) -> T_Callable:
return apply_click_params(f, _generate_run_config_option(name, command_name))
return wrap
def job_name_option(f: Optional[T_Callable] = None, *, name: str) -> T_Callable:
if f is None:
return lambda f: job_name_option(f, name=name) # type: ignore
else:
return apply_click_params(f, _generate_job_name_option(name))
def repository_name_option(f: Optional[T_Callable] = None, *, name: str) -> T_Callable:
if f is None:
return lambda f: repository_name_option(f, name=name) # type: ignore
else:
return apply_click_params(f, _generate_repository_name_option(name))
def repository_options(f: T_Callable) -> T_Callable:
return apply_click_params(
f,
_generate_repository_name_option("repository"),
_generate_code_location_name_option("location"),
)
# ########################
# ##### OPTION GENERATORS
# ########################
# These are named as generate_*_option(s) and return a ClickOption or list of Click Options. These
# cannot be directly applied to click commands/groups as decorators. They are intended to be private
# to this module-- external code should use the below decorators.
def _generate_job_name_option(name: str) -> ClickOption:
return click.option(
"--job",
"-j",
name,
help="Job within the repository, necessary if more than one job is present.",
)
def _generate_repository_name_option(name: str) -> ClickOption:
return click.option(
"--repository",
"-r",
name,
help=(
"Name of the repository, necessary if more than one repository is present in the"
" code location."
),
)
def _generate_code_location_name_option(name: str) -> ClickOption:
return click.option(
"--location",
"-l",
name,
help="Name of the code location, necessary if more than one location is present.",
)
def _generate_run_config_option(name: str, command_name: str) -> ClickOption:
return click.option(
"-c",
"--config",
name,
type=click.Path(exists=True),
multiple=True,
help=(
"Specify one or more run config files. These can also be file patterns. "
"If more than one run config file is captured then those files are merged. "
"Files listed first take precedence. They will smash the values of subsequent "
"files at the key-level granularity. If the file is a pattern then you must "
"enclose it in double quotes"
"\n\nExample: "
f"dagster job {command_name} -f hello_world.py -j pandas_hello_world "
'-c "pandas_hello_world/*.yaml"'
"\n\nYou can also specify multiple files:"
"\n\nExample: "
f"dagster job {command_name} -f hello_world.py -j pandas_hello_world "
"-c pandas_hello_world/ops.yaml -c pandas_hello_world/env.yaml"
),
)
def _get_name_from_target_def(target_def: object) -> str:
if isinstance(target_def, (Definitions, LazyDefinitions)):
return SINGLETON_REPOSITORY_NAME
return check.not_none(repository_def_from_target_def(target_def)).name
def _get_code_pointer_dict_from_python_pointer_opts(
params: PythonPointerOpts,
) -> Mapping[str, CodePointer]:
working_directory = params.working_directory or os.getcwd()
loadable_targets = get_loadable_targets(
python_file=params.python_file,
module_name=params.module_name,
package_name=params.package_name,
working_directory=working_directory,
attribute=params.attribute,
autoload_defs_module_name=params.autoload_defs_module_name,
resolve_lazy_defs=True,
)
# repository_name -> code_pointer
code_pointer_dict: dict[str, CodePointer] = {}
for loadable_target in loadable_targets:
name = _get_name_from_target_def(loadable_target.target_definition)
if params.python_file:
code_pointer = CodePointer.from_python_file(
params.python_file, loadable_target.attribute, working_directory
)
elif params.module_name:
code_pointer = CodePointer.from_module(
params.module_name, loadable_target.attribute, working_directory
)
elif params.package_name:
code_pointer = CodePointer.from_python_package(
params.package_name, loadable_target.attribute, working_directory
)
elif params.autoload_defs_module_name:
code_pointer = AutoloadDefsModuleCodePointer(
module=params.autoload_defs_module_name,
working_directory=working_directory,
)
else:
check.failed("Must specify a Python file or module name")
code_pointer_dict[name] = code_pointer
return code_pointer_dict
def get_repository_python_origin_from_cli_opts(
params: PythonPointerOpts, repo_name: Optional[str] = None
) -> RepositoryPythonOrigin:
if (
sum(
[
bool(x)
for x in (
params.python_file,
params.module_name,
params.package_name,
params.autoload_defs_module_name,
)
]
)
!= 1
):
_raise_cli_usage_error()
# Short-circuit the case where an attribute and no repository name is passed in,
# giving us enough information to return an origin without loading any target
# definitions - we may need to return an origin for a non-existent repository
# (e.g. to log an origin ID for an error message)
if params.attribute and not repo_name:
working_directory = params.working_directory or os.getcwd()
if params.python_file:
code_pointer: CodePointer = CodePointer.from_python_file(
params.python_file,
params.attribute,
working_directory,
)
elif params.module_name:
code_pointer = CodePointer.from_module(
params.module_name,
params.attribute,
working_directory,
)
elif params.package_name:
code_pointer = CodePointer.from_python_package(
params.package_name,
params.attribute,
working_directory,
)
elif params.autoload_defs_module_name:
code_pointer = AutoloadDefsModuleCodePointer(
module=params.autoload_defs_module_name,
working_directory=working_directory,
)
else:
check.failed("Must specify a Python file or module name")
return RepositoryPythonOrigin(
executable_path=sys.executable,
code_pointer=code_pointer,
entry_point=DEFAULT_DAGSTER_ENTRY_POINT,
)
code_pointer_dict = _get_code_pointer_dict_from_python_pointer_opts(params)
found_repo_names = serialize_sorted_quoted(code_pointer_dict.keys())
if repo_name is None and len(code_pointer_dict) == 1:
code_pointer = next(iter(code_pointer_dict.values()))
elif repo_name is None:
raise click.UsageError(
"Must provide --repository as there is more than one repository. "
f"Options are: {found_repo_names}."
)
elif repo_name not in code_pointer_dict:
raise click.UsageError(
f'Repository "{repo_name}" not found. Found {found_repo_names} instead.'
)
else:
code_pointer = code_pointer_dict[repo_name]
return RepositoryPythonOrigin(
executable_path=sys.executable,
code_pointer=code_pointer,
entry_point=DEFAULT_DAGSTER_ENTRY_POINT,
)
def get_code_location_from_workspace(
workspace: WorkspaceRequestContext, code_location_name: Optional[str]
) -> CodeLocation:
if code_location_name is None:
if len(workspace.code_location_names) == 1:
code_location_name = workspace.code_location_names[0]
elif len(workspace.code_location_names) == 0:
raise click.UsageError("No locations found in workspace")
elif code_location_name is None:
raise click.UsageError(
"Must provide --location as there are multiple locations "
f"available. Options are: {serialize_sorted_quoted(workspace.code_location_names)}"
)
if code_location_name not in workspace.code_location_names:
raise click.UsageError(
f'Location "{code_location_name}" not found in workspace. '
f"Found {serialize_sorted_quoted(workspace.code_location_names)} instead."
)
if workspace.has_code_location_error(code_location_name):
raise click.UsageError(
f'Error loading location "{code_location_name}": {workspace.get_code_location_error(code_location_name)!s}'
)
return workspace.get_code_location(code_location_name)
def get_remote_repository_from_code_location(
code_location: CodeLocation, provided_repo_name: Optional[str]
) -> RemoteRepository:
check.inst_param(code_location, "code_location", CodeLocation)
check.opt_str_param(provided_repo_name, "provided_repo_name")
repo_dict = code_location.get_repositories()
check.invariant(repo_dict, "There should be at least one repo.")
# no name provided and there is only one repo. Automatically return
if provided_repo_name is None and len(repo_dict) == 1:
return next(iter(repo_dict.values()))
if provided_repo_name is None:
raise click.UsageError(
"Must provide --repository as there is more than one repository "
f"in {code_location.name}. Options are: {serialize_sorted_quoted(repo_dict.keys())}."
)
if not code_location.has_repository(provided_repo_name):
raise click.UsageError(
f'Repository "{provided_repo_name}" not found in location "{code_location.name}". '
f"Found {serialize_sorted_quoted(repo_dict.keys())} instead."
)
return code_location.get_repository(provided_repo_name)
def get_remote_job_from_remote_repo(
remote_repo: RemoteRepository,
provided_name: Optional[str],
) -> RemoteJob:
check.inst_param(remote_repo, "remote_repo", RemoteRepository)
check.opt_str_param(provided_name, "provided_name")
remote_jobs = {ep.name: ep for ep in (remote_repo.get_all_jobs())}
check.invariant(remote_jobs)
if provided_name is None and len(remote_jobs) == 1:
return next(iter(remote_jobs.values()))
if provided_name is None:
raise click.UsageError(
"Must provide --job as there is more than one job "
f"in {remote_repo.name}. Options are: {serialize_sorted_quoted(remote_jobs.keys())}."
)
if provided_name not in remote_jobs:
raise click.UsageError(
f'Job "{provided_name}" not found in repository "{remote_repo.name}". '
f"Found {serialize_sorted_quoted(remote_jobs.keys())} instead."
)
return remote_jobs[provided_name]
def get_run_config_from_file_list(file_list: list[str]) -> Mapping[str, object]:
check.opt_sequence_param(file_list, "file_list", of_type=str)
return cast("Mapping[str, object]", load_yaml_from_glob_list(file_list) if file_list else {})
def get_run_config_from_cli_opts(
config_files: tuple[str, ...], config_json: Optional[str]
) -> Mapping[str, object]:
if not (config_files or config_json):
return {}
elif config_files and config_json:
raise click.UsageError("Cannot specify both -c / --config and --config-json")
elif config_files:
return get_run_config_from_file_list(list(config_files))
elif config_json:
try:
return json.loads(config_json)
except JSONDecodeError:
raise click.UsageError(
f"Invalid JSON-string given for `--config-json`: {config_json}\n\n{serializable_error_info_from_exc_info(sys.exc_info()).to_string()}"
)
else:
check.failed("Unhandled case getting config from kwargs")
# ########################
# ##### HELPERS
# ########################
def _raise_cli_usage_error(msg: Optional[str] = None) -> Never:
raise UsageError(
msg or "Invalid set of CLI arguments for loading repository/job. See --help for details."
)
def _check_attrs_falsey(obj: object, *attrs: str) -> None:
if not _are_attrs_falsey(obj, *attrs):
_raise_cli_usage_error()
def _are_attrs_falsey(obj: object, *attrs: str) -> bool:
for attr in attrs:
if getattr(obj, attr):
return False
return True
| RepositoryOpts |
python | lepture__authlib | authlib/oauth2/rfc7523/token.py | {
"start": 90,
"end": 3384
} | class ____:
"""A JSON Web Token formatted bearer token generator for jwt-bearer grant type.
This token generator can be registered into authorization server::
authorization_server.register_token_generator(
"urn:ietf:params:oauth:grant-type:jwt-bearer",
JWTBearerTokenGenerator(private_rsa_key),
)
In this way, we can generate the token into JWT format. And we don't have to
save this token into database, since it will be short time valid. Consider to
rewrite ``JWTBearerGrant.save_token``::
class MyJWTBearerGrant(JWTBearerGrant):
def save_token(self, token):
pass
:param secret_key: private RSA key in bytes, JWK or JWK Set.
:param issuer: a string or URI of the issuer
:param alg: ``alg`` to use in JWT
"""
DEFAULT_EXPIRES_IN = 3600
def __init__(self, secret_key, issuer=None, alg="RS256"):
self.secret_key = secret_key
self.issuer = issuer
self.alg = alg
@staticmethod
def get_allowed_scope(client, scope):
if scope:
scope = client.get_allowed_scope(scope)
return scope
@staticmethod
def get_sub_value(user):
"""Return user's ID as ``sub`` value in token payload. For instance::
@staticmethod
def get_sub_value(user):
return str(user.id)
"""
return user.get_user_id()
def get_token_data(self, grant_type, client, expires_in, user=None, scope=None):
scope = self.get_allowed_scope(client, scope)
issued_at = int(time.time())
data = {
"scope": scope,
"grant_type": grant_type,
"iat": issued_at,
"exp": issued_at + expires_in,
"client_id": client.get_client_id(),
}
if self.issuer:
data["iss"] = self.issuer
if user:
data["sub"] = self.get_sub_value(user)
return data
def generate(self, grant_type, client, user=None, scope=None, expires_in=None):
"""Generate a bearer token for OAuth 2.0 authorization token endpoint.
:param client: the client that making the request.
:param grant_type: current requested grant_type.
:param user: current authorized user.
:param expires_in: if provided, use this value as expires_in.
:param scope: current requested scope.
:return: Token dict
"""
if expires_in is None:
expires_in = self.DEFAULT_EXPIRES_IN
token_data = self.get_token_data(grant_type, client, expires_in, user, scope)
access_token = jwt.encode(
{"alg": self.alg}, token_data, key=self.secret_key, check=False
)
token = {
"token_type": "Bearer",
"access_token": to_native(access_token),
"expires_in": expires_in,
}
if scope:
token["scope"] = scope
return token
def __call__(
self,
grant_type,
client,
user=None,
scope=None,
expires_in=None,
include_refresh_token=True,
):
# there is absolutely no refresh token in JWT format
return self.generate(grant_type, client, user, scope, expires_in)
| JWTBearerTokenGenerator |
python | pypa__pip | src/pip/_vendor/pkg_resources/__init__.py | {
"start": 8510,
"end": 9372
} | class ____(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self) -> Distribution:
return self.args[0]
@property
def req(self) -> Requirement:
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by: set[Distribution | str]):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
| VersionConflict |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/datamodels/taskinstance.py | {
"start": 11469,
"end": 11802
} | class ____(BaseModel):
"""Schema for response with previous successful DagRun information for Task Template Context."""
data_interval_start: UtcDateTime | None = None
data_interval_end: UtcDateTime | None = None
start_date: UtcDateTime | None = None
end_date: UtcDateTime | None = None
| PrevSuccessfulDagRunResponse |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/specialization1.py | {
"start": 150,
"end": 202
} | class ____(A):
pass
_T1 = TypeVar("_T1", A, B)
| C |
python | getsentry__sentry | src/sentry/models/projectcodeowners.py | {
"start": 855,
"end": 6567
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
# no db constraint to prevent locks on the Project table
project = FlexibleForeignKey("sentry.Project", db_constraint=False)
# repository_project_path_config ⇒ use this to transform CODEOWNERS paths to stacktrace paths
repository_project_path_config = FlexibleForeignKey(
"sentry.RepositoryProjectPathConfig", unique=True, on_delete=models.PROTECT
)
# raw ⇒ original CODEOWNERS file.
raw = models.TextField()
# schema ⇒ transformed into IssueOwner syntax
schema = JSONField()
date_updated = models.DateTimeField(default=timezone.now)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_projectcodeowners"
__repr__ = sane_repr("project_id", "id")
@classmethod
def get_cache_key(self, project_id: int) -> str:
return f"projectcodeowners_project_id:1:{project_id}"
@classmethod
def get_codeowners_cached(self, project_id: int) -> ProjectCodeOwners | None:
"""
Cached read access to sentry_projectcodeowners.
This method implements a negative cache which saves us
a pile of read queries in post_processing as most projects
don't have CODEOWNERS.
"""
cache_key = self.get_cache_key(project_id)
code_owners = cache.get(cache_key)
if code_owners is None:
query = self.objects.filter(project_id=project_id).order_by("-date_added") or ()
code_owners = self.merge_code_owners_list(code_owners_list=query) if query else query
cache.set(cache_key, code_owners, READ_CACHE_DURATION)
return code_owners or None
@classmethod
def merge_code_owners_list(
self, code_owners_list: Iterable[ProjectCodeOwners]
) -> ProjectCodeOwners | None:
"""
Merge list of code_owners into a single code_owners object concatenating
all the rules. We assume schema version is constant.
"""
merged_code_owners: ProjectCodeOwners | None = None
for code_owners in code_owners_list:
if code_owners.schema:
if merged_code_owners is None:
merged_code_owners = code_owners
continue
merged_code_owners.schema["rules"] = [
*merged_code_owners.schema["rules"],
*code_owners.schema["rules"],
]
return merged_code_owners
def update_schema(self, organization: Organization, raw: str | None = None) -> None:
"""
Updating the schema goes through the following steps:
1. Update the raw content (original CODEOWNERS text)
2. Parse the original CODEOWNERS file to get the associations
3. Convert the CODEOWNERS file to the ownership syntax
4. Convert the ownership syntax to the schema
"""
from sentry.api.validators.project_codeowners import build_codeowners_associations
from sentry.utils.codeowners import MAX_RAW_LENGTH
if raw and self.raw != raw:
self.raw = raw
if not self.raw:
return
if len(self.raw) > MAX_RAW_LENGTH:
try:
analytics.record(
CodeOwnersMaxLengthExceeded(
organization_id=organization.id,
)
)
except Exception as e:
sentry_sdk.capture_exception(e)
logger.warning({"raw": f"Raw needs to be <= {MAX_RAW_LENGTH} characters in length"})
return
associations, _ = build_codeowners_associations(self.raw, self.project)
issue_owner_rules = convert_codeowners_syntax(
codeowners=self.raw,
associations=associations,
code_mapping=self.repository_project_path_config,
)
# Convert IssueOwner syntax into schema syntax
try:
schema = create_schema_from_issue_owners(
project_id=self.project.id,
issue_owners=issue_owner_rules,
remove_deleted_owners=True,
)
# Convert IssueOwner syntax into schema syntax
if schema:
self.schema = schema
self.save()
except ValidationError:
logger.exception("Failed to create schema from issue owners.")
return
def modify_date_updated(instance, **kwargs):
if instance.id is None:
return
instance.date_updated = timezone.now()
def process_resource_change(instance, change, **kwargs):
from sentry.models.groupowner import GroupOwner
from sentry.models.projectownership import ProjectOwnership
cache.set(
ProjectCodeOwners.get_cache_key(instance.project_id),
None,
READ_CACHE_DURATION,
)
ownership = ProjectOwnership.get_ownership_cached(instance.project_id)
if not ownership:
ownership = ProjectOwnership(project_id=instance.project_id)
GroupOwner.invalidate_debounce_issue_owners_evaluation_cache(instance.project_id)
pre_save.connect(
modify_date_updated,
sender=ProjectCodeOwners,
dispatch_uid="projectcodeowners_modify_date_updated",
weak=False,
)
# Signals update the cached reads used in post_processing
post_save.connect(
lambda instance, **kwargs: process_resource_change(instance, "updated", **kwargs),
sender=ProjectCodeOwners,
weak=False,
)
post_delete.connect(
lambda instance, **kwargs: process_resource_change(instance, "deleted", **kwargs),
sender=ProjectCodeOwners,
weak=False,
)
| ProjectCodeOwners |
python | Lightning-AI__lightning | src/lightning/pytorch/loops/prediction_loop.py | {
"start": 2101,
"end": 18725
} | class ____(_Loop):
"""Top-level loop where prediction starts."""
def __init__(self, trainer: "pl.Trainer", inference_mode: bool = True) -> None:
super().__init__(trainer)
self.inference_mode = inference_mode
# dataloaders x batches x samples. used by PredictionWriter
self.epoch_batch_indices: list[list[list[int]]] = []
self.current_batch_indices: list[int] = [] # used by PredictionWriter
self.batch_progress = _Progress() # across dataloaders
self.max_batches: list[Union[int, float]] = []
self._warning_cache = WarningCache()
self._data_source = _DataLoaderSource(None, "predict_dataloader")
self._combined_loader: Optional[CombinedLoader] = None
self._data_fetcher: Optional[_DataFetcher] = None
self._results = None # for `trainer._results` access
self._predictions: list[list[Any]] = [] # dataloaders x batches
self._return_predictions = False
self._module_mode = _ModuleMode()
@property
def return_predictions(self) -> bool:
"""Whether to return the predictions or not."""
return self._return_predictions
@return_predictions.setter
def return_predictions(self, return_predictions: Optional[bool] = None) -> None:
# Strategies that spawn or fork don't support returning predictions
return_supported = not isinstance(self.trainer.strategy.launcher, _MultiProcessingLauncher)
if return_predictions and not return_supported:
raise MisconfigurationException(
"`return_predictions` should be set to `False` when using the strategies that spawn or fork."
f" Found {return_predictions} with strategy {type(self.trainer.strategy)}."
)
# For strategies that support it, `return_predictions` is True by default unless user decide otherwise.
self._return_predictions = return_supported if return_predictions is None else return_predictions
@property
def predictions(self) -> list[Any]:
"""The cached predictions."""
if self._predictions == []:
return self._predictions
return self._predictions[0] if self.num_dataloaders == 1 else self._predictions
@property
def num_dataloaders(self) -> int:
"""Returns the number of prediction dataloaders."""
combined_loader = self._combined_loader
assert combined_loader is not None
return len(combined_loader.flattened)
@property
def skip(self) -> bool:
return sum(self.max_batches) == 0
@_no_grad_context
def run(self) -> Optional[_PREDICT_OUTPUT]:
self.setup_data()
if self.skip:
return None
self.reset()
self.on_run_start()
data_fetcher = self._data_fetcher
assert data_fetcher is not None
while True:
try:
if isinstance(data_fetcher, _DataLoaderIterDataFetcher):
dataloader_iter = next(data_fetcher)
# hook's batch_idx and dataloader_idx arguments correctness cannot be guaranteed in this setting
batch = data_fetcher._batch
batch_idx = data_fetcher._batch_idx
dataloader_idx = data_fetcher._dataloader_idx
else:
dataloader_iter = None
batch, batch_idx, dataloader_idx = next(data_fetcher)
self.batch_progress.is_last_batch = data_fetcher.done
# run step hooks
self._predict_step(batch, batch_idx, dataloader_idx, dataloader_iter)
except StopIteration:
# this needs to wrap the `*_step` call too (not just `next`) for `dataloader_iter` support
break
finally:
self._restarting = False
return self.on_run_end()
def setup_data(self) -> None:
trainer = self.trainer
# a default `predict_step` exists in the LightningModule, so no need to check if it's overridden
if trainer.limit_predict_batches == 0:
return
source = self._data_source
dataloaders = _request_dataloader(source)
trainer.strategy.barrier("predict_dataloader()")
if not isinstance(dataloaders, CombinedLoader):
combined_loader = CombinedLoader(dataloaders, "sequential")
else:
combined_loader = dataloaders
allow_zero_length = trainer.lightning_module.allow_zero_length_dataloader_with_multiple_devices
if trainer.datamodule is not None:
allow_zero_length |= trainer.datamodule.allow_zero_length_dataloader_with_multiple_devices
trainer_fn = TrainerFn.PREDICTING
stage = RunningStage.PREDICTING
dataloaders = []
self.max_batches = []
for dl in combined_loader.flattened:
_check_dataloader_iterable(dl, source, trainer_fn)
dl = _process_dataloader(trainer, trainer_fn, stage, dl)
dataloaders.append(dl)
# determine number of batches
length = len(dl) if has_len_all_ranks(dl, trainer.strategy, allow_zero_length) else float("inf")
num_batches = _parse_num_batches(stage, length, trainer.limit_predict_batches)
self.max_batches.append(num_batches)
combined_loader.flattened = dataloaders
self._combined_loader = combined_loader
def reset(self) -> None:
"""Resets the internal state of the loop for a new run."""
self.batch_progress.reset_on_run()
assert self.trainer.state.stage is not None
data_fetcher = _select_data_fetcher(self.trainer, self.trainer.state.stage)
combined_loader = self._combined_loader
assert combined_loader is not None
if combined_loader._mode != "sequential":
raise ValueError('`trainer.predict()` only supports the `CombinedLoader(mode="sequential")` mode.')
# set the per-dataloader limits
combined_loader.limits = self.max_batches
data_fetcher.setup(combined_loader)
iter(data_fetcher) # creates the iterator inside the fetcher
# add the previous `fetched` value to properly track `is_last_batch` with no prefetching
data_fetcher.fetched += self.batch_progress.current.ready
data_fetcher._start_profiler = self._on_before_fetch
data_fetcher._stop_profiler = self._on_after_fetch
self._data_fetcher = data_fetcher
num_dataloaders = self.num_dataloaders
self.epoch_batch_indices = [[] for _ in range(num_dataloaders)]
self._predictions = [[] for _ in range(num_dataloaders)]
def on_run_start(self) -> None:
"""Calls ``_on_predict_model_eval``, ``_on_predict_start`` and ``_on_predict_epoch_start`` hooks."""
self._verify_dataloader_idx_requirement()
self._on_predict_model_eval()
self._on_predict_start()
self._on_predict_epoch_start()
def on_run_end(self) -> Optional[_PREDICT_OUTPUT]:
"""Calls ``on_predict_epoch_end`` and ``on_predict_end`` hooks and returns results from all dataloaders."""
results = self._on_predict_epoch_end()
self._on_predict_end()
self._on_predict_model_train()
return results
def teardown(self) -> None:
if self._data_fetcher is not None:
self._data_fetcher.teardown()
self._data_fetcher = None
def _predict_step(
self, batch: Any, batch_idx: int, dataloader_idx: int, dataloader_iter: Optional[Iterator]
) -> None:
"""Runs the actual predict step together with all the necessary bookkeeping and the hooks tied to it.
Args:
batch: the current batch to run the prediction on
batch_idx: The index of the current batch.
dataloader_idx: the index of the dataloader producing the current batch.
dataloader_iter: The iterator if using this step flavor.
"""
trainer = self.trainer
data_fetcher = self._data_fetcher
assert data_fetcher is not None
if not (using_dataloader_iter := isinstance(data_fetcher, _DataLoaderIterDataFetcher)):
batch = trainer.precision_plugin.convert_input(batch)
batch = trainer.lightning_module._on_before_batch_transfer(batch, dataloader_idx=dataloader_idx)
batch = call._call_strategy_hook(trainer, "batch_to_device", batch, dataloader_idx=dataloader_idx)
self.batch_progress.increment_ready()
any_on_epoch = (
self._store_data_for_prediction_writer(batch_idx, dataloader_idx) if not using_dataloader_iter else False
)
# the `_step` methods don't take a batch_idx when `dataloader_iter` is used, but all other hooks still do,
# so we need different kwargs
hook_kwargs = self._build_kwargs(batch, batch_idx, dataloader_idx if self.num_dataloaders > 1 else None)
call._call_callback_hooks(trainer, "on_predict_batch_start", *hook_kwargs.values())
call._call_lightning_module_hook(trainer, "on_predict_batch_start", *hook_kwargs.values())
self.batch_progress.increment_started()
# configure step_kwargs
step_args = (
self._build_step_args_from_hook_kwargs(hook_kwargs, "predict_step")
if not using_dataloader_iter
else (dataloader_iter,)
)
predictions = call._call_strategy_hook(trainer, "predict_step", *step_args)
if predictions is None:
self._warning_cache.warn("predict returned None if it was on purpose, ignore this warning...")
self.batch_progress.increment_processed()
if using_dataloader_iter:
# update the hook kwargs now that the step method might have consumed the iterator
batch = data_fetcher._batch
batch_idx = data_fetcher._batch_idx
dataloader_idx = data_fetcher._dataloader_idx
hook_kwargs = self._build_kwargs(batch, batch_idx, dataloader_idx if self.num_dataloaders > 1 else None)
call._call_callback_hooks(trainer, "on_predict_batch_end", predictions, *hook_kwargs.values())
call._call_lightning_module_hook(trainer, "on_predict_batch_end", predictions, *hook_kwargs.values())
self.batch_progress.increment_completed()
if self._return_predictions or any_on_epoch:
self._predictions[dataloader_idx].append(move_data_to_device(predictions, torch.device("cpu")))
def _build_kwargs(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int]) -> OrderedDict:
"""Assembles the keyword arguments for the ``predict_step``
Args:
batch: the current batch to run the prediction on
batch_idx: the index of the current batch.
dataloader_idx: the index of the dataloader producing the current batch. None if not multiple dataloaders
in sequential mode.
Returns:
the dictionary containing all the keyboard arguments for the predict step
"""
step_kwargs = OrderedDict([("batch", batch), ("batch_idx", batch_idx)])
if dataloader_idx is not None:
step_kwargs["dataloader_idx"] = dataloader_idx
return step_kwargs
def _build_step_args_from_hook_kwargs(self, hook_kwargs: OrderedDict, step_hook_name: str) -> tuple:
"""Helper method to build args for `predict_step`."""
kwargs = hook_kwargs.copy()
step_hook_fx = getattr(self.trainer.lightning_module, step_hook_name)
if not is_param_in_hook_signature(step_hook_fx, "batch_idx", min_args=2):
kwargs.pop("batch_idx", None)
return tuple(kwargs.values())
def _get_batch_indices(self, dataloader: object) -> list[list[int]]: # batches x samples
"""Returns a reference to the seen batch indices if the dataloader has a batch sampler wrapped by our
:class:`~lightning.pytorch.overrides.distributed._IndexBatchSamplerWrapper`."""
batch_sampler = getattr(dataloader, "batch_sampler", None)
if not isinstance(batch_sampler, _IndexBatchSamplerWrapper):
self._warning_cache.warn(
f"Couldn't infer the batch indices fetched from your dataloader: `{type(dataloader).__name__}`"
)
return []
return batch_sampler.seen_batch_indices
def _store_data_for_prediction_writer(self, batch_idx: int, dataloader_idx: int) -> bool:
prediction_writers = [cb for cb in self.trainer.callbacks if isinstance(cb, BasePredictionWriter)]
any_on_epoch = any(cb.interval.on_epoch for cb in prediction_writers)
any_on_batch = any(cb.interval.on_batch for cb in prediction_writers)
if any_on_batch or any_on_epoch:
combined_loader = self._combined_loader
assert combined_loader is not None
dataloader = combined_loader.flattened[dataloader_idx]
batch_indices = self._get_batch_indices(dataloader)
if not batch_indices:
# this is only available with `_IndexBatchSamplerWrapper`, but it's only used on DataLoaders, if this is
# reached, it's likely because a non-DataLoader was passed
return any_on_epoch
batch_indices = batch_indices[batch_idx]
if any_on_epoch:
self.epoch_batch_indices[dataloader_idx].append(batch_indices)
if any_on_batch:
self.current_batch_indices = batch_indices
return any_on_epoch
def _on_before_fetch(self) -> None:
self.trainer.profiler.start(f"[{type(self).__name__}].predict_next")
def _on_after_fetch(self) -> None:
# the dataloader_idx cannot be easily included here because it might be different from the index used on
# profiler start, since the `__next__` call might use a different iterator
self.trainer.profiler.stop(f"[{type(self).__name__}].predict_next")
def _on_predict_start(self) -> None:
"""Calls ``on_predict_start`` hooks."""
trainer = self.trainer
call._call_callback_hooks(trainer, "on_predict_start")
call._call_lightning_module_hook(trainer, "on_predict_start")
call._call_strategy_hook(trainer, "on_predict_start")
def _on_predict_model_eval(self) -> None:
self._module_mode.capture(self.trainer.lightning_module)
call._call_lightning_module_hook(self.trainer, "on_predict_model_eval")
def _on_predict_model_train(self) -> None:
self._module_mode.restore(self.trainer.lightning_module)
def _on_predict_epoch_start(self) -> None:
"""Calls ``on_predict_epoch_start`` hooks."""
trainer = self.trainer
call._call_callback_hooks(trainer, "on_predict_epoch_start")
call._call_lightning_module_hook(trainer, "on_predict_epoch_start")
def _on_predict_epoch_end(self) -> Optional[_PREDICT_OUTPUT]:
"""Calls ``on_predict_epoch_end`` hook.
Returns:
the results for all dataloaders
"""
trainer = self.trainer
call._call_callback_hooks(trainer, "on_predict_epoch_end")
call._call_lightning_module_hook(trainer, "on_predict_epoch_end")
if self.return_predictions:
return self.predictions
return None
def _on_predict_end(self) -> None:
"""Resets previous gradient status and calls ``on_predict_end`` hook."""
if not self.return_predictions:
self._predictions = []
self.epoch_batch_indices = []
trainer = self.trainer
# hook
call._call_callback_hooks(trainer, "on_predict_end")
call._call_lightning_module_hook(trainer, "on_predict_end")
call._call_strategy_hook(trainer, "on_predict_end")
def _verify_dataloader_idx_requirement(self) -> None:
trainer = self.trainer
assert self._combined_loader is not None
_verify_dataloader_idx_requirement(
("predict_step",),
self._combined_loader._mode == "sequential"
and self.num_dataloaders > 1
and not isinstance(self._data_fetcher, _DataLoaderIterDataFetcher),
RunningStage.PREDICTING,
trainer.lightning_module,
)
_verify_dataloader_idx_requirement(
("on_predict_batch_start", "on_predict_batch_end"),
self._combined_loader._mode == "sequential" and self.num_dataloaders > 1,
RunningStage.PREDICTING,
trainer.lightning_module,
)
| _PredictionLoop |
python | doocs__leetcode | solution/3300-3399/3377.Digit Operations to Make Two Integers Equal/Solution.py | {
"start": 15,
"end": 1534
} | class ____:
def __init__(self):
self.sieve = []
def run_sieve(self):
self.sieve = [True] * 100000
self.sieve[0], self.sieve[1] = False, False
for i in range(2, 100000):
if self.sieve[i]:
for j in range(2 * i, 100000, i):
self.sieve[j] = False
def solve(self, n, m):
pq = []
heapq.heappush(pq, (n, n))
visited = set()
while pq:
sum_, cur = heapq.heappop(pq)
if cur in visited:
continue
visited.add(cur)
if cur == m:
return sum_
s = list(str(cur))
for i in range(len(s)):
c = s[i]
if s[i] < '9':
s[i] = chr(ord(s[i]) + 1)
next_ = int(''.join(s))
if not self.sieve[next_] and next_ not in visited:
heapq.heappush(pq, (sum_ + next_, next_))
s[i] = c
if s[i] > '0' and not (i == 0 and s[i] == '1'):
s[i] = chr(ord(s[i]) - 1)
next_ = int(''.join(s))
if not self.sieve[next_] and next_ not in visited:
heapq.heappush(pq, (sum_ + next_, next_))
s[i] = c
return -1
def minOperations(self, n, m):
self.run_sieve()
if self.sieve[n] or self.sieve[m]:
return -1
return self.solve(n, m)
| Solution |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/organization_integration_serverless_functions.py | {
"start": 724,
"end": 887
} | class ____(CamelSnakeSerializer):
action = serializers.ChoiceField(ACTIONS)
target = serializers.CharField()
@region_silo_endpoint
| ServerlessActionSerializer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1235767,
"end": 1236390
} | class ____(sgqlc.types.Type, Node):
"""A GitHub Enterprise Importer (GEI) migration source."""
__schema__ = github_schema
__field_names__ = ("name", "type", "url")
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The migration source name."""
type = sgqlc.types.Field(sgqlc.types.non_null(MigrationSourceType), graphql_name="type")
"""The migration source type."""
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""The migration source URL, for example `https://github.com` or
`https://monalisa.ghe.com`.
"""
| MigrationSource |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/client/utils.py | {
"start": 657,
"end": 1475
} | class ____(NamedTuple):
"""This class gives information about the result of reloading
a Dagster repository location with a GraphQL mutation.
Args:
status (ReloadRepositoryLocationStatus): The status of the reload repository location mutation
failure_type: (Optional[str], optional): the failure type if `status == ReloadRepositoryLocationStatus.FAILURE`.
Can be one of `ReloadNotSupported`, `RepositoryLocationNotFound`, or `RepositoryLocationLoadFailure`. Defaults to None.
message (Optional[str], optional): the failure message/reason if
`status == ReloadRepositoryLocationStatus.FAILURE`. Defaults to None.
"""
status: ReloadRepositoryLocationStatus
failure_type: Optional[str] = None
message: Optional[str] = None
| ReloadRepositoryLocationInfo |
python | astropy__astropy | astropy/utils/masked/tests/test_masked.py | {
"start": 56711,
"end": 56788
} | class ____(TestMaskedArrayRepr, QuantitySetup):
pass
| TestMaskedQuantityRepr |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/hybrid.py | {
"start": 43380,
"end": 43589
} | class ____(Protocol[_T_con]):
def __call__(
s,
cls: Any,
value: Union[_T_con, _ColumnExpressionArgument[_T_con]],
) -> List[Tuple[_DMLColumnArgument, Any]]: ...
| _HybridUpdaterType |
python | doocs__leetcode | solution/1400-1499/1419.Minimum Number of Frogs Croaking/Solution.py | {
"start": 0,
"end": 580
} | class ____:
def minNumberOfFrogs(self, croakOfFrogs: str) -> int:
if len(croakOfFrogs) % 5 != 0:
return -1
idx = {c: i for i, c in enumerate('croak')}
cnt = [0] * 5
ans = x = 0
for i in map(idx.get, croakOfFrogs):
cnt[i] += 1
if i == 0:
x += 1
ans = max(ans, x)
else:
if cnt[i - 1] == 0:
return -1
cnt[i - 1] -= 1
if i == 4:
x -= 1
return -1 if x else ans
| Solution |
python | walkccc__LeetCode | solutions/2280. Minimum Lines to Represent a Line Chart/2280.py | {
"start": 0,
"end": 598
} | class ____:
def minimumLines(self, stockPrices: list[list[int]]) -> int:
ans = 0
stockPrices.sort()
def getSlope(p: list[int], q: list[int]) -> tuple[int, int]:
dx = p[0] - q[0]
dy = p[1] - q[1]
if dx == 0:
return (0, p[0])
if dy == 0:
return (p[1], 0)
d = gcd(dx, dy)
return (dx // d, dy // d)
for i in range(2, len(stockPrices)):
a = getSlope(stockPrices[i - 2], stockPrices[i - 1])
b = getSlope(stockPrices[i - 1], stockPrices[i])
if a != b:
ans += 1
return ans + (len(stockPrices) > 1)
| Solution |
python | huggingface__transformers | tests/generation/test_utils.py | {
"start": 129866,
"end": 235108
} | class ____(unittest.TestCase):
# TODO joao, manuel: remove in v4.62.0
@slow
def test_diverse_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood.
The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People.
"Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports.
The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both."""
bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
outputs = bart_model.generate(
input_ids,
num_beams=4,
num_return_sequences=2,
num_beam_groups=4,
diversity_penalty=2.0,
remove_invalid_values=True,
trust_remote_code=True,
custom_generate="transformers-community/group-beam-search",
)
generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the"
" middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle"
" name, as well as his father's first. It is the first baby for both of them.",
"Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the"
" first child for both. The couple announced the pregnancy in January. The name Silas is the middle"
" name of Timberlake's maternal grandfather. It's also his own middle name.",
],
)
@slow
def test_beam_search_early_stop_heuristic(self):
"""Regression test for #38778 (early stopping needs to be tracked at a batch level)"""
EXPECTED_OUTPUT = (
"<|user|>\nWhat is 3+5?\n<|assistant|>\nThe sum of 3 and 5 is 8. \n\nSo, 3 + 5 = 8. \n\n"
"Let's confirm this using Python code:\n\n```python\n# Define the numbers\nnum1 = 3\nnum2 = 5\n\n"
"# Calculate the sum\nresult = num1 + num2\n\n# Print the result\nprint(result)\n```\n"
"```output\n8\n```\nThe sum of 3 and 5 is \\(\\boxed{8}\\)."
)
model = AutoModelForCausalLM.from_pretrained("allenai/OLMo-2-0425-1B-Instruct").to(torch_device)
tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-2-0425-1B-Instruct", padding_side="left")
generation_config = GenerationConfig(
num_beams=10,
max_new_tokens=256,
length_penalty=2,
)
# batch of 1
question = [{"role": "user", "content": "What is 3+5?"}]
question = tokenizer.apply_chat_template(
question, tokenize=False, add_generation_prompt=True, return_tensors="pt"
)
inputs = tokenizer(question, return_tensors="pt", padding=True).to(torch_device)
outputs = model.generate(**inputs, generation_config=generation_config)
responses = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertEqual(responses[0], EXPECTED_OUTPUT)
# batch of 2
question = [{"role": "user", "content": "What is 3+5?"}]
cot_question = [
{
"role": "user",
"content": "What is 3+5? Explain your reasoning step by step, and provide the final answer at the end.",
}
]
question = tokenizer.apply_chat_template(
question, tokenize=False, add_generation_prompt=True, return_tensors="pt"
)
cot_question = tokenizer.apply_chat_template(
cot_question, tokenize=False, add_generation_prompt=True, return_tensors="pt"
)
inputs = tokenizer([question, cot_question], return_tensors="pt", padding=True).to(torch_device)
outputs = model.generate(**inputs, generation_config=generation_config)
responses = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertEqual(responses[0], EXPECTED_OUTPUT)
def test_max_length_if_input_embeds(self):
article = "Today a dragon flew over Paris."
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
inputs_embeds = model.get_input_embeddings()(input_ids)
max_length = 20
input_len = input_ids.shape[-1]
out_gen = model.generate(input_ids=input_ids, max_length=max_length)
out_gen_embeds = model.generate(inputs_embeds=inputs_embeds, max_length=max_length)
self.assertEqual(out_gen.shape[-1], input_len + out_gen_embeds.shape[-1])
def test_min_length_if_input_embeds(self):
article = "Today a dragon flew over Paris."
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
inputs_embeds = model.get_input_embeddings()(input_ids)
min_length = 10
input_len = input_ids.shape[-1]
out_gen = model.generate(input_ids=input_ids, min_length=min_length)
out_gen_embeds = model.generate(inputs_embeds=inputs_embeds, min_length=min_length)
self.assertEqual(out_gen.shape[-1], input_len + out_gen_embeds.shape[-1])
def test_custom_stopping_criteria_overload_error(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
stopping_criteria = StoppingCriteriaList()
stopping_criteria.append(MaxLengthCriteria(max_length=42))
with self.assertRaises(ValueError):
bart_model.generate(input_ids, stopping_criteria=stopping_criteria)
with self.assertRaises(ValueError):
bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=32)
def test_custom_stopping_criteria(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
class DummyCriteria(StoppingCriteria):
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
return input_ids.shape[-1] >= 20
stopping_criteria = StoppingCriteriaList()
stopping_criteria.append(DummyCriteria())
self.assertEqual(
list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=22).shape),
[1, 20],
)
self.assertEqual(
list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=18).shape),
[1, 18],
)
# TODO (joao): replace `stop_sequence` in the pipeline by the more recent `generate` functionality
def test_stop_sequence_stopping_criteria(self):
prompt = """Hello I believe in"""
generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-bart")
output = generator(prompt, max_new_tokens=10)
self.assertEqual(
output,
[{"generated_text": ("Hello I believe in we we we we we we we we we")}],
)
output = generator(prompt, stop_sequence=" we")
self.assertEqual(output, [{"generated_text": "Hello I believe in we"}])
def test_generate_non_nlp_input_ids_as_kwarg(self):
model = ImageGPTForCausalImageModeling.from_pretrained(
"hf-internal-testing/tiny-random-imagegpt", max_length=10
).to(torch_device)
input_ids = ids_tensor((3, 5), vocab_size=10)
output_sequences_kwargs = model.generate(input_ids=input_ids).cpu()
output_sequences = model.generate(input_ids).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (3, 10))
def test_generate_input_values_as_encoder_kwarg(self):
input_values = floats_tensor((2, 250))
model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder")
model = model.to(torch_device)
output_sequences_kwargs = model.generate(input_values=input_values, max_length=5).cpu()
output_sequences = model.generate(input_values, max_length=5).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (2, 5))
# TODO joao, manuel: remove in v4.62.0
def test_transition_scores_group_beam_search_encoder_decoder(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
eos_token_id=None,
)
generation_config = GenerationConfig(
max_length=10,
num_beams=2,
num_beam_groups=2,
num_return_sequences=2,
diversity_penalty=1.0,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(
input_ids=input_ids,
generation_config=generation_config,
trust_remote_code=True,
custom_generate="transformers-community/group-beam-search",
)
transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices)
transition_scores_sum = transition_scores.sum(-1)
torch.testing.assert_close(transition_scores_sum, outputs.sequences_scores, rtol=1e-3, atol=1e-3)
@slow
def test_green_red_watermark_generation(self):
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
tokenizer.pad_token_id = tokenizer.eos_token_id
model_inputs = tokenizer("I will be", return_tensors="pt").to(torch_device)
input_len = model_inputs["input_ids"].shape[-1]
# generation should work with both input types: WatermarkingConfig or Dict, so let's check it here :)
watermark_config = WatermarkingConfig(bias=2.5, seeding_scheme="selfhash")
_ = model.generate(**model_inputs, watermarking_config=watermark_config, do_sample=False, max_length=15)
# We will not check watermarked text, since we check it in `logits_processors` tests
# Checking if generated ids are as expected fails on different hardware
args = {
"bias": 2.0,
"context_width": 1,
"seeding_scheme": "selfhash",
"greenlist_ratio": 0.25,
"hashing_key": 15485863,
}
output = model.generate(**model_inputs, do_sample=False, max_length=15)
output_selfhash = model.generate(**model_inputs, watermarking_config=args, do_sample=False, max_length=15)
# Check that the detector is detecting watermarked text
detector = WatermarkDetector(model_config=model.config, device=torch_device, watermarking_config=args)
detection_out_watermarked = detector(output_selfhash[:, input_len:], return_dict=True)
detection_out = detector(output[:, input_len:], return_dict=True)
self.assertListEqual(detection_out_watermarked.prediction.tolist(), [True])
self.assertListEqual(detection_out.prediction.tolist(), [False])
"""Check the mean bias inserted by the watermarking algorithm."""
@slow
def test_synthid_text_watermark_generation_mean_expected_bias(self):
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
tokenizer.pad_token_id = tokenizer.eos_token_id
model_inputs = tokenizer("I will be", return_tensors="pt").to(torch_device)
input_len = 5
batch_size = 200
# generation should work with both input types: WatermarkingConfig or Dict, so let's check it here :)
watermark_config = SynthIDTextWatermarkingConfig(keys=[10, 20], ngram_len=5, debug_mode=True)
logits_processor = watermark_config.construct_processor(model.config.vocab_size, torch_device)
mean_g_values_repeats = []
for _ in range(40):
input_ids = torch.zeros(
(batch_size, input_len),
dtype=torch.int64,
device=torch_device,
)
model_inputs = {
"input_ids": input_ids,
"attention_mask": torch.ones_like(input_ids, device=torch_device),
}
output = model.generate(
**model_inputs, watermarking_config=watermark_config, do_sample=True, max_length=500, top_k=1000
)
g_values = logits_processor.compute_g_values(input_ids=output[:, input_len:])
context_repetition_mask = logits_processor.compute_context_repetition_mask(
input_ids=output[:, input_len:],
).unsqueeze(dim=2)
mean_g_values = torch.masked.mean(
g_values,
mask=context_repetition_mask,
dim=0,
keepdim=True,
dtype=torch.float64,
)
mean_g_values_repeats.append(mean_g_values)
mean_g_values = torch.concat(mean_g_values_repeats, dim=0).mean(dim=0)
expected_mean_g_value = logits_processor.expected_mean_g_value(
vocab_size=model.config.vocab_size,
)
atol = 0.03
is_close = torch.isclose(
mean_g_values,
torch.tensor(expected_mean_g_value, dtype=torch.float64),
atol=atol,
rtol=0,
)
self.assertTrue(torch.all(is_close))
@slow
def test_TopH_example_integration(self):
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-3B")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-3B")
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.pad_token_id
encoder_input_str = "Tell me a joke about a monkey."
input_ids = tokenizer(encoder_input_str, return_tensors="pt")
torch.manual_seed(0)
outputs = model.generate(
**input_ids,
eos_token_id=model.config.eos_token_id,
do_sample=True,
temperature=1.0,
top_h=0.4,
max_new_tokens=32,
pad_token_id=tokenizer.pad_token_id,
)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
outputs,
[
'Tell me a joke about a monkey. Why did the monkey go to the doctor? Because he was feeling a little "tropic"!'
],
)
@slow
def test_beam_search_example_integration(self):
# exactly the example provided in the docstrings of beam search, which previously
# failed after directly copying from it. Refer to PR #15555
tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")
encoder_input_str = "translate English to German: How old are you?"
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
# lets run beam search using 3 beams
num_beams = 3
# define decoder start token ids
input_ids = torch.ones((1, 1), device=model.device, dtype=torch.long)
input_ids = input_ids * model.config.decoder_start_token_id
# add encoder_outputs to model keyword arguments
model_kwargs = {"encoder_outputs": model.get_encoder()(encoder_input_ids, return_dict=True)}
outputs = model.generate(
input_ids, num_beams=num_beams, min_length=5, eos_token_id=model.config.eos_token_id, **model_kwargs
)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(outputs, ["Wie alt bist du?"])
@slow
def test_cfg_mixin(self):
model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2").to(torch_device)
tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
input = tokenizer(["The dragon flew over Paris,"], return_tensors="pt", return_attention_mask=True)
input["input_ids"] = input["input_ids"].to(torch_device)
input["attention_mask"] = input["attention_mask"].to(torch_device)
outputs = model.generate(**input, max_new_tokens=32, guidance_scale=1.5)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The dragon flew over Paris, landing in the Rue de la Bastille. The crowd was so excited "
'that they had to leave the city.\n\n"We\'re going to Paris!"\n'
],
)
neg = tokenizer(["France,"], return_tensors="pt", return_attention_mask=True)
neg["input_ids"] = neg["input_ids"].to(torch_device)
neg["attention_mask"] = neg["attention_mask"].to(torch_device)
outputs = model.generate(
**input,
max_new_tokens=32,
guidance_scale=1.5,
negative_prompt_ids=neg["input_ids"],
negative_prompt_attention_mask=neg["attention_mask"],
)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
'The dragon flew over Paris, landing on the pavement.\n\n"Paris!"\n\n"Paris!"\n\n"'
'Paris!"\n\n"Paris!"\n\n"Paris!"\n\n'
],
)
# TODO joao, manuel: remove in v4.62.0
@slow
def test_constrained_beam_search_example_integration(self):
tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")
encoder_input_str = "translate English to German: How old are you?"
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
# lets run beam search using 5 beams
num_beams = 5
# define decoder start token ids
input_ids = torch.ones((1, 1), device=model.device, dtype=torch.long)
input_ids = input_ids * model.config.decoder_start_token_id
# add encoder_outputs to model keyword arguments
model_kwargs = {"encoder_outputs": model.get_encoder()(encoder_input_ids, return_dict=True)}
constraint_str = "sind"
constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # remove eos token
outputs = model.generate(
input_ids,
num_beams=num_beams,
force_words_ids=[constraint_token_ids],
min_length=5,
eos_token_id=model.config.eos_token_id,
trust_remote_code=True,
custom_generate="transformers-community/constrained-beam-search",
**model_kwargs,
)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(outputs, ["Wie alt sind Sie?"])
@slow
def test_per_row_stopping_criteria(self):
text = [
"They completed the challenging puzzle, revealing the hidden",
"Today a dragon flew over France",
"The aroma of freshly baked pizza filled the kitchen",
]
stop_strings = ["secrets"]
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2").to(torch_device)
tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
tokenizer.padding_side = "left"
tokenizer.pad_token_id = tokenizer.eos_token_id
input_ids = tokenizer(text, return_tensors="pt", padding="longest", add_special_tokens=False).input_ids.to(
torch_device
)
# normal generation with one stopping criteria
out = model.generate(input_ids, max_length=15)
out_text = tokenizer.batch_decode(out)
expected_out = [
"They completed the challenging puzzle, revealing the hidden secrets of the world.\n",
"<|endoftext|><|endoftext|><|endoftext|>Today a dragon flew over France and the French government was forced",
"The aroma of freshly baked pizza filled the kitchen with a sense of freshness",
]
self.assertListEqual(out_text, expected_out)
# generation should stop at "secrets" for first batch only, filling the rest with eos tokens
out = model.generate(input_ids, max_length=15, stop_strings=stop_strings, tokenizer=tokenizer)
out_text = tokenizer.batch_decode(out)
expected_out = [
"They completed the challenging puzzle, revealing the hidden secrets<|endoftext|><|endoftext|><|endoftext|><|endoftext|><|endoftext|>",
"<|endoftext|><|endoftext|><|endoftext|>Today a dragon flew over France and the French government was forced",
"The aroma of freshly baked pizza filled the kitchen with a sense of freshness",
]
self.assertListEqual(out_text, expected_out)
def test_batched_decoder_start_id(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
decoder_start_token_id = bart_model.generation_config.decoder_start_token_id
decoder_start_token_id_batch = [decoder_start_token_id] * input_ids.shape[0]
outputs = bart_model.generate(input_ids, decoder_start_token_id=decoder_start_token_id)
outputs_batched_ids = bart_model.generate(input_ids, decoder_start_token_id=decoder_start_token_id_batch)
self.assertListEqual(outputs.tolist(), outputs_batched_ids.tolist())
def test_decoder_start_id_from_config(self):
# Refer to: (#30899)
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
decoder_start_token_id = bart_model.generation_config.decoder_start_token_id
# we should be able to take `decoder_start_token_id` from model's generation config if user passes a `GenerationConfig` type
outputs = bart_model.generate(input_ids, generation_config=GenerationConfig(do_sample=False))
# If the generatoin config has no `decoder_start_token_id` or `bos_token_id`, we will raise an error unless user passes it in config
bart_model.generation_config.decoder_start_token_id = None
bart_model.generation_config.bos_token_id = None
outputs_with_user_id = bart_model.generate(
input_ids,
generation_config=GenerationConfig(do_sample=False, decoder_start_token_id=decoder_start_token_id),
)
self.assertListEqual(outputs.tolist(), outputs_with_user_id.tolist())
with self.assertRaises(ValueError):
outputs = bart_model.generate(input_ids, generation_config=GenerationConfig(do_sample=False))
def test_logits_processor_not_inplace(self):
article = "Today a dragon flew over Paris."
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
out = model.generate(input_ids, output_logits=True, output_scores=True, return_dict_in_generate=True)
out_with_temp = model.generate(
input_ids,
temperature=0.5,
do_sample=True,
output_logits=True,
output_scores=True,
return_dict_in_generate=True,
)
# if no logits processor is used, scores == logits. Otherwise, the processor has to modify the scores
self.assertListEqual(out.logits[-1].tolist(), out.scores[-1].tolist())
self.assertNotEqual(out_with_temp.logits[-1].tolist(), out_with_temp.scores[-1].tolist())
def test_eos_token_id_int_and_list_top_k_top_sampling(self):
generation_kwargs = {
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
expectation = 20
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
text = """Hello, my dog is cute and"""
tokens = tokenizer(text, return_tensors="pt").to(torch_device)
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
# Only some seeds will work both on CPU/GPU for a fixed `expectation` value.
# The selected seed is not guaranteed to work on all torch versions.
torch.manual_seed(1)
eos_token_id = 846
generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
self.assertTrue(expectation == len(generated_tokens[0]))
torch.manual_seed(1)
eos_token_id = [846, 198]
generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
self.assertTrue(expectation == len(generated_tokens[0]))
def test_model_kwarg_encoder_signature_filtering(self):
bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
article = """Hugging Face is a technology company based in New York and Paris."""
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
output = bart_model.generate(input_ids).cpu().numpy()
# Let's create a fake model that has a different signature. In particular, this fake model accepts "foo" as an
# argument. Because "foo" is not in the encoder signature and doesn't start with "decoder_", it will be part of
# the encoder kwargs prior to signature filtering, which would lead to an exception. But filtering kicks in and
# saves the day.
class FakeBart(BartForConditionalGeneration):
def forward(self, input_ids, foo=None, **kwargs):
return super().forward(input_ids, **kwargs)
bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device)
fake_output = bart_model.generate(input_ids, foo="bar").cpu().numpy()
self.assertTrue(np.array_equal(output, fake_output))
# Encoder signature filtering only kicks in if it doesn't accept wildcard kwargs. The following test will fail
# because it doesn't do signature filtering.
class FakeEncoder(bart_model.model.encoder.__class__):
def forward(self, input_ids, **kwargs):
return super().forward(input_ids, **kwargs)
fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared).to(torch_device)
bart_model.model.encoder = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
fake_output = bart_model.generate(input_ids).cpu().numpy()
with self.assertRaises(TypeError):
# FakeEncoder.forward() accepts **kwargs -> no filtering -> type error due to unexpected input "foo"
bart_model.generate(input_ids, foo="bar")
def test_default_max_length_warning(self):
    """Relying on the implicit default `max_length` (20) must warn; any explicit
    length configuration (kwarg or modified generation config) must not."""
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model.generation_config.pad_token_id = tokenizer.eos_token_id

    text = "Hello world"
    tokenized_inputs = tokenizer([text], return_tensors="pt")
    input_ids = tokenized_inputs.input_ids.to(torch_device)

    # Default generation config value of 20 -> emits warning
    with self.assertWarns(UserWarning):
        model.generate(input_ids)

    # Explicitly setting max_length to 20 -> no warning
    with warnings.catch_warnings(record=True) as warning_list:
        model.generate(input_ids, max_length=20)
        self.assertEqual(len(warning_list), 0)

    # Generation config max_length != 20 -> no warning
    with warnings.catch_warnings(record=True) as warning_list:
        # generation_config is modified -> legacy mode is disabled = generation_config takes precedence
        model.generation_config.max_length = 10
        model.generate(input_ids)
        self.assertEqual(len(warning_list), 0)
def test_length_warning_assisted_generation(self):
    """Assisted generation with compatible `min_new_tokens`/`max_length` must not warn
    about infeasible candidate lengths."""
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
    assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model.generation_config.pad_token_id = tokenizer.eos_token_id
    assistant.generation_config.pad_token_id = tokenizer.eos_token_id

    text = "Hello world"
    tokenized_inputs = tokenizer([text], return_tensors="pt")
    input_ids = tokenized_inputs.input_ids.to(torch_device)

    # This should not raise any warning that min length is not feasible in candidate generation
    with warnings.catch_warnings(record=True) as warning_list:
        model.generate(
            input_ids,
            assistant_model=assistant,
            min_new_tokens=10,
            max_length=20,
        )
        self.assertEqual(len(warning_list), 0)
def test_default_assisted_generation(self):
    """A freshly constructed `GenerationConfig` must carry the documented
    assisted-generation defaults."""
    default_config = GenerationConfig()

    # Verify each assisted-generation default, one field at a time.
    self.assertEqual(default_config.num_assistant_tokens, 20)
    self.assertEqual(default_config.num_assistant_tokens_schedule, "constant")
    self.assertEqual(default_config.assistant_confidence_threshold, 0.4)
    self.assertEqual(default_config.is_assistant, False)
def test_generated_length_assisted_generation(self):
    """Assisted generation must respect `min_new_tokens` / `max_new_tokens` bounds,
    in every combination in which they are supplied."""
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
    assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model.generation_config.pad_token_id = tokenizer.eos_token_id
    assistant.generation_config.pad_token_id = tokenizer.eos_token_id

    text = "Hello world"
    tokenized_inputs = tokenizer([text], return_tensors="pt")
    input_ids = tokenized_inputs.input_ids.to(torch_device)
    input_length = input_ids.shape[-1]

    # Both bounds given -> output length inside [input + 10, input + 20].
    out = model.generate(
        input_ids,
        assistant_model=assistant,
        min_new_tokens=10,
        max_new_tokens=20,
    )
    self.assertTrue((10 + input_length) <= out.shape[-1] <= (20 + input_length))

    # Only a lower bound given.
    out = model.generate(
        input_ids,
        assistant_model=assistant,
        min_new_tokens=10,
    )
    self.assertTrue((input_length + 10) <= out.shape[-1])

    # Only an upper bound given.
    out = model.generate(
        input_ids,
        assistant_model=assistant,
        max_new_tokens=7,
    )
    self.assertTrue(out.shape[-1] <= (input_length + 7))
def test_model_kwarg_assisted_decoding_decoder_only(self):
    """Extra model kwargs (here `token_type_ids`) must be forwarded to the model during
    assisted decoding, yielding the same output as regular decoding with those kwargs."""
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model.generation_config.pad_token_id = tokenizer.eos_token_id

    text = "Hello world"
    tokenized_inputs = tokenizer([text], return_tensors="pt")
    input_ids = tokenized_inputs.input_ids.to(torch_device)

    # Traditional way of generating text
    outputs_normal = model.generate(input_ids)
    self.assertEqual(outputs_normal.shape, (1, 20))

    # Should be different with token_type_ids
    outputs_tti = model.generate(
        input_ids,
        token_type_ids=torch.zeros(input_ids.shape, dtype=torch.long).to(torch_device),
    )
    with self.assertRaises(AssertionError):
        self.assertListEqual(outputs_tti.tolist(), outputs_normal.tolist())

    # Assistant model
    assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
    assistant.config.pad_token_id = tokenizer.eos_token_id

    # If assisted generation passes model_kwargs correctly, should be same as previous
    outputs_assisted = model.generate(
        input_ids,
        token_type_ids=torch.zeros(input_ids.shape, dtype=torch.long).to(torch_device),
        assistant_model=assistant,
    )
    self.assertListEqual(outputs_assisted.tolist(), outputs_tti.tolist())
def test_assisted_decoding_num_assistant_tokens_heuristic_schedule(self):
    # This test ensures that the assisted generation num_assistant_tokens 'heuristic' schedule works properly.
    prompt = "Alice and Bob"
    checkpoint = "EleutherAI/pythia-160m-deduped"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    inputs = tokenizer(prompt, return_tensors="pt")

    model = AutoModelForCausalLM.from_pretrained(checkpoint)
    # The model serves as its own assistant, so the candidate acceptance rate is deterministic.
    assistant_model = model
    assistant_model.generation_config.num_assistant_tokens = 5
    assistant_model.generation_config.num_assistant_tokens_schedule = "heuristic"
    generation_kwargs = {
        "eos_token_id": -1,  # disable EOS so exactly max_new_tokens are produced
        "max_new_tokens": 5,
        "do_sample": False,
        "assistant_model": assistant_model,
    }
    model.generate(**inputs, **generation_kwargs)
    # update_candidate_strategy is called only once and therefore, assistant_model.generation_config.num_assistant_tokens should be either 4 or 7
    self.assertTrue(assistant_model.generation_config.num_assistant_tokens in (4, 7))
def test_assisted_decoding_num_assistant_tokens_heuristic_transient_schedule(self):
    # This test ensures that the assisted generation num_assistant_tokens 'heuristic' schedule works properly.
    prompt = "Alice and Bob"
    checkpoint = "EleutherAI/pythia-160m-deduped"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    inputs = tokenizer(prompt, return_tensors="pt")

    model = AutoModelForCausalLM.from_pretrained(checkpoint)
    # The model serves as its own assistant; "heuristic_transient" must not persist budget changes.
    assistant_model = model
    assistant_model.generation_config.num_assistant_tokens = 5
    assistant_model.generation_config.num_assistant_tokens_schedule = "heuristic_transient"
    generation_kwargs = {
        "eos_token_id": -1,  # disable EOS so exactly max_new_tokens are produced
        "max_new_tokens": 5,
        "do_sample": False,
        "assistant_model": assistant_model,
    }
    model.generate(**inputs, **generation_kwargs)
    # update_candidate_strategy is called once but assistant_model.generation_config.num_assistant_tokens should stay 5
    self.assertEqual(assistant_model.generation_config.num_assistant_tokens, 5)
@slow
def test_validate_assistant(self):
    """Validation of assistant-model compatibility in `generate`:
    an assistant must either share the main model's encoder or use the same tokenizer,
    otherwise a ValueError is raised."""
    # Generate a random sample:
    inputs = np.random.rand(160000)

    # Load a main encoder-decoder model:
    model_id = "openai/whisper-large-v2"
    processor = AutoProcessor.from_pretrained(model_id)
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        model_id,
        use_safetensors=True,
    )
    model.to(torch_device)

    # process the input:
    features = processor(inputs, return_tensors="pt").to(torch_device)

    # Load an encoder-decoder assistant with same encoder as the main model:
    assistant_distil_model_id = "distil-whisper/distil-large-v2"
    assistant_seq_to_seq = AutoModelForSpeechSeq2Seq.from_pretrained(
        assistant_distil_model_id,
        use_safetensors=True,
    ).to(torch_device)
    self.assertTrue(model.generate(**features, assistant_model=assistant_seq_to_seq).sum())

    # Load its decoder only version:
    assistant_causal_lm = AutoModelForCausalLM.from_pretrained(
        assistant_distil_model_id,
        use_safetensors=True,
    ).to(torch_device)
    self.assertTrue(model.generate(**features, assistant_model=assistant_causal_lm).sum())

    # Load an encoder-decoder assistant with a different encoder than the main model:
    assistant_distil_model_id = "openai/whisper-tiny"
    assistant_seq_to_seq = AutoModelForSpeechSeq2Seq.from_pretrained(
        assistant_distil_model_id,
        use_safetensors=True,
    ).to(torch_device)
    self.assertTrue(model.generate(**features, assistant_model=assistant_seq_to_seq).sum())

    # Load its decoder only version:
    assistant_causal_lm = AutoModelForCausalLM.from_pretrained(
        assistant_distil_model_id,
        use_safetensors=True,
    ).to(torch_device)
    # It will raise an error as the encoder of the main and assistant model are not compatible:
    with self.assertRaises(ValueError):
        model.generate(**features, assistant_model=assistant_causal_lm)

    # Load an encoder-decoder model with a different tokenizer than the main model:
    assistant_distil_model_id = "hf-internal-testing/tiny-random-SeamlessM4Tv2ForSpeechToText"
    assistant_seq_to_seq = AutoModelForSpeechSeq2Seq.from_pretrained(
        assistant_distil_model_id,
    ).to(torch_device)
    # This should raise an error as the main and assistant model don't use the same tokenizer:
    with self.assertRaises(ValueError):
        model.generate(**features, assistant_model=assistant_seq_to_seq)
def test_compare_unprocessed_logit_scores(self):
    """Unprocessed logits returned by `generate(output_logits=True)` must match the
    next-token logits of a plain forward pass on the same prompt."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    text = "generate yes or no: "
    input_ids = tokenizer([text], return_tensors="pt").input_ids.to(torch_device)
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)

    with torch.no_grad():
        # Get logits for the next token from fwd pass
        logits_fwd = model(input_ids).logits[:, -1, :][0]

    # Get logits for the next token from generate function (sampling doesn't matter: the
    # returned logits are captured before any warping/processing)
    outputs = model.generate(
        input_ids=input_ids,
        return_dict_in_generate=True,
        output_logits=True,
        max_new_tokens=1,
        do_sample=True,
    )
    logits_gen = outputs.logits[0][0]

    # assert that unprocessed logits from generate() are same as those from model eval()
    # NOTE: `torch.testing.assert_allclose` is deprecated (removed in torch >= 2.x);
    # `assert_close` is the supported replacement.
    torch.testing.assert_close(logits_fwd.tolist(), logits_gen.tolist())
def test_return_unprocessed_logit_scores(self):
    """`output_logits=True` must return raw (unwarped) logits that form a sane
    next-token distribution for a yes/no prompt."""
    # tell model to generate text and return unprocessed/unwarped logit scores
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    text = "generate yes or no: "
    input_ids = tokenizer([text], return_tensors="pt").input_ids.to(torch_device)
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)

    outputs = model.generate(
        input_ids=input_ids, return_dict_in_generate=True, output_logits=True, max_new_tokens=3
    )

    # perform dummy check if unpreprocessed logits make sense.
    # do preselection on high probabilities; find scores of y and n tokens
    probs_all = torch.nn.functional.softmax(outputs.logits[2][0], dim=-1)
    indices = torch.argwhere(probs_all > 0.001)
    indices = indices[:, -1]
    tokens_max = tokenizer.batch_decode(indices, skip_special_tokens=True)
    probs_max = probs_all[probs_all > 0.001]

    self.assertTrue(len(indices) >= 2)
    next_token_dict = {str(t): p for t, p in zip(tokens_max, probs_max)}
    self.assertTrue("n" in next_token_dict)
    self.assertTrue("y" in next_token_dict)
    y_prob = next_token_dict["y"]
    n_prob = next_token_dict["n"]

    # Both candidate tokens must carry non-trivial, valid probability mass.
    self.assertTrue(y_prob > 0.001 and n_prob > 0.001)
    self.assertTrue(y_prob <= 1.0 and n_prob <= 1.0)
@slow
@require_torch_multi_accelerator
def test_assisted_decoding_in_different_accelerator(self):
    """Assisted decoding must work when the main model and the assistant live on
    different accelerator devices."""
    device_0 = f"{torch_device}:0" if torch_device != "cpu" else "cpu"
    device_1 = f"{torch_device}:1" if torch_device != "cpu" else "cpu"
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM").to(device_0)
    assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM").to(
        device_1
    )
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
    model.config.pad_token_id = tokenizer.eos_token_id
    assistant.config.pad_token_id = tokenizer.eos_token_id

    text = "Hello world"
    tokenized_inputs = tokenizer([text], return_tensors="pt")
    input_ids = tokenized_inputs.input_ids.to(torch_device)
    input_length = input_ids.shape[-1]

    out = model.generate(
        input_ids,
        assistant_model=assistant,
        max_new_tokens=20,
    )
    # Output length must stay within the requested budget.
    self.assertTrue(input_length <= out.shape[-1] <= input_length + 20)
@slow
@require_torch_accelerator
def test_assisted_decoding_model_in_accelerator_assistant_in_cpu(self):
    """Assisted decoding must work when the main model is on an accelerator and the
    assistant runs on CPU."""
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM").to(
        torch_device
    )
    assistant = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM").to(
        "cpu"
    )
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
    model.config.pad_token_id = tokenizer.eos_token_id
    assistant.config.pad_token_id = tokenizer.eos_token_id

    text = "Hello world"
    tokenized_inputs = tokenizer([text], return_tensors="pt")
    input_ids = tokenized_inputs.input_ids.to(torch_device)
    input_length = input_ids.shape[-1]

    out = model.generate(
        input_ids,
        assistant_model=assistant,
        max_new_tokens=20,
    )
    # Output length must stay within the requested budget.
    self.assertTrue(input_length <= out.shape[-1] <= input_length + 20)
def test_special_tokens_fall_back_to_model_default(self):
    """Special-token resolution order in `generate`: an explicitly-set token in the
    passed generation config wins; an unset (None) token falls back to the value in
    `model.generation_config`, and tracking that fallback is live (not cached)."""
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM").to(
        torch_device
    )
    test_bos_id = 50

    # Sanity-check: the model has a BOS token set, and the first generated token is a BOS token
    gen_output = model.generate()
    self.assertTrue(model.generation_config.bos_token_id is not None)
    self.assertTrue(model.generation_config.bos_token_id == gen_output[0, 0])

    # If we pass a generation config **with** a BOS token, `generate` will use it
    generation_config = GenerationConfig(bos_token_id=test_bos_id)
    gen_output = model.generate(generation_config=generation_config)
    self.assertFalse(model.generation_config.bos_token_id == gen_output[0, 0])
    self.assertTrue(generation_config.bos_token_id == gen_output[0, 0])
    self.assertTrue(test_bos_id == gen_output[0, 0])

    # If we pass a generation config **without** a BOS token, `generate` will fetch the BOS token from
    # `model.generation_config`
    generation_config = GenerationConfig(bos_token_id=None)
    gen_output = model.generate(generation_config=generation_config)
    self.assertTrue(model.generation_config.bos_token_id == gen_output[0, 0])
    self.assertFalse(test_bos_id == gen_output[0, 0])
    self.assertTrue(generation_config.bos_token_id is None)

    # Changing `model.generation_config` will affect fallback behavior
    model.generation_config.bos_token_id = test_bos_id
    gen_output = model.generate(generation_config=generation_config)
    self.assertTrue(model.generation_config.bos_token_id == gen_output[0, 0])
    self.assertTrue(test_bos_id == gen_output[0, 0])
    self.assertTrue(generation_config.bos_token_id is None)
def test_speculative_decoding_equals_regular_decoding(self):
    """Greedy speculative decoding (different draft/target tokenizers) must produce
    exactly the same tokens as plain greedy decoding on the target model."""
    draft_name = "double7/vicuna-68m"
    target_name = "Qwen/Qwen2-0.5B-Instruct"

    draft_model = AutoModelForCausalLM.from_pretrained(draft_name)
    target_model = AutoModelForCausalLM.from_pretrained(target_name)

    assistant_tokenizer = AutoTokenizer.from_pretrained(draft_name)
    target_tokenizer = AutoTokenizer.from_pretrained(target_name)

    # Fuzzed input: random prompt length, token budget, and token ids in [50, 150).
    # NOTE(review): no manual seed is set, so each run exercises a different prompt —
    # presumably intentional fuzzing; confirm before making deterministic.
    prompt_size = torch.randint(low=20, high=100, size=(1,))
    max_new_tokens = torch.randint(low=10, high=50, size=(1,))
    input_ids = (torch.rand(1, prompt_size[0]) * 100).to(int) + 50

    max_new_tokens_item = max_new_tokens[0].item()
    expected_out = target_model.generate(input_ids, do_sample=False, max_new_tokens=max_new_tokens_item)
    predicted_out = target_model.generate(
        input_ids,
        do_sample=False,
        max_new_tokens=max_new_tokens_item,
        assistant_model=draft_model,
        tokenizer=target_tokenizer,
        assistant_tokenizer=assistant_tokenizer,
    )

    self.assertEqual(expected_out.shape, predicted_out.shape)
    self.assertTrue((expected_out == predicted_out).all().item())
@pytest.mark.generate
@require_torch_multi_accelerator
def test_generate_with_static_cache_multi_accelerator(self):
    """
    Tests if the static cache has been set correctly and if generate works correctly when we are using multi-accelerators.
    """
    # need to split manually as auto doesn't work well with unbalanced model
    device_map = {"model.embed_tokens": 0, "model.layers.0": 0, "model.layers.1": 1, "model.norm": 1, "lm_head": 0}
    model = AutoModelForCausalLM.from_pretrained(
        "hf-internal-testing/tiny-random-MistralForCausalLM", device_map=device_map
    )
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")

    text = "Hello world"
    tokenized_inputs = tokenizer([text], return_tensors="pt")
    input_ids = tokenized_inputs.input_ids.to(torch_device)

    generation_kwargs = {
        "max_new_tokens": 20,
        "cache_implementation": "static",
        "return_dict_in_generate": True,  # Required to return `past_key_values`
    }

    results = model.generate(input_ids, **generation_kwargs)
    self.assertTrue(isinstance(results.past_key_values, StaticCache))

    # check device of each layer: cache tensors must live on the same device as their layer
    keys_0 = results.past_key_values.layers[0].keys
    values_0 = results.past_key_values.layers[0].values
    self.assertTrue(keys_0.device == values_0.device == torch.device(0))

    keys_1 = results.past_key_values.layers[1].keys
    values_1 = results.past_key_values.layers[1].values
    self.assertTrue(keys_1.device == values_1.device == torch.device(1))
@pytest.mark.generate
@require_torch_multi_accelerator
def test_generate_multi_accelerator_causal_mask(self):
    """
    Tests that cache position device doesn't clash with causal mask device when we are using multi-accelerators.
    In real life happens only when multimodal encoder size is big, so `embed_tokens` gets allocated to the next device.
    The error will be triggered whenever a batched input is used, so that `causal_mask` is actually prepared instead of
    being `None`.
    """
    # need to split manually as auto doesn't work well with unbalanced model
    device_map = {
        "visual": 0,
        "model.embed_tokens": 1,
        "model.layers.0": 1,
        "model.layers.1": 1,
        "model.rotary_emb": 1,
        "model.norm.weight": 1,
        "lm_head": 1,
    }
    model = AutoModelForImageTextToText.from_pretrained(
        "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration", device_map=device_map
    )
    processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration")

    # Batched (padded) input forces the causal mask to be materialized — the trigger condition.
    text = ["Hello world", "Today I went to the supermarket to buy"]
    inputs = processor(text=text, padding=True, return_tensors="pt").to(torch_device)
    # Success criterion is simply that generation does not crash with a device mismatch.
    _ = model.generate(**inputs, max_new_tokens=20)
@pytest.mark.generate
@require_torch_multi_accelerator
def test_init_static_cache_multi_accelerator(self):
    """
    Tests if the static cache has been set correctly when we initialize it manually in a multi-accelerator setup.
    """
    # need to split manually as auto doesn't work well with unbalanced model
    device_map = {"model.embed_tokens": 0, "model.layers.0": 0, "model.layers.1": 1, "model.norm": 1, "lm_head": 0}
    model = AutoModelForCausalLM.from_pretrained(
        "hf-internal-testing/tiny-random-MistralForCausalLM", device_map=device_map
    )
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")

    text = "Hello world"
    tokenized_inputs = tokenizer([text], return_tensors="pt")
    input_ids = tokenized_inputs.input_ids.to(torch_device)

    generation_kwargs = {
        "max_new_tokens": 20,
        "return_dict_in_generate": True,  # Required to return `past_key_values`
    }

    # TODO: We need to raise a warning in case the cache is not set correctly
    # with self.assertRaisesRegex(ValueError, "If you are manually initializing the cache"):
    #     past_key_values = StaticCache(
    #         config=model.config, max_batch_size=1, max_cache_len=30, device=torch_device, dtype=model.dtype
    #     )
    #     results = model.generate(input_ids, past_key_values=past_key_values, **generation_kwargs)

    # Manually initialized cache: device placement should be resolved per layer at use time.
    past_key_values = StaticCache(config=model.config, max_cache_len=30)
    results = model.generate(input_ids, past_key_values=past_key_values, **generation_kwargs)

    # check device of each layer: cache tensors must live on the same device as their layer
    keys_0 = results.past_key_values.layers[0].keys
    values_0 = results.past_key_values.layers[0].values
    self.assertTrue(keys_0.device == values_0.device == torch.device(0))

    keys_1 = results.past_key_values.layers[1].keys
    values_1 = results.past_key_values.layers[1].values
    self.assertTrue(keys_1.device == values_1.device == torch.device(1))
def test_prepare_inputs_for_generation_decoder_llm(self):
    """Tests GenerationMixin.prepare_inputs_for_generation against expected usage with decoder-only llms."""
    config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM")
    model = model.to(torch_device)

    # 1. Sanity check: the model's `prepare_inputs_for_generation` comes from `GenerationMixin`
    self.assertTrue("GenerationMixin" in str(model.prepare_inputs_for_generation))

    # 2. If we pass input ids by themselves, we should get back the same input ids
    input_ids = torch.tensor([[1, 2, 3], [4, 5, 6]]).to(torch_device)
    model_inputs = model.prepare_inputs_for_generation(input_ids)
    self.assertTrue(torch.all(model_inputs["input_ids"] == input_ids))

    # 3. If we pass the attention mask too, we will get back the attention mask and position ids built from it
    attention_mask = torch.tensor([[1, 1, 1], [1, 1, 1]]).to(torch_device)
    model_inputs = model.prepare_inputs_for_generation(input_ids, attention_mask=attention_mask)
    self.assertTrue(torch.all(model_inputs["attention_mask"] == attention_mask))
    self.assertTrue(model_inputs["position_ids"].shape == input_ids.shape)

    # 4. `use_cache` (and other kwargs) are forwarded
    self.assertFalse("use_cache" in model_inputs)  # From the previous input, there is no `use_cache`
    model_inputs = model.prepare_inputs_for_generation(input_ids, use_cache=True, foo="bar")
    self.assertTrue(model_inputs["use_cache"] is True)
    self.assertTrue(model_inputs["foo"] == "bar")

    # 5. When we pass a cache, we discard data related to already seen tokens in some tensors. We are now also
    # forced to pass a correctly prepared `cache_positions` to slice the data accordingly.
    init_input_ids = input_ids[:, :2]
    dynamic_cache = DynamicCache(config=config)
    dynamic_cache = model(init_input_ids, past_key_values=dynamic_cache).past_key_values
    with self.assertRaises(AttributeError):  # past_key_values + no cache_position -> exception
        model_inputs = model.prepare_inputs_for_generation(input_ids, past_key_values=dynamic_cache)

    # Build cache positions for the tokens NOT yet in the cache (the last one here).
    cache_position = torch.arange(input_ids.shape[-1], dtype=torch.long).to(torch_device)
    cache_position = cache_position[dynamic_cache.get_seq_length() :]
    model_inputs = model.prepare_inputs_for_generation(
        input_ids, past_key_values=dynamic_cache, cache_position=cache_position, attention_mask=attention_mask
    )
    self.assertTrue("past_key_values" in model_inputs)
    self.assertTrue(torch.all(model_inputs["cache_position"] == cache_position))
    self.assertTrue(model_inputs["input_ids"].shape[-1] == 1)  # 1 = 3 fed tokens - 2 tokens in the cache
    self.assertTrue(model_inputs["position_ids"].shape[-1] == 1)
    self.assertTrue(model_inputs["attention_mask"].shape[-1] == 3)  # we still need the full attention mask!

    # 6. If we pass a `static_cache`, the attention mask will be prepared as a static shape 4D mask
    max_cache_len = 10
    batch_size = 2
    query_length = input_ids.shape[-1] - init_input_ids.shape[-1]
    static_cache = StaticCache(config=config, max_cache_len=max_cache_len)
    static_cache = model(init_input_ids, past_key_values=static_cache).past_key_values
    model_inputs = model.prepare_inputs_for_generation(
        input_ids, past_key_values=static_cache, cache_position=cache_position, attention_mask=attention_mask
    )
    self.assertTrue("past_key_values" in model_inputs)
    self.assertTrue(list(model_inputs["attention_mask"].shape) == [batch_size, 1, query_length, max_cache_len])

    # 7. We can also pass `inputs_embeds` as the embedded prompt. Because `generate` will append its result to
    # `input_ids` and the models will only accept one of the two inputs (`input_ids` or `inputs_embeds`), we
    # a) must use the cache b) must expect `input_ids` after the prompt is processed
    init_inputs_embeds = model.get_input_embeddings()(init_input_ids)
    init_cache_positions = torch.arange(init_input_ids.shape[-1], dtype=torch.long).to(torch_device)
    empty_cache = DynamicCache(config=config)

    # Prompt processing: embeds in, ids out (dropped)
    model_inputs = model.prepare_inputs_for_generation(
        init_input_ids,
        past_key_values=empty_cache,
        inputs_embeds=init_inputs_embeds,
        cache_position=init_cache_positions,
    )
    self.assertTrue(model_inputs["input_ids"] is None)
    self.assertTrue(model_inputs["inputs_embeds"] is not None)

    # After prompt processing: ids in, embeds out (dropped)
    model_inputs = model.prepare_inputs_for_generation(
        input_ids, past_key_values=dynamic_cache, inputs_embeds=init_inputs_embeds, cache_position=cache_position
    )
    self.assertTrue(model_inputs["input_ids"] is not None)
    self.assertTrue(model_inputs["inputs_embeds"] is None)
def test_prepare_inputs_for_generation_encoder_decoder_llm(self):
    """
    Same as `test_prepare_inputs_for_generation_decoder_llm` but for encoder-decoder models. Main difference: we
    should look for `decoder_input_ids`, instead of `input_ids`.
    """
    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
    model = model.to(torch_device)

    # 1. Sanity check: the model's `prepare_inputs_for_generation` comes from `GenerationMixin`
    self.assertTrue("GenerationMixin" in str(model.prepare_inputs_for_generation))

    # 2. If we pass input ids by themselves, we should get back the same input ids -- with the encoder-decoder key
    decoder_input_ids = torch.tensor([[1, 2, 3], [4, 5, 6]]).to(torch_device)
    model_inputs = model.prepare_inputs_for_generation(decoder_input_ids)
    self.assertTrue(torch.all(model_inputs["decoder_input_ids"] == decoder_input_ids))

    # 3. If we pass the attention mask too, we will get back the attention mask. Encoder-decoder models usually
    # don't use `position_ids`
    decoder_attention_mask = torch.tensor([[1, 1, 1], [1, 1, 1]]).to(torch_device)
    model_inputs = model.prepare_inputs_for_generation(
        decoder_input_ids, decoder_attention_mask=decoder_attention_mask
    )
    self.assertTrue(torch.all(model_inputs["decoder_attention_mask"] == decoder_attention_mask))
    self.assertTrue("position_ids" not in model_inputs)

    # 4. `use_cache` (and other kwargs, like the encoder outputs) are forwarded
    self.assertFalse("use_cache" in model_inputs)  # From the previous input, there is no `use_cache`
    model_inputs = model.prepare_inputs_for_generation(decoder_input_ids, use_cache=True, encoder_outputs="foo")
    self.assertTrue(model_inputs["use_cache"] is True)
    self.assertTrue(model_inputs["encoder_outputs"] == "foo")
    # See the decoder-only test for more corner cases. The code is the same, so we don't repeat it here.
@pytest.mark.torch_compile_test
def test_generate_compile_fullgraph_tiny(self):
    """
    Tests that we can call end-to-end generation with a tiny model (i.e. doesn't crash)
    NOTE: this test is quite slow (~20s on a consumer desktop), but it is important that we keep it as part of the
    non-slow tests to prevent regressions!
    """
    model = AutoModelForCausalLM.from_pretrained(
        "hf-internal-testing/tiny-random-LlamaForCausalLM", dtype=torch.bfloat16, device_map="auto"
    )
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM")

    # compile generate
    compiled_generate = torch.compile(model.generate, fullgraph=True, mode="reduce-overhead")

    # compiled generate does NOT accept parameterization except a) model inputs b) a generation config
    generation_config = copy.deepcopy(model.generation_config)
    generation_config.pad_token_id = model.config.eos_token_id

    model_inputs = tokenizer(["Write a poem about the market crashing in summer"], return_tensors="pt")
    model_inputs = model_inputs.to(model.device)
    gen_out = compiled_generate(**model_inputs, generation_config=generation_config)
    self.assertTrue(gen_out.shape[1] > model_inputs["input_ids"].shape[1])  # some text was generated
@require_read_token
@slow
def test_assisted_generation_early_exit(self):
    """
    Tests that assisted generation with early exit works as expected. Under the hood, this has complex cache
    manipulation, which will cause the test to fail if something goes wrong there.
    """
    expected_output = "Alice and Bob are playing a game of poker. Alice has a pair of 8s and Bob has a pair"

    prompt = "Alice and Bob"
    checkpoint = "facebook/layerskip-llama3.2-1B"

    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    inputs = tokenizer(prompt, return_tensors="pt").to(torch_device)

    model = AutoModelForCausalLM.from_pretrained(checkpoint).to(torch_device)
    # Baseline: greedy decoding without early exit must match the pinned text.
    original_outputs = model.generate(**inputs, do_sample=False, max_new_tokens=20)
    original_decoded = tokenizer.batch_decode(original_outputs, skip_special_tokens=True)
    self.assertEqual(original_decoded, [expected_output])

    # Early-exit assisted generation (drafting from layer 4) must reproduce the same text.
    outputs_assisted = model.generate(**inputs, assistant_early_exit=4, do_sample=False, max_new_tokens=20)
    decoded_assisted = tokenizer.batch_decode(outputs_assisted, skip_special_tokens=True)
    self.assertEqual(decoded_assisted, [expected_output])
@slow
def test_beam_search_advanced_stopping_criteria(self):
    """
    Tests that beam search works with a stopping criteria that is not max length or EOS token. Prior to the beam
    search vectorization PR (#35802), beam search was not accepting other stopping criteria. Test inspired on
    the original issue (#34843).
    """
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct").to(torch_device)

    prompt = (
        "Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. "
        "How many clips did Natalia sell altogether in April and May?"
    )
    tokens = tokenizer(prompt, return_tensors="pt").to(torch_device)
    generation_config = GenerationConfig(num_beams=3, do_sample=False, length_penalty=1.0, max_new_tokens=100)

    # This particular prompt should result in a ":" being present in the answer
    out = model.generate(**tokens, generation_config=generation_config, tokenizer=tokenizer)
    output_text = tokenizer.decode(out[0], skip_special_tokens=True)
    last_non_special_token_decoded = tokenizer.decode(out[out != tokenizer.pad_token_id][-1])
    # Without the stop string, ":" appears somewhere but NOT at the end of the output.
    self.assertTrue(":" in output_text)
    self.assertFalse(":" in output_text[-5:])
    self.assertFalse(":" in last_non_special_token_decoded)

    # Adding an advanced stopping criteria: text generation should stop when a ":" is generated.
    # Note that:
    # 1 - the text up to ":" doesn't have to be the same, it can belong to a different beam
    # 2 - ":" may not be the last char, but it must be in the last non-special token
    generation_config.stop_strings = ":"
    out = model.generate(**tokens, generation_config=generation_config, tokenizer=tokenizer)
    output_text = tokenizer.decode(out[0], skip_special_tokens=True)
    last_non_special_token_decoded = tokenizer.decode(out[out != tokenizer.pad_token_id][-1])
    self.assertTrue(":" in output_text)
    self.assertTrue(":" in output_text[-5:])
    self.assertTrue(":" in last_non_special_token_decoded)
def test_max_time(self):
    """`max_time` must stop generation close to the requested wall-clock budget.

    For each decoding mode (sampling, greedy, beam search), generation with
    `max_time=MAX_TIME` should take at least MAX_TIME but less than 1.5x MAX_TIME;
    with no limit, the same request should exceed 1.5x MAX_TIME.
    """
    tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
    model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2")
    model.to(torch_device)

    torch.manual_seed(0)
    tokenized = tokenizer("Today is a nice day and", return_tensors="pt", return_token_type_ids=True)
    input_ids = tokenized.input_ids.to(torch_device)

    MAX_TIME = 0.1
    MAX_LENGTH = 64

    def _timed_generate(**generate_kwargs):
        # Helper: run one `generate` call and return its wall-clock duration.
        start = datetime.datetime.now()
        model.generate(input_ids, max_length=MAX_LENGTH, **generate_kwargs)
        return datetime.datetime.now() - start

    # sampling on
    duration = _timed_generate(do_sample=True, max_time=MAX_TIME)
    self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
    self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))

    # sampling off
    duration = _timed_generate(do_sample=False, max_time=MAX_TIME)
    self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
    self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))

    # beam search
    duration = _timed_generate(do_sample=False, num_beams=2, max_time=MAX_TIME)
    self.assertGreater(duration, datetime.timedelta(seconds=MAX_TIME))
    self.assertLess(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))

    # sanity check: no time limit
    duration = _timed_generate(do_sample=False, max_time=None)
    self.assertGreater(duration, datetime.timedelta(seconds=1.5 * MAX_TIME))
def test_validate_generation_inputs(self):
    """Tests validation of inputs to `generate`"""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

    input_ids = tokenizer("Hello world", return_tensors="pt").input_ids

    # typos are quickly detected (the correct argument is `do_sample`)
    with self.assertRaisesRegex(ValueError, "do_samples"):
        model.generate(input_ids, do_samples=True)

    # arbitrary arguments that will not be used anywhere are also not accepted
    with self.assertRaisesRegex(ValueError, "foo"):
        model.generate(input_ids, foo="bar")

    # however, valid model_kwargs are accepted
    model.generate(input_ids, attention_mask=torch.tensor(np.zeros_like(input_ids)))
def test_custom_logits_processor(self):
    """Tests that custom logits processors can be used in `generate`, and that redundant arguments are caught."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-bart", min_length=1)
    input_ids = tokenizer(
        """Justin Timberlake and Jessica Biel, welcome to parenthood.""", return_tensors="pt"
    ).input_ids

    custom_processors = LogitsProcessorList([MinLengthLogitsProcessor(min_length=10, eos_token_id=0)])

    # it should not be allowed to both define `min_length` via config and `logits_processor` list
    with self.assertRaises(ValueError):
        model.generate(input_ids, logits_processor=custom_processors, min_length=10)
    model.generate(input_ids, logits_processor=custom_processors)
def test_transition_scores_greedy_search(self):
    """Test that `compute_transition_scores` is working as expected with greedy search."""
    articles = ["Justin Timberlake", "Michael Phelps"]
    # Left padding keeps the generated continuation right-aligned for both prompts.
    tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2", padding_side="left")
    tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
    # Disable EOS so generation always runs the full `max_new_tokens` steps.
    model.generation_config.eos_token_id = None
    input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids
    model = model.to(torch_device)
    input_ids = input_ids.to(torch_device)

    outputs = model.generate(
        input_ids=input_ids,
        max_new_tokens=5,
        pad_token_id=tokenizer.eos_token_id,
        return_dict_in_generate=True,
        output_scores=True,
    )

    transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores)
    transition_scores = transition_scores.cpu().numpy()

    # Hard-coded regression values: raw (unnormalized) logits of the greedily-picked tokens.
    expected_scores = np.array(
        [
            [-57.8844, -60.45698, -70.16364, -65.50791, -66.35648],
            [-54.417572, -60.216614, -62.661243, -58.621933, -58.298683],
        ]
    )
    self.assertTrue(np.allclose(transition_scores, expected_scores, atol=1e-3))
def test_transition_scores_greedy_search_normalized(self):
    """
    Test that `compute_transition_scores` is working as expected with greedy search, with `normalize_logits=True`
    """
    articles = ["Justin Timberlake", "Michael Phelps"]
    # Left padding keeps the generated continuation right-aligned for both prompts.
    tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2", padding_side="left")
    tokenizer.pad_token = tokenizer.eos_token

    model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
    # Disable EOS so generation always runs the full `max_new_tokens` steps.
    model.generation_config.eos_token_id = None
    input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids
    model = model.to(torch_device)
    input_ids = input_ids.to(torch_device)

    outputs = model.generate(
        input_ids=input_ids,
        max_new_tokens=5,
        pad_token_id=tokenizer.eos_token_id,
        return_dict_in_generate=True,
        output_scores=True,
    )

    transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, normalize_logits=True)
    transition_scores = transition_scores.cpu().numpy()

    # Hard-coded regression values: log-probabilities (normalized logits) of the selected tokens.
    expected_scores = np.array(
        [
            [-2.538938, -2.2694316, -2.1580915, -1.572299, -2.6719835],
            [-1.8826028, -2.2461371, -1.7556462, -2.9644494, -1.7996008],
        ]
    )
    self.assertTrue(np.allclose(transition_scores, expected_scores, atol=1e-3))
def test_transition_scores_beam_search_encoder_decoder(self):
    """
    Test that `compute_transition_scores` is working as expected with beam search and encoder-decoder models
    """
    articles = [
        "Justin Timberlake and Jessica Biel, welcome to parenthood.",
        "Michael Phelps is arguably the most decorated Olympian of all time.",
    ]
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-bart")
    input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids
    model = model.to(torch_device)
    input_ids = input_ids.to(torch_device)

    outputs = model.generate(
        input_ids=input_ids,
        max_length=10,
        num_beams=4,
        num_return_sequences=2,
        eos_token_id=None,  # never stop early, so all returned beams share the same length
        return_dict_in_generate=True,
        output_scores=True,
        length_penalty=0.0,  # with no length penalty, sequence score == sum of transition scores
    )

    transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices)
    transition_scores = transition_scores.cpu().numpy()
    outputs.sequences_scores = outputs.sequences_scores.cpu().numpy()
    self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3))
def test_transition_scores_beam_search_encoder_decoder_with_eos(self):
    """
    Test that `compute_transition_scores` is working as expected with beam search and encoder-decoder models, when
    an EOS token is defined
    """
    articles = [
        "Justin Timberlake and Jessica Biel, welcome to parenthood.",
        "Michael Phelps is arguably the most decorated Olympian of all time.",
    ]
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-bart")
    input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids
    model = model.to(torch_device)
    input_ids = input_ids.to(torch_device)

    # Unlike the EOS-free variant of this test, beams may finish early here, exercising the
    # padding/alignment handling inside `compute_transition_scores`.
    outputs = model.generate(
        input_ids=input_ids,
        max_length=10,
        num_beams=4,
        num_return_sequences=2,
        return_dict_in_generate=True,
        output_scores=True,
        length_penalty=0.0,  # with no length penalty, sequence score == sum of transition scores
    )

    transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices)
    transition_scores = transition_scores.cpu().numpy()
    outputs.sequences_scores = outputs.sequences_scores.cpu().numpy()
    self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores, atol=1e-3))
def test_transition_scores_beam_search_decoder_only(self):
    """
    Test that `compute_transition_scores` is working as expected with beam search and decoder-only models
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)

    prompts = [
        "Justin Timberlake",
        "Michael Phelps",
    ]
    input_ids = tokenizer(prompts, return_tensors="pt", padding=True).input_ids.to(torch_device)

    outputs = model.generate(
        input_ids=input_ids,
        max_length=10,
        num_beams=4,
        num_return_sequences=2,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=None,
        return_dict_in_generate=True,
        output_scores=True,
        length_penalty=0.0,
    )

    # With `length_penalty=0.0`, each beam's sequence score is exactly the sum of its
    # per-step transition scores.
    transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices)
    summed_scores = np.sum(transition_scores.cpu().numpy(), axis=-1)
    self.assertTrue(np.allclose(summed_scores, outputs.sequences_scores.cpu().numpy(), atol=1e-3))
@slow
def test_transition_scores_early_stopping(self):
    """
    Test that `compute_transition_scores` is working as expected with beam search and early stopping.

    This is an aggressive test that makes sure that `beam_search`'s transition scores are computed
    correctly for varying `num_return_sequences`, `num_beams` and `batch_size > 1`.
    """
    # 2 x input_ids for "question: How are you? \n context: I had a long day, "
    input_ids = torch.tensor(2 * [[822, 10, 571, 33, 25, 58, 2625, 10, 27, 141, 3, 9, 307, 239, 6, 1]])
    model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small")
    model = model.to(torch_device)
    input_ids = input_ids.to(torch_device)

    outputs = model.generate(
        input_ids,
        max_length=10,
        return_dict_in_generate=True,
        output_scores=True,
        forced_eos_token_id=model.config.eos_token_id,  # force beams to end with EOS
        num_beams=4,
        do_sample=False,
        num_return_sequences=3,
        length_penalty=0.0,  # with no length penalty, sequence score == sum of transition scores
    )

    transition_scores = model.compute_transition_scores(
        sequences=outputs.sequences, scores=outputs.scores, beam_indices=outputs.beam_indices
    )
    transition_scores = transition_scores.cpu().numpy()
    outputs.sequences_scores = outputs.sequences_scores.cpu().numpy()
    self.assertTrue(np.allclose(np.sum(transition_scores, axis=-1), outputs.sequences_scores))
def test_encoder_decoder_generate_attention_mask(self):
    """
    Test that `generate` automagically creates the correct `attention_mask` for encoder-decoder models (which
    has a different keyword)
    """
    articles = ["Timberlake", "Jessica Biel, welcome to parenthood among other things"]
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
    # need extreme generation values here to force this test
    # to fail when `attention_mask` is not correctly treated in generate
    model = AutoModelForSeq2SeqLM.from_pretrained(
        "hf-internal-testing/tiny-random-bart",
    )
    # Disable EOS so both runs generate to the same length.
    model.config.eos_token_id = None
    input_ids = tokenizer(articles[0], return_tensors="pt").input_ids
    # The batched input is padded (the two articles differ in length), so the derived
    # attention mask is what keeps the padded run equivalent to the unbatched one.
    input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids
    model = model.to(torch_device)
    input_ids = input_ids.to(torch_device)
    input_ids_batched = input_ids_batched.to(torch_device)

    generate_kwargs = {
        "return_dict_in_generate": True,
        "output_scores": True,
        "max_length": 50,
        "num_beams": 5,
        "num_return_sequences": 5,
    }

    output_sequences_batched = model.generate(input_ids=input_ids_batched, **generate_kwargs)
    output_sequences = model.generate(input_ids=input_ids, **generate_kwargs)

    # The first 5 returned sequences of the batched run belong to articles[0]; if padding were
    # attended to, their scores would drift away from the unbatched run's scores.
    batched_out = output_sequences_batched.sequences_scores
    out = output_sequences.sequences_scores
    batched_out = batched_out.cpu().numpy()
    out = out.cpu().numpy()

    diff = np.abs(np.sum(batched_out[:5]) - np.sum(out))
    self.assertTrue(diff < 1e-4)
def test_generate_input_ids_as_kwarg(self):
    """`input_ids` must behave identically whether passed positionally or by keyword (decoder-only)."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=15).to(
        torch_device
    )
    input_ids = tokenizer("I need input_ids to generate", return_tensors="pt").input_ids.to(torch_device)

    from_kwarg = model.generate(input_ids=input_ids).cpu().numpy()
    from_positional = model.generate(input_ids).cpu().numpy()

    self.assertTrue(np.array_equal(from_positional, from_kwarg))
    self.assertEqual(from_positional.shape, (1, 15))
def test_generate_input_ids_as_encoder_kwarg(self):
    """`input_ids` must behave identically whether passed positionally or by keyword (encoder-decoder)."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-bart")
    model.config.eos_token_id = None  # never stop early, so both calls reach max_length
    model = model.to(torch_device)

    article = "Justin Timberlake and Jessica Biel, welcome to parenthood."
    input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)

    from_kwarg = model.generate(input_ids=input_ids, max_length=5).cpu().numpy()
    from_positional = model.generate(input_ids, max_length=5).cpu().numpy()

    self.assertTrue(np.array_equal(from_positional, from_kwarg))
    self.assertEqual(from_positional.shape, (1, 5))
def test_generate_inputs_and_encoder_kwargs(self):
    """Passing the main model input both positionally and as a keyword must raise."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10)
    input_ids = tokenizer("I need input_ids to generate", return_tensors="pt").input_ids
    with self.assertRaises(ValueError):
        model.generate(input_ids, input_ids=input_ids)
def test_generate_too_many_encoder_kwargs(self):
    """Passing redundant main inputs (`input_ids` plus `inputs_embeds`) must raise."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
    model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=10)
    input_ids = tokenizer("I need input_ids to generate", return_tensors="pt").input_ids
    with self.assertRaises(ValueError):
        model.generate(input_ids=input_ids, inputs_embeds=input_ids)
def test_generate_input_features_as_encoder_kwarg(self):
    """Test that non-`input_ids` main model inputs are correctly handled as positional arguments"""
    input_features = floats_tensor((3, 80, 60))
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        "hf-internal-testing/tiny-random-WhisperForConditionalGeneration"
    )
    # Bug fix: `Tensor.to` is NOT in-place — the original code discarded the return value
    # (`input_features.to(torch_device)`), leaving the inputs on CPU while the model may sit
    # on an accelerator. The result must be reassigned.
    input_features = input_features.to(torch_device)
    model = model.to(torch_device)

    output_sequences_kwargs = model.generate(input_features=input_features, max_length=5)
    output_sequences = model.generate(input_features, max_length=5)
    output_sequences_kwargs = output_sequences_kwargs.cpu().numpy()
    output_sequences = output_sequences.cpu().numpy()

    # Positional and keyword passing must be equivalent, and the output shape is
    # (batch, max_length).
    self.assertTrue(np.array_equal(output_sequences, output_sequences_kwargs))
    self.assertEqual(output_sequences.shape, (3, 5))
def test_generate_encoder_outputs_attention_mask(self):
    """Test that `generate` can handle attention masks when the encoder outputs are passed"""
    input_features = floats_tensor((3, 80, 60))
    # Random 0/1 mask over the input; unseeded, but the test only needs *a* mask.
    attention_mask = torch.randint(0, 2, input_features.shape)
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        "hf-internal-testing/tiny-random-WhisperForConditionalGeneration"
    )
    # Single device move per tensor (the original created the mask on the device and then
    # redundantly moved it again).
    input_features = input_features.to(torch_device)
    attention_mask = attention_mask.to(torch_device)
    model = model.to(torch_device)

    encoder = model.get_encoder()
    encoder_outputs = encoder(input_features)

    output_sequences_no_mask = model.generate(encoder_outputs=encoder_outputs)
    output_sequences_with_mask = model.generate(encoder_outputs=encoder_outputs, attention_mask=attention_mask)
    output_sequences_no_mask = output_sequences_no_mask.cpu().numpy()
    output_sequences_with_mask = output_sequences_with_mask.cpu().numpy()

    # The attention mask must actually influence the generated sequences.
    self.assertFalse(np.array_equal(output_sequences_no_mask, output_sequences_with_mask))
def test_eos_token_id_int_and_list_greedy_search(self):
    """Test that `generate` can handle multiple EOS tokens"""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
    tokens = tokenizer("""Hello, my dog is cute and""", return_tensors="pt").to(torch_device)

    expectation = 13
    greedy_kwargs = {"do_sample": False, "num_beams": 1}

    # a) a single EOS token given as an int stops generation at the expected length
    generated_tokens = model.generate(**tokens, eos_token_id=873, **greedy_kwargs)
    self.assertTrue(expectation == len(generated_tokens[0]))

    # b) the same token inside a list of EOS candidates must stop at the same spot
    generated_tokens = model.generate(**tokens, eos_token_id=[873, 198], **greedy_kwargs)
    self.assertTrue(expectation == len(generated_tokens[0]))
def test_generate_vision2text_conditioning(self):
    """Test that `decoder_input_ids` can be used to condition the generation in vision-to-text models"""
    pixel_values = floats_tensor((2, 3, 30, 30))
    conditioning_input = torch.tensor([[10], [10]])  # this should be the 2nd output token, after the BOS token
    model = AutoModelForVision2Seq.from_pretrained(
        "hf-internal-testing/tiny-random-VisionEncoderDecoderModel-vit-gpt2"
    )
    pixel_values = pixel_values.to(torch_device)
    model = model.to(torch_device)
    conditioning_input = conditioning_input.to(torch_device)

    # we can condition on decoder_input_ids (expected decoder input) and input_ids (which we pipe internally as
    # decoder_input_ids, if the encoder is not a model with text input)
    output_sequences_decoder_input_ids = model.generate(
        pixel_values, max_length=5, decoder_input_ids=conditioning_input
    )
    output_sequences_input_ids = model.generate(pixel_values, max_length=5, input_ids=conditioning_input)
    output_sequences_decoder_input_ids = output_sequences_decoder_input_ids.cpu().numpy()
    output_sequences_input_ids = output_sequences_input_ids.cpu().numpy()
    conditioning_input = conditioning_input.cpu().numpy()

    # Both conditioning routes must be equivalent, and the conditioning token must appear
    # right after BOS in the generated sequence.
    self.assertTrue(np.array_equal(output_sequences_decoder_input_ids, output_sequences_input_ids))
    self.assertTrue(np.array_equal(output_sequences_decoder_input_ids[:, 1:2], conditioning_input))
@require_read_token
@slow
@require_torch_accelerator
def test_cache_device_map_with_vision_layer_device_map(self):
    """
    Test that the cache device map is correctly set when the vision layer has a device map. Regression test for
    #36942
    """
    # gemma 3 uses hybrid cache, which can be compiled -> needs a device map at allocation time
    model_id = "google/gemma-3-4b-it"

    # important part of this device map: the `.layers.` pattern is NOT present in the decoder
    # (the whole decoder is mapped as one "language_model" entry).
    device_map = {
        "vision_tower.vision_model.embeddings": 0,
        "vision_tower.vision_model.encoder.layers.0": 0,
        "vision_tower.vision_model.encoder.layers.1": 0,
        "vision_tower.vision_model.encoder.layers.2": 0,
        "vision_tower.vision_model.encoder.layers.3": 0,
        "vision_tower.vision_model.encoder.layers.4": 0,
        "vision_tower.vision_model.encoder.layers.5": 0,
        "vision_tower.vision_model.encoder.layers.6": 0,
        "vision_tower.vision_model.encoder.layers.7": 0,
        "vision_tower.vision_model.encoder.layers.8": 0,
        "vision_tower.vision_model.encoder.layers.9": 0,
        "vision_tower.vision_model.encoder.layers.10": 0,
        "vision_tower.vision_model.encoder.layers.11": 0,
        "vision_tower.vision_model.encoder.layers.12": 0,
        "vision_tower.vision_model.encoder.layers.13": 0,
        "vision_tower.vision_model.encoder.layers.14": "cpu",
        "vision_tower.vision_model.encoder.layers.15": "cpu",
        "vision_tower.vision_model.encoder.layers.16": "cpu",
        "vision_tower.vision_model.encoder.layers.17": "cpu",
        "vision_tower.vision_model.encoder.layers.18": "cpu",
        "vision_tower.vision_model.encoder.layers.19": "cpu",
        "vision_tower.vision_model.encoder.layers.20": "cpu",
        "vision_tower.vision_model.encoder.layers.21": "cpu",
        "vision_tower.vision_model.encoder.layers.22": "cpu",
        "vision_tower.vision_model.encoder.layers.23": "cpu",
        "vision_tower.vision_model.encoder.layers.24": "cpu",
        "vision_tower.vision_model.encoder.layers.25": "cpu",
        "vision_tower.vision_model.encoder.layers.26": "cpu",
        "vision_tower.vision_model.post_layernorm": "cpu",
        "multi_modal_projector": "cpu",
        "language_model": "cpu",
    }

    model = AutoModelForImageTextToText.from_pretrained(model_id, device_map=device_map, dtype=torch.bfloat16)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    inputs = tokenizer(["This is a text input"], return_tensors="pt").to(model.device)

    # If the generate doesn't infer the DECODER device map correctly, this will fail
    _ = model.generate(**inputs, max_new_tokens=2, do_sample=False)
@require_torch_accelerator
@pytest.mark.torch_compile_test
def test_cpu_offload_doesnt_compile(self):
    """Test that CPU offload doesn't trigger compilation"""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
    tokenized_inputs = tokenizer(["Hello world"], return_tensors="pt")
    # `cache_implementation="static"` is the setting under which `generate` attempts compilation.
    generate_kwargs = {"max_new_tokens": 3, "cache_implementation": "static"}

    # Sanity check: if we don't specify a device map, the model will get compiled
    model_gpu = AutoModelForCausalLM.from_pretrained(
        "hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto"
    )
    input_ids = tokenized_inputs.input_ids.to(model_gpu.device)
    _ = model_gpu.generate(input_ids, **generate_kwargs)
    # `_compiled_call` is the attribute set when `generate` compiles the forward pass.
    self.assertTrue(hasattr(model_gpu, "_compiled_call"))

    # If we specify a device map, the model will not be compiled
    # (as of April 2025, compiling with CPU offload results in a crash)
    device_map = {
        "model.embed_tokens": 0,
        "model.layers.0": 0,
        "model.layers.1": "cpu",
        "model.norm": "cpu",
        "lm_head": 0,
    }
    model_cpu = AutoModelForCausalLM.from_pretrained(
        "hf-internal-testing/tiny-random-MistralForCausalLM", device_map=device_map
    )
    input_ids = tokenized_inputs.input_ids.to(model_cpu.device)
    _ = model_cpu.generate(input_ids, **generate_kwargs)
    self.assertFalse(hasattr(model_cpu, "_compiled_call"))
def test_custom_generate_from_argument_in_generate(self):
    """Tests that the `custom_generate` argument is used when passed to `generate`"""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
    model = AutoModelForCausalLM.from_pretrained(
        "hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto"
    )
    model_inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device)

    # Note: `transformers-community/custom_generate_example` has a custom decoding method with a `left_padding`
    # argument (int), which prepends as many pad tokens.
    generated = model.generate(
        **model_inputs,
        left_padding=5,
        max_new_tokens=5,
        custom_generate="transformers-community/custom_generate_example",
        trust_remote_code=True,
    )
    decoded = tokenizer.decode(generated[0])
    # <unk> is the pad token, so five of them must have been prepended
    self.assertTrue(decoded.startswith("<unk><unk><unk><unk><unk>"))
def test_custom_generate_from_model_repo_with_custom_generate_code(self):
    """
    Tests that models from model repos containing custom generation code override `generate` with the custom code
    """
    model = AutoModelForCausalLM.from_pretrained(
        "transformers-community/custom_generate_example", device_map="auto", trust_remote_code=True
    )
    # `left_padding` is a custom argument, doesn't exist in the base `generate` method
    signature_params = inspect.signature(model.generate).parameters
    self.assertTrue(signature_params.get("left_padding"))
def test_custom_generate_bad_requirements(self):
    """Tests that we check the `requirements.txt` file from custom generation repos"""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
    model = AutoModelForCausalLM.from_pretrained(
        "hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto"
    )
    model_inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device)

    # Note: `transformers-community/custom_generate_bad_requirements` has a `requirements.txt` with
    # impossible requirements
    with self.assertRaises(ImportError):
        model.generate(
            **model_inputs,
            custom_generate="transformers-community/custom_generate_bad_requirements",
            trust_remote_code=True,
        )
def test_custom_generate_requires_trust_remote_code(self):
    """Tests that `trust_remote_code` is required when using `custom_generate`"""
    # Case 1: A model from a repo containing custom generation code must be loaded with `trust_remote_code`
    with self.assertRaises(ValueError):
        AutoModelForCausalLM.from_pretrained("transformers-community/custom_generate_example", device_map="auto")

    # Case 2: Using the `custom_generate` argument in `generate` requires `trust_remote_code` if the code is not
    # local
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
    model = AutoModelForCausalLM.from_pretrained(
        "hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto"
    )
    model_inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device)
    with self.assertRaises(ValueError):
        model.generate(**model_inputs, custom_generate="transformers-community/custom_generate_example")
def test_custom_generate_local_directory(self):
    """Tests that custom_generate works with local directories containing importable relative modules"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        custom_generate_dir = Path(tmp_dir) / "custom_generate"
        custom_generate_dir.mkdir()
        # A generate() implementation that depends on a relative import from a sibling module
        (custom_generate_dir / "generate.py").write_text(
            "from .helper import ret_success\ndef generate(*args, **kwargs):\n return ret_success()\n"
        )
        (custom_generate_dir / "helper.py").write_text('def ret_success():\n return "success"\n')

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
        model = AutoModelForCausalLM.from_pretrained(
            "hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto"
        )
        model_inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device)
        value = model.generate(
            **model_inputs,
            custom_generate=str(tmp_dir),
            trust_remote_code=True,
        )
        assert value == "success"
def test_custom_generate_callable(self):
    """Tests that passing a callable to `custom_generate` executes the callable decoding loop"""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
    model = AutoModelForCausalLM.from_pretrained(
        "hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto"
    )
    model_inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device)

    def custom_loop(model, input_ids, logits_processor, stopping_criteria, generation_config, **model_kwargs):
        # Check that generate() correctly prepares the stopping criteria
        assert stopping_criteria[0].max_length == input_ids.shape[1] + 3
        return "callable_success"

    value = model.generate(
        **model_inputs,
        max_new_tokens=3,
        custom_generate=custom_loop,
    )
    self.assertEqual(value, "callable_success")
@pytest.mark.generate
def test_generate_custom_cache_position(self):
    """
    Regression test for #39261. Tests that we can continue generating from past key values, returned from a
    previous `generate` call, without the tokens that correspond to the cached part. This is achieved by
    manually creating `cache_position` -- this tests that it is piped correctly.
    """
    model = AutoModelForCausalLM.from_pretrained(
        "hf-internal-testing/tiny-random-MistralForCausalLM", device_map="auto"
    )
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-MistralForCausalLM")
    model_inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device)
    generate_kwargs = {
        "use_cache": True,
        "do_sample": False,  # greedy, so both continuation strategies are comparable
        "return_dict_in_generate": True,
        "output_scores": True,
    }

    # Traditional way to continue generating text using kv cache: re-feed the full sequence.
    #                                   output2
    #                   /~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\
    #                           input2
    #         /~~~~~~~~~~~~~~~~~~~~~~~~\
    #              output1
    #         /~~~~~~~~~~~~~~~~\
    #          input1
    #         /~~~~~~\
    #         IIIIIIIIOOOOOOOOOOIIIIIIIIOOOOOOOOOOOOOOOOOO
    inputs_1a = model_inputs
    outputs_1a = model.generate(**inputs_1a, **generate_kwargs, max_new_tokens=2)

    inputs_2a = {**model_inputs}
    inputs_2a["input_ids"] = torch.cat((outputs_1a.sequences, model_inputs["input_ids"]), dim=1)
    # Extend the attention mask to cover the newly appended tokens.
    inputs_2a["attention_mask"] = torch.nn.functional.pad(
        inputs_1a["attention_mask"],
        (0, inputs_2a["input_ids"].shape[1] - inputs_1a["input_ids"].shape[1]),
        mode="constant",
        value=1,
    )
    inputs_2a["past_key_values"] = outputs_1a.past_key_values
    outputs_2a = model.generate(**inputs_2a, **generate_kwargs, max_new_tokens=2)

    # Keep only the part of the output related to the second output + last token from the first output, for
    # future comparison
    traditional_outputs = copy.deepcopy(outputs_2a)
    traditional_outputs.sequences = traditional_outputs.sequences[:, outputs_1a.sequences.shape[1] - 1 :]

    # Continue generating text using kv cache, but without providing the cached part of the input in the
    # input_ids.
    #                           cache_position
    #                          /~~~~~~~\
    #                  inputs2["attention_mask"]
    #         /~~~~~~~~~~~~~~~~~~~~~~~~~\
    #              output1                       output2
    #         /~~~~~~~~~~~~~~~~\/~~~~~~~~~~~~~~~~~~~~~~~~~\
    #          input1            input2
    #         /~~~~~~\          /~~~~~~~\
    #         IIIIIIIIOOOOOOOOOOIIIIIIIIIOOOOOOOOOOOOOOOOOO
    #
    inputs_1b = model_inputs
    outputs_1b = model.generate(**inputs_1b, **generate_kwargs, max_new_tokens=2)

    inputs_2b = {**model_inputs}
    # The last output token isn't cached, so it needs to be included in the new input
    inputs_2b["input_ids"] = torch.cat((outputs_1b.sequences[:, -1:], model_inputs["input_ids"]), dim=1)
    inputs_2b["attention_mask"] = torch.nn.functional.pad(
        inputs_1b["attention_mask"],
        (0, outputs_1b.sequences.shape[1]),
        mode="constant",
        value=1,
    )
    inputs_2b["past_key_values"] = outputs_1b.past_key_values
    # Manually-built positions: they start right after the cached prefix instead of at 0.
    cache_length_1b = outputs_1b.past_key_values.get_seq_length()
    inputs_2b["cache_position"] = torch.arange(
        cache_length_1b,
        cache_length_1b + inputs_2b["input_ids"].shape[1],
        dtype=torch.int64,
        device=model.device,
    )
    outputs_2b = model.generate(**inputs_2b, **generate_kwargs, max_new_tokens=2)
    incremental_outputs = outputs_2b

    # The two sets of generated text and past kv should be equal to each other
    self.assertTrue(has_similar_generate_outputs(traditional_outputs, incremental_outputs))
    cache1, cache2 = traditional_outputs.past_key_values, incremental_outputs.past_key_values
    for idx in range(len(cache1)):
        if isinstance(cache1, EncoderDecoderCache):
            # Encoder-decoder caches carry two sub-caches; compare keys and values per layer in both.
            for subcache in ["self_attention_cache", "cross_attention_cache"]:
                torch.testing.assert_close(
                    getattr(cache1, subcache).layers[idx].keys, getattr(cache2, subcache).layers[idx].keys
                )
                torch.testing.assert_close(
                    getattr(cache1, subcache).layers[idx].values, getattr(cache2, subcache).layers[idx].values
                )
        else:
            torch.testing.assert_close(cache1.layers[idx].keys, cache2.layers[idx].keys)
            torch.testing.assert_close(cache1.layers[idx].values, cache2.layers[idx].values)
@pytest.mark.generate
@parameterized.expand(
    [
        ("transformers-community/dola", {"dola_layers": "low"}),
        ("transformers-community/contrastive-search", {"penalty_alpha": 0.6, "top_k": 4}),
        (
            "transformers-community/group-beam-search",
            {
                "do_sample": False,
                "num_beams": 2,
                "num_beam_groups": 2,
                "diversity_penalty": 2.0,
                "length_penalty": 2.0,
            },
        ),
        (
            "transformers-community/constrained-beam-search",
            {"do_sample": False, "num_beams": 2, "force_words_ids": [[167, 168, 169]]},
        ),
    ]
)
def test_hub_gen_strategies(self, custom_generate, extra_kwargs):
    """Smoke test: Hub-hosted custom decoding strategies run end-to-end through `custom_generate`."""
    model = AutoModelForCausalLM.from_pretrained(
        "hf-internal-testing/tiny-random-MistralForCausalLM",
        device_map=torch_device,
        attn_implementation="eager",
    ).eval()
    # Pre-tokenized prompt (5 tokens) with a full attention mask; avoids a tokenizer download.
    # NOTE(review): assumed the ids correspond to this checkpoint's tokenizer — confirm.
    model_inputs = {
        "input_ids": torch.tensor([[1, 22557, 28725, 1526, 28808]], device=torch_device),
        "attention_mask": torch.tensor([[1, 1, 1, 1, 1]], device=torch_device),
    }

    # Sets generation arguments such that:
    # a) no EOS is generated, to ensure generation doesn't break early
    # b) there are at least two forward passes in the main model, to ensure the input preparation of
    #    the main model is correct
    generation_kwargs = {
        "eos_token_id": -1,  # see a)
        "max_new_tokens": 4,  # see b)
        "num_beams": 1,
        "do_sample": True,
        "output_scores": True,
        "output_logits": True,
        "output_hidden_states": True,
        "output_attentions": True,
        "return_dict_in_generate": True,
        "use_cache": True,
        "trust_remote_code": True,
        "custom_generate": custom_generate,
    }
    generation_kwargs.update(extra_kwargs)

    torch.manual_seed(0)
    output = model.generate(**generation_kwargs, **model_inputs)
    # 5 prompt tokens + 4 new tokens
    self.assertEqual(output.sequences.shape, (1, 9))
@require_torch
| GenerationIntegrationTests |
python | huggingface__transformers | src/transformers/models/t5/modeling_t5.py | {
"start": 62191,
"end": 64990
class ____(T5PreTrainedModel):
    """T5 encoder with a token-classification head (dropout + linear) on top of the hidden states."""

    def __init__(self, config: T5Config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # Encoder-only T5 backbone; token classification needs no decoder.
        self.transformer = T5EncoderModel(config)
        self.dropout = nn.Dropout(config.classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
            should be able to pad the inputs on both the right and the left.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for detail.

            [What are input IDs?](../glossary#input-ids)

            To know more on how to prepare `input_ids` for pretraining take a look a [T5 Training](./t5#training).
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Per-token logits: dropout on the last hidden state, then project to the label space.
        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            # Flatten (batch, seq) so each token position is one classification example.
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            # NOTE(review): `outputs[2:-1]` is nested as a *single* tuple element rather than
            # concatenated (`(logits,) + outputs[2:-1]`) — looks suspicious; confirm callers of
            # the non-return_dict path expect this shape before changing it.
            output = (logits, outputs[2:-1])
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@auto_docstring
| T5ForTokenClassification |
python | streamlit__streamlit | lib/streamlit/runtime/runtime.py | {
"start": 2955,
"end": 4724
} | class ____:
"""Config options for StreamlitRuntime."""
# The filesystem path of the Streamlit script to run.
script_path: str
# DEPRECATED: We need to keep this field around for compatibility reasons, but we no
# longer use this anywhere.
command_line: str | None
# The storage backend for Streamlit's MediaFileManager.
media_file_storage: MediaFileStorage
# The upload file manager
uploaded_file_manager: UploadedFileManager
# The cache storage backend for Streamlit's st.cache_data.
cache_storage_manager: CacheStorageManager = field(
default_factory=LocalDiskCacheStorageManager
)
# The ComponentRegistry instance to use.
component_registry: BaseComponentRegistry = field(
default_factory=LocalComponentRegistry
)
# The BidiComponentManager instance to use for v2 components.
bidi_component_registry: BidiComponentManager = field(
default_factory=BidiComponentManager
)
# The SessionManager class to be used.
session_manager_class: type[SessionManager] = WebsocketSessionManager
# The SessionStorage instance for the SessionManager to use.
session_storage: SessionStorage = field(default_factory=MemorySessionStorage)
# True if the command used to start Streamlit was `streamlit hello`.
is_hello: bool = False
# TODO(vdonato): Eventually add a new fragment_storage_class field enabling the code
# creating a new Streamlit Runtime to configure the FragmentStorage instances
# created by each new AppSession. We choose not to do this for now to avoid adding
# additional complexity to RuntimeConfig/SessionManager/etc when it's unlikely
# we'll have a custom implementation of this class anytime soon.
| RuntimeConfig |
python | Lightning-AI__lightning | src/lightning/pytorch/loops/fetchers.py | {
"start": 2807,
"end": 5105
} | class ____(_DataFetcher):
"""This class is used to control batch fetching flow.
Args:
prefetch_batches: Number of batches to pre-fetch. Pre-fetching at least 1 batch is necessary to properly track
whether a batch is the last one (available with :attr:`self.done`) when the length is not available. The
value of this argument is ignored when the length is available.
"""
def __init__(self, prefetch_batches: int = 1) -> None:
super().__init__()
if prefetch_batches < 0:
raise ValueError("`prefetch_batches` should at least be 0.")
self.prefetch_batches = prefetch_batches
self.batches: list[Any] = []
@override
def __iter__(self) -> "_PrefetchDataFetcher":
super().__iter__()
if self.length is not None:
# ignore pre-fetching, it's not necessary
return self
# prefetch batches to know when the iterator will be exhausted in advance
for _ in range(self.prefetch_batches):
try:
batch = super().__next__()
self.batches.append(batch)
except StopIteration:
# this would only happen when prefetch_batches > the number of batches available and makes
# `__next__` jump directly to the empty iterator case without trying to fetch again
break
return self
@override
def __next__(self) -> _ITERATOR_RETURN:
if self.batches:
# there are pre-fetched batches already from a previous `prefetching` call.
# consume one
batch = self.batches.pop(0)
try:
# refill the consumed batch
self.batches.append(super().__next__())
except StopIteration:
# no more batches to fetch. we are done only if all pre-fetched batches were returned
self.done = not self.batches
elif not self.done:
# this will run only when no pre-fetching was done.
batch = super().__next__()
else:
# the iterator is empty
raise StopIteration
return batch
@override
def reset(self) -> None:
super().reset()
self.batches = []
| _PrefetchDataFetcher |
python | getsentry__sentry | tests/sentry/integrations/aws_lambda/test_utils.py | {
"start": 4056,
"end": 7837
} | class ____(TestCase):
node_fn = {
"Runtime": "nodejs10.x",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaB",
}
python_fn = {
"Runtime": "python3.6",
"FunctionArn": "arn:aws:lambda:us-east-2:599817902985:function:lambdaC",
}
cache_value = {
"aws-layer:node": {
"name": "AWS Lambda Node Layer",
"canonical": "aws-layer:node",
"sdk_version": "5.29.3",
"account_number": "943013980633",
"layer_name": "SentryNodeServerlessSDK",
"repo_url": "https://github.com/getsentry/sentry-javascript",
"main_docs_url": "https://docs.sentry.io/platforms/javascript/guides/aws-lambda",
"regions": [
{"region": "us-east-2", "version": "19"},
{"region": "us-west-1", "version": "17"},
],
},
"aws-layer:python": {
"name": "AWS Lambda Python Layer",
"canonical": "aws-layer:python",
"sdk_version": "0.20.3",
"account_number": "943013980633",
"layer_name": "SentryPythonServerlessSDK",
"repo_url": "https://github.com/getsentry/sentry-python",
"main_docs_url": "https://docs.sentry.io/platforms/python/guides/aws-lambda/",
"regions": [
{"region": "eu-west-1", "version": "2"},
{"region": "us-east-2", "version": "2"},
],
},
}
def test_no_cache(self) -> None:
assert get_option_value(self.node_fn, OPTION_VERSION) == "3"
assert get_option_value(self.node_fn, OPTION_LAYER_NAME) == "my-layer"
assert get_option_value(self.node_fn, OPTION_ACCOUNT_NUMBER) == "1234"
assert get_option_value(self.python_fn, OPTION_VERSION) == "34"
assert get_option_value(self.python_fn, OPTION_LAYER_NAME) == "my-python-layer"
assert get_option_value(self.python_fn, OPTION_ACCOUNT_NUMBER) == "1234"
@patch.object(cache, "get")
def test_with_cache(self, mock_get: MagicMock) -> None:
mock_get.return_value = self.cache_value
with override_settings(SENTRY_RELEASE_REGISTRY_BASEURL="http://localhost:5000"):
assert get_option_value(self.node_fn, OPTION_VERSION) == "19"
assert get_option_value(self.node_fn, OPTION_LAYER_NAME) == "SentryNodeServerlessSDK"
assert get_option_value(self.node_fn, OPTION_ACCOUNT_NUMBER) == "943013980633"
assert get_option_value(self.python_fn, OPTION_VERSION) == "2"
assert (
get_option_value(self.python_fn, OPTION_LAYER_NAME) == "SentryPythonServerlessSDK"
)
assert get_option_value(self.python_fn, OPTION_ACCOUNT_NUMBER) == "943013980633"
@patch.object(cache, "get")
def test_invalid_region(self, mock_get: MagicMock) -> None:
fn = {
"Runtime": "nodejs10.x",
"FunctionArn": "arn:aws:lambda:us-gov-east-1:599817902985:function:lambdaB",
}
mock_get.return_value = self.cache_value
with override_settings(SENTRY_RELEASE_REGISTRY_BASEURL="http://localhost:5000"):
with pytest.raises(IntegrationError, match="Unsupported region us-gov-east-1"):
get_option_value(fn, OPTION_VERSION)
@patch.object(cache, "get")
def test_cache_miss(self, mock_get: MagicMock) -> None:
mock_get.return_value = {}
with override_settings(SENTRY_RELEASE_REGISTRY_BASEURL="http://localhost:5000"):
with pytest.raises(
IntegrationError,
match="Could not find cache value with key aws-layer:node",
):
get_option_value(self.node_fn, OPTION_VERSION)
| GetOptionValueTest |
python | streamlit__streamlit | lib/tests/streamlit/connections/sql_connection_test.py | {
"start": 1312,
"end": 8474
} | class ____(unittest.TestCase):
def tearDown(self) -> None:
st.cache_data.clear()
@patch("sqlalchemy.engine.make_url", MagicMock(return_value="some_sql_conn_string"))
@patch(
"streamlit.connections.sql_connection.SQLConnection._secrets",
PropertyMock(return_value=AttrDict({"url": "some_sql_conn_string"})),
)
@patch("sqlalchemy.create_engine")
def test_url_set_explicitly_in_secrets(self, patched_create_engine):
SQLConnection("my_sql_connection")
patched_create_engine.assert_called_once_with("some_sql_conn_string")
@patch(
"streamlit.connections.sql_connection.SQLConnection._secrets",
PropertyMock(return_value=AttrDict(DB_SECRETS)),
)
@patch("sqlalchemy.create_engine")
def test_url_constructed_from_secrets_params(self, patched_create_engine):
SQLConnection("my_sql_connection")
patched_create_engine.assert_called_once()
args, _ = patched_create_engine.call_args_list[0]
assert (
args[0].render_as_string(hide_password=False)
== "postgres+psycopg2://AzureDiamond:hunter2@localhost:5432/postgres"
)
@patch(
"streamlit.connections.sql_connection.SQLConnection._secrets",
PropertyMock(return_value=AttrDict(DB_SECRETS)),
)
@patch("sqlalchemy.create_engine")
def test_kwargs_overwrite_secrets_values(self, patched_create_engine):
SQLConnection(
"my_sql_connection",
port=2345,
username="DnomaidEruza",
query={"charset": "utf8mb4"},
)
patched_create_engine.assert_called_once()
args, _ = patched_create_engine.call_args_list[0]
assert (
args[0].render_as_string(hide_password=False)
== "postgres+psycopg2://DnomaidEruza:hunter2@localhost:2345/postgres?charset=utf8mb4"
)
def test_error_if_no_config(self):
with patch(
"streamlit.connections.sql_connection.SQLConnection._secrets",
PropertyMock(return_value=AttrDict({})),
):
with pytest.raises(StreamlitAPIException) as e:
SQLConnection("my_sql_connection")
assert "Missing SQL DB connection configuration." in str(e.value)
@parameterized.expand([("dialect",), ("username",), ("host",)])
def test_error_if_missing_required_param(self, missing_param):
secrets = deepcopy(DB_SECRETS)
del secrets[missing_param]
with patch(
"streamlit.connections.sql_connection.SQLConnection._secrets",
PropertyMock(return_value=AttrDict(secrets)),
):
with pytest.raises(StreamlitAPIException) as e:
SQLConnection("my_sql_connection")
assert str(e.value) == f"Missing SQL DB connection param: {missing_param}"
@patch(
"streamlit.connections.sql_connection.SQLConnection._secrets",
PropertyMock(
return_value=AttrDict(
{
**DB_SECRETS,
"create_engine_kwargs": {"foo": "bar", "baz": "i get overwritten"},
}
)
),
)
@patch("sqlalchemy.create_engine")
def test_create_engine_kwargs_secrets_section(self, patched_create_engine):
SQLConnection("my_sql_connection", baz="qux")
patched_create_engine.assert_called_once()
_, kwargs = patched_create_engine.call_args_list[0]
assert kwargs == {"foo": "bar", "baz": "qux"}
@patch("streamlit.connections.sql_connection.SQLConnection._connect", MagicMock())
@patch("pandas.read_sql")
def test_query_caches_value(self, patched_read_sql):
# Caching functions rely on an active script run ctx
add_script_run_ctx(threading.current_thread(), create_mock_script_run_ctx())
patched_read_sql.return_value = "i am a dataframe"
conn = SQLConnection("my_sql_connection")
assert conn.query("SELECT 1;") == "i am a dataframe"
assert conn.query("SELECT 1;") == "i am a dataframe"
patched_read_sql.assert_called_once()
@patch("streamlit.connections.sql_connection.SQLConnection._connect", MagicMock())
@patch("pandas.read_sql")
def test_does_not_reset_cache_when_ttl_changes(self, patched_read_sql):
# Caching functions rely on an active script run ctx
add_script_run_ctx(threading.current_thread(), create_mock_script_run_ctx())
patched_read_sql.return_value = "i am a dataframe"
conn = SQLConnection("my_sql_connection")
conn.query("SELECT 1;", ttl=10)
conn.query("SELECT 2;", ttl=20)
conn.query("SELECT 1;", ttl=10)
conn.query("SELECT 2;", ttl=20)
assert patched_read_sql.call_count == 2
@patch("streamlit.connections.sql_connection.SQLConnection._connect", MagicMock())
@patch("pandas.read_sql")
def test_scopes_caches_by_connection_name(self, patched_read_sql):
# Caching functions rely on an active script run ctx
add_script_run_ctx(threading.current_thread(), create_mock_script_run_ctx())
patched_read_sql.return_value = "i am a dataframe"
conn1 = SQLConnection("my_sql_connection1")
conn2 = SQLConnection("my_sql_connection2")
conn1.query("SELECT 1;")
conn1.query("SELECT 1;")
conn2.query("SELECT 1;")
conn2.query("SELECT 1;")
assert patched_read_sql.call_count == 2
@patch("streamlit.connections.sql_connection.SQLConnection._connect", MagicMock())
@patch("pandas.read_sql")
def test_retry_behavior(self, patched_read_sql):
from sqlalchemy.exc import DatabaseError, InternalError, OperationalError
for error_class in [DatabaseError, InternalError, OperationalError]:
patched_read_sql.side_effect = error_class("kaboom", params=None, orig=None)
conn = SQLConnection("my_sql_connection")
with patch.object(conn, "reset", wraps=conn.reset) as wrapped_reset:
with pytest.raises(error_class):
conn.query("SELECT 1;")
# Our connection should have been reset after each failed attempt to call
# query.
assert wrapped_reset.call_count == 3
# conn._connect should have been called three times: once in the initial
# connection, then once each after the second and third attempts to call
# query.
assert conn._connect.call_count == 3
conn._connect.reset_mock()
@patch("streamlit.connections.sql_connection.SQLConnection._connect", MagicMock())
@patch("pandas.read_sql")
def test_retry_behavior_fails_fast_for_most_errors(self, patched_read_sql):
patched_read_sql.side_effect = Exception("kaboom")
conn = SQLConnection("my_sql_connection")
with pytest.raises(Exception, match="kaboom"):
conn.query("SELECT 1;")
# conn._connect should have just been called once when first creating the
# connection.
assert conn._connect.call_count == 1
conn._connect.reset_mock()
| SQLConnectionTest |
python | google__jax | jax/_src/pallas/mosaic/sc_core.py | {
"start": 1215,
"end": 2047
} | class ____(pallas_core.MemoryRef):
"""A MemoryRef for SparseCore."""
tiling: Tiling | None = None
def __init__(
self,
shape: Sequence[int],
dtype: jax.typing.DTypeLike,
memory_space: tpu_core.MemorySpace,
tiling: Tiling | None = None,
):
super().__init__(jax_core.ShapedArray(shape, dtype), memory_space)
for tile in tiling or ():
if len(tile) > len(shape):
raise ValueError(
f"Tile rank must not exceed shape rank: {tile=} vs {shape=}"
)
object.__setattr__(self, "tiling", tiling)
def get_ref_aval(self) -> state.TransformedRef | state.AbstractRef:
# TODO(sharadmv): Clean this up. ShapedArrayWithMemorySpace fails when we
# try to apply JAX ops to it.
return AbstractRef(self.inner_aval, self.memory_space, self.tiling)
| MemoryRef |
python | PrefectHQ__prefect | src/prefect/server/concurrency/lease_storage/filesystem.py | {
"start": 548,
"end": 695
} | class ____(TypedDict):
id: str
resource_ids: list[str]
metadata: dict[str, Any] | None
expiration: str
created_at: str
| _LeaseFile |
python | huggingface__transformers | src/transformers/models/phimoe/modeling_phimoe.py | {
"start": 39857,
"end": 40066
} | class ____(GenericForSequenceClassification, PhimoePreTrainedModel): ...
__all__ = ["PhimoePreTrainedModel", "PhimoeModel", "PhimoeForCausalLM", "PhimoeForSequenceClassification"]
| PhimoeForSequenceClassification |
python | getsentry__sentry | src/sentry/users/api/endpoints/user_password.py | {
"start": 748,
"end": 2137
} | class ____(serializers.Serializer[User]):
password = serializers.CharField(required=True, trim_whitespace=False)
passwordNew = serializers.CharField(required=True, trim_whitespace=False)
passwordVerify = serializers.CharField(required=True, trim_whitespace=False)
def validate_password(self, value: str) -> str:
user = self.context["user"]
if not user.check_password(value):
raise serializers.ValidationError("The password you entered is not correct.")
return value
def validate_passwordNew(self, value: str) -> str:
# this will raise a ValidationError if password is invalid
user = self.context["user"]
password_validation.validate_password(value, user=user)
if user.is_managed:
raise serializers.ValidationError(
"This account is managed and the password cannot be changed via Sentry."
)
return value
def validate(self, attrs: dict[str, Any]) -> dict[str, Any]:
attrs = super().validate(attrs)
# make sure `passwordNew` matches `passwordVerify`
if not constant_time_compare(
str(attrs.get("passwordNew")), str(attrs.get("passwordVerify"))
):
raise serializers.ValidationError("The passwords you entered did not match.")
return attrs
@control_silo_endpoint
| UserPasswordSerializer |
python | pytorch__pytorch | torch/_inductor/dependencies.py | {
"start": 13757,
"end": 13925
} | class ____:
index: sympy.Expr # type: ignore[assignment]
var_names: tuple[sympy.Symbol, ...]
size: tuple[sympy.Expr, ...]
@dataclasses.dataclass
| IndexExprDep |
python | python__mypy | mypyc/namegen.py | {
"start": 75,
"end": 4934
} | class ____:
"""Utility for generating distinct C names from Python names.
Since C names can't use '.' (or unicode), some care is required to
make C names generated from Python names unique. Also, we want to
avoid generating overly long C names since they make the generated
code harder to read.
Note that we don't restrict ourselves to a 32-character distinguishing
prefix guaranteed by the C standard since all the compilers we care
about at the moment support longer names without issues.
For names that are exported in a shared library (not static) use
exported_name() instead.
Summary of the approach:
* Generate a unique name prefix from suffix of fully-qualified
module name used for static names. If only compiling a single
module, this can be empty. For example, if the modules are
'foo.bar' and 'foo.baz', the prefixes can be 'bar_' and 'baz_',
respectively. If the modules are 'bar.foo' and 'baz.foo', the
prefixes will be 'bar_foo_' and 'baz_foo_'.
* Replace '.' in the Python name with '___' in the C name. (And
replace the unlikely but possible '___' with '___3_'. This
collides '___' with '.3_', but this is OK because names
may not start with a digit.)
The generated should be internal to a build and thus the mapping is
arbitrary. Just generating names '1', '2', ... would be correct,
though not very usable. The generated names may be visible in CPU
profiles and when debugging using native debuggers.
"""
def __init__(self, groups: Iterable[list[str]], *, separate: bool = False) -> None:
"""Initialize with a list of modules in each compilation group.
The names of modules are used to shorten names referring to
modules, for convenience. Arbitrary module
names are supported for generated names, but uncompiled modules
will use long names.
If separate is True, assume separate compilation. This implies
that we don't have knowledge of all sources that will be linked
together. In this case we won't trim module prefixes, since we
don't have enough information to determine common module prefixes.
"""
self.module_map: dict[str, str] = {}
for names in groups:
if not separate:
self.module_map.update(make_module_translation_map(names))
else:
for name in names:
self.module_map[name] = name + "."
self.translations: dict[tuple[str, str], str] = {}
self.used_names: set[str] = set()
def private_name(self, module: str, partial_name: str | None = None) -> str:
"""Return a C name usable for a static definition.
Return a distinct result for each (module, partial_name) pair.
The caller should add a suitable prefix to the name to avoid
conflicts with other C names. Only ensure that the results of
this function are unique, not that they aren't overlapping with
arbitrary names.
If a name is not specific to any module, the module argument can
be an empty string.
"""
# TODO: Support unicode
if partial_name is None:
return exported_name(self.module_map[module].rstrip("."))
if (module, partial_name) in self.translations:
return self.translations[module, partial_name]
if module in self.module_map:
module_prefix = self.module_map[module]
elif module:
module_prefix = module + "."
else:
module_prefix = ""
actual = exported_name(f"{module_prefix}{partial_name}")
self.translations[module, partial_name] = actual
return actual
def exported_name(fullname: str) -> str:
"""Return a C name usable for an exported definition.
This is like private_name(), but the output only depends on the
'fullname' argument, so the names are distinct across multiple
builds.
"""
# TODO: Support unicode
return fullname.replace("___", "___3_").replace(".", "___")
def make_module_translation_map(names: list[str]) -> dict[str, str]:
num_instances: dict[str, int] = {}
for name in names:
for suffix in candidate_suffixes(name):
num_instances[suffix] = num_instances.get(suffix, 0) + 1
result = {}
for name in names:
for suffix in candidate_suffixes(name):
if num_instances[suffix] == 1:
break
# Takes the last suffix if none are unique
result[name] = suffix
return result
def candidate_suffixes(fullname: str) -> list[str]:
components = fullname.split(".")
result = [""]
for i in range(len(components)):
result.append(".".join(components[-i - 1 :]) + ".")
return result
| NameGenerator |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/airflow_release_validator.py | {
"start": 1130,
"end": 16052
} | class ____(ReleaseValidator):
APACHE_RAT_JAR_DOWNLOAD_URL = (
"https://downloads.apache.org/creadur/apache-rat-0.17/apache-rat-0.17-bin.tar.gz"
)
def __init__(
self,
version: str,
svn_path: Path,
airflow_repo_root: Path,
task_sdk_version: str | None = None,
):
super().__init__(version, svn_path, airflow_repo_root)
self.task_sdk_version = task_sdk_version or version
self.version_without_rc = self._strip_rc_suffix(version)
self.task_sdk_version_without_rc = self._strip_rc_suffix(self.task_sdk_version)
@property
def expected_airflow_file_bases(self) -> list[str]:
return [
f"apache_airflow-{self.version_without_rc}-source.tar.gz",
f"apache_airflow-{self.version_without_rc}.tar.gz",
f"apache_airflow-{self.version_without_rc}-py3-none-any.whl",
f"apache_airflow_core-{self.version_without_rc}.tar.gz",
f"apache_airflow_core-{self.version_without_rc}-py3-none-any.whl",
]
@property
def expected_task_sdk_file_bases(self) -> list[str]:
return [
f"apache_airflow_task_sdk-{self.task_sdk_version_without_rc}.tar.gz",
f"apache_airflow_task_sdk-{self.task_sdk_version_without_rc}-py3-none-any.whl",
]
def get_distribution_name(self) -> str:
return "Apache Airflow"
def get_svn_directory(self) -> Path:
return self.svn_path / self.version
def get_task_sdk_svn_directory(self) -> Path:
return self.svn_path / "task-sdk" / self.task_sdk_version
def get_expected_files(self) -> list[str]:
files = []
for base in self.expected_airflow_file_bases:
files.extend([base, f"{base}.asc", f"{base}.sha512"])
return files
def get_task_sdk_expected_files(self) -> list[str]:
files = []
for base in self.expected_task_sdk_file_bases:
files.extend([base, f"{base}.asc", f"{base}.sha512"])
return files
def validate_svn_files(self):
console_print("\n[bold]SVN File Verification[/bold]")
start_time = time.time()
airflow_svn_dir = self.get_svn_directory()
task_sdk_svn_dir = self.get_task_sdk_svn_directory()
console_print(f"Checking Airflow directory: {airflow_svn_dir}")
if not airflow_svn_dir.exists():
return ValidationResult(
check_type=CheckType.SVN,
passed=False,
message=f"Airflow SVN directory not found: {airflow_svn_dir}",
duration_seconds=time.time() - start_time,
)
console_print(f"Checking Task SDK directory: {task_sdk_svn_dir}")
if not task_sdk_svn_dir.exists():
return ValidationResult(
check_type=CheckType.SVN,
passed=False,
message=f"Task SDK SVN directory not found: {task_sdk_svn_dir}",
duration_seconds=time.time() - start_time,
)
actual_airflow = {f.name for f in airflow_svn_dir.iterdir() if f.is_file()}
expected_airflow = set(self.get_expected_files())
missing_airflow = expected_airflow - actual_airflow
actual_task_sdk = {f.name for f in task_sdk_svn_dir.iterdir() if f.is_file()}
expected_task_sdk = set(self.get_task_sdk_expected_files())
missing_task_sdk = expected_task_sdk - actual_task_sdk
details = []
if missing_airflow:
details.append(f"Missing {len(missing_airflow)} Airflow files:")
details.extend([f" - {f}" for f in sorted(missing_airflow)[:10]])
if missing_task_sdk:
details.append(f"Missing {len(missing_task_sdk)} Task SDK files:")
details.extend([f" - {f}" for f in sorted(missing_task_sdk)[:10]])
missing = list(missing_airflow) + list(missing_task_sdk)
total_expected = len(expected_airflow) + len(expected_task_sdk)
message = (
f"All {total_expected} expected files present" if not missing else f"Missing {len(missing)} files"
)
result = ValidationResult(
check_type=CheckType.SVN,
passed=not missing,
message=message,
details=details or None,
duration_seconds=time.time() - start_time,
)
self._print_result(result)
return result
def validate_signatures(self):
console_print("\n[bold]GPG Signature Verification[/bold]")
start_time = time.time()
asc_files = []
for svn_dir in [self.get_svn_directory(), self.get_task_sdk_svn_directory()]:
if svn_dir.exists():
asc_files.extend(svn_dir.glob("*.asc"))
if not asc_files:
return ValidationResult(
check_type=CheckType.SIGNATURES,
passed=False,
message="No .asc files found",
duration_seconds=time.time() - start_time,
)
failed = [
f.name
for f in asc_files
if run_command(["gpg", "--verify", str(f)], check=False, capture_output=True).returncode != 0
]
message = (
f"All {len(asc_files)} signatures verified" if not failed else f"{len(failed)} signatures failed"
)
result = ValidationResult(
check_type=CheckType.SIGNATURES,
passed=not failed,
message=message,
details=failed or None,
duration_seconds=time.time() - start_time,
)
self._print_result(result)
return result
def validate_checksums(self):
console_print("\n[bold]SHA512 Checksum Verification[/bold]")
start_time = time.time()
sha512_files = []
for svn_dir in [self.get_svn_directory(), self.get_task_sdk_svn_directory()]:
if svn_dir.exists():
sha512_files.extend(svn_dir.glob("*.sha512"))
if not sha512_files:
return ValidationResult(
check_type=CheckType.CHECKSUMS,
passed=False,
message="No .sha512 files found",
duration_seconds=time.time() - start_time,
)
failed = []
for sha_file in sha512_files:
expected = sha_file.read_text().split()[0]
target_file = sha_file.parent / sha_file.name.replace(".sha512", "")
if not target_file.exists():
failed.append(f"{sha_file.name} (target missing)")
continue
result = run_command(
["shasum", "-a", "512", str(target_file)], check=False, capture_output=True, text=True
)
if result.returncode != 0 or result.stdout.split()[0] != expected:
failed.append(sha_file.name)
message = (
f"All {len(sha512_files)} checksums valid" if not failed else f"{len(failed)} checksums failed"
)
result = ValidationResult(
check_type=CheckType.CHECKSUMS,
passed=not failed,
message=message,
details=failed or None,
duration_seconds=time.time() - start_time,
)
self._print_result(result)
return result
def validate_reproducible_build(self):
console_print("\n[bold]Reproducible Build Verification[/bold]")
start_time = time.time()
dist_dir = self.airflow_repo_root / "dist"
if dist_dir.exists():
console_print("Cleaning dist directory...")
shutil.rmtree(dist_dir)
dist_dir.mkdir()
console_print("Building packages from source...")
if not self.build_packages():
return ValidationResult(
check_type=CheckType.REPRODUCIBLE_BUILD,
passed=False,
message="Failed to build packages",
duration_seconds=time.time() - start_time,
)
differences = []
for pattern in ["*.tar.gz", "*.whl"]:
for file in dist_dir.glob(pattern):
svn_dir = (
self.get_task_sdk_svn_directory() if "task_sdk" in file.name else self.get_svn_directory()
)
svn_file = svn_dir / file.name
if svn_file.exists() and not filecmp.cmp(file, svn_file, shallow=False):
differences.append(file.name)
message = "All packages are identical" if not differences else f"{len(differences)} packages differ"
result = ValidationResult(
check_type=CheckType.REPRODUCIBLE_BUILD,
passed=not differences,
message=message,
details=differences or None,
duration_seconds=time.time() - start_time,
)
self._print_result(result)
return result
def validate_licenses(self):
console_print("\n[bold]Apache RAT License Verification[/bold]")
start_time = time.time()
rat_jar = Path("/tmp/apache-rat-0.17/apache-rat-0.17.jar")
source_dir = Path("/tmp/apache-airflow-src")
if not rat_jar.exists():
console_print("Downloading Apache RAT...")
wget_result = run_command(
["wget", "-qO-", self.APACHE_RAT_JAR_DOWNLOAD_URL],
check=False,
capture_output=True,
)
if wget_result.returncode != 0:
return ValidationResult(
check_type=CheckType.LICENSES,
passed=False,
message="Failed to download Apache RAT",
duration_seconds=time.time() - start_time,
)
subprocess.run(["tar", "-C", "/tmp", "-xzf", "-"], input=wget_result.stdout, check=True)
console_print("[green]Apache RAT downloaded[/green]")
else:
console_print("[green]Apache RAT already present[/green]")
source_tarball = self.get_svn_directory() / f"apache_airflow-{self.version_without_rc}-source.tar.gz"
if not source_tarball.exists():
return ValidationResult(
check_type=CheckType.LICENSES,
passed=False,
message=f"Source tarball not found: {source_tarball}",
duration_seconds=time.time() - start_time,
)
console_print(f"Extracting source to {source_dir}...")
if source_dir.exists():
shutil.rmtree(source_dir)
source_dir.mkdir(parents=True)
with tarfile.open(source_tarball, "r:gz") as tar:
for member in tar.getmembers():
member.name = "/".join(member.name.split("/")[1:])
if member.name:
tar.extract(member, source_dir)
rat_excludes = source_dir / ".rat-excludes"
console_print("Running Apache RAT...")
result = run_command(
[
"java",
"-jar",
str(rat_jar),
"--input-exclude-file",
str(rat_excludes) if rat_excludes.exists() else "/dev/null",
str(source_dir),
],
check=False,
capture_output=True,
text=True,
)
error_lines = [line.strip() for line in result.stdout.split("\n") if line.strip().startswith("!")]
unapproved = unknown = 0
for line in result.stdout.split("\n"):
if "Unapproved:" in line:
try:
unapproved = int(line.split("Unapproved:")[1].split()[0])
except (IndexError, ValueError):
pass
if "Unknown:" in line:
try:
unknown = int(line.split("Unknown:")[1].split()[0])
except (IndexError, ValueError):
pass
details = []
if error_lines:
details.append(f"Found {len(error_lines)} license issues:")
details.extend(error_lines[:10])
if len(error_lines) > 10:
details.append(f"... and {len(error_lines) - 10} more")
if unapproved > 0:
details.append(f"Unapproved licenses: {unapproved}")
if unknown > 0:
details.append(f"Unknown licenses: {unknown}")
passed = not error_lines and unapproved == 0 and unknown == 0
message = (
"All files have approved licenses"
if passed
else f"Found {len(error_lines)} issues, {unapproved} unapproved, {unknown} unknown"
)
result = ValidationResult(
check_type=CheckType.LICENSES,
passed=passed,
message=message,
details=details or None,
duration_seconds=time.time() - start_time,
)
self._print_result(result)
return result
def build_packages(self) -> bool:
console_print("Building Airflow distributions...")
if (
run_command(
[
"breeze",
"release-management",
"prepare-airflow-distributions",
"--distribution-format",
"both",
],
cwd=str(self.airflow_repo_root),
check=False,
capture_output=True,
).returncode
!= 0
):
console_print("[red]Failed to build Airflow distributions[/red]")
return False
console_print("Building Task SDK distributions...")
if (
run_command(
[
"breeze",
"release-management",
"prepare-task-sdk-distributions",
"--distribution-format",
"both",
],
cwd=str(self.airflow_repo_root),
check=False,
capture_output=True,
).returncode
!= 0
):
console_print("[red]Failed to build Task SDK distributions[/red]")
return False
console_print("Building source tarball...")
cmd = [
"breeze",
"release-management",
"prepare-tarball",
"--tarball-type",
"apache_airflow",
"--version",
self.version_without_rc,
]
if version_suffix := self._get_version_suffix():
cmd.extend(["--version-suffix", version_suffix])
if (
run_command(cmd, cwd=str(self.airflow_repo_root), check=False, capture_output=True).returncode
!= 0
):
console_print("[red]Failed to build source tarball[/red]")
return False
console_print("[green]All packages built successfully[/green]")
return True
| AirflowReleaseValidator |
python | mlflow__mlflow | tests/pyfunc/test_pyfunc_schema_enforcement.py | {
"start": 2525,
"end": 3909
} | class ____(mlflow.pyfunc.PythonModel):
def predict(self, context, model_input, params=None):
assert isinstance(params, dict)
assert isinstance(params["str_param"], str)
assert isinstance(params["int_param"], int)
assert isinstance(params["bool_param"], bool)
assert isinstance(params["double_param"], float)
assert isinstance(params["float_param"], float)
assert isinstance(params["long_param"], int)
assert isinstance(params["datetime_param"], datetime.datetime)
assert isinstance(params["str_list"], list)
assert all(isinstance(x, str) for x in params["str_list"])
assert isinstance(params["bool_list"], list)
assert all(isinstance(x, bool) for x in params["bool_list"])
assert isinstance(params["double_array"], list)
assert all(isinstance(x, float) for x in params["double_array"])
return params
@pytest.fixture(scope="module")
def sample_params_with_arrays():
return {
"int_array": np.array([np.int32(1), np.int32(2)]),
"double_array": np.array([1.0, 2.0]),
"float_array": np.array([np.float32(1.0), np.float32(2.0)]),
"long_array": np.array([1, 2]),
"datetime_array": np.array(
[np.datetime64("2023-06-26 00:00:00"), np.datetime64("2023-06-26 00:00:00")]
),
}
| PythonModelWithBasicParams |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 41694,
"end": 41873
} | class ____(BaseModel, extra="forbid"):
formula: "Expression" = Field(..., description="")
defaults: Optional[Dict[str, Any]] = Field(default={}, description="")
| FormulaQuery |
python | numpy__numpy | benchmarks/benchmarks/bench_creation.py | {
"start": 74,
"end": 623
} | class ____(Benchmark):
""" Benchmark meshgrid generation
"""
params = [[16, 32],
[2, 3, 4],
['ij', 'xy'], TYPES1]
param_names = ['size', 'ndims', 'ind', 'ndtype']
timeout = 10
def setup(self, size, ndims, ind, ndtype):
rnd = np.random.RandomState(1864768776)
self.grid_dims = [(rnd.random_sample(size)).astype(ndtype) for
x in range(ndims)]
def time_meshgrid(self, size, ndims, ind, ndtype):
np.meshgrid(*self.grid_dims, indexing=ind)
| MeshGrid |
python | ApeWorX__ape | src/ape_ethereum/multicall/exceptions.py | {
"start": 557,
"end": 681
} | class ____(MulticallException):
def __init__(self):
super().__init__("Multicall not executed yet.")
| NotExecutedError |
python | python__mypy | mypyc/test/test_misc.py | {
"start": 253,
"end": 690
} | class ____(unittest.TestCase):
def test_debug_op(self) -> None:
block = BasicBlock()
builder = LowLevelIRBuilder(errors=None, options=CompilerOptions())
builder.activate_block(block)
builder.debug_print("foo")
names = generate_names_for_ir([], [block])
code = format_blocks([block], names, {})
assert code[:-1] == ["L0:", " r0 = 'foo'", " CPyDebug_PrintObject(r0)"]
| TestMisc |
python | getsentry__sentry | src/sentry/integrations/repository/notification_action.py | {
"start": 1622,
"end": 1810
} | class ____(NotificationActionNotificationMessageValidationError):
message = "both action and group need to exist together with a reference"
@dataclass
| ActionAndGroupActionValidationError |
python | huggingface__transformers | src/transformers/models/layoutlmv3/modeling_layoutlmv3.py | {
"start": 37315,
"end": 41582
} | class ____(LayoutLMv3PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlmv3 = LayoutLMv3Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if config.num_labels < 10:
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
else:
self.classifier = LayoutLMv3ClassificationHead(config, pool_feature=False)
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
bbox: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
pixel_values: Optional[torch.LongTensor] = None,
) -> Union[tuple, TokenClassifierOutput]:
r"""
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForTokenClassification
>>> from datasets import load_dataset
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
>>> model = AutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7)
>>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
>>> example = dataset[0]
>>> image = example["image"]
>>> words = example["tokens"]
>>> boxes = example["bboxes"]
>>> word_labels = example["ner_tags"]
>>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
>>> outputs = model(**encoding)
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv3(
input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
pixel_values=pixel_values,
)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
# only take the text part of the output representations
sequence_output = outputs[0][:, :seq_length]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| LayoutLMv3ForTokenClassification |
python | fastai__fastai | fastai/callback/tensorboard.py | {
"start": 3505,
"end": 7389
} | class ____(TensorBoardBaseCallback):
"Extracts and exports image featuers for tensorboard projector during inference"
def __init__(self, log_dir=None, layer=None):
super().__init__()
store_attr()
def before_fit(self):
self.run = not hasattr(self.learn, 'lr_finder') and hasattr(self, "gather_preds") and rank_distrib()==0
if not self.run: return
self._setup_writer()
def before_validate(self):
self._setup_projector()
# %% ../../nbs/70a_callback.tensorboard.ipynb 25
def _write_projector_embedding(learn, writer, feat):
lbls = [learn.dl.vocab[l] for l in feat['lbl']] if getattr(learn.dl, 'vocab', None) else None
vecs = feat['vec'].squeeze()
writer.add_embedding(vecs, metadata=lbls, label_img=feat['img'], global_step=learn.train_iter)
# %% ../../nbs/70a_callback.tensorboard.ipynb 26
def _add_projector_features(learn, hook, feat):
img = _normalize_for_projector(learn.x)
first_epoch = True if learn.iter == 0 else False
feat['vec'] = hook.stored if first_epoch else torch.cat((feat['vec'], hook.stored),0)
feat['img'] = img if first_epoch else torch.cat((feat['img'], img),0)
if getattr(learn.dl, 'vocab', None):
feat['lbl'] = learn.y if first_epoch else torch.cat((feat['lbl'], learn.y),0)
return feat
# %% ../../nbs/70a_callback.tensorboard.ipynb 27
def _get_embeddings(model, layer):
layer = model[0].encoder if layer == None else layer
return layer.weight
# %% ../../nbs/70a_callback.tensorboard.ipynb 28
@dispatch
def _normalize_for_projector(x:TensorImage):
# normalize tensor to be between 0-1
img = x.clone()
sz = img.shape
img = img.view(x.size(0), -1)
img -= img.min(1, keepdim=True)[0]
img /= img.max(1, keepdim=True)[0]
img = img.view(*sz)
return img
# %% ../../nbs/70a_callback.tensorboard.ipynb 29
from ..text.all import LMLearner, TextLearner
# %% ../../nbs/70a_callback.tensorboard.ipynb 30
def projector_word_embeddings(learn=None, layer=None, vocab=None, limit=-1, start=0, log_dir=None):
"Extracts and exports word embeddings from language models embedding layers"
if not layer:
if isinstance(learn, LMLearner): layer = learn.model[0].encoder
elif isinstance(learn, TextLearner): layer = learn.model[0].module.encoder
emb = layer.weight
img = torch.full((len(emb),3,8,8), 0.7)
vocab = learn.dls.vocab[0] if vocab == None else vocab
vocab = list(map(lambda x: f'{x}_', vocab))
writer = SummaryWriter(log_dir=log_dir)
end = start + limit if limit >= 0 else -1
writer.add_embedding(emb[start:end], metadata=vocab[start:end], label_img=img[start:end])
writer.close()
# %% ../../nbs/70a_callback.tensorboard.ipynb 32
from ..vision.data import *
# %% ../../nbs/70a_callback.tensorboard.ipynb 33
@dispatch
def tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step):
fig,axs = get_grid(len(samples), return_fig=True)
for i in range(2):
axs = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs)]
axs = [r.show(ctx=c, color='green' if b==r else 'red')
for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)]
writer.add_figure('Sample results', fig, step)
# %% ../../nbs/70a_callback.tensorboard.ipynb 34
from ..vision.core import TensorPoint,TensorBBox
# %% ../../nbs/70a_callback.tensorboard.ipynb 35
@dispatch
def tensorboard_log(x:TensorImage, y: TensorImageBase|TensorPoint|TensorBBox, samples, outs, writer, step):
fig,axs = get_grid(len(samples), return_fig=True, double=True)
for i in range(2):
axs[::2] = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs[::2])]
for x in [samples,outs]:
axs[1::2] = [b.show(ctx=c) for b,c in zip(x.itemgot(0),axs[1::2])]
writer.add_figure('Sample results', fig, step)
| TensorBoardProjectorCallback |
python | django__django | django/core/exceptions.py | {
"start": 1718,
"end": 1835
} | class ____(Exception):
"""The request was closed before it was completed, or timed out."""
pass
| RequestAborted |
python | wandb__wandb | wandb/sdk/internal/job_builder.py | {
"start": 913,
"end": 2014
} | class ____:
def __init__(self, major: int, minor: int, patch: int):
self._major = major
self._minor = minor
self._patch = patch
def __repr__(self) -> str:
return f"{self._major}.{self._minor}.{self._patch}"
def __lt__(self, other: "Version") -> bool:
if self._major < other._major:
return True
elif self._major == other._major:
if self._minor < other._minor:
return True
elif self._minor == other._minor:
if self._patch < other._patch:
return True
return False
def __eq__(self, other: object) -> bool:
if not isinstance(other, Version):
return NotImplemented
return (
self._major == other._major
and self._minor == other._minor
and self._patch == other._patch
)
# Minimum supported wandb version for keys in the source dict of wandb-job.json
SOURCE_KEYS_MIN_SUPPORTED_VERSION = {
"dockerfile": Version(0, 17, 0),
"build_context": Version(0, 17, 0),
}
| Version |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/tfr/python/tfr_gen.py | {
"start": 20488,
"end": 21722
} | class ____(object):
"""Symbol Table for python code."""
def __init__(self):
self.symbols = []
self.enter_scope()
self.scf_scope = 0
# reserved key words
self.insert_symbol('len', 'len', TFRTypes.PY_BUILTIN_FUNC)
def enter_scope(self, scf_scope=False):
"""Enter a new scope - at function level."""
self.symbols.append({'types': {}, 'symbols': {}})
self.curr_table = self.symbols[len(self.symbols) - 1]
if scf_scope:
self.scf_scope += 1
def insert_symbol(self, name, value, type_):
self.curr_table['symbols'][name] = (value, type_)
# TODO(mdan): Use the inferred type rather than tracking it here.
# The following field is deprecated.
self.curr_table['types'][name] = type_
return value
def exit_scope(self):
self.symbols.pop()
self.curr_table = self.symbols[len(self.symbols) - 1]
if self.scf_scope > 0:
self.scf_scope -= 1
def in_scf_scope(self):
return self.scf_scope > 0
def lookup(self, name):
curr_idx = len(self.symbols) - 1
while curr_idx >= 0 and (name not in self.symbols[curr_idx]['symbols']):
curr_idx -= 1
if curr_idx < 0:
return None
return self.symbols[curr_idx]['symbols'][name]
| SymbolTable |
python | kamyu104__LeetCode-Solutions | Python/faulty-keyboard.py | {
"start": 58,
"end": 452
} | class ____(object):
def finalString(self, s):
"""
:type s: str
:rtype: str
"""
dq = collections.deque()
parity = 0
for x in s:
if x == 'i':
parity ^= 1
else:
dq.appendleft(x) if parity else dq.append(x)
if parity:
dq.reverse()
return "".join(dq)
| Solution |
python | encode__django-rest-framework | tests/browsable_api/test_browsable_nested_api.py | {
"start": 793,
"end": 1294
} | class ____(TestCase):
"""Tests correct dropdown behavior with Auth views enabled."""
@override_settings(ROOT_URLCONF='tests.browsable_api.test_browsable_nested_api')
def test_login(self):
response = self.client.get('/api/')
assert 200 == response.status_code
content = response.content.decode()
assert 'form action="/api/"' in content
assert 'input name="nested.one"' in content
assert 'input name="nested.two"' in content
| DropdownWithAuthTests |
python | tensorflow__tensorflow | tensorflow/python/training/input_test.py | {
"start": 3708,
"end": 5983
} | class ____(test_lib.TestCase):
def testNoShuffle(self):
with ops.Graph().as_default(), self.cached_session():
input_tensor = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
num_epochs = 2
queue = inp.input_producer(
input_tensor, num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(input_tensor) * num_epochs)
dequeue = queue.dequeue()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
self.assertAllEqual(input_tensor * num_epochs,
self.evaluate(dequeue_many))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
def testNoShapeInference(self):
with ops.Graph().as_default(), self.cached_session():
# Disable shape inference for the input.
input_value = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]]
input_tensor = array_ops.placeholder_with_default(input_value, shape=None)
num_epochs = 2
queue = inp.input_producer(
input_tensor, element_shape=[4], num_epochs=num_epochs, shuffle=False)
dequeue_many = queue.dequeue_many(len(input_value) * num_epochs)
dequeue = queue.dequeue()
self.evaluate(variables.global_variables_initializer())
variables.local_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
# No randomness, so just see repeated copies of the input.
self.assertAllEqual(input_value * num_epochs, self.evaluate(dequeue_many))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(dequeue)
for thread in threads:
thread.join()
def testShapeError(self):
with ops.Graph().as_default():
input_tensor = array_ops.placeholder(dtypes.float32, None)
with self.assertRaisesRegex(ValueError, "fully defined shape"):
_ = inp.input_producer(input_tensor)
| InputProducerTest |
python | getsentry__sentry | src/sentry/runner/commands/repair.py | {
"start": 286,
"end": 1787
} | class ____(Exception):
pass
@contextmanager
def catchable_atomic() -> Generator[None]:
try:
with transaction.atomic("default"):
yield
except RollbackLocally:
pass
@region_silo_function
def create_missing_dsns() -> None:
from sentry.models.project import Project
from sentry.models.projectkey import ProjectKey
click.echo("Creating missing DSNs")
queryset = Project.objects.filter(key_set__isnull=True)
for project in queryset:
try:
ProjectKey.objects.get_or_create(project=project)
except ProjectKey.MultipleObjectsReturned:
pass
@region_silo_function
def fix_group_counters() -> None:
from django.db import connection
click.echo("Correcting Group.num_comments counter")
cursor = connection.cursor()
cursor.execute(
"""
UPDATE sentry_groupedmessage SET num_comments = (
SELECT COUNT(*) from sentry_activity
WHERE type = %s and group_id = sentry_groupedmessage.id
)
""",
[ActivityType.NOTE.value],
)
@click.command()
@configuration
def repair() -> None:
"""Attempt to repair any invalid data.
This by default will correct some common issues like projects missing
DSNs or counters desynchronizing.
"""
try:
create_missing_dsns()
fix_group_counters()
except SiloLimit.AvailabilityError:
click.echo("Skipping repair operations due to silo restrictions")
| RollbackLocally |
python | ansible__ansible | lib/ansible/module_utils/facts/system/fips.py | {
"start": 373,
"end": 784
} | class ____(BaseFactCollector):
name = 'fips'
_fact_ids = set() # type: t.Set[str]
def collect(self, module=None, collected_facts=None):
# NOTE: this is populated even if it is not set
fips_facts = {
'fips': False
}
if get_file_content('/proc/sys/crypto/fips_enabled') == '1':
fips_facts['fips'] = True
return fips_facts
| FipsFactCollector |
python | gwtw__py-sorting | test/radix_sort_test.py | {
"start": 278,
"end": 513
} | class ____(unittest.TestCase,
BasePositiveIntegerSortTest,
BaseNegativeIntegerSortTest):
def setUp(self):
self.sort = radix_sort.sort
if __name__ == '__main__':
unittest.main()
| RadixSortTest |
python | huggingface__transformers | src/transformers/models/biogpt/modeling_biogpt.py | {
"start": 29180,
"end": 34283
} | class ____(BioGptPreTrainedModel):
def __init__(self, config: BioGptConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.biogpt = BioGptModel(config)
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
position_ids: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
) -> Union[tuple, SequenceClassifierOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.biogpt(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = transformer_outputs[0]
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.score(hidden_states[:, slice_indices, :])
if input_ids is not None:
batch_size, sequence_length = input_ids.shape[:2]
else:
batch_size, sequence_length = inputs_embeds.shape[:2]
if self.config.pad_token_id is None:
sequence_length = -1
else:
if input_ids is not None:
sequence_length = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
else:
sequence_length = -1
logger.warning_once(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_length]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
def get_input_embeddings(self):
return self.biogpt.embed_tokens
def set_input_embeddings(self, value):
self.biogpt.embed_tokens = value
__all__ = [
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
| BioGptForSequenceClassification |
python | huggingface__transformers | src/transformers/models/bart/modeling_bart.py | {
"start": 5018,
"end": 10612
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
is_causal: bool = False,
config: Optional[BartConfig] = None,
layer_idx: Optional[int] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(
f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
"will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
cache_position: Optional[torch.Tensor] = None,
# TODO: we need a refactor so that the different attention modules can get their specific kwargs
# ATM, we have mixed things encoder, decoder, and encoder-decoder attn
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
# get query proj
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(*kv_input_shape).transpose(1, 2)
value_states = value_states.view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
output_attentions=output_attentions,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
| BartAttention |
python | matplotlib__matplotlib | lib/matplotlib/ticker.py | {
"start": 39087,
"end": 40608
} | class ____(LogFormatter):
"""
Format values for log axis using ``exponent = log_base(value)``.
"""
def _non_decade_format(self, sign_string, base, fx, usetex):
"""Return string for non-decade locations."""
return r'$\mathdefault{%s%s^{%.2f}}$' % (sign_string, base, fx)
def __call__(self, x, pos=None):
# docstring inherited
if x == 0: # Symlog
return r'$\mathdefault{0}$'
sign_string = '-' if x < 0 else ''
x = abs(x)
b = self._base
# only label the decades
fx = math.log(x) / math.log(b)
is_x_decade = _is_close_to_int(fx)
exponent = round(fx) if is_x_decade else np.floor(fx)
coeff = round(b ** (fx - exponent))
if self.labelOnlyBase and not is_x_decade:
return ''
if self._sublabels is not None and coeff not in self._sublabels:
return ''
if is_x_decade:
fx = round(fx)
# use string formatting of the base if it is not an integer
if b % 1 == 0.0:
base = '%d' % b
else:
base = '%s' % b
if abs(fx) < mpl.rcParams['axes.formatter.min_exponent']:
return r'$\mathdefault{%s%g}$' % (sign_string, x)
elif not is_x_decade:
usetex = mpl.rcParams['text.usetex']
return self._non_decade_format(sign_string, base, fx, usetex)
else:
return r'$\mathdefault{%s%s^{%d}}$' % (sign_string, base, fx)
| LogFormatterMathtext |
python | matplotlib__matplotlib | lib/matplotlib/patheffects.py | {
"start": 7002,
"end": 7832
} | class ____(AbstractPathEffect):
"""A line based PathEffect which re-draws a stroke."""
def __init__(self, offset=(0, 0), **kwargs):
"""
The path will be stroked with its gc updated with the given
keyword arguments, i.e., the keyword arguments should be valid
gc parameter values.
"""
super().__init__(offset)
self._gc = kwargs
def draw_path(self, renderer, gc, tpath, affine, rgbFace):
"""Draw the path with updated gc."""
gc0 = renderer.new_gc() # Don't modify gc, but a copy!
gc0.copy_properties(gc)
gc0 = self._update_gc(gc0, self._gc)
renderer.draw_path(
gc0, tpath, affine + self._offset_transform(renderer), rgbFace)
gc0.restore()
withStroke = _subclass_with_normal(effect_class=Stroke)
| Stroke |
python | lazyprogrammer__machine_learning_examples | unsupervised_class2/autoencoder.py | {
"start": 1042,
"end": 4337
} | class ____(object):
def __init__(self, M, an_id):
self.M = M
self.id = an_id
def fit(self, X, learning_rate=0.5, mu=0.99, epochs=1, batch_sz=100, show_fig=False):
# cast to float
mu = np.float32(mu)
learning_rate = np.float32(learning_rate)
N, D = X.shape
n_batches = N // batch_sz
W0 = init_weights((D, self.M))
self.W = theano.shared(W0, 'W_%s' % self.id)
self.bh = theano.shared(np.zeros(self.M, dtype=np.float32), 'bh_%s' % self.id)
self.bo = theano.shared(np.zeros(D, dtype=np.float32), 'bo_%s' % self.id)
self.params = [self.W, self.bh, self.bo]
self.forward_params = [self.W, self.bh]
# TODO: technically these should be reset before doing backprop
self.dW = theano.shared(np.zeros(W0.shape), 'dW_%s' % self.id)
self.dbh = theano.shared(np.zeros(self.M), 'dbh_%s' % self.id)
self.dbo = theano.shared(np.zeros(D), 'dbo_%s' % self.id)
self.dparams = [self.dW, self.dbh, self.dbo]
self.forward_dparams = [self.dW, self.dbh]
X_in = T.matrix('X_%s' % self.id)
X_hat = self.forward_output(X_in)
# attach it to the object so it can be used later
# must be sigmoidal because the output is also a sigmoid
H = T.nnet.sigmoid(X_in.dot(self.W) + self.bh)
self.hidden_op = theano.function(
inputs=[X_in],
outputs=H,
)
# save this for later so we can call it to
# create reconstructions of input
self.predict = theano.function(
inputs=[X_in],
outputs=X_hat,
)
cost = -(X_in * T.log(X_hat) + (1 - X_in) * T.log(1 - X_hat)).flatten().mean()
cost_op = theano.function(
inputs=[X_in],
outputs=cost,
)
updates = momentum_updates(cost, self.params, mu, learning_rate)
train_op = theano.function(
inputs=[X_in],
updates=updates,
)
costs = []
print("training autoencoder: %s" % self.id)
print("epochs to do:", epochs)
for i in range(epochs):
print("epoch:", i)
X = shuffle(X)
for j in range(n_batches):
batch = X[j*batch_sz:(j*batch_sz + batch_sz)]
train_op(batch)
the_cost = cost_op(batch) # technically we could also get the cost for Xtest here
if j % 10 == 0:
print("j / n_batches:", j, "/", n_batches, "cost:", the_cost)
costs.append(the_cost)
if show_fig:
plt.plot(costs)
plt.show()
def forward_hidden(self, X):
Z = T.nnet.sigmoid(X.dot(self.W) + self.bh)
return Z
def forward_output(self, X):
Z = self.forward_hidden(X)
Y = T.nnet.sigmoid(Z.dot(self.W.T) + self.bo)
return Y
@staticmethod
def createFromArrays(W, bh, bo, an_id):
ae = AutoEncoder(W.shape[1], an_id)
ae.W = theano.shared(W, 'W_%s' % ae.id)
ae.bh = theano.shared(bh, 'bh_%s' % ae.id)
ae.bo = theano.shared(bo, 'bo_%s' % ae.id)
ae.params = [ae.W, ae.bh, ae.bo]
ae.forward_params = [ae.W, ae.bh]
return ae
| AutoEncoder |
python | plotly__plotly.py | plotly/graph_objs/_candlestick.py | {
"start": 215,
"end": 64788
} | class ____(_BaseTraceType):
_parent_path_str = ""
_path_str = "candlestick"
_valid_props = {
"close",
"closesrc",
"customdata",
"customdatasrc",
"decreasing",
"high",
"highsrc",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatefallback",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"increasing",
"legend",
"legendgroup",
"legendgrouptitle",
"legendrank",
"legendwidth",
"line",
"low",
"lowsrc",
"meta",
"metasrc",
"name",
"opacity",
"open",
"opensrc",
"selectedpoints",
"showlegend",
"stream",
"text",
"textsrc",
"type",
"uid",
"uirevision",
"visible",
"whiskerwidth",
"x",
"xaxis",
"xcalendar",
"xhoverformat",
"xperiod",
"xperiod0",
"xperiodalignment",
"xsrc",
"yaxis",
"yhoverformat",
"zorder",
}
@property
def close(self):
"""
Sets the close values.
The 'close' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["close"]
@close.setter
def close(self, val):
self["close"] = val
@property
def closesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `close`.
The 'closesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["closesrc"]
@closesrc.setter
def closesrc(self, val):
self["closesrc"] = val
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
@property
def decreasing(self):
"""
The 'decreasing' property is an instance of Decreasing
that may be specified as:
- An instance of :class:`plotly.graph_objs.candlestick.Decreasing`
- A dict of string/value properties that will be passed
to the Decreasing constructor
Returns
-------
plotly.graph_objs.candlestick.Decreasing
"""
return self["decreasing"]
@decreasing.setter
def decreasing(self, val):
self["decreasing"] = val
@property
def high(self):
"""
Sets the high values.
The 'high' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["high"]
@high.setter
def high(self, val):
self["high"] = val
@property
def highsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `high`.
The 'highsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["highsrc"]
@highsrc.setter
def highsrc(self, val):
self["highsrc"] = val
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.candlestick.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Returns
-------
plotly.graph_objs.candlestick.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Variables that can't be found will be
replaced with the specifier. For example, a template of "data:
%{x}, %{y}" will result in a value of "data: 1, %{y}" if x is 1
and y is missing. Variables with an undefined value will be
replaced with the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Finally, the template string has access to variables `open`,
`high`, `low` and `close`. Anything contained in tag `<extra>`
is displayed in the secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
@property
def hovertemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'hovertemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["hovertemplatefallback"]
@hovertemplatefallback.setter
def hovertemplatefallback(self, val):
self["hovertemplatefallback"] = val
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
@property
def increasing(self):
"""
The 'increasing' property is an instance of Increasing
that may be specified as:
- An instance of :class:`plotly.graph_objs.candlestick.Increasing`
- A dict of string/value properties that will be passed
to the Increasing constructor
Returns
-------
plotly.graph_objs.candlestick.Increasing
"""
return self["increasing"]
@increasing.setter
def increasing(self, val):
self["increasing"] = val
@property
def legend(self):
"""
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2", "legend3",
etc. Settings for these legends are set in the layout, under
`layout.legend`, `layout.legend2`, etc.
The 'legend' property is an identifier of a particular
subplot, of type 'legend', that may be specified as the string 'legend'
optionally followed by an integer >= 1
(e.g. 'legend', 'legend1', 'legend2', 'legend3', etc.)
Returns
-------
str
"""
return self["legend"]
@legend.setter
def legend(self, val):
self["legend"] = val
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces and shapes part of
the same legend group hide/show at the same time when toggling
legend items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.candlestick.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Returns
-------
plotly.graph_objs.candlestick.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
"reversed" `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items. When
having unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and layout.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
@property
def legendwidth(self):
"""
Sets the width (in px or fraction) of the legend for this
trace.
The 'legendwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["legendwidth"]
@legendwidth.setter
def legendwidth(self, val):
self["legendwidth"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.candlestick.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.candlestick.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def low(self):
"""
Sets the low values.
The 'low' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["low"]
@low.setter
def low(self, val):
self["low"] = val
@property
def lowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `low`.
The 'lowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["lowsrc"]
@lowsrc.setter
def lowsrc(self, val):
self["lowsrc"] = val
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def open(self):
"""
Sets the open values.
The 'open' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["open"]
@open.setter
def open(self, val):
self["open"] = val
@property
def opensrc(self):
"""
Sets the source reference on Chart Studio Cloud for `open`.
The 'opensrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opensrc"]
@opensrc.setter
def opensrc(self, val):
self["opensrc"] = val
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.candlestick.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Returns
-------
plotly.graph_objs.candlestick.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
@property
def text(self):
"""
Sets hover text elements associated with each sample point. If
a single string, the same string appears over all the data
points. If an array of string, the items are mapped in order to
this trace's sample points.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def whiskerwidth(self):
"""
Sets the width of the whiskers relative to the box width. For
example, with 1, the whiskers are as wide as the box(es).
The 'whiskerwidth' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["whiskerwidth"]
@whiskerwidth.setter
def whiskerwidth(self, val):
self["whiskerwidth"] = val
@property
def x(self):
"""
Sets the x coordinates. If absent, linear coordinate will be
generated.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self["xaxis"]
@xaxis.setter
def xaxis(self, val):
self["xaxis"] = val
@property
def xcalendar(self):
"""
Sets the calendar system to use with `x` date data.
The 'xcalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['chinese', 'coptic', 'discworld', 'ethiopian',
'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["xcalendar"]
@xcalendar.setter
def xcalendar(self, val):
self["xcalendar"] = val
@property
def xhoverformat(self):
"""
Sets the hover text formatting rulefor `x` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `xaxis.hoverformat`.
The 'xhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["xhoverformat"]
@xhoverformat.setter
def xhoverformat(self, val):
self["xhoverformat"] = val
@property
def xperiod(self):
"""
Only relevant when the axis `type` is "date". Sets the period
positioning in milliseconds or "M<n>" on the x axis. Special
values in the form of "M<n>" could be used to declare the
number of months. In this case `n` must be a positive integer.
The 'xperiod' property accepts values of any type
Returns
-------
Any
"""
return self["xperiod"]
@xperiod.setter
def xperiod(self, val):
self["xperiod"] = val
@property
def xperiod0(self):
"""
Only relevant when the axis `type` is "date". Sets the base for
period positioning in milliseconds or date string on the x0
axis. When `x0period` is round number of weeks, the `x0period0`
by default would be on a Sunday i.e. 2000-01-02, otherwise it
would be at 2000-01-01.
The 'xperiod0' property accepts values of any type
Returns
-------
Any
"""
return self["xperiod0"]
@xperiod0.setter
def xperiod0(self, val):
self["xperiod0"] = val
@property
def xperiodalignment(self):
"""
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
The 'xperiodalignment' property is an enumeration that may be specified as:
- One of the following enumeration values:
['start', 'middle', 'end']
Returns
-------
Any
"""
return self["xperiodalignment"]
@xperiodalignment.setter
def xperiodalignment(self, val):
self["xperiodalignment"] = val
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `x`.
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
@property
def yhoverformat(self):
"""
Sets the hover text formatting rulefor `y` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `yaxis.hoverformat`.
The 'yhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["yhoverformat"]
@yhoverformat.setter
def yhoverformat(self, val):
self["yhoverformat"] = val
@property
def zorder(self):
"""
Sets the layer on which this trace is displayed, relative to
other SVG traces on the same subplot. SVG traces with higher
`zorder` appear in front of those with lower `zorder`.
The 'zorder' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
Returns
-------
int
"""
return self["zorder"]
@zorder.setter
def zorder(self, val):
self["zorder"] = val
@property
def type(self):
return self._props["type"]
@property
def _prop_descriptions(self):
return """\
close
Sets the close values.
closesrc
Sets the source reference on Chart Studio Cloud for
`close`.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
decreasing
:class:`plotly.graph_objects.candlestick.Decreasing`
instance or dict with compatible properties
high
Sets the high values.
highsrc
Sets the source reference on Chart Studio Cloud for
`high`.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.candlestick.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `open`, `high`, `low` and `close`.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
increasing
:class:`plotly.graph_objects.candlestick.Increasing`
instance or dict with compatible properties
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.candlestick.Legendgrouptit
le` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.candlestick.Line` instance
or dict with compatible properties
low
Sets the low values.
lowsrc
Sets the source reference on Chart Studio Cloud for
`low`.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
open
Sets the open values.
opensrc
Sets the source reference on Chart Studio Cloud for
`open`.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.candlestick.Stream`
instance or dict with compatible properties
text
Sets hover text elements associated with each sample
point. If a single string, the same string appears over
all the data points. If an array of string, the items
are mapped in order to this trace's sample points.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
whiskerwidth
Sets the width of the whiskers relative to the box
width. For example, with 1, the whiskers are as wide as
the box(es).
x
Sets the x coordinates. If absent, linear coordinate
will be generated.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
"""
def __init__(
self,
arg=None,
close=None,
closesrc=None,
customdata=None,
customdatasrc=None,
decreasing=None,
high=None,
highsrc=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatefallback=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
increasing=None,
legend=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
legendwidth=None,
line=None,
low=None,
lowsrc=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
open=None,
opensrc=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
visible=None,
whiskerwidth=None,
x=None,
xaxis=None,
xcalendar=None,
xhoverformat=None,
xperiod=None,
xperiod0=None,
xperiodalignment=None,
xsrc=None,
yaxis=None,
yhoverformat=None,
zorder=None,
**kwargs,
):
"""
Construct a new Candlestick object
The candlestick is a style of financial chart describing open,
high, low and close for a given `x` coordinate (most likely
time). The boxes represent the spread between the `open` and
`close` values and the lines represent the spread between the
`low` and `high` values Sample points where the close value is
higher (lower) then the open value are called increasing
(decreasing). By default, increasing candles are drawn in green
whereas decreasing are drawn in red.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Candlestick`
close
Sets the close values.
closesrc
Sets the source reference on Chart Studio Cloud for
`close`.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
decreasing
:class:`plotly.graph_objects.candlestick.Decreasing`
instance or dict with compatible properties
high
Sets the high values.
highsrc
Sets the source reference on Chart Studio Cloud for
`high`.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.candlestick.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, all attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Finally, the template string has access
to variables `open`, `high`, `low` and `close`.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example
`<extra>%{fullData.name}</extra>`. To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
increasing
:class:`plotly.graph_objects.candlestick.Increasing`
instance or dict with compatible properties
legend
Sets the reference to a legend to show this trace in.
References to these legends are "legend", "legend2",
"legend3", etc. Settings for these legends are set in
the layout, under `layout.legend`, `layout.legend2`,
etc.
legendgroup
Sets the legend group for this trace. Traces and shapes
part of the same legend group hide/show at the same
time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.candlestick.Legendgrouptit
le` instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with "reversed" `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items. When having
unranked or equal rank items shapes would be displayed
after traces i.e. according to their order in data and
layout.
legendwidth
Sets the width (in px or fraction) of the legend for
this trace.
line
:class:`plotly.graph_objects.candlestick.Line` instance
or dict with compatible properties
low
Sets the low values.
lowsrc
Sets the source reference on Chart Studio Cloud for
`low`.
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
open
Sets the open values.
opensrc
Sets the source reference on Chart Studio Cloud for
`open`.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.candlestick.Stream`
instance or dict with compatible properties
text
Sets hover text elements associated with each sample
point. If a single string, the same string appears over
all the data points. If an array of string, the items
are mapped in order to this trace's sample points.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
whiskerwidth
Sets the width of the whiskers relative to the box
width. For example, with 1, the whiskers are as wide as
the box(es).
x
Sets the x coordinates. If absent, linear coordinate
will be generated.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
xhoverformat
Sets the hover text formatting rulefor `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
yhoverformat
Sets the hover text formatting rulefor `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
zorder
Sets the layer on which this trace is displayed,
relative to other SVG traces on the same subplot. SVG
traces with higher `zorder` appear in front of those
with lower `zorder`.
Returns
-------
Candlestick
"""
super().__init__("candlestick")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.Candlestick
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Candlestick`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("close", arg, close)
self._set_property("closesrc", arg, closesrc)
self._set_property("customdata", arg, customdata)
self._set_property("customdatasrc", arg, customdatasrc)
self._set_property("decreasing", arg, decreasing)
self._set_property("high", arg, high)
self._set_property("highsrc", arg, highsrc)
self._set_property("hoverinfo", arg, hoverinfo)
self._set_property("hoverinfosrc", arg, hoverinfosrc)
self._set_property("hoverlabel", arg, hoverlabel)
self._set_property("hovertemplate", arg, hovertemplate)
self._set_property("hovertemplatefallback", arg, hovertemplatefallback)
self._set_property("hovertemplatesrc", arg, hovertemplatesrc)
self._set_property("hovertext", arg, hovertext)
self._set_property("hovertextsrc", arg, hovertextsrc)
self._set_property("ids", arg, ids)
self._set_property("idssrc", arg, idssrc)
self._set_property("increasing", arg, increasing)
self._set_property("legend", arg, legend)
self._set_property("legendgroup", arg, legendgroup)
self._set_property("legendgrouptitle", arg, legendgrouptitle)
self._set_property("legendrank", arg, legendrank)
self._set_property("legendwidth", arg, legendwidth)
self._set_property("line", arg, line)
self._set_property("low", arg, low)
self._set_property("lowsrc", arg, lowsrc)
self._set_property("meta", arg, meta)
self._set_property("metasrc", arg, metasrc)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("open", arg, open)
self._set_property("opensrc", arg, opensrc)
self._set_property("selectedpoints", arg, selectedpoints)
self._set_property("showlegend", arg, showlegend)
self._set_property("stream", arg, stream)
self._set_property("text", arg, text)
self._set_property("textsrc", arg, textsrc)
self._set_property("uid", arg, uid)
self._set_property("uirevision", arg, uirevision)
self._set_property("visible", arg, visible)
self._set_property("whiskerwidth", arg, whiskerwidth)
self._set_property("x", arg, x)
self._set_property("xaxis", arg, xaxis)
self._set_property("xcalendar", arg, xcalendar)
self._set_property("xhoverformat", arg, xhoverformat)
self._set_property("xperiod", arg, xperiod)
self._set_property("xperiod0", arg, xperiod0)
self._set_property("xperiodalignment", arg, xperiodalignment)
self._set_property("xsrc", arg, xsrc)
self._set_property("yaxis", arg, yaxis)
self._set_property("yhoverformat", arg, yhoverformat)
self._set_property("zorder", arg, zorder)
self._props["type"] = "candlestick"
arg.pop("type", None)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Candlestick |
python | fluentpython__example-code-2e | 15-more-types/protocol/random/randompop.py | {
"start": 73,
"end": 140
} | class ____(Protocol):
def pop_random(self) -> Any: ...
| RandomPopper |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 184376,
"end": 184774
} | class ____:
_col_type = INT8RANGE
_col_str = "INT8RANGE"
def _data_str(self):
return "[9223372036854775306,9223372036854775800)"
def _data_obj(self):
return Range(9223372036854775306, 9223372036854775800)
_epsilon = 1
def _step_value_up(self, value):
return value + 5
def _step_value_down(self, value):
return value - 5
| _Int8RangeTests |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/check_numerics_callback_test.py | {
"start": 4914,
"end": 17582
} | class ____(test_util.TensorFlowTestCase):
"""Test for cases in which enable_check_numerics() catches infs or nans."""
def tearDown(self):
check_numerics_callback.disable_check_numerics()
super(CheckNumericsCallbackUnhealthyTest, self).tearDown()
def _assertRaisesInvalidArgumentErrorAndGetMessage(self, func):
caught = None
try:
func()
except errors.InvalidArgumentError as error:
caught = error
self.assertTrue(caught, "Failed to catch expected InvalidArgumentError")
return caught.message
def testCatchEagerOpFloat32Inf(self):
"""Test catching Infinity in eager op execution: float32."""
check_numerics_callback.enable_check_numerics()
x = constant_op.constant([2.0, 3.0])
y = constant_op.constant([1.0, 0.0])
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: x / y)
# Check the content of the error message.
self.assertTrue(re.search(r"eagerly-executing op.*\"RealDiv\"", message))
self.assertTrue(re.search(r"dtype.*float32", message))
self.assertIn("shape: (2,)\n", message)
self.assertIn("# of +Inf elements: 1\n", message)
self.assertIn("0: %s" % x, message)
self.assertIn("1: %s" % y, message)
def testEnableCheckNumericsIsIdempotent(self):
"""Two calls to enable_check_numerics() have same effect as one call."""
check_numerics_callback.enable_check_numerics()
check_numerics_callback.enable_check_numerics()
x = constant_op.constant([2.0, 3.0])
y = constant_op.constant([1.0, 0.0])
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: x / y)
# Check the content of the error message.
self.assertTrue(re.search(r"eagerly-executing op.*\"RealDiv\"", message))
self.assertTrue(re.search(r"dtype.*float32", message))
self.assertIn("shape: (2,)\n", message)
self.assertIn("# of +Inf elements: 1\n", message)
self.assertIn("0: %s" % x, message)
self.assertIn("1: %s" % y, message)
def testCatchEagerOpFloat16NaN(self):
"""Test catching Infinity in eager op execution: float16."""
check_numerics_callback.enable_check_numerics()
def log1p(x):
y = 1.0 + x
return math_ops.log(y)
x = constant_op.constant([[-1.0]], dtype=dtypes.float16)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: log1p(x))
# Check the content of the error message.
self.assertTrue(re.search(r"eagerly-executing op.*\"Log\"", message))
self.assertTrue(re.search(r"dtype.*float16", message))
self.assertIn("shape: (1, 1)\n", message)
self.assertIn("# of -Inf elements: 1\n", message)
self.assertTrue(re.search(r"Input tensor.*0\.", message))
@test_util.enable_eager_op_as_function
def testCatchEagerOpFloat16NaNWithEagerOpAsFunctionEnabled(self):
self.testCatchEagerOpFloat16NaN()
@test_util.run_in_graph_and_eager_modes
def testCatchFunctionOpInfFloat64(self):
"""Test catching infinites generated in a FuncGraph."""
check_numerics_callback.enable_check_numerics()
@def_function.function
def divide_sum_with_diff(x, y):
w1 = x + y
w2 = x - y
u = w1 / w2
return u * 2.0
x = constant_op.constant(2.0, dtype=dtypes.float64)
y = constant_op.constant(2.0, dtype=dtypes.float64)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: self.evaluate(divide_sum_with_diff(x, y)))
# Check the content of the error message.
self.assertTrue(re.search(r"graph op.*\"RealDiv\"", message))
self.assertTrue(re.search(r"dtype.*float64", message))
self.assertIn("shape: ()\n", message)
self.assertIn("Input tensors (2):", message)
# Check that the correct input ops are printed.
self.assertTrue(re.search(r"0:.*Tensor.*add:0", message))
self.assertTrue(re.search(r"1:.*Tensor.*sub:0", message))
# Check that the correct line for op creation is printed.
self.assertTrue(re.search(r"Stack trace of op's creation", message))
self.assertIn("divide_sum_with_diff", message)
@test_util.run_in_graph_and_eager_modes
@test_util.disable_xla(
"TODO(b/141100809): XLA has no way to assert inside of a kernel.")
def testControlFlowGraphWithNaNBFloat16(self):
"""Test catching bfloat16 NaNs in a control-flow-v2 FuncGraph."""
check_numerics_callback.enable_check_numerics()
@def_function.function
def my_conditional(x):
if math_ops.less(math_ops.reduce_sum(x), 0.0):
return math_ops.log(x)
else:
return math_ops.log(-x)
x = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.bfloat16)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: self.evaluate(my_conditional(x)))
# Check the content of the error message.
self.assertTrue(re.search(r"graph op.*\"Log\"", message))
self.assertTrue(re.search(r"dtype.*bfloat16", message))
self.assertIn("shape: (3,)\n", message)
# Check that the correct input op is printed.
self.assertTrue(re.search(r"Input tensor.*Tensor.*Neg", message))
# Check that the correct line for op creation is printed.
self.assertTrue(re.search(r"Stack trace of op's creation", message))
self.assertIn("my_conditional", message)
@test_util.run_in_graph_and_eager_modes
@test_util.disable_xla(
"There is a small inconsistency in the step at which overflow happens: "
"128 (without XLA) and 127 (with XLA).")
@test_util.disable_tfrt("b/177261532: TFRT cannot detect overflow yet.")
def testOverflowInTfFunction(self):
"""Test catching Infinity caused by overflow in a tf.function with while."""
check_numerics_callback.enable_check_numerics()
@def_function.function
def accumulation_function(counter, lim, accum):
while math_ops.less(counter, lim):
accum.assign(accum * 2.0)
counter.assign_add(1)
return 1
counter = variables.Variable(0, dtype=dtypes.int32)
# Repeated `* 2.0` overflows a float32 tensor in 128 steps. So the
# 1000-step limit is sufficient.
lim = constant_op.constant(1000, dtype=dtypes.int32)
accum = variables.Variable(1.0)
if not context.executing_eagerly():
self.evaluate([counter.initializer, accum.initializer])
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: self.evaluate(accumulation_function(counter, lim, accum)))
self.assertAllClose(self.evaluate(counter), 128)
# Check the content of the error message.
# The overflow to +Infinity happens during the `* 2.0` operation.
self.assertTrue(re.search(r"graph op.*\"Mul\"", message))
self.assertTrue(re.search(r"dtype.*float32", message))
self.assertIn("shape: ()\n", message)
# Check that the correct input op is printed.
self.assertIn("Input tensors (2):", message)
# Check that the correct input ops are printed.
self.assertTrue(re.search(r"0:.*Tensor.*ReadVariableOp:0", message))
self.assertTrue(re.search(r"1:.*Tensor.*mul/y:0", message))
# Check that the correct line for op creation is printed.
self.assertTrue(re.search(r"Stack trace of op's creation", message))
self.assertIn("accumulation_function", message)
@test_util.run_in_graph_and_eager_modes
def testNanInConstIsCaptured(self):
check_numerics_callback.enable_check_numerics()
v = variables.Variable(3.0, dtype=dtypes.float32)
@def_function.function
def add_a_bad_constant(x):
c = constant_op.constant(np.nan)
return x + c
if not context.executing_eagerly():
self.evaluate(v.initializer)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: self.evaluate(add_a_bad_constant(v)))
self.assertTrue(re.search(r"graph op.*\"Const\"", message))
self.assertTrue(re.search(r"dtype:.*float32", message))
self.assertTrue(re.search(r"shape:.*\(\)", message))
self.assertTrue(re.search(r"Graph name:.*add_a_bad_constant", message))
@test_util.run_in_graph_and_eager_modes
def testCatchInfinityInDatasetMapFunction(self):
"""Test that callback catches NaN in a tf.dataset map function."""
check_numerics_callback.enable_check_numerics()
def generate_nan(x):
"""Intentionally generates NaNs by taking log of negative number."""
casted_x = math_ops.cast(x, dtypes.float32)
return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x
dataset = dataset_ops.Dataset.range(10).map(generate_nan)
iterator = dataset_ops.make_one_shot_iterator(dataset)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: self.evaluate(iterator.get_next()))
# Check the content of the error message.
self.assertTrue(re.search(r"graph op.*\"Log\"", message))
self.assertTrue(re.search(r"dtype.*float32", message))
self.assertIn("shape: (2, 2)\n", message)
self.assertTrue(re.search(r"Input tensor.*Tensor.*Log/x:0", message))
self.assertIn("generate_nan", message)
@test_util.run_in_graph_and_eager_modes
def testCustomGradientWithNaNWithTfFunction(self):
"""Test that callback catches NaN in a gradient function during backprop."""
check_numerics_callback.enable_check_numerics()
@custom_gradient.custom_gradient
def func_with_bad_grad(x):
output = math_ops.sin(x)
@def_function.function
def grad(dy):
# `dy` will come in as 1.0. Taking log of -1.0 leads to NaN.
return math_ops.log(-dy)
return output, grad
x = constant_op.constant(-2.0, dtype=dtypes.float16)
def f(x):
return func_with_bad_grad(x)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: gradient_checker_v2.compute_gradient(f, [x]))
# Check the content of the error message.
self.assertTrue(re.search(r"graph op.*\"Log\"", message))
self.assertTrue(re.search(r"dtype.*float16", message))
if context.executing_eagerly():
self.assertIn("shape: ()\n", message)
self.assertTrue(re.search(r"Input tensor.*Tensor.*Neg:0", message))
self.assertIn("grad", message)
@test_util.run_in_graph_and_eager_modes
def testNestedFunctionGradientCall(self):
"""Catching inf in the inner nested tf.function during backprop."""
check_numerics_callback.enable_check_numerics()
x = constant_op.constant(1.0 - 1e-8, dtype=dtypes.float32)
@def_function.function
def asinp1(x):
# asin()'s gradient overflows at the value close to 1.0.
return math_ops.asin(x) + 1.0
@def_function.function
def loss(x):
return math_ops.square(asinp1(x))
with backprop.GradientTape() as tape:
tape.watch(x)
y = loss(x)
message = self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: self.evaluate(tape.gradient(y, x)))
self.assertTrue(re.search(r"gradient", message))
def testEagerModeUsesCorrectPathLengthAndStackHeightLimits(self):
check_numerics_callback.enable_check_numerics(
stack_height_limit=123, path_length_limit=1200)
fake_get_check_numerics_error_message = test.mock.MagicMock(
return_value="dummy_message")
with test.mock.patch.object(check_numerics_callback,
"get_check_numerics_error_message",
fake_get_check_numerics_error_message):
x = constant_op.constant(2.0)
y = constant_op.constant(0.0)
self._assertRaisesInvalidArgumentErrorAndGetMessage(
lambda: x / y) # Expected to generate an inf.
(_, call_kwargs) = fake_get_check_numerics_error_message.call_args
self.assertEqual(call_kwargs["stack_height_limit"], 123)
self.assertEqual(call_kwargs["path_length_limit"], 1200)
@test_util.run_in_graph_and_eager_modes
def testExpectedNaNOpOutputs(self):
"""Test calling operations with benign NaN output."""
check_numerics_callback.enable_check_numerics()
# Empty input tensor
x = constant_op.constant(1, dtype=dtypes.float32, shape=[0, 1, 1, 1])
scale = constant_op.constant([1], dtype=dtypes.float32)
offset = constant_op.constant([1], dtype=dtypes.float32)
# Calling fused_batch_norm with an empty input should output a NaN in the
# latter four outputs without triggering the check_numerics callback
batch_norm_res = gen_nn_ops._fused_batch_norm(
x=x, scale=scale, offset=offset, mean=[], variance=[])
_, batch_mean, batch_variance, _, _ = self.evaluate(batch_norm_res)
self.assertTrue(np.isnan(batch_mean.squeeze()))
self.assertTrue(np.isnan(batch_variance.squeeze()))
if __name__ == "__main__":
ops.enable_eager_execution()
googletest.main()
| CheckNumericsCallbackUnhealthyTest |
python | plotly__plotly.py | plotly/graph_objs/scatterpolargl/selected/_textfont.py | {
"start": 233,
"end": 2456
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolargl.selected"
_path_str = "scatterpolargl.selected.textfont"
_valid_props = {"color"}
@property
def color(self):
"""
Sets the text font color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the text font color of selected points.
"""
def __init__(self, arg=None, color=None, **kwargs):
"""
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterpolargl
.selected.Textfont`
color
Sets the text font color of selected points.
Returns
-------
Textfont
"""
super().__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolargl.selected.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolargl.selected.Textfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Textfont |
python | walkccc__LeetCode | solutions/2862. Maximum Element-Sum of a Complete Subset of Indices/2862-2.py | {
"start": 0,
"end": 330
} | class ____:
def maximumSum(self, nums: list[int]) -> int:
ans = 0
for oddPower in range(1, len(nums) + 1):
summ = 0
for num in range(1, len(nums) + 1):
if num * num * oddPower > len(nums):
break
summ += nums[oddPower * num * num - 1]
ans = max(ans, summ)
return ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/query.py | {
"start": 33357,
"end": 36948
} | class ____(ShopifyBulkQuery):
"""
{
customers(query: "updated_at:>='2024-01-20T00:00:00+00:00' AND updated_at:<'2024-01-24T00:00:00+00:00'", sortKey:UPDATED_AT) {
edges {
node {
__typename
customerId: id
defaultAddress {
id
}
addresses {
address1
address2
city
country
countryCode
company
firstName
id
lastName
name
phone
province
provinceCode
zip
}
}
}
}
}
"""
query_name = "customers"
sort_key = "UPDATED_AT"
addresses_fields: List[str] = [
"address1",
"address2",
"city",
"country",
"countryCode",
"company",
"firstName",
"id",
"lastName",
"name",
"phone",
"province",
"provinceCode",
"zip",
]
query_nodes: List[Field] = [
"__typename",
"id",
Field(name="defaultAddress", fields=["id"]),
Field(name="addresses", fields=addresses_fields),
# add `Customer.updated_at` field to provide the parent state
"updatedAt",
]
record_composition = {
"new_record": "Customer",
}
def set_default_address(
self, record: MutableMapping[str, Any], address_record: MutableMapping[str, Any]
) -> Iterable[MutableMapping[str, Any]]:
default_address = record.get("defaultAddress", {})
# the default_address could be literal `None`, additional check is required
if default_address:
if address_record.get("id") == record.get("defaultAddress", {}).get("id"):
address_record["default"] = True
return address_record
def record_process_components(self, record: MutableMapping[str, Any]) -> Optional[Iterable[MutableMapping[str, Any]]]:
"""
Defines how to process collected components.
"""
if "addresses" in record.keys():
addresses = record.get("addresses")
if len(addresses) > 0:
for customer_address in addresses:
# add `customer_id` to each address entry
customer_address["customer_id"] = record.get("id")
# add `country_name` from `country`
customer_address["country_name"] = customer_address.get("country")
# default address check
customer_address = self.set_default_address(record, customer_address)
# resolve address id
customer_address["id"] = self.tools.resolve_str_id(customer_address.get("id"))
# add PARENT stream cursor_field to the root level of the record
# providing the ability to track the PARENT state as well
# convert dates from ISO-8601 to RFC-3339
customer_address["updated_at"] = self.tools.from_iso8601_to_rfc3339(record, "updatedAt")
# names to snake
customer_address = self.tools.fields_names_to_snake_case(customer_address)
yield customer_address
| CustomerAddresses |
python | pypa__pip | src/pip/_vendor/urllib3/exceptions.py | {
"start": 4795,
"end": 4912
} | class ____(SecurityWarning):
"""Warned when making an unverified HTTPS request."""
pass
| InsecureRequestWarning |
python | spyder-ide__spyder | spyder/plugins/editor/widgets/window.py | {
"start": 2229,
"end": 2537
} | class ____(OutlineExplorerWidget):
sig_collapse_requested = Signal()
@Slot()
def close_dock(self):
"""
Reimplemented to preserve the widget's visible state when shown in an
editor window.
"""
self.sig_collapse_requested.emit()
| OutlineExplorerInEditorWindow |
python | encode__django-rest-framework | tests/test_routers.py | {
"start": 26640,
"end": 26834
} | class ____(BasenameTestCase, TestCase):
def setUp(self):
self.router = DefaultRouter()
self.router.root_view_name = 'nameable-root'
| TestDuplicateBasenameDefaultRouterRootViewName |
python | readthedocs__readthedocs.org | readthedocs/sso/admin.py | {
"start": 383,
"end": 1636
} | class ____(admin.ModelAdmin):
"""Admin configuration for SSOIntegration."""
list_display = ("organization", "provider")
search_fields = ("organization__slug", "organization__name", "domains__domain")
list_filter = ("provider",)
raw_id_fields = ("organization",)
actions = [
"resync_sso_user_accounts",
]
@admin.action(description="Re-sync all SSO user accounts")
def resync_sso_user_accounts(self, request, queryset):
users_count = 0
organizations_count = queryset.count()
for ssointegration in queryset.select_related("organization"):
members = AdminPermission.members(ssointegration.organization)
log.info(
"Triggering SSO re-sync for organization.",
organization_slug=ssointegration.organization.slug,
count=members.count(),
)
users_count += members.count()
for user in members:
sync_remote_repositories.delay(user.pk)
messages.add_message(
request,
messages.INFO,
f"Triggered resync for {organizations_count} organizations and {users_count} users.",
)
admin.site.register(SSODomain)
| SSOIntegrationAdmin |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 3154,
"end": 3232
} | class ____(Variadic[T_co]): ...
# This should generate an error.
| VariadicChildCo |
python | tensorflow__tensorflow | tensorflow/python/ops/nn_grad_test.py | {
"start": 9151,
"end": 9680
} | class ____(test.TestCase):
def testSwishGrad(self):
features = constant_op.constant([[-2, -1, 1, 3]],
dtype=dtypes.float32)
beta = constant_op.constant(0.25, dtype=dtypes.float32)
with self.cached_session():
theoretical, numerical = gradient_checker_v2.compute_gradient(
nn_impl.swish, [features, beta])
error = gradient_checker_v2.max_error(theoretical, numerical)
self.assertLess(error, 1e-4)
if __name__ == "__main__":
test.main()
| SwishGradOpTest |
python | agronholm__apscheduler | examples/gui/qt_executor.py | {
"start": 505,
"end": 1269
} | class ____(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("APScheduler demo")
self.clock = QLabel()
font = self.clock.font()
font.setPointSize(30)
self.clock.setFont(font)
self.update_time()
self.setCentralWidget(self.clock)
def update_time(self) -> None:
now = datetime.now()
self.clock.setText(f"The time is now {now:%H:%M:%S}")
app = QApplication(sys.argv)
window = MainWindow()
window.show()
with Scheduler() as scheduler:
scheduler.job_executors["qt"] = QtJobExecutor()
scheduler.add_schedule(
window.update_time, IntervalTrigger(seconds=1), job_executor="qt"
)
scheduler.start_in_background()
app.exec()
| MainWindow |
python | kubernetes-client__python | kubernetes/client/models/v1_env_from_source.py | {
"start": 383,
"end": 5214
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'config_map_ref': 'V1ConfigMapEnvSource',
'prefix': 'str',
'secret_ref': 'V1SecretEnvSource'
}
attribute_map = {
'config_map_ref': 'configMapRef',
'prefix': 'prefix',
'secret_ref': 'secretRef'
}
def __init__(self, config_map_ref=None, prefix=None, secret_ref=None, local_vars_configuration=None): # noqa: E501
"""V1EnvFromSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config_map_ref = None
self._prefix = None
self._secret_ref = None
self.discriminator = None
if config_map_ref is not None:
self.config_map_ref = config_map_ref
if prefix is not None:
self.prefix = prefix
if secret_ref is not None:
self.secret_ref = secret_ref
@property
def config_map_ref(self):
"""Gets the config_map_ref of this V1EnvFromSource. # noqa: E501
:return: The config_map_ref of this V1EnvFromSource. # noqa: E501
:rtype: V1ConfigMapEnvSource
"""
return self._config_map_ref
@config_map_ref.setter
def config_map_ref(self, config_map_ref):
"""Sets the config_map_ref of this V1EnvFromSource.
:param config_map_ref: The config_map_ref of this V1EnvFromSource. # noqa: E501
:type: V1ConfigMapEnvSource
"""
self._config_map_ref = config_map_ref
@property
def prefix(self):
"""Gets the prefix of this V1EnvFromSource. # noqa: E501
Optional text to prepend to the name of each environment variable. May consist of any printable ASCII characters except '='. # noqa: E501
:return: The prefix of this V1EnvFromSource. # noqa: E501
:rtype: str
"""
return self._prefix
@prefix.setter
def prefix(self, prefix):
"""Sets the prefix of this V1EnvFromSource.
Optional text to prepend to the name of each environment variable. May consist of any printable ASCII characters except '='. # noqa: E501
:param prefix: The prefix of this V1EnvFromSource. # noqa: E501
:type: str
"""
self._prefix = prefix
@property
def secret_ref(self):
"""Gets the secret_ref of this V1EnvFromSource. # noqa: E501
:return: The secret_ref of this V1EnvFromSource. # noqa: E501
:rtype: V1SecretEnvSource
"""
return self._secret_ref
@secret_ref.setter
def secret_ref(self, secret_ref):
"""Sets the secret_ref of this V1EnvFromSource.
:param secret_ref: The secret_ref of this V1EnvFromSource. # noqa: E501
:type: V1SecretEnvSource
"""
self._secret_ref = secret_ref
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EnvFromSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EnvFromSource):
return True
return self.to_dict() != other.to_dict()
| V1EnvFromSource |
python | ray-project__ray | python/ray/serve/tests/test_list_outbound_deployments.py | {
"start": 258,
"end": 357
} | class ____:
def __call__(self, x: int) -> int:
return x * 2
@serve.deployment
| DownstreamA |
python | numba__numba | numba/core/ir.py | {
"start": 30126,
"end": 30810
} | class ____(EqualityCheckMixin, AbstractRHS):
def __init__(self, value, loc, use_literal_type=True):
assert isinstance(loc, Loc)
self.value = value
self.loc = loc
# Note: need better way to tell if this is a literal or not.
self.use_literal_type = use_literal_type
def __repr__(self):
return 'const(%s, %s)' % (type(self.value).__name__, self.value)
def infer_constant(self):
return self.value
def __deepcopy__(self, memo):
# Override to not copy constant values in code
return Const(
value=self.value, loc=self.loc,
use_literal_type=self.use_literal_type,
)
| Const |
python | coleifer__peewee | tests/pwiz_integration.py | {
"start": 1997,
"end": 2448
} | class ____(BaseModel):
data = IntegerField()
misc = IntegerField()
text = TextField(index=True)
user = ForeignKeyField(column_name='user_id', field='username', model=User)
class Meta:
table_name = 'note'
indexes = (
(('user', 'data', 'misc'), False),
(('user', 'text'), True),
)
""".strip()
EXPECTED_ORDERED = """
from peewee import *
database = SqliteDatabase('peewee_test.db')
| Note |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 8709,
"end": 10823
} | class ____(VirtualMachineError):
"""
Raised when there is a contract-defined revert,
such as from an assert/require statement.
"""
def __init__(
self,
revert_message: Optional[str] = None,
txn: Optional[FailedTxn] = None,
trace: _TRACE_ARG = None,
contract_address: Optional["AddressType"] = None,
source_traceback: _SOURCE_TRACEBACK_ARG = None,
base_err: Optional[Exception] = None,
project: Optional["ProjectManager"] = None,
set_ape_traceback: bool = True, # Overridden default.
):
self.txn = txn
self.contract_address = contract_address
super().__init__(
base_err=base_err,
contract_address=contract_address,
message=revert_message,
project=project,
set_ape_traceback=set_ape_traceback,
source_traceback=source_traceback,
trace=trace,
txn=txn,
)
if not revert_message and source_traceback is not None and (dev := self.dev_message):
try:
# Attempt to use dev message as main exception message.
self.message = dev
except Exception:
pass
@property
def revert_message(self):
return self.message
@revert_message.setter
def revert_message(self, value):
self.message = value
if args := self.args:
self.args = tuple([value, *args[1:]])
@property
def dev_message(self) -> Optional[str]:
"""
The dev-string message of the exception.
Raises:
``ValueError``: When unable to get dev message.
"""
return self.source_traceback.revert_type if self.source_traceback else None
@classmethod
def from_error(cls, err: Exception):
"""
Creates this class from the error message of the given
error.
This should be overridden whenever possible to handle
provider-specific use-cases for raising this error.
"""
return cls(str(err))
| ContractLogicError |
python | wandb__wandb | wandb/vendor/pygments/lexers/automation.py | {
"start": 475,
"end": 10167
} | class ____(RegexLexer):
"""
For `autohotkey <http://www.autohotkey.com/>`_ source code.
.. versionadded:: 1.4
"""
name = 'autohotkey'
aliases = ['ahk', 'autohotkey']
filenames = ['*.ahk', '*.ahkl']
mimetypes = ['text/x-autohotkey']
tokens = {
'root': [
(r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), 'incomment'),
(r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'),
(r'\s+;.*?$', Comment.Singleline),
(r'^;.*?$', Comment.Singleline),
(r'[]{}(),;[]', Punctuation),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'\%[a-zA-Z_#@$][\w#@$]*\%', Name.Variable),
(r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
include('commands'),
include('labels'),
include('builtInFunctions'),
include('builtInVariables'),
(r'"', String, combined('stringescape', 'dqs')),
include('numbers'),
(r'[a-zA-Z_#@$][\w#@$]*', Name),
(r'\\|\'', Text),
(r'\`([,%`abfnrtv\-+;])', String.Escape),
include('garbage'),
],
'incomment': [
(r'^\s*\*/', Comment.Multiline, '#pop'),
(r'[^*/]', Comment.Multiline),
(r'[*/]', Comment.Multiline)
],
'incontinuation': [
(r'^\s*\)', Generic, '#pop'),
(r'[^)]', Generic),
(r'[)]', Generic),
],
'commands': [
(r'(?i)^(\s*)(global|local|static|'
r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|'
r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|'
r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|'
r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|'
r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|'
r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|'
r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|'
r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|'
r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|'
r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|'
r'ControlSendRaw|ControlSetText|CoordMode|Critical|'
r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|'
r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|'
r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|'
r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|'
r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|'
r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|'
r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|'
r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|'
r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|'
r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|'
r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|'
r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|'
r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|'
r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|'
r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|'
r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|'
r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|'
r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|'
r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|'
r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|'
r'SetBatchLines|SetCapslockState|SetControlDelay|'
r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|'
r'SetMouseDelay|SetNumlockState|SetScrollLockState|'
r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|'
r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|'
r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|'
r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|'
r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|'
r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|'
r'StringReplace|StringRight|StringSplit|StringTrimLeft|'
r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|'
r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|'
r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|'
r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|'
r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|'
r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|'
r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|'
r'WinWait)\b', bygroups(Text, Name.Builtin)),
],
'builtInFunctions': [
(r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|'
r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|'
r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|'
r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|'
r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|'
r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|'
r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|'
r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|'
r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|'
r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|'
r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|'
r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|'
r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|'
r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|'
r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|'
r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b',
Name.Function),
],
'builtInVariables': [
(r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|'
r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|'
r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|'
r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|'
r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|'
r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|'
r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|'
r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|'
r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|'
r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|'
r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|'
r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|'
r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|'
r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|'
r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|'
r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|'
r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|'
r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|'
r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|'
r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|'
r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|'
r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|'
r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|'
r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|'
r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|'
r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|'
r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|'
r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|'
r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|'
r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b',
Name.Variable),
],
'labels': [
# hotkeys and labels
# technically, hotkey names are limited to named keys and buttons
(r'(^\s*)([^:\s("]+?:{1,2})', bygroups(Text, Name.Label)),
(r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'stringescape': [
(r'\"\"|\`([,%`abfnrtv])', String.Escape),
],
'strings': [
(r'[^"\n]+', String),
],
'dqs': [
(r'"', String, '#pop'),
include('strings')
],
'garbage': [
(r'[^\S\n]', Text),
# (r'.', Text), # no cheating
],
}
| AutohotkeyLexer |
python | sympy__sympy | sympy/assumptions/assume.py | {
"start": 10738,
"end": 14588
} | class ____(Predicate):
"""
Predicate without handler.
Explanation
===========
This predicate is generated by using ``Predicate`` directly for
construction. It does not have a handler, and evaluating this with
arguments is done by SAT solver.
Examples
========
>>> from sympy import Predicate, Q
>>> Q.P = Predicate('P')
>>> Q.P.func
<class 'sympy.assumptions.assume.UndefinedPredicate'>
>>> Q.P.name
Str('P')
"""
handler = None
def __new__(cls, name, handlers=None):
# "handlers" parameter supports old design
if not isinstance(name, Str):
name = Str(name)
obj = super(Boolean, cls).__new__(cls, name)
obj.handlers = handlers or []
return obj
@property
def name(self):
return self.args[0]
def _hashable_content(self):
return (self.name,)
def __getnewargs__(self):
return (self.name,)
def __call__(self, expr):
return AppliedPredicate(self, expr)
def add_handler(self, handler):
sympy_deprecation_warning(
"""
The AskHandler system is deprecated. Predicate.add_handler()
should be replaced with the multipledispatch handler of Predicate.
""",
deprecated_since_version="1.8",
active_deprecations_target='deprecated-askhandler',
)
self.handlers.append(handler)
def remove_handler(self, handler):
sympy_deprecation_warning(
"""
The AskHandler system is deprecated. Predicate.remove_handler()
should be replaced with the multipledispatch handler of Predicate.
""",
deprecated_since_version="1.8",
active_deprecations_target='deprecated-askhandler',
)
self.handlers.remove(handler)
def eval(self, args, assumptions=True):
# Support for deprecated design
# When old design is removed, this will always return None
sympy_deprecation_warning(
"""
The AskHandler system is deprecated. Evaluating UndefinedPredicate
objects should be replaced with the multipledispatch handler of
Predicate.
""",
deprecated_since_version="1.8",
active_deprecations_target='deprecated-askhandler',
stacklevel=5,
)
expr, = args
res, _res = None, None
mro = inspect.getmro(type(expr))
for handler in self.handlers:
cls = get_class(handler)
for subclass in mro:
eval_ = getattr(cls, subclass.__name__, None)
if eval_ is None:
continue
res = eval_(expr, assumptions)
# Do not stop if value returned is None
# Try to check for higher classes
if res is None:
continue
if _res is None:
_res = res
else:
# only check consistency if both resolutors have concluded
if _res != res:
raise ValueError('incompatible resolutors')
break
return res
@contextmanager
def assuming(*assumptions):
"""
Context manager for assumptions.
Examples
========
>>> from sympy import assuming, Q, ask
>>> from sympy.abc import x, y
>>> print(ask(Q.integer(x + y)))
None
>>> with assuming(Q.integer(x), Q.integer(y)):
... print(ask(Q.integer(x + y)))
True
"""
old_global_assumptions = global_assumptions.copy()
global_assumptions.update(assumptions)
try:
yield
finally:
global_assumptions.clear()
global_assumptions.update(old_global_assumptions)
| UndefinedPredicate |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.