language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/sliding-window-maximum.py | {
"start": 61,
"end": 551
} | class ____(object):
def maxSlidingWindow(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
result, dq = [], deque()
for i in xrange(len(nums)):
if dq and i-dq[0] == k:
dq.popleft()
while dq and nums[dq[-1]] <= nums[i]:
dq.pop()
dq.append(i)
if i >= k-1:
result.append(nums[dq[0]])
return result
| Solution |
python | astropy__astropy | astropy/units/tests/test_quantity_ufuncs.py | {
"start": 31835,
"end": 40284
} | class ____:
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value / 10.0, out=s)
assert check is s
assert np.all(check.value == np.arcsin(value / 10.0))
assert check.unit is u.radian
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100.0 * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v)
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
# can also replace in last position if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# can also replace input with first output when scaling
v4 = v_copy.copy()
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.0
assert check is s
assert np.all(check.value == value / 2.0)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2.0 * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1.0 * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1.0 * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1.0 * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2.0 / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1.0, 2.0, 3.0]) * u.dimensionless_unscaled
np.add(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
assert np.all(s.value == np.array([3.0, 6.0, 9.0]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 3.0]) * 2.0, out=s)
assert_allclose(s.value, np.arctan2(1.0, 2.0))
assert s.unit is u.radian
@pytest.mark.parametrize("value", [1.0, np.arange(10.0)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.0 * u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.0) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1.0 * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
First two tests that check that float32 is kept close #3976.
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += 20.0 * u.km
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
@pytest.mark.parametrize("ufunc", (np.equal, np.greater))
def test_comparison_ufuncs_inplace(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
check = np.empty(q_i1.shape, bool)
ufunc(q_i1.value, q_i2.to_value(q_i1.unit), out=check)
result = np.empty(q_i1.shape, bool)
q_o = ufunc(q_i1, q_i2, out=result)
assert q_o is result
assert type(q_o) is np.ndarray
assert q_o.dtype == bool
assert np.all(q_o == check)
@pytest.mark.parametrize("ufunc", (np.isfinite, np.signbit))
def test_onearg_test_ufuncs_inplace(self, ufunc):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
check = np.empty(q.shape, bool)
ufunc(q.value, out=check)
result = np.empty(q.shape, bool)
out = ufunc(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
def test_sign_inplace(self):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
check = np.empty(q.shape, q.dtype)
np.sign(q.value, out=check)
result = np.empty(q.shape, q.dtype)
out = np.sign(q, out=result)
assert out is result
assert type(out) is np.ndarray
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
def test_ndarray_inplace_op_with_quantity(self):
"""Regression test for gh-13911."""
a = np.arange(3.0)
q = u.Quantity([12.5, 25.0], u.percent)
a[:2] += q # This used to fail
assert_array_equal(a, np.array([0.125, 1.25, 2.0]))
def test_ndarray_inplace_op_with_dimensionless_quantity(self):
# Regression test for #18866 - multiplying a bare array inplace with
# a dimensionless Quantity required the unit to be u.dimensionless_unscaled.
# Mere equality was not good enough.
arr = np.ones((1,))
arr *= 1 / np.cos(0 * u.deg)
assert arr[0] == 1
| TestInplaceUfuncs |
python | gevent__gevent | src/greentest/3.14/test__interpreters.py | {
"start": 16439,
"end": 26671
} | class ____(TestBase):
def setUp(self):
super().setUp()
self.id = _interpreters.create()
def test_success(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
_interpreters.run_string(self.id, script)
out = file.read()
self.assertEqual(out, 'it worked!')
def test_in_thread(self):
script, file = _captured_script('print("it worked!", end="")')
with file:
def f():
_interpreters.run_string(self.id, script)
t = threading.Thread(target=f)
t.start()
t.join()
out = file.read()
self.assertEqual(out, 'it worked!')
def test_create_thread(self):
subinterp = _interpreters.create()
script, file = _captured_script("""
import threading
def f():
print('it worked!', end='')
t = threading.Thread(target=f)
t.start()
t.join()
""")
with file:
_interpreters.run_string(subinterp, script)
out = file.read()
self.assertEqual(out, 'it worked!')
def test_create_daemon_thread(self):
with self.subTest('isolated'):
expected = 'spam spam spam spam spam'
subinterp = _interpreters.create('isolated')
script, file = _captured_script(f"""
import threading
def f():
print('it worked!', end='')
try:
t = threading.Thread(target=f, daemon=True)
t.start()
t.join()
except RuntimeError:
print('{expected}', end='')
""")
with file:
_interpreters.run_string(subinterp, script)
out = file.read()
self.assertEqual(out, expected)
with self.subTest('not isolated'):
subinterp = _interpreters.create('legacy')
script, file = _captured_script("""
import threading
def f():
print('it worked!', end='')
t = threading.Thread(target=f, daemon=True)
t.start()
t.join()
""")
with file:
_interpreters.run_string(subinterp, script)
out = file.read()
self.assertEqual(out, 'it worked!')
def test_shareable_types(self):
interp = _interpreters.create()
objects = [
None,
'spam',
b'spam',
42,
]
for obj in objects:
with self.subTest(obj):
_interpreters.set___main___attrs(interp, dict(obj=obj))
_interpreters.run_string(
interp,
f'assert(obj == {obj!r})',
)
def test_os_exec(self):
expected = 'spam spam spam spam spam'
subinterp = _interpreters.create()
script, file = _captured_script(f"""
import os, sys
try:
os.execl(sys.executable)
except RuntimeError:
print('{expected}', end='')
""")
with file:
_interpreters.run_string(subinterp, script)
out = file.read()
self.assertEqual(out, expected)
@support.requires_fork()
def test_fork(self):
import tempfile
with tempfile.NamedTemporaryFile('w+', encoding="utf-8") as file:
file.write('')
file.flush()
expected = 'spam spam spam spam spam'
script = dedent(f"""
import os
try:
os.fork()
except RuntimeError:
with open('{file.name}', 'w', encoding='utf-8') as out:
out.write('{expected}')
""")
_interpreters.run_string(self.id, script)
file.seek(0)
content = file.read()
self.assertEqual(content, expected)
def test_already_running(self):
with _running(self.id):
with self.assertRaises(_interpreters.InterpreterError):
_interpreters.run_string(self.id, 'print("spam")')
def test_does_not_exist(self):
id = 0
while id in set(id for id, *_ in _interpreters.list_all()):
id += 1
with self.assertRaises(InterpreterNotFoundError):
_interpreters.run_string(id, 'print("spam")')
def test_error_id(self):
with self.assertRaises(ValueError):
_interpreters.run_string(-1, 'print("spam")')
def test_bad_id(self):
with self.assertRaises(TypeError):
_interpreters.run_string('spam', 'print("spam")')
def test_bad_script(self):
with self.assertRaises(TypeError):
_interpreters.run_string(self.id, 10)
def test_bytes_for_script(self):
with self.assertRaises(TypeError):
_interpreters.run_string(self.id, b'print("spam")')
def test_str_subclass_string(self):
class StrSubclass(str): pass
output = _run_output(self.id, StrSubclass('print(1 + 2)'))
self.assertEqual(output, '3\n')
def test_with_shared(self):
r, w = os.pipe()
shared = {
'spam': b'ham',
'eggs': b'-1',
'cheddar': None,
}
script = dedent(f"""
eggs = int(eggs)
spam = 42
result = spam + eggs
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
_interpreters.set___main___attrs(self.id, shared)
_interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['spam'], 42)
self.assertEqual(ns['eggs'], -1)
self.assertEqual(ns['result'], 41)
self.assertIsNone(ns['cheddar'])
def test_shared_overwrites(self):
_interpreters.run_string(self.id, dedent("""
spam = 'eggs'
ns1 = dict(vars())
del ns1['__builtins__']
"""))
shared = {'spam': b'ham'}
script = dedent("""
ns2 = dict(vars())
del ns2['__builtins__']
""")
_interpreters.set___main___attrs(self.id, shared)
_interpreters.run_string(self.id, script)
r, w = os.pipe()
script = dedent(f"""
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
_interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['ns1']['spam'], 'eggs')
self.assertEqual(ns['ns2']['spam'], b'ham')
self.assertEqual(ns['spam'], b'ham')
def test_shared_overwrites_default_vars(self):
r, w = os.pipe()
shared = {'__name__': b'not __main__'}
script = dedent(f"""
spam = 42
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
_interpreters.set___main___attrs(self.id, shared)
_interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
self.assertEqual(ns['__name__'], b'not __main__')
def test_main_reused(self):
r, w = os.pipe()
_interpreters.run_string(self.id, dedent(f"""
spam = True
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
del ns, pickle, chan
"""))
with open(r, 'rb') as chan:
ns1 = pickle.load(chan)
r, w = os.pipe()
_interpreters.run_string(self.id, dedent(f"""
eggs = False
ns = dict(vars())
del ns['__builtins__']
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
"""))
with open(r, 'rb') as chan:
ns2 = pickle.load(chan)
self.assertIn('spam', ns1)
self.assertNotIn('eggs', ns1)
self.assertIn('eggs', ns2)
self.assertIn('spam', ns2)
def test_execution_namespace_is_main(self):
r, w = os.pipe()
script = dedent(f"""
spam = 42
ns = dict(vars())
ns['__builtins__'] = str(ns['__builtins__'])
import pickle
with open({w}, 'wb') as chan:
pickle.dump(ns, chan)
""")
_interpreters.run_string(self.id, script)
with open(r, 'rb') as chan:
ns = pickle.load(chan)
ns.pop('__builtins__')
ns.pop('__loader__')
self.assertEqual(ns, {
'__name__': '__main__',
'__doc__': None,
'__package__': None,
'__spec__': None,
'spam': 42,
})
# XXX Fix this test!
@unittest.skip('blocking forever')
def test_still_running_at_exit(self):
script = dedent("""
from textwrap import dedent
import threading
import _interpreters
id = _interpreters.create()
def f():
_interpreters.run_string(id, dedent('''
import time
# Give plenty of time for the main interpreter to finish.
time.sleep(1_000_000)
'''))
t = threading.Thread(target=f)
t.start()
""")
with support.temp_dir() as dirname:
filename = script_helper.make_script(dirname, 'interp', script)
with script_helper.spawn_python(filename) as proc:
retcode = proc.wait()
self.assertEqual(retcode, 0)
| RunStringTests |
python | wandb__wandb | wandb/sdk/launch/runner/kubernetes_monitor.py | {
"start": 971,
"end": 1026
} | class ____:
JOBS = "jobs"
PODS = "pods"
| Resources |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_memorystore.py | {
"start": 10046,
"end": 11149
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.cloud_memorystore.CloudMemorystoreHook")
def test_assert_valid_hook_call(self, mock_hook):
task = CloudMemorystoreListInstancesOperator(
task_id=TEST_TASK_ID,
location=TEST_LOCATION,
page_size=TEST_PAGE_SIZE,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
task.execute(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.list_instances.assert_called_once_with(
location=TEST_LOCATION,
page_size=TEST_PAGE_SIZE,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
| TestCloudMemorystoreListInstancesOperator |
python | python-markdown__markdown | markdown/treeprocessors.py | {
"start": 16796,
"end": 17652
} | class ____(Treeprocessor):
""" Restore escaped chars """
RE = re.compile(r'{}(\d+){}'.format(util.STX, util.ETX))
def _unescape(self, m: re.Match[str]) -> str:
return chr(int(m.group(1)))
def unescape(self, text: str) -> str:
return self.RE.sub(self._unescape, text)
def run(self, root: etree.Element) -> None:
""" Loop over all elements and unescape all text. """
for elem in root.iter():
# Unescape text content
if elem.text and not elem.tag == 'code':
elem.text = self.unescape(elem.text)
# Unescape tail content
if elem.tail:
elem.tail = self.unescape(elem.tail)
# Unescape attribute values
for key, value in elem.items():
elem.set(key, self.unescape(value))
| UnescapeTreeprocessor |
python | kamyu104__LeetCode-Solutions | Python/number-of-pairs-of-strings-with-concatenation-equal-to-target.py | {
"start": 832,
"end": 1471
} | class ____(object):
def numOfPairs(self, nums, target):
"""
:type nums: List[str]
:type target: str
:rtype: int
"""
prefix, suffix = collections.Counter(), collections.Counter()
result = 0
for num in nums:
if target.startswith(num):
result += suffix[len(target)-len(num)]
if target.endswith(num):
result += prefix[len(target)-len(num)]
if target.startswith(num):
prefix[len(num)] += 1
if target.endswith(num):
suffix[len(num)] += 1
return result
| Solution2 |
python | getsentry__sentry | src/sentry/api/serializers/rest_framework/dashboard.py | {
"start": 5055,
"end": 13164
} | class ____(CamelSnakeSerializer[Dashboard]):
# Is a string because output serializers also make it a string.
id = serializers.CharField(required=False)
fields = serializers.ListField(child=serializers.CharField(), required=False) # type: ignore[assignment] # XXX: clobbering Serializer.fields
aggregates = serializers.ListField(
child=serializers.CharField(), required=False, allow_null=True
)
columns = serializers.ListField(child=serializers.CharField(), required=False, allow_null=True)
field_aliases = serializers.ListField(
child=serializers.CharField(allow_blank=True), required=False, allow_null=True
)
name = serializers.CharField(required=False, allow_blank=True)
conditions = serializers.CharField(required=False, allow_blank=True)
orderby = serializers.CharField(required=False, allow_blank=True)
is_hidden = serializers.BooleanField(required=False)
on_demand_extraction = DashboardWidgetQueryOnDemandSerializer(many=False, required=False)
on_demand_extraction_disabled = serializers.BooleanField(required=False)
selected_aggregate = serializers.IntegerField(required=False, allow_null=True)
linked_dashboards = LinkedDashboardSerializer(many=True, required=False, allow_null=True)
required_for_create = {"fields", "conditions"}
validate_id = validate_id
def get_metrics_features(
self, organization: Organization | None, user: User | None
) -> dict[str, bool | None]:
if organization is None or user is None:
return {}
feature_names = [
"organizations:mep-rollout-flag",
"organizations:dynamic-sampling",
"organizations:performance-use-metrics",
"organizations:dashboards-mep",
]
batch_features = features.batch_has(
feature_names,
organization=organization,
actor=user,
)
return (
batch_features.get(f"organization:{organization.id}", {})
if batch_features is not None
else {
feature_name: features.has(feature_name, organization=organization, actor=user)
for feature_name in feature_names
}
)
def validate(self, data):
if not data.get("id"):
keys = set(data.keys())
if self.required_for_create - keys:
raise serializers.ValidationError(
{
"fields": "fields are required during creation.",
"conditions": "conditions are required during creation.",
}
)
# Validate the query that would be created when run.
conditions = self._get_attr(data, "conditions", "")
orderby = self._get_attr(data, "orderby", "")
is_table = is_table_display_type(self.context.get("display_type"))
columns = self._get_attr(data, "columns", []).copy()
aggregates = self._get_attr(data, "aggregates", []).copy()
fields = columns + aggregates
# Handle the orderby since it can be a value that's not included in fields
# e.g. a custom equation, or a function that isn't plotted as a y-axis
injected_orderby_equation, orderby_prefix = None, None
stripped_orderby = orderby.lstrip("-")
if is_equation(stripped_orderby):
# The orderby is a custom equation and needs to be added to fields
injected_orderby_equation = stripped_orderby
fields.append(injected_orderby_equation)
orderby_prefix = "-" if orderby.startswith("-") else ""
elif is_function(stripped_orderby) and stripped_orderby not in fields:
fields.append(stripped_orderby)
equations, fields = categorize_columns(fields)
if injected_orderby_equation is not None and orderby_prefix is not None:
# Subtract one because the equation is injected to fields
orderby = f"{orderby_prefix}equation[{len(equations) - 1}]"
params: ParamsType = {
"start": datetime.now() - timedelta(days=1),
"end": datetime.now(),
"project_id": [p.id for p in self.context["projects"]],
"organization_id": self.context["organization"].id,
"environment": self.context.get("environment", []),
}
try:
parse_search_query(conditions, params=params)
except InvalidSearchQuery as err:
# We don't know if the widget that this query belongs to is an
# Issue widget or Discover widget. Pass the error back to the
# Widget serializer to decide if whether or not to raise this
# error based on the Widget's type
data["issue_query_error"] = {"conditions": [f"Invalid conditions: {err}"]}
try:
batch_features = self.get_metrics_features(
self.context.get("organization"), self.context.get("user")
)
use_metrics = bool(
(
batch_features.get("organizations:mep-rollout-flag", False)
and batch_features.get("organizations:dynamic-sampling", False)
)
or batch_features.get("organizations:performance-use-metrics", False)
or batch_features.get("organizations:dashboards-mep", False)
)
# When using the eps/epm functions, they require an interval argument
# or to provide the start/end so that the interval can be computed.
# This uses a hard coded start/end to ensure the validation succeeds
# since the values themselves don't matter.
config = QueryBuilderConfig(
equation_config={
"auto_add": bool(not is_table or injected_orderby_equation),
"aggregates_only": not is_table,
},
use_aggregate_conditions=True,
)
if self.context.get("widget_type") == DashboardWidgetTypes.get_type_name(
DashboardWidgetTypes.ERROR_EVENTS
):
config.parser_config_overrides = ERROR_PARSER_CONFIG_OVERRIDES
elif self.context.get("widget_type") == DashboardWidgetTypes.get_type_name(
DashboardWidgetTypes.TRANSACTION_LIKE
):
config.has_metrics = use_metrics
builder = UnresolvedQuery(
dataset=Dataset.Discover,
params=params,
config=config,
)
builder.resolve_time_conditions()
builder.resolve_conditions(conditions)
# We need to resolve params to set time range params here since some
# field aliases might those params to be resolved (total.count)
builder.where = builder.resolve_params()
except InvalidSearchQuery as err:
data["discover_query_error"] = {"conditions": [f"Invalid conditions: {err}"]}
return data
# TODO(dam): Add validation for metrics fields/queries
try:
builder.columns = builder.resolve_select(fields, equations)
except (InvalidSearchQuery, ArithmeticError) as err:
# We don't know if the widget that this query belongs to is an
# Issue widget or Discover widget. Pass the error back to the
# Widget serializer to decide if whether or not to raise this
# error based on the Widget's type
data["discover_query_error"] = {"fields": f"Invalid fields: {err}"}
try:
builder.resolve_orderby(orderby)
except InvalidSearchQuery as err:
data["discover_query_error"] = {"orderby": f"Invalid orderby: {err}"}
return data
def _get_attr(self, data, attr, empty_value=None):
value = data.get(attr)
if value is not None:
return value
if self.instance:
return getattr(self.instance, attr)
return empty_value
| DashboardWidgetQuerySerializer |
python | pdm-project__pdm | src/pdm/formats/poetry.py | {
"start": 4062,
"end": 9042
} | class ____(MetaConverter):
@convert_from("authors")
def authors(self, value: list[str]) -> list[dict]:
return parse_name_email(value)
@convert_from("maintainers")
def maintainers(self, value: list[str]) -> list[dict]:
return parse_name_email(value)
@convert_from("license")
def license(self, value: str) -> dict[str, str]:
return make_inline_table({"text": value})
@convert_from(name="requires-python")
def requires_python(self, source: dict[str, Any]) -> str:
python = source.get("dependencies", {}).pop("python", None)
return str(_convert_python(python))
@convert_from()
def urls(self, source: dict[str, Any]) -> dict[str, str]:
rv = source.pop("urls", {})
if "homepage" in source:
rv["homepage"] = source.pop("homepage")
if "repository" in source:
rv["repository"] = source.pop("repository")
if "documentation" in source:
rv["documentation"] = source.pop("documentation")
return rv
@convert_from("plugins", name="entry-points")
def entry_points(self, value: dict[str, dict[str, str]]) -> dict[str, dict[str, str]]:
return value
@convert_from()
def dependencies(self, source: dict[str, Any]) -> list[str]:
rv = []
value, extras = dict(source["dependencies"]), source.pop("extras", {})
for key, req_dict in value.items():
optional = getattr(req_dict, "items", None) and req_dict.pop("optional", False)
for req in _convert_req(key, req_dict):
if optional:
extra = next((k for k, v in extras.items() if key in v), None)
if extra:
self._data.setdefault("optional-dependencies", {}).setdefault(extra, []).append(req)
else:
rv.append(req)
del source["dependencies"]
return make_array(rv, True)
@convert_from("dev-dependencies")
def dev_dependencies(self, value: dict) -> None:
self.settings.setdefault("dev-dependencies", {})["dev"] = make_array(
[r for key, req in value.items() for r in _convert_req(key, req)], True
)
raise Unset()
@convert_from("group")
def group_dependencies(self, value: dict[str, dict[str, Any]]) -> None:
for name, group in value.items():
self.settings.setdefault("dev-dependencies", {})[name] = make_array(
[r for key, req in group.get("dependencies", {}).items() for r in _convert_req(key, req)], True
)
raise Unset()
@convert_from("package-mode")
def package_mode(self, value: bool) -> None:
self.settings["distribution"] = value
raise Unset()
@convert_from()
def includes(self, source: dict[str, list[str] | str]) -> list[str]:
includes: list[str] = []
source_includes: list[str] = []
for item in source.pop("packages", []):
assert isinstance(item, dict)
include = item["include"]
if item.get("from"):
include = Path(str(item.get("from")), include).as_posix()
includes.append(include)
for item in source.pop("include", []):
if not isinstance(item, dict):
includes.append(item)
else:
dest = source_includes if "sdist" in item.get("format", "") else includes
dest.append(item["path"])
if includes:
self.settings.setdefault("build", {})["includes"] = includes
raise Unset()
@convert_from("exclude")
def excludes(self, value: list[str]) -> None:
self.settings.setdefault("build", {})["excludes"] = value
raise Unset()
@convert_from("build")
def build(self, value: str | dict) -> None:
result = {}
if isinstance(value, dict) and "generate-setup-file" in value:
result["run-setuptools"] = cast(bool, value["generate-setup-file"])
self.settings.setdefault("build", {}).update(result)
raise Unset()
@convert_from("source")
def sources(self, value: list[dict[str, Any]]) -> None:
self.settings["source"] = [
{
"name": item.get("name", ""),
"url": item.get("url", ""),
"verify_ssl": item.get("url", "").startswith("https"),
}
for item in value
]
raise Unset()
def convert(
project: Project | None,
filename: str | Path,
options: Namespace | None,
) -> tuple[Mapping[str, Any], Mapping[str, Any]]:
with open(filename, "rb") as fp, cd(os.path.dirname(os.path.abspath(filename))):
converter = PoetryMetaConverter(tomllib.load(fp)["tool"]["poetry"], project.core.ui if project else None)
return converter.convert()
def export(project: Project, candidates: list, options: Any) -> None:
raise NotImplementedError()
| PoetryMetaConverter |
python | GoogleCloudPlatform__python-docs-samples | composer/tools/composer_migrate.py | {
"start": 922,
"end": 19485
} | class ____:
"""Client for interacting with Composer API.
The client uses gcloud under the hood.
"""
def __init__(self, project: str, location: str, sdk_endpoint: str) -> None:
self.project = project
self.location = location
self.sdk_endpoint = sdk_endpoint
def get_environment(self, environment_name: str) -> Any:
"""Returns an environment json for a given Composer environment."""
command = (
f"CLOUDSDK_API_ENDPOINT_OVERRIDES_COMPOSER={self.sdk_endpoint} gcloud"
" composer environments describe"
f" {environment_name} --project={self.project} --location={self.location} --format"
" json"
)
output = run_shell_command(command)
return json.loads(output)
def create_environment_from_config(self, config: Any) -> Any:
"""Creates a Composer environment based on the given json config."""
# Obtain access token through gcloud
access_token = run_shell_command("gcloud auth print-access-token")
# gcloud does not support creating composer environments from json, so we
# need to use the API directly.
create_environment_command = (
f"curl -s -X POST -H 'Authorization: Bearer {access_token}'"
" -H 'Content-Type: application/json'"
f" -d '{json.dumps(config)}'"
f" {self.sdk_endpoint}/v1/projects/{self.project}/locations/{self.location}/environments"
)
output = run_shell_command(create_environment_command)
logging.info("Create environment operation: %s", output)
# Poll create operation using gcloud.
operation_id = json.loads(output)["name"].split("/")[-1]
poll_operation_command = (
f"CLOUDSDK_API_ENDPOINT_OVERRIDES_COMPOSER={self.sdk_endpoint} gcloud"
" composer operations wait"
f" {operation_id} --project={self.project} --location={self.location}"
)
run_shell_command(poll_operation_command)
def list_dags(self, environment_name: str) -> List[str]:
"""Returns a list of DAGs in a given Composer environment."""
command = (
f"CLOUDSDK_API_ENDPOINT_OVERRIDES_COMPOSER={self.sdk_endpoint} gcloud"
" composer environments run"
f" {environment_name} --project={self.project} --location={self.location} dags"
" list -- -o json"
)
output = run_shell_command(command)
# Output may contain text from top level print statements.
# The last line of the output is always a json array of DAGs.
return json.loads(output.splitlines()[-1])
def pause_dag(
self,
dag_id: str,
environment_name: str,
) -> Any:
"""Pauses a DAG in a Composer environment."""
command = (
f"CLOUDSDK_API_ENDPOINT_OVERRIDES_COMPOSER={self.sdk_endpoint} gcloud"
" composer environments run"
f" {environment_name} --project={self.project} --location={self.location} dags"
f" pause -- {dag_id}"
)
run_shell_command(command)
def unpause_dag(
self,
dag_id: str,
environment_name: str,
) -> Any:
"""Unpauses all DAGs in a Composer environment."""
command = (
f"CLOUDSDK_API_ENDPOINT_OVERRIDES_COMPOSER={self.sdk_endpoint} gcloud"
" composer environments run"
f" {environment_name} --project={self.project} --location={self.location} dags"
f" unpause -- {dag_id}"
)
run_shell_command(command)
def save_snapshot(self, environment_name: str) -> str:
"""Saves a snapshot of a Composer environment."""
command = (
f"CLOUDSDK_API_ENDPOINT_OVERRIDES_COMPOSER={self.sdk_endpoint} gcloud"
" composer"
" environments snapshots save"
f" {environment_name} --project={self.project}"
f" --location={self.location} --format=json"
)
output = run_shell_command(command)
return json.loads(output)["snapshotPath"]
def load_snapshot(
self,
environment_name: str,
snapshot_path: str,
) -> Any:
"""Loads a snapshot to a Composer environment."""
command = (
f"CLOUDSDK_API_ENDPOINT_OVERRIDES_COMPOSER={self.sdk_endpoint} gcloud"
" composer"
f" environments snapshots load {environment_name}"
f" --snapshot-path={snapshot_path} --project={self.project}"
f" --location={self.location} --format=json"
)
run_shell_command(command)
def run_shell_command(command: str, command_input: "str | None" = None) -> str:
    """Executes a shell command and returns its stripped stdout.

    Args:
        command: Shell command line, run via the system shell.
        command_input: Optional text to feed to the command's stdin.

    Returns:
        Decoded stdout with trailing/leading newlines stripped.

    Raises:
        RuntimeError: If the command exits with a non-zero status.
    """
    # stdin must be a PIPE for communicate() to deliver input; without it
    # the input argument is silently ignored.
    stdin = subprocess.PIPE if command_input is not None else None
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=stdin, shell=True)
    # The pipes are byte streams, so encode the input and decode the output.
    encoded_input = command_input.encode() if command_input is not None else None
    (res, _) = p.communicate(input=encoded_input)
    output = res.decode().strip("\n")
    if p.returncode:
        raise RuntimeError(f"Failed to run shell command: {command}, details: {output}")
    return output
def get_target_cpu(source_cpu: float, max_cpu: float) -> float:
    """Returns a target CPU value for a Composer 3 workload.

    Composer 3 workloads accept 0.5, 1.0, or even multiples of 2.0 capped
    at ``max_cpu``.
    """
    if source_cpu <= 1.0:
        # Anything below one vCPU maps to the smallest allowed value.
        return source_cpu if source_cpu == 1.0 else 0.5
    # Round up to the next multiple of 2, but never exceed the cap.
    rounded_up = math.ceil(source_cpu / 2.0) * 2
    return min(rounded_up, max_cpu)
def get_target_memory_gb(source_memory_gb: float, target_cpu: float) -> float:
    """Returns a target memory in GB for a Composer 3 workload.

    Allowed values are multiples of 0.25 GB, bounded below by
    ``max(1 GB, 1 * cpu)`` and above by ``8 * cpu``.
    """
    # Round up to the nearest quarter GB, then clamp into the allowed band.
    quarter_rounded = math.ceil(source_memory_gb * 4.0) / 4.0
    clamped = min(quarter_rounded, target_cpu * 8)
    return max(1.0, target_cpu, clamped)
def get_target_storage_gb(source_storage_gb: float) -> float:
    """Returns a target storage in GB for a Composer 3 workload.

    Composer 3 allows only whole numbers of GB for storage, up to 100 GB.
    """
    whole_gb = math.ceil(source_storage_gb)
    if whole_gb > 100.0:
        return 100.0
    return whole_gb
def _scaled_workload(workload: Dict[str, Any], max_cpu: float) -> Dict[str, Any]:
    """Scales one workload's cpu/memory/storage onto Composer 3 allowed values."""
    cpu = get_target_cpu(workload["cpu"], max_cpu)
    return {
        "cpu": cpu,
        "memoryGb": get_target_memory_gb(workload["memoryGb"], cpu),
        "storageGb": get_target_storage_gb(workload["storageGb"]),
    }


def get_target_workloads_config(
    source_workloads_config: Any,
) -> Dict[str, Any]:
    """Returns a Composer 3 workloads config based on the source environment.

    Args:
        source_workloads_config: The ``workloadsConfig`` dict of the source
            (Composer 2) environment; may be empty.

    Returns:
        A ``workloadsConfig`` dict with values clamped to Composer 3 limits.
    """
    workloads_config: Dict[str, Any] = {}
    scheduler = source_workloads_config.get("scheduler")
    if scheduler:
        # Composer 3 caps the scheduler at 1.0 vCPU and 3 replicas.
        workloads_config["scheduler"] = {
            **_scaled_workload(scheduler, 1.0),
            "count": min(scheduler["count"], 3),
        }
        # Use configuration from the Composer 2 scheduler for the Composer 3
        # dagProcessor, which is a separate workload in Composer 3.
        workloads_config["dagProcessor"] = {
            **_scaled_workload(scheduler, 32.0),
            "count": min(scheduler["count"], 3),
        }
    web_server = source_workloads_config.get("webServer")
    if web_server:
        workloads_config["webServer"] = _scaled_workload(web_server, 4.0)
    worker = source_workloads_config.get("worker")
    if worker:
        workloads_config["worker"] = {
            **_scaled_workload(worker, 32.0),
            "minCount": worker["minCount"],
            "maxCount": worker["maxCount"],
        }
    triggerer = source_workloads_config.get("triggerer")
    if triggerer:
        # The triggerer has no storage setting; scale only cpu and memory.
        triggerer_cpu = get_target_cpu(triggerer["cpu"], 1.0)
        workloads_config["triggerer"] = {
            "cpu": triggerer_cpu,
            "memoryGb": get_target_memory_gb(triggerer["memoryGb"], triggerer_cpu),
            "count": triggerer["count"],
        }
    else:
        # A triggerer entry is always emitted; zero count disables it.
        workloads_config["triggerer"] = {
            "count": 0,
        }
    return workloads_config
def get_target_environment_config(
    target_environment_name: str,
    target_airflow_version: str,
    source_environment: Any,
) -> Dict[str, Any]:
    """Returns a Composer 3 environment config based on the source environment."""
    source_config = source_environment["config"]
    source_node = source_config["nodeConfig"]

    # Reuse the source environment's project/location resource-name prefix,
    # swapping in the new environment name as the final segment.
    name_prefix = source_environment["name"].split("/")[:-1]
    full_target_name = "/".join(name_prefix + [target_environment_name])

    node_config = {
        "network": source_node.get("network"),
        "serviceAccount": source_node["serviceAccount"],
        "tags": source_node.get("tags", []),
    }
    if "subnetwork" in source_node:
        node_config["subnetwork"] = source_node["subnetwork"]

    software_config = {
        "imageVersion": f"composer-3-airflow-{target_airflow_version}",
        "cloudDataLineageIntegration": source_config["softwareConfig"].get(
            "cloudDataLineageIntegration", {}
        ),
    }
    private_environment_config = {
        "enablePrivateEnvironment": (
            source_config.get("privateEnvironmentConfig", {}).get(
                "enablePrivateEnvironment", False
            )
        )
    }

    return {
        "name": full_target_name,
        "labels": source_environment.get("labels", {}),
        "config": {
            "softwareConfig": software_config,
            "nodeConfig": node_config,
            "privateEnvironmentConfig": private_environment_config,
            "webServerNetworkAccessControl": source_config[
                "webServerNetworkAccessControl"
            ],
            "environmentSize": source_config["environmentSize"],
            "databaseConfig": source_config["databaseConfig"],
            "encryptionConfig": source_config["encryptionConfig"],
            "maintenanceWindow": source_config["maintenanceWindow"],
            "dataRetentionConfig": {
                "airflowMetadataRetentionConfig": source_config[
                    "dataRetentionConfig"
                ]["airflowMetadataRetentionConfig"]
            },
            "workloadsConfig": get_target_workloads_config(
                source_config.get("workloadsConfig", {})
            ),
        },
    }
def main(
    project_name: str,
    location: str,
    source_environment_name: str,
    target_environment_name: str,
    target_airflow_version: str,
    sdk_endpoint: str,
    dry_run: bool,
) -> int:
    """Migrates a Composer 2 environment to a new Composer 3 environment.

    Runs six steps: validate the source environment, create a Composer 3
    environment derived from its config, pause the source DAGs, save a
    snapshot, load the snapshot into the new environment, and unpause the
    DAGs that were active in the source environment.

    Args:
        project_name: GCP project of both environments.
        location: Location (region) of both environments.
        source_environment_name: Name of the Composer 2 environment.
        target_environment_name: Name of the Composer 3 environment to create.
        target_airflow_version: Airflow version for the new environment.
        sdk_endpoint: Composer API endpoint override passed to gcloud.
        dry_run: If True, only print the target config and exit.

    Returns:
        0 on success.

    Raises:
        ValueError: If the source environment is not a Composer 2 environment.
    """
    # Local import: the module-level import header is kept unchanged.
    import time

    client = ComposerClient(
        project=project_name, location=location, sdk_endpoint=sdk_endpoint
    )
    # 1. Get the source environment, validate whether it is eligible
    # for migration and produce a Composer 3 environment config.
    logger.info("STEP 1: Getting and validating the source environment...")
    source_environment = client.get_environment(source_environment_name)
    logger.info("Source environment:\n%s", pprint.pformat(source_environment))
    image_version = source_environment["config"]["softwareConfig"]["imageVersion"]
    if not image_version.startswith("composer-2"):
        raise ValueError(
            f"Source environment {source_environment['name']} is not a Composer 2"
            f" environment. Current image version: {image_version}"
        )
    # 2. Create a Composer 3 environment based on the source environment
    # configuration.
    target_environment = get_target_environment_config(
        target_environment_name, target_airflow_version, source_environment
    )
    logger.info(
        "Composer 3 environment will be created with the following config:\n%s",
        pprint.pformat(target_environment),
    )
    logger.warning(
        "Composer 3 environment workloads config may be different from the"
        " source environment."
    )
    logger.warning(
        "Newly created Composer 3 environment will not have set"
        " 'airflowConfigOverrides', 'pypiPackages' and 'envVariables'. Those"
        " fields will be set when the snapshot is loaded."
    )
    if dry_run:
        logger.info("Dry run enabled, exiting.")
        return 0
    logger.info("STEP 2: Creating a Composer 3 environment...")
    client.create_environment_from_config(target_environment)
    target_environment = client.get_environment(target_environment_name)
    logger.info(
        "Composer 3 environment successfully created:\n%s",
        pprint.pformat(target_environment),
    )
    # 3. Pause all DAGs in the source environment.
    logger.info("STEP 3: Pausing all DAGs in the source environment...")
    source_env_dags = client.list_dags(source_environment_name)
    source_env_dag_ids = [dag["dag_id"] for dag in source_env_dags]
    logger.info(
        "Found %d DAGs in the source environment: %s",
        len(source_env_dags),
        source_env_dag_ids,
    )
    for dag in source_env_dags:
        # airflow_monitoring is Composer's own liveness DAG; leave it running.
        if dag["dag_id"] == "airflow_monitoring":
            continue
        # NOTE(review): "dags list" output appears to report is_paused as the
        # string "True"/"False" rather than a boolean -- confirm against the
        # gcloud/Airflow CLI JSON output.
        if dag["is_paused"] == "True":
            logger.info("DAG %s is already paused.", dag["dag_id"])
            continue
        logger.info("Pausing DAG %s in the source environment.", dag["dag_id"])
        client.pause_dag(dag["dag_id"], source_environment_name)
        logger.info("DAG %s paused.", dag["dag_id"])
    logger.info("All DAGs in the source environment paused.")
    # 4. Save snapshot of the source environment.
    logger.info("STEP 4: Saving snapshot of the source environment...")
    snapshot_path = client.save_snapshot(source_environment_name)
    logger.info("Snapshot saved: %s", snapshot_path)
    # 5. Load the snapshot into the target environment.
    logger.info("STEP 5: Loading snapshot into the new environment...")
    client.load_snapshot(target_environment_name, snapshot_path)
    logger.info("Snapshot loaded.")
    # 6. Unpause DAGs in the new environment.
    logger.info("STEP 6: Unpausing DAGs in the new environment...")
    all_dags_present = False
    # Wait until all DAGs from source environment are visible.
    while not all_dags_present:
        target_env_dags = client.list_dags(target_environment_name)
        target_env_dag_ids = [dag["dag_id"] for dag in target_env_dags]
        all_dags_present = set(source_env_dag_ids) == set(target_env_dag_ids)
        logger.info("List of DAGs in the target environment: %s", target_env_dag_ids)
        if not all_dags_present:
            # Give the scheduler time to parse DAGs instead of re-running
            # the (slow) gcloud listing back-to-back.
            time.sleep(10)
    # Unpause only DAGs that were not paused in the source environment.
    for dag in source_env_dags:
        if dag["dag_id"] == "airflow_monitoring":
            continue
        if dag["is_paused"] == "True":
            logger.info("DAG %s was paused in the source environment.", dag["dag_id"])
            continue
        logger.info("Unpausing DAG %s in the target environment.", dag["dag_id"])
        client.unpause_dag(dag["dag_id"], target_environment_name)
        logger.info("DAG %s unpaused.", dag["dag_id"])
    logger.info("DAGs in the target environment unpaused.")
    logger.info("Migration complete.")
    return 0
def parse_arguments() -> argparse.Namespace:
    """Parses command line arguments.

    Returns:
        The parsed command-line arguments as an ``argparse.Namespace``
        (``ArgumentParser.parse_args`` does not return a dict).
    """
    argument_parser = argparse.ArgumentParser(
        usage="Script for migrating environments from Composer 2 to Composer 3.\n"
    )
    argument_parser.add_argument(
        "--project",
        type=str,
        required=True,
        help="Project name of the Composer environment to migrate.",
    )
    argument_parser.add_argument(
        "--location",
        type=str,
        required=True,
        help="Location of the Composer environment to migrate.",
    )
    argument_parser.add_argument(
        "--source_environment",
        type=str,
        required=True,
        help="Name of the Composer 2 environment to migrate.",
    )
    argument_parser.add_argument(
        "--target_environment",
        type=str,
        required=True,
        help="Name of the Composer 3 environment to create.",
    )
    argument_parser.add_argument(
        "--target_airflow_version",
        type=str,
        default="2",
        help="Airflow version for the Composer 3 environment.",
    )
    argument_parser.add_argument(
        "--dry_run",
        action="store_true",
        default=False,
        help=(
            "If true, script will only print the config for the Composer 3"
            " environment."
        ),
    )
    argument_parser.add_argument(
        "--sdk_endpoint",
        type=str,
        default="https://composer.googleapis.com/",
        required=False,
    )
    return argument_parser.parse_args()
if __name__ == "__main__":
    args = parse_arguments()
    # raise SystemExit instead of the site-provided exit() builtin, which is
    # only available when the `site` module is loaded (e.g. not under -S).
    raise SystemExit(
        main(
            project_name=args.project,
            location=args.location,
            source_environment_name=args.source_environment,
            target_environment_name=args.target_environment,
            target_airflow_version=args.target_airflow_version,
            sdk_endpoint=args.sdk_endpoint,
            dry_run=args.dry_run,
        )
    )
| ComposerClient |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 55712,
"end": 55890
} | class ____(themeable):
"""
Padding between the legends and the box
Parameters
----------
theme_element : int
Value in points.
"""
| legend_box_margin |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 27927,
"end": 33247
} | class ____:
def test_basic(self):
x = [1, 4, 6, 7, 12]
out = np.array([3, 2, 1, 5])
out2 = np.array([-1, -1, 4])
out3 = np.array([0, 5])
assert_array_equal(diff(x), out)
assert_array_equal(diff(x, n=2), out2)
assert_array_equal(diff(x, n=3), out3)
x = [1.1, 2.2, 3.0, -0.2, -0.1]
out = np.array([1.1, 0.8, -3.2, 0.1])
assert_almost_equal(diff(x), out)
x = [True, True, False, False]
out = np.array([False, True, False])
out2 = np.array([True, True])
assert_array_equal(diff(x), out)
assert_array_equal(diff(x, n=2), out2)
def test_axis(self):
x = np.zeros((10, 20, 30))
x[:, 1::2, :] = 1
exp = np.ones((10, 19, 30))
exp[:, 1::2, :] = -1
assert_array_equal(diff(x), np.zeros((10, 20, 29)))
assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29)))
assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30)))
assert_array_equal(diff(x, axis=1), exp)
assert_array_equal(diff(x, axis=-2), exp)
assert_raises(AxisError, diff, x, axis=3)
assert_raises(AxisError, diff, x, axis=-4)
x = np.array(1.11111111111, np.float64)
assert_raises(ValueError, diff, x)
def test_nd(self):
x = 20 * rand(10, 20, 30)
out1 = x[:, :, 1:] - x[:, :, :-1]
out2 = out1[:, :, 1:] - out1[:, :, :-1]
out3 = x[1:, :, :] - x[:-1, :, :]
out4 = out3[1:, :, :] - out3[:-1, :, :]
assert_array_equal(diff(x), out1)
assert_array_equal(diff(x, n=2), out2)
assert_array_equal(diff(x, axis=0), out3)
assert_array_equal(diff(x, n=2, axis=0), out4)
def test_n(self):
x = list(range(3))
assert_raises(ValueError, diff, x, n=-1)
output = [diff(x, n=n) for n in range(1, 5)]
expected = [[1, 1], [0], [], []]
assert_(diff(x, n=0) is x)
for n, (expected_n, output_n) in enumerate(zip(expected, output), start=1):
assert_(type(output_n) is np.ndarray)
assert_array_equal(output_n, expected_n)
assert_equal(output_n.dtype, np.int_)
assert_equal(len(output_n), max(0, len(x) - n))
def test_times(self):
x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
expected = [
np.array([1, 1], dtype='timedelta64[D]'),
np.array([0], dtype='timedelta64[D]'),
]
expected.extend([np.array([], dtype='timedelta64[D]')] * 3)
for n, exp in enumerate(expected, start=1):
out = diff(x, n=n)
assert_array_equal(out, exp)
assert_equal(out.dtype, exp.dtype)
def test_subclass(self):
x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]],
mask=[[False, False], [True, False],
[False, True], [True, True], [False, False]])
out = diff(x)
assert_array_equal(out.data, [[1], [1], [1], [1], [1]])
assert_array_equal(out.mask, [[False], [True],
[True], [True], [False]])
assert_(type(out) is type(x))
out3 = diff(x, n=3)
assert_array_equal(out3.data, [[], [], [], [], []])
assert_array_equal(out3.mask, [[], [], [], [], []])
assert_(type(out3) is type(x))
def test_prepend(self):
x = np.arange(5) + 1
assert_array_equal(diff(x, prepend=0), np.ones(5))
assert_array_equal(diff(x, prepend=[0]), np.ones(5))
assert_array_equal(np.cumsum(np.diff(x, prepend=0)), x)
assert_array_equal(diff(x, prepend=[-1, 0]), np.ones(6))
x = np.arange(4).reshape(2, 2)
result = np.diff(x, axis=1, prepend=0)
expected = [[0, 1], [2, 1]]
assert_array_equal(result, expected)
result = np.diff(x, axis=1, prepend=[[0], [0]])
assert_array_equal(result, expected)
result = np.diff(x, axis=0, prepend=0)
expected = [[0, 1], [2, 2]]
assert_array_equal(result, expected)
result = np.diff(x, axis=0, prepend=[[0, 0]])
assert_array_equal(result, expected)
assert_raises(ValueError, np.diff, x, prepend=np.zeros((3, 3)))
assert_raises(AxisError, diff, x, prepend=0, axis=3)
def test_append(self):
x = np.arange(5)
result = diff(x, append=0)
expected = [1, 1, 1, 1, -4]
assert_array_equal(result, expected)
result = diff(x, append=[0])
assert_array_equal(result, expected)
result = diff(x, append=[0, 2])
expected = expected + [2]
assert_array_equal(result, expected)
x = np.arange(4).reshape(2, 2)
result = np.diff(x, axis=1, append=0)
expected = [[1, -1], [1, -3]]
assert_array_equal(result, expected)
result = np.diff(x, axis=1, append=[[0], [0]])
assert_array_equal(result, expected)
result = np.diff(x, axis=0, append=0)
expected = [[2, 2], [-2, -3]]
assert_array_equal(result, expected)
result = np.diff(x, axis=0, append=[[0, 0]])
assert_array_equal(result, expected)
assert_raises(ValueError, np.diff, x, append=np.zeros((3, 3)))
assert_raises(AxisError, diff, x, append=0, axis=3)
| TestDiff |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mailchimp/components.py | {
"start": 457,
"end": 798
} | class ____(DpathExtractor):
def extract_records(self, response: requests.Response) -> Iterable[MutableMapping[str, Any]]:
records = super().extract_records(response=response)
yield from ({**record, **activity_item} for record in records for activity_item in record.pop("activity", []))
| MailChimpRecordExtractorEmailActivity |
python | sqlalchemy__sqlalchemy | test/sql/test_quote.py | {
"start": 28318,
"end": 31207
} | class ____(fixtures.TestBase):
"""Test the db-agnostic quoting services of IdentifierPreparer."""
def test_unformat(self):
prep = compiler.IdentifierPreparer(default.DefaultDialect())
unformat = prep.unformat_identifiers
def a_eq(have, want):
if have != want:
print("Wanted %s" % want)
print("Received %s" % have)
self.assert_(have == want)
a_eq(unformat("foo"), ["foo"])
a_eq(unformat('"foo"'), ["foo"])
a_eq(unformat("'foo'"), ["'foo'"])
a_eq(unformat("foo.bar"), ["foo", "bar"])
a_eq(unformat('"foo"."bar"'), ["foo", "bar"])
a_eq(unformat('foo."bar"'), ["foo", "bar"])
a_eq(unformat('"foo".bar'), ["foo", "bar"])
a_eq(unformat('"foo"."b""a""r"."baz"'), ["foo", 'b"a"r', "baz"])
def test_unformat_custom(self):
class Custom(compiler.IdentifierPreparer):
def __init__(self, dialect):
super().__init__(dialect, initial_quote="`", final_quote="`")
def _escape_identifier(self, value):
return value.replace("`", "``")
def _unescape_identifier(self, value):
return value.replace("``", "`")
prep = Custom(default.DefaultDialect())
unformat = prep.unformat_identifiers
def a_eq(have, want):
if have != want:
print("Wanted %s" % want)
print("Received %s" % have)
self.assert_(have == want)
a_eq(unformat("foo"), ["foo"])
a_eq(unformat("`foo`"), ["foo"])
a_eq(unformat(repr("foo")), ["'foo'"])
a_eq(unformat("foo.bar"), ["foo", "bar"])
a_eq(unformat("`foo`.`bar`"), ["foo", "bar"])
a_eq(unformat("foo.`bar`"), ["foo", "bar"])
a_eq(unformat("`foo`.bar"), ["foo", "bar"])
a_eq(unformat("`foo`.`b``a``r`.`baz`"), ["foo", "b`a`r", "baz"])
def test_alembic_quote(self):
t1 = Table(
"TableOne", MetaData(), Column("MyCol", Integer, index=True)
)
t2 = Table(
"some_table", MetaData(), Column("some_col", Integer, index=True)
)
t3 = Table(
"some_table", MetaData(), Column("some_col", Integer, index=True)
)
ix3 = Index("my_index", t3.c.some_col)
ix4 = Index("MyIndex", t3.c.some_col)
ix5 = Index(None, t3.c.some_col)
for idx, expected in [
(list(t1.indexes)[0], "ix_TableOne_MyCol"),
(list(t2.indexes)[0], "ix_some_table_some_col"),
(ix3, "my_index"),
(ix4, "MyIndex"),
(ix5, "ix_some_table_some_col"),
]:
eq_(
testing.db.dialect.identifier_preparer.format_constraint(
idx, _alembic_quote=False
),
expected,
)
| PreparerTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 61064,
"end": 61584
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("start_line", "start_column", "end_line", "end_column")
start_line = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="startLine")
start_column = sgqlc.types.Field(Int, graphql_name="startColumn")
end_line = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="endLine")
end_column = sgqlc.types.Field(Int, graphql_name="endColumn")
| CheckAnnotationRange |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 437378,
"end": 439280
} | class ____(Fit):
"""
GeoJsonFeature schema wrapper.
A feature object which contains a geometry and associated properties.
https://tools.ietf.org/html/rfc7946#section-3.2
Parameters
----------
geometry : dict, :class:`Point`, :class:`Polygon`, :class:`Geometry`, :class:`LineString`, :class:`MultiPoint`, :class:`MultiPolygon`, :class:`MultiLineString`, :class:`GeometryCollection`
The feature's geometry
properties : dict, :class:`GeoJsonProperties`, None
Properties associated with this feature.
type : Literal['Feature']
Specifies the type of GeoJSON object.
bbox : :class:`BBox`, Sequence[float]
Bounding box of the coordinate range of the object's Geometries, Features, or
Feature Collections. The value of the bbox member is an array of length 2*n where n
is the number of dimensions represented in the contained geometries, with all axes
of the most southwesterly point followed by all axes of the more northeasterly
point. The axes order of a bbox follows the axes order of geometries.
https://tools.ietf.org/html/rfc7946#section-5
id : str, float
A value that uniquely identifies this feature in a
https://tools.ietf.org/html/rfc7946#section-3.2.
"""
_schema = {"$ref": "#/definitions/GeoJsonFeature"}
def __init__(
self,
geometry: Optional[SchemaBase | Map] = Undefined,
properties: Optional[SchemaBase | Map | None] = Undefined,
type: Optional[Literal["Feature"]] = Undefined,
bbox: Optional[SchemaBase | Sequence[float]] = Undefined,
id: Optional[str | float] = Undefined,
**kwds,
):
super().__init__(
geometry=geometry,
properties=properties,
type=type,
bbox=bbox,
id=id,
**kwds,
)
| GeoJsonFeature |
python | pytorch__pytorch | torch/_inductor/codegen/common.py | {
"start": 83842,
"end": 89535
} | class ____:
"""
Base class for defining kernel templates.
Children classes: TritonTemplate, CUDATemplate
"""
@staticmethod
def indent_except_first(
source: str, num_indents: int, indents_spacing: int = 4
) -> str:
lines = source.splitlines(True)
if len(lines) > 1:
lines[1:] = [
(" " * indents_spacing * num_indents) + line for line in lines[1:]
]
return "".join(lines)
@staticmethod
def _template_from_string(source: str) -> Any:
env = jinja2_env()
if env is None:
return None
env.filters["indent_except_first"] = KernelTemplate.indent_except_first
from jinja2 import TemplateSyntaxError
try:
return env.from_string(source)
except TemplateSyntaxError as e:
class DetailedTemplateSyntaxError(TemplateSyntaxError):
def __init__(self, original_error: TemplateSyntaxError) -> None:
super().__init__(
# pyrefly: ignore [bad-argument-type]
original_error.message,
original_error.lineno,
original_error.name,
original_error.filename,
)
self.original_error = original_error
def __str__(self) -> str:
error_info = f"Error in template at line {self.lineno}\n"
error_info += f"Error message: {self.message}\n"
if hasattr(self.original_error, "source"):
# pyrefly: ignore [missing-attribute]
lines = self.original_error.source.split("\n")
error_info += "Context:\n"
start = max(0, self.lineno - 2)
end = min(len(lines), self.lineno + 2)
for i in range(start, end):
if i == self.lineno - 1:
error_info += f"{i + 1}: --> {lines[i]}\n"
if hasattr(self.original_error, "column"):
error_info += (
" "
+ " " * (self.original_error.column - 1)
+ "^\n"
)
else:
error_info += f"{i + 1}: {lines[i]}\n"
return error_info
raise DetailedTemplateSyntaxError(e) from e
@staticmethod
def _fake_get_dtype(
fake_outs: Union[list[Buffer], Buffer],
) -> Callable[[str], torch.dtype]:
_get_dtype_real = V.graph.get_dtype
if isinstance(fake_outs, (list, tuple)):
lookup = {buf.get_name(): buf.get_dtype() for buf in fake_outs}
else:
lookup = {fake_outs.get_name(): fake_outs.get_dtype()}
def get_dtype(name: str) -> torch.dtype:
result = lookup.get(name)
if result is not None:
return result
return _get_dtype_real(name)
return get_dtype
def __init__(self, name: str, hash: Optional[str] = None) -> None:
self.name = name
self._hash = hash
@property
def uid(self) -> str:
"""
entry point to override for templates to ensure a uid e.g. through a prefix
the purpose of this is that every KernelTemplate/ExternKernelChoice is unique
in the system, but reproducible e.g. restarting pytorch should yield the same id
"""
# TODO(coconutruben): add some central registration to assert on global uniqueness
return self.name
@property
def src_hash(self) -> Union[str, None]:
"""
source hash for a Template.
Templates can optionally provide a src hash to make it easier to cache/validate that
a template has not changed from one version to another. Override this if that detection
is different for your specific Template
"""
return self._hash
def choice_or_none(self, **kwargs: Any) -> Optional[ChoiceCaller]:
"""
Maybe generates a new ChoiceCaller and returns it, or None if generation fails.
kwargs: Additional kwargs to be passed to self.generate() to generate a new ChoiceCaller.
"""
temp_choices: list[Any] = []
result = self.maybe_append_choice(temp_choices, **kwargs)
if result is None and len(temp_choices) == 1:
return temp_choices[0]
return None
def maybe_append_choice(
self, choices: list[Any], **kwargs: Any
) -> Optional[NotImplementedError]:
"""
Maybe generates a new ChoiceCaller and appends it into existing choices.
Returns None if success, otherwise returns the error.
choices: A list of ChoiceCallers.
kwargs: Additional kwargs to be passed to self.generate() to generate a new ChoiceCaller.
"""
try:
choices.append(self.generate(**kwargs))
return None
except NotImplementedError as e:
log.info( # noqa: G200
"Cannot Append Choice: %s. KernelTemplate type is %s",
e,
type(self),
stack_info=log.getEffectiveLevel() < logging.INFO,
)
return e
def generate(self, **kwargs: Any) -> ChoiceCaller:
"""
Generates a ChoiceCaller instance from the given arguments.
"""
raise NotImplementedError
| KernelTemplate |
python | sphinx-doc__sphinx | sphinx/application.py | {
"start": 4640,
"end": 65178
} | class ____:
"""The main application class and extensibility interface.
:ivar srcdir: Directory containing source.
:ivar confdir: Directory containing ``conf.py``.
:ivar doctreedir: Directory for storing pickled doctrees.
:ivar outdir: Directory for storing build documents.
"""
warningiserror: Final = False
_warncount: int
srcdir = _StrPathProperty()
confdir = _StrPathProperty()
outdir = _StrPathProperty()
doctreedir = _StrPathProperty()
def __init__(
self,
srcdir: str | os.PathLike[str],
confdir: str | os.PathLike[str] | None,
outdir: str | os.PathLike[str],
doctreedir: str | os.PathLike[str],
buildername: str,
confoverrides: dict[str, Any] | None = None,
status: IO[str] | None = sys.stdout,
warning: IO[str] | None = sys.stderr,
freshenv: bool = False,
warningiserror: bool = False,
tags: Sequence[str] = (),
verbosity: int = 0,
parallel: int = 0,
keep_going: bool = False,
pdb: bool = False,
exception_on_warning: bool = False,
) -> None:
"""Initialize the Sphinx application.
:param srcdir: The path to the source directory.
:param confdir: The path to the configuration directory.
If not given, it is assumed to be the same as ``srcdir``.
:param outdir: Directory for storing build documents.
:param doctreedir: Directory for caching pickled doctrees.
:param buildername: The name of the builder to use.
:param confoverrides: A dictionary of configuration settings that override the
settings in the configuration file.
:param status: A file-like object to write status messages to.
:param warning: A file-like object to write warnings to.
:param freshenv: If true, clear the cached environment.
:param warningiserror: If true, warnings become errors.
:param tags: A list of tags to apply.
:param verbosity: The verbosity level.
:param parallel: The maximum number of parallel jobs to use
when reading/writing documents.
:param keep_going: Unused.
:param pdb: If true, enable the Python debugger on an exception.
:param exception_on_warning: If true, raise an exception on warnings.
"""
self.verbosity = verbosity
self._fresh_env_used: bool | None = None
self.extensions: dict[str, Extension] = {}
self.registry = SphinxComponentRegistry()
# validate provided directories
self.srcdir = _StrPath(srcdir).resolve()
self.outdir = _StrPath(outdir).resolve()
self.doctreedir = _StrPath(doctreedir).resolve()
if not self.srcdir.is_dir():
raise ApplicationError(
__('Cannot find source directory (%s)') % self.srcdir
)
if self.outdir.exists() and not self.outdir.is_dir():
raise ApplicationError(
__('Output directory (%s) is not a directory') % self.outdir
)
if self.srcdir == self.outdir:
raise ApplicationError(
__('Source directory and destination directory cannot be identical')
)
self.parallel = parallel
if status is None:
self._status: IO[str] = StringIO()
self.quiet: bool = True
else:
self._status = status
self.quiet = False
if warning is None:
self._warning: IO[str] = StringIO()
else:
self._warning = warning
self._warncount = 0
self.keep_going = bool(warningiserror) # Unused
self._fail_on_warnings = bool(warningiserror)
self.pdb = pdb
self._exception_on_warning = exception_on_warning
logging.setup(self, self._status, self._warning, verbosity=verbosity)
self.events = EventManager(self)
# keep last few messages for traceback
# This will be filled by sphinx.util.logging.LastMessagesWriter
self.messagelog: deque[str] = deque(maxlen=10)
# say hello to the world
logger.info(bold(__('Running Sphinx v%s')), sphinx.__display_version__)
# status code for command-line application
self.statuscode = 0
# read config
overrides = confoverrides or {}
self.tags = Tags(tags)
if confdir is None:
# set confdir to srcdir if -C given (!= no confdir); a few pieces
# of code expect a confdir to be set
self.confdir = self.srcdir
self.config = Config({}, overrides)
else:
self.confdir = _StrPath(confdir).resolve()
self.config = Config.read(self.confdir, overrides=overrides, tags=self.tags)
self.config._verbosity = -1 if self.quiet else self.verbosity
# set up translation infrastructure
self._init_i18n()
# check the Sphinx version if requested
if (
self.config.needs_sphinx
and self.config.needs_sphinx > sphinx.__display_version__
):
raise VersionRequirementError(
__(
'This project needs at least Sphinx v%s and therefore cannot '
'be built with this version.'
)
% self.config.needs_sphinx
)
# load all built-in extension modules, first-party extension modules,
# and first-party themes
for extension in builtin_extensions:
self.setup_extension(extension)
# load all user-given extension modules
for extension in self.config.extensions:
self.setup_extension(extension)
# preload builder module (before init config values)
self.preload_builder(buildername)
if not self.outdir.is_dir():
with progress_message(__('making output directory')):
ensuredir(self.outdir)
# the config file itself can be an extension
if self.config.setup:
prefix = __('while setting up extension %s:') % 'conf.py'
with prefixed_warnings(prefix):
if callable(self.config.setup):
self.config.setup(self)
else:
raise ConfigError(
__(
"'setup' as currently defined in conf.py isn't a Python callable. "
'Please modify its definition to make it a callable function. '
'This is needed for conf.py to behave as a Sphinx extension.'
),
)
# Report any warnings for overrides.
self.config._report_override_warnings()
self.events.emit('config-inited', self.config)
# create the project
self.project = Project(self.srcdir, self.config.source_suffix)
# set up the build environment
self.env = self._init_env(freshenv)
# create the builder
self.builder = self.create_builder(buildername)
# build environment post-initialisation, after creating the builder
self._post_init_env()
# set up the builder
self._init_builder()
@property
def fresh_env_used(self) -> bool | None:
"""True/False as to whether a new environment was created for this build,
or None if the environment has not been initialised yet.
"""
return self._fresh_env_used
@property
def phase(self) -> BuildPhase:
if not hasattr(self, 'builder'):
return BuildPhase.INITIALIZATION
return self.builder.phase
def _init_i18n(self) -> None:
"""Load translated strings from the configured localedirs if enabled in
the configuration.
"""
logger.info(
bold(__('loading translations [%s]... ')), self.config.language, nonl=True
)
# compile mo files if sphinx.po file in user locale directories are updated
repo = CatalogRepository(
self.srcdir,
self.config.locale_dirs,
self.config.language,
self.config.source_encoding,
)
for catalog in repo.catalogs:
if catalog.domain == 'sphinx' and catalog.is_outdated():
catalog.write_mo(
self.config.language, self.config.gettext_allow_fuzzy_translations
)
locale_dirs: list[_StrPath | None] = list(repo.locale_dirs)
locale_dirs += [None]
locale_dirs += [package_dir / 'locale']
self.translator, has_translation = locale.init(
locale_dirs, self.config.language
)
if has_translation or self.config.language == 'en':
logger.info(__('done'))
else:
logger.info(__('not available for built-in messages'))
def _init_env(self, freshenv: bool) -> BuildEnvironment:
filename = self.doctreedir / ENV_PICKLE_FILENAME
if freshenv or not filename.exists():
return self._create_fresh_env()
else:
return self._load_existing_env(filename)
def _create_fresh_env(self) -> BuildEnvironment:
env = BuildEnvironment(self)
self._fresh_env_used = True
return env
    @progress_message(__('loading pickled environment'))
    def _load_existing_env(self, filename: Path) -> BuildEnvironment:
        """Restore the build environment pickled at *filename*.

        Falls back to a fresh environment if unpickling or set-up fails
        for any reason.
        """
        try:
            with open(filename, 'rb') as f:
                env = pickle.load(f)
            env.setup(self)
            self._fresh_env_used = False
        except Exception as err:
            # Any failure (corrupt pickle, incompatible Sphinx version, ...)
            # is recoverable: report it and rebuild from scratch.
            logger.info(__('failed: %s'), err)
            env = self._create_fresh_env()
        return env
def _post_init_env(self) -> None:
if self._fresh_env_used:
self.env.find_files(self.config, self.builder)
self.env._builder_cls = self.builder.__class__
    def preload_builder(self, name: str) -> None:
        """Import the extension that provides the builder *name*, if needed."""
        self.registry.preload_builder(self, name)
    def create_builder(self, name: str) -> Builder:
        """Instantiate and return the builder called *name*.

        Falls back to the ``html`` builder when *name* is ``None``.
        """
        if name is None:
            logger.info(__('No builder selected, using default: html'))
            name = 'html'
        return self.registry.create_builder(self, name, self.env)
    def _init_builder(self) -> None:
        """Initialise the builder and announce it to extensions."""
        self.builder.init()
        self.events.emit('builder-inited')
# ---- main "build" method -------------------------------------------------
    def build(self, force_all: bool = False, filenames: Sequence[Path] = ()) -> None:
        """Build the source files.

        :param force_all: If true, rebuild all source files.
        :param filenames: If given, build only these source files
            (ignored when *force_all* is true).
        """
        self.builder.phase = BuildPhase.READING
        try:
            if force_all:
                self.builder.build_all()
            elif filenames:
                self.builder.build_specific(filenames)
            else:
                self.builder.build_update()
            self.events.emit('build-finished', None)
        except Exception as err:
            # delete the saved env to force a fresh build next time
            envfile = self.doctreedir / ENV_PICKLE_FILENAME
            if envfile.is_file():
                envfile.unlink()
            self.events.emit('build-finished', err)
            raise
        # Summarise the outcome.  Wording depends on the warning count and on
        # whether warnings are treated as errors (self._fail_on_warnings).
        if self._warncount == 0:
            if self.statuscode != 0:
                logger.info(bold(__('build finished with problems.')))
            else:
                logger.info(bold(__('build succeeded.')))
        elif self._warncount == 1:
            if self._fail_on_warnings:
                self.statuscode = 1
                msg = __(
                    'build finished with problems, 1 warning '
                    '(with warnings treated as errors).'
                )
            elif self.statuscode != 0:
                msg = __('build finished with problems, 1 warning.')
            else:
                msg = __('build succeeded, 1 warning.')
            logger.info(bold(msg))
        else:
            if self._fail_on_warnings:
                self.statuscode = 1
                msg = __(
                    'build finished with problems, %s warnings '
                    '(with warnings treated as errors).'
                )
            elif self.statuscode != 0:
                msg = __('build finished with problems, %s warnings.')
            else:
                msg = __('build succeeded, %s warnings.')
            logger.info(bold(msg), self._warncount)
        # Show the builder's closing message only after a fully clean build.
        if self.statuscode == 0 and self.builder.epilog:
            logger.info('')
            logger.info(
                self.builder.epilog,
                {
                    'outdir': relpath(self.outdir),
                    'project': self.config.project,
                },
            )
        self.builder.cleanup()
# ---- general extensibility interface -------------------------------------
    def setup_extension(self, extname: str) -> None:
        """Import and setup a Sphinx extension module.

        Load the extension given by the module *name*.  Use this if your
        extension needs the features provided by another extension.  No-op if
        called twice.
        """
        logger.debug('[app] setting up extension: %r', extname)
        self.registry.load_extension(self, extname)
@staticmethod
def require_sphinx(version: tuple[int, int] | str) -> None:
"""Check the Sphinx version if requested.
Compare *version* with the version of the running Sphinx, and abort the
build when it is too old.
:param version: The required version in the form of ``major.minor`` or
``(major, minor)``.
.. versionadded:: 1.0
.. versionchanged:: 7.1
Type of *version* now allows ``(major, minor)`` form.
"""
if isinstance(version, tuple):
major, minor = version
else:
major, minor = map(int, version.split('.')[:2])
if (major, minor) > sphinx.version_info[:2]:
req = f'{major}.{minor}'
raise VersionRequirementError(req)
# ---- Core events -------------------------------------------------------
    # The @overload stubs below give type checkers the exact callback
    # signature expected for each known event name; the final untyped
    # definition is the runtime implementation.
    @overload
    def connect(
        self,
        event: Literal['config-inited'],
        callback: Callable[[Sphinx, Config], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['builder-inited'],
        callback: Callable[[Sphinx], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['env-get-outdated'],
        callback: Callable[
            [Sphinx, BuildEnvironment, Set[str], Set[str], Set[str]], Sequence[str]
        ],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['env-before-read-docs'],
        callback: Callable[[Sphinx, BuildEnvironment, list[str]], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['env-purge-doc'],
        callback: Callable[[Sphinx, BuildEnvironment, str], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['source-read'],
        callback: Callable[[Sphinx, str, list[str]], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['include-read'],
        callback: Callable[[Sphinx, Path, str, list[str]], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['doctree-read'],
        callback: Callable[[Sphinx, nodes.document], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['env-merge-info'],
        callback: Callable[
            [Sphinx, BuildEnvironment, Set[str], BuildEnvironment], None
        ],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['env-updated'],
        callback: Callable[[Sphinx, BuildEnvironment], str],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['env-get-updated'],
        callback: Callable[[Sphinx, BuildEnvironment], Iterable[str]],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['env-check-consistency'],
        callback: Callable[[Sphinx, BuildEnvironment], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['write-started'],
        callback: Callable[[Sphinx, Builder], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['doctree-resolved'],
        callback: Callable[[Sphinx, nodes.document, str], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['missing-reference'],
        callback: Callable[
            [Sphinx, BuildEnvironment, addnodes.pending_xref, nodes.TextElement],
            nodes.reference | None,
        ],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['warn-missing-reference'],
        callback: Callable[[Sphinx, Domain, addnodes.pending_xref], bool | None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['build-finished'],
        callback: Callable[[Sphinx, Exception | None], None],
        priority: int = 500,
    ) -> int: ...
    # ---- Events from builtin builders --------------------------------------
    @overload
    def connect(
        self,
        event: Literal['html-collect-pages'],
        callback: Callable[[Sphinx], Iterable[tuple[str, dict[str, Any], str]]],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['html-page-context'],
        callback: Callable[
            [Sphinx, str, str, dict[str, Any], nodes.document], str | None
        ],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['linkcheck-process-uri'],
        callback: Callable[[Sphinx, str], str | None],
        priority: int = 500,
    ) -> int: ...
    # ---- Events from builtin extensions-- ----------------------------------
    @overload
    def connect(
        self,
        event: Literal['object-description-transform'],
        callback: Callable[[Sphinx, str, str, addnodes.desc_content], None],
        priority: int = 500,
    ) -> int: ...
    # ---- Events from first-party extensions --------------------------------
    @overload
    def connect(
        self,
        event: Literal['autodoc-process-docstring'],
        callback: _AutodocProcessDocstringListener,
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['autodoc-before-process-signature'],
        callback: Callable[[Sphinx, Any, bool], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['autodoc-process-signature'],
        callback: _AutodocProcessSignatureListener,
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['autodoc-process-bases'],
        callback: Callable[[Sphinx, str, Any, dict[str, bool], list[str]], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['autodoc-skip-member'],
        callback: _AutodocSkipMemberListener,
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['todo-defined'],
        callback: Callable[[Sphinx, todo_node], None],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['viewcode-find-source'],
        callback: Callable[
            [Sphinx, str],
            tuple[str, dict[str, tuple[Literal['class', 'def', 'other'], int, int]]],
        ],
        priority: int = 500,
    ) -> int: ...
    @overload
    def connect(
        self,
        event: Literal['viewcode-follow-imported'],
        callback: Callable[[Sphinx, str, str], str | None],
        priority: int = 500,
    ) -> int: ...
    # ---- Catch-all ---------------------------------------------------------
    @overload
    def connect(
        self,
        event: str,
        callback: Callable[..., Any],
        priority: int = 500,
    ) -> int: ...
    # event interface
    def connect(
        self, event: str, callback: Callable[..., Any], priority: int = 500
    ) -> int:
        """Register *callback* to be called when *event* is emitted.

        For details on available core events and the arguments of callback
        functions, please see :ref:`events`.

        :param event: The name of target event
        :param callback: Callback function for the event
        :param priority: The priority of the callback.  The callbacks will be invoked
            in order of *priority* (ascending).
        :return: A listener ID.  It can be used for :meth:`disconnect`.

        .. versionchanged:: 3.0
           Support *priority*
        """
        listener_id = self.events.connect(event, callback, priority)
        logger.debug(
            '[app] connecting event %r (%d): %r [id=%s]',
            event,
            priority,
            callback,
            listener_id,
        )
        return listener_id
    def disconnect(self, listener_id: int) -> None:
        """Unregister callback by *listener_id*.

        :param listener_id: A listener_id that :meth:`connect` returns
        """
        logger.debug('[app] disconnecting event: [id=%s]', listener_id)
        self.events.disconnect(listener_id)
    def emit(
        self,
        event: str,
        *args: Any,
        allowed_exceptions: tuple[type[Exception], ...] = (),
    ) -> list[Any]:
        """Emit *event* and pass *arguments* to the callback functions.

        Return the return values of all callbacks as a list.  Do not emit core
        Sphinx events in extensions!

        :param event: The name of event that will be emitted
        :param args: The arguments for the event
        :param allowed_exceptions: The list of exceptions that are allowed in the callbacks

        .. versionchanged:: 3.1
           Added *allowed_exceptions* to specify path-through exceptions
        """
        return self.events.emit(event, *args, allowed_exceptions=allowed_exceptions)
    def emit_firstresult(
        self,
        event: str,
        *args: Any,
        allowed_exceptions: tuple[type[Exception], ...] = (),
    ) -> Any:
        """Emit *event* and pass *arguments* to the callback functions.

        Return the result of the first callback that doesn't return ``None``.

        :param event: The name of event that will be emitted
        :param args: The arguments for the event
        :param allowed_exceptions: The list of exceptions that are allowed in the callbacks

        .. versionadded:: 0.5

        .. versionchanged:: 3.1
           Added *allowed_exceptions* to specify path-through exceptions
        """
        return self.events.emit_firstresult(
            event, *args, allowed_exceptions=allowed_exceptions
        )
# registering addon parts
    def add_builder(self, builder: type[Builder], override: bool = False) -> None:
        """Register a new builder.

        :param builder: A builder class
        :param override: If true, install the builder forcedly even if another builder
            is already installed as the same name

        .. versionchanged:: 1.8
           Add *override* keyword.
        """
        self.registry.add_builder(builder, override=override)
    def add_config_value(
        self,
        name: str,
        default: Any,
        rebuild: _ConfigRebuild,
        types: type | Collection[type] | ENUM = (),
        description: str = '',
    ) -> None:
        """Register a configuration value.

        This is necessary for Sphinx to recognize new values and set default
        values accordingly.

        :param name: The name of the configuration value.  It is recommended to be prefixed
            with the extension name (ex. ``html_logo``, ``epub_title``)
        :param default: The default value of the configuration.
        :param rebuild: The condition of rebuild.  It must be one of those values:

            * ``'env'`` if a change in the setting only takes effect when a
              document is parsed -- this means that the whole environment must be
              rebuilt.
            * ``'html'`` if a change in the setting needs a full rebuild of HTML
              documents.
            * ``''`` if a change in the setting will not need any special rebuild.
        :param types: The type of configuration value.  A list of types can be specified.  For
            example, ``[str]`` is used to describe a configuration that takes string
            value.
        :param description: A short description of the configuration value.

        .. versionchanged:: 0.4
           If the *default* value is a callable, it will be called with the
           config object as its argument in order to get the default value.
           This can be used to implement config values whose default depends on
           other values.

        .. versionchanged:: 0.6
           Changed *rebuild* from a simple boolean (equivalent to ``''`` or
           ``'env'``) to a string.  However, booleans are still accepted and
           converted internally.

        .. versionadded:: 1.4
           The *types* parameter.

        .. versionadded:: 7.4
           The *description* parameter.
        """
        logger.debug('[app] adding config value: %r', (name, default, rebuild, types))
        self.config.add(
            name=name,
            default=default,
            rebuild=rebuild,
            types=types,
            description=description,
        )
    def add_event(self, name: str) -> None:
        """Register an event called *name*.

        This is needed to be able to emit it.

        :param name: The name of the event
        """
        logger.debug('[app] adding event: %r', name)
        self.events.add(name)
    def set_translator(
        self,
        name: str,
        translator_class: type[nodes.NodeVisitor],
        override: bool = False,
    ) -> None:
        """Register or override a Docutils translator class.

        This is used to register a custom output translator or to replace a
        builtin translator.  This allows extensions to use a custom translator
        and define custom nodes for the translator (see :meth:`add_node`).

        :param name: The name of the builder for the translator
        :param translator_class: A translator class
        :param override: If true, install the translator forcedly even if another translator
            is already installed as the same name

        .. versionadded:: 1.3

        .. versionchanged:: 1.8
           Add *override* keyword.
        """
        self.registry.add_translator(name, translator_class, override=override)
    def add_node(
        self,
        node: type[Element],
        override: bool = False,
        **kwargs: _NodeHandlerPair,
    ) -> None:
        """Register a Docutils node class.

        This is necessary for Docutils internals.  It may also be used in the
        future to validate nodes in the parsed documents.

        :param node: A node class
        :param kwargs: Visitor functions for each builder (see below)
        :param override: If true, install the node forcedly even if another node is already
            installed as the same name

        Node visitor functions for the Sphinx HTML, LaTeX, text and manpage
        writers can be given as keyword arguments: the keyword should be one or
        more of ``'html'``, ``'latex'``, ``'text'``, ``'man'``, ``'texinfo'``
        or any other supported translators, the value a 2-tuple of ``(visit,
        depart)`` methods.  ``depart`` can be ``None`` if the ``visit``
        function raises :exc:`docutils.nodes.SkipNode`.  Example:

        .. code-block:: python

           class math(docutils.nodes.Element): ...

           def visit_math_html(self, node):
               self.body.append(self.starttag(node, 'math'))

           def depart_math_html(self, node):
               self.body.append('</math>')

           app.add_node(math, html=(visit_math_html, depart_math_html))

        Obviously, translators for which you don't specify visitor methods will
        choke on the node when encountered in a document to translate.

        .. versionchanged:: 0.5
           Added the support for keyword arguments giving visit functions.
        """
        logger.debug('[app] adding node: %r', (node, kwargs))
        if not override and docutils.is_node_registered(node):
            logger.warning(
                __(
                    'node class %r is already registered, '
                    'its visitors will be overridden'
                ),
                node.__name__,
                type='app',
                subtype='add_node',
            )
        docutils.register_node(node)
        self.registry.add_translation_handlers(node, **kwargs)
    def add_enumerable_node(
        self,
        node: type[Element],
        figtype: str,
        title_getter: TitleGetter | None = None,
        override: bool = False,
        **kwargs: tuple[_NodeHandler, _NodeHandler],
    ) -> None:
        """Register a Docutils node class as a numfig target.

        Sphinx numbers the node automatically.  And then the users can refer it
        using :rst:role:`numref`.

        :param node: A node class
        :param figtype: The type of enumerable nodes.  Each figtype has individual numbering
            sequences.  As system figtypes, ``figure``, ``table`` and
            ``code-block`` are defined.  It is possible to add custom nodes to
            these default figtypes.  It is also possible to define new custom
            figtype if a new figtype is given.
        :param title_getter: A getter function to obtain the title of node.  It takes an
            instance of the enumerable node, and it must return its title as
            string.  The title is used to the default title of references for
            :rst:role:`ref`.  By default, Sphinx searches
            ``docutils.nodes.caption`` or ``docutils.nodes.title`` from the
            node as a title.
        :param kwargs: Visitor functions for each builder (same as :meth:`add_node`)
        :param override: If true, install the node forcedly even if another node is already
            installed as the same name

        .. versionadded:: 1.4
        """
        self.registry.add_enumerable_node(
            node, figtype, title_getter, override=override
        )
        # Enumerable nodes are ordinary nodes too, so register them as such.
        self.add_node(node, override=override, **kwargs)
    def add_directive(
        self, name: str, cls: type[Directive], override: bool = False
    ) -> None:
        """Register a Docutils directive.

        :param name: The name of the directive
        :param cls: A directive class
        :param override: If false, do not install it if another directive
            is already installed as the same name
            If true, unconditionally install the directive.

        For example, a custom directive named ``my-directive`` would be added
        like this:

        .. code-block:: python

           from docutils.parsers.rst import Directive, directives

           class MyDirective(Directive):
               has_content = True
               required_arguments = 1
               optional_arguments = 0
               final_argument_whitespace = True
               option_spec = {
                   'class': directives.class_option,
                   'name': directives.unchanged,
               }

               def run(self):
                   pass

           def setup(app):
               app.add_directive('my-directive', MyDirective)

        For more details, see `the Docutils docs
        <https://docutils.sourceforge.io/docs/howto/rst-directives.html>`__ .

        .. versionchanged:: 0.6
           Docutils 0.5-style directive classes are now supported.

        .. versionchanged:: 1.8
           Docutils 0.4-style (function based) directives support is deprecated.

        .. versionchanged:: 1.8
           Add *override* keyword.
        """
        logger.debug('[app] adding directive: %r', (name, cls))
        if not override and docutils.is_directive_registered(name):
            logger.warning(
                __('directive %r is already registered and will not be overridden'),
                name,
                type='app',
                subtype='add_directive',
            )
        docutils.register_directive(name, cls)
    def add_role(self, name: str, role: Any, override: bool = False) -> None:
        """Register a Docutils role.

        :param name: The name of role
        :param role: A role function
        :param override: If false, do not install it if another role
            is already installed as the same name
            If true, unconditionally install the role.

        For more details about role functions, see `the Docutils docs
        <https://docutils.sourceforge.io/docs/howto/rst-roles.html>`__ .

        .. versionchanged:: 1.8
           Add *override* keyword.
        """
        logger.debug('[app] adding role: %r', (name, role))
        if not override and docutils.is_role_registered(name):
            logger.warning(
                __('role %r is already registered and will not be overridden'),
                name,
                type='app',
                subtype='add_role',
            )
        docutils.register_role(name, role)
    def add_generic_role(
        self, name: str, nodeclass: type[Node], override: bool = False
    ) -> None:
        """Register a generic Docutils role.

        Register a Docutils role that does nothing but wrap its contents in the
        node given by *nodeclass*.

        :param override: If false, do not install it if another role
            is already installed as the same name
            If true, unconditionally install the role.

        .. versionadded:: 0.6

        .. versionchanged:: 1.8
           Add *override* keyword.
        """
        # Don't use ``roles.register_generic_role`` because it uses
        # ``register_canonical_role``.
        logger.debug('[app] adding generic role: %r', (name, nodeclass))
        if not override and docutils.is_role_registered(name):
            logger.warning(
                __('role %r is already registered and will not be overridden'),
                name,
                type='app',
                subtype='add_generic_role',
            )
        role = roles.GenericRole(name, nodeclass)
        docutils.register_role(name, role)
    def add_domain(self, domain: type[Domain], override: bool = False) -> None:
        """Register a domain.

        :param domain: A domain class
        :param override: If false, do not install it if another domain
            is already installed as the same name
            If true, unconditionally install the domain.

        .. versionadded:: 1.0

        .. versionchanged:: 1.8
           Add *override* keyword.
        """
        self.registry.add_domain(domain, override=override)
    def add_directive_to_domain(
        self, domain: str, name: str, cls: type[Directive], override: bool = False
    ) -> None:
        """Register a Docutils directive in a domain.

        Like :meth:`add_directive`, but the directive is added to the domain
        named *domain*.

        :param domain: The name of target domain
        :param name: A name of directive
        :param cls: A directive class
        :param override: If false, do not install it if another directive
            is already installed as the same name
            If true, unconditionally install the directive.

        .. versionadded:: 1.0

        .. versionchanged:: 1.8
           Add *override* keyword.
        """
        self.registry.add_directive_to_domain(domain, name, cls, override=override)
    def add_role_to_domain(
        self,
        domain: str,
        name: str,
        role: RoleFunction | XRefRole,
        override: bool = False,
    ) -> None:
        """Register a Docutils role in a domain.

        Like :meth:`add_role`, but the role is added to the domain named
        *domain*.

        :param domain: The name of the target domain
        :param name: The name of the role
        :param role: The role function
        :param override: If false, do not install it if another role
            is already installed as the same name
            If true, unconditionally install the role.

        .. versionadded:: 1.0

        .. versionchanged:: 1.8
           Add *override* keyword.
        """
        self.registry.add_role_to_domain(domain, name, role, override=override)
    def add_index_to_domain(
        self, domain: str, index: type[Index], _override: bool = False
    ) -> None:
        """Register a custom index for a domain.

        Add a custom *index* class to the domain named *domain*.

        :param domain: The name of the target domain
        :param index: The index class
        :param _override: Ignored.  The parameter is kept only for backwards
            compatibility with callers that passed *override*; the registry
            call below does not take an override flag, so the index is always
            registered.

        .. versionadded:: 1.0

        .. versionchanged:: 1.8
           Add *override* keyword.
        """
        self.registry.add_index_to_domain(domain, index)
    def add_object_type(
        self,
        directivename: str,
        rolename: str,
        indextemplate: str = '',
        parse_node: Callable[[BuildEnvironment, str, addnodes.desc_signature], str]
        | None = None,
        ref_nodeclass: type[nodes.TextElement] | None = None,
        objname: str = '',
        doc_field_types: Sequence[Field] = (),
        override: bool = False,
    ) -> None:
        """Register a new object type.

        This method is a very convenient way to add a new :term:`object` type
        that can be cross-referenced.  It will do this:

        - Create a new directive (called *directivename*) for documenting an
          object.  It will automatically add index entries if *indextemplate*
          is nonempty; if given, it must contain exactly one instance of
          ``%s``.  See the example below for how the template will be
          interpreted.
        - Create a new role (called *rolename*) to cross-reference to these
          object descriptions.
        - If you provide *parse_node*, it must be a function that takes a
          string and a docutils node, and it must populate the node with
          children parsed from the string.  It must then return the name of the
          item to be used in cross-referencing and index entries.  See the
          :file:`conf.py` file in the source for this documentation for an
          example.
        - The *objname* (if not given, will default to *directivename*) names
          the type of object.  It is used when listing objects, e.g. in search
          results.

        For example, if you have this call in a custom Sphinx extension::

           app.add_object_type('directive', 'dir', 'pair: %s; directive')

        you can use this markup in your documents::

           .. rst:directive:: function

              Document a function.

           <...>

           See also the :rst:dir:`function` directive.

        For the directive, an index entry will be generated as if you had prepended ::

           .. index:: pair: function; directive

        The reference node will be of class ``literal`` (so it will be rendered
        in a proportional font, as appropriate for code) unless you give the
        *ref_nodeclass* argument, which must be a docutils node class.  Most
        useful are ``docutils.nodes.emphasis`` or ``docutils.nodes.strong`` --
        you can also use ``docutils.nodes.generated`` if you want no further
        text decoration.  If the text should be treated as literal (e.g. no
        smart quote replacement), but not have typewriter styling, use
        ``sphinx.addnodes.literal_emphasis`` or
        ``sphinx.addnodes.literal_strong``.

        For the role content, you have the same syntactical possibilities as
        for standard Sphinx roles (see :ref:`xref-syntax`).

        If *override* is True, the given object_type is forcedly installed even if
        an object_type having the same name is already installed.

        .. versionchanged:: 1.8
           Add *override* keyword.
        """
        self.registry.add_object_type(
            directivename,
            rolename,
            indextemplate,
            parse_node,
            ref_nodeclass,
            objname,
            doc_field_types,
            override=override,
        )
    def add_crossref_type(
        self,
        directivename: str,
        rolename: str,
        indextemplate: str = '',
        ref_nodeclass: type[nodes.TextElement] | None = None,
        objname: str = '',
        override: bool = False,
    ) -> None:
        """Register a new crossref object type.

        This method is very similar to :meth:`~Sphinx.add_object_type` except that the
        directive it generates must be empty, and will produce no output.

        That means that you can add semantic targets to your sources, and refer
        to them using custom roles instead of generic ones (like
        :rst:role:`ref`).  Example call::

           app.add_crossref_type(
               'topic', 'topic', 'single: %s', docutils.nodes.emphasis
           )

        Example usage::

           .. topic:: application API

           The application API
           -------------------

           Some random text here.

           See also :topic:`this section <application API>`.

        (Of course, the element following the ``topic`` directive needn't be a
        section.)

        :param override: If false, do not install it if another cross-reference type
            is already installed as the same name
            If true, unconditionally install the cross-reference type.

        .. versionchanged:: 1.8
           Add *override* keyword.
        """
        self.registry.add_crossref_type(
            directivename,
            rolename,
            indextemplate,
            ref_nodeclass,
            objname,
            override=override,
        )
    def add_transform(self, transform: type[Transform]) -> None:
        """Register a Docutils transform to be applied after parsing.

        Add the standard docutils :class:`~docutils.transforms.Transform`
        subclass *transform* to the list of transforms that are applied after
        Sphinx parses a reST document.

        :param transform: A transform class

        .. list-table:: priority range categories for Sphinx transforms
           :widths: 20,80

           * - Priority
             - Main purpose in Sphinx
           * - 0-99
             - Fix invalid nodes by docutils. Translate a doctree.
           * - 100-299
             - Preparation
           * - 300-399
             - early
           * - 400-699
             - main
           * - 700-799
             - Post processing. Deadline to modify text and referencing.
           * - 800-899
             - Collect referencing and referenced nodes. Domain processing.
           * - 900-999
             - Finalize and clean up.

        refs: `Transform Priority Range Categories`__

        __ https://docutils.sourceforge.io/docs/ref/transforms.html#transform-priority-range-categories
        """
        self.registry.add_transform(transform)
    def add_post_transform(self, transform: type[Transform]) -> None:
        """Register a Docutils transform to be applied before writing.

        Add the standard docutils :class:`~docutils.transforms.Transform`
        subclass *transform* to the list of transforms that are applied before
        Sphinx writes a document.

        :param transform: A transform class
        """
        self.registry.add_post_transform(transform)
    def add_js_file(
        self,
        filename: str | None,
        priority: int = 500,
        loading_method: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Register a JavaScript file to include in the HTML output.

        :param filename: The name of a JavaScript file that the default HTML
            template will include.  It must be relative to the HTML
            static path, or a full URI with scheme, or ``None`` .
            The ``None`` value is used to create an inline
            ``<script>`` tag.  See the description of *kwargs*
            below.
        :param priority: Files are included in ascending order of priority.  If
            multiple JavaScript files have the same priority,
            those files will be included in order of registration.
            See list of "priority range for JavaScript files" below.
        :param loading_method: The loading method for the JavaScript file.
            Either ``'async'`` or ``'defer'`` are allowed.
        :param kwargs: Extra keyword arguments are included as attributes of the
            ``<script>`` tag.  If the special keyword argument
            ``body`` is given, its value will be added as the content
            of the ``<script>`` tag.

        Example::

           app.add_js_file('example.js')
           # => <script src="_static/example.js"></script>

           app.add_js_file('example.js', loading_method='async')
           # => <script src="_static/example.js" async="async"></script>

           app.add_js_file(None, body="var myVariable = 'foo';")
           # => <script>var myVariable = 'foo';</script>

        .. list-table:: priority range for JavaScript files
           :widths: 20,80

           * - Priority
             - Main purpose in Sphinx
           * - 200
             - default priority for built-in JavaScript files
           * - 500
             - default priority for extensions
           * - 800
             - default priority for :confval:`html_js_files`

        A JavaScript file can be added to the specific HTML page when an extension
        calls this method on :event:`html-page-context` event.

        .. versionadded:: 0.5

        .. versionchanged:: 1.8
           Renamed from ``app.add_javascript()``.
           And it allows keyword arguments as attributes of script tag.

        .. versionchanged:: 3.5
           Take priority argument.  Allow to add a JavaScript file to the specific page.

        .. versionchanged:: 4.4
           Take loading_method argument.  Allow to change the loading method of the
           JavaScript file.
        """
        # Translate the loading method into the corresponding script-tag
        # attribute; any other value is silently ignored.
        if loading_method == 'async':
            kwargs['async'] = 'async'
        elif loading_method == 'defer':
            kwargs['defer'] = 'defer'
        # ``None`` (inline script) is stored as an empty filename.
        filename = filename or ''
        self.registry.add_js_file(filename, priority=priority, **kwargs)
        # If the builder already exists, let it pick up the file as well.
        with contextlib.suppress(AttributeError):
            self.builder.add_js_file(  # type: ignore[attr-defined]
                filename, priority=priority, **kwargs
            )
    def add_css_file(self, filename: str, priority: int = 500, **kwargs: Any) -> None:
        """Register a stylesheet to include in the HTML output.

        :param filename: The name of a CSS file that the default HTML
            template will include.  It must be relative to the HTML
            static path, or a full URI with scheme.
        :param priority: Files are included in ascending order of priority.  If
            multiple CSS files have the same priority,
            those files will be included in order of registration.
            See list of "priority range for CSS files" below.
        :param kwargs: Extra keyword arguments are included as attributes of the
            ``<link>`` tag.

        Example::

           app.add_css_file('custom.css')
           # => <link rel="stylesheet" href="_static/custom.css" type="text/css" />

           app.add_css_file('print.css', media='print')
           # => <link rel="stylesheet" href="_static/print.css"
           #          type="text/css" media="print" />

           app.add_css_file('fancy.css', rel='alternate stylesheet', title='fancy')
           # => <link rel="alternate stylesheet" href="_static/fancy.css"
           #          type="text/css" title="fancy" />

        .. list-table:: priority range for CSS files
           :widths: 20,80

           * - Priority
             - Main purpose in Sphinx
           * - 200
             - default priority for built-in CSS files
           * - 500
             - default priority for extensions
           * - 800
             - default priority for :confval:`html_css_files`

        A CSS file can be added to the specific HTML page when an extension calls
        this method on :event:`html-page-context` event.

        .. versionadded:: 1.0

        .. versionchanged:: 1.6
           Optional ``alternate`` and/or ``title`` attributes can be supplied
           with the arguments *alternate* (a Boolean) and *title* (a string).
           The default is no title and *alternate* = ``False``.  For
           more information, refer to the `documentation
           <https://mdn.io/Web/CSS/Alternative_style_sheets>`__.

        .. versionchanged:: 1.8
           Renamed from ``app.add_stylesheet()``.
           And it allows keyword arguments as attributes of link tag.

        .. versionchanged:: 3.5
           Take priority argument.  Allow to add a CSS file to the specific page.
        """
        logger.debug('[app] adding stylesheet: %r', filename)
        self.registry.add_css_files(filename, priority=priority, **kwargs)
        # If the builder already exists, let it pick up the file as well.
        with contextlib.suppress(AttributeError):
            self.builder.add_css_file(  # type: ignore[attr-defined]
                filename, priority=priority, **kwargs
            )
def add_latex_package(
self, packagename: str, options: str | None = None, after_hyperref: bool = False
) -> None:
r"""Register a package to include in the LaTeX source code.
Add *packagename* to the list of packages that LaTeX source code will
include. If you provide *options*, it will be taken to the `\usepackage`
declaration. If you set *after_hyperref* truthy, the package will be
loaded after ``hyperref`` package.
.. code-block:: python
app.add_latex_package('mypackage')
# => \usepackage{mypackage}
app.add_latex_package('mypackage', 'foo,bar')
# => \usepackage[foo,bar]{mypackage}
.. versionadded:: 1.3
.. versionadded:: 3.1
*after_hyperref* option.
"""
self.registry.add_latex_package(packagename, options, after_hyperref)
def add_lexer(self, alias: str, lexer: type[Lexer]) -> None:
"""Register a new lexer for source code.
Use *lexer* to highlight code blocks with the given language *alias*.
.. versionadded:: 0.6
.. versionchanged:: 2.1
Take a lexer class as an argument.
.. versionchanged:: 4.0
Removed support for lexer instances as an argument.
"""
logger.debug('[app] adding lexer: %r', (alias, lexer))
lexer_classes[alias] = lexer
def add_autodocumenter(self, cls: type[Documenter], override: bool = False) -> None:
"""Register a new documenter class for the autodoc extension.
Add *cls* as a new documenter class for the :mod:`sphinx.ext.autodoc`
extension. It must be a subclass of
:class:`sphinx.ext.autodoc.Documenter`. This allows auto-documenting
new types of objects. See the source of the autodoc module for
examples on how to subclass :class:`~sphinx.ext.autodoc.Documenter`.
If *override* is True, the given *cls* is forcedly installed even if
a documenter having the same name is already installed.
See :ref:`autodoc_ext_tutorial`.
.. versionadded:: 0.6
.. versionchanged:: 2.2
Add *override* keyword.
"""
logger.debug('[app] adding autodocumenter: %r', cls)
from sphinx.ext.autodoc.directive import AutodocDirective
objtype = cls.objtype
self.registry.add_documenter(objtype, cls)
self.add_directive('auto' + objtype, AutodocDirective, override=override)
def add_autodoc_attrgetter(
self, typ: type, getter: Callable[[Any, str, Any], Any]
) -> None:
"""Register a new ``getattr``-like function for the autodoc extension.
Add *getter*, which must be a function with an interface compatible to
the :func:`getattr` builtin, as the autodoc attribute getter for
objects that are instances of *typ*. All cases where autodoc needs to
get an attribute of a type are then handled by this function instead of
:func:`getattr`.
.. versionadded:: 0.6
"""
logger.debug('[app] adding autodoc attrgetter: %r', (typ, getter))
self.registry.add_autodoc_attrgetter(typ, getter)
def add_search_language(self, cls: type[SearchLanguage]) -> None:
"""Register a new language for the HTML search index.
Add *cls*, which must be a subclass of
:class:`sphinx.search.SearchLanguage`, as a support language for
building the HTML full-text search index. The class must have a *lang*
attribute that indicates the language it should be used for. See
:confval:`html_search_language`.
.. versionadded:: 1.1
"""
logger.debug('[app] adding search language: %r', cls)
from sphinx.search import languages
languages[cls.lang] = cls
def add_source_suffix(
self, suffix: str, filetype: str, override: bool = False
) -> None:
"""Register a suffix of source files.
Same as :confval:`source_suffix`. The users can override this
using the config setting.
:param override: If false, do not install it the same suffix
is already installed.
If true, unconditionally install the suffix.
.. versionadded:: 1.8
"""
self.registry.add_source_suffix(suffix, filetype, override=override)
def add_source_parser(self, parser: type[Parser], override: bool = False) -> None:
"""Register a parser class.
:param override: If false, do not install it if another parser
is already installed for the same suffix.
If true, unconditionally install the parser.
.. versionadded:: 1.4
.. versionchanged:: 1.8
*suffix* argument is deprecated. It only accepts *parser* argument.
Use :meth:`add_source_suffix` API to register suffix instead.
.. versionchanged:: 1.8
Add *override* keyword.
"""
self.registry.add_source_parser(parser, override=override)
def add_env_collector(self, collector: type[EnvironmentCollector]) -> None:
"""Register an environment collector class.
Refer to :ref:`collector-api`.
.. versionadded:: 1.6
"""
logger.debug('[app] adding environment collector: %r', collector)
collector().enable(self)
def add_html_theme(self, name: str, theme_path: str | os.PathLike[str]) -> None:
"""Register a HTML Theme.
The *name* is a name of theme, and *theme_path* is a full path to the
theme (refs: :ref:`distribute-your-theme`).
.. versionadded:: 1.6
"""
logger.debug('[app] adding HTML theme: %r, %r', name, theme_path)
self.registry.add_html_theme(name, theme_path)
def add_html_math_renderer(
self,
name: str,
inline_renderers: _MathsInlineRenderers | None = None,
block_renderers: _MathsBlockRenderers | None = None,
) -> None:
"""Register a math renderer for HTML.
The *name* is a name of math renderer. Both *inline_renderers* and
*block_renderers* are used as visitor functions for the HTML writer:
the former for inline math node (``nodes.math``), the latter for
block math node (``nodes.math_block``). Regarding visitor functions,
see :meth:`add_node` for details.
.. versionadded:: 1.8
"""
self.registry.add_html_math_renderer(name, inline_renderers, block_renderers)
def add_message_catalog(
self, catalog: str, locale_dir: str | os.PathLike[str]
) -> None:
"""Register a message catalog.
:param catalog: The name of the catalog
:param locale_dir: The base path of the message catalog
For more details, see :func:`sphinx.locale.get_translation()`.
.. versionadded:: 1.8
"""
locale.init([locale_dir], self.config.language, catalog)
locale.init_console(locale_dir, catalog)
# ---- other methods -------------------------------------------------
def is_parallel_allowed(self, typ: str) -> bool:
"""Check whether parallel processing is allowed or not.
:param typ: A type of processing; ``'read'`` or ``'write'``.
"""
if typ == 'read':
attrname = 'parallel_read_safe'
message_not_declared = __(
'the %s extension does not declare if it '
'is safe for parallel reading, assuming '
"it isn't - please ask the extension author "
'to check and make it explicit'
)
message_not_safe = __('the %s extension is not safe for parallel reading')
elif typ == 'write':
attrname = 'parallel_write_safe'
message_not_declared = __(
'the %s extension does not declare if it '
'is safe for parallel writing, assuming '
"it isn't - please ask the extension author "
'to check and make it explicit'
)
message_not_safe = __('the %s extension is not safe for parallel writing')
else:
raise ValueError('parallel type %s is not supported' % typ)
for ext in self.extensions.values():
allowed = getattr(ext, attrname, None)
if allowed is None:
logger.warning(message_not_declared, ext.name)
logger.warning(__('doing serial %s'), typ)
return False
elif not allowed:
logger.warning(message_not_safe, ext.name)
logger.warning(__('doing serial %s'), typ)
return False
return True
def set_html_assets_policy(self, policy: Literal['always', 'per_page']) -> None:
"""Set the policy to include assets in HTML pages.
- always: include the assets in all the pages
- per_page: include the assets only in pages where they are used
.. versionadded: 4.1
"""
if policy not in {'always', 'per_page'}:
raise ValueError('policy %s is not supported' % policy)
self.registry.html_assets_policy = policy
| Sphinx |
python | scrapy__scrapy | tests/test_spiderloader/__init__.py | {
"start": 8862,
"end": 9414
} | class ____(SpiderLoader):
pass
def test_custom_spider_loader():
settings = Settings(
{
"SPIDER_LOADER_CLASS": CustomSpiderLoader,
}
)
spider_loader = get_spider_loader(settings)
assert isinstance(spider_loader, CustomSpiderLoader)
def test_dummy_spider_loader(spider_loader_env):
settings, _ = spider_loader_env
spider_loader = DummySpiderLoader.from_settings(settings)
assert not spider_loader.list()
with pytest.raises(KeyError):
spider_loader.load("spider1")
| CustomSpiderLoader |
python | openai__openai-python | src/openai/types/realtime/realtime_transcription_session_create_request_param.py | {
"start": 366,
"end": 928
} | class ____(TypedDict, total=False):
type: Required[Literal["transcription"]]
"""The type of session to create.
Always `transcription` for transcription sessions.
"""
audio: RealtimeTranscriptionSessionAudioParam
"""Configuration for input and output audio."""
include: List[Literal["item.input_audio_transcription.logprobs"]]
"""Additional fields to include in server outputs.
`item.input_audio_transcription.logprobs`: Include logprobs for input audio
transcription.
"""
| RealtimeTranscriptionSessionCreateRequestParam |
python | huggingface__transformers | tests/trainer/test_trainer.py | {
"start": 8831,
"end": 9077
} | class ____:
def __init__(self, x, length=64):
self.x = x
self.length = length
def __len__(self):
return self.length
def __getitem__(self, i):
return {"input_ids": self.x, "labels": self.x}
| RepeatDataset |
python | kamyu104__LeetCode-Solutions | Python/count-tested-devices-after-test-operations.py | {
"start": 42,
"end": 341
} | class ____(object):
def countTestedDevices(self, batteryPercentages):
"""
:type batteryPercentages: List[int]
:rtype: int
"""
result = 0
for x in batteryPercentages:
if x > result:
result += 1
return result
| Solution |
python | getsentry__sentry | src/sentry/interfaces/contexts.py | {
"start": 425,
"end": 1108
} | class ____(string.Formatter):
def format_field(self, value, format_spec):
if not format_spec and isinstance(value, bool):
return value and "yes" or "no"
return string.Formatter.format_field(self, value, format_spec)
def format_index_expr(format_string, data):
return str(_IndexFormatter().vformat(str(format_string), (), data).strip())
def contexttype(cls: type[ContextTypeT]) -> type[ContextTypeT]:
context_types[cls.type] = cls
return cls
# NOTE: Are you adding a new context? Make sure to also update the
# documentation in the sentry develop docs [0]!
#
# [0]: https://develop.sentry.dev/sdk/event-payloads/contexts
| _IndexFormatter |
python | pytorch__pytorch | torch/optim/lr_scheduler.py | {
"start": 68933,
"end": 82468
} | class ____(LRScheduler):
r"""Sets the learning rate of each parameter group according to cyclical learning rate policy (CLR).
The policy cycles the learning rate between two boundaries with a constant frequency,
as detailed in the paper `Cyclical Learning Rates for Training Neural Networks`_.
The distance between the two boundaries can be scaled on a per-iteration
or per-cycle basis.
Cyclical learning rate policy changes the learning rate after every batch.
`step` should be called after a batch has been used for training.
This class has three built-in policies, as put forth in the paper:
* "triangular": A basic triangular cycle without amplitude scaling.
* "triangular2": A basic triangular cycle that scales initial amplitude by half each cycle.
* "exp_range": A cycle that scales initial amplitude by :math:`\text{gamma}^{\text{cycle iterations}}`
at each cycle iteration.
This implementation was adapted from the github repo: `bckenstler/CLR`_
Args:
optimizer (Optimizer): Wrapped optimizer.
base_lr (float or list): Initial learning rate which is the
lower boundary in the cycle for each parameter group.
max_lr (float or list): Upper learning rate boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (max_lr - base_lr).
The lr at any cycle is the sum of base_lr
and some scaling of the amplitude; therefore
max_lr may not actually be reached depending on
scaling function.
step_size_up (int): Number of training iterations in the
increasing half of a cycle. Default: 2000
step_size_down (int): Number of training iterations in the
decreasing half of a cycle. If step_size_down is None,
it is set to step_size_up. Default: None
mode (str): One of {triangular, triangular2, exp_range}.
Values correspond to policies detailed above.
If scale_fn is not None, this argument is ignored.
Default: 'triangular'
gamma (float): Constant in 'exp_range' scaling function:
gamma**(cycle iterations)
Default: 1.0
scale_fn (function): Custom scaling policy defined by a single
argument lambda function, where
0 <= scale_fn(x) <= 1 for all x >= 0.
If specified, then 'mode' is ignored.
Default: None
scale_mode (str): {'cycle', 'iterations'}.
Defines whether scale_fn is evaluated on
cycle number or cycle iterations (training
iterations since start of cycle).
Default: 'cycle'
cycle_momentum (bool): If ``True``, momentum is cycled inversely
to learning rate between 'base_momentum' and 'max_momentum'.
Default: True
base_momentum (float or list): Lower momentum boundaries in the cycle
for each parameter group. Note that momentum is cycled inversely
to learning rate; at the peak of a cycle, momentum is
'base_momentum' and learning rate is 'max_lr'.
Default: 0.8
max_momentum (float or list): Upper momentum boundaries in the cycle
for each parameter group. Functionally,
it defines the cycle amplitude (max_momentum - base_momentum).
The momentum at any cycle is the difference of max_momentum
and some scaling of the amplitude; therefore
base_momentum may not actually be reached depending on
scaling function. Note that momentum is cycled inversely
to learning rate; at the start of a cycle, momentum is 'max_momentum'
and learning rate is 'base_lr'
Default: 0.9
last_epoch (int): The index of the last batch. This parameter is used when
resuming a training job. Since `step()` should be invoked after each
batch instead of after each epoch, this number represents the total
number of *batches* computed, not the total number of epochs computed.
When last_epoch=-1, the schedule is started from the beginning.
Default: -1
Example:
>>> # xdoctest: +SKIP
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> scheduler = torch.optim.lr_scheduler.CyclicLR(
... optimizer,
... base_lr=0.01,
... max_lr=0.1,
... step_size_up=10,
... )
>>> data_loader = torch.utils.data.DataLoader(...)
>>> for epoch in range(10):
>>> for batch in data_loader:
>>> train_batch(...)
>>> scheduler.step()
.. image:: ../scripts/lr_scheduler_images/CyclicLR.png
.. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186
.. _bckenstler/CLR: https://github.com/bckenstler/CLR
"""
def __init__(
self,
optimizer: Optimizer,
base_lr: Union[float, list[float]],
max_lr: Union[float, list[float]],
step_size_up: int = 2000,
step_size_down: Optional[int] = None,
mode: Literal["triangular", "triangular2", "exp_range"] = "triangular",
gamma: float = 1.0,
scale_fn: Optional[Callable[[float], float]] = None,
scale_mode: Literal["cycle", "iterations"] = "cycle",
cycle_momentum: bool = True,
base_momentum: float = 0.8,
max_momentum: float = 0.9,
last_epoch: int = -1,
) -> None: # noqa: D107
# Attach optimizer
if not isinstance(optimizer, Optimizer):
raise TypeError(f"{type(optimizer).__name__} is not an Optimizer")
self.optimizer = optimizer
base_lrs = _format_param("base_lr", optimizer, base_lr)
if last_epoch == -1:
for lr, group in zip(base_lrs, optimizer.param_groups, strict=True):
_update_param_group_val(group, "lr", lr)
self.max_lrs = _format_param("max_lr", optimizer, max_lr)
# pyrefly: ignore [bad-assignment]
step_size_up = float(step_size_up)
step_size_down = (
# pyrefly: ignore [bad-assignment]
float(step_size_down) if step_size_down is not None else step_size_up
)
# pyrefly: ignore [unsupported-operation]
self.total_size = step_size_up + step_size_down
self.step_ratio = step_size_up / self.total_size
if mode not in ["triangular", "triangular2", "exp_range"] and scale_fn is None:
raise ValueError("mode is invalid and scale_fn is None")
self.mode = mode
self.gamma = gamma
self._scale_fn_ref: Callable[[float], float]
self._scale_fn_custom = scale_fn
self.scale_mode = scale_mode
self._init_scale_fn()
self.cycle_momentum = cycle_momentum
if cycle_momentum:
if (
"momentum" not in optimizer.defaults
and "betas" not in optimizer.defaults
):
raise ValueError(
"optimizer must support momentum or beta1 with `cycle_momentum` option enabled"
)
self.use_beta1 = "betas" in self.optimizer.defaults
self.base_momentums = _format_param(
"base_momentum", optimizer, base_momentum
)
self.max_momentums = _format_param("max_momentum", optimizer, max_momentum)
if last_epoch == -1:
for m_momentum, b_momentum, group in zip(
self.max_momentums,
self.base_momentums,
optimizer.param_groups,
strict=True,
):
if self.use_beta1:
group["betas"] = (m_momentum, *group["betas"][1:])
else:
group["momentum"] = m_momentum
group["max_momentum"] = m_momentum
group["base_momentum"] = b_momentum
super().__init__(optimizer, last_epoch)
self.base_lrs = base_lrs
def _init_scale_fn(self) -> None:
if self._scale_fn_custom is not None:
return
if self.mode == "triangular":
self._scale_fn_ref = self._triangular_scale_fn
self.scale_mode = "cycle"
elif self.mode == "triangular2":
self._scale_fn_ref = self._triangular2_scale_fn
self.scale_mode = "cycle"
elif self.mode == "exp_range":
self._scale_fn_ref = partial(self._exp_range_scale_fn, self.gamma)
self.scale_mode = "iterations"
def scale_fn(self, x) -> float:
"""Get the scaling policy."""
if self._scale_fn_custom is not None:
return self._scale_fn_custom(x)
else:
return self._scale_fn_ref(x) # static method
@staticmethod
def _triangular_scale_fn(x: float) -> float:
return 1.0
@staticmethod
def _triangular2_scale_fn(x: float) -> float:
return 1 / (2.0 ** (x - 1))
@staticmethod
def _exp_range_scale_fn(gamma: float, x: float) -> float:
return gamma**x
@override
def get_lr(self) -> list[float | Tensor]:
r"""Compute the next learning rate for each of the optimizer's
:attr:`~torch.optim.Optimizer.param_groups`.
Advances each ``group["lr"]`` in the optimizer's
:attr:`~torch.optim.Optimizer.param_groups` along a cycle between the
group's ``base_lr`` and ``max_lr`` using :meth:`scale_fn`.
Returns:
list[float | Tensor]: A :class:`list` of learning rates for each of
the optimizer's :attr:`~torch.optim.Optimizer.param_groups` with the
same types as their current ``group["lr"]``\s.
.. note::
If you're trying to inspect the most recent learning rate, use
:meth:`get_last_lr()` instead.
.. note::
The returned :class:`~torch.Tensor`\s are copies, and never alias
the optimizer's ``group["lr"]``\s.
.. note::
This method treats :attr:`last_epoch` as the index of the previous
batch.
.. note::
When :attr:`cycle_momentum` is ``True``, this method has a side
effect of updating the optimizer's momentum.
"""
_warn_get_lr_called_within_step(self)
cycle = math.floor(1 + self.last_epoch / self.total_size)
x = 1.0 + self.last_epoch / self.total_size - cycle
if x <= self.step_ratio:
scale_factor = x / self.step_ratio
else:
scale_factor = (x - 1) / (self.step_ratio - 1)
lrs = []
for base_lr, max_lr in zip(self.base_lrs, self.max_lrs, strict=True):
base_height = (max_lr - base_lr) * scale_factor
if self.scale_mode == "cycle":
lr = base_lr + base_height * self.scale_fn(cycle)
else:
lr = base_lr + base_height * self.scale_fn(self.last_epoch)
lrs.append(lr)
if self.cycle_momentum:
momentums = []
for base_momentum, max_momentum in zip(
self.base_momentums, self.max_momentums, strict=True
):
base_height = (max_momentum - base_momentum) * scale_factor
if self.scale_mode == "cycle":
momentum = max_momentum - base_height * self.scale_fn(cycle)
else:
momentum = max_momentum - base_height * self.scale_fn(
self.last_epoch
)
momentums.append(momentum)
for param_group, momentum in zip(
self.optimizer.param_groups, momentums, strict=True
):
if self.use_beta1:
param_group["betas"] = (momentum, *param_group["betas"][1:])
else:
param_group["momentum"] = momentum
return lrs
@override
def state_dict(self) -> dict[str, Any]: # noqa: D102
"""Return the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in ``self.__dict__`` which
is not the optimizer.
The learning rate lambda functions will only be saved if they are callable objects
and not if they are functions or lambdas.
When saving or loading the scheduler, please make sure to also save or load the state of the optimizer.
"""
state = super().state_dict()
# We are dropping the `_scale_fn_ref` attribute because it is a
# `weakref.WeakMethod` and can't be pickled.
state.pop("_scale_fn_ref", None)
fn = state.pop("_scale_fn_custom")
state["_scale_fn_custom"] = None
if fn is not None and not isinstance(fn, types.FunctionType):
# The _scale_fn_custom will only be saved if it is a callable object
# and not if it is a function or lambda.
state["_scale_fn_custom"] = fn.__dict__.copy()
return state
@override
def load_state_dict(self, state_dict: dict[str, Any]) -> None:
"""Load the scheduler's state."""
fn = state_dict.pop("_scale_fn_custom")
super().load_state_dict(state_dict)
if fn is not None:
self._scale_fn_custom.__dict__.update(fn)
self._init_scale_fn()
| CyclicLR |
python | conda__conda | conda/exceptions.py | {
"start": 15802,
"end": 16008
} | class ____(CondaError, KeyError):
def __init__(self, key: Any, message: str, *args):
self.key = key
self.msg = f"{key!r}: {message}"
super().__init__(self.msg, *args)
| CondaKeyError |
python | ray-project__ray | python/ray/data/tests/test_pandas_block.py | {
"start": 7967,
"end": 17709
} | class ____:
def test_small(ray_start_regular_shared):
animals = ["Flamingo", "Centipede"]
block = pd.DataFrame({"animals": animals})
block_accessor = PandasBlockAccessor.for_block(block)
bytes_size = block_accessor.size_bytes()
# check that memory usage is within 10% of the size_bytes
# For strings, Pandas seems to be fairly accurate, so let's use that.
memory_usage = block.memory_usage(index=True, deep=True).sum()
assert bytes_size == pytest.approx(memory_usage, rel=0.1), (
bytes_size,
memory_usage,
)
def test_large_str(ray_start_regular_shared):
animals = [
random.choice(["alligator", "crocodile", "centipede", "flamingo"])
for i in range(100_000)
]
block = pd.DataFrame({"animals": animals})
block["animals"] = block["animals"].astype("string")
block_accessor = PandasBlockAccessor.for_block(block)
bytes_size = block_accessor.size_bytes()
memory_usage = block.memory_usage(index=True, deep=True).sum()
assert bytes_size == pytest.approx(memory_usage, rel=0.1), (
bytes_size,
memory_usage,
)
def test_large_str_object(ray_start_regular_shared):
"""Note - this test breaks if you refactor/move the list of animals."""
num = 100_000
animals = [
random.choice(["alligator", "crocodile", "centipede", "flamingo"])
for i in range(num)
]
block = pd.DataFrame({"animals": animals})
block_accessor = PandasBlockAccessor.for_block(block)
bytes_size = block_accessor.size_bytes()
# The actual usage should be the index usage + the string data usage.
memory_usage = block.memory_usage(index=True, deep=False).sum() + sum(
[sys.getsizeof(animal) for animal in animals]
)
assert bytes_size == pytest.approx(memory_usage, rel=0.1), (
bytes_size,
memory_usage,
)
def test_large_floats(ray_start_regular_shared):
animals = [random.random() for i in range(100_000)]
block = pd.DataFrame({"animals": animals})
block_accessor = PandasBlockAccessor.for_block(block)
bytes_size = block_accessor.size_bytes()
memory_usage = pickle.dumps(block).__sizeof__()
# check that memory usage is within 10% of the size_bytes
assert bytes_size == pytest.approx(memory_usage, rel=0.1), (
bytes_size,
memory_usage,
)
def test_bytes_object(ray_start_regular_shared):
def generate_data(batch):
for _ in range(8):
yield {"data": [[b"\x00" * 128 * 1024 * 128]]}
ds = (
ray.data.range(1, override_num_blocks=1)
.map_batches(generate_data, batch_size=1)
.map_batches(lambda batch: batch, batch_format="pandas")
)
true_value = 128 * 1024 * 128 * 8
for bundle in ds.iter_internal_ref_bundles():
size = bundle.size_bytes()
# assert that true_value is within 10% of bundle.size_bytes()
assert size == pytest.approx(true_value, rel=0.1), (
size,
true_value,
)
def test_nested_numpy(ray_start_regular_shared):
size = 1024
rows = 1_000
data = [
np.random.randint(size=size, low=0, high=100, dtype=np.int8)
for _ in range(rows)
]
df = pd.DataFrame({"data": data})
block_accessor = PandasBlockAccessor.for_block(df)
block_size = block_accessor.size_bytes()
true_value = rows * size
assert block_size == pytest.approx(true_value, rel=0.1), (
block_size,
true_value,
)
def test_nested_objects(ray_start_regular_shared):
size = 10
rows = 10_000
lists = [[random.randint(0, 100) for _ in range(size)] for _ in range(rows)]
data = {"lists": lists}
block = pd.DataFrame(data)
block_accessor = PandasBlockAccessor.for_block(block)
bytes_size = block_accessor.size_bytes()
# List overhead + 10 integers per list
true_size = rows * (
sys.getsizeof([random.randint(0, 100) for _ in range(size)]) + size * 28
)
assert bytes_size == pytest.approx(true_size, rel=0.1), (
bytes_size,
true_size,
)
def test_mixed_types(ray_start_regular_shared):
rows = 10_000
data = {
"integers": [random.randint(0, 100) for _ in range(rows)],
"floats": [random.random() for _ in range(rows)],
"strings": [
random.choice(["apple", "banana", "cherry"]) for _ in range(rows)
],
"object": [b"\x00" * 128 for _ in range(rows)],
}
block = pd.DataFrame(data)
block_accessor = PandasBlockAccessor.for_block(block)
bytes_size = block_accessor.size_bytes()
# Manually calculate the size
int_size = rows * 8
float_size = rows * 8
str_size = sum(sys.getsizeof(string) for string in data["strings"])
object_size = rows * sys.getsizeof(b"\x00" * 128)
true_size = int_size + float_size + str_size + object_size
assert bytes_size == pytest.approx(true_size, rel=0.1), (bytes_size, true_size)
def test_nested_lists_strings(ray_start_regular_shared):
rows = 5_000
nested_lists = ["a"] * 3 + ["bb"] * 4 + ["ccc"] * 3
data = {
"nested_lists": [nested_lists for _ in range(rows)],
}
block = pd.DataFrame(data)
block_accessor = PandasBlockAccessor.for_block(block)
bytes_size = block_accessor.size_bytes()
# Manually calculate the size
list_overhead = sys.getsizeof(block["nested_lists"].iloc[0]) + sum(
[sys.getsizeof(x) for x in nested_lists]
)
true_size = rows * list_overhead
assert bytes_size == pytest.approx(true_size, rel=0.1), (bytes_size, true_size)
@pytest.mark.parametrize("size", [10, 1024])
def test_multi_level_nesting(ray_start_regular_shared, size):
rows = 1_000
data = {
"complex": [
{"list": [np.random.rand(size)], "value": {"key": "val"}}
for _ in range(rows)
],
}
block = pd.DataFrame(data)
block_accessor = PandasBlockAccessor.for_block(block)
bytes_size = block_accessor.size_bytes()
numpy_size = np.random.rand(size).nbytes
values = ["list", "value", "key", "val"]
str_size = sum([sys.getsizeof(v) for v in values])
list_ref_overhead = sys.getsizeof([np.random.rand(size)])
dict_overhead1 = sys.getsizeof({"key": "val"})
dict_overhead3 = sys.getsizeof(
{"list": [np.random.rand(size)], "value": {"key": "val"}}
)
true_size = (
numpy_size + str_size + list_ref_overhead + dict_overhead1 + dict_overhead3
) * rows
assert bytes_size == pytest.approx(true_size, rel=0.15), (
bytes_size,
true_size,
)
def test_boolean(ray_start_regular_shared):
data = [random.choice([True, False, None]) for _ in range(100_000)]
block = pd.DataFrame({"flags": pd.Series(data, dtype="boolean")})
block_accessor = PandasBlockAccessor.for_block(block)
bytes_size = block_accessor.size_bytes()
# No object case
true_size = block.memory_usage(index=True, deep=True).sum()
assert bytes_size == pytest.approx(true_size, rel=0.1), (bytes_size, true_size)
def test_arrow(ray_start_regular_shared):
data = [
random.choice(["alligator", "crocodile", "flamingo"]) for _ in range(50_000)
]
arrow_dtype = pd.ArrowDtype(pa.string())
block = pd.DataFrame({"animals": pd.Series(data, dtype=arrow_dtype)})
block_accessor = PandasBlockAccessor.for_block(block)
bytes_size = block_accessor.size_bytes()
true_size = block.memory_usage(index=True, deep=False).sum() + sum(
sys.getsizeof(x) for x in data
)
assert bytes_size == pytest.approx(true_size, rel=0.1), (bytes_size, true_size)
def test_iter_rows_with_na(ray_start_regular_shared):
block = pd.DataFrame({"col": [pd.NA]})
block_accessor = PandasBlockAccessor.for_block(block)
rows = block_accessor.iter_rows(public_row_format=True)
# We should return None for NaN values.
assert list(rows) == [{"col": None}]
def test_empty_dataframe_with_object_columns(ray_start_regular_shared):
"""Test that size_bytes handles empty DataFrames with object/string columns.
The warning log:
"Error calculating size for column 'parent': cannot call `vectorize`
on size 0 inputs unless `otypes` is set"
should not be logged in the presence of empty columns.
"""
from unittest.mock import patch
# Create an empty DataFrame but with defined columns and dtypes
block = pd.DataFrame(
{
"parent": pd.Series([], dtype=object),
"child": pd.Series([], dtype="string"),
"data": pd.Series([], dtype=object),
}
)
block_accessor = PandasBlockAccessor.for_block(block)
# Check that NO warning is logged after calling size_bytes
with patch("ray.data._internal.pandas_block.logger.warning") as mock_warning:
bytes_size = block_accessor.size_bytes()
mock_warning.assert_not_called()
assert bytes_size >= 0
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestSizeBytes |
python | wandb__wandb | wandb/sdk/internal/file_stream.py | {
"start": 785,
"end": 844
} | class ____(NamedTuple):
filename: str
data: str
| Chunk |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/tokens.py | {
"start": 7714,
"end": 8008
} | class ____(Token):
__slots__ = 'name', 'value'
id = '<directive>'
def __init__(self, name, value, start_mark, end_mark):
# type: (Any, Any, Any, Any) -> None
Token.__init__(self, start_mark, end_mark)
self.name = name
self.value = value
| DirectiveToken |
python | pytorch__pytorch | test/distributed/tensor/parallel/test_micro_pipeline_tp.py | {
"start": 2167,
"end": 18500
} | class ____(TestCase):
def setUp(self):
torch._inductor.config._micro_pipeline_tp = True
self.rank = 0
self.world_size = 2
torch.cuda.set_device("cuda:0")
store = FakeStore()
dist.init_process_group(
backend="fake",
world_size=self.world_size,
rank=self.rank,
store=store,
)
def tearDown(self):
dist.destroy_process_group()
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@fresh_cache()
def test_find_all_gather_patterns(self):
group = dist.group.WORLD
def func(
inp: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
a = all_gather_tensor(inp, gather_dim=0, group=group.group_name)
b = all_gather_tensor(inp, gather_dim=1, group=group.group_name)
c = _fp8_all_gather(inp, gather_dim=0, group_name=group.group_name)
d = _fp8_all_gather(inp, gather_dim=1, group_name=group.group_name)
return a, b, c, d
inp = torch.rand(64, 32, device="cuda")
gm = _make_post_grad_fx(func, inp)
all_gathers = find_all_gather_patterns(gm.graph)
self.assertEqual(len(all_gathers), 4)
# If this test fails, please update find_all_gather_patterns instead of
# modifying the following assertions.
for all_gather in all_gathers:
self.assertEqual(
all_gather.ag_node.target,
torch.ops._c10d_functional.all_gather_into_tensor.default,
)
self.assertEqual(all_gather.group_name, group.group_name)
self.assertEqual(all_gathers[0].gather_dim, 0)
self.assertEqual(
all_gathers[0].res_node.target,
torch.ops._c10d_functional.wait_tensor.default,
)
self.assertEqual(all_gathers[1].gather_dim, 1)
self.assertEqual(
all_gathers[1].res_node.target,
torch.ops.aten.cat.default,
)
self.assertEqual(all_gathers[2].gather_dim, 0)
self.assertEqual(
all_gathers[2].res_node.target,
torch.ops._c10d_functional.wait_tensor.default,
)
self.assertEqual(all_gathers[3].gather_dim, 1)
self.assertEqual(
all_gathers[3].res_node.target,
torch.ops.aten.view.dtype,
)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@fresh_cache()
def test_find_reduce_scatter_patterns(self):
group = dist.group.WORLD
def func(inp: torch.Tensor) -> torch.Tensor:
a = reduce_scatter_tensor(inp, "sum", scatter_dim=0, group=group.group_name)
b = reduce_scatter_tensor(inp, "avg", scatter_dim=1, group=group.group_name)
return a, b
inp = torch.rand(64, 32, device="cuda")
gm = make_fx(func)(inp)
reduce_scatters = find_reduce_scatter_patterns(gm.graph)
self.assertEqual(len(reduce_scatters), 2)
# If this test fails, please update find_reduce_scatter_patterns
# instead of modifying the following assertions.
for reduce_scatter in reduce_scatters:
self.assertEqual(
reduce_scatter.input_node.op,
"placeholder",
)
self.assertEqual(
reduce_scatter.reduce_scatter_node.target,
torch.ops._c10d_functional.reduce_scatter_tensor.default,
)
self.assertEqual(
reduce_scatter.wait_tensor_node.target,
torch.ops._c10d_functional.wait_tensor.default,
)
self.assertEqual(reduce_scatter.group_name, group.group_name)
self.assertEqual(reduce_scatters[0].reduce_op, "sum")
self.assertEqual(reduce_scatters[0].scatter_dim, 0)
self.assertEqual(reduce_scatters[1].reduce_op, "avg")
self.assertEqual(reduce_scatters[1].scatter_dim, 1)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@fresh_cache()
def test_get_unexposed_collectives(self):
group = dist.group.WORLD
def func(inp: torch.Tensor) -> torch.Tensor:
a = inp @ inp.T
# b is unexposed (hidden by a)
b = all_gather_tensor(inp, gather_dim=0, group=group.group_name)
c = b @ inp.T
# d is unexposed (hidden by c)
d = reduce_scatter_tensor(b, "avg", scatter_dim=0, group=group.group_name)
# e is exposed
e = all_gather_tensor(d, gather_dim=0, group=group.group_name)
return a, c, e
inp = torch.rand(64, 32, device="cuda")
gm = make_fx(func)(inp)
overlappable_collectives = _get_unexposed_collectives(gm.graph)
self.assertEqual(
list(map(str, overlappable_collectives)),
["all_gather_into_tensor", "reduce_scatter_tensor"],
)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@parametrize("A_dims", [2, 3])
@parametrize("gather_dim", [0, 1, 2])
@parametrize("return_A", [True, False])
@fresh_cache()
def test_fuse_all_gather_matmul(self, A_dims, gather_dim, return_A):
if gather_dim >= A_dims:
return
group = dist.group.WORLD
def func(A_shard: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
A = all_gather_tensor(A_shard, gather_dim=gather_dim, group=group)
if return_A:
return A, A @ B
else:
return None, A @ B
if A_dims == 2:
A_shard_shape = [64, 32]
elif A_dims == 3:
A_shard_shape = [2, 64, 32]
else:
raise AssertionError(f"Invalid A_dims: {A_dims}")
A_shard_shape[gather_dim] //= self.world_size
A_shard = torch.rand(*A_shard_shape, device="cuda")
B = torch.rand(32, 16, device="cuda")
with _test_mode():
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, A_shard, B)
eager_stride = func(A_shard, B)[1].stride()
compiled_stride = compiled(A_shard, B)[1].stride()
self.assertEqual(eager_stride, compiled_stride)
if gather_dim == A_dims - 1:
# Decomposing the matmul on the K dimension is not supported
self.assertNotIn("fused_all_gather_matmul", code)
self.assertIn("all_gather_into_tensor", code)
else:
self.assertIn("fused_all_gather_matmul", code)
self.assertNotIn("all_gather_into_tensor", code)
self.assertEqual("return_A=True" in code, return_A)
@runOnRocmArch(MI300_ARCH)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@parametrize("A_dims", [2, 3])
@parametrize("gather_dim", [0, 1, 2])
@parametrize("return_A", [True, False])
@fresh_cache()
def test_fuse_all_gather_scaled_matmul(self, A_dims, gather_dim, return_A):
if gather_dim >= A_dims:
return
group = dist.group.WORLD
def func(
A_shard: torch.Tensor,
B: torch.Tensor,
A_scale: torch.Tensor,
B_scale: torch.Tensor,
out_dtype: Optional[torch.dtype],
) -> torch.Tensor:
A = _fp8_all_gather(
A_shard, gather_dim=gather_dim, group_name=group.group_name
)
if len(A_shard.shape) > 2:
C = torch._scaled_mm(
A.flatten(0, -2), B, A_scale, B_scale, out_dtype=out_dtype
)
C = C.view(*A.shape[:-1], -1)
else:
C = torch._scaled_mm(A, B, A_scale, B_scale, out_dtype=out_dtype)
if return_A:
return A, C
else:
return None, C
if A_dims == 2:
A_shard_shape = [64, 32]
elif A_dims == 3:
A_shard_shape = [2, 64, 32]
else:
raise AssertionError(f"Invalid A_dims: {A_dims}")
A_shard_shape[gather_dim] //= self.world_size
A_shard = torch.rand(*A_shard_shape, device="cuda").to(e4m3_type)
B = torch.rand(16, 32, device="cuda").to(e4m3_type).T
A_scale = torch.tensor(0.1, device="cuda")
B_scale = torch.tensor(0.1, device="cuda")
gm = _make_post_grad_fx(func, A_shard, B, A_scale, B_scale, torch.bfloat16)
with _test_mode():
micro_pipeline_tp_pass(gm.graph)
if gather_dim == A_dims - 1:
self.assertNotIn("fused_all_gather_scaled_matmul", str(gm.graph))
self.assertIn("all_gather_into_tensor", str(gm.graph))
else:
# Decomposing the matmul on the K dimension is not supported
self.assertIn("fused_all_gather_scaled_matmul", str(gm.graph))
self.assertNotIn("all_gather_into_tensor", str(gm.graph))
if torch.cuda.get_device_capability() < (8, 9):
return
with _test_mode():
compiled = torch.compile(func)
code = run_and_get_triton_code(
compiled, A_shard, B, A_scale, B_scale, torch.bfloat16
)
if gather_dim == A_dims - 1:
self.assertNotIn("fused_all_gather_scaled_matmul", code)
self.assertIn("all_gather_into_tensor", code)
else:
# Decomposing the matmul on the K dimension is not supported
self.assertIn("fused_all_gather_scaled_matmul", code)
self.assertNotIn("all_gather_into_tensor", code)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@parametrize("A_dims", [2, 3])
@parametrize("scatter_dim", [0, 1, 2])
@fresh_cache()
def test_fuse_matmul_reduce_scatter(self, A_dims, scatter_dim):
if scatter_dim >= A_dims:
return
group = dist.group.WORLD
def func(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
return reduce_scatter_tensor(A @ B, "avg", scatter_dim, group)
if A_dims == 2:
A = torch.rand(64, 32, device="cuda")
elif A_dims == 3:
A = torch.rand(2, 64, 32, device="cuda")
else:
raise AssertionError(f"Invalid A_dims: {A_dims}")
B = torch.rand(32, 16, device="cuda")
with _test_mode():
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, A, B)
self.assertIn("fused_matmul_reduce_scatter", code)
self.assertNotIn("reduce_scatter_tensor", code)
@runOnRocmArch(MI300_ARCH)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@parametrize("A_dims", [2, 3])
@parametrize("scatter_dim", [0, 1, 2])
@fresh_cache()
def test_fuse_scaled_matmul_reduce_scatter(self, A_dims, scatter_dim):
if scatter_dim >= A_dims - 1:
return
group = dist.group.WORLD
def func(
A: torch.Tensor,
B: torch.Tensor,
A_scale: torch.Tensor,
B_scale: torch.Tensor,
out_dtype: torch.dtype,
) -> torch.Tensor:
if len(A.shape) > 2:
C = torch._scaled_mm(
A.flatten(0, -2), B, A_scale, B_scale, out_dtype=out_dtype
)
C = C.view(*A.shape[:-1], B.shape[1])
else:
C = torch._scaled_mm(A, B, A_scale, B_scale, out_dtype=out_dtype)
return reduce_scatter_tensor(C, "avg", scatter_dim, group)
if A_dims == 2:
A = torch.rand(64, 32, device="cuda").to(e4m3_type)
elif A_dims == 3:
A = torch.rand(2, 64, 32, device="cuda").to(e4m3_type)
else:
raise AssertionError(f"Invalid A_dims: {A_dims}")
B = torch.rand(16, 32, device="cuda").to(e4m3_type).T
A_scale = torch.tensor(0.1, device="cuda")
B_scale = torch.tensor(0.1, device="cuda")
gm = _make_post_grad_fx(func, A, B, A_scale, B_scale, torch.bfloat16)
with _test_mode():
micro_pipeline_tp_pass(gm.graph)
self.assertIn("fused_scaled_matmul_reduce_scatter", str(gm.graph))
self.assertNotIn("reduce_scatter_tensor", str(gm.graph))
if torch.cuda.get_device_capability() < (8, 9):
return
with _test_mode():
compiled = torch.compile(func)
code = run_and_get_triton_code(
compiled, A, B, A_scale, B_scale, torch.bfloat16
)
self.assertIn("fused_scaled_matmul_reduce_scatter", code)
self.assertNotIn("reduce_scatter_tensor", code)
@runOnRocmArch(MI300_ARCH)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@parametrize("scatter_dim", [0, 1])
@fresh_cache()
def test_fuse_scaled_matmul_reduce_scatter_rowwise_scales_reshape_mm_reshape(
self, scatter_dim
):
group = dist.group.WORLD
def reshape_mm_reshape(
A: torch.Tensor,
B: torch.Tensor,
A_scale: torch.Tensor,
B_scale: torch.Tensor,
out_dtype: torch.dtype,
) -> torch.Tensor:
"""
Performs a scaled_mm followed by a reduce scatter,
following the reshape -> scaled_mm -> reshape pattern.
"""
orig_shape = A.shape
# reshape tensor and scale together
A = A.reshape(-1, orig_shape[-1])
A_scale = A_scale.reshape(-1, A_scale.shape[-1])
A_scale = torch.reciprocal(A_scale)
C = torch._scaled_mm(A, B, A_scale, B_scale, out_dtype=out_dtype)
# reshape output to have same leading dims as original `A` tensor
C = C.view(*orig_shape[:-1], C.shape[-1])
return reduce_scatter_tensor(C, "sum", scatter_dim, group)
A = torch.rand(2, 16, 32, device="cuda").to(e4m3_type)
B = torch.rand(64, 32, device="cuda").to(e4m3_type).T
# A_scale = rowwise scales
A_scale = torch.full((2, 16, 1), 0.1, device="cuda")
# B_scale = rowwise scales transposed for A @ B^T
B_scale = torch.full((1, 64), 0.1, device="cuda")
gm = _make_post_grad_fx(
reshape_mm_reshape, A, B, A_scale, B_scale, torch.bfloat16
)
with _test_mode():
micro_pipeline_tp_pass(gm.graph)
self.assertIn("fused_scaled_matmul_reduce_scatter", str(gm.graph))
self.assertNotIn("reduce_scatter_tensor", str(gm.graph))
if torch.cuda.get_device_capability() < (8, 9):
return
with _test_mode():
compiled = torch.compile(reshape_mm_reshape)
code = run_and_get_triton_code(
compiled, A, B, A_scale, B_scale, torch.bfloat16
)
self.assertIn("fused_scaled_matmul_reduce_scatter", code)
self.assertNotIn("reduce_scatter_tensor", code)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@parametrize("shard_dim", [0, 1])
@fresh_cache()
def test_dtensor_seq_par(self, shard_dim: int):
model: torch.nn.Module = MLPModule(device="cuda", bias=False)
device_mesh = DeviceMesh(
"cuda",
torch.arange(0, self.world_size),
)
parallelize_plan = {
"net1": ColwiseParallel(input_layouts=Shard(shard_dim)),
"net2": RowwiseParallel(output_layouts=Shard(shard_dim)),
}
model = parallelize_module(model, device_mesh, parallelize_plan)
if shard_dim == 0:
inp = torch.rand(8, 10, device="cuda")
elif shard_dim == 1:
inp = torch.rand(2, 8, 10, device="cuda")
else:
raise AssertionError("Invalid shard_dim")
with _test_mode():
compiled = torch.compile(model)
code = run_and_get_triton_code(compiled, inp)
self.assertIn("fused_all_gather_matmul", code)
self.assertNotIn("all_gather_into_tensor", code)
self.assertIn("fused_matmul_reduce_scatter", code)
self.assertNotIn("reduce_scatter_tensor", code)
@instantiate_parametrized_tests
| MicroPipelineTPTest |
python | readthedocs__readthedocs.org | readthedocs/projects/views/private.py | {
"start": 9892,
"end": 10295
} | class ____(ProjectVersionMixin):
def get_queryset(self):
return (
self.get_project()
.versions(manager=INTERNAL)
.public(
user=self.request.user,
only_active=False,
)
)
def form_valid(self, form):
form.save()
return HttpResponseRedirect(self.get_success_url())
| ProjectVersionEditMixin |
python | huggingface__transformers | src/transformers/models/hunyuan_v1_moe/modeling_hunyuan_v1_moe.py | {
"start": 2238,
"end": 2975
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
HunYuanMoEV1RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| HunYuanMoEV1RMSNorm |
python | getsentry__sentry | src/sentry/api/serializers/models/commit.py | {
"start": 1664,
"end": 3854
} | class ____(Serializer):
def __init__(self, exclude=None, include=None, type=None, *args, **kwargs):
Serializer.__init__(self, *args, **kwargs)
self.exclude = frozenset(exclude if exclude else ())
self.type = type or ""
def get_attrs(self, item_list, user, **kwargs):
if "author" not in self.exclude:
users_by_author = get_users_for_commits(item_list, user)
else:
users_by_author = {}
if "repository" not in self.exclude:
repositories = serialize(
list(Repository.objects.filter(id__in=[c.repository_id for c in item_list])), user
)
else:
repositories = []
repository_objs = {repository["id"]: repository for repository in repositories}
pull_requests = list(
PullRequest.objects.filter(
merge_commit_sha__in=[c.key for c in item_list],
organization_id=item_list[0].organization_id,
)
)
pull_request_by_commit = {
pr.merge_commit_sha: serialized_pr
for (pr, serialized_pr) in zip(pull_requests, serialize(pull_requests))
}
result = {}
for item in item_list:
result[item] = {
"repository": repository_objs.get(str(item.repository_id), {}),
"user": users_by_author.get(str(item.author_id), {}) if item.author_id else {},
"pull_request": pull_request_by_commit.get(item.key, None),
"suspect_commit_type": self.type,
}
return result
def serialize(self, obj: Commit, attrs, user, **kwargs) -> CommitSerializerResponse:
d: CommitSerializerResponse = {
"id": obj.key,
"message": obj.message,
"dateCreated": obj.date_added,
"pullRequest": attrs["pull_request"],
"suspectCommitType": attrs["suspect_commit_type"],
}
if "repository" not in self.exclude:
d["repository"] = attrs["repository"]
if "author" not in self.exclude:
d["author"] = attrs["user"]
return d
@register(Commit)
| CommitSerializer |
python | dask__dask | dask/dataframe/dask_expr/_rolling.py | {
"start": 5755,
"end": 9468
} | class ____:
"""Aggregate using one or more operations
The purpose of this class is to expose an API similar
to Pandas' `Rolling` for dask-expr
"""
def __init__(
self,
obj,
window,
groupby_kwargs=None,
groupby_slice=None,
min_periods=None,
center=False,
win_type=None,
):
if obj.divisions[0] is None and len(obj.divisions) > 2:
msg = (
"Can only rolling dataframes with known divisions\n"
"See https://docs.dask.org/en/latest/dataframe-design.html#partitions\n"
"for more information."
)
raise ValueError(msg)
self.obj = obj
self.window = window
self.groupby_kwargs = groupby_kwargs
self.groupby_slice = groupby_slice
self.min_periods = min_periods
self.center = center
self.win_type = win_type
# Allow pandas to raise if appropriate
obj._meta.rolling(window, **self.kwargs)
@functools.cached_property
def kwargs(self):
return dict(
min_periods=self.min_periods, center=self.center, win_type=self.win_type
)
def _single_agg(self, expr_cls, how_args=(), how_kwargs=None):
return new_collection(
expr_cls(
self.obj,
self.window,
kwargs=self.kwargs,
how_args=how_args,
how_kwargs=how_kwargs,
groupby_kwargs=self.groupby_kwargs,
groupby_slice=self.groupby_slice,
)
)
@derived_from(pd_Rolling)
def cov(self, *args, **kwargs):
return self._single_agg(RollingCov, how_args=args, how_kwargs=kwargs)
@derived_from(pd_Rolling)
def apply(self, func, *args, **kwargs):
return self._single_agg(RollingApply, how_args=(func, *args), how_kwargs=kwargs)
@derived_from(pd_Rolling)
def count(self, *args, **kwargs):
return self._single_agg(RollingCount, how_args=args, how_kwargs=kwargs)
@derived_from(pd_Rolling)
def sum(self, *args, **kwargs):
return self._single_agg(RollingSum, how_args=args, how_kwargs=kwargs)
@derived_from(pd_Rolling)
def mean(self, *args, **kwargs):
return self._single_agg(RollingMean, how_args=args, how_kwargs=kwargs)
@derived_from(pd_Rolling)
def min(self, *args, **kwargs):
return self._single_agg(RollingMin, how_args=args, how_kwargs=kwargs)
@derived_from(pd_Rolling)
def max(self, *args, **kwargs):
return self._single_agg(RollingMax, how_args=args, how_kwargs=kwargs)
@derived_from(pd_Rolling)
def var(self, *args, **kwargs):
return self._single_agg(RollingVar, how_args=args, how_kwargs=kwargs)
@derived_from(pd_Rolling)
def std(self, *args, **kwargs):
return self._single_agg(RollingStd, how_args=args, how_kwargs=kwargs)
@derived_from(pd_Rolling)
def median(self, *args, **kwargs):
return self._single_agg(RollingMedian, how_args=args, how_kwargs=kwargs)
@derived_from(pd_Rolling)
def quantile(self, q, *args, **kwargs):
return self._single_agg(RollingQuantile, how_args=(q, *args), how_kwargs=kwargs)
@derived_from(pd_Rolling)
def skew(self, *args, **kwargs):
return self._single_agg(RollingSkew, how_args=args, how_kwargs=kwargs)
@derived_from(pd_Rolling)
def kurt(self, *args, **kwargs):
return self._single_agg(RollingKurt, how_args=args, how_kwargs=kwargs)
@derived_from(pd_Rolling)
def agg(self, func, *args, **kwargs):
return self._single_agg(RollingAgg, how_args=(func, *args), how_kwargs=kwargs)
| Rolling |
python | numpy__numpy | numpy/lib/tests/test__datasource.py | {
"start": 4115,
"end": 4985
} | class ____:
def test_ValidHTTP(self, tmp_path):
ds = datasource.DataSource(tmp_path)
assert_(ds.exists(valid_httpurl()))
def test_InvalidHTTP(self, tmp_path):
ds = datasource.DataSource(tmp_path)
assert_equal(ds.exists(invalid_httpurl()), False)
def test_ValidFile(self, tmp_path):
# Test valid file in destpath
ds = datasource.DataSource(tmp_path)
tmpfile = valid_textfile(tmp_path)
assert_(ds.exists(tmpfile))
# Test valid local file not in destpath
localdir = mkdtemp()
tmpfile = valid_textfile(localdir)
assert_(ds.exists(tmpfile))
rmtree(localdir)
def test_InvalidFile(self, tmp_path):
ds = datasource.DataSource(tmp_path)
tmpfile = invalid_textfile(tmp_path)
assert_equal(ds.exists(tmpfile), False)
| TestDataSourceExists |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/tokens.py | {
"start": 10530,
"end": 12119
} | class ____(Token):
__slots__ = '_value', 'pre_done'
id = '<comment>'
def __init__(self, value, start_mark=None, end_mark=None, column=None):
# type: (Any, Any, Any, Any) -> None
if start_mark is None:
assert column is not None
self._column = column
Token.__init__(self, start_mark, None) # type: ignore
self._value = value
@property
def value(self):
# type: () -> str
if isinstance(self._value, str):
return self._value
return "".join(self._value)
@value.setter
def value(self, val):
# type: (Any) -> None
self._value = val
def reset(self):
# type: () -> None
if hasattr(self, 'pre_done'):
delattr(self, 'pre_done')
def __repr__(self):
# type: () -> Any
v = '{!r}'.format(self.value)
if SHOW_LINES:
try:
v += ', line: ' + str(self.start_mark.line)
except: # NOQA
pass
try:
v += ', col: ' + str(self.start_mark.column)
except: # NOQA
pass
return 'CommentToken({})'.format(v)
def __eq__(self, other):
# type: (Any) -> bool
if self.start_mark != other.start_mark:
return False
if self.end_mark != other.end_mark:
return False
if self.value != other.value:
return False
return True
def __ne__(self, other):
# type: (Any) -> bool
return not self.__eq__(other)
| CommentToken |
python | urllib3__urllib3 | test/with_dummyserver/test_https.py | {
"start": 46819,
"end": 46921
} | class ____(BaseTestHTTPS):
tls_protocol_name = "TLSv1.3"
certs = TLSv1_3_CERTS
| TestHTTPS_TLSv1_3 |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_evaluation.py | {
"start": 3466,
"end": 4607
} | class ____(NamedTupleSerializer):
"""Unpacks the legacy AutoMaterializeAssetEvaluation class into a completely empty
AutomationConditionEvaluationWithRunIds.
"""
def unpack( # pyright: ignore[reportIncompatibleMethodOverride]
self,
unpacked_dict: dict[str, UnpackedValue],
whitelist_map: WhitelistMap,
context: UnpackContext,
) -> "AutomationConditionEvaluationWithRunIds":
return AutomationConditionEvaluationWithRunIds(
evaluation=AutomationConditionEvaluation(
condition_snapshot=AutomationConditionNodeSnapshot("", "", "", None, None),
start_timestamp=None,
end_timestamp=None,
true_subset=SerializableEntitySubset(key=AssetKey("unknown"), value=False),
candidate_subset=HistoricalAllPartitionsSubsetSentinel(),
subsets_with_metadata=[],
child_evaluations=[],
),
run_ids=frozenset(),
)
@whitelist_for_serdes(serializer=BackcompatAutoMaterializeAssetEvaluationSerializer)
| BackcompatAutoMaterializeAssetEvaluationSerializer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 939559,
"end": 939971
} | class ____(sgqlc.types.Type):
"""Represents a repository migration."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("RepositoryMigration", graphql_name="node")
"""The item at the end of the edge."""
| RepositoryMigrationEdge |
python | pypa__pipenv | pipenv/patched/pip/_internal/metadata/importlib/_envs.py | {
"start": 4376,
"end": 5342
} | class ____(BaseEnvironment):
def __init__(self, paths: Sequence[str]) -> None:
self._paths = paths
@classmethod
def default(cls) -> BaseEnvironment:
return cls(sys.path)
@classmethod
def from_paths(cls, paths: Optional[List[str]]) -> BaseEnvironment:
if paths is None:
return cls(sys.path)
return cls(paths)
def _iter_distributions(self) -> Iterator[BaseDistribution]:
finder = _DistributionFinder()
for location in self._paths:
yield from finder.find(location)
yield from finder.find_legacy_editables(location)
def get_distribution(self, name: str) -> Optional[BaseDistribution]:
canonical_name = canonicalize_name(name)
matches = (
distribution
for distribution in self.iter_all_distributions()
if distribution.canonical_name == canonical_name
)
return next(matches, None)
| Environment |
python | huggingface__transformers | src/transformers/models/yoso/modeling_yoso.py | {
"start": 24320,
"end": 27827
} | class ____(YosoPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = YosoEmbeddings(config)
self.encoder = YosoEncoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return BaseModelOutputWithCrossAttentions(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@auto_docstring
| YosoModel |
python | redis__redis-py | redis/_parsers/resp3.py | {
"start": 339,
"end": 5032
} | class ____(_RESPBase, PushNotificationsParser):
"""RESP3 protocol implementation"""
def __init__(self, socket_read_size):
super().__init__(socket_read_size)
self.pubsub_push_handler_func = self.handle_pubsub_push_response
self.node_moving_push_handler_func = None
self.maintenance_push_handler_func = None
self.invalidation_push_handler_func = None
def handle_pubsub_push_response(self, response):
logger = getLogger("push_response")
logger.debug("Push response: " + str(response))
return response
def read_response(self, disable_decoding=False, push_request=False):
pos = self._buffer.get_pos() if self._buffer else None
try:
result = self._read_response(
disable_decoding=disable_decoding, push_request=push_request
)
except BaseException:
if self._buffer:
self._buffer.rewind(pos)
raise
else:
self._buffer.purge()
return result
def _read_response(self, disable_decoding=False, push_request=False):
raw = self._buffer.readline()
if not raw:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
byte, response = raw[:1], raw[1:]
# server returned an error
if byte in (b"-", b"!"):
if byte == b"!":
response = self._buffer.read(int(response))
response = response.decode("utf-8", errors="replace")
error = self.parse_error(response)
# if the error is a ConnectionError, raise immediately so the user
# is notified
if isinstance(error, ConnectionError):
raise error
# otherwise, we're dealing with a ResponseError that might belong
# inside a pipeline response. the connection's read_response()
# and/or the pipeline's execute() will raise this error if
# necessary, so just return the exception instance here.
return error
# single value
elif byte == b"+":
pass
# null value
elif byte == b"_":
return None
# int and big int values
elif byte in (b":", b"("):
return int(response)
# double value
elif byte == b",":
return float(response)
# bool value
elif byte == b"#":
return response == b"t"
# bulk response
elif byte == b"$":
response = self._buffer.read(int(response))
# verbatim string response
elif byte == b"=":
response = self._buffer.read(int(response))[4:]
# array response
elif byte == b"*":
response = [
self._read_response(disable_decoding=disable_decoding)
for _ in range(int(response))
]
# set response
elif byte == b"~":
# redis can return unhashable types (like dict) in a set,
# so we return sets as list, all the time, for predictability
response = [
self._read_response(disable_decoding=disable_decoding)
for _ in range(int(response))
]
# map response
elif byte == b"%":
# We cannot use a dict-comprehension to parse stream.
# Evaluation order of key:val expression in dict comprehension only
# became defined to be left-right in version 3.8
resp_dict = {}
for _ in range(int(response)):
key = self._read_response(disable_decoding=disable_decoding)
resp_dict[key] = self._read_response(
disable_decoding=disable_decoding, push_request=push_request
)
response = resp_dict
# push response
elif byte == b">":
response = [
self._read_response(
disable_decoding=disable_decoding, push_request=push_request
)
for _ in range(int(response))
]
response = self.handle_push_response(response)
# if this is a push request return the push response
if push_request:
return response
return self._read_response(
disable_decoding=disable_decoding,
push_request=push_request,
)
else:
raise InvalidResponse(f"Protocol Error: {raw!r}")
if isinstance(response, bytes) and disable_decoding is False:
response = self.encoder.decode(response)
return response
| _RESP3Parser |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_doc_integration_avatar.py | {
"start": 1212,
"end": 2938
} | class ____(DocIntegrationAvatarTest):
method = "GET"
def test_user_view_avatar(self) -> None:
"""
Tests that regular users can see only published doc integration avatars
"""
self.login_as(user=self.user)
response = self.get_success_response(
self.published_doc.slug, status_code=status.HTTP_200_OK
)
assert serialize(self.published_doc) == response.data
assert serialize(self.published_doc.avatar.get()) == response.data["avatar"]
response = self.get_error_response(
self.draft_doc.slug, status_code=status.HTTP_403_FORBIDDEN
)
# TODO(schew2381): Change test to check that superusers can only see published doc integration avatars
def test_superuser_view_avatar(self) -> None:
"""
Tests that superusers can see all doc integration avatars
"""
self.login_as(user=self.superuser, superuser=True)
for doc in [self.published_doc, self.draft_doc]:
response = self.get_success_response(doc.slug, status_code=status.HTTP_200_OK)
assert serialize(doc) == response.data
assert serialize(doc.avatar.get()) == response.data["avatar"]
def test_staff_view_avatar(self) -> None:
"""
Tests that staff can see all doc integration avatars
"""
self.login_as(user=self.staff_user, staff=True)
for doc in [self.published_doc, self.draft_doc]:
response = self.get_success_response(doc.slug, status_code=status.HTTP_200_OK)
assert serialize(doc) == response.data
assert serialize(doc.avatar.get()) == response.data["avatar"]
@control_silo_test
| GetDocIntegrationAvatarTest |
python | pypa__pip | src/pip/_vendor/urllib3/exceptions.py | {
"start": 7782,
"end": 8099
} | class ____(HTTPError):
"""Raised by assert_header_parsing, but we convert it to a log.warning statement."""
def __init__(self, defects, unparsed_data):
message = "%s, unparsed data: %r" % (defects or "Unknown", unparsed_data)
super(HeaderParsingError, self).__init__(message)
| HeaderParsingError |
python | PyCQA__pylint | tests/functional/n/name/name_styles.py | {
"start": 3882,
"end": 4026
} | class ____(Exception):
"""A very exceptional exception with a nice descriptive name"""
pass
| _AnExceptionalExceptionThatOccursVeryVeryRarely |
python | kamyu104__LeetCode-Solutions | Python/word-squares.py | {
"start": 420,
"end": 1313
} | class ____(object):
def wordSquares(self, words):
"""
:type words: List[str]
:rtype: List[List[str]]
"""
result = []
trie = TrieNode()
for i in xrange(len(words)):
trie.insert(words, i)
curr = []
for s in words:
curr.append(s)
self.wordSquaresHelper(words, trie, curr, result)
curr.pop()
return result
def wordSquaresHelper(self, words, trie, curr, result):
if len(curr) >= len(words[0]):
return result.append(list(curr))
node = trie
for s in curr:
node = node.children[ord(s[len(curr)]) - ord('a')]
if not node:
return
for i in node.indices:
curr.append(words[i])
self.wordSquaresHelper(words, trie, curr, result)
curr.pop()
| Solution |
python | ipython__ipython | tests/test_guarded_eval.py | {
"start": 8188,
"end": 27952
} | class ____:
def custom_int_type(self) -> CustomIntType:
return CustomIntType(1)
def custom_heap_type(self) -> CustomHeapType:
return CustomHeapType(HeapType())
def int_type_alias(self) -> IntTypeAlias:
return 1
def heap_type_alias(self) -> HeapTypeAlias:
return 1
def literal(self) -> Literal[False]:
return False
def literal_string(self) -> LiteralString:
return "test"
def self(self) -> Self:
return self
def any_str(self, x: AnyStr) -> AnyStr:
return x
def with_kwargs(self, a=1, b=2, c=3) -> int:
return a + b + c
def annotated(self) -> Annotated[float, "positive number"]:
return 1
def annotated_self(self) -> Annotated[Self, "self with metadata"]:
self._metadata = "test"
return self
def int_type_guard(self, x) -> TypeGuard[int]:
return isinstance(x, int)
def optional_float(self) -> Optional[float]:
return 1.0
def union_str_and_int(self) -> Union[str, int]:
return ""
def protocol(self) -> ProtocolTest:
return ProtocolTestImplementer()
def typed_dict(self) -> Movie:
return {"name": "The Matrix", "year": 1999}
@pytest.mark.parametrize(
"data,code,expected,equality",
[
[[1, 2, 3], "data.index(2)", 1, True],
[{"a": 1}, "data.keys().isdisjoint({})", True, True],
[StringAnnotation(), "data.heap()", HeapType, False],
[StringAnnotation(), "data.copy()", StringAnnotation, False],
# test cases for `__call__`
[CallCreatesHeapType(), "data()", HeapType, False],
[CallCreatesBuiltin(), "data()", frozenset, False],
# Test cases for `__init__`
[HeapType, "data()", HeapType, False],
[InitReturnsFrozenset, "data()", frozenset, False],
[HeapType(), "data.__class__()", HeapType, False],
# supported special cases for typing
[SpecialTyping(), "data.custom_int_type()", int, False],
[SpecialTyping(), "data.custom_heap_type()", HeapType, False],
[SpecialTyping(), "data.int_type_alias()", int, False],
[SpecialTyping(), "data.heap_type_alias()", HeapType, False],
[SpecialTyping(), "data.self()", SpecialTyping, False],
[SpecialTyping(), "data.literal()", False, True],
[SpecialTyping(), "data.literal_string()", str, False],
[SpecialTyping(), "data.any_str('a')", str, False],
[SpecialTyping(), "data.any_str(b'a')", bytes, False],
[SpecialTyping(), "data.with_kwargs(b=3)", int, False],
[SpecialTyping(), "data.annotated()", float, False],
[SpecialTyping(), "data.annotated_self()", SpecialTyping, False],
[SpecialTyping(), "data.int_type_guard()", int, False],
# test cases for static methods
[HasStaticMethod, "data.static_method()", HeapType, False],
],
)
def test_evaluates_calls(data, code, expected, equality):
context = limited(data=data, HeapType=HeapType, StringAnnotation=StringAnnotation)
value = guarded_eval(code, context)
if equality:
assert value == expected
else:
assert isinstance(value, expected)
@pytest.mark.parametrize(
"data,code,expected_attributes",
[
[SpecialTyping(), "data.optional_float()", ["is_integer"]],
[
SpecialTyping(),
"data.union_str_and_int()",
["capitalize", "as_integer_ratio"],
],
[SpecialTyping(), "data.protocol()", ["test_method"]],
[SpecialTyping(), "data.typed_dict()", ["keys", "values", "items"]],
],
)
def test_mocks_attributes_of_call_results(data, code, expected_attributes):
context = limited(data=data, HeapType=HeapType, StringAnnotation=StringAnnotation)
result = guarded_eval(code, context)
for attr in expected_attributes:
assert hasattr(result, attr)
assert attr in dir(result)
@pytest.mark.parametrize(
"data,code,expected_items",
[
[SpecialTyping(), "data.typed_dict()", {"year": int, "name": str}],
],
)
def test_mocks_items_of_call_results(data, code, expected_items):
context = limited(data=data, HeapType=HeapType, StringAnnotation=StringAnnotation)
result = guarded_eval(code, context)
ipython_keys = result._ipython_key_completions_()
for key, value in expected_items.items():
assert isinstance(result[key], value)
assert key in ipython_keys
@pytest.mark.parametrize(
"code,expected",
[
["\n".join(["instance = TypedClass()", "instance.test_method()"]), bool],
["\n".join(["def func() -> int:", " pass", "func()"]), int],
[
"\n".join(
[
"class NotYetDefined:",
" def method(self) -> str:",
" pass",
"instance = NotYetDefined()",
"instance.method()",
]
),
str,
],
[
"\n".join(
[
"class NotYetDefined:",
" def method(self, argument) -> int:",
" pass",
"instance = NotYetDefined()",
"instance.method()",
]
),
int,
],
[
"\n".join(
[
"class NotYetDefined:",
" @property",
" def my_prop(self) -> int:",
" pass",
"instance = NotYetDefined()",
"instance.my_prop",
]
),
int,
],
[
"\n".join(
[
"class NotYetDefined:",
" @unkown_decorator",
" @property",
" def my_prop(self) -> int:",
" pass",
"instance = NotYetDefined()",
"instance.my_prop",
]
),
int,
],
[
"\n".join(
[
"class NotYetDefined:",
" attribute = 42",
"instance = NotYetDefined()",
"instance.attribute",
]
),
int,
],
[
"\n".join(
[
"class NotYetDefined:",
" def any_str(self, argument: AnyStr) -> AnyStr:",
" pass",
"instance = NotYetDefined()",
"instance.any_str(b'test')",
]
),
bytes,
],
[
"\n".join(
[
"class NotYetDefined:",
" def any_str(self, argument: AnyStr) -> AnyStr:",
" pass",
"instance = NotYetDefined()",
"instance.any_str('test')",
]
),
str,
],
[
"\n".join(
[
"async def async_func():",
" return []",
"(await async_func())",
]
),
list,
],
[
"\n".join(
[
"make_list = lambda:[]",
"make_list()",
]
),
list,
],
],
)
def test_mock_class_and_func_instances(code, expected):
context = limited(TypedClass=TypedClass, AnyStr=AnyStr)
value = guarded_eval(code, context)
assert isinstance(value, expected)
@pytest.mark.parametrize(
"annotation,expected",
[
# Basic types
(int, True),
(str, True),
(list, True),
# Typing generics
(list[str], True),
(dict[str, int], True),
(Optional[int], True),
(Union[int, str], True),
# Special forms
(AnyStr, True),
(TypeVar("T"), True),
(Callable[[int], str], True),
(Literal["GET", "POST"], True),
(Any, True),
(str | int, True),
# Nested
(List[Dict[str, int]], True),
# Non-annotations
(42, False),
("string", False),
([1, 2, 3], False),
(None, False),
],
)
def test_is_type_annotation(annotation, expected):
assert _is_type_annotation(annotation) == expected
@pytest.mark.parametrize(
"code,expected",
[
["\n".join(["a = True", "a"]), bool],
["\n".join(["a, b, c = 1, 'b', 3.0", "a"]), int],
["\n".join(["a, b, c = 1, 'b', 3.0", "b"]), str],
["\n".join(["a, b, c = 1, 'b', 3.0", "c"]), float],
["\n".join(["a, *rest = 1, 'b', 3.0", "a"]), int],
["\n".join(["a, *rest = 1, 'b', 3.0", "rest"]), list],
],
)
def test_evaluates_assignments(code, expected):
context = limited()
value = guarded_eval(code, context)
assert isinstance(value, expected)
def equals(a, b):
return a == b
def quacks_like(test_duck, reference_duck):
return set(dir(reference_duck)) - set(dir(test_duck)) == set()
@pytest.mark.parametrize(
"code,expected,check",
[
["\n".join(["a: Literal[True]", "a"]), True, equals],
["\n".join(["a: bool", "a"]), bool, isinstance],
["\n".join(["a: str", "a"]), str, isinstance],
# for lists we need quacking as we do not know:
# - how many elements in the list
# - which element is of which type
["\n".join(["a: list[str]", "a"]), list, quacks_like],
["\n".join(["a: list[str]", "a[0]"]), str, quacks_like],
["\n".join(["a: list[str]", "a[999]"]), str, quacks_like],
# set
["\n".join(["a: set[str]", "a"]), set, quacks_like],
# for tuples we do know which element is which
["\n".join(["a: tuple[str, int]", "a"]), tuple, isinstance],
["\n".join(["a: tuple[str, int]", "a[0]"]), str, isinstance],
["\n".join(["a: tuple[str, int]", "a[1]"]), int, isinstance],
],
)
def test_evaluates_type_assignments(code, expected, check):
context = limited(Literal=Literal)
value = guarded_eval(code, context)
assert check(value, expected)
@pytest.mark.parametrize(
"data,bad",
[
[[1, 2, 3], "data.append(4)"],
[{"a": 1}, "data.update()"],
],
)
def test_rejects_calls_with_side_effects(data, bad):
context = limited(data=data)
with pytest.raises(GuardRejection):
guarded_eval(bad, context)
@pytest.mark.parametrize(
"code,expected",
[
["(1\n+\n1)", 2],
["list(range(10))[-1:]", [9]],
["list(range(20))[3:-2:3]", [3, 6, 9, 12, 15]],
],
)
@pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
def test_evaluates_complex_cases(code, expected, context):
assert guarded_eval(code, context()) == expected
@pytest.mark.parametrize(
"code,expected",
[
["1", 1],
["1.0", 1.0],
["0xdeedbeef", 0xDEEDBEEF],
["True", True],
["None", None],
["{}", {}],
["[]", []],
],
)
@pytest.mark.parametrize("context", MINIMAL_OR_HIGHER)
def test_evaluates_literals(code, expected, context):
assert guarded_eval(code, context()) == expected
@pytest.mark.parametrize(
"code,expected",
[
["-5", -5],
["+5", +5],
["~5", -6],
],
)
@pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
def test_evaluates_unary_operations(code, expected, context):
assert guarded_eval(code, context()) == expected
@pytest.mark.parametrize(
"code,expected",
[
["1 + 1", 2],
["3 - 1", 2],
["2 * 3", 6],
["5 // 2", 2],
["5 / 2", 2.5],
["5**2", 25],
["2 >> 1", 1],
["2 << 1", 4],
["1 | 2", 3],
["1 & 1", 1],
["1 & 2", 0],
],
)
@pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
def test_evaluates_binary_operations(code, expected, context):
assert guarded_eval(code, context()) == expected
@pytest.mark.parametrize(
"code,expected",
[
["2 > 1", True],
["2 < 1", False],
["2 <= 1", False],
["2 <= 2", True],
["1 >= 2", False],
["2 >= 2", True],
["2 == 2", True],
["1 == 2", False],
["1 != 2", True],
["1 != 1", False],
["1 < 4 < 3", False],
["(1 < 4) < 3", True],
["4 > 3 > 2 > 1", True],
["4 > 3 > 2 > 9", False],
["1 < 2 < 3 < 4", True],
["9 < 2 < 3 < 4", False],
["1 < 2 > 1 > 0 > -1 < 1", True],
["1 in [1] in [[1]]", True],
["1 in [1] in [[2]]", False],
["1 in [1]", True],
["0 in [1]", False],
["1 not in [1]", False],
["0 not in [1]", True],
["True is True", True],
["False is False", True],
["True is False", False],
["True is not True", False],
["False is not True", True],
],
)
@pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
def test_evaluates_comparisons(code, expected, context):
assert guarded_eval(code, context()) == expected
def test_guards_comparisons():
class GoodEq(int):
pass
class BadEq(int):
def __eq__(self, other):
assert False
context = limited(bad=BadEq(1), good=GoodEq(1))
with pytest.raises(GuardRejection):
guarded_eval("bad == 1", context)
with pytest.raises(GuardRejection):
guarded_eval("bad != 1", context)
with pytest.raises(GuardRejection):
guarded_eval("1 == bad", context)
with pytest.raises(GuardRejection):
guarded_eval("1 != bad", context)
assert guarded_eval("good == 1", context) is True
assert guarded_eval("good != 1", context) is False
assert guarded_eval("1 == good", context) is True
assert guarded_eval("1 != good", context) is False
def test_guards_unary_operations():
class GoodOp(int):
pass
class BadOpInv(int):
def __inv__(self, other):
assert False
class BadOpInverse(int):
def __inv__(self, other):
assert False
context = limited(good=GoodOp(1), bad1=BadOpInv(1), bad2=BadOpInverse(1))
with pytest.raises(GuardRejection):
guarded_eval("~bad1", context)
with pytest.raises(GuardRejection):
guarded_eval("~bad2", context)
def test_guards_binary_operations():
class GoodOp(int):
pass
class BadOp(int):
def __add__(self, other):
assert False
context = limited(good=GoodOp(1), bad=BadOp(1))
with pytest.raises(GuardRejection):
guarded_eval("1 + bad", context)
with pytest.raises(GuardRejection):
guarded_eval("bad + 1", context)
assert guarded_eval("good + 1", context) == 2
assert guarded_eval("1 + good", context) == 2
def test_guards_attributes():
class GoodAttr(float):
pass
class BadAttr1(float):
def __getattr__(self, key):
assert False
class BadAttr2(float):
def __getattribute__(self, key):
assert False
context = limited(good=GoodAttr(0.5), bad1=BadAttr1(0.5), bad2=BadAttr2(0.5))
with pytest.raises(GuardRejection):
guarded_eval("bad1.as_integer_ratio", context)
with pytest.raises(GuardRejection):
guarded_eval("bad2.as_integer_ratio", context)
assert guarded_eval("good.as_integer_ratio()", context) == (1, 2)
@pytest.mark.parametrize("context", MINIMAL_OR_HIGHER)
def test_access_builtins(context):
assert guarded_eval("round", context()) == round
def test_access_builtins_fails():
context = limited()
with pytest.raises(NameError):
guarded_eval("this_is_not_builtin", context)
def test_rejects_forbidden():
context = forbidden()
with pytest.raises(GuardRejection):
guarded_eval("1", context)
def test_guards_locals_and_globals():
context = EvaluationContext(
locals={"local_a": "a"}, globals={"global_b": "b"}, evaluation="minimal"
)
with pytest.raises(GuardRejection):
guarded_eval("local_a", context)
with pytest.raises(GuardRejection):
guarded_eval("global_b", context)
def test_access_locals_and_globals():
context = EvaluationContext(
locals={"local_a": "a"}, globals={"global_b": "b"}, evaluation="limited"
)
assert guarded_eval("local_a", context) == "a"
assert guarded_eval("global_b", context) == "b"
@pytest.mark.parametrize(
"code",
["x += 1", "del x"],
)
@pytest.mark.parametrize("context", [minimal(x=1), limited(x=1), unsafe(x=1)])
def test_does_not_modify_namespace(code, context):
guarded_eval(code, context)
assert context.locals["x"] == 1
@pytest.mark.parametrize(
"code",
["def test(): pass", "class test: pass", "import ast as test"],
)
@pytest.mark.parametrize("context", [minimal(), limited(), unsafe()])
def test_does_not_populate_namespace(code, context):
guarded_eval(code, context)
assert "test" not in context.locals
assert "test" not in context.globals
def test_subscript():
context = EvaluationContext(
locals={}, globals={}, evaluation="limited", in_subscript=True
)
empty_slice = slice(None, None, None)
assert guarded_eval("", context) == tuple()
assert guarded_eval(":", context) == empty_slice
assert guarded_eval("1:2:3", context) == slice(1, 2, 3)
assert guarded_eval(':, "a"', context) == (empty_slice, "a")
def test_unbind_method():
class X(list):
def index(self, k):
return "CUSTOM"
x = X()
assert _unbind_method(x.index) is X.index
assert _unbind_method([].index) is list.index
assert _unbind_method(list.index) is None
def test_assumption_instance_attr_do_not_matter():
"""This is semi-specified in Python documentation.
However, since the specification says 'not guaranteed
to work' rather than 'is forbidden to work', future
versions could invalidate this assumptions. This test
is meant to catch such a change if it ever comes true.
"""
class T:
def __getitem__(self, k):
return "a"
def __getattr__(self, k):
return "a"
def f(self):
return "b"
t = T()
t.__getitem__ = f
t.__getattr__ = f
assert t[1] == "a"
assert t[1] == "a"
def test_assumption_named_tuples_share_getitem():
"""Check assumption on named tuples sharing __getitem__"""
from typing import NamedTuple
class A(NamedTuple):
pass
class B(NamedTuple):
pass
assert A.__getitem__ == B.__getitem__
@dec.skip_without("numpy")
def test_module_access():
import numpy
context = limited(numpy=numpy)
assert guarded_eval("numpy.linalg.norm", context) == numpy.linalg.norm
context = minimal(numpy=numpy)
with pytest.raises(GuardRejection):
guarded_eval("np.linalg.norm", context)
def test_autoimport_module():
context = EvaluationContext(
locals={},
globals={},
evaluation="limited",
auto_import=import_module,
policy_overrides={"allow_auto_import": True},
)
pi = guarded_eval("math.pi", context)
assert round(pi, 2) == 3.14
def test_autoimport_deep_module():
context = EvaluationContext(
locals={},
globals={},
evaluation="limited",
auto_import=import_module,
policy_overrides={"allow_auto_import": True},
)
ElementTree = guarded_eval("xml.etree.ElementTree", context)
assert hasattr(ElementTree, "ElementTree")
| SpecialTyping |
python | facelessuser__pymdown-extensions | pymdownx/snippets.py | {
"start": 16390,
"end": 18509
} | class ____(Extension):
"""Snippet extension."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
'base_path': [["."], "Base path for snippet paths - Default: [\".\"]"],
'restrict_base_path': [
True,
"Restrict snippet paths such that they are under the base paths - Default: True"
],
'encoding': ["utf-8", "Encoding of snippets - Default: \"utf-8\""],
'check_paths': [False, "Make the build fail if a snippet can't be found - Default: \"False\""],
"auto_append": [
[],
"A list of snippets (relative to the 'base_path') to auto append to the Markdown content - Default: []"
],
'url_download': [False, "Download external URLs as snippets - Default: \"False\""],
'url_max_size': [DEFAULT_URL_SIZE, "External URL max size (0 means no limit)- Default: 32 MiB"],
'url_timeout': [DEFAULT_URL_TIMEOUT, 'Defualt URL timeout (0 means no timeout) - Default: 10 sec'],
'url_request_headers': [DEFAULT_URL_REQUEST_HEADERS, "Extra request Headers - Default: {}"],
'dedent_subsections': [False, "Dedent subsection extractions e.g. 'sections' and/or 'lines'."],
'max_retries': [
DEFAULT_MAX_RETRIES, "Maximum number of retry attempts for rate-limited requests - Default: 3"
],
'backoff_factor': [DEFAULT_BACKOFF_FACTOR, "Backoff factor for retry attempts - Default: 2"]
}
super().__init__(*args, **kwargs)
def extendMarkdown(self, md):
"""Register the extension."""
self.md = md
md.registerExtension(self)
config = self.getConfigs()
snippet = SnippetPreprocessor(config, md)
md.preprocessors.register(snippet, "snippet", 32)
def reset(self):
"""Reset."""
self.md.preprocessors['snippet'].download.cache_clear()
def makeExtension(*args, **kwargs):
"""Return extension."""
return SnippetExtension(*args, **kwargs)
| SnippetExtension |
python | ray-project__ray | rllib/models/torch/mingpt.py | {
"start": 7829,
"end": 11144
} | class ____(nn.Module):
"""GPT Transformer Model"""
def __init__(self, config: GPTConfig):
super().__init__()
assert config.block_size is not None
self.block_size = config.block_size
self.transformer = nn.ModuleDict(
dict(
drop=nn.Dropout(config.embed_pdrop),
h=nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
ln_f=nn.LayerNorm(config.n_embed),
)
)
# init all weights, and apply a special scaled init to the residual
# projections, per GPT-2 paper
self.apply(self._init_weights)
for pn, p in self.named_parameters():
if pn.endswith("c_proj.weight"):
torch.nn.init.normal_(
p, mean=0.0, std=0.02 / math.sqrt(2 * config.n_layer)
)
def _init_weights(self, module):
if isinstance(module, nn.Linear):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
if module.bias is not None:
torch.nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
torch.nn.init.zeros_(module.bias)
torch.nn.init.ones_(module.weight)
def forward(self, input_embeds, attention_masks=None, return_attentions=False):
"""
input_embeds: [batch_size x seq_len x n_embed]
attention_masks: [batch_size x seq_len], 0 don't attend, 1 attend
"""
B, T, C = input_embeds.size()
assert T <= self.block_size, (
f"Cannot forward sequence of length {T}, "
f"block size is only {self.block_size}"
)
if attention_masks is not None:
_B, _T = attention_masks.size()
assert _B == B and _T == T
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_len]
# So we can broadcast to
# [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular
# masking of causal attention used in OpenAI GPT, we just need
# to prepare the broadcast dimension here.
attention_masks = attention_masks[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend
# and 0.0 for masked positions, this operation will create a
# tensor which is 0.0 for positions we want to attend and -inf
# for masked positions. Since we are adding it to the raw scores
# before the softmax, this is effectively the same as removing
# these entirely.
attention_masks = attention_masks.to(dtype=input_embeds.dtype)
attention_masks = (1.0 - attention_masks) * -1e9
# forward the GPT model itself
x = self.transformer.drop(input_embeds)
atts = []
for block in self.transformer.h:
x, att = block(x, attention_masks=attention_masks)
atts.append(att)
x = self.transformer.ln_f(x)
if return_attentions:
return x, atts
else:
return x
| GPT |
python | realpython__materials | build-a-rest-api-frontend/source_code_final/models.py | {
"start": 593,
"end": 1082
} | class ____(db.Model):
__tablename__ = "person"
id = db.Column(db.Integer, primary_key=True)
lname = db.Column(db.String(32), nullable=False)
fname = db.Column(db.String(32))
timestamp = db.Column(
db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow
)
notes = db.relationship(
Note,
backref="person",
cascade="all, delete, delete-orphan",
single_parent=True,
order_by="desc(Note.timestamp)",
)
| Person |
python | openai__openai-python | src/openai/resources/fine_tuning/jobs/checkpoints.py | {
"start": 6966,
"end": 7197
} | class ____:
def __init__(self, checkpoints: Checkpoints) -> None:
self._checkpoints = checkpoints
self.list = to_streamed_response_wrapper(
checkpoints.list,
)
| CheckpointsWithStreamingResponse |
python | getsentry__sentry | src/sentry/notifications/notification_action/action_validation.py | {
"start": 11308,
"end": 12210
} | class ____(BaseActionValidatorHandler):
provider = Action.Type.WEBHOOK
notify_action_form = NotifyEventServiceForm
def _get_services(self) -> list[Any]:
plugins = get_notification_plugins_for_org(self.organization)
sentry_apps = app_service.find_alertable_services(organization_id=self.organization.id)
return [
*plugins,
*sentry_apps,
]
def generate_action_form_payload(self) -> dict[str, Any]:
return {
"services": self._get_services(),
"data": self.generate_action_form_data(),
}
def generate_action_form_data(self) -> dict[str, Any]:
return {
"service": self.validated_data["config"]["target_identifier"],
}
def update_action_data(self, cleaned_data: dict[str, Any]) -> dict[str, Any]:
return self.validated_data
| WebhookActionValidatorHandler |
python | kamyu104__LeetCode-Solutions | Python/maximize-the-number-of-target-nodes-after-connecting-trees-ii.py | {
"start": 43,
"end": 1190
} | class ____(object):
def maxTargetNodes(self, edges1, edges2):
"""
:type edges1: List[List[int]]
:type edges2: List[List[int]]
:rtype: List[int]
"""
def bfs(adj):
result = [0]*len(adj)
parity = 0
lookup = [-1]*len(adj)
lookup[0] = parity
q = [0]
while q:
new_q = []
for u in q:
for v in adj[u]:
if lookup[v] != -1:
continue
lookup[v] = parity^1
new_q.append(v)
q = new_q
parity ^= 1
cnt = sum(lookup)
return [cnt if lookup[u] else len(adj)-cnt for u in xrange(len(adj))]
def find_adj(edges):
adj = [[] for _ in xrange(len(edges)+1)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
return adj
adj2 = find_adj(edges2)
mx = max(bfs(adj2))
adj1 = find_adj(edges1)
return [mx+x for x in bfs(adj1)]
| Solution |
python | huggingface__transformers | src/transformers/models/edgetam_video/modular_edgetam_video.py | {
"start": 25240,
"end": 27787
} | class ____(nn.Module):
"""Self-attention with rotary position encoding."""
def __init__(self, config: EdgeTamVideoConfig):
super().__init__()
self.config = config
self.hidden_size = config.memory_attention_hidden_size
self.internal_dim = self.hidden_size // config.memory_attention_downsample_rate
self.num_attention_heads = config.memory_attention_num_attention_heads
self.head_dim = self.internal_dim // config.memory_attention_num_attention_heads
self.scaling = self.head_dim**-0.5
self.is_causal = False
self.q_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.k_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.v_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.o_proj = nn.Linear(self.internal_dim, self.hidden_size)
self.dropout_p = config.memory_attention_rope_dropout
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
**kwargs: Unpack[FlashAttentionKwargs],
) -> Tensor:
# Input projections
batch_size, point_batch_size = query.shape[:2]
new_shape = (batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim)
query = self.q_proj(query).view(*new_shape).transpose(1, 2)
key = self.k_proj(key).view(*new_shape).transpose(1, 2)
value = self.v_proj(value).view(*new_shape).transpose(1, 2)
cos, sin = position_embeddings
# Apply rotary position encoding for self-attention
query, key = apply_rotary_pos_emb_2d_self_attn(query, key, cos=cos, sin=sin)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query,
key,
value,
attention_mask=None,
dropout=0.0 if not self.training else self.dropout_p,
scaling=self.scaling,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.reshape(
batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim
).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| EdgeTamVideoRoPESelfAttention |
python | wandb__wandb | wandb/apis/public/artifacts.py | {
"start": 23827,
"end": 23944
} | class ____(ConnectionWithTotal[TNode]):
edges: List[_ArtifactEdgeGeneric] # noqa: UP006
| _ArtifactConnectionGeneric |
python | marshmallow-code__marshmallow | src/marshmallow/fields.py | {
"start": 46293,
"end": 47534
} | class ____(DateTime):
"""A formatted naive datetime string.
:param format: See :class:`DateTime`.
:param timezone: Used on deserialization. If `None`,
aware datetimes are rejected. If not `None`, aware datetimes are
converted to this timezone before their timezone information is
removed.
:param kwargs: The same keyword arguments that :class:`Field` receives.
.. versionadded:: 3.0.0rc9
"""
AWARENESS = "naive"
def __init__(
self,
format: str | None = None, # noqa: A002
*,
timezone: dt.timezone | None = None,
**kwargs: Unpack[_BaseFieldKwargs],
) -> None:
super().__init__(format=format, **kwargs)
self.timezone = timezone
def _deserialize(self, value, attr, data, **kwargs) -> dt.datetime:
ret = super()._deserialize(value, attr, data, **kwargs)
if utils.is_aware(ret):
if self.timezone is None:
raise self.make_error(
"invalid_awareness",
awareness=self.AWARENESS,
obj_type=self.OBJ_TYPE,
)
ret = ret.astimezone(self.timezone).replace(tzinfo=None)
return ret
| NaiveDateTime |
python | scipy__scipy | scipy/stats/tests/test_morestats.py | {
"start": 100055,
"end": 105931
} | class ____:
def test_fixed_lmbda(self):
x = _old_loggamma_rvs(5, size=50, random_state=12345) + 5
xt = stats.boxcox(x, lmbda=1)
assert_allclose(xt, x - 1)
xt = stats.boxcox(x, lmbda=-1)
assert_allclose(xt, 1 - 1/x)
xt = stats.boxcox(x, lmbda=0)
assert_allclose(xt, np.log(x))
# Also test that array_like input works
xt = stats.boxcox(list(x), lmbda=0)
assert_allclose(xt, np.log(x))
# test that constant input is accepted; see gh-12225
xt = stats.boxcox(np.ones(10), 2)
assert_equal(xt, np.zeros(10))
def test_lmbda_None(self):
# Start from normal rv's, do inverse transform to check that
# optimization function gets close to the right answer.
lmbda = 2.5
x = stats.norm.rvs(loc=10, size=50000, random_state=1245)
x_inv = (x * lmbda + 1)**(-lmbda)
xt, maxlog = stats.boxcox(x_inv)
assert_almost_equal(maxlog, -1 / lmbda, decimal=2)
def test_alpha(self):
rng = np.random.RandomState(1234)
x = _old_loggamma_rvs(5, size=50, random_state=rng) + 5
# Some regular values for alpha, on a small sample size
_, _, interval = stats.boxcox(x, alpha=0.75)
assert_allclose(interval, [4.004485780226041, 5.138756355035744])
_, _, interval = stats.boxcox(x, alpha=0.05)
assert_allclose(interval, [1.2138178554857557, 8.209033272375663])
# Try some extreme values, see we don't hit the N=500 limit
x = _old_loggamma_rvs(7, size=500, random_state=rng) + 15
_, _, interval = stats.boxcox(x, alpha=0.001)
assert_allclose(interval, [0.3988867, 11.40553131])
_, _, interval = stats.boxcox(x, alpha=0.999)
assert_allclose(interval, [5.83316246, 5.83735292])
def test_boxcox_bad_arg(self):
# Raise ValueError if any data value is negative.
x = np.array([-1, 2])
assert_raises(ValueError, stats.boxcox, x)
# Raise ValueError if data is constant.
assert_raises(ValueError, stats.boxcox, np.array([1]))
# Raise ValueError if data is not 1-dimensional.
assert_raises(ValueError, stats.boxcox, np.array([[1], [2]]))
def test_empty(self):
assert_(stats.boxcox([]).shape == (0,))
def test_gh_6873(self):
# Regression test for gh-6873.
y, lam = stats.boxcox(_boxcox_data)
# The expected value of lam was computed with the function
# powerTransform in the R library 'car'. I trust that value
# to only about five significant digits.
assert_allclose(lam, -0.051654, rtol=1e-5)
@pytest.mark.parametrize("bounds", [(-1, 1), (1.1, 2), (-2, -1.1)])
def test_bounded_optimizer_within_bounds(self, bounds):
# Define custom optimizer with bounds.
def optimizer(fun):
return optimize.minimize_scalar(fun, bounds=bounds,
method="bounded")
_, lmbda = stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
assert bounds[0] < lmbda < bounds[1]
def test_bounded_optimizer_against_unbounded_optimizer(self):
# Test whether setting bounds on optimizer excludes solution from
# unbounded optimizer.
# Get unbounded solution.
_, lmbda = stats.boxcox(_boxcox_data, lmbda=None)
# Set tolerance and bounds around solution.
bounds = (lmbda + 0.1, lmbda + 1)
options = {'xatol': 1e-12}
def optimizer(fun):
return optimize.minimize_scalar(fun, bounds=bounds,
method="bounded", options=options)
# Check bounded solution. Lower bound should be active.
_, lmbda_bounded = stats.boxcox(_boxcox_data, lmbda=None,
optimizer=optimizer)
assert lmbda_bounded != lmbda
assert_allclose(lmbda_bounded, bounds[0])
@pytest.mark.parametrize("optimizer", ["str", (1, 2), 0.1])
def test_bad_optimizer_type_raises_error(self, optimizer):
# Check if error is raised if string, tuple or float is passed
with pytest.raises(ValueError, match="`optimizer` must be a callable"):
stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
def test_bad_optimizer_value_raises_error(self):
# Check if error is raised if `optimizer` function does not return
# `OptimizeResult` object
# Define test function that always returns 1
def optimizer(fun):
return 1
message = "return an object containing the optimal `lmbda`"
with pytest.raises(ValueError, match=message):
stats.boxcox(_boxcox_data, lmbda=None, optimizer=optimizer)
@pytest.mark.parametrize(
"bad_x", [np.array([1, -42, 12345.6]), np.array([np.nan, 42, 1])]
)
def test_negative_x_value_raises_error(self, bad_x):
"""Test boxcox_normmax raises ValueError if x contains non-positive values."""
message = "only positive, finite, real numbers"
with pytest.raises(ValueError, match=message):
stats.boxcox_normmax(bad_x)
@pytest.mark.parametrize('x', [
# Attempt to trigger overflow in power expressions.
np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0,
2009.0, 1980.0, 1999.0, 2007.0, 1991.0]),
# Attempt to trigger overflow with a large optimal lambda.
np.array([2003.0, 1950.0, 1997.0, 2000.0, 2009.0]),
# Attempt to trigger overflow with large data.
np.array([2003.0e200, 1950.0e200, 1997.0e200, 2000.0e200, 2009.0e200])
])
def test_overflow(self, x):
with pytest.warns(UserWarning, match="The optimal lambda is"):
xt_bc, lam_bc = stats.boxcox(x)
assert np.all(np.isfinite(xt_bc))
| TestBoxcox |
python | tensorflow__tensorflow | tensorflow/python/ops/linalg/linear_operator_tridiag.py | {
"start": 1658,
"end": 15417
} | class ____(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] square tridiagonal matrix.
This operator acts like a [batch] square tridiagonal matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x M` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
Example usage:
Create a 3 x 3 tridiagonal linear operator.
>>> superdiag = [3., 4., 5.]
>>> diag = [1., -1., 2.]
>>> subdiag = [6., 7., 8]
>>> operator = tf.linalg.LinearOperatorTridiag(
... [superdiag, diag, subdiag],
... diagonals_format='sequence')
>>> operator.to_dense()
<tf.Tensor: shape=(3, 3), dtype=float32, numpy=
array([[ 1., 3., 0.],
[ 7., -1., 4.],
[ 0., 8., 2.]], dtype=float32)>
>>> operator.shape
TensorShape([3, 3])
Scalar Tensor output.
>>> operator.log_abs_determinant()
<tf.Tensor: shape=(), dtype=float32, numpy=4.3307333>
Create a [2, 3] batch of 4 x 4 linear operators.
>>> diagonals = tf.random.normal(shape=[2, 3, 3, 4])
>>> operator = tf.linalg.LinearOperatorTridiag(
... diagonals,
... diagonals_format='compact')
Create a shape [2, 1, 4, 2] vector. Note that this shape is compatible
since the batch dimensions, [2, 1], are broadcast to
operator.batch_shape = [2, 3].
>>> y = tf.random.normal(shape=[2, 1, 4, 2])
>>> x = operator.solve(y)
>>> x
<tf.Tensor: shape=(2, 3, 4, 2), dtype=float32, numpy=...,
dtype=float32)>
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb].
```
#### Performance
Suppose `operator` is a `LinearOperatorTridiag` of shape `[N, N]`,
and `x.shape = [N, R]`. Then
* `operator.matmul(x)` will take O(N * R) time.
* `operator.solve(x)` will take O(N * R) time.
If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
diagonals,
diagonals_format=_COMPACT,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name='LinearOperatorTridiag'):
r"""Initialize a `LinearOperatorTridiag`.
Args:
diagonals: `Tensor` or list of `Tensor`s depending on `diagonals_format`.
If `diagonals_format=sequence`, this is a list of three `Tensor`'s each
with shape `[B1, ..., Bb, N]`, `b >= 0, N >= 0`, representing the
superdiagonal, diagonal and subdiagonal in that order. Note the
superdiagonal is padded with an element in the last position, and the
subdiagonal is padded with an element in the front.
If `diagonals_format=matrix` this is a `[B1, ... Bb, N, N]` shaped
`Tensor` representing the full tridiagonal matrix.
If `diagonals_format=compact` this is a `[B1, ... Bb, 3, N]` shaped
`Tensor` with the second to last dimension indexing the
superdiagonal, diagonal and subdiagonal in that order. Note the
superdiagonal is padded with an element in the last position, and the
subdiagonal is padded with an element in the front.
In every case, these `Tensor`s are all floating dtype.
diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is
`compact`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. If `diag.dtype` is real, this is auto-set to `True`.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`.
Raises:
TypeError: If `diag.dtype` is not an allowed type.
ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`.
"""
parameters = dict(
diagonals=diagonals,
diagonals_format=diagonals_format,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
with ops.name_scope(name, values=[diagonals]):
if diagonals_format not in _DIAGONAL_FORMATS:
raise ValueError(
f'Argument `diagonals_format` must be one of compact, matrix, or '
f'sequence. Received : {diagonals_format}.')
if diagonals_format == _SEQUENCE:
self._diagonals = [linear_operator_util.convert_nonref_to_tensor(
d, name='diag_{}'.format(i)) for i, d in enumerate(diagonals)]
dtype = self._diagonals[0].dtype
else:
self._diagonals = linear_operator_util.convert_nonref_to_tensor(
diagonals, name='diagonals')
dtype = self._diagonals.dtype
self._diagonals_format = diagonals_format
super(LinearOperatorTridiag, self).__init__(
dtype=dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
def _shape(self):
if self.diagonals_format == _MATRIX:
return self.diagonals.shape
if self.diagonals_format == _COMPACT:
# Remove the second to last dimension that contains the value 3.
d_shape = self.diagonals.shape[:-2].concatenate(
self.diagonals.shape[-1])
else:
broadcast_shape = array_ops.broadcast_static_shape(
self.diagonals[0].shape[:-1],
self.diagonals[1].shape[:-1])
broadcast_shape = array_ops.broadcast_static_shape(
broadcast_shape,
self.diagonals[2].shape[:-1])
d_shape = broadcast_shape.concatenate(self.diagonals[1].shape[-1])
return d_shape.concatenate(d_shape[-1])
def _shape_tensor(self, diagonals=None):
diagonals = diagonals if diagonals is not None else self.diagonals
if self.diagonals_format == _MATRIX:
return array_ops.shape(diagonals)
if self.diagonals_format == _COMPACT:
d_shape = array_ops.shape(diagonals[..., 0, :])
else:
broadcast_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(self.diagonals[0])[:-1],
array_ops.shape(self.diagonals[1])[:-1])
broadcast_shape = array_ops.broadcast_dynamic_shape(
broadcast_shape,
array_ops.shape(self.diagonals[2])[:-1])
d_shape = array_ops.concat(
[broadcast_shape, [array_ops.shape(self.diagonals[1])[-1]]], axis=0)
return array_ops.concat([d_shape, [d_shape[-1]]], axis=-1)
def _assert_self_adjoint(self):
# Check the diagonal has non-zero imaginary, and the super and subdiagonals
# are conjugate.
asserts = []
diag_message = (
'This tridiagonal operator contained non-zero '
'imaginary values on the diagonal.')
off_diag_message = (
'This tridiagonal operator has non-conjugate '
'subdiagonal and superdiagonal.')
if self.diagonals_format == _MATRIX:
asserts += [check_ops.assert_equal(
self.diagonals, linalg.adjoint(self.diagonals),
message='Matrix was not equal to its adjoint.')]
elif self.diagonals_format == _COMPACT:
diagonals = tensor_conversion.convert_to_tensor_v2_with_dispatch(
self.diagonals
)
asserts += [linear_operator_util.assert_zero_imag_part(
diagonals[..., 1, :], message=diag_message)]
# Roll the subdiagonal so the shifted argument is at the end.
subdiag = manip_ops.roll(diagonals[..., 2, :], shift=-1, axis=-1)
asserts += [check_ops.assert_equal(
math_ops.conj(subdiag[..., :-1]),
diagonals[..., 0, :-1],
message=off_diag_message)]
else:
asserts += [linear_operator_util.assert_zero_imag_part(
self.diagonals[1], message=diag_message)]
subdiag = manip_ops.roll(self.diagonals[2], shift=-1, axis=-1)
asserts += [check_ops.assert_equal(
math_ops.conj(subdiag[..., :-1]),
self.diagonals[0][..., :-1],
message=off_diag_message)]
return control_flow_ops.group(asserts)
def _construct_adjoint_diagonals(self, diagonals):
# Constructs adjoint tridiagonal matrix from diagonals.
if self.diagonals_format == _SEQUENCE:
diagonals = [math_ops.conj(d) for d in reversed(diagonals)]
# The subdiag and the superdiag swap places, so we need to shift the
# padding argument.
diagonals[0] = manip_ops.roll(diagonals[0], shift=-1, axis=-1)
diagonals[2] = manip_ops.roll(diagonals[2], shift=1, axis=-1)
return diagonals
elif self.diagonals_format == _MATRIX:
return linalg.adjoint(diagonals)
else:
diagonals = math_ops.conj(diagonals)
superdiag, diag, subdiag = array_ops_stack.unstack(
diagonals, num=3, axis=-2)
# The subdiag and the superdiag swap places, so we need
# to shift all arguments.
new_superdiag = manip_ops.roll(subdiag, shift=-1, axis=-1)
new_subdiag = manip_ops.roll(superdiag, shift=1, axis=-1)
return array_ops_stack.stack([new_superdiag, diag, new_subdiag], axis=-2)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
diagonals = self.diagonals
if adjoint:
diagonals = self._construct_adjoint_diagonals(diagonals)
x = linalg.adjoint(x) if adjoint_arg else x
return linalg.tridiagonal_matmul(
diagonals, x,
diagonals_format=self.diagonals_format)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
diagonals = self.diagonals
if adjoint:
diagonals = self._construct_adjoint_diagonals(diagonals)
# TODO(b/144860784): Remove the broadcasting code below once
# tridiagonal_solve broadcasts.
rhs_shape = array_ops.shape(rhs)
k = self._shape_tensor(diagonals)[-1]
broadcast_shape = array_ops.broadcast_dynamic_shape(
self._shape_tensor(diagonals)[:-2], rhs_shape[:-2])
rhs = array_ops.broadcast_to(
rhs, array_ops.concat(
[broadcast_shape, rhs_shape[-2:]], axis=-1))
if self.diagonals_format == _MATRIX:
diagonals = array_ops.broadcast_to(
diagonals, array_ops.concat(
[broadcast_shape, [k, k]], axis=-1))
elif self.diagonals_format == _COMPACT:
diagonals = array_ops.broadcast_to(
diagonals, array_ops.concat(
[broadcast_shape, [3, k]], axis=-1))
else:
diagonals = [
array_ops.broadcast_to(d, array_ops.concat(
[broadcast_shape, [k]], axis=-1)) for d in diagonals]
y = linalg.tridiagonal_solve(
diagonals, rhs,
diagonals_format=self.diagonals_format,
transpose_rhs=adjoint_arg,
conjugate_rhs=adjoint_arg)
return y
def _diag_part(self):
if self.diagonals_format == _MATRIX:
return array_ops.matrix_diag_part(self.diagonals)
elif self.diagonals_format == _SEQUENCE:
diagonal = self.diagonals[1]
return array_ops.broadcast_to(
diagonal, self.shape_tensor()[:-1])
else:
return self.diagonals[..., 1, :]
def _to_dense(self):
if self.diagonals_format == _MATRIX:
return self.diagonals
if self.diagonals_format == _COMPACT:
return gen_array_ops.matrix_diag_v3(
self.diagonals,
k=(-1, 1),
num_rows=-1,
num_cols=-1,
align='LEFT_RIGHT',
padding_value=0.)
diagonals = [
tensor_conversion.convert_to_tensor_v2_with_dispatch(d)
for d in self.diagonals
]
diagonals = array_ops_stack.stack(diagonals, axis=-2)
return gen_array_ops.matrix_diag_v3(
diagonals,
k=(-1, 1),
num_rows=-1,
num_cols=-1,
align='LEFT_RIGHT',
padding_value=0.)
@property
def diagonals(self):
return self._diagonals
@property
def diagonals_format(self):
return self._diagonals_format
@property
def _composite_tensor_fields(self):
return ('diagonals', 'diagonals_format')
@property
def _experimental_parameter_ndims_to_matrix_ndims(self):
diagonal_event_ndims = 2
if self.diagonals_format == _SEQUENCE:
# For the diagonal and the super/sub diagonals.
diagonal_event_ndims = [1, 1, 1]
return {
'diagonals': diagonal_event_ndims,
}
| LinearOperatorTridiag |
python | huggingface__transformers | src/transformers/models/longformer/configuration_longformer.py | {
"start": 822,
"end": 6268
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LongformerModel`]. It
is used to instantiate a Longformer model according to the specified arguments, defining the model architecture.
This is the configuration class to store the configuration of a [`LongformerModel`]. It is used to instantiate an
Longformer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the LongFormer
[allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) architecture with a sequence
length 4,096.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the Longformer model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`LongformerModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`LongformerModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
attention_window (`int` or `list[int]`, *optional*, defaults to 512):
Size of an attention window around each token. If an `int`, use the same size for all layers. To specify a
different window size for each layer, use a `list[int]` where `len(attention_window) == num_hidden_layers`.
Example:
```python
>>> from transformers import LongformerConfig, LongformerModel
>>> # Initializing a Longformer configuration
>>> configuration = LongformerConfig()
>>> # Initializing a model from the configuration
>>> model = LongformerModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "longformer"
def __init__(
self,
attention_window: Union[list[int], int] = 512,
sep_token_id: int = 2,
pad_token_id: int = 1,
bos_token_id: int = 0,
eos_token_id: int = 2,
vocab_size: int = 30522,
hidden_size: int = 768,
num_hidden_layers: int = 12,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
hidden_act: str = "gelu",
hidden_dropout_prob: float = 0.1,
attention_probs_dropout_prob: float = 0.1,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
onnx_export: bool = False,
**kwargs,
):
"""Constructs LongformerConfig."""
super().__init__(pad_token_id=pad_token_id, **kwargs)
self.attention_window = attention_window
self.sep_token_id = sep_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.onnx_export = onnx_export
__all__ = ["LongformerConfig"]
| LongformerConfig |
python | pypa__pipenv | pipenv/patched/pip/_internal/models/pylock.py | {
"start": 1234,
"end": 1489
} | class ____:
url: Optional[str]
# (not supported) path: Optional[str]
# (not supported) size: Optional[int]
# (not supported) upload_time: Optional[datetime]
hashes: Dict[str, str]
subdirectory: Optional[str]
@dataclass
| PackageArchive |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 194043,
"end": 195029
} | class ____:
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
@pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc")
def test_add_same_docstring(self):
# test for attributes (which are C-level defined)
ncu.add_docstring(np.ndarray.flat, np.ndarray.flat.__doc__)
# And typical functions:
def func():
"""docstring"""
return
ncu.add_docstring(func, func.__doc__)
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
def test_different_docstring_fails(self):
# test for attributes (which are C-level defined)
with assert_raises(RuntimeError):
ncu.add_docstring(np.ndarray.flat, "different docstring")
# And typical functions:
def func():
"""docstring"""
return
with assert_raises(RuntimeError):
ncu.add_docstring(func, "different docstring")
| TestAddDocstring |
python | scipy__scipy | scipy/stats/_covariance.py | {
"start": 18374,
"end": 19509
} | class ____(Covariance):
def __init__(self, diagonal):
diagonal = self._validate_vector(diagonal, 'diagonal')
i_zero = diagonal <= 0
positive_diagonal = np.array(diagonal, dtype=np.float64)
positive_diagonal[i_zero] = 1 # ones don't affect determinant
self._log_pdet = np.sum(np.log(positive_diagonal), axis=-1)
psuedo_reciprocals = 1 / np.sqrt(positive_diagonal)
psuedo_reciprocals[i_zero] = 0
self._sqrt_diagonal = np.sqrt(diagonal)
self._LP = psuedo_reciprocals
self._rank = positive_diagonal.shape[-1] - i_zero.sum(axis=-1)
self._covariance = np.apply_along_axis(np.diag, -1, diagonal)
self._i_zero = i_zero
self._shape = self._covariance.shape
self._allow_singular = True
def _whiten(self, x):
return _dot_diag(x, self._LP)
def _colorize(self, x):
return _dot_diag(x, self._sqrt_diagonal)
def _support_mask(self, x):
"""
Check whether x lies in the support of the distribution.
"""
return ~np.any(_dot_diag(x, self._i_zero), axis=-1)
| CovViaDiagonal |
python | pytest-dev__pytest | testing/test_terminal.py | {
"start": 92155,
"end": 98743
} | class ____:
def test_code_highlight_simple(self, pytester: Pytester, color_mapping) -> None:
pytester.makepyfile(
"""
def test_foo():
assert 1 == 10
"""
)
result = pytester.runpytest("--color=yes")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
[
" {reset}{kw}def{hl-reset}{kwspace}{function}test_foo{hl-reset}():{endline}",
"> {kw}assert{hl-reset} {number}1{hl-reset} == {number}10{hl-reset}{endline}",
"{bold}{red}E assert 1 == 10{reset}",
]
)
)
def test_code_highlight_continuation(
self, pytester: Pytester, color_mapping
) -> None:
pytester.makepyfile(
"""
def test_foo():
print('''
'''); assert 0
"""
)
result = pytester.runpytest("--color=yes")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
[
" {reset}{kw}def{hl-reset}{kwspace}{function}test_foo{hl-reset}():{endline}",
" {print}print{hl-reset}({str}'''{hl-reset}{str}{hl-reset}",
"> {str} {hl-reset}{str}'''{hl-reset}); {kw}assert{hl-reset} {number}0{hl-reset}{endline}",
"{bold}{red}E assert 0{reset}",
]
)
)
def test_code_highlight_custom_theme(
self, pytester: Pytester, color_mapping, monkeypatch: MonkeyPatch
) -> None:
pytester.makepyfile(
"""
def test_foo():
assert 1 == 10
"""
)
monkeypatch.setenv("PYTEST_THEME", "solarized-dark")
monkeypatch.setenv("PYTEST_THEME_MODE", "dark")
result = pytester.runpytest("--color=yes")
result.stdout.fnmatch_lines(
color_mapping.format_for_fnmatch(
[
" {reset}{kw}def{hl-reset}{kwspace}{function}test_foo{hl-reset}():{endline}",
"> {kw}assert{hl-reset} {number}1{hl-reset} == {number}10{hl-reset}{endline}",
"{bold}{red}E assert 1 == 10{reset}",
]
)
)
def test_code_highlight_invalid_theme(
self, pytester: Pytester, color_mapping, monkeypatch: MonkeyPatch
) -> None:
pytester.makepyfile(
"""
def test_foo():
assert 1 == 10
"""
)
monkeypatch.setenv("PYTEST_THEME", "invalid")
result = pytester.runpytest_subprocess("--color=yes")
result.stderr.fnmatch_lines(
"ERROR: PYTEST_THEME environment variable has an invalid value: 'invalid'. "
"Hint: See available pygments styles with `pygmentize -L styles`."
)
def test_code_highlight_invalid_theme_mode(
self, pytester: Pytester, color_mapping, monkeypatch: MonkeyPatch
) -> None:
pytester.makepyfile(
"""
def test_foo():
assert 1 == 10
"""
)
monkeypatch.setenv("PYTEST_THEME_MODE", "invalid")
result = pytester.runpytest_subprocess("--color=yes")
result.stderr.fnmatch_lines(
"ERROR: PYTEST_THEME_MODE environment variable has an invalid value: 'invalid'. "
"The allowed values are 'dark' (default) and 'light'."
)
def test_raw_skip_reason_skipped() -> None:
report = SimpleNamespace()
report.skipped = True
report.longrepr = ("xyz", 3, "Skipped: Just so")
reason = _get_raw_skip_reason(cast(TestReport, report))
assert reason == "Just so"
def test_raw_skip_reason_xfail() -> None:
report = SimpleNamespace()
report.wasxfail = "reason: To everything there is a season"
reason = _get_raw_skip_reason(cast(TestReport, report))
assert reason == "To everything there is a season"
def test_format_trimmed() -> None:
msg = "unconditional skip"
assert _format_trimmed(" ({}) ", msg, len(msg) + 4) == " (unconditional skip) "
assert _format_trimmed(" ({}) ", msg, len(msg) + 3) == " (unconditional ...) "
def test_warning_when_init_trumps_pyproject_toml(
pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
"""Regression test for #7814."""
tests = pytester.path.joinpath("tests")
tests.mkdir()
pytester.makepyprojecttoml(
f"""
[tool.pytest.ini_options]
testpaths = ['{tests}']
"""
)
pytester.makefile(".ini", pytest="")
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"configfile: pytest.ini (WARNING: ignoring pytest config in pyproject.toml!)",
]
)
def test_warning_when_init_trumps_multiple_files(
pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
"""Regression test for #7814."""
tests = pytester.path.joinpath("tests")
tests.mkdir()
pytester.makepyprojecttoml(
f"""
[tool.pytest.ini_options]
testpaths = ['{tests}']
"""
)
pytester.makefile(".ini", pytest="")
pytester.makeini(
"""
# tox.ini
[pytest]
minversion = 6.0
addopts = -ra -q
testpaths =
tests
integration
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"configfile: pytest.ini (WARNING: ignoring pytest config in pyproject.toml, tox.ini!)",
]
)
def test_no_warning_when_init_but_pyproject_toml_has_no_entry(
pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
"""Regression test for #7814."""
tests = pytester.path.joinpath("tests")
tests.mkdir()
pytester.makepyprojecttoml(
f"""
[tool]
testpaths = ['{tests}']
"""
)
pytester.makefile(".ini", pytest="")
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"configfile: pytest.ini",
]
)
def test_no_warning_on_terminal_with_a_single_config_file(
pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
"""Regression test for #7814."""
tests = pytester.path.joinpath("tests")
tests.mkdir()
pytester.makepyprojecttoml(
f"""
[tool.pytest.ini_options]
testpaths = ['{tests}']
"""
)
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"configfile: pyproject.toml",
]
)
| TestCodeHighlight |
python | pytorch__pytorch | test/distributed/elastic/rendezvous/utils_test.py | {
"start": 579,
"end": 9512
} | class ____(TestCase):
def test_parse_rendezvous_config_returns_dict(self) -> None:
expected_config = {
"a": "dummy1",
"b": "dummy2",
"c": "dummy3=dummy4",
"d": "dummy5/dummy6",
}
config = _parse_rendezvous_config(
" b= dummy2 ,c=dummy3=dummy4, a =dummy1,d=dummy5/dummy6"
)
self.assertEqual(config, expected_config)
def test_parse_rendezvous_returns_empty_dict_if_str_is_empty(self) -> None:
config_strs = ["", " "]
for config_str in config_strs:
with self.subTest(config_str=config_str):
config = _parse_rendezvous_config(config_str)
self.assertEqual(config, {})
def test_parse_rendezvous_raises_error_if_str_is_invalid(self) -> None:
config_strs = [
"a=dummy1,",
"a=dummy1,,c=dummy2",
"a=dummy1, ,c=dummy2",
"a=dummy1,= ,c=dummy2",
"a=dummy1, = ,c=dummy2",
"a=dummy1, =,c=dummy2",
" , ",
]
for config_str in config_strs:
with self.subTest(config_str=config_str):
with self.assertRaisesRegex(
ValueError,
r"^The rendezvous configuration string must be in format "
r"<key1>=<value1>,...,<keyN>=<valueN>.$",
):
_parse_rendezvous_config(config_str)
def test_parse_rendezvous_raises_error_if_value_is_empty(self) -> None:
config_strs = [
"b=dummy1,a,c=dummy2",
"b=dummy1,c=dummy2,a",
"b=dummy1,a=,c=dummy2",
" a ",
]
for config_str in config_strs:
with self.subTest(config_str=config_str):
with self.assertRaisesRegex(
ValueError,
r"^The rendezvous configuration option 'a' must have a value specified.$",
):
_parse_rendezvous_config(config_str)
def test_try_parse_port_returns_port(self) -> None:
port = _try_parse_port("123")
self.assertEqual(port, 123)
def test_try_parse_port_returns_none_if_str_is_invalid(self) -> None:
port_strs = [
"",
" ",
" 1",
"1 ",
" 1 ",
"abc",
]
for port_str in port_strs:
with self.subTest(port_str=port_str):
port = _try_parse_port(port_str)
self.assertIsNone(port)
def test_parse_rendezvous_endpoint_returns_tuple(self) -> None:
endpoints = [
"dummy.com:0",
"dummy.com:123",
"dummy.com:65535",
"dummy-1.com:0",
"dummy-1.com:123",
"dummy-1.com:65535",
"123.123.123.123:0",
"123.123.123.123:123",
"123.123.123.123:65535",
"[2001:db8::1]:0",
"[2001:db8::1]:123",
"[2001:db8::1]:65535",
]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
host, port = parse_rendezvous_endpoint(endpoint, default_port=123)
expected_host, expected_port = endpoint.rsplit(":", 1)
if expected_host[0] == "[" and expected_host[-1] == "]":
expected_host = expected_host[1:-1]
self.assertEqual(host, expected_host)
self.assertEqual(port, int(expected_port))
def test_parse_rendezvous_endpoint_returns_tuple_if_endpoint_has_no_port(
self,
) -> None:
endpoints = ["dummy.com", "dummy-1.com", "123.123.123.123", "[2001:db8::1]"]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
host, port = parse_rendezvous_endpoint(endpoint, default_port=123)
expected_host = endpoint
if expected_host[0] == "[" and expected_host[-1] == "]":
expected_host = expected_host[1:-1]
self.assertEqual(host, expected_host)
self.assertEqual(port, 123)
def test_parse_rendezvous_endpoint_returns_tuple_if_endpoint_is_empty(self) -> None:
endpoints = ["", " "]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
host, port = parse_rendezvous_endpoint("", default_port=123)
self.assertEqual(host, "localhost")
self.assertEqual(port, 123)
def test_parse_rendezvous_endpoint_raises_error_if_hostname_is_invalid(
self,
) -> None:
endpoints = ["~", "dummy.com :123", "~:123", ":123"]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
with self.assertRaisesRegex(
ValueError,
rf"^The hostname of the rendezvous endpoint '{endpoint}' must be a "
r"dot-separated list of labels, an IPv4 address, or an IPv6 address.$",
):
parse_rendezvous_endpoint(endpoint, default_port=123)
def test_parse_rendezvous_endpoint_raises_error_if_port_is_invalid(self) -> None:
endpoints = ["dummy.com:", "dummy.com:abc", "dummy.com:-123", "dummy.com:-"]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
with self.assertRaisesRegex(
ValueError,
rf"^The port number of the rendezvous endpoint '{endpoint}' must be an integer "
r"between 0 and 65536.$",
):
parse_rendezvous_endpoint(endpoint, default_port=123)
def test_parse_rendezvous_endpoint_raises_error_if_port_is_too_big(self) -> None:
endpoints = ["dummy.com:65536", "dummy.com:70000"]
for endpoint in endpoints:
with self.subTest(endpoint=endpoint):
with self.assertRaisesRegex(
ValueError,
rf"^The port number of the rendezvous endpoint '{endpoint}' must be an integer "
r"between 0 and 65536.$",
):
parse_rendezvous_endpoint(endpoint, default_port=123)
def test_matches_machine_hostname_returns_true_if_hostname_is_loopback(
self,
) -> None:
hosts = [
"localhost",
"127.0.0.1",
"::1",
"0000:0000:0000:0000:0000:0000:0000:0001",
]
for host in hosts:
with self.subTest(host=host):
self.assertTrue(_matches_machine_hostname(host))
def test_matches_machine_hostname_returns_true_if_hostname_is_machine_hostname(
self,
) -> None:
host = socket.gethostname()
self.assertTrue(_matches_machine_hostname(host))
def test_matches_machine_hostname_returns_true_if_hostname_is_machine_fqdn(
self,
) -> None:
host = socket.getfqdn()
self.assertTrue(_matches_machine_hostname(host))
def test_matches_machine_hostname_returns_true_if_hostname_is_machine_address(
self,
) -> None:
addr_list = socket.getaddrinfo(
socket.gethostname(), None, proto=socket.IPPROTO_TCP
)
for addr in (addr_info[4][0] for addr_info in addr_list):
with self.subTest(addr=addr):
self.assertTrue(_matches_machine_hostname(addr))
def test_matches_machine_hostname_returns_false_if_hostname_does_not_match(
self,
) -> None:
hosts = ["dummy", "0.0.0.0", "::2"]
for host in hosts:
with self.subTest(host=host):
self.assertFalse(_matches_machine_hostname(host))
def test_delay_suspends_thread(self) -> None:
for seconds in 0.2, (0.2, 0.4):
with self.subTest(seconds=seconds):
time1 = time.monotonic()
_delay(seconds) # type: ignore[arg-type]
time2 = time.monotonic()
self.assertGreaterEqual(time2 - time1, 0.2)
@patch(
"socket.getaddrinfo",
side_effect=[
[(None, None, 0, "a_host", ("1.2.3.4", 0))],
[(None, None, 0, "a_different_host", ("1.2.3.4", 0))],
],
)
def test_matches_machine_hostname_returns_true_if_ip_address_match_between_hosts(
self,
_0,
) -> None:
self.assertTrue(_matches_machine_hostname("a_host"))
@patch(
"socket.getaddrinfo",
side_effect=[
[(None, None, 0, "a_host", ("1.2.3.4", 0))],
[(None, None, 0, "another_host_with_different_ip", ("1.2.3.5", 0))],
],
)
def test_matches_machine_hostname_returns_false_if_ip_address_not_match_between_hosts(
self,
_0,
) -> None:
self.assertFalse(_matches_machine_hostname("a_host"))
| UtilsTest |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/aot_autograd_result.py | {
"start": 2375,
"end": 4209
} | class ____(InductorOutput[TOutputCode], Generic[TOutputCode]):
"""
A generic wrapper for OutputCode objects that are bundled directly in the cache
(rather than looked up via FxGraphCache).
This works for any OutputCode subclass (CompiledFxGraph, RegionalOutputCode, etc.)
"""
result: TOutputCode
def pre_save(self) -> None:
disk_result = copy(self.result)
disk_result.prepare_for_serialization()
self.result = disk_result
return
def load(self, example_inputs) -> TOutputCode:
self.example_inputs = example_inputs
return self.result
def post_compile(
self, result: TOutputCode, fx_config: _CompileFxKwargs
) -> TOutputCode:
constants = CompiledFxGraphConstants()
# Special handling for CompiledFxGraph - needs FxGraphCache.cache_hit_post_compile
if isinstance(result, CompiledFxGraph):
graph, cache_info = FxGraphCache.cache_hit_post_compile(
result, {}, constants
)
if graph is None:
raise RuntimeError("Failed to reload cache entry from disk")
torch._logging.trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "fx_graph_bundled_cache_hit", # always a hit
"encoding": "json",
},
payload_fn=lambda: json.dumps(cache_info),
)
result = graph # type: ignore[assignment]
# Run normal post compile
result.post_compile(self.example_inputs, constants, fx_config)
return result
# Backwards compatibility alias
CompiledFxGraphLoadable: type[BundledOutputCodeLoadable[CompiledFxGraph]] = (
BundledOutputCodeLoadable[CompiledFxGraph]
)
@dataclass
| BundledOutputCodeLoadable |
python | tiangolo__fastapi | fastapi/params.py | {
"start": 21478,
"end": 24596
} | class ____(Body): # type: ignore[misc]
def __init__(
self,
default: Any = Undefined,
*,
default_factory: Union[Callable[[], Any], None] = _Unset,
annotation: Optional[Any] = None,
media_type: str = "application/x-www-form-urlencoded",
alias: Optional[str] = None,
alias_priority: Union[int, None] = _Unset,
# TODO: update when deprecating Pydantic v1, import these types
# validation_alias: str | AliasPath | AliasChoices | None
validation_alias: Union[str, None] = None,
serialization_alias: Union[str, None] = None,
title: Optional[str] = None,
description: Optional[str] = None,
gt: Optional[float] = None,
ge: Optional[float] = None,
lt: Optional[float] = None,
le: Optional[float] = None,
min_length: Optional[int] = None,
max_length: Optional[int] = None,
pattern: Optional[str] = None,
regex: Annotated[
Optional[str],
deprecated(
"Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead."
),
] = None,
discriminator: Union[str, None] = None,
strict: Union[bool, None] = _Unset,
multiple_of: Union[float, None] = _Unset,
allow_inf_nan: Union[bool, None] = _Unset,
max_digits: Union[int, None] = _Unset,
decimal_places: Union[int, None] = _Unset,
examples: Optional[List[Any]] = None,
example: Annotated[
Optional[Any],
deprecated(
"Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, "
"although still supported. Use examples instead."
),
] = _Unset,
openapi_examples: Optional[Dict[str, Example]] = None,
deprecated: Union[deprecated, str, bool, None] = None,
include_in_schema: bool = True,
json_schema_extra: Union[Dict[str, Any], None] = None,
**extra: Any,
):
super().__init__(
default=default,
default_factory=default_factory,
annotation=annotation,
media_type=media_type,
alias=alias,
alias_priority=alias_priority,
validation_alias=validation_alias,
serialization_alias=serialization_alias,
title=title,
description=description,
gt=gt,
ge=ge,
lt=lt,
le=le,
min_length=min_length,
max_length=max_length,
pattern=pattern,
regex=regex,
discriminator=discriminator,
strict=strict,
multiple_of=multiple_of,
allow_inf_nan=allow_inf_nan,
max_digits=max_digits,
decimal_places=decimal_places,
deprecated=deprecated,
example=example,
examples=examples,
openapi_examples=openapi_examples,
include_in_schema=include_in_schema,
json_schema_extra=json_schema_extra,
**extra,
)
| Form |
python | getsentry__sentry | src/sentry/workflow_engine/processors/data_condition_group.py | {
"start": 6675,
"end": 6825
} | class ____:
logic_result: TriggerResult
condition: DataCondition
result: DataConditionResult
@dataclasses.dataclass()
| ProcessedDataCondition |
python | django-import-export__django-import-export | tests/core/models.py | {
"start": 3525,
"end": 3678
} | class ____(models.Model):
user = models.OneToOneField("auth.User", on_delete=models.CASCADE)
is_private = models.BooleanField(default=True)
| Profile |
python | networkx__networkx | networkx/algorithms/assortativity/tests/test_pairs.py | {
"start": 94,
"end": 1675
} | class ____(BaseTestAttributeMixing):
def test_node_attribute_xy_undirected(self):
attrxy = sorted(nx.node_attribute_xy(self.G, "fish"))
attrxy_result = sorted(
[
("one", "one"),
("one", "one"),
("two", "two"),
("two", "two"),
("one", "red"),
("red", "one"),
("blue", "two"),
("two", "blue"),
]
)
assert attrxy == attrxy_result
def test_node_attribute_xy_undirected_nodes(self):
attrxy = sorted(nx.node_attribute_xy(self.G, "fish", nodes=["one", "yellow"]))
attrxy_result = sorted([])
assert attrxy == attrxy_result
def test_node_attribute_xy_directed(self):
attrxy = sorted(nx.node_attribute_xy(self.D, "fish"))
attrxy_result = sorted(
[("one", "one"), ("two", "two"), ("one", "red"), ("two", "blue")]
)
assert attrxy == attrxy_result
def test_node_attribute_xy_multigraph(self):
attrxy = sorted(nx.node_attribute_xy(self.M, "fish"))
attrxy_result = [
("one", "one"),
("one", "one"),
("one", "one"),
("one", "one"),
("two", "two"),
("two", "two"),
]
assert attrxy == attrxy_result
def test_node_attribute_xy_selfloop(self):
attrxy = sorted(nx.node_attribute_xy(self.S, "fish"))
attrxy_result = [("one", "one"), ("two", "two")]
assert attrxy == attrxy_result
| TestAttributeMixingXY |
python | chroma-core__chroma | chromadb/segment/__init__.py | {
"start": 2358,
"end": 3010
} | class ____(SegmentImplementation):
"""Embedding Vector segment interface"""
@abstractmethod
def get_vectors(
self,
request_version_context: RequestVersionContext,
ids: Optional[Sequence[str]] = None,
) -> Sequence[VectorEmbeddingRecord]:
"""Get embeddings from the segment. If no IDs are provided, all embeddings are
returned."""
pass
@abstractmethod
def query_vectors(
self, query: VectorQuery
) -> Sequence[Sequence[VectorQueryResult]]:
"""Given a vector query, return the top-k nearest neighbors for vector in the
query."""
pass
| VectorReader |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/tools/_beta_runner.py | {
"start": 4163,
"end": 12228
} | class ____(BaseToolRunner[BetaRunnableTool, ResponseFormatT], Generic[RunnerItemT, ResponseFormatT], ABC):
def __init__(
self,
*,
params: ParseMessageCreateParamsBase[ResponseFormatT],
options: RequestOptions,
tools: Iterable[BetaRunnableTool],
client: Anthropic,
max_iterations: int | None = None,
compaction_control: CompactionControl | None = None,
) -> None:
super().__init__(
params=params,
options=options,
tools=tools,
max_iterations=max_iterations,
compaction_control=compaction_control,
)
self._client = client
self._iterator = self.__run__()
self._last_message: (
Callable[[], ParsedBetaMessage[ResponseFormatT]] | ParsedBetaMessage[ResponseFormatT] | None
) = None
def __next__(self) -> RunnerItemT:
return self._iterator.__next__()
def __iter__(self) -> Iterator[RunnerItemT]:
for item in self._iterator:
yield item
@abstractmethod
@contextmanager
def _handle_request(self) -> Iterator[RunnerItemT]:
raise NotImplementedError()
yield # type: ignore[unreachable]
def _check_and_compact(self) -> bool:
"""
Check token usage and compact messages if threshold exceeded.
Returns True if compaction was performed, False otherwise.
"""
if self._compaction_control is None or not self._compaction_control["enabled"]:
return False
message = self._get_last_message()
tokens_used = 0
if message is not None:
total_input_tokens = (
message.usage.input_tokens
+ (message.usage.cache_creation_input_tokens or 0)
+ (message.usage.cache_read_input_tokens or 0)
)
tokens_used = total_input_tokens + message.usage.output_tokens
threshold = self._compaction_control.get("context_token_threshold", DEFAULT_THRESHOLD)
if tokens_used < threshold:
return False
# Perform compaction
log.info(f"Token usage {tokens_used} has exceeded the threshold of {threshold}. Performing compaction.")
model = self._compaction_control.get("model", self._params["model"])
messages = list(self._params["messages"])
if messages[-1]["role"] == "assistant":
# Remove tool_use blocks from the last message to avoid 400 error
# (tool_use requires tool_result, which we don't have yet)
non_tool_blocks = [
block
for block in messages[-1]["content"]
if isinstance(block, dict) and block.get("type") != "tool_use"
]
if non_tool_blocks:
messages[-1]["content"] = non_tool_blocks
else:
messages.pop()
messages = [
*messages,
BetaMessageParam(
role="user",
content=self._compaction_control.get("summary_prompt", DEFAULT_SUMMARY_PROMPT),
),
]
response = self._client.beta.messages.create(
model=model,
messages=messages,
max_tokens=self._params["max_tokens"],
extra_headers={"X-Stainless-Helper": "compaction"},
)
log.info(f"Compaction complete. New token usage: {response.usage.output_tokens}")
first_content = list(response.content)[0]
if first_content.type != "text":
raise ValueError("Compaction response content is not of type 'text'")
self.set_messages_params(
lambda params: {
**params,
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": first_content.text,
}
],
}
],
}
)
return True
def __run__(self) -> Iterator[RunnerItemT]:
while not self._should_stop():
with self._handle_request() as item:
yield item
message = self._get_last_message()
assert message is not None
self._iteration_count += 1
# If the compaction was performed, skip tool call generation this iteration
if not self._check_and_compact():
response = self.generate_tool_call_response()
if response is None:
log.debug("Tool call was not requested, exiting from tool runner loop.")
return
if not self._messages_modified:
self.append_messages(message, response)
self._messages_modified = False
self._cached_tool_call_response = None
def until_done(self) -> ParsedBetaMessage[ResponseFormatT]:
"""
Consumes the tool runner stream and returns the last message if it has not been consumed yet.
If it has, it simply returns the last message.
"""
consume_sync_iterator(self)
last_message = self._get_last_message()
assert last_message is not None
return last_message
def generate_tool_call_response(self) -> BetaMessageParam | None:
"""Generate a MessageParam by calling tool functions with any tool use blocks from the last message.
Note the tool call response is cached, repeated calls to this method will return the same response.
None can be returned if no tool call was applicable.
"""
if self._cached_tool_call_response is not None:
log.debug("Returning cached tool call response.")
return self._cached_tool_call_response
response = self._generate_tool_call_response()
self._cached_tool_call_response = response
return response
def _generate_tool_call_response(self) -> BetaMessageParam | None:
content = self._get_last_assistant_message_content()
if not content:
return None
tool_use_blocks = [block for block in content if block.type == "tool_use"]
if not tool_use_blocks:
return None
results: list[BetaToolResultBlockParam] = []
for tool_use in tool_use_blocks:
tool = self._tools_by_name.get(tool_use.name)
if tool is None:
results.append(
{
"type": "tool_result",
"tool_use_id": tool_use.id,
"content": f"Error: Tool '{tool_use.name}' not found",
"is_error": True,
}
)
continue
try:
result = tool.call(tool_use.input)
results.append({"type": "tool_result", "tool_use_id": tool_use.id, "content": result})
except Exception as exc:
log.exception(f"Error occurred while calling tool: {tool.name}", exc_info=exc)
results.append(
{
"type": "tool_result",
"tool_use_id": tool_use.id,
"content": repr(exc),
"is_error": True,
}
)
return {"role": "user", "content": results}
def _get_last_message(self) -> ParsedBetaMessage[ResponseFormatT] | None:
if callable(self._last_message):
return self._last_message()
return self._last_message
def _get_last_assistant_message_content(self) -> list[ParsedBetaContentBlock[ResponseFormatT]] | None:
last_message = self._get_last_message()
if last_message is None or last_message.role != "assistant" or not last_message.content:
return None
return last_message.content
| BaseSyncToolRunner |
python | conda__conda | tests/plugins/test_auth_handlers.py | {
"start": 265,
"end": 434
} | class ____(HTTPBasicAuth):
def __init__(self):
username = "user_one"
password = "pass_one"
super().__init__(username, password)
| CustomCondaAuth |
python | kamyu104__LeetCode-Solutions | Python/maximum-length-of-a-concatenated-string-with-unique-characters.py | {
"start": 1051,
"end": 1943
} | class ____(object):
def maxLength(self, arr):
"""
:type arr: List[str]
:rtype: int
"""
def bitset(s):
result = 0
for c in s:
if result & power[ord(c)-ord('a')]:
return 0
result |= power[ord(c)-ord('a')]
return result
bitsets = [bitset(x) for x in arr]
result = 0
for i in xrange(power[len(arr)]):
curr_bitset, curr_len = 0, 0
while i:
j = i & -i # rightmost bit
i ^= j
j = log2[j] # log2(j)
if not bitsets[j] or (curr_bitset & bitsets[j]):
break
curr_bitset |= bitsets[j]
curr_len += len(arr[j])
else:
result = max(result, curr_len)
return result
| Solution2 |
python | psf__black | tests/data/cases/class_methods_new_line.py | {
"start": 559,
"end": 656
} | class ____:
cls_var = 100
@deco
def __init__(self):
pass
| ClassWithDecoInitAndVars |
python | fluentpython__example-code-2e | 10-dp-1class-func/strategy_param.py | {
"start": 2877,
"end": 3450
} | class ____:
"""discount for orders with 10 or more distinct items"""
def __init__(self, percent: float):
self.percent = percent
def __call__(self, order: Order) -> float:
distinct_items = {item.product for item in order.cart}
if len(distinct_items) >= 10:
return order.total() * self.percent / 100
return 0
def general_discount(percent: float, order: Order) -> float:
"""unrestricted discount; usage: ``partial(general_discount, 5)``"""
return order.total() * percent / 100
# end::STRATEGY[]
| LargeOrderPromo |
python | doocs__leetcode | solution/3700-3799/3726.Remove Zeros in Decimal Representation/Solution.py | {
"start": 0,
"end": 244
} | class ____:
def removeZeros(self, n: int) -> int:
k = 1
ans = 0
while n:
x = n % 10
if x:
ans = k * x + ans
k *= 10
n //= 10
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/edgetam_video/modular_edgetam_video.py | {
"start": 19310,
"end": 20827
} | class ____(Sam2VideoVisionRotaryEmbedding):
def __init__(self, config: EdgeTamVideoConfig, end_x: Optional[int] = None, end_y: Optional[int] = None):
nn.Module.__init__()
dim = config.memory_attention_hidden_size // (
config.memory_attention_downsample_rate * config.memory_attention_num_attention_heads
)
# Ensure even dimension for proper axial splitting
if dim % 4 != 0:
raise ValueError("Dimension must be divisible by 4 for axial RoPE")
end_x, end_y = config.memory_attention_rope_feat_sizes if end_x is None else (end_x, end_y)
freqs = 1.0 / (config.memory_attention_rope_theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
# Generate 2D position indices for axial rotary embedding
flattened_indices = torch.arange(end_x * end_y, dtype=torch.long)
x_positions = flattened_indices % end_x
y_positions = torch.div(flattened_indices, end_x, rounding_mode="floor")
freqs_x = torch.outer(x_positions, freqs).float()
freqs_y = torch.outer(y_positions, freqs).float()
inv_freq = torch.cat([freqs_x, freqs_y], dim=-1)
inv_freq = inv_freq.repeat_interleave(2, dim=-1)
# directly register the cos and sin embeddings as we have a fixed feature shape
self.register_buffer("rope_embeddings_cos", inv_freq.cos(), persistent=False)
self.register_buffer("rope_embeddings_sin", inv_freq.sin(), persistent=False)
| EdgeTamVideoVisionRotaryEmbedding |
python | astropy__astropy | astropy/timeseries/periodograms/bls/core.py | {
"start": 31469,
"end": 34147
} | class ____(dict):
"""The results of a BoxLeastSquares search.
Attributes
----------
objective : str
The scalar used to optimize to find the best fit phase, duration, and
depth. See :func:`BoxLeastSquares.power` for more information.
period : array-like or `~astropy.units.Quantity` ['time']
The set of test periods.
power : array-like or `~astropy.units.Quantity`
The periodogram evaluated at the periods in ``period``. If
``objective`` is:
* ``'likelihood'``: the values of ``power`` are the
log likelihood maximized over phase, depth, and duration, or
* ``'snr'``: the values of ``power`` are the signal-to-noise with
which the depth is measured maximized over phase, depth, and
duration.
depth : array-like or `~astropy.units.Quantity`
The estimated depth of the maximum power model at each period.
depth_err : array-like or `~astropy.units.Quantity`
The 1-sigma uncertainty on ``depth``.
duration : array-like or `~astropy.units.Quantity` ['time']
The maximum power duration at each period.
transit_time : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like or `~astropy.units.Quantity`
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like or `~astropy.units.Quantity`
The log likelihood of the maximum power model.
"""
def __init__(self, *args):
super().__init__(
zip(
(
"objective",
"period",
"power",
"depth",
"depth_err",
"duration",
"transit_time",
"depth_snr",
"log_likelihood",
),
args,
)
)
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return "\n".join(
[k.rjust(m) + ": " + repr(v) for k, v in sorted(self.items())]
)
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
| BoxLeastSquaresResults |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_sparkline10.py | {
"start": 345,
"end": 4451
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with no cell data."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.name = "Sheet1"
worksheet.excel_version = 2010
data = [-2, 2, 3, -1, 0]
worksheet.write_row("A1", data)
# Set up sparklines.
worksheet.add_sparkline(
"F1",
{
"range": "A1:E1",
"high_point": True,
"low_point": True,
"negative_points": True,
"first_point": True,
"last_point": True,
"markers": True,
"series_color": "#C00000",
"negative_color": "#FF0000",
"markers_color": "#FFC000",
"first_color": "#00B050",
"last_color": "#00B0F0",
"high_color": "#FFFF00",
"low_color": "#92D050",
},
)
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1:E1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData>
<row r="1" spans="1:5" x14ac:dyDescent="0.25">
<c r="A1">
<v>-2</v>
</c>
<c r="B1">
<v>2</v>
</c>
<c r="C1">
<v>3</v>
</c>
<c r="D1">
<v>-1</v>
</c>
<c r="E1">
<v>0</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{05C60535-1F16-4fd2-B633-F4F36F0B64E0}">
<x14:sparklineGroups xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:sparklineGroup displayEmptyCellsAs="gap" markers="1" high="1" low="1" first="1" last="1" negative="1">
<x14:colorSeries rgb="FFC00000"/>
<x14:colorNegative rgb="FFFF0000"/>
<x14:colorAxis rgb="FF000000"/>
<x14:colorMarkers rgb="FFFFC000"/>
<x14:colorFirst rgb="FF00B050"/>
<x14:colorLast rgb="FF00B0F0"/>
<x14:colorHigh rgb="FFFFFF00"/>
<x14:colorLow rgb="FF92D050"/>
<x14:sparklines>
<x14:sparkline>
<xm:f>Sheet1!A1:E1</xm:f>
<xm:sqref>F1</xm:sqref>
</x14:sparkline>
</x14:sparklines>
</x14:sparklineGroup>
</x14:sparklineGroups>
</ext>
</extLst>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | pypa__warehouse | tests/unit/packaging/test_forms.py | {
"start": 147,
"end": 2753
} | class ____:
inspector_link = "https://inspector.pypi.io/project/requests/"
def test_validate(self, pyramid_request):
pyramid_request.POST = MultiDict(
{
"inspector_link": self.inspector_link,
"summary": "This is a comment",
}
)
form = SubmitMalwareObservationForm(pyramid_request.POST)
assert form.validate({"comment": "This is a comment"})
def test_missing_inspector_link(self, pyramid_request):
pyramid_request.POST = MultiDict({"summary": "This is a comment"})
form = SubmitMalwareObservationForm(pyramid_request.POST)
assert not form.validate()
assert "inspector_link" in form.errors
def test_malformed_inspector_link(self, pyramid_request):
pyramid_request.POST = MultiDict(
{
"inspector_link": "https://inspector.pypi.org/project/requests/",
"summary": "This is a comment",
}
)
form = SubmitMalwareObservationForm(pyramid_request.POST)
assert not form.validate()
assert "inspector_link" in form.errors
def test_missing_summary(self, pyramid_request):
pyramid_request.POST = MultiDict({"inspector_link": self.inspector_link})
form = SubmitMalwareObservationForm(pyramid_request.POST)
assert not form.validate()
assert "summary" in form.errors
def test_summary_too_short(self, pyramid_request):
pyramid_request.POST = MultiDict(
{
"inspector_link": self.inspector_link,
"summary": "short",
}
)
form = SubmitMalwareObservationForm(pyramid_request.POST)
assert not form.validate()
assert "summary" in form.errors
def test_summary_too_long(self, pyramid_request):
pyramid_request.POST = MultiDict(
{
"inspector_link": self.inspector_link,
"summary": "x" * 2001,
}
)
form = SubmitMalwareObservationForm(pyramid_request.POST)
assert not form.validate()
assert "summary" in form.errors
def test_summary_contains_html_tags(self, pyramid_request):
pyramid_request.POST = MultiDict(
{
"inspector_link": self.inspector_link,
"summary": '<img src="https://example.com/image.png">',
}
)
form = SubmitMalwareObservationForm(pyramid_request.POST)
assert not form.validate()
assert "summary" in form.errors
| TestSubmitObservationForm |
python | kamyu104__LeetCode-Solutions | Python/bulb-switcher.py | {
"start": 43,
"end": 231
} | class ____(object):
def bulbSwitch(self, n):
"""
type n: int
rtype: int
"""
# The number of full squares.
return int(math.sqrt(n))
| Solution |
python | jazzband__django-model-utils | tests/test_choices.py | {
"start": 188,
"end": 1871
} | class ____(Generic[T]):
STATUS: Choices[T]
def test_getattr(self) -> None:
assert self.STATUS.DRAFT == 'DRAFT'
def test_len(self) -> None:
assert len(self.STATUS) == 2
def test_repr(self) -> None:
assert repr(self.STATUS) == "Choices" + repr((
('DRAFT', 'DRAFT', 'DRAFT'),
('PUBLISHED', 'PUBLISHED', 'PUBLISHED'),
))
def test_wrong_length_tuple(self) -> None:
with pytest.raises(ValueError):
Choices(('a',)) # type: ignore[arg-type]
def test_deepcopy(self) -> None:
import copy
assert list(self.STATUS) == list(copy.deepcopy(self.STATUS))
def test_equality(self) -> None:
assert self.STATUS == Choices('DRAFT', 'PUBLISHED')
def test_inequality(self) -> None:
assert self.STATUS != ['DRAFT', 'PUBLISHED']
assert self.STATUS != Choices('DRAFT')
def test_composability(self) -> None:
assert Choices('DRAFT') + Choices('PUBLISHED') == self.STATUS
assert Choices('DRAFT') + ('PUBLISHED',) == self.STATUS
assert ('DRAFT',) + Choices('PUBLISHED') == self.STATUS
def test_option_groups(self) -> None:
# Note: The implementation accepts any kind of sequence, but the type system can only
# track per-index types for tuples.
if TYPE_CHECKING:
c = Choices(('group a', ['one', 'two']), ('group b', ('three',)))
else:
c = Choices(('group a', ['one', 'two']), ['group b', ('three',)])
assert list(c) == [
('group a', [('one', 'one'), ('two', 'two')]),
('group b', [('three', 'three')]),
]
| ChoicesTestsMixin |
python | ray-project__ray | release/ray_release/exception.py | {
"start": 2606,
"end": 2702
} | class ____(ReleaseTestError):
exit_code = ExitCode.CLUSTER_STARTUP_ERROR
| EnvironmentSetupError |
python | pypa__wheel | src/wheel/wheelfile.py | {
"start": 1635,
"end": 8720
} | class ____(ZipFile):
"""A ZipFile derivative class that also reads SHA-256 hashes from
.dist-info/RECORD and checks any read files against those.
"""
_default_algorithm = hashlib.sha256
def __init__(
self,
file: StrPath,
mode: Literal["r", "w", "x", "a"] = "r",
compression: int = ZIP_DEFLATED,
):
basename = os.path.basename(file)
self.parsed_filename = WHEEL_INFO_RE.match(basename)
if not basename.endswith(".whl") or self.parsed_filename is None:
raise WheelError(f"Bad wheel filename {basename!r}")
ZipFile.__init__(self, file, mode, compression=compression, allowZip64=True)
self.dist_info_path = "{}.dist-info".format(
self.parsed_filename.group("namever")
)
self.record_path = self.dist_info_path + "/RECORD"
self._file_hashes: dict[str, tuple[None, None] | tuple[int, bytes]] = {}
self._file_sizes = {}
if mode == "r":
# Ignore RECORD and any embedded wheel signatures
self._file_hashes[self.record_path] = None, None
self._file_hashes[self.record_path + ".jws"] = None, None
self._file_hashes[self.record_path + ".p7s"] = None, None
# Fill in the expected hashes by reading them from RECORD
try:
record = self.open(self.record_path)
except KeyError:
raise WheelError(f"Missing {self.record_path} file") from None
with record:
for line in csv.reader(
TextIOWrapper(record, newline="", encoding="utf-8")
):
path, hash_sum, size = line
if not hash_sum:
continue
algorithm, hash_sum = hash_sum.split("=")
try:
hashlib.new(algorithm)
except ValueError:
raise WheelError(
f"Unsupported hash algorithm: {algorithm}"
) from None
if algorithm.lower() in {"md5", "sha1"}:
raise WheelError(
f"Weak hash algorithm ({algorithm}) is not permitted by "
f"PEP 427"
)
self._file_hashes[path] = (
algorithm,
urlsafe_b64decode(hash_sum.encode("ascii")),
)
def open(
self,
name_or_info: str | ZipInfo,
mode: Literal["r", "w"] = "r",
pwd: bytes | None = None,
) -> IO[bytes]:
def _update_crc(newdata: bytes) -> None:
eof = ef._eof
update_crc_orig(newdata)
running_hash.update(newdata)
if eof and running_hash.digest() != expected_hash:
raise WheelError(f"Hash mismatch for file '{ef_name}'")
ef_name = (
name_or_info.filename if isinstance(name_or_info, ZipInfo) else name_or_info
)
if (
mode == "r"
and not ef_name.endswith("/")
and ef_name not in self._file_hashes
):
raise WheelError(f"No hash found for file '{ef_name}'")
ef = ZipFile.open(self, name_or_info, mode, pwd)
if mode == "r" and not ef_name.endswith("/"):
algorithm, expected_hash = self._file_hashes[ef_name]
if expected_hash is not None:
# Monkey patch the _update_crc method to also check for the hash from
# RECORD
running_hash = hashlib.new(algorithm)
update_crc_orig, ef._update_crc = ef._update_crc, _update_crc
return ef
def write_files(self, base_dir: str) -> None:
log.info("creating %r and adding %r to it", self.filename, base_dir)
deferred: list[tuple[str, str]] = []
for root, dirnames, filenames in os.walk(base_dir):
# Sort the directory names so that `os.walk` will walk them in a
# defined order on the next iteration.
dirnames.sort()
for name in sorted(filenames):
path = os.path.normpath(os.path.join(root, name))
if os.path.isfile(path):
arcname = os.path.relpath(path, base_dir).replace(os.path.sep, "/")
if arcname == self.record_path:
pass
elif root.endswith(".dist-info"):
deferred.append((path, arcname))
else:
self.write(path, arcname)
deferred.sort()
for path, arcname in deferred:
self.write(path, arcname)
def write(
self,
filename: str,
arcname: str | None = None,
compress_type: int | None = None,
) -> None:
with open(filename, "rb") as f:
st = os.fstat(f.fileno())
data = f.read()
zinfo = ZipInfo(
arcname or filename, date_time=get_zipinfo_datetime(st.st_mtime)
)
zinfo.external_attr = (stat.S_IMODE(st.st_mode) | stat.S_IFMT(st.st_mode)) << 16
zinfo.compress_type = compress_type or self.compression
self.writestr(zinfo, data, compress_type)
def writestr(
self,
zinfo_or_arcname: str | ZipInfo,
data: SizedBuffer | str,
compress_type: int | None = None,
) -> None:
if isinstance(zinfo_or_arcname, str):
zinfo_or_arcname = ZipInfo(
zinfo_or_arcname, date_time=get_zipinfo_datetime()
)
zinfo_or_arcname.compress_type = self.compression
zinfo_or_arcname.external_attr = (0o664 | stat.S_IFREG) << 16
if isinstance(data, str):
data = data.encode("utf-8")
ZipFile.writestr(self, zinfo_or_arcname, data, compress_type)
fname = (
zinfo_or_arcname.filename
if isinstance(zinfo_or_arcname, ZipInfo)
else zinfo_or_arcname
)
log.info("adding %r", fname)
if fname != self.record_path:
hash_ = self._default_algorithm(data)
self._file_hashes[fname] = (
hash_.name,
urlsafe_b64encode(hash_.digest()).decode("ascii"),
)
self._file_sizes[fname] = len(data)
def close(self) -> None:
# Write RECORD
if self.fp is not None and self.mode == "w" and self._file_hashes:
data = StringIO()
writer = csv.writer(data, delimiter=",", quotechar='"', lineterminator="\n")
writer.writerows(
(
(fname, algorithm + "=" + hash_, self._file_sizes[fname])
for fname, (algorithm, hash_) in self._file_hashes.items()
)
)
writer.writerow((format(self.record_path), "", ""))
self.writestr(self.record_path, data.getvalue())
ZipFile.close(self)
| WheelFile |
python | openai__openai-python | src/openai/types/beta/realtime/conversation_item_created_event.py | {
"start": 282,
"end": 817
} | class ____(BaseModel):
event_id: str
"""The unique ID of the server event."""
item: ConversationItem
"""The item to add to the conversation."""
type: Literal["conversation.item.created"]
"""The event type, must be `conversation.item.created`."""
previous_item_id: Optional[str] = None
"""
The ID of the preceding item in the Conversation context, allows the client to
understand the order of the conversation. Can be `null` if the item has no
predecessor.
"""
| ConversationItemCreatedEvent |
python | tensorflow__tensorflow | tensorflow/python/keras/backend.py | {
"start": 28506,
"end": 112102
} | class ____:
"""Class for capturing the TF device scope."""
def __init__(self):
self.device = None
def _set_device(self, device):
"""This method captures TF's explicit device scope setting."""
if isinstance(device, device_spec.DeviceSpecV2):
device = device.to_string()
self.device = device
def _set_device_from_string(self, device_str):
self.device = device_str
def _get_current_tf_device():
"""Return explicit device of current context, otherwise returns `None`.
Returns:
If the current device scope is explicitly set, it returns a string with
the device (`CPU` or `GPU`). If the scope is not explicitly set, it will
return `None`.
"""
graph = get_graph()
op = _TfDeviceCaptureOp()
graph._apply_device_functions(op)
if tf2.enabled():
return device_spec.DeviceSpecV2.from_string(op.device)
else:
return device_spec.DeviceSpecV1.from_string(op.device)
def _is_current_explicit_device(device_type):
"""Check if the current device is explicitly set on the device type specified.
Args:
device_type: A string containing `GPU` or `CPU` (case-insensitive).
Returns:
A boolean indicating if the current device scope is explicitly set on the
device type.
Raises:
ValueError: If the `device_type` string indicates an unsupported device.
"""
device_type = device_type.upper()
if device_type not in ['CPU', 'GPU']:
raise ValueError('`device_type` should be either "CPU" or "GPU".')
device = _get_current_tf_device()
return device is not None and device.device_type == device_type.upper()
def _get_available_gpus():
"""Get a list of available GPU devices (formatted as strings).
Returns:
A list of available GPU devices.
"""
if ops.executing_eagerly_outside_functions():
# Returns names of devices directly.
return [d.name for d in config.list_logical_devices('GPU')]
global _LOCAL_DEVICES
if _LOCAL_DEVICES is None:
_LOCAL_DEVICES = get_session().list_devices()
return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']
def _has_nchw_support():
"""Check whether the current scope supports NCHW ops.
TensorFlow does not support NCHW on CPU. Therefore we check if we are not
explicitly put on
CPU, and have GPUs available. In this case there will be soft-placing on the
GPU device.
Returns:
bool: if the current scope device placement would support nchw
"""
explicitly_on_cpu = _is_current_explicit_device('CPU')
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
# VARIABLE MANIPULATION
def _constant_to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
This is slightly faster than the _to_tensor function, at the cost of
handling fewer cases.
Args:
x: An object to be converted (numpy arrays, floats, ints and lists of
them).
dtype: The destination type.
Returns:
A tensor.
"""
return constant_op.constant(x, dtype=dtype)
def _to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
Args:
x: An object to be converted (numpy array, list, tensors).
dtype: The destination type.
Returns:
A tensor.
"""
return tensor_conversion.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)
@doc_controls.do_not_generate_docs
def is_sparse(tensor):
"""Returns whether a tensor is a sparse tensor.
Args:
tensor: A tensor instance.
Returns:
A boolean.
Example:
>>> a = tf.keras.backend.placeholder((2, 2), sparse=False)
>>> print(tf.keras.backend.is_sparse(a))
False
>>> b = tf.keras.backend.placeholder((2, 2), sparse=True)
>>> print(tf.keras.backend.is_sparse(b))
True
"""
spec = getattr(tensor, '_type_spec', None)
if spec is not None:
return isinstance(spec, sparse_tensor.SparseTensorSpec)
return isinstance(tensor, sparse_tensor.SparseTensor)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def to_dense(tensor):
"""Converts a sparse tensor into a dense tensor and returns it.
Args:
tensor: A tensor instance (potentially sparse).
Returns:
A dense tensor.
Examples:
>>> b = tf.keras.backend.placeholder((2, 2), sparse=True)
>>> print(tf.keras.backend.is_sparse(b))
True
>>> c = tf.keras.backend.to_dense(b)
>>> print(tf.keras.backend.is_sparse(c))
False
"""
if is_sparse(tensor):
return sparse_ops.sparse_tensor_to_dense(tensor)
else:
return tensor
@doc_controls.do_not_generate_docs
def name_scope(name):
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
def my_op(a):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
# Define some computation that uses `a`.
return foo_op(..., name=scope)
When executed, the Tensor `a` will have the name `MyOp/a`.
Args:
name: The prefix to use on all names created within the name scope.
Returns:
Name scope context manager.
"""
return ops.name_scope_v2(name)
@doc_controls.do_not_generate_docs
def variable(value, dtype=None, name=None, constraint=None):
  """Instantiates a variable and returns it.

  Args:
    value: Numpy array, initial value of the tensor.
    dtype: Tensor type.
    name: Optional name string for the tensor.
    constraint: Optional projection function to be
        applied to the variable after an optimizer update.

  Returns:
    A variable instance (with Keras metadata included).

  Examples:

  >>> val = np.array([[1, 2], [3, 4]])
  >>> kvar = tf.keras.backend.variable(value=val, dtype='float64',
  ...                                  name='example_var')
  >>> tf.keras.backend.dtype(kvar)
  'float64'
  """
  if dtype is None:
    dtype = floatx()
  # `tocoo` marks scipy sparse matrices: convert them to a SparseTensor
  # (not a tf.Variable) and return early.
  if hasattr(value, 'tocoo'):
    sparse_coo = value.tocoo()
    # Stack (row, col) pairs into the (nnz, 2) indices matrix.
    indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
        sparse_coo.col, 1)), 1)
    v = sparse_tensor.SparseTensor(
        indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
    v._keras_shape = sparse_coo.shape
    return v
  v = variables_module.Variable(
      value,
      dtype=dtypes_module.as_dtype(dtype),
      name=name,
      constraint=constraint)
  # Record the static shape for Keras shape inference.
  if isinstance(value, np.ndarray):
    v._keras_shape = value.shape
  elif hasattr(value, 'shape'):
    v._keras_shape = int_shape(value)
  # Register for lazy initialization in graph-mode sessions.
  track_variable(v)
  return v
def track_tf_optimizer(tf_optimizer):
  """Registers a TF optimizer so its variables get initialized in graph mode.

  No-op under eager execution, where variables are initialized on creation.
  """
  if not context.executing_eagerly():
    _GRAPH_TF_OPTIMIZERS[None].add(tf_optimizer)
def track_variable(v):
  """Registers variable `v` for graph-mode initialization.

  No-op under eager execution, where variables are initialized on creation.
  """
  if context.executing_eagerly():
    return
  owning_graph = v.graph if hasattr(v, 'graph') else get_graph()
  _GRAPH_VARIABLES[owning_graph].add(v)
def observe_object_name(name):
  """Records `name` so `unique_object_name` will never hand it out again."""
  OBSERVED_NAMES.add(name)
def unique_object_name(name,
                       name_uid_map=None,
                       avoid_names=None,
                       namespace='',
                       zero_based=False,
                       avoid_observed_names=False):
  """Makes an object name (or arbitrary string) unique within a graph.

  Args:
    name: String name to make unique.
    name_uid_map: An optional defaultdict(int) to use when creating unique
      names. If None (default), uses a per-Graph dictionary.
    avoid_names: An optional set or dict with names which should not be used.
      If None (default), don't avoid any names unless `avoid_observed_names`
      is True.
    namespace: Gets a name which is unique within the (graph, namespace).
      Layers which are not Networks use a blank namespace and so get
      graph-global names.
    zero_based: If True, name sequences start with no suffix (e.g. "dense",
      "dense_1"). If False, naming is one-based ("dense_1", "dense_2").
    avoid_observed_names: If True, avoid any names that have been observed by
      `backend.observe_object_name`.

  Returns:
    Unique string name.

  Example:

      unique_object_name('dense')  # dense_1
      unique_object_name('dense')  # dense_2
  """
  if name_uid_map is None:
    name_uid_map = get_default_graph_uid_map()
  if avoid_names is None:
    avoid_names = OBSERVED_NAMES if avoid_observed_names else set()
  uid_key = (namespace, name)
  # Keep drawing candidates until one is outside the avoid set.
  proposed_name = None
  while proposed_name is None or proposed_name in avoid_names:
    if zero_based:
      count = name_uid_map[uid_key]
      # First occurrence gets the bare name; later ones get "_<count>".
      proposed_name = name + '_' + str(count) if count else name
      name_uid_map[uid_key] = count + 1
    else:
      name_uid_map[uid_key] += 1
      proposed_name = name + '_' + str(name_uid_map[uid_key])
  return proposed_name
def _get_variables(graph=None):
  """Collects the tracked variables for `graph`, including optimizer slots."""
  # Graph-mode only: eager variables are initialized on creation.
  assert not context.executing_eagerly()
  tracked = _GRAPH_VARIABLES[graph]
  # Fold in variables owned by any TF optimizers registered on this graph.
  for wrapper in _GRAPH_TF_OPTIMIZERS[graph]:
    tracked.update(wrapper.optimizer.variables())
  return tracked
def _initialize_variables(session):
  """Utility to initialize uninitialized variables on the fly.

  Runs the initializers of any tracked variables in the current graph that
  have not yet been initialized, using the given `session`. Variables are
  tagged with `_keras_initialized` so repeated calls skip them.
  """
  variables = _get_variables(get_graph())
  candidate_vars = []
  for v in variables:
    if not getattr(v, '_keras_initialized', False):
      candidate_vars.append(v)
  if candidate_vars:
    # This step is expensive, so we only run it on variables not already
    # marked as initialized.
    is_initialized = session.run(
        [variable_v1.is_variable_initialized(v) for v in candidate_vars])
    # TODO(kathywu): Some metric variables loaded from SavedModel are never
    # actually used, and do not have an initializer.
    should_be_initialized = [
        (not is_initialized[n]) and v.initializer is not None
        for n, v in enumerate(candidate_vars)]
    uninitialized_vars = []
    for flag, v in zip(should_be_initialized, candidate_vars):
      if flag:
        uninitialized_vars.append(v)
      # Mark every candidate so the expensive check above is not repeated,
      # whether or not an initializer actually ran for it.
      v._keras_initialized = True
    if uninitialized_vars:
      session.run(variables_module.variables_initializer(uninitialized_vars))
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def constant(value, dtype=None, shape=None, name=None):
  """Creates a constant tensor from `value`.

  Args:
    value: A constant value (or list).
    dtype: The type of the elements of the resulting tensor; defaults to
      the Keras floatx dtype.
    shape: Optional dimensions of resulting tensor.
    name: Optional name for the tensor.

  Returns:
    A Constant Tensor.
  """
  resolved_dtype = floatx() if dtype is None else dtype
  return constant_op.constant(value, dtype=resolved_dtype, shape=shape,
                              name=name)
def is_keras_tensor(x):
  """Returns whether `x` is a Keras tensor.

  A "Keras tensor" is a tensor that was returned by a Keras layer,
  (`Layer` class) or by `Input`.

  Args:
    x: A candidate tensor.

  Returns:
    A boolean: Whether the argument is a Keras tensor.

  Raises:
    ValueError: In case `x` is not a symbolic tensor.

  Examples:

  >>> np_var = np.array([1, 2])
  >>> # A numpy array is not a symbolic tensor.
  >>> tf.keras.backend.is_keras_tensor(np_var)
  Traceback (most recent call last):
  ...
  ValueError: Unexpectedly found an instance of type `<class 'numpy.ndarray'>`.
  Expected a symbolic tensor instance.
  >>> keras_var = tf.keras.backend.variable(np_var)
  >>> # A variable created with the keras backend is not a Keras tensor.
  >>> tf.keras.backend.is_keras_tensor(keras_var)
  False
  >>> keras_placeholder = tf.keras.backend.placeholder(shape=(2, 4, 5))
  >>> # A placeholder is a Keras tensor.
  >>> tf.keras.backend.is_keras_tensor(keras_placeholder)
  True
  >>> keras_input = tf.keras.layers.Input([10])
  >>> # An Input is a Keras tensor.
  >>> tf.keras.backend.is_keras_tensor(keras_input)
  True
  >>> keras_layer_output = tf.keras.layers.Dense(10)(keras_input)
  >>> # Any Keras layer output is a Keras tensor.
  >>> tf.keras.backend.is_keras_tensor(keras_layer_output)
  True
  """
  # Non-symbolic values (numpy arrays, Python scalars, ...) are rejected
  # with an error rather than returning False, by design.
  if not isinstance(x,
                    (tensor_lib.Tensor, variables_module.Variable,
                     sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor,
                     keras_tensor.KerasTensor)):
    raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
                     '`. Expected a symbolic tensor instance.')
  # TF2: Keras tensors are KerasTensor instances. TF1 graph mode: they are
  # plain tensors carrying a `_keras_history` attribute set by Keras layers.
  if ops.executing_eagerly_outside_functions():
    return isinstance(x, keras_tensor.KerasTensor)
  return hasattr(x, '_keras_history')
@doc_controls.do_not_generate_docs
def placeholder(shape=None,
                ndim=None,
                dtype=None,
                sparse=False,
                name=None,
                ragged=False):
  """Instantiates a placeholder tensor and returns it.

  Args:
    shape: Shape of the placeholder
        (integer tuple, may include `None` entries).
    ndim: Number of axes of the tensor.
        At least one of {`shape`, `ndim`} must be specified.
        If both are specified, `shape` is used.
    dtype: Placeholder type.
    sparse: Boolean, whether the placeholder should have a sparse type.
    name: Optional name string for the placeholder.
    ragged: Boolean, whether the placeholder should have a ragged type.
        In this case, values of 'None' in the 'shape' argument represent
        ragged dimensions. For more information about RaggedTensors, see this
        [guide](https://www.tensorflow.org/guide/ragged_tensors).

  Raises:
    ValueError: If called with sparse = True and ragged = True.

  Returns:
    Tensor instance (with Keras metadata included).

  Examples:

  >>> input_ph = tf.keras.backend.placeholder(shape=(2, 4, 5))
  >>> input_ph
  <KerasTensor: shape=(2, 4, 5) dtype=float32 (created by layer ...)>
  """
  if sparse and ragged:
    raise ValueError(
        'Cannot set both sparse and ragged to True when creating a placeholder.'
    )
  if dtype is None:
    dtype = floatx()
  # If no shape was given, derive a fully-unknown shape from `ndim`.
  if not shape:
    if ndim:
      shape = (None,) * ndim
  if ops.executing_eagerly_outside_functions():
    # TF2 path: build a type spec and wrap it in a KerasTensor.
    if sparse:
      spec = sparse_tensor.SparseTensorSpec(
          shape=shape, dtype=dtype)
    elif ragged:
      # The last `None` dimension (other than the batch dim) determines
      # the ragged rank.
      ragged_rank = 0
      for i in range(1, len(shape)):
        # Hacky because could be tensorshape or tuple maybe?
        # Or just tensorshape?
        if shape[i] is None or (
            hasattr(shape[i], 'value') and
            shape[i].value is None):
          ragged_rank = i
      spec = ragged_tensor.RaggedTensorSpec(
          shape=shape, dtype=dtype, ragged_rank=ragged_rank)
    else:
      spec = tensor_lib.TensorSpec(
          shape=shape, dtype=dtype, name=name)
    x = keras_tensor.keras_tensor_from_type_spec(spec, name=name)
  else:
    # TF1 graph path: create real placeholder ops in the Keras graph.
    with get_graph().as_default():
      if sparse:
        x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
      elif ragged:
        ragged_rank = 0
        for i in range(1, len(shape)):
          if shape[i] is None:
            ragged_rank = i
        type_spec = ragged_tensor.RaggedTensorSpec(
            shape=shape, dtype=dtype, ragged_rank=ragged_rank)

        def tensor_spec_to_placeholder(tensorspec):
          return array_ops.placeholder(tensorspec.dtype, tensorspec.shape)

        x = nest.map_structure(tensor_spec_to_placeholder, type_spec,
                               expand_composites=True)
      else:
        x = array_ops.placeholder(dtype, shape=shape, name=name)
  if context.executing_eagerly():
    # Add keras_history connectivity information to the placeholder
    # when the placeholder is built in a top-level eager context
    # (intended to be used with keras.backend.function)
    from tensorflow.python.keras.engine import input_layer  # pylint: disable=g-import-not-at-top
    x = input_layer.Input(tensor=x)
    x._is_backend_placeholder = True
  return x
def is_placeholder(x):
  """Returns whether `x` is a placeholder.

  Args:
    x: A candidate placeholder.

  Returns:
    Boolean.
  """
  try:
    if ops.executing_eagerly_outside_functions():
      # TF2 placeholders are tagged by `placeholder()` above.
      return hasattr(x, '_is_backend_placeholder')
    from tensorflow.python.keras.utils import tf_utils  # pylint: disable=g-import-not-at-top
    if not tf_utils.is_extension_type(x):
      return x.op.type == 'Placeholder'
    # Composite tensors are placeholders if any of their components are.
    components = nest.flatten(x, expand_composites=True)
    return py_any(is_placeholder(c) for c in components)
  except AttributeError:
    return False
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def shape(x):
  """Returns the symbolic (dynamic) shape of a tensor or variable.

  Args:
    x: A tensor or variable.

  Returns:
    A symbolic shape (which is itself a tensor).

  Example:

  >>> val = np.array([[1, 2], [3, 4]])
  >>> kvar = tf.keras.backend.variable(value=val)
  >>> tf.keras.backend.shape(kvar)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>
  """
  dynamic_shape = array_ops.shape(x)
  return dynamic_shape
@doc_controls.do_not_generate_docs
def int_shape(x):
"""Returns the shape of tensor or variable as a tuple of int or None entries.
Args:
x: Tensor or variable.
Returns:
A tuple of integers (or None entries).
Examples:
>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> tf.keras.backend.int_shape(input)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val)
>>> tf.keras.backend.int_shape(kvar)
(2, 2)
"""
try:
shape = x.shape
if not isinstance(shape, tuple):
shape = tuple(shape.as_list())
return shape
except ValueError:
return None
@doc_controls.do_not_generate_docs
def ndim(x):
  """Returns the number of axes in a tensor, as an integer.

  Args:
    x: Tensor or variable.

  Returns:
    Integer (scalar), number of axes (None if the rank is unknown).

  Example:

  >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
  >>> tf.keras.backend.ndim(input)
  3
  """
  static_shape = x.shape
  return static_shape.rank
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def dtype(x):
  """Returns the dtype of a Keras tensor or variable, as a string.

  Reference dtypes (e.g. float32_ref) are reported as their base dtype.

  Args:
    x: Tensor or variable.

  Returns:
    String, dtype of `x`.

  Example:

  >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]))
  >>> tf.keras.backend.dtype(kvar)
  'float32'
  """
  base = x.dtype.base_dtype
  return base.name
@doc_controls.do_not_generate_docs
def dtype_numpy(x):
  """Returns the numpy dtype of a Keras tensor or variable.

  Args:
    x: Tensor or variable.

  Returns:
    numpy.dtype, dtype of `x`.
  """
  tf_dtype = dtypes_module.as_dtype(x.dtype)
  return tf_dtype.as_numpy_dtype
@doc_controls.do_not_generate_docs
def eval(x):
  """Evaluates a variable and returns its value as a Numpy array.

  Sparse inputs are densified before evaluation.

  Args:
    x: A variable.

  Returns:
    A Numpy array.

  Example:

  >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),
  ...                                  dtype='float32')
  >>> tf.keras.backend.eval(kvar)
  array([[1., 2.],
         [3., 4.]], dtype=float32)
  """
  dense = to_dense(x)
  return get_value(dense)
@doc_controls.do_not_generate_docs
def zeros(shape, dtype=None, name=None):
  """Instantiates an all-zeros variable and returns it.

  Args:
    shape: Tuple or list of integers, shape of returned Keras variable.
    dtype: data type of returned Keras variable.
    name: name of returned Keras variable.

  Returns:
    A variable (including Keras metadata), filled with `0.0`.
    Note that if `shape` was symbolic, we cannot return a variable,
    and will return a dynamically-shaped tensor instead.

  Example:

  >>> kvar = tf.keras.backend.zeros((3,4))
  >>> tf.keras.backend.eval(kvar)
  array([[0., 0., 0., 0.],
         [0., 0., 0., 0.],
         [0., 0., 0., 0.]], dtype=float32)
  """
  with ops.init_scope():
    dtype = floatx() if dtype is None else dtype
    filled = array_ops.zeros(
        shape=shape, dtype=dtypes_module.as_dtype(dtype), name=name)
    if not py_all(filled.shape.as_list()):
      # Some dimension is symbolic: cannot wrap in a Variable.
      return filled
    return variable(filled, dtype=dtype, name=name)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def ones(shape, dtype=None, name=None):
  """Instantiates an all-ones variable and returns it.

  Args:
    shape: Tuple of integers, shape of returned Keras variable.
    dtype: String, data type of returned Keras variable.
    name: String, name of returned Keras variable.

  Returns:
    A Keras variable, filled with `1.0`.
    Note that if `shape` was symbolic, we cannot return a variable,
    and will return a dynamically-shaped tensor instead.

  Example:

  >>> kvar = tf.keras.backend.ones((3,4))
  >>> tf.keras.backend.eval(kvar)
  array([[1., 1., 1., 1.],
         [1., 1., 1., 1.],
         [1., 1., 1., 1.]], dtype=float32)
  """
  with ops.init_scope():
    dtype = floatx() if dtype is None else dtype
    filled = array_ops.ones(
        shape=shape, dtype=dtypes_module.as_dtype(dtype), name=name)
    if not py_all(filled.shape.as_list()):
      # Some dimension is symbolic: cannot wrap in a Variable.
      return filled
    return variable(filled, dtype=dtype, name=name)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def eye(size, dtype=None, name=None):
  """Instantiates an identity matrix variable and returns it.

  Args:
    size: Integer, number of rows/columns.
    dtype: String, data type of returned Keras variable.
    name: String, name of returned Keras variable.

  Returns:
    A Keras variable, an identity matrix.

  Example:

  >>> kvar = tf.keras.backend.eye(3)
  >>> tf.keras.backend.eval(kvar)
  array([[1., 0., 0.],
         [0., 1., 0.],
         [0., 0., 1.]], dtype=float32)
  """
  dtype = floatx() if dtype is None else dtype
  identity_matrix = linalg_ops.eye(size, dtype=dtypes_module.as_dtype(dtype))
  return variable(identity_matrix, dtype, name)
@doc_controls.do_not_generate_docs
def zeros_like(x, dtype=None, name=None):
  """Instantiates an all-zeros tensor of the same shape as `x`.

  Args:
    x: Keras variable or Keras tensor.
    dtype: dtype of returned Keras variable.
      `None` uses the dtype of `x`.
    name: name for the variable to create.

  Returns:
    A Keras variable with the shape of `x` filled with zeros.
  """
  result = array_ops.zeros_like(x, dtype=dtype, name=name)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def ones_like(x, dtype=None, name=None):
  """Instantiates an all-ones tensor of the same shape as `x`.

  Args:
    x: Keras variable or tensor.
    dtype: String, dtype of returned Keras variable.
      None uses the dtype of x.
    name: String, name for the variable to create.

  Returns:
    A Keras variable with the shape of x filled with ones.

  Example:

  >>> kvar = tf.keras.backend.variable(np.random.random((2,3)))
  >>> kvar_ones = tf.keras.backend.ones_like(kvar)
  >>> tf.keras.backend.eval(kvar_ones)
  array([[1., 1., 1.],
         [1., 1., 1.]], dtype=float32)
  """
  result = array_ops.ones_like(x, dtype=dtype, name=name)
  return result
def identity(x, name=None):
  """Returns a tensor with the same content as the input tensor.

  Args:
    x: The input tensor.
    name: String, name for the variable to create.

  Returns:
    A tensor of the same shape, type and content.
  """
  copied = array_ops.identity(x, name=name)
  return copied
@doc_controls.do_not_generate_docs
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
  """Instantiates a variable with values drawn from a uniform distribution.

  Args:
    shape: Tuple of integers, shape of returned Keras variable.
    low: Float, lower boundary of the output interval.
    high: Float, upper boundary of the output interval.
    dtype: String, dtype of returned Keras variable.
    name: String, name of returned Keras variable.
    seed: Integer, random seed. If None, a seed is drawn from the
      Numpy RNG so randomness stays reproducible via `np.random.seed`.

  Returns:
    A Keras variable, filled with drawn samples.

  Example:

  >>> kvar = tf.keras.backend.random_uniform_variable(shape=(2,3),
  ... low=0.0, high=1.0)
  >>> kvar
  <tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=...,
  dtype=float32)>
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  if seed is None:
    # ensure that randomness is conditioned by the Numpy RNG.
    # Use an integer bound: float arguments to np.random.randint (the old
    # `10e8` literal) are deprecated and rejected by recent NumPy releases.
    seed = np.random.randint(10**9)
  value = init_ops.random_uniform_initializer(
      low, high, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name)
@doc_controls.do_not_generate_docs
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
                           seed=None):
  """Instantiates a variable with values drawn from a normal distribution.

  Args:
    shape: Tuple of integers, shape of returned Keras variable.
    mean: Float, mean of the normal distribution.
    scale: Float, standard deviation of the normal distribution.
    dtype: String, dtype of returned Keras variable.
    name: String, name of returned Keras variable.
    seed: Integer, random seed. If None, a seed is drawn from the
      Numpy RNG so randomness stays reproducible via `np.random.seed`.

  Returns:
    A Keras variable, filled with drawn samples.

  Example:

  >>> kvar = tf.keras.backend.random_normal_variable(shape=(2,3),
  ... mean=0.0, scale=1.0)
  >>> kvar
  <tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=...,
  dtype=float32)>
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = dtypes_module.as_dtype(dtype)
  if seed is None:
    # ensure that randomness is conditioned by the Numpy RNG.
    # Use an integer bound: float arguments to np.random.randint (the old
    # `10e8` literal) are deprecated and rejected by recent NumPy releases.
    seed = np.random.randint(10**9)
  value = init_ops.random_normal_initializer(
      mean, scale, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name)
@doc_controls.do_not_generate_docs
def count_params(x):
  """Returns the static number of elements in a variable or tensor.

  Args:
    x: Variable or tensor.

  Returns:
    Integer, the number of scalars in `x`.

  Example:

  >>> kvar = tf.keras.backend.zeros((2,3))
  >>> tf.keras.backend.count_params(kvar)
  6
  """
  static_dims = x.shape.as_list()
  return np.prod(static_dims)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cast(x, dtype):
  """Casts a tensor to a different dtype and returns it.

  You can cast a Keras variable but it still returns a Keras tensor.

  Args:
    x: Keras tensor (or variable).
    dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).

  Returns:
    Keras tensor with dtype `dtype`.

  Example:

  >>> input = tf.keras.backend.ones(shape=(1,3))
  >>> cast_input = tf.keras.backend.cast(input, dtype='float64')
  >>> print(cast_input)
  tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64)
  """
  converted = math_ops.cast(x, dtype)
  return converted
# UPDATES OPS
@doc_controls.do_not_generate_docs
def update(x, new_x):
  """Assign the value of `new_x` to the variable `x`.

  Args:
    x: A Variable.
    new_x: A tensor of same shape as `x`.

  Returns:
    The variable `x` updated.
  """
  return state_ops.assign(x, new_x)
@doc_controls.do_not_generate_docs
def update_add(x, increment):
  """Adds `increment` to the variable `x` in place.

  Args:
    x: A Variable.
    increment: A tensor of same shape as `x`.

  Returns:
    The variable `x` updated.
  """
  assign_op = state_ops.assign_add(x, increment)
  return assign_op
@doc_controls.do_not_generate_docs
def update_sub(x, decrement):
  """Subtracts `decrement` from the variable `x` in place.

  Args:
    x: A Variable.
    decrement: A tensor of same shape as `x`.

  Returns:
    The variable `x` updated.
  """
  assign_op = state_ops.assign_sub(x, decrement)
  return assign_op
@doc_controls.do_not_generate_docs
def moving_average_update(x, value, momentum):
  """Compute the exponential moving average of a value.

  The moving average `x` is updated with `value` following:

  ```
  x = x * momentum + value * (1 - momentum)
  ```

  The result will be biased towards the initial value of the variable.
  If the variable was initialized to zero, you can divide by
  `1 - momentum ** num_updates` to debias it (Section 3 of
  [Kingma et al., 2015](https://arxiv.org/abs/1412.6980)).

  Args:
    x: A Variable, the moving average.
    value: A tensor with the same shape as `x`, the new value to be
      averaged in.
    momentum: The moving average momentum.

  Returns:
    The updated variable.
  """
  if not tf2.enabled():
    # TF1: delegate to the moving_averages helper, which zero-debiases.
    return moving_averages.assign_moving_average(
        x, value, momentum, zero_debias=True)
  momentum = math_ops.cast(momentum, x.dtype)
  value = math_ops.cast(value, x.dtype)
  return x.assign(x * momentum + value * (1 - momentum))
# LINEAR ALGEBRA
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def dot(x, y):
  """Multiplies 2 tensors (and/or variables) and returns a tensor.

  This operation corresponds to `numpy.dot(a, b, out=None)`.

  Args:
    x: Tensor or variable.
    y: Tensor or variable.

  Returns:
    A tensor, dot product of `x` and `y`.

  Examples:

  If inputs `x` and `y` are 2-D arrays, then it is equivalent to `tf.matmul`.

  >>> x = tf.keras.backend.placeholder(shape=(2, 3))
  >>> y = tf.keras.backend.placeholder(shape=(3, 4))
  >>> xy = tf.keras.backend.dot(x, y)
  >>> xy
  <KerasTensor: shape=(2, 4) dtype=float32 ...>

  If `x` is an N-D array and `y` is an M-D array (where M>=2), it is a sum
  product over the last axis of `x` and the second-to-last axis of `y`.

  >>> x = tf.keras.backend.random_uniform_variable(shape=(2, 3), low=0, high=1)
  >>> y = tf.keras.backend.ones((4, 3, 5))
  >>> xy = tf.keras.backend.dot(x, y)
  >>> tf.keras.backend.int_shape(xy)
  (2, 4, 5)
  """
  # N-D case: emulate numpy.dot by flattening both operands to matrices,
  # doing one matmul, and restoring the target output shape.
  # NOTE(review): if `ndim(y)` is None while `ndim(x)` is known, the
  # `ndim(y) > 2` comparison would raise a TypeError -- presumably callers
  # always pass tensors of known rank; confirm.
  if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
    # Build a per-axis shape where statically-known dims are Python ints and
    # unknown dims fall back to the corresponding dynamic shape scalar.
    x_shape = []
    for i, s in zip(int_shape(x), array_ops_stack.unstack(array_ops.shape(x))):
      if i is not None:
        x_shape.append(i)
      else:
        x_shape.append(s)
    x_shape = tuple(x_shape)
    y_shape = []
    for i, s in zip(int_shape(y), array_ops_stack.unstack(array_ops.shape(y))):
      if i is not None:
        y_shape.append(i)
      else:
        y_shape.append(s)
    y_shape = tuple(y_shape)
    # Move y's second-to-last axis (the contraction axis) to the front so
    # both operands can be flattened into 2-D matrices.
    y_permute_dim = list(range(ndim(y)))
    y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
    xt = array_ops.reshape(x, [-1, x_shape[-1]])
    yt = array_ops.reshape(
        array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
    # Output shape is x's shape minus its last axis, concatenated with y's
    # shape minus its batch-of-contraction axes.
    return array_ops.reshape(
        math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
  # 2-D case: plain (possibly sparse-dense) matrix multiplication.
  if is_sparse(x):
    out = sparse_ops.sparse_tensor_dense_matmul(x, y)
  else:
    out = math_ops.matmul(x, y)
  return out
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_dot(x, y, axes=None):
  """Batchwise dot product.

  `batch_dot` is used to compute dot product of `x` and `y` when
  `x` and `y` are data in batch, i.e. in a shape of
  `(batch_size, :)`.
  `batch_dot` results in a tensor or variable with less dimensions
  than the input. If the number of dimensions is reduced to 1,
  we use `expand_dims` to make sure that ndim is at least 2.

  Args:
    x: Keras tensor or variable with `ndim >= 2`.
    y: Keras tensor or variable with `ndim >= 2`.
    axes: Tuple or list of integers with target dimensions, or single integer.
      The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]` should be equal.

  Returns:
    A tensor with shape equal to the concatenation of `x`'s shape
    (less the dimension that was summed over) and `y`'s shape
    (less the batch dimension and the dimension that was summed over).
    If the final rank is 1, we reshape it to `(batch_size, 1)`.

  Examples:

  >>> x_batch = tf.keras.backend.ones(shape=(32, 20, 1))
  >>> y_batch = tf.keras.backend.ones(shape=(32, 30, 20))
  >>> xy_batch_dot = tf.keras.backend.batch_dot(x_batch, y_batch, axes=(1, 2))
  >>> tf.keras.backend.int_shape(xy_batch_dot)
  (32, 1, 30)

  Shape inference:
    Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
    If `axes` is (1, 2), to find the output shape of resultant tensor,
    loop through each dimension in `x`'s shape and `y`'s shape:
    * `x.shape[0]` : 100 : append to output shape
    * `x.shape[1]` : 20 : do not append to output shape,
      dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
    * `y.shape[0]` : 100 : do not append to output shape,
      always ignore first dimension of `y`
    * `y.shape[1]` : 30 : append to output shape
    * `y.shape[2]` : 20 : do not append to output shape,
      dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
    `output_shape` = `(100, 30)`
  """
  x_shape = int_shape(x)
  y_shape = int_shape(y)
  x_ndim = len(x_shape)
  y_ndim = len(y_shape)
  if x_ndim < 2 or y_ndim < 2:
    raise ValueError('Cannot do batch_dot on inputs '
                     'with rank < 2. '
                     'Received inputs with shapes ' +
                     str(x_shape) + ' and ' +
                     str(y_shape) + '.')
  x_batch_size = x_shape[0]
  y_batch_size = y_shape[0]
  # Static batch sizes must agree when both are known.
  if x_batch_size is not None and y_batch_size is not None:
    if x_batch_size != y_batch_size:
      raise ValueError('Cannot do batch_dot on inputs '
                       'with different batch sizes. '
                       'Received inputs with shapes ' +
                       str(x_shape) + ' and ' +
                       str(y_shape) + '.')
  if isinstance(axes, int):
    axes = [axes, axes]
  # Default contraction axes: last axis of x against the second-to-last
  # axis of y (or y's last axis when y is a matrix).
  if axes is None:
    if y_ndim == 2:
      axes = [x_ndim - 1, y_ndim - 1]
    else:
      axes = [x_ndim - 1, y_ndim - 2]
  if py_any(isinstance(a, (list, tuple)) for a in axes):
    raise ValueError('Multiple target dimensions are not supported. ' +
                     'Expected: None, int, (int, int), ' +
                     'Provided: ' + str(axes))
  # if tuple, convert to list.
  axes = list(axes)
  # convert negative indices.
  if axes[0] < 0:
    axes[0] += x_ndim
  if axes[1] < 0:
    axes[1] += y_ndim
  # sanity checks
  if 0 in axes:
    raise ValueError('Cannot perform batch_dot over axis 0. '
                     'If your inputs are not batched, '
                     'add a dummy batch dimension to your '
                     'inputs using K.expand_dims(x, 0)')
  a0, a1 = axes
  d1 = x_shape[a0]
  d2 = y_shape[a1]
  # The contracted dimensions must match when both are statically known.
  if d1 is not None and d2 is not None and d1 != d2:
    raise ValueError('Cannot do batch_dot on inputs with shapes ' +
                     str(x_shape) + ' and ' + str(y_shape) +
                     ' with axes=' + str(axes) + '. x.shape[%d] != '
                     'y.shape[%d] (%d != %d).' % (axes[0], axes[1], d1, d2))
  # backup ndims. Need them later.
  orig_x_ndim = x_ndim
  orig_y_ndim = y_ndim
  # if rank is 2, expand to 3.
  if x_ndim == 2:
    x = array_ops.expand_dims(x, 1)
    a0 += 1
    x_ndim += 1
  if y_ndim == 2:
    y = array_ops.expand_dims(y, 2)
    y_ndim += 1
  # bring x's dimension to be reduced to last axis.
  if a0 != x_ndim - 1:
    pattern = list(range(x_ndim))
    for i in range(a0, x_ndim - 1):
      pattern[i] = pattern[i + 1]
    pattern[-1] = a0
    x = array_ops.transpose(x, pattern)
  # bring y's dimension to be reduced to axis 1.
  if a1 != 1:
    pattern = list(range(y_ndim))
    for i in range(a1, 1, -1):
      pattern[i] = pattern[i - 1]
    pattern[1] = a1
    y = array_ops.transpose(y, pattern)
  # normalize both inputs to rank 3, so a single batched matmul suffices.
  if x_ndim > 3:
    # squash middle dimensions of x.
    x_shape = shape(x)
    x_mid_dims = x_shape[1:-1]
    x_squashed_shape = array_ops_stack.stack(
        [x_shape[0], -1, x_shape[-1]])
    x = array_ops.reshape(x, x_squashed_shape)
    x_squashed = True
  else:
    x_squashed = False
  if y_ndim > 3:
    # squash trailing dimensions of y.
    y_shape = shape(y)
    y_trail_dims = y_shape[2:]
    y_squashed_shape = array_ops_stack.stack(
        [y_shape[0], y_shape[1], -1])
    y = array_ops.reshape(y, y_squashed_shape)
    y_squashed = True
  else:
    y_squashed = False
  result = math_ops.matmul(x, y)
  # if inputs were squashed, we have to reshape the matmul output.
  output_shape = array_ops.shape(result)
  do_reshape = False
  if x_squashed:
    output_shape = array_ops.concat(
        [output_shape[:1],
         x_mid_dims,
         output_shape[-1:]], 0)
    do_reshape = True
  if y_squashed:
    output_shape = array_ops.concat([output_shape[:-1], y_trail_dims], 0)
    do_reshape = True
  if do_reshape:
    result = array_ops.reshape(result, output_shape)
  # if the inputs were originally rank 2, we remove the added 1 dim.
  if orig_x_ndim == 2:
    result = array_ops.squeeze(result, 1)
  elif orig_y_ndim == 2:
    result = array_ops.squeeze(result, -1)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def transpose(x):
  """Transposes a tensor and returns it.

  Args:
    x: Tensor or variable.

  Returns:
    A tensor.

  Example:

  >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
  >>> var_transposed = tf.keras.backend.transpose(var)
  >>> tf.keras.backend.eval(var_transposed)
  array([[1., 4.],
         [2., 5.],
         [3., 6.]], dtype=float32)
  """
  transposed = array_ops.transpose(x)
  return transposed
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def gather(reference, indices):
  """Retrieves the elements of indices `indices` in the tensor `reference`.

  Args:
    reference: A tensor.
    indices: An integer tensor of indices.

  Returns:
    A tensor of same type as `reference`.

  Example:

  >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
  >>> var_gathered = tf.keras.backend.gather(var, [0, 1, 0])
  >>> tf.keras.backend.eval(var_gathered)
  array([[1., 2., 3.],
         [4., 5., 6.],
         [1., 2., 3.]], dtype=float32)
  """
  gathered = array_ops.gather(reference, indices)
  return gathered
# ELEMENT-WISE OPERATIONS
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def max(x, axis=None, keepdims=False):
  """Maximum value in a tensor.

  Args:
    x: A tensor or variable.
    axis: An integer, the axis to find maximum values.
    keepdims: A boolean, whether to keep the dimensions or not.
      If `keepdims` is `False`, the rank of the tensor is reduced
      by 1. If `keepdims` is `True`,
      the reduced dimension is retained with length 1.

  Returns:
    A tensor with maximum values of `x`.
  """
  return math_ops.reduce_max(x, axis=axis, keepdims=keepdims)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def min(x, axis=None, keepdims=False):
  """Minimum value in a tensor.

  Args:
    x: A tensor or variable.
    axis: An integer, the axis to find minimum values.
    keepdims: A boolean, whether to keep the dimensions or not.
      If `keepdims` is `False`, the rank of the tensor is reduced
      by 1. If `keepdims` is `True`,
      the reduced dimension is retained with length 1.

  Returns:
    A tensor with minimum values of `x`.
  """
  return math_ops.reduce_min(x, axis=axis, keepdims=keepdims)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sum(x, axis=None, keepdims=False):
  """Sum of the values in a tensor, optionally along `axis`.

  Args:
    x: A tensor or variable.
    axis: An integer, the axis to sum over; `None` sums all axes.
    keepdims: A boolean. If `True` the reduced axis is retained with
      length 1; if `False` the rank drops by 1.

  Returns:
    A tensor with the sum of `x`.
  """
  return math_ops.reduce_sum(x, axis=axis, keepdims=keepdims)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def prod(x, axis=None, keepdims=False):
  """Product of the values in a tensor, optionally along `axis`.

  Args:
    x: A tensor or variable.
    axis: An integer, the axis to multiply over; `None` reduces all axes.
    keepdims: A boolean. If `True` the reduced axis is retained with
      length 1; if `False` the rank drops by 1.

  Returns:
    A tensor with the product of the elements of `x`.
  """
  return math_ops.reduce_prod(x, axis=axis, keepdims=keepdims)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cumsum(x, axis=0):
  """Cumulative sum of the values in a tensor along `axis`.

  Args:
    x: A tensor or variable.
    axis: An integer, the axis to accumulate along.

  Returns:
    A tensor of the cumulative sums of `x` along `axis`.
  """
  accumulated = math_ops.cumsum(x, axis=axis)
  return accumulated
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cumprod(x, axis=0):
  """Cumulative product of the values in a tensor along `axis`.

  Args:
    x: A tensor or variable.
    axis: An integer, the axis to accumulate along.

  Returns:
    A tensor of the cumulative products of `x` along `axis`.
  """
  accumulated = math_ops.cumprod(x, axis=axis)
  return accumulated
@doc_controls.do_not_generate_docs
def var(x, axis=None, keepdims=False):
  """Variance of a tensor, optionally along `axis`.

  Args:
    x: A tensor or variable. Boolean inputs are cast to the default float.
    axis: An integer, the axis to reduce over; `None` reduces all axes.
    keepdims: A boolean. If `True` the reduced axis is retained with
      length 1; if `False` the rank drops by 1.

  Returns:
    A tensor with the variance of the elements of `x`.
  """
  # Booleans cannot be averaged; promote them to the backend float type.
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def std(x, axis=None, keepdims=False):
  """Standard deviation of a tensor, optionally along `axis`.

  Alias of `tf.math.reduce_std` with boolean promotion.

  Args:
    x: A tensor or variable with a numerical dtype. Boolean inputs are
      cast to the default float type first.
    axis: An integer in `[-rank(x), rank(x))`, the axis to reduce over;
      `None` (the default) reduces all axes.
    keepdims: A boolean. If `True` the reduced axis is retained with
      length 1; if `False` the rank drops by 1.

  Returns:
    A tensor with the standard deviation of the elements of `x`.
  """
  # Booleans cannot be averaged; promote them to the backend float type.
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def mean(x, axis=None, keepdims=False):
  """Mean of a tensor, optionally along the given axes.

  Args:
    x: A tensor or variable. Boolean inputs are cast to the default float.
    axis: A list of integers, the axes to average over; `None` reduces all.
    keepdims: A boolean. If `True` each reduced axis is retained with
      length 1; if `False` the rank drops by 1 per reduced axis.

  Returns:
    A tensor with the mean of the elements of `x`.
  """
  # Booleans cannot be averaged; promote them to the backend float type.
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  return math_ops.reduce_mean(x, axis=axis, keepdims=keepdims)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def any(x, axis=None, keepdims=False):
  """Bitwise reduction (logical OR) of a tensor.

  Args:
    x: Tensor or variable; cast to boolean before reducing.
    axis: Axis along which to perform the reduction.
    keepdims: Whether to drop or keep the reduced axes.

  Returns:
    A boolean tensor (0s and 1s).
  """
  as_bool = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_any(as_bool, axis=axis, keepdims=keepdims)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def all(x, axis=None, keepdims=False):
  """Bitwise reduction (logical AND) of a tensor.

  Args:
    x: Tensor or variable; cast to boolean before reducing.
    axis: Axis along which to perform the reduction.
    keepdims: Whether to drop or keep the reduced axes.

  Returns:
    A boolean tensor (0s and 1s).
  """
  as_bool = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_all(as_bool, axis=axis, keepdims=keepdims)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def argmax(x, axis=-1):
  """Index of the maximum value along an axis.

  Args:
    x: Tensor or variable.
    axis: Axis along which to look for the maximum.

  Returns:
    An integer tensor of indices.
  """
  indices = math_ops.argmax(x, axis)
  return indices
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def argmin(x, axis=-1):
  """Index of the minimum value along an axis.

  Args:
    x: Tensor or variable.
    axis: Axis along which to look for the minimum.

  Returns:
    An integer tensor of indices.
  """
  indices = math_ops.argmin(x, axis)
  return indices
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def square(x):
  """Element-wise square.

  Args:
    x: Tensor or variable.

  Returns:
    A tensor of the same shape with each element squared.
  """
  squared = math_ops.square(x)
  return squared
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def abs(x):
  """Element-wise absolute value.

  Args:
    x: Tensor or variable.

  Returns:
    A tensor of the same shape with absolute values.
  """
  magnitude = math_ops.abs(x)
  return magnitude
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sqrt(x):
  """Element-wise square root with negative values clipped to 0.

  Negative entries are floored at zero before the root is taken, so the
  result is always real-valued (never NaN from a negative input).

  Args:
    x: Tensor or variable.

  Returns:
    A tensor.
  """
  floor = _constant_to_tensor(0., x.dtype.base_dtype)
  clipped = math_ops.maximum(x, floor)
  return math_ops.sqrt(clipped)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def exp(x):
  """Element-wise exponential.

  Args:
    x: Tensor or variable.

  Returns:
    A tensor with `e**x` computed element-wise.
  """
  result = math_ops.exp(x)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def log(x):
  """Element-wise natural logarithm.

  Args:
    x: Tensor or variable.

  Returns:
    A tensor with `ln(x)` computed element-wise.
  """
  result = math_ops.log(x)
  return result
def logsumexp(x, axis=None, keepdims=False):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Numerically more stable than a literal `log(sum(exp(x)))`: it avoids
  overflow from exponentiating large inputs and underflow from taking the
  log of tiny sums.

  Args:
    x: A tensor or variable.
    axis: An integer, the axis to reduce over; `None` reduces all axes.
    keepdims: A boolean. If `True` the reduced axis is retained with
      length 1; if `False` the rank drops by 1.

  Returns:
    The reduced tensor.
  """
  return math_ops.reduce_logsumexp(x, axis=axis, keepdims=keepdims)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def round(x):
  """Element-wise rounding to the closest integer.

  Ties are resolved with the "half to even" (banker's rounding) rule.

  Args:
    x: Tensor or variable.

  Returns:
    A tensor.
  """
  rounded = math_ops.round(x)
  return rounded
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sign(x):
  """Element-wise sign.

  Args:
    x: Tensor or variable.

  Returns:
    A tensor.
  """
  result = math_ops.sign(x)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def pow(x, a):
  """Element-wise exponentiation (`x ** a`).

  Args:
    x: Tensor or variable.
    a: Python integer (the exponent).

  Returns:
    A tensor.
  """
  result = math_ops.pow(x, a)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def clip(x, min_value, max_value):
  """Element-wise value clipping.

  Args:
    x: Tensor or variable.
    min_value: Python float, integer, or tensor; `None` means unbounded below.
    max_value: Python float, integer, or tensor; `None` means unbounded above.

  Returns:
    A tensor with values limited to `[min_value, max_value]`.
  """
  # When both bounds are plain Python numbers, an inverted range collapses
  # to the lower bound (mirrors np.clip semantics).
  both_numeric = (isinstance(min_value, (int, float)) and
                  isinstance(max_value, (int, float)))
  if both_numeric and max_value < min_value:
    max_value = min_value
  # Missing bounds become infinities so clip_by_value leaves that side alone.
  if min_value is None:
    min_value = -np.inf
  if max_value is None:
    max_value = np.inf
  return clip_ops.clip_by_value(x, min_value, max_value)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def equal(x, y):
  """Element-wise equality between two tensors.

  Args:
    x: Tensor or variable.
    y: Tensor or variable.

  Returns:
    A bool tensor.
  """
  result = math_ops.equal(x, y)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def not_equal(x, y):
  """Element-wise inequality between two tensors.

  Args:
    x: Tensor or variable.
    y: Tensor or variable.

  Returns:
    A bool tensor.
  """
  result = math_ops.not_equal(x, y)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def greater(x, y):
  """Element-wise truth value of (x > y).

  Args:
    x: Tensor or variable.
    y: Tensor or variable.

  Returns:
    A bool tensor.
  """
  result = math_ops.greater(x, y)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def greater_equal(x, y):
  """Element-wise truth value of (x >= y).

  Args:
    x: Tensor or variable.
    y: Tensor or variable.

  Returns:
    A bool tensor.
  """
  result = math_ops.greater_equal(x, y)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def less(x, y):
  """Element-wise truth value of (x < y).

  Args:
    x: Tensor or variable.
    y: Tensor or variable.

  Returns:
    A bool tensor.
  """
  result = math_ops.less(x, y)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def less_equal(x, y):
  """Element-wise truth value of (x <= y).

  Args:
    x: Tensor or variable.
    y: Tensor or variable.

  Returns:
    A bool tensor.
  """
  result = math_ops.less_equal(x, y)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def maximum(x, y):
  """Element-wise maximum of two tensors.

  Args:
    x: Tensor or variable.
    y: Tensor or variable.

  Returns:
    A tensor holding, per element, the larger of the `x` and `y` values.
  """
  result = math_ops.maximum(x, y)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def minimum(x, y):
  """Element-wise minimum of two tensors.

  Args:
    x: Tensor or variable.
    y: Tensor or variable.

  Returns:
    A tensor holding, per element, the smaller of the `x` and `y` values.
  """
  result = math_ops.minimum(x, y)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sin(x):
  """Computes the sine of x element-wise.

  Args:
    x: Tensor or variable.

  Returns:
    A tensor.
  """
  result = math_ops.sin(x)
  return result
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cos(x):
  """Computes the cosine of x element-wise.

  Args:
    x: Tensor or variable.

  Returns:
    A tensor.
  """
  result = math_ops.cos(x)
  return result
def _regular_normalize_batch_in_training(x,
                                         gamma,
                                         beta,
                                         reduction_axes,
                                         epsilon=1e-3):
  """Non-fused version of `normalize_batch_in_training`.

  Computes the batch moments over `reduction_axes` and applies standard
  batch normalization with them.

  Args:
    x: Input tensor or variable.
    gamma: Tensor by which to scale the input.
    beta: Tensor with which to center the input.
    reduction_axes: Iterable of integers, axes over which to normalize.
    epsilon: Fuzz factor.

  Returns:
    A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  batch_mean, batch_var = nn.moments(x, reduction_axes, None, None, False)
  normalized = nn.batch_normalization(x, batch_mean, batch_var, beta, gamma,
                                      epsilon)
  return normalized, batch_mean, batch_var
def _broadcast_normalize_batch_in_training(x,
                                           gamma,
                                           beta,
                                           reduction_axes,
                                           epsilon=1e-3):
  """Non-fused, broadcast version of `normalize_batch_in_training`.

  Used when the moments cannot broadcast against `x` directly, so they are
  reshaped to a broadcast-compatible shape first.

  Args:
    x: Input tensor or variable.
    gamma: Tensor by which to scale the input.
    beta: Tensor with which to center the input.
    reduction_axes: iterable of integers,
        axes over which to normalize.
    epsilon: Fuzz factor.

  Returns:
    A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  mean, var = nn.moments(x, reduction_axes, None, None, False)
  # Build a shape that is 1 on every reduced axis and the dynamic size of
  # `x` elsewhere, so the moments broadcast element-wise against `x`.
  target_shape = []
  for axis in range(ndim(x)):
    if axis in reduction_axes:
      target_shape.append(1)
    else:
      target_shape.append(array_ops.shape(x)[axis])
  target_shape = array_ops_stack.stack(target_shape)
  broadcast_mean = array_ops.reshape(mean, target_shape)
  broadcast_var = array_ops.reshape(var, target_shape)
  # gamma / beta are optional; only reshape them when provided.
  if gamma is None:
    broadcast_gamma = None
  else:
    broadcast_gamma = array_ops.reshape(gamma, target_shape)
  if beta is None:
    broadcast_beta = None
  else:
    broadcast_beta = array_ops.reshape(beta, target_shape)
  normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
                                  broadcast_beta, broadcast_gamma, epsilon)
  # Note: the *un*-broadcast moments are what callers receive.
  return normed, mean, var
def _fused_normalize_batch_in_training(x,
                                       gamma,
                                       beta,
                                       reduction_axes,
                                       epsilon=1e-3):
  """Fused version of `normalize_batch_in_training`.

  Args:
    x: Input tensor or variable.
    gamma: Tensor by which to scale the input.
    beta: Tensor with which to center the input.
    reduction_axes: iterable of integers,
        axes over which to normalize.
    epsilon: Fuzz factor.

  Returns:
    A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  # Reducing over [0, 1, 2] leaves the last axis as channels (NHWC);
  # otherwise the caller guarantees [0, 2, 3], i.e. channels-first (NCHW).
  if list(reduction_axes) == [0, 1, 2]:
    normalization_axis = 3
    tf_data_format = 'NHWC'
  else:
    normalization_axis = 1
    tf_data_format = 'NCHW'
  # fused_batch_norm requires concrete scale/offset tensors, so substitute
  # identity scale (1.0) and zero offset when they were not supplied.
  if gamma is None:
    gamma = constant_op.constant(
        1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
  if beta is None:
    beta = constant_op.constant(
        0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
  return nn.fused_batch_norm(
      x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
@doc_controls.do_not_generate_docs
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
  """Computes mean and std for batch then apply batch_normalization on batch.

  Dispatches to the fused, broadcast, or regular implementation depending on
  the input rank, the reduction axes, and NCHW support on this platform.

  Args:
    x: Input tensor or variable.
    gamma: Tensor by which to scale the input.
    beta: Tensor with which to center the input.
    reduction_axes: iterable of integers,
        axes over which to normalize.
    epsilon: Fuzz factor.

  Returns:
    A tuple length of 3, `(normalized_tensor, mean, variance)`.
  """
  # The fused kernel only handles 4D inputs normalized over all axes but
  # the channel axis ([0, 1, 2] == NHWC, [0, 2, 3] == NCHW).
  if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:
    # NCHW layout without platform NCHW support falls back to broadcasting.
    if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:
      return _broadcast_normalize_batch_in_training(
          x, gamma, beta, reduction_axes, epsilon=epsilon)
    return _fused_normalize_batch_in_training(
        x, gamma, beta, reduction_axes, epsilon=epsilon)
  else:
    # All leading axes reduced (channels-last style): moments already
    # broadcast, so the regular path suffices; otherwise reshape first.
    if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
      return _regular_normalize_batch_in_training(
          x, gamma, beta, reduction_axes, epsilon=epsilon)
    else:
      return _broadcast_normalize_batch_in_training(
          x, gamma, beta, reduction_axes, epsilon=epsilon)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
  """Applies batch normalization on x given mean, var, beta and gamma.

  I.e. returns:
  `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta`

  Args:
    x: Input tensor or variable.
    mean: Mean of batch.
    var: Variance of batch.
    beta: Tensor with which to center the input.
    gamma: Tensor by which to scale the input.
    axis: Integer, the axis that should be normalized.
        (typically the features axis).
    epsilon: Fuzz factor.

  Returns:
    A tensor.
  """
  if ndim(x) == 4:
    # The CPU implementation of `fused_batch_norm` only supports NHWC
    # Map the normalization axis to a TF data-format string; any other
    # axis disqualifies the fused path.
    if axis == 1 or axis == -3:
      tf_data_format = 'NCHW'
    elif axis == 3 or axis == -1:
      tf_data_format = 'NHWC'
    else:
      tf_data_format = None
    # `and` binds tighter than `or`: NHWC always qualifies; NCHW only
    # when the platform supports it.
    if (tf_data_format == 'NHWC' or
        tf_data_format == 'NCHW' and _has_nchw_support()):
      # The mean / var / beta / gamma tensors may be broadcasted
      # so they may have extra axes of size 1, which should be squeezed.
      if ndim(mean) > 1:
        mean = array_ops.reshape(mean, [-1])
      if ndim(var) > 1:
        var = array_ops.reshape(var, [-1])
      # fused_batch_norm requires concrete offset/scale vectors.
      if beta is None:
        beta = zeros_like(mean)
      elif ndim(beta) > 1:
        beta = array_ops.reshape(beta, [-1])
      if gamma is None:
        gamma = ones_like(mean)
      elif ndim(gamma) > 1:
        gamma = array_ops.reshape(gamma, [-1])
      # is_training=False: use the provided moments rather than batch stats.
      y, _, _ = nn.fused_batch_norm(
          x,
          gamma,
          beta,
          epsilon=epsilon,
          mean=mean,
          variance=var,
          data_format=tf_data_format,
          is_training=False
      )
      return y
  # Fallback: plain (non-fused) batch normalization.
  return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
# SHAPE OPERATIONS
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def concatenate(tensors, axis=-1):
  """Concatenates a list of tensors alongside the specified axis.

  Handles dense, all-sparse, and all-ragged inputs; a mixed list is
  densified before concatenation.

  Args:
    tensors: List of tensors to concatenate.
    axis: Concatenation axis; negative values are normalized against the
      rank of the first tensor.

  Returns:
    A tensor.
  """
  # Normalize a negative axis using the first tensor's rank.
  if axis < 0:
    rank = ndim(tensors[0])
    axis = axis % rank if rank else 0
  if py_all(is_sparse(x) for x in tensors):
    return sparse_ops.sparse_concat(axis, tensors)
  if py_all(isinstance(x, ragged_tensor.RaggedTensor) for x in tensors):
    return array_ops.concat(tensors, axis)
  return array_ops.concat([to_dense(x) for x in tensors], axis)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def reshape(x, shape):
  """Reshapes a tensor to the specified shape.

  Args:
    x: Tensor or variable.
    shape: Target shape tuple.

  Returns:
    A tensor with the same elements as `x` arranged in `shape`.
  """
  reshaped = array_ops.reshape(x, shape)
  return reshaped
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def permute_dimensions(x, pattern):
  """Permutes the axes of a tensor.

  Args:
    x: Tensor or variable.
    pattern: A tuple of dimension indices, e.g. `(0, 2, 1)`.

  Returns:
    A tensor whose axes are reordered according to `pattern`.
  """
  permuted = array_ops.transpose(x, perm=pattern)
  return permuted
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def resize_images(x, height_factor, width_factor, data_format,
                  interpolation='nearest'):
  """Resizes the images contained in a 4D tensor.

  Args:
    x: Tensor or variable to resize.
    height_factor: Positive integer.
    width_factor: Positive integer.
    data_format: One of `"channels_first"`, `"channels_last"`.
    interpolation: A string, one of `nearest` or `bilinear`.

  Returns:
    A tensor.

  Raises:
    ValueError: in case of incorrect value for
      `data_format` or `interpolation`.
  """
  # Locate the spatial (rows, cols) axes for the given layout.
  if data_format == 'channels_first':
    rows, cols = 2, 3
  elif data_format == 'channels_last':
    rows, cols = 1, 2
  else:
    raise ValueError('Invalid `data_format` argument: %s' % (data_format,))
  # Prefer a static target shape; fall back to a dynamic one when the
  # spatial dimensions are not fully known at graph-construction time.
  new_shape = x.shape[rows:cols + 1]
  if new_shape.is_fully_defined():
    new_shape = constant_op.constant(new_shape.as_list(), dtype='int32')
  else:
    new_shape = array_ops.shape_v2(x)[rows:cols + 1]
  new_shape *= constant_op.constant(
      np.array([height_factor, width_factor], dtype='int32'))
  # resize_images_v2 expects NHWC, so temporarily move channels last.
  if data_format == 'channels_first':
    x = permute_dimensions(x, [0, 2, 3, 1])
  if interpolation == 'nearest':
    x = image_ops.resize_images_v2(
        x, new_shape, method=image_ops.ResizeMethod.NEAREST_NEIGHBOR)
  elif interpolation == 'bilinear':
    x = image_ops.resize_images_v2(x, new_shape,
                                   method=image_ops.ResizeMethod.BILINEAR)
  else:
    raise ValueError('interpolation should be one '
                     'of "nearest" or "bilinear".')
  # Restore the original channels-first layout.
  if data_format == 'channels_first':
    x = permute_dimensions(x, [0, 3, 1, 2])
  return x
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
  """Resizes the volume contained in a 5D tensor.

  Each spatial axis (depth, height, width) is enlarged by repeating its
  elements the given number of times.

  Args:
    x: Tensor or variable to resize.
    depth_factor: Positive integer.
    height_factor: Positive integer.
    width_factor: Positive integer.
    data_format: One of `"channels_first"`, `"channels_last"`.

  Returns:
    A tensor.

  Raises:
    ValueError: if `data_format` is neither `channels_last` nor
      `channels_first`.
  """
  # The first spatial axis is 2 for channels-first and 1 for channels-last;
  # the remaining spatial axes follow consecutively.
  if data_format == 'channels_first':
    first_spatial = 2
  elif data_format == 'channels_last':
    first_spatial = 1
  else:
    raise ValueError('Invalid data_format: ' + str(data_format))
  output = repeat_elements(x, depth_factor, axis=first_spatial)
  output = repeat_elements(output, height_factor, axis=first_spatial + 1)
  output = repeat_elements(output, width_factor, axis=first_spatial + 2)
  return output
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def repeat_elements(x, rep, axis):
  """Repeats the elements of a tensor along an axis, like `np.repeat`.

  If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
  will have shape `(s1, s2 * rep, s3)`.

  Args:
    x: Tensor or variable.
    rep: Python integer, number of times to repeat.
    axis: Axis along which to repeat.

  Returns:
    A tensor.
  """
  x_shape = x.shape.as_list()
  # For static axis
  if x_shape[axis] is not None:
    # slices along the repeat axis
    splits = array_ops.split(value=x,
                             num_or_size_splits=x_shape[axis],
                             axis=axis)
    # repeat each slice the given number of reps
    x_rep = [s for s in splits for _ in range(rep)]
    return concatenate(x_rep, axis)
  # Here we use tf.tile to mimic behavior of np.repeat so that
  # we can handle dynamic shapes (that include None).
  # To do that, we need an auxiliary axis to repeat elements along
  # it and then merge them along the desired axis.
  # Repeating
  auxiliary_axis = axis + 1
  x_shape = array_ops.shape(x)
  x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
  reps = np.ones(len(x.shape) + 1)
  reps[auxiliary_axis] = rep
  x_rep = array_ops.tile(x_rep, reps)
  # Merging: collapse the auxiliary axis back into `axis` by reshaping to
  # the dynamic input shape scaled by `rep` on the repeat axis.
  reps = np.delete(reps, auxiliary_axis)
  reps[axis] = rep
  reps = array_ops.constant(reps, dtype='int32')
  x_shape *= reps
  x_rep = array_ops.reshape(x_rep, x_shape)
  # Fix shape representation: reshape loses the static shape, so restore
  # whatever was statically known about the input.
  x_shape = x.shape.as_list()
  x_rep.set_shape(x_shape)
  x_rep._keras_shape = tuple(x_shape)
  return x_rep
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def repeat(x, n):
  """Repeats a 2D tensor `n` times along a new middle axis.

  If `x` has shape `(samples, dim)` and `n` is `2`, the output has shape
  `(samples, 2, dim)`.

  Args:
    x: Tensor or variable (must be 2D).
    n: Python integer, number of times to repeat.

  Returns:
    A 3D tensor.
  """
  assert ndim(x) == 2
  # Insert a length-1 middle axis, then tile it up to length n.
  expanded = array_ops.expand_dims(x, 1)
  multiples = array_ops_stack.stack([1, n, 1])
  return array_ops.tile(expanded, multiples)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def arange(start, stop=None, step=1, dtype='int32'):
  """Creates a 1D tensor containing a sequence of integers.

  Follows Theano's arange convention: with a single argument, that
  argument is the "stop" value and "start" defaults to 0. The default
  dtype is `'int32'` to match TensorFlow.

  Args:
    start: Start value.
    stop: Stop value.
    step: Difference between two successive values.
    dtype: Integer dtype to use.

  Returns:
    An integer tensor.
  """
  # Match the behavior of numpy and Theano by returning an empty sequence.
  if stop is None and start < 0:
    start = 0
  sequence = math_ops.range(start, limit=stop, delta=step, name='arange')
  if dtype != 'int32':
    sequence = cast(sequence, dtype)
  return sequence
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def tile(x, n):
  """Creates a tensor by tiling `x` by `n`.

  Args:
    x: A tensor or variable.
    n: A list of integers whose length matches the rank of `x`, or a
      single integer (wrapped into a one-element list).

  Returns:
    A tiled tensor.
  """
  multiples = [n] if isinstance(n, int) else n
  return array_ops.tile(x, multiples)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def flatten(x):
  """Flattens a tensor into 1-D.

  Args:
    x: A tensor or variable.

  Returns:
    A 1-D tensor with the same elements as `x`.
  """
  flattened = array_ops.reshape(x, [-1])
  return flattened
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_flatten(x):
  """Turns an nD tensor into a 2D tensor with the same 0th dimension.

  In other words, it flattens each data sample of a batch.

  Args:
    x: A tensor or variable.

  Returns:
    A 2D tensor of shape `(batch, prod(other dims))`.
  """
  # Collapse all axes after the batch axis into one.
  flat_size = prod(shape(x)[1:])
  return array_ops.reshape(x, array_ops_stack.stack([-1, flat_size]))
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def expand_dims(x, axis=-1):
  """Adds a 1-sized dimension at index `axis`.

  Args:
    x: A tensor or variable.
    axis: Position where to add the new axis.

  Returns:
    A tensor with an extra length-1 dimension.
  """
  expanded = array_ops.expand_dims(x, axis)
  return expanded
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def squeeze(x, axis):
  """Removes a 1-sized dimension from the tensor at index `axis`.

  Args:
    x: A tensor or variable.
    axis: Axis to drop.

  Returns:
    A tensor with the same data as `x` but reduced rank.
  """
  squeezed = array_ops.squeeze(x, [axis])
  return squeezed
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def temporal_padding(x, padding=(1, 1)):
  """Pads the middle dimension of a 3D tensor with zeros.

  Args:
    x: Tensor or variable.
    padding: Tuple of 2 integers, how many zeros to add at the start and
      end of dimension 1.

  Returns:
    A padded 3D tensor.
  """
  assert len(padding) == 2
  # Only the time axis (dim 1) is padded; batch and feature axes are not.
  pad_spec = [[0, 0], list(padding), [0, 0]]
  return array_ops.pad(x, pad_spec)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
  """Pads the two spatial dimensions of a 4D tensor with zeros.

  Args:
    x: Tensor or variable.
    padding: Tuple of 2 tuples, the (before, after) padding per spatial
      dimension.
    data_format: One of `channels_last` or `channels_first`; defaults to
      the backend image data format.

  Returns:
    A padded 4D tensor.

  Raises:
    ValueError: if `data_format` is neither `channels_last` nor
      `channels_first`.
  """
  assert len(padding) == 2
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  # Batch and channel axes are never padded.
  spatial = [list(padding[0]), list(padding[1])]
  if data_format == 'channels_first':
    pad_spec = [[0, 0], [0, 0]] + spatial
  else:
    pad_spec = [[0, 0]] + spatial + [[0, 0]]
  return array_ops.pad(x, pad_spec)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
  """Pads a 5D tensor with zeros along the depth, height, width dimensions.

  Pads these dimensions with respectively `padding[0]`, `padding[1]` and
  `padding[2]` zeros on each side. For `channels_last` the 2nd, 3rd and
  4th dimensions are padded; for `channels_first` the 3rd, 4th and 5th.

  Args:
    x: Tensor or variable.
    padding: Tuple of 3 tuples, the (before, after) padding per spatial
      dimension.
    data_format: One of `channels_last` or `channels_first`; defaults to
      the backend image data format.

  Returns:
    A padded 5D tensor.

  Raises:
    ValueError: if `data_format` is neither `channels_last` nor
      `channels_first`.
  """
  assert len(padding) == 3
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  assert len(padding[2]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  # Batch and channel axes are never padded.
  spatial = [list(dim) for dim in padding]
  if data_format == 'channels_first':
    pad_spec = [[0, 0], [0, 0]] + spatial
  else:
    pad_spec = [[0, 0]] + spatial + [[0, 0]]
  return array_ops.pad(x, pad_spec)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def stack(x, axis=0):
  """Stacks a list of rank `R` tensors into a rank `R+1` tensor.

  Args:
    x: List of tensors.
    axis: Axis along which to perform stacking.

  Returns:
    A tensor of rank one higher than the inputs.
  """
  stacked = array_ops_stack.stack(x, axis=axis)
  return stacked
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def one_hot(indices, num_classes):
  """Computes the one-hot representation of an integer tensor.

  Args:
    indices: nD integer tensor of shape
      `(batch_size, dim1, dim2, ... dim(n-1))`.
    num_classes: Integer, number of classes to consider.

  Returns:
    The (n + 1)D one-hot tensor of shape
    `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`.
  """
  encoded = array_ops.one_hot(indices, depth=num_classes, axis=-1)
  return encoded
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def reverse(x, axes):
  """Reverses a tensor along the specified axes.

  Args:
    x: Tensor to reverse.
    axes: Integer or iterable of integers, the axes to reverse.

  Returns:
    A tensor.
  """
  # Normalize a single axis to the list form array_ops.reverse expects.
  axis_list = [axes] if isinstance(axes, int) else axes
  return array_ops.reverse(x, axis_list)
# VALUE MANIPULATION
_VALUE_SET_CODE_STRING = """
>>> K = tf.keras.backend # Common keras convention
>>> v = K.variable(1.)
>>> # reassign
>>> K.set_value(v, 2.)
>>> print(K.get_value(v))
2.0
>>> # increment
>>> K.set_value(v, K.get_value(v) + 1)
>>> print(K.get_value(v))
3.0
Variable semantics in TensorFlow 2 are eager execution friendly. The above
code is roughly equivalent to:
>>> v = tf.Variable(1.)
>>> v.assign(2.)
>>> print(v.numpy())
2.0
>>> v.assign_add(1.)
>>> print(v.numpy())
3.0"""[3:] # Prune first newline and indent to match the docstring template.
@doc_controls.do_not_generate_docs
def get_value(x):
  """Returns the value of a variable.

  `backend.get_value` is the complement of `backend.set_value`, and provides
  a generic interface for reading from variables while abstracting away the
  differences between TensorFlow 1.x and 2.x semantics.

  {snippet}

  Args:
    x: input variable.

  Returns:
    A Numpy array.
  """
  # Non-tensor inputs (plain Python / numpy values) pass through unchanged.
  if not tensor_util.is_tf_type(x):
    return x
  if context.executing_eagerly() or isinstance(x, ops.EagerTensor):
    return x.numpy()
  if not getattr(x, '_in_graph_mode', True):
    # This is a variable which was created in an eager context, but is being
    # evaluated from a Graph.
    with context.eager_mode():
      return x.numpy()
  if ops.executing_eagerly_outside_functions():
    # This method of evaluating works inside the Keras FuncGraph.
    with ops.init_scope():
      return x.numpy()
  # TF1-style graph mode: evaluate through a session on the tensor's graph.
  with x.graph.as_default():
    return x.eval(session=get_session((x,)))
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_get_value(tensors):
  """Returns the values of several tensor variables at once.

  Args:
    tensors: List of ops to run.

  Returns:
    A list of Numpy arrays.

  Raises:
    RuntimeError: If this method is called inside defun.
  """
  if context.executing_eagerly():
    return [x.numpy() for x in tensors]
  if ops.inside_function():  # pylint: disable=protected-access
    raise RuntimeError('Cannot get value inside Tensorflow graph function.')
  if not tensors:
    return []
  # Graph mode: fetch every tensor in a single session run.
  return get_session(tensors).run(tensors)
@doc_controls.do_not_generate_docs
def set_value(x, value):
"""Sets the value of a variable, from a Numpy array.
`backend.set_value` is the complement of `backend.get_value`, and provides
a generic interface for assigning to variables while abstracting away the
differences between TensorFlow 1.x and 2.x semantics.
{snippet}
Args:
x: Variable to set to a new value.
value: Value to set the tensor to, as a Numpy array
(of the same shape).
"""
value = numpy_compat.np_asarray(value, dtype=dtype_numpy(x))
if ops.executing_eagerly_outside_functions():
x.assign(value)
else:
with get_graph().as_default():
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
# In order to support assigning weights to resizable variables in
# Keras, we make a placeholder with the correct number of dimensions
# but with None in each dimension. This way, we can assign weights
# of any size (as long as they have the correct dimensionality).
placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)
assign_placeholder = array_ops.placeholder(
tf_dtype, shape=placeholder_shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
get_session().run(assign_op, feed_dict={assign_placeholder: value})
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_set_value(tuples):
"""Sets the values of many tensor variables at once.
Args:
tuples: a list of tuples `(tensor, value)`.
`value` should be a Numpy array.
"""
if context.executing_eagerly() or ops.inside_function():
for x, value in tuples:
x.assign(numpy_compat.np_asarray(value, dtype=dtype_numpy(x)))
else:
with get_graph().as_default():
if tuples:
assign_ops = []
feed_dict = {}
for x, value in tuples:
value = numpy_compat.np_asarray(value, dtype=dtype_numpy(x))
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
# In order to support assigning weights to resizable variables in
# Keras, we make a placeholder with the correct number of dimensions
# but with None in each dimension. This way, we can assign weights
# of any size (as long as they have the correct dimensionality).
placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)
assign_placeholder = array_ops.placeholder(
tf_dtype, shape=placeholder_shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
assign_ops.append(assign_op)
feed_dict[assign_placeholder] = value
get_session().run(assign_ops, feed_dict=feed_dict)
if get_value.__doc__ is not None:
get_value.__doc__ = get_value.__doc__.format(snippet=_VALUE_SET_CODE_STRING)
if set_value.__doc__ is not None:
set_value.__doc__ = set_value.__doc__.format(snippet=_VALUE_SET_CODE_STRING)
@dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def print_tensor(x, message='', summarize=3):
"""Prints `message` and the tensor value when evaluated.
Note that `print_tensor` returns a new tensor identical to `x`
which should be used in the following code. Otherwise the
print operation is not taken into account during evaluation.
Example:
>>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
>>> _ = tf.keras.backend.print_tensor(x)
[[1 2]
[3 4]]
Args:
x: Tensor to print.
message: Message to print jointly with the tensor.
summarize: The first and last `summarize` elements within each dimension
are recursively printed per Tensor. If None, then the first 3 and last
3 elements of each dimension are printed for each tensor. If set to
-1, it will print all elements of every tensor.
Returns:
The same tensor `x`, unchanged.
"""
if isinstance(x, tensor_lib.Tensor) and hasattr(x, 'graph'):
with get_graph().as_default():
op = logging_ops.print_v2(
message, x, output_stream=sys.stdout, summarize=summarize)
with ops.control_dependencies([op]):
return array_ops.identity(x)
else:
logging_ops.print_v2(
message, x, output_stream=sys.stdout, summarize=summarize)
return x
# GRAPH MANIPULATION
| _TfDeviceCaptureOp |
python | kamyu104__LeetCode-Solutions | Python/number-of-wonderful-substrings.py | {
"start": 32,
"end": 504
} | class ____(object):
def wonderfulSubstrings(self, word):
"""
:type word: str
:rtype: int
"""
ALPHABET_SIZE = 10
count = [0]*(2**ALPHABET_SIZE)
count[0] = 1
result = curr = 0
for c in word:
curr ^= 1<<(ord(c)-ord('a'))
result += count[curr]
result += sum(count[curr^(1<<i)] for i in xrange(ALPHABET_SIZE))
count[curr] += 1
return result
| Solution |
python | huggingface__transformers | src/transformers/models/gpt_oss/modular_gpt_oss.py | {
"start": 16695,
"end": 19452
} | class ____(MixtralModel):
_no_split_modules = ["GptOssDecoderLayer"]
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
}
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
"sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
}
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
| GptOssModel |
python | run-llama__llama_index | llama-index-core/llama_index/core/vector_stores/types.py | {
"start": 6424,
"end": 6681
} | class ____(BaseModel):
"""
Information about a vector store (content and supported metadata filters).
Currently only used by VectorIndexAutoRetriever.
"""
metadata_info: List[MetadataInfo]
content_info: str
@dataclass
| VectorStoreInfo |
python | getsentry__sentry | tests/sentry/taskworker/test_client.py | {
"start": 2699,
"end": 31546
} | class ____(grpc.RpcError):
"""Grpc error are elusive and this mock simulates the interface in mypy stubs"""
def __init__(self, code, message):
self._code = code
self._message = message
def code(self) -> grpc.StatusCode:
return self._code
def details(self) -> str:
return self._message
def result(self):
raise self
def test_make_broker_hosts() -> None:
hosts = make_broker_hosts(host_prefix="broker:50051", num_brokers=3)
assert len(hosts) == 3
assert hosts == ["broker-0:50051", "broker-1:50051", "broker-2:50051"]
hosts = make_broker_hosts(
host_prefix="",
num_brokers=None,
host_list="broker:50051, broker-a:50051 , , broker-b:50051",
)
assert len(hosts) == 3
assert hosts == ["broker:50051", "broker-a:50051", "broker-b:50051"]
@django_db_all
def test_init_no_hosts() -> None:
with pytest.raises(AssertionError) as err:
TaskworkerClient(hosts=[])
assert "You must provide at least one RPC host" in str(err)
@django_db_all
def test_health_check_is_debounced() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="abc123",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/SetTaskStatus",
SetTaskStatusResponse(
task=TaskActivation(
id="abc123",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
health_check_path = Path(f"/tmp/{''.join(random.choices(string.ascii_letters, k=16))}")
client = TaskworkerClient(
["localhost-0:50051"],
health_check_settings=HealthCheckSettings(health_check_path, 1),
)
client._health_check_settings.file_path = Mock() # type: ignore[union-attr]
_ = client.get_task()
_ = client.get_task()
assert client._health_check_settings.file_path.touch.call_count == 1 # type: ignore[union-attr]
with patch("sentry.taskworker.client.client.time") as mock_time:
mock_time.time.return_value = time.time() + 1
_ = client.get_task()
assert client._health_check_settings.file_path.touch.call_count == 2 # type: ignore[union-attr]
@django_db_all
def test_get_task_ok() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="abc123",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
client = TaskworkerClient(["localhost-0:50051"])
result = client.get_task()
assert result
assert result.host == "localhost-0:50051"
assert result.activation.id
assert result.activation.namespace == "testing"
@django_db_all
def test_get_task_writes_to_health_check_file() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="abc123",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
health_check_path = Path(f"/tmp/{''.join(random.choices(string.ascii_letters, k=16))}")
client = TaskworkerClient(
["localhost-0:50051"],
health_check_settings=HealthCheckSettings(health_check_path, 3),
)
_ = client.get_task()
assert health_check_path.exists()
@django_db_all
def test_get_task_with_interceptor() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="abc123",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
metadata=(
(
"sentry-signature",
"3202702605c1b65055c28e7c78a5835e760830cff3e9f995eb7ad5f837130b1f",
),
),
)
secret = '["a long secret value","notused"]'
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
client = TaskworkerClient(["localhost-0:50051"], rpc_secret=secret)
result = client.get_task()
assert result
assert result.host == "localhost-0:50051"
assert result.activation.id
assert result.activation.namespace == "testing"
@django_db_all
def test_get_task_with_namespace() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="abc123",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
client = TaskworkerClient(hosts=make_broker_hosts("localhost:50051", num_brokers=1))
result = client.get_task(namespace="testing")
assert result
assert result.host == "localhost-0:50051"
assert result.activation.id
assert result.activation.namespace == "testing"
@django_db_all
def test_get_task_not_found() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.NOT_FOUND, "no pending task found"),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
client = TaskworkerClient(["localhost:50051"])
result = client.get_task()
assert result is None
@django_db_all
def test_get_task_failure() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.INTERNAL, "something bad"),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
client = TaskworkerClient(["localhost:50051"])
with pytest.raises(grpc.RpcError):
client.get_task()
@django_db_all
def test_update_task_writes_to_health_check_file() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/SetTaskStatus",
SetTaskStatusResponse(
task=TaskActivation(
id="abc123",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
health_check_path = Path(f"/tmp/{''.join(random.choices(string.ascii_letters, k=16))}")
client = TaskworkerClient(
make_broker_hosts("localhost:50051", num_brokers=1),
health_check_settings=HealthCheckSettings(
health_check_path, DEFAULT_WORKER_HEALTH_CHECK_SEC_PER_TOUCH
),
)
_ = client.update_task(
ProcessingResult("abc123", TASK_ACTIVATION_STATUS_RETRY, "localhost-0:50051", 0),
FetchNextTask(namespace=None),
)
assert health_check_path.exists()
@django_db_all
def test_update_task_ok_with_next() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/SetTaskStatus",
SetTaskStatusResponse(
task=TaskActivation(
id="abc123",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
client = TaskworkerClient(make_broker_hosts("localhost:50051", num_brokers=1))
assert set(client._host_to_stubs.keys()) == {"localhost-0:50051"}
result = client.update_task(
ProcessingResult("abc123", TASK_ACTIVATION_STATUS_RETRY, "localhost-0:50051", 0),
FetchNextTask(namespace=None),
)
assert result
assert result.host == "localhost-0:50051"
assert result.activation.id == "abc123"
@django_db_all
def test_update_task_ok_with_next_namespace() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/SetTaskStatus",
SetTaskStatusResponse(
task=TaskActivation(
id="abc123",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
client = TaskworkerClient(make_broker_hosts("localhost:50051", num_brokers=1))
result = client.update_task(
ProcessingResult(
task_id="id",
status=TASK_ACTIVATION_STATUS_RETRY,
host="localhost-0:50051",
receive_timestamp=0,
),
FetchNextTask(namespace="testing"),
)
assert result
assert result.activation.id == "abc123"
assert result.activation.namespace == "testing"
@django_db_all
def test_update_task_ok_no_next() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/SetTaskStatus", SetTaskStatusResponse()
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
client = TaskworkerClient(make_broker_hosts("localhost:50051", num_brokers=1))
result = client.update_task(
ProcessingResult(
task_id="abc123",
status=TASK_ACTIVATION_STATUS_RETRY,
host="localhost-0:50051",
receive_timestamp=0,
),
FetchNextTask(namespace=None),
)
assert result is None
@django_db_all
def test_update_task_not_found() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/SetTaskStatus",
MockGrpcError(grpc.StatusCode.NOT_FOUND, "no pending tasks found"),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
client = TaskworkerClient(["localhost-0:50051"])
result = client.update_task(
ProcessingResult(
task_id="abc123",
status=TASK_ACTIVATION_STATUS_RETRY,
host="localhost-0:50051",
receive_timestamp=0,
),
FetchNextTask(namespace=None),
)
assert result is None
@django_db_all
def test_update_task_unavailable_retain_task_to_host() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/SetTaskStatus",
MockGrpcError(grpc.StatusCode.UNAVAILABLE, "broker down"),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
client = TaskworkerClient(["localhost-0:50051"])
with pytest.raises(MockGrpcError) as err:
client.update_task(
ProcessingResult(
task_id="abc123",
status=TASK_ACTIVATION_STATUS_RETRY,
host="localhost-0:50051",
receive_timestamp=0,
),
FetchNextTask(namespace=None),
)
assert "broker down" in str(err.value)
@django_db_all
def test_client_loadbalance() -> None:
channel_0 = MockChannel()
channel_0.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="0",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
channel_0.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/SetTaskStatus",
SetTaskStatusResponse(task=None),
)
channel_1 = MockChannel()
channel_1.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="1",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
channel_1.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/SetTaskStatus",
SetTaskStatusResponse(task=None),
)
channel_2 = MockChannel()
channel_2.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="2",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
channel_2.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/SetTaskStatus",
SetTaskStatusResponse(task=None),
)
channel_3 = MockChannel()
channel_3.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="3",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
channel_3.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/SetTaskStatus",
SetTaskStatusResponse(task=None),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.side_effect = [channel_0, channel_1, channel_2, channel_3]
with patch("sentry.taskworker.client.client.random.choice") as mock_randchoice:
mock_randchoice.side_effect = [
"localhost-0:50051",
"localhost-1:50051",
"localhost-2:50051",
"localhost-3:50051",
]
client = TaskworkerClient(
hosts=make_broker_hosts(host_prefix="localhost:50051", num_brokers=4),
max_tasks_before_rebalance=1,
)
task_0 = client.get_task()
assert task_0 is not None and task_0.activation.id == "0"
task_1 = client.get_task()
assert task_1 is not None and task_1.activation.id == "1"
task_2 = client.get_task()
assert task_2 is not None and task_2.activation.id == "2"
task_3 = client.get_task()
assert task_3 is not None and task_3.activation.id == "3"
client.update_task(
ProcessingResult(
task_0.activation.id, TASK_ACTIVATION_STATUS_COMPLETE, task_0.host, 0
),
None,
)
client.update_task(
ProcessingResult(
task_1.activation.id, TASK_ACTIVATION_STATUS_COMPLETE, task_1.host, 0
),
None,
)
client.update_task(
ProcessingResult(
task_2.activation.id, TASK_ACTIVATION_STATUS_COMPLETE, task_2.host, 0
),
None,
)
client.update_task(
ProcessingResult(
task_3.activation.id, TASK_ACTIVATION_STATUS_COMPLETE, task_3.host, 0
),
None,
)
@django_db_all
def test_client_loadbalance_on_notfound() -> None:
channel_0 = MockChannel()
channel_0.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.NOT_FOUND, "no pending task found"),
)
channel_1 = MockChannel()
channel_1.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="1",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
channel_1.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/SetTaskStatus",
MockGrpcError(grpc.StatusCode.NOT_FOUND, "no pending task found"),
)
channel_2 = MockChannel()
channel_2.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="2",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.side_effect = [channel_0, channel_1, channel_2]
with patch("sentry.taskworker.client.client.random.choice") as mock_randchoice:
mock_randchoice.side_effect = [
"localhost-0:50051",
"localhost-1:50051",
"localhost-2:50051",
]
client = TaskworkerClient(
hosts=make_broker_hosts(host_prefix="localhost:50051", num_brokers=3),
max_tasks_before_rebalance=30,
)
# Fetch from the first channel, it should return notfound
task_0 = client.get_task()
assert task_0 is None
# Fetch again, this time from channel_1
task_1 = client.get_task()
assert task_1 and task_1.activation.id == "1"
res = client.update_task(
ProcessingResult(
task_1.activation.id, TASK_ACTIVATION_STATUS_COMPLETE, task_1.host, 0
),
None,
)
assert res is None
# Because SetStatus on channel_1 returned notfound the client
# should switch brokers.
task_2 = client.get_task()
assert task_2 and task_2.activation.id == "2"
@django_db_all
def test_client_loadbalance_on_unavailable() -> None:
channel_0 = MockChannel()
channel_0.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.UNAVAILABLE, "host is unavailable"),
)
channel_0.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.UNAVAILABLE, "host is unavailable"),
)
channel_0.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.UNAVAILABLE, "host is unavailable"),
)
channel_1 = MockChannel()
channel_1.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="1",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.side_effect = [channel_0, channel_1]
with patch("sentry.taskworker.client.client.random.choice") as mock_randchoice:
mock_randchoice.side_effect = [
"localhost-0:50051",
"localhost-1:50051",
]
client = TaskworkerClient(
hosts=make_broker_hosts(host_prefix="localhost:50051", num_brokers=2),
max_consecutive_unavailable_errors=3,
)
# Fetch from the first channel, host should be unavailable
with pytest.raises(grpc.RpcError, match="host is unavailable"):
client.get_task()
assert client._num_consecutive_unavailable_errors == 1
# Fetch from the first channel, host should be unavailable
with pytest.raises(grpc.RpcError, match="host is unavailable"):
client.get_task()
assert client._num_consecutive_unavailable_errors == 2
# Fetch from the first channel, host should be unavailable
with pytest.raises(grpc.RpcError, match="host is unavailable"):
client.get_task()
assert client._num_consecutive_unavailable_errors == 3
# Should rebalance to the second host and receive task
task = client.get_task()
assert task and task.activation.id == "1"
assert client._num_consecutive_unavailable_errors == 0
@django_db_all
def test_client_single_host_unavailable() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.UNAVAILABLE, "host is unavailable"),
)
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.UNAVAILABLE, "host is unavailable"),
)
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.UNAVAILABLE, "host is unavailable"),
)
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="1",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
with (patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel,):
mock_channel.return_value = channel
client = TaskworkerClient(
hosts=["localhost-0:50051"],
max_consecutive_unavailable_errors=3,
temporary_unavailable_host_timeout=2,
)
for _ in range(3):
with pytest.raises(grpc.RpcError, match="host is unavailable"):
client.get_task()
assert client._num_consecutive_unavailable_errors == 3
# Verify host was marked as temporarily unavailable
assert "localhost-0:50051" in client._temporary_unavailable_hosts
assert isinstance(client._temporary_unavailable_hosts["localhost-0:50051"], float)
client.get_task()
assert client._cur_host == "localhost-0:50051"
@django_db_all
def test_client_reset_errors_after_success() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.UNAVAILABLE, "host is unavailable"),
)
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="1",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.UNAVAILABLE, "host is unavailable"),
)
with patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel:
mock_channel.return_value = channel
client = TaskworkerClient(["localhost:50051"], max_consecutive_unavailable_errors=3)
with pytest.raises(grpc.RpcError, match="host is unavailable"):
client.get_task()
assert client._num_consecutive_unavailable_errors == 1
task = client.get_task()
assert task and task.activation.id == "1"
assert client._num_consecutive_unavailable_errors == 0
with pytest.raises(grpc.RpcError, match="host is unavailable"):
client.get_task()
assert client._num_consecutive_unavailable_errors == 1
@django_db_all
def test_client_update_task_host_unavailable() -> None:
channel = MockChannel()
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
GetTaskResponse(
task=TaskActivation(
id="1",
namespace="testing",
taskname="do_thing",
parameters="",
headers={},
processing_deadline_duration=10,
)
),
)
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.UNAVAILABLE, "host is unavailable"),
)
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.UNAVAILABLE, "host is unavailable"),
)
channel.add_response(
"/sentry_protos.taskbroker.v1.ConsumerService/GetTask",
MockGrpcError(grpc.StatusCode.UNAVAILABLE, "host is unavailable"),
)
current_time = 1000.0
def mock_time():
return current_time
with (
patch("sentry.taskworker.client.client.grpc.insecure_channel") as mock_channel,
patch("sentry.taskworker.client.client.time.time", side_effect=mock_time),
):
mock_channel.return_value = channel
client = TaskworkerClient(
["localhost:50051"],
max_consecutive_unavailable_errors=3,
temporary_unavailable_host_timeout=10,
)
# Get a task to establish the host mapping
task = client.get_task()
assert task and task.activation.id == "1"
host = task.host
# Make the host temporarily unavailable
for _ in range(3):
with pytest.raises(grpc.RpcError, match="host is unavailable"):
client.get_task()
assert client._num_consecutive_unavailable_errors == 3
assert host in client._temporary_unavailable_hosts
# Try to update the task
with pytest.raises(
HostTemporarilyUnavailable, match=f"Host: {host} is temporarily unavailable"
):
client.update_task(
ProcessingResult(
task_id="1",
status=TASK_ACTIVATION_STATUS_COMPLETE,
host=host,
receive_timestamp=0,
),
fetch_next_task=None,
)
| MockGrpcError |
python | matplotlib__matplotlib | galleries/examples/user_interfaces/mpl_with_glade3_sgskip.py | {
"start": 325,
"end": 1206
} | class ____:
def on_window1_destroy(self, widget):
Gtk.main_quit()
def main():
builder = Gtk.Builder()
builder.add_objects_from_file(
str(Path(__file__).parent / "mpl_with_glade3.glade"),
("window1", ""))
builder.connect_signals(Window1Signals())
window = builder.get_object("window1")
sw = builder.get_object("scrolledwindow1")
# Start of Matplotlib specific code
figure = Figure(figsize=(8, 6), dpi=71)
axis = figure.add_subplot()
t = np.arange(0.0, 3.0, 0.01)
s = np.sin(2*np.pi*t)
axis.plot(t, s)
axis.set_xlabel('time [s]')
axis.set_ylabel('voltage [V]')
canvas = FigureCanvas(figure) # a Gtk.DrawingArea
canvas.set_size_request(800, 600)
sw.add(canvas)
# End of Matplotlib specific code
window.show_all()
Gtk.main()
if __name__ == "__main__":
main()
| Window1Signals |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 261933,
"end": 262835
} | class ____(sgqlc.types.Input):
"""The values that can be used to update a field of an item inside a
Project. Only 1 value can be updated at a time.
"""
__schema__ = github_schema
__field_names__ = ("text", "number", "date", "single_select_option_id", "iteration_id")
text = sgqlc.types.Field(String, graphql_name="text")
"""The text to set on the field."""
number = sgqlc.types.Field(Float, graphql_name="number")
"""The number to set on the field."""
date = sgqlc.types.Field(Date, graphql_name="date")
"""The ISO 8601 date to set on the field."""
single_select_option_id = sgqlc.types.Field(String, graphql_name="singleSelectOptionId")
"""The id of the single select option to set on the field."""
iteration_id = sgqlc.types.Field(String, graphql_name="iterationId")
"""The id of the iteration to set on the field."""
| ProjectV2FieldValue |
python | ansible__ansible | lib/ansible/module_utils/facts/hardware/hurd.py | {
"start": 1599,
"end": 1700
} | class ____(HardwareCollector):
_fact_class = HurdHardware
_platform = 'GNU'
| HurdHardwareCollector |
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 227044,
"end": 256103
} | class ____(DatasetIOBase):
@contextlib.contextmanager
def create_store(self):
yield Dataset()
@contextlib.contextmanager
def roundtrip(
self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
):
yield data.chunk()
# Override methods in DatasetIOBase - not applicable to dask
def test_roundtrip_string_encoded_characters(self) -> None:
pass
def test_roundtrip_coordinates_with_space(self) -> None:
pass
def test_roundtrip_numpy_datetime_data(self) -> None:
# Override method in DatasetIOBase - remove not applicable
# save_kwargs
times = pd.to_datetime(["2000-01-01", "2000-01-02", "NaT"], unit="ns")
expected = Dataset({"t": ("t", times), "t0": times[0]})
with self.roundtrip(expected) as actual:
assert_identical(expected, actual)
def test_roundtrip_cftime_datetime_data(self) -> None:
# Override method in DatasetIOBase - remove not applicable
# save_kwargs
from xarray.tests.test_coding_times import _all_cftime_date_types
date_types = _all_cftime_date_types()
for date_type in date_types.values():
times = [date_type(1, 1, 1), date_type(1, 1, 2)]
expected = Dataset({"t": ("t", times), "t0": times[0]})
expected_decoded_t = np.array(times)
expected_decoded_t0 = np.array([date_type(1, 1, 1)])
with self.roundtrip(expected) as actual:
assert_array_equal(actual.t.values, expected_decoded_t)
assert_array_equal(actual.t0.values, expected_decoded_t0)
def test_write_store(self) -> None:
# Override method in DatasetIOBase - not applicable to dask
pass
def test_dataset_caching(self) -> None:
expected = Dataset({"foo": ("x", [5, 6, 7])})
with self.roundtrip(expected) as actual:
assert not actual.foo.variable._in_memory
_ = actual.foo.values # no caching
assert not actual.foo.variable._in_memory
def test_open_mfdataset(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested"
) as actual:
assert isinstance(actual.foo.variable.data, da.Array)
assert actual.foo.variable.data.chunks == ((5, 5),)
assert_identical(original, actual)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested", chunks={"x": 3}
) as actual:
assert actual.foo.variable.data.chunks == ((3, 2, 3, 2),)
with pytest.raises(OSError, match=r"no files to open"):
open_mfdataset("foo-bar-baz-*.nc")
with pytest.raises(ValueError, match=r"wild-card"):
open_mfdataset("http://some/remote/uri")
@requires_fsspec
def test_open_mfdataset_no_files(self) -> None:
pytest.importorskip("aiobotocore")
# glob is attempted as of #4823, but finds no files
with pytest.raises(OSError, match=r"no files"):
open_mfdataset("http://some/remote/uri", engine="zarr")
def test_open_mfdataset_2d(self) -> None:
original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
with create_tmp_file() as tmp3:
with create_tmp_file() as tmp4:
original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1)
original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2)
original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3)
original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4)
with open_mfdataset(
[[tmp1, tmp2], [tmp3, tmp4]],
combine="nested",
concat_dim=["y", "x"],
) as actual:
assert isinstance(actual.foo.variable.data, da.Array)
assert actual.foo.variable.data.chunks == ((5, 5), (4, 4))
assert_identical(original, actual)
with open_mfdataset(
[[tmp1, tmp2], [tmp3, tmp4]],
combine="nested",
concat_dim=["y", "x"],
chunks={"x": 3, "y": 2},
) as actual:
assert actual.foo.variable.data.chunks == (
(3, 2, 3, 2),
(2, 2, 2, 2),
)
def test_open_mfdataset_pathlib(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmps1:
with create_tmp_file() as tmps2:
tmp1 = Path(tmps1)
tmp2 = Path(tmps2)
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested"
) as actual:
assert_identical(original, actual)
def test_open_mfdataset_2d_pathlib(self) -> None:
original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))})
with create_tmp_file() as tmps1:
with create_tmp_file() as tmps2:
with create_tmp_file() as tmps3:
with create_tmp_file() as tmps4:
tmp1 = Path(tmps1)
tmp2 = Path(tmps2)
tmp3 = Path(tmps3)
tmp4 = Path(tmps4)
original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1)
original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2)
original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3)
original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4)
with open_mfdataset(
[[tmp1, tmp2], [tmp3, tmp4]],
combine="nested",
concat_dim=["y", "x"],
) as actual:
assert_identical(original, actual)
def test_open_mfdataset_2(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested"
) as actual:
assert_identical(original, actual)
def test_open_mfdataset_with_ignore(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_files(2) as (tmp1, tmp2):
ds1 = original.isel(x=slice(5))
ds2 = original.isel(x=slice(5, 10))
ds1.to_netcdf(tmp1)
ds2.to_netcdf(tmp2)
with open_mfdataset(
[tmp1, "non-existent-file.nc", tmp2],
concat_dim="x",
combine="nested",
errors="ignore",
) as actual:
assert_identical(original, actual)
def test_open_mfdataset_with_warn(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
with pytest.warns(UserWarning, match=r"Ignoring."):
with create_tmp_files(2) as (tmp1, tmp2):
ds1 = original.isel(x=slice(5))
ds2 = original.isel(x=slice(5, 10))
ds1.to_netcdf(tmp1)
ds2.to_netcdf(tmp2)
with open_mfdataset(
[tmp1, "non-existent-file.nc", tmp2],
concat_dim="x",
combine="nested",
errors="warn",
) as actual:
assert_identical(original, actual)
def test_open_mfdataset_2d_with_ignore(self) -> None:
original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))})
with create_tmp_files(4) as (tmp1, tmp2, tmp3, tmp4):
original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1)
original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2)
original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3)
original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4)
with open_mfdataset(
[[tmp1, tmp2], ["non-existent-file.nc", tmp3, tmp4]],
combine="nested",
concat_dim=["y", "x"],
errors="ignore",
) as actual:
assert_identical(original, actual)
def test_open_mfdataset_2d_with_warn(self) -> None:
original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))})
with pytest.warns(UserWarning, match=r"Ignoring."):
with create_tmp_files(4) as (tmp1, tmp2, tmp3, tmp4):
original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1)
original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2)
original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3)
original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4)
with open_mfdataset(
[[tmp1, tmp2, "non-existent-file.nc"], [tmp3, tmp4]],
combine="nested",
concat_dim=["y", "x"],
errors="warn",
) as actual:
assert_identical(original, actual)
def test_attrs_mfdataset(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
ds1 = original.isel(x=slice(5))
ds2 = original.isel(x=slice(5, 10))
ds1.attrs["test1"] = "foo"
ds2.attrs["test2"] = "bar"
ds1.to_netcdf(tmp1)
ds2.to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested"
) as actual:
# presumes that attributes inherited from
# first dataset loaded
assert actual.test1 == ds1.test1
# attributes from ds2 are not retained, e.g.,
with pytest.raises(AttributeError, match=r"no attribute"):
_ = actual.test2
def test_open_mfdataset_attrs_file(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_files(2) as (tmp1, tmp2):
ds1 = original.isel(x=slice(5))
ds2 = original.isel(x=slice(5, 10))
ds1.attrs["test1"] = "foo"
ds2.attrs["test2"] = "bar"
ds1.to_netcdf(tmp1)
ds2.to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested", attrs_file=tmp2
) as actual:
# attributes are inherited from the master file
assert actual.attrs["test2"] == ds2.attrs["test2"]
# attributes from ds1 are not retained, e.g.,
assert "test1" not in actual.attrs
def test_open_mfdataset_attrs_file_path(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_files(2) as (tmps1, tmps2):
tmp1 = Path(tmps1)
tmp2 = Path(tmps2)
ds1 = original.isel(x=slice(5))
ds2 = original.isel(x=slice(5, 10))
ds1.attrs["test1"] = "foo"
ds2.attrs["test2"] = "bar"
ds1.to_netcdf(tmp1)
ds2.to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested", attrs_file=tmp2
) as actual:
# attributes are inherited from the master file
assert actual.attrs["test2"] == ds2.attrs["test2"]
# attributes from ds1 are not retained, e.g.,
assert "test1" not in actual.attrs
def test_open_mfdataset_auto_combine(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10)), "x": np.arange(10)})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with open_mfdataset([tmp2, tmp1], combine="by_coords") as actual:
assert_identical(original, actual)
def test_open_mfdataset_raise_on_bad_combine_args(self) -> None:
# Regression test for unhelpful error shown in #5230
original = Dataset({"foo": ("x", np.random.randn(10)), "x": np.arange(10)})
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
original.isel(x=slice(5)).to_netcdf(tmp1)
original.isel(x=slice(5, 10)).to_netcdf(tmp2)
with pytest.raises(ValueError, match="`concat_dim` has no effect"):
open_mfdataset([tmp1, tmp2], concat_dim="x")
def test_encoding_mfdataset(self) -> None:
original = Dataset(
{
"foo": ("t", np.random.randn(10)),
"t": ("t", pd.date_range(start="2010-01-01", periods=10, freq="1D")),
}
)
original.t.encoding["units"] = "days since 2010-01-01"
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
ds1 = original.isel(t=slice(5))
ds2 = original.isel(t=slice(5, 10))
ds1.t.encoding["units"] = "days since 2010-01-01"
ds2.t.encoding["units"] = "days since 2000-01-01"
ds1.to_netcdf(tmp1)
ds2.to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], combine="nested", concat_dim="t"
) as actual:
assert actual.t.encoding["units"] == original.t.encoding["units"]
assert actual.t.encoding["units"] == ds1.t.encoding["units"]
assert actual.t.encoding["units"] != ds2.t.encoding["units"]
def test_encoding_mfdataset_new_defaults(self) -> None:
original = Dataset(
{
"foo": ("t", np.random.randn(10)),
"t": ("t", pd.date_range(start="2010-01-01", periods=10, freq="1D")),
}
)
original.t.encoding["units"] = "days since 2010-01-01"
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
ds1 = original.isel(t=slice(5))
ds2 = original.isel(t=slice(5, 10))
ds1.t.encoding["units"] = "days since 2010-01-01"
ds2.t.encoding["units"] = "days since 2000-01-01"
ds1.to_netcdf(tmp1)
ds2.to_netcdf(tmp2)
for setting in [True, False]:
with set_options(use_new_combine_kwarg_defaults=setting):
with open_mfdataset(
[tmp1, tmp2], combine="nested", concat_dim="t"
) as old:
assert (
old.t.encoding["units"] == original.t.encoding["units"]
)
assert old.t.encoding["units"] == ds1.t.encoding["units"]
assert old.t.encoding["units"] != ds2.t.encoding["units"]
with set_options(use_new_combine_kwarg_defaults=True):
with pytest.raises(
AlignmentError, match="If you are intending to concatenate"
):
open_mfdataset([tmp1, tmp2], combine="nested")
def test_preprocess_mfdataset(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
def preprocess(ds):
return ds.assign_coords(z=0)
expected = preprocess(original)
with open_mfdataset(
tmp, preprocess=preprocess, combine="by_coords"
) as actual:
assert_identical(expected, actual)
def test_save_mfdataset_roundtrip(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))]
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
save_mfdataset(datasets, [tmp1, tmp2])
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested"
) as actual:
assert_identical(actual, original)
def test_save_mfdataset_invalid(self) -> None:
ds = Dataset()
with pytest.raises(ValueError, match=r"cannot use mode"):
save_mfdataset([ds, ds], ["same", "same"])
with pytest.raises(ValueError, match=r"same length"):
save_mfdataset([ds, ds], ["only one path"])
def test_save_mfdataset_invalid_dataarray(self) -> None:
# regression test for GH1555
da = DataArray([1, 2])
with pytest.raises(TypeError, match=r"supports writing Dataset"):
save_mfdataset([da], ["dataarray"])
def test_save_mfdataset_pathlib_roundtrip(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))]
with create_tmp_file() as tmps1:
with create_tmp_file() as tmps2:
tmp1 = Path(tmps1)
tmp2 = Path(tmps2)
save_mfdataset(datasets, [tmp1, tmp2])
with open_mfdataset(
[tmp1, tmp2], concat_dim="x", combine="nested"
) as actual:
assert_identical(actual, original)
def test_save_mfdataset_pass_kwargs(self) -> None:
# create a timeseries to store in a netCDF file
times = [0, 1]
time = xr.DataArray(times, dims=("time",))
# create a simple dataset to write using save_mfdataset
test_ds = xr.Dataset()
test_ds["time"] = time
# make sure the times are written as double and
# turn off fill values
encoding = dict(time=dict(dtype="double"))
unlimited_dims = ["time"]
# set the output file name
output_path = "test.nc"
# attempt to write the dataset with the encoding and unlimited args
# passed through
xr.save_mfdataset(
[test_ds], [output_path], encoding=encoding, unlimited_dims=unlimited_dims
)
def test_open_and_do_math(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_mfdataset(tmp, combine="by_coords") as ds:
actual = 1.0 * ds
assert_allclose(original, actual, decode_bytes=False)
@pytest.mark.parametrize(
"kwargs",
[pytest.param({"concat_dim": None}, id="none"), pytest.param({}, id="default")],
)
def test_open_mfdataset_concat_dim(self, kwargs) -> None:
with set_options(use_new_combine_kwarg_defaults=True):
with create_tmp_file() as tmp1:
with create_tmp_file() as tmp2:
data = Dataset({"x": 0})
data.to_netcdf(tmp1)
Dataset({"x": np.nan}).to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], **kwargs, combine="nested"
) as actual:
assert_identical(data, actual)
def test_open_dataset(self) -> None:
original = Dataset({"foo": ("x", np.random.randn(10))})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_dataset(tmp, chunks={"x": 5}) as actual:
assert isinstance(actual.foo.variable.data, da.Array)
assert actual.foo.variable.data.chunks == ((5, 5),)
assert_identical(original, actual)
with open_dataset(tmp, chunks=5) as actual:
assert_identical(original, actual)
with open_dataset(tmp) as actual:
assert isinstance(actual.foo.variable.data, np.ndarray)
assert_identical(original, actual)
def test_open_single_dataset(self) -> None:
# Test for issue GH #1988. This makes sure that the
# concat_dim is utilized when specified in open_mfdataset().
rnddata = np.random.randn(10)
original = Dataset({"foo": ("x", rnddata)})
dim = DataArray([100], name="baz", dims="baz")
expected = Dataset(
{"foo": (("baz", "x"), rnddata[np.newaxis, :])}, {"baz": [100]}
)
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_mfdataset(
[tmp], concat_dim=dim, data_vars="all", combine="nested"
) as actual:
assert_identical(expected, actual)
def test_open_multi_dataset(self) -> None:
# Test for issue GH #1988 and #2647. This makes sure that the
# concat_dim is utilized when specified in open_mfdataset().
# The additional wrinkle is to ensure that a length greater
# than one is tested as well due to numpy's implicit casting
# of 1-length arrays to booleans in tests, which allowed
# #2647 to still pass the test_open_single_dataset(),
# which is itself still needed as-is because the original
# bug caused one-length arrays to not be used correctly
# in concatenation.
rnddata = np.random.randn(10)
original = Dataset({"foo": ("x", rnddata)})
dim = DataArray([100, 150], name="baz", dims="baz")
expected = Dataset(
{"foo": (("baz", "x"), np.tile(rnddata[np.newaxis, :], (2, 1)))},
{"baz": [100, 150]},
)
with create_tmp_file() as tmp1, create_tmp_file() as tmp2:
original.to_netcdf(tmp1)
original.to_netcdf(tmp2)
with open_mfdataset(
[tmp1, tmp2], concat_dim=dim, data_vars="all", combine="nested"
) as actual:
assert_identical(expected, actual)
@requires_cftime
def test_open_dataset_cftime_autochunk(self) -> None:
"""Create a dataset with cftime datetime objects and
ensure that auto-chunking works correctly."""
import cftime
original = xr.Dataset(
{
"foo": ("time", [0.0]),
"time_bnds": (
("time", "bnds"),
[
[
cftime.Datetime360Day(2005, 12, 1, 0, 0, 0, 0),
cftime.Datetime360Day(2005, 12, 2, 0, 0, 0, 0),
]
],
),
},
{"time": [cftime.Datetime360Day(2005, 12, 1, 12, 0, 0, 0)]},
)
with self.roundtrip(original, open_kwargs={"chunks": "auto"}) as actual:
assert isinstance(actual.time_bnds.variable.data, da.Array)
assert _contains_cftime_datetimes(actual.time)
assert_identical(original, actual)
# Flaky test. Very open to contributions on fixing this
@pytest.mark.flaky
def test_dask_roundtrip(self) -> None:
with create_tmp_file() as tmp:
data = create_test_data()
data.to_netcdf(tmp)
chunks = {"dim1": 4, "dim2": 4, "dim3": 4, "time": 10}
with open_dataset(tmp, chunks=chunks) as dask_ds:
assert_identical(data, dask_ds)
with create_tmp_file() as tmp2:
dask_ds.to_netcdf(tmp2)
with open_dataset(tmp2) as on_disk:
assert_identical(data, on_disk)
def test_deterministic_names(self) -> None:
with create_tmp_file() as tmp:
data = create_test_data()
data.to_netcdf(tmp)
with open_mfdataset(tmp, combine="by_coords") as ds:
original_names = {k: v.data.name for k, v in ds.data_vars.items()}
with open_mfdataset(tmp, combine="by_coords") as ds:
repeat_names = {k: v.data.name for k, v in ds.data_vars.items()}
for var_name, dask_name in original_names.items():
assert var_name in dask_name
assert dask_name[:13] == "open_dataset-"
assert original_names == repeat_names
def test_dataarray_compute(self) -> None:
# Test DataArray.compute() on dask backend.
# The test for Dataset.compute() is already in DatasetIOBase;
# however dask is the only tested backend which supports DataArrays
actual = DataArray([1, 2]).chunk()
computed = actual.compute()
assert not actual._in_memory
assert computed._in_memory
assert_allclose(actual, computed, decode_bytes=False)
def test_save_mfdataset_compute_false_roundtrip(self) -> None:
from dask.delayed import Delayed
original = Dataset({"foo": ("x", np.random.randn(10))}).chunk()
datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))]
with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp1:
with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp2:
delayed_obj = save_mfdataset(
datasets, [tmp1, tmp2], engine=self.engine, compute=False
)
assert isinstance(delayed_obj, Delayed)
delayed_obj.compute()
with open_mfdataset(
[tmp1, tmp2], combine="nested", concat_dim="x"
) as actual:
assert_identical(actual, original)
def test_load_dataset(self) -> None:
with create_tmp_file() as tmp:
original = Dataset({"foo": ("x", np.random.randn(10))})
original.to_netcdf(tmp)
ds = load_dataset(tmp)
assert_identical(original, ds)
# this would fail if we used open_dataset instead of load_dataset
ds.to_netcdf(tmp)
def test_load_dataarray(self) -> None:
with create_tmp_file() as tmp:
original = DataArray(np.random.randn(10), dims=["x"])
original.to_netcdf(tmp)
da = load_dataarray(tmp)
assert_identical(original, da)
# this would fail if we used open_dataarray instead of
# load_dataarray
da.to_netcdf(tmp)
def test_load_datatree(self) -> None:
with create_tmp_file() as tmp:
original = DataTree(Dataset({"foo": ("x", np.random.randn(10))}))
original.to_netcdf(tmp)
dt = load_datatree(tmp)
xr.testing.assert_identical(original, dt)
# this would fail if we used open_datatree instead of
# load_datatree
dt.to_netcdf(tmp)
@pytest.mark.skipif(
ON_WINDOWS,
reason="counting number of tasks in graph fails on windows for some reason",
)
def test_inline_array(self) -> None:
with create_tmp_file() as tmp:
original = Dataset({"foo": ("x", np.random.randn(10))})
original.to_netcdf(tmp)
chunks = {"time": 10}
def num_graph_nodes(obj):
return len(obj.__dask_graph__())
with (
open_dataset(tmp, inline_array=False, chunks=chunks) as not_inlined_ds,
open_dataset(tmp, inline_array=True, chunks=chunks) as inlined_ds,
):
assert num_graph_nodes(inlined_ds) < num_graph_nodes(not_inlined_ds)
with (
open_dataarray(
tmp, inline_array=False, chunks=chunks
) as not_inlined_da,
open_dataarray(tmp, inline_array=True, chunks=chunks) as inlined_da,
):
assert num_graph_nodes(inlined_da) < num_graph_nodes(not_inlined_da)
@requires_scipy_or_netCDF4
@requires_pydap
@pytest.mark.filterwarnings("ignore:The binary mode of fromstring is deprecated")
| TestDask |
python | kamyu104__LeetCode-Solutions | Python/design-an-expression-tree-with-evaluate-function.py | {
"start": 1596,
"end": 1992
} | class ____(Node):
ops = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.div}
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def evaluate(self):
if self.val.isdigit():
return int(self.val)
return NodeRecu.ops[self.val](self.left.evaluate(), self.right.evaluate())
| NodeRecu |
python | pennersr__django-allauth | allauth/socialaccount/providers/salesforce/provider.py | {
"start": 542,
"end": 1763
} | class ____(OAuth2Provider):
id = "salesforce"
name = "Salesforce"
package = "allauth.socialaccount.providers.salesforce"
account_class = SalesforceAccount
oauth2_adapter_class = SalesforceOAuth2Adapter
def get_default_scope(self):
return ["id", "openid"]
def get_auth_params_from_request(self, request, action):
ret = super().get_auth_params_from_request(request, action)
if action == AuthAction.REAUTHENTICATE:
ret["approval_prompt"] = "force"
return ret
def extract_uid(self, data):
return str(data["user_id"])
def extract_common_fields(self, data):
return dict(
email=data.get("email"),
last_name=data.get("family_name"),
first_name=data.get("given_name"),
username=data.get("preferred_username"),
)
def extract_email_addresses(self, data):
# a salesforce user must have an email, but it might not be verified
email = EmailAddress(
email=data.get("email"),
primary=True,
verified=data.get("email_verified"),
)
return [email]
providers.registry.register(SalesforceProvider)
| SalesforceProvider |
python | numpy__numpy | numpy/_core/tests/test_simd.py | {
"start": 839,
"end": 3710
} | class ____:
# submodule of the desired SIMD extension, e.g. targets["AVX512F"]
npyv = None
# the current data type suffix e.g. 's8'
sfx = None
# target name can be 'baseline' or one or more of CPU features
target_name = None
def __getattr__(self, attr):
"""
To call NPV intrinsics without the attribute 'npyv' and
auto suffixing intrinsics according to class attribute 'sfx'
"""
return getattr(self.npyv, attr + "_" + self.sfx)
def _x2(self, intrin_name):
return getattr(self.npyv, f"{intrin_name}_{self.sfx}x2")
def _data(self, start=None, count=None, reverse=False):
"""
Create list of consecutive numbers according to number of vector's lanes.
"""
if start is None:
start = 1
if count is None:
count = self.nlanes
rng = range(start, start + count)
if reverse:
rng = reversed(rng)
if self._is_fp():
return [x / 1.0 for x in rng]
return list(rng)
def _is_unsigned(self):
return self.sfx[0] == 'u'
def _is_signed(self):
return self.sfx[0] == 's'
def _is_fp(self):
return self.sfx[0] == 'f'
def _scalar_size(self):
return int(self.sfx[1:])
def _int_clip(self, seq):
if self._is_fp():
return seq
max_int = self._int_max()
min_int = self._int_min()
return [min(max(v, min_int), max_int) for v in seq]
def _int_max(self):
if self._is_fp():
return None
max_u = self._to_unsigned(self.setall(-1))[0]
if self._is_signed():
return max_u // 2
return max_u
def _int_min(self):
if self._is_fp():
return None
if self._is_unsigned():
return 0
return -(self._int_max() + 1)
def _true_mask(self):
max_unsig = getattr(self.npyv, "setall_u" + self.sfx[1:])(-1)
return max_unsig[0]
def _to_unsigned(self, vector):
if isinstance(vector, (list, tuple)):
return getattr(self.npyv, "load_u" + self.sfx[1:])(vector)
else:
sfx = vector.__name__.replace("npyv_", "")
if sfx[0] == "b":
cvt_intrin = "cvt_u{0}_b{0}"
else:
cvt_intrin = "reinterpret_u{0}_{1}"
return getattr(self.npyv, cvt_intrin.format(sfx[1:], sfx))(vector)
def _pinfinity(self):
return float("inf")
def _ninfinity(self):
return -float("inf")
def _nan(self):
return float("nan")
def _cpu_features(self):
target = self.target_name
if target == "baseline":
target = __cpu_baseline__
else:
target = target.split('__') # multi-target separator
return ' '.join(target)
| _Test_Utility |
python | huggingface__transformers | src/transformers/models/qwen3_vl/modeling_qwen3_vl.py | {
"start": 25993,
"end": 34338
} | class ____(Qwen3VLPreTrainedModel):
config: Qwen3VLVisionConfig
_no_split_modules = ["Qwen3VLVisionBlock"]
def __init__(self, config, *inputs, **kwargs) -> None:
super().__init__(config, *inputs, **kwargs)
self.spatial_merge_size = config.spatial_merge_size
self.patch_size = config.patch_size
self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size
self.patch_embed = Qwen3VLVisionPatchEmbed(
config=config,
)
self.pos_embed = nn.Embedding(config.num_position_embeddings, config.hidden_size)
self.num_grid_per_side = int(config.num_position_embeddings**0.5)
head_dim = config.hidden_size // config.num_heads
self.rotary_pos_emb = Qwen3VLVisionRotaryEmbedding(head_dim // 2)
self.blocks = nn.ModuleList([Qwen3VLVisionBlock(config) for _ in range(config.depth)])
self.merger = Qwen3VLVisionPatchMerger(
config=config,
use_postshuffle_norm=False,
)
self.deepstack_visual_indexes = config.deepstack_visual_indexes
self.deepstack_merger_list = nn.ModuleList(
[
Qwen3VLVisionPatchMerger(
config=config,
use_postshuffle_norm=True,
)
for _ in range(len(config.deepstack_visual_indexes))
]
)
self.gradient_checkpointing = False
def rot_pos_emb(self, grid_thw: torch.Tensor) -> torch.Tensor:
merge_size = self.spatial_merge_size
max_hw = int(grid_thw[:, 1:].max().item())
freq_table = self.rotary_pos_emb(max_hw) # (max_hw, dim // 2)
device = freq_table.device
total_tokens = int(torch.prod(grid_thw, dim=1).sum().item())
pos_ids = torch.empty((total_tokens, 2), dtype=torch.long, device=device)
offset = 0
for num_frames, height, width in grid_thw:
merged_h, merged_w = height // merge_size, width // merge_size
block_rows = torch.arange(merged_h, device=device) # block row indices
block_cols = torch.arange(merged_w, device=device) # block col indices
intra_row = torch.arange(merge_size, device=device) # intra-block row offsets
intra_col = torch.arange(merge_size, device=device) # intra-block col offsets
# Compute full-resolution positions
row_idx = block_rows[:, None, None, None] * merge_size + intra_row[None, None, :, None]
col_idx = block_cols[None, :, None, None] * merge_size + intra_col[None, None, None, :]
row_idx = row_idx.expand(merged_h, merged_w, merge_size, merge_size).reshape(-1)
col_idx = col_idx.expand(merged_h, merged_w, merge_size, merge_size).reshape(-1)
coords = torch.stack((row_idx, col_idx), dim=-1)
if num_frames > 1:
coords = coords.repeat(num_frames, 1)
num_tokens = coords.shape[0]
pos_ids[offset : offset + num_tokens] = coords
offset += num_tokens
embeddings = freq_table[pos_ids] # lookup rotary embeddings
embeddings = embeddings.flatten(1)
return embeddings
def fast_pos_embed_interpolate(self, grid_thw):
grid_ts, grid_hs, grid_ws = grid_thw[:, 0], grid_thw[:, 1], grid_thw[:, 2]
device = grid_thw.device
idx_list = [[] for _ in range(4)]
weight_list = [[] for _ in range(4)]
for t, h, w in zip(grid_ts, grid_hs, grid_ws):
h_idxs = torch.linspace(0, self.num_grid_per_side - 1, h)
w_idxs = torch.linspace(0, self.num_grid_per_side - 1, w)
h_idxs_floor = h_idxs.int()
w_idxs_floor = w_idxs.int()
h_idxs_ceil = (h_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)
w_idxs_ceil = (w_idxs.int() + 1).clip(max=self.num_grid_per_side - 1)
dh = h_idxs - h_idxs_floor
dw = w_idxs - w_idxs_floor
base_h = h_idxs_floor * self.num_grid_per_side
base_h_ceil = h_idxs_ceil * self.num_grid_per_side
indices = [
(base_h[None].T + w_idxs_floor[None]).flatten(),
(base_h[None].T + w_idxs_ceil[None]).flatten(),
(base_h_ceil[None].T + w_idxs_floor[None]).flatten(),
(base_h_ceil[None].T + w_idxs_ceil[None]).flatten(),
]
weights = [
((1 - dh)[None].T * (1 - dw)[None]).flatten(),
((1 - dh)[None].T * dw[None]).flatten(),
(dh[None].T * (1 - dw)[None]).flatten(),
(dh[None].T * dw[None]).flatten(),
]
for i in range(4):
idx_list[i].extend(indices[i].tolist())
weight_list[i].extend(weights[i].tolist())
idx_tensor = torch.tensor(idx_list, dtype=torch.long, device=device)
weight_tensor = torch.tensor(weight_list, dtype=self.pos_embed.weight.dtype, device=device)
pos_embeds = self.pos_embed(idx_tensor).to(device) * weight_tensor[:, :, None]
patch_pos_embeds = pos_embeds[0] + pos_embeds[1] + pos_embeds[2] + pos_embeds[3]
patch_pos_embeds = patch_pos_embeds.split([h * w for h, w in zip(grid_hs, grid_ws)])
patch_pos_embeds_permute = []
merge_size = self.config.spatial_merge_size
for pos_embed, t, h, w in zip(patch_pos_embeds, grid_ts, grid_hs, grid_ws):
pos_embed = pos_embed.repeat(t, 1)
pos_embed = (
pos_embed.view(t, h // merge_size, merge_size, w // merge_size, merge_size, -1)
.permute(0, 1, 3, 2, 4, 5)
.flatten(0, 4)
)
patch_pos_embeds_permute.append(pos_embed)
patch_pos_embeds = torch.cat(patch_pos_embeds_permute)
return patch_pos_embeds
def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
"""
Args:
hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
The final hidden states of the model.
grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
The temporal, height and width of feature shape of each image in LLM.
Returns:
`torch.Tensor`: hidden_states.
"""
hidden_states = self.patch_embed(hidden_states)
pos_embeds = self.fast_pos_embed_interpolate(grid_thw)
hidden_states = hidden_states + pos_embeds
rotary_pos_emb = self.rot_pos_emb(grid_thw)
seq_len, _ = hidden_states.size()
hidden_states = hidden_states.reshape(seq_len, -1)
rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
emb = torch.cat((rotary_pos_emb, rotary_pos_emb), dim=-1)
position_embeddings = (emb.cos(), emb.sin())
cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
dim=0,
# Select dtype based on the following factors:
# - FA2 requires that cu_seqlens_q must have dtype int32
# - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
# See https://github.com/huggingface/transformers/pull/34852 for more information
dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
)
cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
deepstack_feature_lists = []
for layer_num, blk in enumerate(self.blocks):
hidden_states = blk(
hidden_states,
cu_seqlens=cu_seqlens,
position_embeddings=position_embeddings,
**kwargs,
)
if layer_num in self.deepstack_visual_indexes:
deepstack_feature = self.deepstack_merger_list[self.deepstack_visual_indexes.index(layer_num)](
hidden_states
)
deepstack_feature_lists.append(deepstack_feature)
hidden_states = self.merger(hidden_states)
return hidden_states, deepstack_feature_lists
@auto_docstring(
custom_intro=(
"Text part of Qwen3VL, "
"not a pure text-only model, as DeepStack integrates visual features into the early hidden states."
)
)
| Qwen3VLVisionModel |
python | huggingface__transformers | src/transformers/models/pvt_v2/modeling_pvt_v2.py | {
"start": 8985,
"end": 10336
} | class ____(nn.Module):
def __init__(
self,
config: PvtV2Config,
in_features: int,
hidden_features: Optional[int] = None,
out_features: Optional[int] = None,
):
super().__init__()
out_features = out_features if out_features is not None else in_features
self.dense1 = nn.Linear(in_features, hidden_features)
self.dwconv = PvtV2DepthWiseConv(config, hidden_features)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.dense2 = nn.Linear(hidden_features, out_features)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.relu = nn.ReLU() if config.linear_attention else nn.Identity()
def forward(self, hidden_states: torch.Tensor, height, width) -> torch.Tensor:
hidden_states = self.dense1(hidden_states)
hidden_states = self.relu(hidden_states)
hidden_states = self.dwconv(hidden_states, height, width)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense2(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
| PvtV2ConvFeedForwardNetwork |
python | getsentry__sentry | src/sentry/api/endpoints/organization_plugins_configs.py | {
"start": 754,
"end": 5938
} | class ____(OrganizationEndpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, organization: Organization) -> Response:
"""
List one or more plugin configurations, including a `projectList` for each plugin which contains
all the projects that have that specific plugin both configured and enabled.
- similar to the `OrganizationPluginsEndpoint`, and can eventually replace it
:qparam plugins array[string]: an optional list of plugin ids (slugs) if you want specific plugins.
If not set, will return configurations for all plugins.
"""
desired_plugins = []
for slug in request.GET.getlist("plugins") or ():
# if the user request a plugin that doesn't exist, throw 404
try:
desired_plugins.append(plugins.get(slug))
except KeyError:
return Response({"detail": "Plugin %s not found" % slug}, status=404)
# if no plugins were specified, grab all plugins but limit by those that have the ability to be configured
if not desired_plugins:
desired_plugins = list(plugins.plugin_that_can_be_configured())
# `keys_to_check` are the ProjectOption keys that tell us if a plugin is enabled (e.g. `plugin:enabled`) or are
# configured properly, meaning they have the required information - plugin.required_field - needed for the
# plugin to work (ex:`opsgenie:api_key`)
keys_to_check = []
for plugin in desired_plugins:
keys_to_check.append("%s:enabled" % plugin.slug)
if plugin.required_field:
keys_to_check.append(f"{plugin.slug}:{plugin.required_field}")
# Get all the project options for org that have truthy values
project_options = ProjectOption.objects.filter(
key__in=keys_to_check, project__organization=organization
)
"""
This map stores info about whether a plugin is configured and/or enabled
{
"plugin_slug": {
"project_id": { "enabled": True, "configured": False },
},
}
"""
info_by_plugin_project: dict[str, dict[int, dict[str, bool]]] = {}
for project_option in project_options:
if not project_option.value:
continue
[slug, field] = project_option.key.split(":")
project_id = project_option.project_id
# first add to the set of all projects by plugin
info_by_plugin_project.setdefault(slug, {}).setdefault(
project_id, {"enabled": False, "configured": False}
)
# next check if enabled
if field == "enabled":
info_by_plugin_project[slug][project_id]["enabled"] = True
# if the projectoption is not the enable field, it's configuration field
else:
info_by_plugin_project[slug][project_id]["configured"] = True
# get the IDs of all projects for found project options and grab them from the DB
project_id_set = {project_option.project_id for project_option in project_options}
projects = Project.objects.filter(id__in=project_id_set, status=ObjectStatus.ACTIVE)
# create a key/value map of our projects
project_map = {project.id: project for project in projects}
# iterate through the desired plugins and serialize them
serialized_plugins = []
for plugin in desired_plugins:
serialized_plugin = serialize(plugin, request.user, PluginSerializer())
if serialized_plugin["isDeprecated"]:
continue
serialized_plugin["projectList"] = []
info_by_project = info_by_plugin_project.get(plugin.slug, {})
# iterate through the projects
for project_id, plugin_info in info_by_project.items():
# if the project is being deleted
if project_id not in project_map:
continue
project = project_map[project_id]
# only include plugins which are configured
if not plugin_info["configured"]:
continue
serialized_plugin["projectList"].append(
{
"projectId": project.id,
"projectSlug": project.slug,
"projectName": project.name, # TODO(steve): do we need?
"enabled": plugin_info["enabled"],
"configured": plugin_info["configured"], # TODO(steve): do we need?
"projectPlatform": project.platform,
}
)
# sort by the projectSlug
serialized_plugin["projectList"].sort(key=lambda x: x["projectSlug"])
serialized_plugins.append(serialized_plugin)
if not serialized_plugins:
raise Http404
return Response(serialized_plugins)
| OrganizationPluginsConfigsEndpoint |
python | realpython__materials | python-textual/hello_textual.py | {
"start": 65,
"end": 227
} | class ____(App):
def compose(self):
yield Static("Hello, Textual!")
if __name__ == "__main__":
app = HelloTextualApp()
app.run()
| HelloTextualApp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.