language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pallets__flask | tests/test_helpers.py | {
"start": 6141,
"end": 6970
} | class ____:
"""Test Flasks are created without import.
Avoiding ``__import__`` helps create Flask instances where there are errors
at import time. Those runtime errors will be apparent to the user soon
enough, but tools which build Flask instances meta-programmatically benefit
from a Flask which does not ``__import__``. Instead of importing to
retrieve file paths or metadata on a module or package, use the pkgutil and
imp modules in the Python standard library.
"""
def test_name_with_import_error(self, modules_tmp_path):
(modules_tmp_path / "importerror.py").write_text("raise NotImplementedError()")
try:
flask.Flask("importerror")
except NotImplementedError:
AssertionError("Flask(import_name) is importing import_name.")
| TestNoImports |
python | tensorflow__tensorflow | tensorflow/python/util/module_wrapper.py | {
"start": 3063,
"end": 10217
} | class ____(FastModuleType):
"""Wrapper for TF modules to support deprecation messages and lazyloading."""
# Ensures that compat.v1 API usage is recorded at most once
compat_v1_usage_recorded = False
def __init__(
self,
wrapped,
module_name,
public_apis=None,
deprecation=True,
has_lite=False):
super(TFModuleWrapper, self).__init__(wrapped.__name__)
FastModuleType.set_getattr_callback(self, TFModuleWrapper._getattr)
FastModuleType.set_getattribute_callback(self,
TFModuleWrapper._getattribute)
self.__dict__.update(wrapped.__dict__)
# Prefix all local attributes with _tfmw_ so that we can
# handle them differently in attribute access methods.
self._tfmw_wrapped_module = wrapped
self._tfmw_module_name = module_name
self._tfmw_public_apis = public_apis
self._tfmw_print_deprecation_warnings = deprecation
self._tfmw_has_lite = has_lite
self._tfmw_is_compat_v1 = (wrapped.__name__.endswith('.compat.v1'))
# Set __all__ so that import * work for lazy loaded modules
if self._tfmw_public_apis:
self._tfmw_wrapped_module.__all__ = list(self._tfmw_public_apis.keys())
self.__all__ = list(self._tfmw_public_apis.keys())
else:
if hasattr(self._tfmw_wrapped_module, '__all__'):
self.__all__ = self._tfmw_wrapped_module.__all__
else:
self._tfmw_wrapped_module.__all__ = [
attr for attr in dir(self._tfmw_wrapped_module)
if not attr.startswith('_')
]
self.__all__ = self._tfmw_wrapped_module.__all__
# names we already checked for deprecation
self._tfmw_deprecated_checked = set()
self._tfmw_warning_count = 0
def _tfmw_add_deprecation_warning(self, name, attr):
"""Print deprecation warning for attr with given name if necessary."""
if (self._tfmw_warning_count < _PER_MODULE_WARNING_LIMIT and
name not in self._tfmw_deprecated_checked):
self._tfmw_deprecated_checked.add(name)
if self._tfmw_module_name:
full_name = 'tf.%s.%s' % (self._tfmw_module_name, name)
else:
full_name = 'tf.%s' % name
rename = get_rename_v2(full_name)
if rename and not has_deprecation_decorator(attr):
call_location = _call_location()
# skip locations in Python source
if not call_location.startswith('<'):
logging.warning(
'From %s: The name %s is deprecated. Please use %s instead.\n',
_call_location(), full_name, rename)
self._tfmw_warning_count += 1
return True
return False
def _tfmw_import_module(self, name):
"""Lazily loading the modules."""
# We ignore 'app' because it is accessed in __init__.py of tf.compat.v1.
# That way, if a user only imports tensorflow.compat.v1, it is not
# considered v1 API usage.
if (self._tfmw_is_compat_v1 and name != 'app' and
not TFModuleWrapper.compat_v1_usage_recorded):
TFModuleWrapper.compat_v1_usage_recorded = True
compat_v1_usage_gauge.get_cell().set(True)
symbol_loc_info = self._tfmw_public_apis[name]
if symbol_loc_info[0]:
module = importlib.import_module(symbol_loc_info[0])
attr = getattr(module, symbol_loc_info[1])
else:
attr = importlib.import_module(symbol_loc_info[1])
setattr(self._tfmw_wrapped_module, name, attr)
self.__dict__[name] = attr
# Cache the pair
self._fastdict_insert(name, attr)
return attr
def _getattribute(self, name):
# pylint: disable=g-doc-return-or-yield,g-doc-args
"""Imports and caches pre-defined API.
Warns if necessary.
This method is a replacement for __getattribute__(). It will be added into
the extended python module as a callback to reduce API overhead.
"""
# Avoid infinite recursions
func__fastdict_insert = object.__getattribute__(self, '_fastdict_insert')
# Make sure we do not import from tensorflow/lite/__init__.py
if name == 'lite':
if self._tfmw_has_lite:
attr = self._tfmw_import_module(name)
setattr(self._tfmw_wrapped_module, 'lite', attr)
func__fastdict_insert(name, attr)
return attr
# Placeholder for Google-internal contrib error
attr = object.__getattribute__(self, name)
# Return and cache dunders and our own members.
# This is necessary to guarantee successful construction.
# In addition, all the accessed attributes used during the construction must
# begin with "__" or "_tfmw" or "_fastdict_".
if name.startswith('__') or name.startswith('_tfmw_') or name.startswith(
'_fastdict_'):
func__fastdict_insert(name, attr)
return attr
# Print deprecations, only cache functions after deprecation warnings have
# stopped.
if not (self._tfmw_print_deprecation_warnings and
self._tfmw_add_deprecation_warning(name, attr)):
func__fastdict_insert(name, attr)
return attr
def _getattr(self, name):
# pylint: disable=g-doc-return-or-yield,g-doc-args
"""Imports and caches pre-defined API.
Warns if necessary.
This method is a replacement for __getattr__(). It will be added into the
extended python module as a callback to reduce API overhead. Instead of
relying on implicit AttributeError handling, this added callback function
will
be called explicitly from the extended C API if the default attribute lookup
fails.
"""
try:
attr = getattr(self._tfmw_wrapped_module, name)
except AttributeError:
# Placeholder for Google-internal contrib error
if not self._tfmw_public_apis:
raise
if name not in self._tfmw_public_apis:
raise
attr = self._tfmw_import_module(name)
if self._tfmw_print_deprecation_warnings:
self._tfmw_add_deprecation_warning(name, attr)
return attr
def __setattr__(self, arg, val):
if not arg.startswith('_tfmw_'):
setattr(self._tfmw_wrapped_module, arg, val)
self.__dict__[arg] = val
if arg not in self.__all__ and arg != '__all__':
self.__all__.append(arg)
# Update the cache
if self._fastdict_key_in(arg):
self._fastdict_insert(arg, val)
super(TFModuleWrapper, self).__setattr__(arg, val)
def __dir__(self):
if self._tfmw_public_apis:
return list(
set(self._tfmw_public_apis.keys()).union(
set([
attr for attr in dir(self._tfmw_wrapped_module)
if not attr.startswith('_')
])))
else:
return dir(self._tfmw_wrapped_module)
def __delattr__(self, name):
if name.startswith('_tfmw_'):
super(TFModuleWrapper, self).__delattr__(name)
else:
delattr(self._tfmw_wrapped_module, name)
self.__dict__.pop(name)
if name in self.__all__:
self.__all__.remove(name)
self._fastdict_pop(name)
# delattr(self._tfmw_wrapped_module, name)
def __repr__(self):
return self._tfmw_wrapped_module.__repr__()
def __reduce__(self):
return importlib.import_module, (self.__name__,)
| TFModuleWrapper |
python | pypa__warehouse | warehouse/organizations/models.py | {
"start": 7749,
"end": 7843
} | class ____(str, enum.Enum):
Community = "Community"
Company = "Company"
| OrganizationType |
python | pandas-dev__pandas | pandas/tests/window/test_groupby.py | {
"start": 40342,
"end": 47203
} | class ____:
@pytest.mark.parametrize(
"method, expected_data",
[
["mean", [0.0, 0.6666666666666666, 1.4285714285714286, 2.2666666666666666]],
["std", [np.nan, 0.707107, 0.963624, 1.177164]],
["var", [np.nan, 0.5, 0.9285714285714286, 1.3857142857142857]],
],
)
def test_methods(self, method, expected_data):
# GH 16037
df = DataFrame({"A": ["a"] * 4, "B": range(4)})
result = getattr(df.groupby("A").ewm(com=1.0), method)()
expected = DataFrame(
{"B": expected_data},
index=MultiIndex.from_tuples(
[
("a", 0),
("a", 1),
("a", 2),
("a", 3),
],
names=["A", None],
),
)
tm.assert_frame_equal(result, expected)
def test_groupby_ewm_agg_namedagg(self):
# GH#28333
df = DataFrame({"A": ["a"] * 4, "B": range(4)})
result = (
df.groupby("A")
.ewm(com=1.0)
.agg(
B_mean=NamedAgg(column="B", aggfunc="mean"),
B_std=NamedAgg(column="B", aggfunc="std"),
B_var=NamedAgg(column="B", aggfunc="var"),
)
)
expected = DataFrame(
{
"B_mean": [
0.0,
0.6666666666666666,
1.4285714285714286,
2.2666666666666666,
],
"B_std": [np.nan, 0.707107, 0.963624, 1.177164],
"B_var": [np.nan, 0.5, 0.9285714285714286, 1.3857142857142857],
},
index=MultiIndex.from_tuples(
[
("a", 0),
("a", 1),
("a", 2),
("a", 3),
],
names=["A", None],
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"method, expected_data",
[["corr", [np.nan, 1.0, 1.0, 1]], ["cov", [np.nan, 0.5, 0.928571, 1.385714]]],
)
def test_pairwise_methods(self, method, expected_data):
# GH 16037
df = DataFrame({"A": ["a"] * 4, "B": range(4)})
result = getattr(df.groupby("A").ewm(com=1.0), method)()
expected = DataFrame(
{"B": expected_data},
index=MultiIndex.from_tuples(
[
("a", 0, "B"),
("a", 1, "B"),
("a", 2, "B"),
("a", 3, "B"),
],
names=["A", None, None],
),
)
tm.assert_frame_equal(result, expected)
expected = df.groupby("A")[["B"]].apply(
lambda x: getattr(x.ewm(com=1.0), method)()
)
tm.assert_frame_equal(result, expected)
def test_times(self, times_frame):
# GH 40951
halflife = "23 days"
# GH#42738
times = times_frame.pop("C")
result = times_frame.groupby("A").ewm(halflife=halflife, times=times).mean()
expected = DataFrame(
{
"B": [
0.0,
0.507534,
1.020088,
1.537661,
0.0,
0.567395,
1.221209,
0.0,
0.653141,
1.195003,
]
},
index=MultiIndex.from_tuples(
[
("a", 0),
("a", 3),
("a", 6),
("a", 9),
("b", 1),
("b", 4),
("b", 7),
("c", 2),
("c", 5),
("c", 8),
],
names=["A", None],
),
)
tm.assert_frame_equal(result, expected)
def test_times_array(self, times_frame):
# GH 40951
halflife = "23 days"
times = times_frame.pop("C")
gb = times_frame.groupby("A")
result = gb.ewm(halflife=halflife, times=times).mean()
expected = gb.ewm(halflife=halflife, times=times.values).mean()
tm.assert_frame_equal(result, expected)
def test_dont_mutate_obj_after_slicing(self):
# GH 43355
df = DataFrame(
{
"id": ["a", "a", "b", "b", "b"],
"timestamp": date_range("2021-9-1", periods=5, freq="h"),
"y": range(5),
}
)
grp = df.groupby("id").rolling("1h", on="timestamp")
result = grp.count()
expected_df = DataFrame(
{
"timestamp": date_range("2021-9-1", periods=5, freq="h"),
"y": [1.0] * 5,
},
index=MultiIndex.from_arrays(
[["a", "a", "b", "b", "b"], list(range(5))], names=["id", None]
),
)
tm.assert_frame_equal(result, expected_df)
result = grp["y"].count()
expected_series = Series(
[1.0] * 5,
index=MultiIndex.from_arrays(
[
["a", "a", "b", "b", "b"],
date_range("2021-9-1", periods=5, freq="h"),
],
names=["id", "timestamp"],
),
name="y",
)
tm.assert_series_equal(result, expected_series)
# This is the key test
result = grp.count()
tm.assert_frame_equal(result, expected_df)
def test_rolling_corr_with_single_integer_in_index():
# GH 44078
df = DataFrame({"a": [(1,), (1,), (1,)], "b": [4, 5, 6]})
gb = df.groupby(["a"])
result = gb.rolling(2).corr(other=df)
index = MultiIndex.from_tuples([((1,), 0), ((1,), 1), ((1,), 2)], names=["a", None])
expected = DataFrame(
{"a": [np.nan, np.nan, np.nan], "b": [np.nan, 1.0, 1.0]}, index=index
)
tm.assert_frame_equal(result, expected)
def test_rolling_corr_with_tuples_in_index():
# GH 44078
df = DataFrame(
{
"a": [
(
1,
2,
),
(
1,
2,
),
(
1,
2,
),
],
"b": [4, 5, 6],
}
)
gb = df.groupby(["a"])
result = gb.rolling(2).corr(other=df)
index = MultiIndex.from_tuples(
[((1, 2), 0), ((1, 2), 1), ((1, 2), 2)], names=["a", None]
)
expected = DataFrame(
{"a": [np.nan, np.nan, np.nan], "b": [np.nan, 1.0, 1.0]}, index=index
)
tm.assert_frame_equal(result, expected)
| TestEWM |
python | PrefectHQ__prefect | tests/events/server/test_in_memory_ordering.py | {
"start": 8115,
"end": 10451
} | class ____:
async def test_record_and_forget_follower(
self,
causal_ordering: CausalOrdering,
event_one: ReceivedEvent,
event_two: ReceivedEvent,
):
# Initially no followers
assert await causal_ordering.get_followers(event_one) == []
# Record follower
await causal_ordering.record_follower(event_two)
followers = await causal_ordering.get_followers(event_one)
assert followers == [event_two]
# Forget follower
await causal_ordering.forget_follower(event_two)
assert await causal_ordering.get_followers(event_one) == []
async def test_multiple_followers_sorted_by_occurred(
self,
causal_ordering: CausalOrdering,
event_one: ReceivedEvent,
event_three_a: ReceivedEvent,
event_three_b: ReceivedEvent,
):
# Record followers (event_three_b occurs after event_three_a)
await causal_ordering.record_follower(event_three_b)
await causal_ordering.record_follower(event_three_a)
assert event_three_a.follows is not None
# Should return in occurred order (event_three_a.follows is event_two.id, so we need event_two)
leader_event = ReceivedEvent(
resource=event_three_a.resource,
event="leader",
occurred=event_three_a.occurred,
received=event_three_a.received,
id=event_three_a.follows,
follows=None,
)
followers = await causal_ordering.get_followers(leader_event)
assert followers == [event_three_a, event_three_b]
async def test_followers_by_id(
self,
causal_ordering: CausalOrdering,
event_two: ReceivedEvent,
event_three_a: ReceivedEvent,
event_three_b: ReceivedEvent,
):
# Record followers
await causal_ordering.record_follower(event_two)
await causal_ordering.record_follower(event_three_a)
await causal_ordering.record_follower(event_three_b)
# Get specific followers by ID
follower_ids = [event_three_b.id, event_two.id] # Out of order
followers = await causal_ordering.followers_by_id(follower_ids)
# Should return in occurred order
assert followers == [event_two, event_three_b]
| TestFollowerLeaderTracking |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/debugger_cli_common_test.py | {
"start": 44586,
"end": 45241
} | class ____(test_util.TensorFlowTestCase):
def testGetVersionWithoutDependencies(self):
out = debugger_cli_common.get_tensorflow_version_lines()
self.assertEqual(2, len(out.lines))
self.assertEqual("TensorFlow version: %s" % pywrap_tf_session.__version__,
out.lines[0])
def testGetVersionWithDependencies(self):
out = debugger_cli_common.get_tensorflow_version_lines(True)
self.assertIn("TensorFlow version: %s" % pywrap_tf_session.__version__,
out.lines)
self.assertIn(" numpy: %s" % np.__version__, out.lines)
if __name__ == "__main__":
googletest.main()
| GetTensorFlowVersionLinesTest |
python | ansible__ansible | test/units/module_utils/common/test_sys_info.py | {
"start": 4604,
"end": 5812
} | class ____:
class LinuxTest:
pass
class Foo(LinuxTest):
platform = "Linux"
distribution = None
class Bar(LinuxTest):
platform = "Linux"
distribution = "Bar"
def test_not_linux(self):
# if neither match, the fallback should be the top-level class
with patch('platform.system', return_value="Foo"):
with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None):
assert get_platform_subclass(self.LinuxTest) is self.LinuxTest
@pytest.mark.usefixtures("platform_linux")
def test_get_distribution_none(self):
# match just the platform class, not a specific distribution
with patch('ansible.module_utils.common.sys_info.get_distribution', return_value=None):
assert get_platform_subclass(self.LinuxTest) is self.Foo
@pytest.mark.usefixtures("platform_linux")
def test_get_distribution_found(self):
# match both the distribution and platform class
with patch('ansible.module_utils.common.sys_info.get_distribution', return_value="Bar"):
assert get_platform_subclass(self.LinuxTest) is self.Bar
| TestGetPlatformSubclass |
python | tornadoweb__tornado | tornado/test/testing_test.py | {
"start": 1864,
"end": 2618
} | class ____(AsyncTestCase):
def tearDown(self):
super().tearDown()
# Trigger a gc to make warnings more deterministic.
gc.collect()
def test_leaked_coroutine(self):
# This test verifies that "leaked" coroutines are shut down
# without triggering warnings like "task was destroyed but it
# is pending". If this test were to fail, it would fail
# because runtests.py detected unexpected output to stderr.
event = Event()
async def callback():
try:
await event.wait()
except asyncio.CancelledError:
pass
self.io_loop.add_callback(callback)
self.io_loop.add_callback(self.stop)
self.wait()
| LeakTest |
python | getsentry__sentry | tests/sentry/users/models/test_user_merge_verification_code.py | {
"start": 391,
"end": 1569
} | class ____(TestCase):
@freeze_time()
def test_regenerate_token(self) -> None:
code = UserMergeVerificationCode(user=self.user)
token = code.token
code.expires_at = datetime(2025, 3, 14, 5, 32, 21, tzinfo=UTC)
code.save()
code.regenerate_token()
assert code.token != token
assert code.expires_at == datetime.now(UTC) + timedelta(minutes=TOKEN_MINUTES_VALID)
@freeze_time()
def test_expires_at(self) -> None:
code = UserMergeVerificationCode(user=self.user)
code.expires_at = datetime(2025, 3, 14, 5, 32, 21, tzinfo=UTC)
code.save()
assert not code.is_valid()
code.regenerate_token()
assert code.is_valid()
def test_send_email(self) -> None:
code = UserMergeVerificationCode(user=self.user)
with self.options({"system.url-prefix": "http://testserver"}), self.tasks():
code.send_email()
assert len(mail.outbox) == 1
msg = mail.outbox[0]
assert msg.to == [self.user.email]
assert msg.subject == "[Sentry] Your Verification Code"
assert code.token in msg.body
| TestUserMergeVerificationCode |
python | getsentry__sentry | src/sentry/integrations/msteams/link_identity.py | {
"start": 1180,
"end": 2002
} | class ____(MsTeamsIdentityLinkageView, LinkIdentityView):
def get_success_template_and_context(
self, params: Mapping[str, Any], integration: Integration | None
) -> tuple[str, dict[str, Any]]:
return "sentry/integrations/msteams/linked.html", {}
def notify_on_success(
self, external_id: str, params: Mapping[str, Any], integration: Integration | None
) -> None:
if integration is None:
raise ValueError(
'Integration is required for linking (params must include "integration_id")'
)
card = build_linked_card()
client = MsTeamsClient(integration)
user_conversation_id = client.get_user_conversation_id(external_id, params["tenant_id"])
client.send_card(user_conversation_id, card)
| MsTeamsLinkIdentityView |
python | django__django | tests/forms_tests/tests/test_renderers.py | {
"start": 849,
"end": 1103
} | class ____(SimpleTestCase):
def test_get_renderer(self):
with self.assertRaisesMessage(
NotImplementedError, "subclasses must implement get_template()"
):
BaseRenderer().get_template("")
| BaseTemplateRendererTests |
python | django__django | tests/select_related_onetoone/models.py | {
"start": 31,
"end": 139
} | class ____(models.Model):
username = models.CharField(max_length=100)
email = models.EmailField()
| User |
python | pypa__hatch | backend/src/hatchling/metadata/custom.py | {
"start": 260,
"end": 1424
} | class ____:
PLUGIN_NAME = "custom"
def __new__( # type: ignore[misc]
cls,
root: str,
config: dict[str, Any],
*args: Any,
**kwargs: Any,
) -> MetadataHookInterface:
build_script = config.get("path", DEFAULT_BUILD_SCRIPT)
if not isinstance(build_script, str):
message = f"Option `path` for metadata hook `{cls.PLUGIN_NAME}` must be a string"
raise TypeError(message)
if not build_script:
message = f"Option `path` for metadata hook `{cls.PLUGIN_NAME}` must not be empty if defined"
raise ValueError(message)
path = os.path.normpath(os.path.join(root, build_script))
if not os.path.isfile(path):
message = f"Build script does not exist: {build_script}"
raise OSError(message)
hook_class = load_plugin_from_script(path, build_script, MetadataHookInterface, "metadata_hook") # type: ignore[type-abstract]
hook = hook_class(root, config, *args, **kwargs)
# Always keep the name to avoid confusion
hook.PLUGIN_NAME = cls.PLUGIN_NAME
return hook
| CustomMetadataHook |
python | huggingface__transformers | src/transformers/models/sam3_video/modeling_sam3_video.py | {
"start": 20832,
"end": 92333
} | class ____(Sam3VideoPreTrainedModel):
all_tied_weights_keys = {}
def __init__(self, config: Sam3VideoConfig):
super().__init__(config)
self.config = config
self.detector_model = AutoModel.from_config(config.detector_config)
self.tracker_model = AutoModel.from_config(config.tracker_config, remove_vision_encoder=True)
self.low_res_mask_size = config.low_res_mask_size
self.score_threshold_detection = config.score_threshold_detection
self.det_nms_thresh = config.det_nms_thresh
self.assoc_iou_thresh = config.assoc_iou_thresh
self.trk_assoc_iou_thresh = config.trk_assoc_iou_thresh
self.new_det_thresh = config.new_det_thresh
self.recondition_on_trk_masks = config.recondition_on_trk_masks
# hotstart parameters
self.hotstart_delay = config.hotstart_delay
self.hotstart_unmatch_thresh = config.hotstart_unmatch_thresh
self.hotstart_dup_thresh = config.hotstart_dup_thresh
self.suppress_unmatched_only_within_hotstart = config.suppress_unmatched_only_within_hotstart
self.init_trk_keep_alive = config.init_trk_keep_alive
self.max_trk_keep_alive = config.max_trk_keep_alive
self.min_trk_keep_alive = config.min_trk_keep_alive
self.suppress_overlapping_based_on_recent_occlusion_threshold = (
config.suppress_overlapping_based_on_recent_occlusion_threshold
)
self.decrease_trk_keep_alive_for_empty_masklets = config.decrease_trk_keep_alive_for_empty_masklets
self.fill_hole_area = config.fill_hole_area
self.eval()
# the maximum object number
self.max_num_objects = config.max_num_objects
self.recondition_every_nth_frame = config.recondition_every_nth_frame
self.high_conf_thresh = config.high_conf_thresh
self.high_iou_thresh = config.high_iou_thresh
self.tracker_neck = Sam3VisionNeck(config.detector_config.vision_config)
def get_vision_features_for_tracker(self, vision_embeds: torch.Tensor):
hidden_states = vision_embeds.last_hidden_state
batch_size = hidden_states.shape[0]
height, width = self.tracker_model.prompt_encoder.image_embedding_size
hidden_states_spatial = hidden_states.view(batch_size, height, width, -1).permute(0, 3, 1, 2)
fpn_hidden_states, fpn_position_encoding = self.tracker_neck(hidden_states_spatial)
# precompute projected level 0 and level 1 features in SAM decoder
# to avoid running it again on every SAM click
feature_maps = list(fpn_hidden_states[:-1])
feature_maps[0] = self.tracker_model.mask_decoder.conv_s0(feature_maps[0])
feature_maps[1] = self.tracker_model.mask_decoder.conv_s1(feature_maps[1])
# flatten NxCxHxW to HWxNxC
feature_maps = [feature_map.flatten(2).permute(2, 0, 1) for feature_map in feature_maps]
feature_maps_position_embeddings = [
feature_map_position_embedding.flatten(2).permute(2, 0, 1)
for feature_map_position_embedding in fpn_position_encoding[:-1]
]
return feature_maps, feature_maps_position_embeddings
def run_detection(
self,
inference_session: Sam3VideoInferenceSession,
vision_embeds: torch.Tensor,
):
"""
Run detection for all prompts efficiently by reusing vision embeddings.
Args:
inference_session: The inference session containing prompts and state
vision_embeds: Pre-computed vision embeddings to reuse across prompts
Returns:
Dictionary mapping prompt_id to detection outputs
"""
prompt_ids = list(inference_session.prompts.keys())
if not prompt_ids:
raise ValueError("No prompts available for detection. Please add prompts to the session first.")
all_detections = {}
for prompt_id in prompt_ids:
# Get or compute text embeddings for this prompt
if prompt_id not in inference_session.prompt_embeddings:
text_embeds = self.detector_model.get_text_features(
input_ids=inference_session.prompt_input_ids[prompt_id],
attention_mask=inference_session.prompt_attention_masks[prompt_id],
)
inference_session.prompt_embeddings[prompt_id] = text_embeds
else:
text_embeds = inference_session.prompt_embeddings[prompt_id]
# Run detector with cached vision features (efficient!)
detector_outputs = self.detector_model(
vision_embeds=vision_embeds,
text_embeds=text_embeds,
attention_mask=inference_session.prompt_attention_masks[prompt_id],
)
pred_logits = detector_outputs.pred_logits
presence_logits = detector_outputs.presence_logits
pred_probs = pred_logits.sigmoid()
presence_scores = presence_logits.sigmoid()
pred_probs = pred_probs * presence_scores
run_nms = self.det_nms_thresh > 0.0
if run_nms:
keep = nms_masks(
pred_probs=pred_probs[0],
pred_masks=detector_outputs.pred_masks[0],
prob_threshold=self.score_threshold_detection,
iou_threshold=self.det_nms_thresh,
)
# Set suppressed detections' probabilities to 0
pred_probs[0][~keep] = 0.0
pred_boxes_xyxy = detector_outputs.pred_boxes
pred_masks = detector_outputs.pred_masks
# get the positive detection outputs above threshold
pos_pred_idx = torch.where(pred_probs > self.score_threshold_detection)
det_out = {
"bbox": pred_boxes_xyxy[pos_pred_idx[0], pos_pred_idx[1]],
"mask": pred_masks[pos_pred_idx[0], pos_pred_idx[1]],
"scores": pred_probs[pos_pred_idx[0], pos_pred_idx[1]],
}
all_detections[prompt_id] = det_out
return all_detections
def run_tracker_propagation(
self,
inference_session: Sam3VideoInferenceSession,
frame_idx: int,
reverse: bool,
):
low_res_masks_list = []
obj_scores_list = []
if len(inference_session.obj_ids) > 0:
# propagate one frame
out = self.tracker_model(
inference_session=inference_session,
frame_idx=frame_idx,
reverse=reverse,
run_mem_encoder=False,
)
out_low_res_masks = out.pred_masks
out_obj_scores = out.object_score_logits
# only 1 frames should be propagated
low_res_masks_list.append(out_low_res_masks.squeeze(1))
obj_scores_list.append(out_obj_scores.squeeze(1))
# concatenate the output masklets from all local inference states
H_mask = W_mask = self.low_res_mask_size
if len(low_res_masks_list) > 0:
low_res_masks = torch.cat(low_res_masks_list, dim=0)
obj_scores = torch.cat(obj_scores_list, dim=0)
# Apply hole filling to the masks
low_res_masks = fill_holes_in_mask_scores(
low_res_masks.unsqueeze(1),
max_area=self.fill_hole_area,
fill_holes=True,
remove_sprinkles=True,
)
low_res_masks = low_res_masks.squeeze(1)
else:
low_res_masks = torch.zeros(0, H_mask, W_mask, device=self.device)
obj_scores = torch.zeros(0, device=self.device)
return low_res_masks, obj_scores
def _associate_det_trk(
self,
det_masks: Tensor,
det_scores: Tensor,
trk_masks: Tensor,
trk_obj_ids: list[int],
det_prompt_ids: torch.Tensor,
trk_prompt_ids: torch.Tensor,
):
"""
Match detections on the current frame with the existing masklets.
Args:
- det_masks: (N, H, W) tensor of predicted masks
- det_scores: (N,) tensor of detection scores
- trk_masks: (M, H, W) tensor of track masks
- trk_obj_ids: (M,) list of object IDs corresponding to trk_masks
- det_prompt_ids: (N,) tensor of prompt IDs for each detection. Prevents cross-prompt
associations by zeroing IoUs between detections and tracks from different prompts.
- trk_prompt_ids: (M,) tensor of prompt IDs for each tracked object. Prevents cross-prompt
associations by zeroing IoUs between detections and tracks from different prompts.
Returns:
- new_det_out_inds: list of new object indices among in FA detection outputs
- unmatched_trk_obj_ids: list of existing masklet object IDs that are not matched
to any detections on this frame (for unmatched, we only count masklets with >0 area)
- det_to_matched_trk_obj_ids: dict[int, list[int]]: mapping from FA detection indices
to the list of matched tracklet object IDs
- empty_trk_obj_ids: list of existing masklet object IDs with zero area in SAM2 prediction
"""
iou_threshold = self.assoc_iou_thresh
iou_threshold_trk = self.trk_assoc_iou_thresh
new_det_thresh = self.new_det_thresh
trk_obj_ids_tensor = (
torch.tensor(trk_obj_ids, dtype=torch.long, device=det_masks.device)
if trk_obj_ids
else torch.empty(0, dtype=torch.long, device=det_masks.device)
)
if trk_masks.size(0) == 0:
# all detections are new
new_det_out_inds = list(range(det_masks.size(0)))
unmatched_trk_obj_ids = []
empty_trk_obj_ids = []
det_to_matched_trk_obj_ids = {}
trk_id_to_max_iou_high_conf_det = {}
return (
new_det_out_inds,
unmatched_trk_obj_ids,
det_to_matched_trk_obj_ids,
trk_id_to_max_iou_high_conf_det,
empty_trk_obj_ids,
)
elif det_masks.size(0) == 0:
# all previous tracklets are unmatched if they have a non-zero area
new_det_out_inds = []
trk_is_nonempty = (trk_masks > 0).any(dim=(1, 2)) # (M,) tensor
# Use tensor boolean indexing - elegant and avoids intermediate conversions
unmatched_trk_obj_ids = trk_obj_ids_tensor[trk_is_nonempty].tolist()
empty_trk_obj_ids = trk_obj_ids_tensor[~trk_is_nonempty].tolist()
det_to_matched_trk_obj_ids = {}
trk_id_to_max_iou_high_conf_det = {}
return (
new_det_out_inds,
unmatched_trk_obj_ids,
det_to_matched_trk_obj_ids,
trk_id_to_max_iou_high_conf_det,
empty_trk_obj_ids,
)
det_masks_binary = det_masks > 0
trk_masks_binary = trk_masks > 0
ious = mask_iou(det_masks_binary, trk_masks_binary) # (N, M) tensor
# Prevent cross-prompt associations by zeroing IoUs between different prompt groups.
prompt_match = det_prompt_ids.unsqueeze(1) == trk_prompt_ids.unsqueeze(0)
ious = torch.where(prompt_match, ious, torch.zeros_like(ious))
# trk_is_matched: for each track, True if matched to any detection above threshold
trk_is_matched = (ious >= iou_threshold_trk).any(dim=0) # (M,)
# Non-empty tracks not matched by Hungarian assignment above threshold are unmatched
trk_is_nonempty = trk_masks_binary.any(dim=(1, 2)) # (M,)
trk_is_unmatched = trk_is_nonempty & ~trk_is_matched # (M,)
# Use tensor boolean indexing directly - no intermediate conversions
unmatched_trk_obj_ids = trk_obj_ids_tensor[trk_is_unmatched].tolist()
empty_trk_obj_ids = trk_obj_ids_tensor[~trk_is_nonempty].tolist()
# For detections: allow many tracks to match to the same detection (many-to-one)
# So, a detection is 'new' if it does not match any track above threshold
det_matches_any_trk = (ious >= iou_threshold).any(dim=1) # (N,)
is_new_det = (det_scores >= new_det_thresh) & ~det_matches_any_trk # (N,)
new_det_out_inds = torch.where(is_new_det)[0].tolist()
# Build detection-to-track mappings using tensor operations
det_to_matched_trk_obj_ids = {}
trk_id_to_max_iou_high_conf_det = {} # trk id --> exactly one detection idx
det_to_max_iou_trk_idx = ious.argmax(dim=1) # (N,)
det_is_high_conf = (det_scores >= self.high_conf_thresh) & ~is_new_det # (N,)
det_max_iou = ious.max(dim=1)[0] # (N,)
det_is_high_iou = det_max_iou >= self.high_iou_thresh # (N,)
det_is_high_conf_and_iou = det_is_high_conf & det_is_high_iou # (N,)
high_conf_and_iou_mask = det_is_high_conf_and_iou # Keep as tensor
for det_idx in range(det_masks.size(0)):
# Find which tracks match this detection using tensor boolean indexing
matched_trk_mask = ious[det_idx] >= iou_threshold # (M,)
det_to_matched_trk_obj_ids[det_idx] = trk_obj_ids_tensor[matched_trk_mask].tolist()
if high_conf_and_iou_mask[det_idx].item():
trk_idx = det_to_max_iou_trk_idx[det_idx].item()
trk_obj_id = trk_obj_ids_tensor[trk_idx].item()
trk_id_to_max_iou_high_conf_det[trk_obj_id] = det_idx
return (
new_det_out_inds,
unmatched_trk_obj_ids,
det_to_matched_trk_obj_ids,
trk_id_to_max_iou_high_conf_det,
empty_trk_obj_ids,
)
def _process_hotstart(
    self,
    inference_session: Sam3VideoInferenceSession,
    frame_idx: int,
    reverse: bool,
    det_to_matched_trk_obj_ids: dict[int, list[int]],
    new_det_obj_ids: list[int],
    empty_trk_obj_ids: list[int],
    unmatched_trk_obj_ids: list[int],
    extra_metadata: dict[str, Any],
    streaming: bool = False,
):
    """
    Handle hotstart heuristics to remove unmatched or duplicated objects.

    In streaming mode, hotstart removal logic is disabled since we don't have
    future frames to make informed decisions about object removal.

    Args:
        inference_session: Session holding per-object tracking state (unused here
            directly; kept for signature symmetry with sibling phases).
        frame_idx: Index of the frame being processed.
        reverse: True when propagating backwards through the video.
        det_to_matched_trk_obj_ids: Detection index -> matched track object IDs.
        new_det_obj_ids: Object IDs newly created from detections on this frame.
        empty_trk_obj_ids: Track object IDs whose predicted mask is empty this frame.
        unmatched_trk_obj_ids: Non-empty track object IDs not matched to any detection.
        extra_metadata: Mutable hotstart bookkeeping dict (first-frame indices,
            unmatched-frame lists, keep-alive counters, overlap histories,
            removed/suppressed ID sets). Mutated in place.
        streaming: If True, skip the removal decisions (no future frames available).

    Returns:
        Tuple of (obj_ids_newly_removed, extra_metadata): the set of object IDs
        removed on this frame, and the (mutated) bookkeeping dict.
    """
    # obj_id --> first frame index where the object was detected
    obj_first_frame_idx = extra_metadata["obj_first_frame_idx"]
    # obj_id --> [mismatched frame indices]
    unmatched_frame_inds = extra_metadata["unmatched_frame_inds"]
    # obj_id --> keep-alive counter; bumped when matched, decayed when unmatched
    trk_keep_alive = extra_metadata["trk_keep_alive"]
    # (first_appear_obj_id, obj_id) --> [overlap frame indices]
    overlap_pair_to_frame_inds = extra_metadata["overlap_pair_to_frame_inds"]
    # removed_obj_ids: object IDs that are suppressed via hot-start
    removed_obj_ids = extra_metadata["removed_obj_ids"]
    suppressed_obj_ids = extra_metadata["suppressed_obj_ids"][frame_idx]
    obj_ids_newly_removed = set()  # object IDs to be newly removed on this frame
    # Boundary of the hotstart window; direction-dependent because frame indices
    # shrink when propagating in reverse.
    hotstart_diff = frame_idx - self.hotstart_delay if not reverse else frame_idx + self.hotstart_delay
    # Step 1: log the frame index where each object ID first appears
    for obj_id in new_det_obj_ids:
        if obj_id not in obj_first_frame_idx:
            obj_first_frame_idx[obj_id] = frame_idx
            trk_keep_alive[int(obj_id)] = self.init_trk_keep_alive
    matched_trks = set()
    # We use the det-->tracks list to check for matched objects. Otherwise, we need to compute areas to decide whether they're occluded
    for matched_trks_per_det in det_to_matched_trk_obj_ids.values():
        matched_trks.update({int(obj_id) for obj_id in matched_trks_per_det})
    for obj_id in matched_trks:
        # NOTE: To minimize number of configurable params, we use the hotstart_unmatch_thresh to set the max value of trk_keep_alive
        trk_keep_alive[int(obj_id)] = min(self.max_trk_keep_alive, trk_keep_alive[int(obj_id)] + 1)
    for obj_id in unmatched_trk_obj_ids:
        unmatched_frame_inds[obj_id].append(frame_idx)
        # NOTE: To minimize number of configurable params, we use the hotstart_unmatch_thresh to set the min value of trk_keep_alive
        # The max keep alive is 2x the min, means the model prefers to keep the prediction rather than suppress it if it was matched long enough.
        trk_keep_alive[int(obj_id)] = max(self.min_trk_keep_alive, trk_keep_alive[int(obj_id)] - 1)
    if self.decrease_trk_keep_alive_for_empty_masklets:
        for obj_id in empty_trk_obj_ids:
            # NOTE: To minimize number of configurable params, we use the hotstart_unmatch_thresh to set the min value of trk_keep_alive
            trk_keep_alive[int(obj_id)] = max(self.min_trk_keep_alive, trk_keep_alive[int(obj_id)] - 1)
    # Step 2: removed tracks that has not matched with detections for `hotstart_unmatch_thresh` frames with hotstart period
    # a) add unmatched frame indices for each existing object ID
    # note that `unmatched_frame_inds` contains those frames where the SAM2 output mask
    # doesn't match any FA detection; it excludes those frames where SAM2 gives an empty mask
    # b) remove a masklet if it first appears after `hotstart_diff` and is unmatched for more
    # than `self.hotstart_unmatch_thresh` frames
    # NOTE: In streaming mode, we skip hotstart removal logic since we don't have future frames
    if not streaming:
        for obj_id, frame_indices in unmatched_frame_inds.items():
            if obj_id in removed_obj_ids or obj_id in obj_ids_newly_removed:
                continue  # skip if the object is already removed
            if len(frame_indices) >= self.hotstart_unmatch_thresh:
                is_within_hotstart = (obj_first_frame_idx[obj_id] > hotstart_diff and not reverse) or (
                    obj_first_frame_idx[obj_id] < hotstart_diff and reverse
                )
                if is_within_hotstart:
                    obj_ids_newly_removed.add(obj_id)
                    logger.info(
                        f"Removing object {obj_id} at frame {frame_idx} "
                        f"since it is unmatched for frames: {frame_indices}"
                    )
            if (
                trk_keep_alive[obj_id] <= 0  # Object has not been matched for too long
                and not self.suppress_unmatched_only_within_hotstart
                and obj_id not in removed_obj_ids
                and obj_id not in obj_ids_newly_removed
            ):
                logger.debug(f"Suppressing object {obj_id} at frame {frame_idx}, due to being unmatched")
                suppressed_obj_ids.add(obj_id)
    # Step 3: removed tracks that overlaps with another track for `hotstart_dup_thresh` frames
    # a) find overlaps tracks -- we consider overlap if they match to the same detection
    # NOTE: In streaming mode, we still track overlaps for metadata but skip removal logic
    for matched_trk_obj_ids in det_to_matched_trk_obj_ids.values():
        if len(matched_trk_obj_ids) < 2:
            continue  # only count detections that are matched to multiple (>=2) masklets
        # if there are multiple matched track ids, we need to find the one that appeared first;
        # these later appearing ids may be removed since they may be considered as duplicates
        first_appear_obj_id = (
            min(matched_trk_obj_ids, key=lambda x: obj_first_frame_idx[x])
            if not reverse
            else max(matched_trk_obj_ids, key=lambda x: obj_first_frame_idx[x])
        )
        for obj_id in matched_trk_obj_ids:
            if obj_id != first_appear_obj_id:
                key = (first_appear_obj_id, obj_id)
                overlap_pair_to_frame_inds[key].append(frame_idx)
    # b) remove a masklet if it first appears after `hotstart_diff` and it overlaps with another
    # masklet (that appears earlier) for more than `self.hotstart_dup_thresh` frames
    # NOTE: In streaming mode, we skip hotstart removal logic since we don't have future frames
    if not streaming:
        for (first_obj_id, obj_id), frame_indices in overlap_pair_to_frame_inds.items():
            if obj_id in removed_obj_ids or obj_id in obj_ids_newly_removed:
                continue  # skip if the object is already removed
            if (obj_first_frame_idx[obj_id] > hotstart_diff and not reverse) or (
                obj_first_frame_idx[obj_id] < hotstart_diff and reverse
            ):
                if len(frame_indices) >= self.hotstart_dup_thresh:
                    obj_ids_newly_removed.add(obj_id)
                    logger.info(
                        f"Removing object {obj_id} at frame {frame_idx} "
                        f"since it overlaps with another track {first_obj_id} at frames: {frame_indices}"
                    )
    removed_obj_ids.update(obj_ids_newly_removed)
    return obj_ids_newly_removed, extra_metadata
def run_memory_encoder(
    self,
    inference_session: Sam3VideoInferenceSession,
    frame_idx: int,
    high_res_masks: torch.Tensor,
    object_score_logits: torch.Tensor,
):
    """
    Run the memory encoder on `high_res_masks`. This is usually after applying
    non-overlapping constraints to object scores. Since their scores changed, their
    memory also need to be computed again with the memory encoder.

    Args:
        inference_session: Session whose feature cache holds this frame's vision features.
        frame_idx: Frame whose cached vision features are used for encoding.
        high_res_masks: Per-object mask predictions to encode, shape (num_objects, 1, H, W).
        object_score_logits: Per-object score logits, shape (num_objects, 1).

    Returns:
        Tuple of (maskmem_features, maskmem_pos_enc) produced by the tracker's
        memory encoder.
    """
    # Retrieve correct image features (cached earlier by _det_track_one_frame).
    cached_features = inference_session.cache.get_vision_features(frame_idx)
    current_vision_feats = cached_features["vision_feats"]
    # Only the last (highest-level) feature map is consumed by the memory encoder.
    maskmem_features, maskmem_pos_enc = self.tracker_model._encode_new_memory(
        current_vision_feats=current_vision_feats[-1],
        pred_masks_high_res=high_res_masks,
        object_score_logits=object_score_logits,
        is_mask_from_pts=False,
    )
    return maskmem_features, maskmem_pos_enc
def _prepare_recondition_masks(
    self,
    inference_session: Sam3VideoInferenceSession,
    frame_idx: int,
    det_out: dict[str, Tensor],
    trk_masks: Tensor,
    trk_id_to_max_iou_high_conf_det: dict[int, int],
    tracker_obj_scores_global: Tensor,
) -> tuple[dict[int, Tensor], set[int]]:
    """
    Prepare high-resolution masks for reconditioned objects.

    When recondition_on_trk_masks=True, uses detector as validation signal to
    strengthen tracker memory. When False, uses detector to correct tracker
    drift by replacing with detection masks.

    Args:
        inference_session: Session used to map object IDs to batch indices.
        frame_idx: Current frame index (unused here; kept for call symmetry).
        det_out: Merged detector output with a "mask" tensor.
        trk_masks: Tracker mask predictions for all objects.
        trk_id_to_max_iou_high_conf_det: Track obj_id -> best high-confidence
            detection index (from `_associate_det_trk`).
        tracker_obj_scores_global: Per-object tracker score logits.

    Returns:
        Tuple of (reconditioned_masks, reconditioned_obj_ids): obj_idx -> mask
        tensor for objects to recondition, and the set of their object IDs.
    """
    reconditioned_masks = {}
    reconditioned_obj_ids = set()
    for trk_obj_id, det_idx in trk_id_to_max_iou_high_conf_det.items():
        obj_idx = inference_session.obj_id_to_idx(trk_obj_id)
        obj_score = tracker_obj_scores_global[obj_idx]
        # Only recondition objects the tracker itself is confident about.
        if obj_score <= self.high_conf_thresh:
            continue
        if self.recondition_on_trk_masks:
            # Validation mode: detector confirms tracker quality, strengthen memory with tracked mask
            new_mask = trk_masks[obj_idx : obj_idx + 1].unsqueeze(1)
            reconditioned_masks[obj_idx] = new_mask
            reconditioned_obj_ids.add(trk_obj_id)
        else:
            # Correction mode: detector corrects drift, replace tracker mask with detection mask
            new_mask = det_out["mask"][det_idx : det_idx + 1].unsqueeze(1)
            # Binarize the detection logits before using them as a mask prompt.
            reconditioned_masks[obj_idx] = new_mask >= 0.5
            reconditioned_obj_ids.add(trk_obj_id)
    return reconditioned_masks, reconditioned_obj_ids
def _get_objects_to_suppress_based_on_most_recently_occluded(
    self,
    binary_low_res_masks: Tensor,
    last_occluded: Tensor,
    obj_ids: list[int],
    reverse: bool = False,
):
    """
    Decide which objects to suppress when their masks overlap.

    For each overlapping pair (IoU above the configured threshold), the object
    that was occluded most recently loses, provided it has actually been
    occluded before (last-occluded index > -1).

    Args:
        binary_low_res_masks: Boolean masks, shape (N, H, W).
        last_occluded: Per-object frame index of the most recent occlusion,
            shape (N,); -1 means never occluded.
        obj_ids: Object IDs corresponding to the N mask rows (only the count
            is used here).
        reverse: If True, "more recent" means a smaller frame index.

    Returns:
        Bool tensor of shape (N,), True where the object should be suppressed.
    """
    # Suppress overlapping masks for objects that were most recently occluded
    to_suppress = torch.zeros(
        binary_low_res_masks.size(0),
        device=binary_low_res_masks.device,
        dtype=torch.bool,
    )
    # With at most one object there can be no overlap.
    if len(obj_ids) <= 1:
        return to_suppress
    iou = mask_iou(binary_low_res_masks, binary_low_res_masks)  # [N,N]
    # Create masks for upper triangular matrix (i < j) and IoU threshold
    mask_iou_thresh = iou >= self.suppress_overlapping_based_on_recent_occlusion_threshold
    overlapping_pairs = torch.triu(mask_iou_thresh, diagonal=1)  # [N,N]
    last_occ_expanded_i = last_occluded.unsqueeze(1)  # (N, 1)
    last_occ_expanded_j = last_occluded.unsqueeze(0)  # (1, N)
    # Suppress most recently occluded
    cmp_op = torch.gt if not reverse else torch.lt
    suppress_i_mask = (
        overlapping_pairs
        & cmp_op(last_occ_expanded_i, last_occ_expanded_j)  # (last_occ_expanded_i > last_occ_expanded_j)
        & (last_occ_expanded_j > -1)  # j can suppress i only if i was previously occluded
    )
    suppress_j_mask = (
        overlapping_pairs
        & cmp_op(last_occ_expanded_j, last_occ_expanded_i)
        & (last_occ_expanded_i > -1)  # i can suppress j only if j was previously occluded
    )
    # Apply suppression: an object is suppressed if it loses in any pair.
    to_suppress = suppress_i_mask.any(dim=1) | suppress_j_mask.any(dim=0)
    return to_suppress
def _suppress_overlapping_based_on_recent_occlusion(
    self,
    inference_session: Sam3VideoInferenceSession,
    frame_idx: int,
    tracker_low_res_masks_global: Tensor,
    tracker_metadata_new: dict[str, Any],
    obj_ids_newly_removed: set[int],
    reverse: bool = False,
):
    """
    Suppress overlapping masks based on the most recent occlusion information.
    If an object is removed by hotstart, we always suppress it if it overlaps
    with any other object. Suppression is only applied between objects that
    share the same prompt group.

    Args:
        inference_session: Session providing object IDs, per-object occlusion
            history, and prompt-group assignments.
        frame_idx (int): The current frame index.
        tracker_low_res_masks_global (Tensor): The low-resolution masks for the
            current frame; modified in place for suppressed objects.
        tracker_metadata_new (Dict[str, Any]): The metadata for the current frame;
            its "obj_id_to_last_occluded" entry is (re)written here.
        obj_ids_newly_removed (Set[int]): The object IDs that have been removed.
        reverse (bool): True when propagating backwards through the video.

    Return:
        Tensor: The updated low-resolution masks with some objects suppressed.
    """
    obj_ids_global = inference_session.obj_ids
    binary_tracker_low_res_masks_global = tracker_low_res_masks_global > 0
    batch_size = tracker_low_res_masks_global.size(0)
    if batch_size > 0:
        NEVER_OCCLUDED = -1
        ALWAYS_OCCLUDED = 100000  # This value should be larger than any possible frame index, indicates that the object was removed by hotstart logic
        # Per-object most-recent occlusion frame from the previous step; objects
        # removed by hotstart are treated as "always occluded" so they lose ties.
        last_occluded_prev = torch.cat(
            [
                inference_session.obj_id_to_last_occluded.get(
                    obj_id,
                    torch.full(
                        (1,),
                        fill_value=(NEVER_OCCLUDED if obj_id not in obj_ids_newly_removed else ALWAYS_OCCLUDED),
                        device=binary_tracker_low_res_masks_global.device,
                        dtype=torch.long,
                    ),
                )
                for obj_id in obj_ids_global
            ],
            dim=0,
        )
        prompt_ids_global = torch.tensor(
            [inference_session.obj_id_to_prompt_id[obj_id] for obj_id in obj_ids_global],
            device=binary_tracker_low_res_masks_global.device,
            dtype=torch.long,
        )
        to_suppress = torch.zeros(
            batch_size,
            device=binary_tracker_low_res_masks_global.device,
            dtype=torch.bool,
        )
        # Only suppress overlaps within the same prompt group.
        unique_prompts = prompt_ids_global.unique(sorted=True)
        for prompt_id in unique_prompts:
            prompt_mask = prompt_ids_global == prompt_id
            prompt_indices = torch.nonzero(prompt_mask, as_tuple=True)[0]
            if prompt_indices.numel() <= 1:
                continue
            prompt_masks = binary_tracker_low_res_masks_global[prompt_indices]
            prompt_last_occ = last_occluded_prev[prompt_indices]
            prompt_obj_ids = [obj_ids_global[idx] for idx in prompt_indices.tolist()]
            prompt_suppress = self._get_objects_to_suppress_based_on_most_recently_occluded(
                prompt_masks,
                prompt_last_occ,
                prompt_obj_ids,
                reverse,
            )
            to_suppress[prompt_indices] = prompt_suppress
        # Update metadata with occlusion information
        is_obj_occluded = ~(binary_tracker_low_res_masks_global.any(dim=(-1, -2)))
        is_obj_occluded_or_suppressed = is_obj_occluded | to_suppress
        last_occluded_new = last_occluded_prev.clone()
        last_occluded_new[is_obj_occluded_or_suppressed] = frame_idx
        # Slice out the last occluded frame for each object
        tracker_metadata_new["obj_id_to_last_occluded"] = {
            obj_id: last_occluded_new[obj_idx : obj_idx + 1] for obj_idx, obj_id in enumerate(obj_ids_global)
        }
        # Zero out suppressed masks before memory encoding
        NO_OBJ_LOGIT = -10
        tracker_low_res_masks_global[to_suppress] = NO_OBJ_LOGIT
    return tracker_low_res_masks_global
def _apply_non_overlapping_constraints(self, pred_masks):
"""
Apply non-overlapping constraints to the object scores in pred_masks. Here we
keep only the highest scoring object at each spatial location in pred_masks.
"""
batch_size = pred_masks.size(0)
if batch_size == 1:
return pred_masks
device = pred_masks.device
# "max_obj_inds": object index of the object with the highest score at each location
max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
# "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
keep = max_obj_inds == batch_obj_inds
# suppress overlapping regions' scores below -10.0 so that the foreground regions
# don't overlap (here sigmoid(-10.0)=4.5398e-05)
pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks
def _suppress_shrinked_masks(self, pred_masks, new_pred_masks, shrink_threshold=0.3):
area_before = (pred_masks > 0).sum(dim=(-1, -2))
area_after = (new_pred_masks > 0).sum(dim=(-1, -2))
area_before = torch.clamp(area_before, min=1.0)
area_ratio = area_after / area_before
keep = area_ratio >= shrink_threshold
keep_mask = keep[..., None, None].expand_as(pred_masks)
pred_masks_after = torch.where(keep_mask, pred_masks, torch.clamp(pred_masks, max=-10.0))
return pred_masks_after
def _suppress_object_pw_area_shrinkage(
    self,
    pred_masks,
    prompt_ids: Optional[list[int]] = None,
):
    """
    Suppress masks that shrink too much after pixelwise non-overlapping
    constraints are applied.

    Without `prompt_ids`, all objects are treated as one group. With
    `prompt_ids`, the constraint is enforced independently per prompt group,
    so objects from different prompts never suppress one another.
    """
    if prompt_ids is None:
        # Single group: apply the constraint across every object jointly.
        return self._suppress_object_pw_area_shrinkage_impl(pred_masks)
    if len(prompt_ids) != pred_masks.size(0):
        raise ValueError("prompt_ids must have the same length as pred_masks")
    group_ids = torch.tensor(prompt_ids, device=pred_masks.device, dtype=torch.long)
    result = pred_masks.clone()
    for group_id in group_ids.unique(sorted=True):
        members = torch.nonzero(group_ids == group_id, as_tuple=True)[0]
        if members.numel() == 0:
            continue
        # Suppress only within this prompt group.
        result[members] = self._suppress_object_pw_area_shrinkage_impl(result[members])
    return result
def _suppress_object_pw_area_shrinkage_impl(self, pred_masks):
    """Apply pixelwise non-overlap, then drop objects whose area shrank too much."""
    # With zero or one object there is nothing to suppress.
    if pred_masks.size(0) <= 1:
        return pred_masks
    non_overlapping = self._apply_non_overlapping_constraints(pred_masks)
    return self._suppress_shrinked_masks(pred_masks, non_overlapping)
def _tracker_update_memories(
    self,
    inference_session: Sam3VideoInferenceSession,
    frame_idx: int,
    low_res_masks: Tensor,
    reconditioned_masks: Optional[dict[int, Tensor]] = None,
):
    """
    Run Sam3Tracker memory encoder, enforcing non-overlapping constraints globally.
    Now with batched memory encoding for better performance.

    Args:
        inference_session: The inference session state; per-object output dicts
            are updated in place with the new memory features.
        frame_idx: Current frame index.
        low_res_masks: Low-resolution tracker masks for all objects, shape (N, H, W).
        reconditioned_masks: Optional dict of obj_idx -> high_res_mask for objects that
            should use detection masks instead of tracker masks.
    """
    # No tracked objects -> nothing to encode.
    if len(inference_session.obj_ids) == 0:
        return
    if reconditioned_masks is None:
        reconditioned_masks = {}
    # Add a channel dimension: (N, H, W) -> (N, 1, H, W). NOTE(review): despite
    # the variable name, no spatial interpolation happens here.
    high_res_masks = low_res_masks.unsqueeze(1)
    # Override with detection masks for reconditioned objects
    for obj_idx, recond_mask in reconditioned_masks.items():
        high_res_masks[obj_idx] = recond_mask.float()
        # Mark as conditioning frame for reconditioned objects
        output_dict = inference_session.output_dict_per_obj[obj_idx]
        if frame_idx in output_dict["non_cond_frame_outputs"]:
            current_out = output_dict["non_cond_frame_outputs"].pop(frame_idx)
            output_dict["cond_frame_outputs"][frame_idx] = current_out
    # Apply non-overlapping constraints before memory encoding.
    # Constraints are enforced independently per prompt group.
    # Every object ID has a prompt_id assigned when it's created.
    prompt_ids_for_objects = [
        inference_session.obj_id_to_prompt_id[obj_id] for obj_id in inference_session.obj_ids
    ]
    high_res_masks = self._suppress_object_pw_area_shrinkage(high_res_masks, prompt_ids_for_objects)
    # Use mask areas as a proxy for object scores: any foreground pixel -> +10, else -10.
    object_score_logits = torch.where((high_res_masks > 0).any(dim=(-1, -2)), 10.0, -10.0)
    # Run memory encoder in batch for all objects at once
    num_objects = len(inference_session.obj_ids)
    object_score_logits_batched = object_score_logits.unsqueeze(-1)  # Shape: (num_objects, 1)
    # Encode memories for all objects in one batch call
    maskmem_features_batched, maskmem_pos_enc_batched = self.run_memory_encoder(
        inference_session,
        frame_idx,
        high_res_masks,  # Shape: (num_objects, 1, H, W)
        object_score_logits_batched,  # Shape: (num_objects, 1)
    )
    # Split and store encoded memories per object
    for obj_idx in range(num_objects):
        output_dict = inference_session.output_dict_per_obj[obj_idx]
        # Extract per-object memory from batched result
        maskmem_features = maskmem_features_batched[:, obj_idx : obj_idx + 1]
        maskmem_pos_enc = maskmem_pos_enc_batched[:, obj_idx : obj_idx + 1]
        # Attach the memory to whichever storage slot holds this frame's output.
        for storage_key in ["cond_frame_outputs", "non_cond_frame_outputs"]:
            if frame_idx not in output_dict[storage_key]:
                continue
            current_out = output_dict[storage_key][frame_idx]
            current_out["maskmem_features"] = maskmem_features
            current_out["maskmem_pos_enc"] = maskmem_pos_enc
def run_tracker_update_planning_phase(
    self,
    inference_session: Sam3VideoInferenceSession,
    frame_idx: int,
    reverse: bool,
    det_out: dict[str, Tensor],
    tracker_low_res_masks_global: Tensor,
    tracker_obj_scores_global: Tensor,
    det_idx_to_prompt_id: dict[int, int],
    streaming: bool = False,
):
    """
    Plan how to reconcile this frame's detections with existing tracks.

    Associates detections with tracked objects, applies hotstart heuristics,
    optionally prepares reconditioning masks, and runs memory encoding. Returns
    a `tracker_update_plan` dict (consumed by the execution phase) and the
    updated `tracker_metadata_new` dict.
    """
    # initialize new metadata from previous metadata (its values will be updated later)
    tracker_metadata_new = {
        "obj_ids": deepcopy(inference_session.obj_ids),
        "obj_id_to_score": deepcopy(inference_session.obj_id_to_score),
        "obj_id_to_tracker_score_frame_wise": deepcopy(inference_session.obj_id_to_tracker_score_frame_wise),
        "obj_id_to_last_occluded": {},  # will be filled later
        "max_obj_id": deepcopy(inference_session.max_obj_id),
    }
    # Initialize reconditioned_obj_ids early to avoid UnboundLocalError
    # NOTE(review): this is re-initialized again before Step 3 below; redundant but harmless.
    reconditioned_obj_ids = set()
    # Step 1: make the update plan and resolve heuristics
    det_mask_preds: Tensor = det_out["mask"]  # low-res mask logits
    det_scores: Tensor = det_out["scores"].float()  # Keep as tensor!
    # det_idx_to_prompt_id maps every detection index to its prompt_id (created by _merge_detections_from_prompts).
    det_prompt_ids = (
        torch.tensor(
            [det_idx_to_prompt_id[idx] for idx in range(det_mask_preds.size(0))],
            device=det_mask_preds.device,
            dtype=torch.long,
        )
        if det_mask_preds.size(0) > 0
        else torch.empty(0, device=det_mask_preds.device, dtype=torch.long)
    )
    # Get prompt IDs for tracked objects.
    trk_prompt_ids = (
        torch.tensor(
            [inference_session.obj_id_to_prompt_id[obj_id] for obj_id in inference_session.obj_ids],
            device=tracker_low_res_masks_global.device
            if tracker_low_res_masks_global.numel() > 0
            else det_mask_preds.device,
            dtype=torch.long,
        )
        if tracker_low_res_masks_global.numel() > 0
        else torch.empty(0, device=det_mask_preds.device, dtype=torch.long)
    )
    # a) match FA and SAM2 masks and find new objects
    (
        new_det_out_inds,
        unmatched_trk_obj_ids,
        det_to_matched_trk_obj_ids,
        trk_id_to_max_iou_high_conf_det,
        empty_trk_obj_ids,
    ) = self._associate_det_trk(
        det_masks=det_mask_preds,
        det_scores=det_scores,
        trk_masks=tracker_low_res_masks_global,
        trk_obj_ids=inference_session.obj_ids,
        det_prompt_ids=det_prompt_ids,
        trk_prompt_ids=trk_prompt_ids,
    )
    # check whether we've hit the maximum number of objects we can track (and if so, drop some detections)
    prev_obj_num = len(inference_session.obj_ids)
    new_det_num = len(new_det_out_inds)
    num_obj_dropped_due_to_limit = 0
    if prev_obj_num + new_det_num > self.max_num_objects:
        logger.warning(f"hitting {self.max_num_objects=} with {new_det_num=} and {prev_obj_num=}")
        new_det_num_to_keep = self.max_num_objects - prev_obj_num
        num_obj_dropped_due_to_limit = new_det_num - new_det_num_to_keep
        # Keep top scoring detections
        new_det_inds_tensor = torch.tensor(new_det_out_inds, dtype=torch.long, device=det_scores.device)
        scores_for_new_dets = det_scores[new_det_inds_tensor]
        _, top_inds = torch.topk(scores_for_new_dets, k=new_det_num_to_keep, largest=True)
        new_det_out_inds = [new_det_out_inds[i] for i in top_inds]
        new_det_num = len(new_det_out_inds)
    # assign object IDs to new detections
    new_det_start_obj_id = inference_session.max_obj_id + 1
    new_det_obj_ids = list(range(new_det_start_obj_id, new_det_start_obj_id + new_det_num))
    # Assign prompt IDs to new objects based on which prompt detected them.
    for obj_id, det_idx in zip(new_det_obj_ids, new_det_out_inds):
        prompt_id = det_idx_to_prompt_id[det_idx]
        inference_session.obj_id_to_prompt_id[obj_id] = prompt_id
    # b) handle hotstart heuristics to remove objects
    extra_metadata_new = deepcopy(
        {
            "obj_first_frame_idx": inference_session.obj_first_frame_idx,
            "unmatched_frame_inds": inference_session.unmatched_frame_inds,
            "trk_keep_alive": inference_session.trk_keep_alive,
            "overlap_pair_to_frame_inds": inference_session.overlap_pair_to_frame_inds,
            "removed_obj_ids": inference_session.removed_obj_ids,
            "suppressed_obj_ids": inference_session.suppressed_obj_ids,
        }
    )
    obj_ids_newly_removed, extra_metadata_new = self._process_hotstart(
        inference_session=inference_session,
        frame_idx=frame_idx,
        reverse=reverse,
        det_to_matched_trk_obj_ids=det_to_matched_trk_obj_ids,
        new_det_obj_ids=new_det_obj_ids,
        empty_trk_obj_ids=empty_trk_obj_ids,
        unmatched_trk_obj_ids=unmatched_trk_obj_ids,
        extra_metadata=extra_metadata_new,
        streaming=streaming,
    )
    tracker_metadata_new["extra_metadata"] = extra_metadata_new
    # Step 3 (optional): prepare reconditioned masks based on high-confidence detections
    reconditioned_masks = {}
    reconditioned_obj_ids = set()
    should_recondition_periodic = (
        self.recondition_every_nth_frame > 0
        and frame_idx % self.recondition_every_nth_frame == 0
        and len(trk_id_to_max_iou_high_conf_det) > 0
    )
    if should_recondition_periodic:
        reconditioned_masks, reconditioned_obj_ids = self._prepare_recondition_masks(
            inference_session=inference_session,
            frame_idx=frame_idx,
            det_out=det_out,
            trk_masks=tracker_low_res_masks_global,
            trk_id_to_max_iou_high_conf_det=trk_id_to_max_iou_high_conf_det,
            tracker_obj_scores_global=tracker_obj_scores_global,
        )
    tracker_update_plan = {
        "new_det_out_inds": new_det_out_inds,  # List[int]
        "new_det_obj_ids": new_det_obj_ids,  # List[int]
        "unmatched_trk_obj_ids": unmatched_trk_obj_ids,  # List[int]
        "det_to_matched_trk_obj_ids": det_to_matched_trk_obj_ids,  # dict
        "obj_ids_newly_removed": obj_ids_newly_removed,  # set
        "num_obj_dropped_due_to_limit": num_obj_dropped_due_to_limit,  # int
        "trk_id_to_max_iou_high_conf_det": trk_id_to_max_iou_high_conf_det,  # dict
        "reconditioned_obj_ids": reconditioned_obj_ids,  # set
    }
    # Step 4: Run SAM2 memory encoder on the current frame's prediction masks
    # This uses tracker masks for most objects, but detection masks for reconditioned objects
    batch_size = tracker_low_res_masks_global.size(0)
    if batch_size > 0:
        if self.suppress_overlapping_based_on_recent_occlusion_threshold > 0.0:
            # NOTE: tracker_low_res_masks_global is updated in-place then returned
            tracker_low_res_masks_global = self._suppress_overlapping_based_on_recent_occlusion(
                inference_session=inference_session,
                frame_idx=frame_idx,
                tracker_low_res_masks_global=tracker_low_res_masks_global,
                tracker_metadata_new=tracker_metadata_new,
                obj_ids_newly_removed=obj_ids_newly_removed,
                reverse=reverse,
            )
        # Unified memory encoding: uses detection masks for reconditioned objects
        self._tracker_update_memories(
            inference_session=inference_session,
            frame_idx=frame_idx,
            low_res_masks=tracker_low_res_masks_global,
            reconditioned_masks=reconditioned_masks,
        )
    # Step 5: update the SAM2 metadata based on the update plan
    updated_obj_ids = tracker_metadata_new["obj_ids"]
    if len(new_det_obj_ids) > 0:
        updated_obj_ids = updated_obj_ids + new_det_obj_ids
    if len(obj_ids_newly_removed) > 0:
        updated_obj_ids = [obj_id for obj_id in updated_obj_ids if obj_id not in obj_ids_newly_removed]
    tracker_metadata_new["obj_ids"] = updated_obj_ids
    # update object scores and the maximum object ID assigned so far
    if len(new_det_obj_ids) > 0:
        # Index tensor with list of indices and convert to list
        new_det_scores = det_scores[
            torch.tensor(new_det_out_inds, dtype=torch.long, device=det_scores.device)
        ].tolist()
        tracker_metadata_new["obj_id_to_score"].update(zip(new_det_obj_ids, new_det_scores))
        # tracker scores are not available for new objects, use det score instead.
        tracker_metadata_new["obj_id_to_tracker_score_frame_wise"][frame_idx].update(
            zip(new_det_obj_ids, new_det_scores)
        )
        tracker_metadata_new["max_obj_id"] = max(
            tracker_metadata_new["max_obj_id"],
            max(new_det_obj_ids),
        )
    # for removed objects, we set their scores to a very low value (-1e4) but still
    # keep them in "obj_id_to_score" (it's easier to handle outputs this way)
    for obj_id in obj_ids_newly_removed:
        tracker_metadata_new["obj_id_to_score"][obj_id] = -1e4
        tracker_metadata_new["obj_id_to_tracker_score_frame_wise"][frame_idx][obj_id] = -1e4
        tracker_metadata_new["obj_id_to_last_occluded"].pop(obj_id, None)
    return tracker_update_plan, tracker_metadata_new
def _tracker_add_new_objects(
    self,
    inference_session: Sam3VideoInferenceSession,
    frame_idx: int,
    new_obj_ids: list[int],
    new_obj_masks: Tensor,
    reverse: bool = False,
):
    """
    Add new objects to SAM2 inference states and run one tracker step on them.

    Args:
        inference_session: Session to register the new objects in.
        frame_idx: Frame on which the objects were detected.
        new_obj_ids: Object IDs to assign to the new objects.
        new_obj_masks: Mask logits/probabilities for the new objects; binarized
            at 0.5 before being used as mask prompts.
        reverse: True when propagating backwards through the video.
    """
    # Binarize detection masks before using them as mask inputs.
    new_obj_masks = new_obj_masks >= 0.5
    for obj_id, mask in zip(new_obj_ids, new_obj_masks):
        obj_idx = inference_session.obj_id_to_idx(obj_id)
        # Add batch and channel dims: (H, W) -> (1, 1, H, W).
        inference_session.add_mask_inputs(obj_idx, frame_idx, mask.unsqueeze(0).unsqueeze(0))
    inference_session.obj_with_new_inputs = list(new_obj_ids)
    # Run the tracker once so the new objects get conditioned memory on this frame.
    self.tracker_model(
        inference_session=inference_session,
        frame_idx=frame_idx,
        reverse=reverse,
        run_mem_encoder=True,
    )
def run_tracker_update_execution_phase(
    self,
    inference_session: Sam3VideoInferenceSession,
    frame_idx: int,
    det_out: dict[str, Tensor],
    tracker_update_plan: dict,
    reverse: bool = False,
):
    """
    Execute the update plan produced by `run_tracker_update_planning_phase`:
    add newly detected objects to the tracker and drop objects removed by the
    hotstart heuristics.

    Args:
        inference_session: Session whose tracking state is mutated.
        frame_idx: Current frame index.
        det_out: Merged detector output (provides masks for new objects).
        tracker_update_plan: Plan dict from the planning phase.
        reverse: True when propagating backwards through the video.
    """
    # initialize tracking scores with detection scores
    new_det_out_inds: list[int] = tracker_update_plan["new_det_out_inds"]
    new_det_obj_ids: list[int] = tracker_update_plan["new_det_obj_ids"]
    obj_ids_newly_removed: set[int] = tracker_update_plan["obj_ids_newly_removed"]
    # Step 1: add new objects from FA detection to SAM2 inference states
    if len(new_det_out_inds) > 0:
        new_det_out_inds_t = torch.tensor(new_det_out_inds, dtype=torch.long)
        new_det_masks: Tensor = det_out["mask"][new_det_out_inds_t]
        # initialize SAM2 with new object masks
        self._tracker_add_new_objects(
            inference_session=inference_session,
            frame_idx=frame_idx,
            new_obj_ids=new_det_obj_ids,
            new_obj_masks=new_det_masks,
            reverse=reverse,
        )
    # Step 2: remove from SAM2 inference states those objects removed by heuristics
    for obj_id in obj_ids_newly_removed:
        inference_session.remove_object(obj_id, strict=False)  # implement remove_object in inference_session?
def build_outputs(
    self,
    inference_session: Sam3VideoInferenceSession,
    det_out: dict[str, Tensor],
    tracker_low_res_masks_global: Tensor,
    tracker_update_plan: dict,
    reconditioned_obj_ids: Optional[set] = None,
):
    """
    Build output dictionary with low-resolution masks.
    Interpolation to video resolution is handled by the processor.

    Args:
        inference_session: Session providing the current object IDs.
        det_out: Merged detector output (masks for new/reconditioned objects).
        tracker_low_res_masks_global: Tracker masks for existing objects, one
            row per object in `inference_session.obj_ids` order.
        tracker_update_plan: Plan dict from the planning phase.
        reconditioned_obj_ids: Optional set of object IDs whose outputs should
            use the matched detection mask instead of the tracker mask.

    Returns:
        obj_id_to_mask: dict mapping obj_id to low-res mask tensor (1, H_low, W_low)
    """
    new_det_out_inds: list[int] = tracker_update_plan["new_det_out_inds"]
    new_det_obj_ids: list[int] = tracker_update_plan["new_det_obj_ids"]
    obj_id_to_mask = {}  # obj_id --> low-res mask tensor
    # Part 1: masks from tracker propagation (existing objects)
    existing_masklet_obj_ids = inference_session.obj_ids
    for obj_id, mask in zip(existing_masklet_obj_ids, tracker_low_res_masks_global):
        obj_id_to_mask[int(obj_id)] = mask.unsqueeze(0)  # (1, H_low, W_low)
    # Part 2: masks from new detections
    if len(new_det_out_inds) > 0:
        new_det_out_inds_t = torch.tensor(new_det_out_inds, dtype=torch.long, device=det_out["mask"].device)
        new_det_low_res_masks = det_out["mask"][new_det_out_inds_t]
        # Apply hole filling to new detection masks
        new_det_low_res_masks = fill_holes_in_mask_scores(
            new_det_low_res_masks.unsqueeze(1),
            max_area=self.fill_hole_area,
            fill_holes=True,
            remove_sprinkles=True,
        ).squeeze(1)
        for obj_id, mask in zip(new_det_obj_ids, new_det_low_res_masks):
            obj_id_to_mask[int(obj_id)] = mask.unsqueeze(0)  # (1, H_low, W_low)
    # Part 3: Override masks for reconditioned objects using detection masks
    if reconditioned_obj_ids is not None and len(reconditioned_obj_ids) > 0:
        trk_id_to_max_iou_high_conf_det = tracker_update_plan.get("trk_id_to_max_iou_high_conf_det", {})
        for obj_id in reconditioned_obj_ids:
            det_idx = trk_id_to_max_iou_high_conf_det.get(obj_id)
            if det_idx is not None:
                det_mask = det_out["mask"][det_idx].unsqueeze(0)  # (1, H_low, W_low)
                obj_id_to_mask[int(obj_id)] = det_mask
    return obj_id_to_mask
def _merge_detections_from_prompts(
self,
all_detections: dict[int, dict[str, Tensor]],
inference_session: Sam3VideoInferenceSession,
) -> tuple[dict[str, Tensor], dict[int, int]]:
"""
Merge detections from multiple prompts into a single detection output.
Assigns unique object IDs and tracks which prompt detected each object.
Args:
all_detections: Dictionary mapping prompt_id to detection outputs
inference_session: Session to track obj_id -> prompt_id mapping
Returns:
Tuple of (merged_det_out, det_idx_to_prompt_id) where det_idx_to_prompt_id
maps detection index in the merged output to the prompt that produced it.
"""
merged_bboxes, merged_masks, merged_scores = [], [], []
det_idx_to_prompt_id = {}
det_idx = 0
for prompt_id, det_out in all_detections.items():
num_dets = len(det_out["bbox"])
if num_dets > 0:
merged_bboxes.append(det_out["bbox"])
merged_masks.append(det_out["mask"])
merged_scores.append(det_out["scores"])
for i in range(num_dets):
det_idx_to_prompt_id[det_idx + i] = prompt_id
det_idx += num_dets
if merged_bboxes:
merged_det_out = {
"bbox": torch.cat(merged_bboxes),
"mask": torch.cat(merged_masks),
"scores": torch.cat(merged_scores),
}
else:
device = inference_session.inference_device
merged_det_out = {
"bbox": torch.zeros(0, 4, device=device),
"mask": torch.zeros(0, self.low_res_mask_size, self.low_res_mask_size, device=device),
"scores": torch.zeros(0, device=device),
}
return merged_det_out, det_idx_to_prompt_id
def _det_track_one_frame(
    self,
    inference_session: Sam3VideoInferenceSession,
    frame_idx: int,
    reverse: bool,
    streaming: bool = False,
):
    """
    This function handles one-step inference for the DenseTracking model.

    - `inference_session` contains all the information needed for inference, including the input video frames, text prompts, and any other relevant metadata
    - The function processes detection and tracking for a single frame
    - `streaming` indicates whether this is streaming inference mode (frames provided one at a time)

    Returns a 4-tuple of (obj_id_to_mask, obj_id_to_score, tracker_metadata_new,
    tracker_obj_scores_global); see the inline comments on the return statement.
    """
    # Encode the frame once; the resulting vision embeddings are shared by the
    # detector (Step 1) and the tracker (cached below).
    pixel_values = inference_session.get_frame(frame_idx).unsqueeze(0)
    vision_embeds = self.detector_model.get_vision_features(pixel_values=pixel_values)
    # Step 1: run detection for all prompts (efficiently reusing vision embeddings)
    # Returns dict mapping prompt_id to detection outputs
    all_detections = self.run_detection(
        inference_session=inference_session,
        vision_embeds=vision_embeds,
    )
    # Merge detections from all prompts into single output for tracking
    det_out, det_idx_to_prompt_id = self._merge_detections_from_prompts(all_detections, inference_session)
    # share the vision encoder outputs from the detector to the tracker
    vision_feats, vision_pos_embeds = self.get_vision_features_for_tracker(
        vision_embeds=vision_embeds,
    )
    inference_session.cache.cache_vision_features(
        frame_idx, {"vision_feats": vision_feats, "vision_pos_embeds": vision_pos_embeds}
    )
    # Step 2: propagate SAM2 states to get the SAM2 prediction masks.
    # The returned `tracker_low_res_masks_global` contains the masklet predictions.
    # Note that this step only runs the SAM2 propagation step, but doesn't encode new memory for the predicted masks;
    # we defer memory encoding to `run_tracker_update_execution_phase` after resolving all heuristics.
    tracker_low_res_masks_global, tracker_obj_scores_global = self.run_tracker_propagation(
        inference_session=inference_session, frame_idx=frame_idx, reverse=reverse
    )
    # Step 3: based on detection outputs and the propagated SAM2 prediction masks, we make plans
    # for SAM2 masklet updates (i.e. which objects to add and remove, etc).
    # We also run SAM2 memory encoder in this step to resolve non-overlapping constraints.
    # **This step should involve all the heuristics needed for any updates.**
    # This step also generates the new masklet metadata `tracker_metadata_new` (based on its previous version).
    tracker_update_plan, tracker_metadata_new = self.run_tracker_update_planning_phase(
        inference_session=inference_session,
        frame_idx=frame_idx,
        reverse=reverse,
        det_out=det_out,
        tracker_low_res_masks_global=tracker_low_res_masks_global,
        tracker_obj_scores_global=tracker_obj_scores_global,
        det_idx_to_prompt_id=det_idx_to_prompt_id,
        streaming=streaming,
    )
    # Step 4: based on `tracker_update_plan`, execute the update w.r.t. the tracker states
    self.run_tracker_update_execution_phase(
        inference_session=inference_session,
        frame_idx=frame_idx,
        reverse=reverse,
        det_out=det_out,
        tracker_update_plan=tracker_update_plan,
    )
    # Step 5: finally, build the outputs for this frame
    reconditioned_obj_ids = tracker_update_plan["reconditioned_obj_ids"]
    obj_id_to_mask = self.build_outputs(
        inference_session=inference_session,
        det_out=det_out,
        tracker_low_res_masks_global=tracker_low_res_masks_global,
        tracker_update_plan=tracker_update_plan,
        reconditioned_obj_ids=reconditioned_obj_ids,
    )
    obj_id_to_score = tracker_metadata_new["obj_id_to_score"]
    # add tracker scores to metadata, it should be fired for frames except the first frame
    if tracker_obj_scores_global.shape[0] > 0:
        # Convert tracker_obj_scores_global to sigmoid scores before updating
        tracker_obj_scores_global = tracker_obj_scores_global.sigmoid().tolist()
        tracker_obj_ids = inference_session.obj_ids
        tracker_metadata_new["obj_id_to_tracker_score_frame_wise"][frame_idx].update(
            dict(zip(tracker_obj_ids, tracker_obj_scores_global))
        )
    return (
        obj_id_to_mask,  # a dict: obj_id --> output mask
        obj_id_to_score,  # a dict: obj_id --> output score (prob)
        tracker_metadata_new,
        # per-object frame-level tracker scores: a list of sigmoid probs aligned
        # with `inference_session.obj_ids` (or the empty tensor on the first frame)
        tracker_obj_scores_global,
    )
@torch.inference_mode()
@auto_docstring(custom_intro="Propagate the objects through a streamed video frame.")
def forward(
    self,
    inference_session: Sam3VideoInferenceSession,
    frame_idx: Optional[int] = None,
    frame: Optional[torch.Tensor] = None,
    reverse: bool = False,
):
    r"""
    inference_session (`Sam3VideoInferenceSession`):
        The video inference session object.
    frame_idx (`int`, *optional*):
        The index of the frame on which to run inference. No need to provide when inferring
        on a new streamed frame.
    frame (`torch.Tensor`, *optional*):
        The frame to process. Provide when streaming.
    reverse (`bool`, *optional*, defaults to `False`):
        Whether to propagate in reverse.
    """
    # Streaming mode: register the new frame first; `add_new_frame` returns the
    # index assigned to it.
    if frame is not None:
        frame_idx = inference_session.add_new_frame(frame, frame_idx)
    if frame_idx is None:
        raise ValueError("frame_idx must be provided when frame is not provided for streaming.")
    # Run the full detect-and-track step for this single frame.
    (
        obj_id_to_mask,
        obj_id_to_score,
        tracker_metadata_new,
        _,
    ) = self._det_track_one_frame(
        inference_session=inference_session,
        frame_idx=frame_idx,
        reverse=reverse,
        streaming=frame is not None,
    )
    # use a dummy string in "previous_stages_out" to indicate this frame has outputs
    # inference_session.previous_stages_out[frame_idx] = "_THIS_FRAME_HAS_OUTPUTS_"
    extra_metadata = tracker_metadata_new["extra_metadata"]
    removed_obj_ids = extra_metadata["removed_obj_ids"]
    # Update inference session state: mirror the freshly computed metadata back
    # onto the session so the next frame starts from this frame's state.
    inference_session.obj_id_to_score = obj_id_to_score
    inference_session.obj_id_to_tracker_score_frame_wise = tracker_metadata_new[
        "obj_id_to_tracker_score_frame_wise"
    ]
    inference_session.obj_id_to_last_occluded = tracker_metadata_new["obj_id_to_last_occluded"]
    inference_session.max_obj_id = tracker_metadata_new["max_obj_id"]
    inference_session.obj_ids = list(tracker_metadata_new["obj_ids"])
    inference_session.obj_first_frame_idx = extra_metadata["obj_first_frame_idx"]
    inference_session.unmatched_frame_inds = extra_metadata["unmatched_frame_inds"]
    inference_session.trk_keep_alive = extra_metadata["trk_keep_alive"]
    inference_session.overlap_pair_to_frame_inds = extra_metadata["overlap_pair_to_frame_inds"]
    inference_session.removed_obj_ids = removed_obj_ids
    inference_session.suppressed_obj_ids[frame_idx] = extra_metadata["suppressed_obj_ids"][frame_idx]
    return Sam3VideoSegmentationOutput(
        object_ids=list(tracker_metadata_new["obj_ids"]),
        obj_id_to_mask=obj_id_to_mask,
        obj_id_to_score=obj_id_to_score,
        obj_id_to_tracker_score=tracker_metadata_new["obj_id_to_tracker_score_frame_wise"][frame_idx],
        removed_obj_ids=removed_obj_ids,
        suppressed_obj_ids=extra_metadata["suppressed_obj_ids"][frame_idx],
        frame_idx=frame_idx,
    )
def _get_processing_order(
    self,
    inference_session: Sam3VideoInferenceSession,
    start_frame_idx: int,
    max_frame_num_to_track: Optional[int] = None,
    reverse: bool = False,
):
    """
    Compute the sequence of frame indices to visit during propagation.

    Forward propagation visits `start_frame_idx .. end_frame_idx` inclusive
    (clamped to the last frame). Reverse propagation starts at
    `start_frame_idx - 1` and walks down to the clamped end index.

    Returns:
        Tuple of (processing_order, end_frame_idx) where `processing_order` is
        a `range` over frame indices.
    """
    total_frames = inference_session.num_frames
    # Default budget: track through every frame of the video.
    frame_budget = max_frame_num_to_track if max_frame_num_to_track is not None else total_frames
    if reverse:
        end_frame_idx = max(start_frame_idx - frame_budget, 0)
        processing_order = range(start_frame_idx - 1, end_frame_idx - 1, -1)
    else:
        end_frame_idx = min(start_frame_idx + frame_budget, total_frames - 1)
        processing_order = range(start_frame_idx, end_frame_idx + 1)
    return processing_order, end_frame_idx
@torch.inference_mode()
def propagate_in_video_iterator(
    self,
    inference_session: Sam3VideoInferenceSession,
    start_frame_idx=0,
    max_frame_num_to_track=None,
    reverse=False,
):
    """
    Propagate the prompts to get grounding results for the entire video. This method
    is a generator and yields inference outputs for all frames in the range specified
    by `start_frame_idx`, `max_frame_num_to_track`, and `reverse`.

    When `self.hotstart_delay > 0`, outputs are buffered and each frame is only
    yielded once `hotstart_delay` newer frames have been processed (the buffer
    is flushed in full at the end of the range). Object IDs removed during this
    delay window are accumulated in
    `inference_session.hotstart_removed_obj_ids`.

    Yields:
        `Sam3VideoSegmentationOutput`: The segmentation output for each frame.
    """
    processing_order, end_frame_idx = self._get_processing_order(
        inference_session,
        start_frame_idx,
        max_frame_num_to_track,
        reverse=reverse,
    )
    hotstart_buffer = []
    for frame_idx in tqdm(processing_order):
        # Each iteration runs one full detect-and-track step via `forward`.
        out = self(inference_session=inference_session, frame_idx=frame_idx, reverse=reverse)
        if self.hotstart_delay > 0:
            # accumulate the outputs for the first `hotstart_delay` frames
            hotstart_buffer.append(out)
            # update the object IDs removed by hotstart so that we don't output them
            inference_session.hotstart_removed_obj_ids.update(out.removed_obj_ids)
            if frame_idx == end_frame_idx:
                # we reached the end of propagation -- yield all frames in the buffer
                yield_list = hotstart_buffer
                hotstart_buffer = []
            elif len(hotstart_buffer) >= self.hotstart_delay:
                # we have enough frames -- yield and remove the first (oldest) frame from the buffer
                yield_list = hotstart_buffer[:1]
                hotstart_buffer = hotstart_buffer[1:]
            else:
                # not enough frames yet -- skip yielding
                yield_list = []
        else:
            yield_list = [out]  # output the current frame
        for yield_out in yield_list:
            yield yield_out
@torch.jit.script
def fast_diag_box_iou(boxes1, boxes2):
    """
    Element-wise ("diagonal") IoU between two aligned sets of boxes.

    Both inputs are (N, 4) tensors in [x1, y1, x2, y2] form; the i-th box of
    `boxes1` is compared against the i-th box of `boxes2`, returning an
    (N,) tensor of IoUs.
    """
    # Intersection rectangle: max of the top-left corners, min of the bottom-right.
    top_left = torch.max(boxes1[:, :2], boxes2[:, :2])
    bottom_right = torch.min(boxes1[:, 2:], boxes2[:, 2:])
    intersection = (bottom_right - top_left).clamp(min=0).prod(-1)
    area1 = (boxes1[:, 2:] - boxes1[:, :2]).prod(-1)
    area2 = (boxes2[:, 2:] - boxes2[:, :2]).prod(-1)
    union = area1 + area2 - intersection
    return intersection / union
def mask_iou(pred_masks: torch.Tensor, gt_masks: torch.Tensor) -> torch.Tensor:
    """
    Compute pairwise IoU between predicted and ground-truth binary masks.

    Args:
        pred_masks: (N, H, W) bool Tensor of binary predicted masks.
        gt_masks: (M, H, W) bool Tensor of binary ground-truth masks.

    Returns:
        (N, M) float Tensor of IoUs for every (prediction, ground-truth) pair.
        Empty unions are clamped so the corresponding IoU is 0 rather than NaN.
    """
    num_pred = pred_masks.shape[0]
    num_gt = gt_masks.shape[0]
    # Broadcast (N, 1, H*W) against (1, M, H*W) to get all pairs at once.
    pred_flat = pred_masks.reshape(num_pred, 1, -1)
    gt_flat = gt_masks.reshape(1, num_gt, -1)
    intersection = (pred_flat & gt_flat).sum(dim=2).float()
    union = (pred_flat | gt_flat).sum(dim=2).float()
    return intersection / union.clamp(min=1)
def nms_masks(
    pred_probs: torch.Tensor,
    pred_masks: torch.Tensor,
    prob_threshold: float,
    iou_threshold: float,
) -> torch.Tensor:
    """
    Score-threshold detections, then run mask-IoU NMS on the survivors.

    Args:
        pred_probs: (num_det,) float Tensor with the score of each detection.
        pred_masks: (num_det, H_mask, W_mask) float Tensor with each detection's mask.
        prob_threshold: Score threshold used to prefilter detections.
        iou_threshold: Mask IoU threshold for NMS.

    Returns:
        (num_det,) bool Tensor marking detections kept after thresholding + NMS.
        If the optional kernels library is unavailable or fails, NMS is skipped
        and all above-threshold detections are kept.
    """
    is_valid = pred_probs > prob_threshold  # (num_det,)
    probs = pred_probs[is_valid]  # (num_valid,)
    masks_binary = pred_masks[is_valid] > 0  # (num_valid, H_mask, W_mask)
    if probs.numel() == 0:
        # Nothing survived the score threshold.
        return is_valid
    ious = mask_iou(masks_binary, masks_binary)  # (num_valid, num_valid)
    # NMS itself needs the optional kernels library; without it we fall back to
    # keeping every above-threshold detection.
    _load_cv_utils_kernel_once()
    if not cv_utils_kernel:
        return is_valid
    try:
        kept_inds = cv_utils_kernel.generic_nms(ious, probs, iou_threshold, use_iou_matrix=True)
    except Exception as e:
        logger.warning_once(f"Failed to run NMS using kernels library: {e}. NMS post-processing will be skipped.")
        return is_valid
    # Map each original detection to its index among the valid ones (-1 if invalid),
    # then keep only those whose valid-index survived NMS.
    valid_inds = torch.where(is_valid, is_valid.cumsum(dim=0) - 1, -1)  # (num_det,)
    return torch.isin(valid_inds, kept_inds)
def fill_holes_in_mask_scores(mask, max_area, fill_holes=True, remove_sprinkles=True):
    """
    Post-process mask scores by removing small connected components.

    Small connected components in the background ("holes") are flipped to a
    weakly positive score (0.1), and small components in the foreground
    ("sprinkles") are flipped to a weakly negative score (-0.1). A foreground
    component only counts as small when its area is at most `max_area` AND at
    most half of the mask's total foreground area, so tiny objects that we
    still want to track are not wiped out.

    Connected components come from `_get_connected_components_with_padding`,
    which uses an optional kernels library (e.g. a cc_torch-style CUDA kernel,
    with triton/skimage fallbacks) — see that helper for details.
    """
    if max_area <= 0:
        # No component can qualify as "small"; nothing to do.
        return mask
    if fill_holes:
        # Flip small background components to foreground (score 0.1).
        background = mask <= 0
        _, background_areas = _get_connected_components_with_padding(background)
        small_background = background & (background_areas <= max_area)
        mask = torch.where(small_background, 0.1, mask)
    if remove_sprinkles:
        # Flip small foreground components to background (score -0.1); the
        # per-mask threshold is half the foreground area, capped at `max_area`.
        foreground = mask > 0
        area_threshold = torch.sum(foreground, dim=(2, 3), keepdim=True, dtype=torch.int32)
        area_threshold.floor_divide_(2).clamp_(max=max_area)
        _, foreground_areas = _get_connected_components_with_padding(foreground)
        small_foreground = foreground & (foreground_areas <= area_threshold)
        mask = torch.where(small_foreground, -0.1, mask)
    return mask
def _get_connected_components_with_padding(mask):
    """
    Get connected components from masks (possibly padding them to an even size).

    Args:
        mask: (B, 1, H, W) tensor of binary masks (cast to uint8 internally).

    Returns:
        Tuple of (labels, counts), both shaped like `mask`: per-pixel component
        labels and per-pixel component areas. If the optional kernels library
        is unavailable or fails, returns a pass-through fallback whose counts
        exceed any possible component area so downstream
        `areas <= threshold` filters never fire.
    """
    mask = mask.to(torch.uint8)
    _, _, H, W = mask.shape

    def _passthrough_fallback():
        # Dummy labels plus counts of H*W + 1 (larger than any real component
        # area) so hole filling / sprinkle removal is effectively skipped.
        labels = torch.zeros_like(mask, dtype=torch.int32)
        counts = torch.full_like(mask, fill_value=H * W + 1, dtype=torch.int32)
        return labels, counts

    # Try to use kernels for connected components, fallback if unavailable.
    _load_cv_utils_kernel_once()
    if not cv_utils_kernel:
        return _passthrough_fallback()
    # cc_2d requires even height and width; pad by one pixel where needed.
    pad_h = H % 2
    pad_w = W % 2
    try:
        if pad_h == 0 and pad_w == 0:
            labels, counts = cv_utils_kernel.cc_2d(mask.contiguous(), get_counts=True)
        else:
            # padding format is (padding_left, padding_right, padding_top, padding_bottom)
            mask_pad = F.pad(mask, (0, pad_w, 0, pad_h), mode="constant", value=0)
            labels, counts = cv_utils_kernel.cc_2d(mask_pad.contiguous(), get_counts=True)
            # Crop back to the original size.
            labels = labels[:, :, :H, :W]
            counts = counts[:, :, :H, :W]
    except Exception as e:
        logger.warning_once(
            f"Failed to compute connected components using kernels library: {e}. "
            "Hole filling and sprinkle removal will be skipped."
        )
        return _passthrough_fallback()
    return labels, counts
# Names exported when this module is star-imported.
__all__ = [
    "Sam3VideoModel",
    "Sam3VideoPreTrainedModel",
    "Sam3VideoInferenceSession",
    "Sam3VideoSegmentationOutput",
]
| Sam3VideoModel |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py | {
"start": 2667,
"end": 52416
} | class ____(test.TestCase, parameterized.TestCase):
"""Base test class for TF-quant tests."""
def setUp(self) -> None:
  """Creates fresh input/output SavedModel directories for each test case."""
  super().setUp()
  # Many test cases for quantization involve creating and saving the input
  # model and saving the output quantized model. These two member
  # attributes can be used to specify the paths for such models,
  # respectively. These paths will be cleaned up after each test case.
  self._input_saved_model_path = self.create_tempdir('input').full_path
  self._output_saved_model_path = self.create_tempdir('output').full_path
  # Extra output path occasionally used for comparing two different
  # quantized models.
  self._output_saved_model_path_2 = self.create_tempdir('output2').full_path
def _get_dir_size(self, path: str = '.'):
"""Get the total size of files and sub-directories under the path.
Args:
path: Path of a directory or a file to calculate the total size.
Returns:
Total size of the directory or a file.
"""
total = 0
for root, _, files in os.walk(path):
for filename in files:
total += os.path.getsize(os.path.join(root, filename))
return total
def _any_log_contains(
self, substring: str, log_record_list: List['logging.LogRecord']
) -> bool:
"""Returns True if any of the log contains a given substring.
Args:
substring: A piece of string to check whether it exists in the log
message.
log_record_list: A list of `absl.logging.LogRecord`s.
Returns:
True if and only if the substring exists in any of the log in
`log_record_list`.
"""
return any(
map(
lambda log_record: substring in str(log_record.message),
log_record_list,
)
)
def _is_quantized_function(self, func: function_pb2.FunctionDef) -> bool:
  """Determine whether a FunctionDef is quantized.

  Args:
    func: A FunctionDef object.

  Returns:
    True iff `func` is quantized (its name carries the 'quantized_' prefix).
  """
  function_name = func.signature.name
  return function_name.startswith('quantized_')
def _is_composite_function(self, func: function_pb2.FunctionDef) -> bool:
  """Determine whether a FunctionDef is a composite function.

  Args:
    func: A FunctionDef object.

  Returns:
    True iff `func` is a composite function (its name carries the
    'composite_' prefix).
  """
  function_name = func.signature.name
  return function_name.startswith('composite_')
def _contains_op_with_name_and_attribute(
    self,
    nodes: Iterable[node_def_pb2.NodeDef],
    op_name: str,
    attr_name: str,
    attr_val: _AttrValType,
    node_name: str = '',
) -> bool:
  """Determine whether there is a node whose operation name matches `op_name`.

  If `attr_name` is given, additionally check if the `attr_val` matches with
  the attribute value of the op.

  Args:
    nodes: Iterable of NodeDefs.
    op_name: Name of the op to match.
    attr_name: Name of the attribute of the op to match.
    attr_val: Value of the attr_name to check.
    node_name: Name of the node to match. Accepts regex2 format.

  Returns:
    True if there exists a node whose name matches `op_name` and 'attr_val' if
    'attr_name' is given.
  """
  # Compile the node-name pattern once; an empty `node_name` matches any node.
  name_pattern = re.compile(node_name) if node_name else None
  for node in nodes:
    if node.op != op_name:
      continue
    if name_pattern is not None and name_pattern.fullmatch(node.name) is None:
      continue
    if node.attr.get(attr_name) == attr_val:
      return True
  return False
def _contains_quantized_function_call(
    self, graphdef: graph_pb2.GraphDef
) -> bool:
  """Determines if the graph def has quantized function call.

  Args:
    graphdef: A GraphDef object.

  Returns:
    True if and only if the graph def contains a quantized function call.
  """
  for function in graphdef.library.function:
    if self._is_quantized_function(function):
      return True
  return False
def _contains_composite_function_call(
    self, graphdef: graph_pb2.GraphDef
) -> bool:
  """Determines if the graph def has composite function call.

  Args:
    graphdef: A GraphDef object.

  Returns:
    True if and only if the graph def contains a composite function call.
  """
  for function in graphdef.library.function:
    if self._is_composite_function(function):
      return True
  return False
def _contains_op(
    self,
    graphdef: graph_pb2.GraphDef,
    op_name: str,
    attr_name: str = '',
    attr_val: _AttrValType = None,
    node_name: str = '',
) -> bool:
  """Determines if the graph def contains the given op.

  Args:
    graphdef: A GraphDef object.
    op_name: Name of the operation to find within the graph.
    attr_name: Name of the attribute of the op to match.
    attr_val: Value of the attr_name to check.
    node_name: Name of the node to match. Accepts regex2 format.

  Returns:
    True if and only if the graph def contains an op named `op_name`. If
    `attr_name` is given, check if the `attr_val` matches with the attribute
    value of the op.
  """
  # Search the main graph first, then every user-defined function graph.
  node_collections = [graphdef.node]
  node_collections.extend(func.node_def for func in graphdef.library.function)
  for nodes in node_collections:
    if self._contains_op_with_name_and_attribute(
        nodes=nodes,
        op_name=op_name,
        attr_name=attr_name,
        attr_val=attr_val,
        node_name=node_name,
    ):
      return True
  return False
def _count_ops(
    self,
    graphdef: graph_pb2.GraphDef,
    op_names: Collection[str],
    attr_name: str = '',
    attr_val: _AttrValType = None,
    get_op_name: bool = False,
) -> int:
  """Returns the number of given ops in a graph def.

  Args:
    graphdef: A GraphDef object.
    op_names: Names of the operations to find within the graph.
    attr_name: Name of the attribute of the ops to match.
    attr_val: Value of the attr_name to check.
    get_op_name: If set True, checks node.name rather than node.op.

  Returns:
    The number of occurrences of the given ops in a graph. The ops will be
    counted only if the ops are named 'op_name' and has 'attr_val' if
    'attr_name' is specified.
  """
  # Count across the main graph plus every user-defined function graph.
  node_collections = [graphdef.node]
  node_collections.extend(func.node_def for func in graphdef.library.function)
  total_count = 0
  for op_name in op_names:
    for nodes in node_collections:
      total_count += self._count_op_with_name_and_attribute(
          nodes=nodes,
          op_name=op_name,
          attr_name=attr_name,
          attr_val=attr_val,
          get_op_name=get_op_name,
      )
  return total_count
def _count_op_with_name_and_attribute(
    self,
    nodes: Iterable[node_def_pb2.NodeDef],
    op_name: str,
    attr_name: str,
    attr_val: _AttrValType,
    get_op_name: bool = False,
) -> int:
  """Determine the number of nodes whose operation name matches `op_name`.

  If `attr_name` is given, additionally check if the `attr_val` matches with
  the attribute value of the op.

  Args:
    nodes: Iterable of NodeDefs.
    op_name: Name of the op to match.
    attr_name: Name of the attribute of the op to match.
    attr_val: Value of the attr_name to check.
    get_op_name: If set True, checks node.name rather than node.op.

  Returns:
    The number of occurrences of nodes whose name match `op_name` and
    'attr_val' if 'attr_name' is given.
  """
  # Bug fix: the previous implementation evaluated the attribute comparison
  # inside a list comprehension but took len() of it, so every name-matching
  # node was counted regardless of the attribute value — contradicting this
  # docstring and the matching `_contains_op_with_name_and_attribute` helper.
  # With the defaults (attr_name='', attr_val=None), `node.attr.get('')` is
  # None, so the predicate is always true and default behavior is unchanged.
  return sum(
      1
      for node in nodes
      if (node.name if get_op_name else node.op) == op_name
      and node.attr.get(attr_name) == attr_val
  )
def _create_simple_tf1_conv_model(
    self,
    input_shape: Sequence[int] = (1, 3, 4, 3),
    filter_shape: Sequence[int] = (2, 3, 3, 2),
    use_variable_for_filter=False,
) -> Tuple[core.Tensor, core.Tensor]:
  """Creates a basic convolution model.

  This is intended to be used for TF1 (graph mode) tests.

  Args:
    input_shape: Shape of the input tensor.
    filter_shape: Shape of the filter.
    use_variable_for_filter: Setting this to `True` makes the filter for the
      conv operation a `tf.Variable`.

  Returns:
    in_placeholder: Input tensor placeholder.
    output_tensor: The resulting tensor of the convolution operation.
  """
  in_placeholder = array_ops.placeholder(dtypes.float32, shape=input_shape)
  # Filter values are drawn uniformly from [-1, 1).
  filters = random_ops.random_uniform(
      shape=filter_shape, minval=-1.0, maxval=1.0
  )
  if use_variable_for_filter:
    filters = variables.Variable(filters)
  # NHWC convolution with stride 2 along the width dimension.
  output_tensor = nn_ops.conv2d(
      in_placeholder,
      filters,
      strides=[1, 1, 2, 1],
      dilations=[1, 1, 1, 1],
      padding='SAME',
      data_format='NHWC',
  )
  return in_placeholder, output_tensor
def _create_simple_tf1_gather_model(
    self, input_type: dtypes.DType, use_variable_for_filter=False
) -> Tuple[core.Tensor, core.Tensor]:
  """Creates a basic gather model.

  This is intended to be used for TF1 (graph mode) tests.

  Args:
    input_type: type of the input index tensor for gather operation.
    use_variable_for_filter: Setting this to `True` makes the filter for the
      gather operation a `tf.Variable`.

  Returns:
    in_placeholder: Input tensor placeholder.
    output_tensor: The resulting tensor of the gather operation.
  """
  # The model gathers 6 rows out of a random (128, 32) float table.
  in_placeholder = array_ops.placeholder(input_type, shape=(6))
  filters = np.random.randn(128, 32).astype(np.float32)
  if use_variable_for_filter:
    filters = variables.Variable(filters)
  output_tensor = array_ops.gather_v2(filters, in_placeholder)
  return in_placeholder, output_tensor
def _create_and_save_vocab_table_lookup_model_tf1(
    self,
    output_path: str,
    tags: Collection[str],
    signature_def_key: str,
) -> Tuple[Mapping[str, core.Tensor], Mapping[str, core.Tensor]]:
  """Creates and saves a simple model that uses a vocab table.

  Args:
    output_path: Path to the directory to save the created model.
    tags: Set of strings that identifies the saved meta graph.
    signature_def_key: Name of the SignatureDef. Used to identify the
      SignatureDef within the meta graph.

  Returns:
    inputs: A mapping of input_key -> input_tensor (placeholder). The input
      key is "input_vocabs".
    outputs: A mapping of output_key -> output_tensor. The output keys are
      "lookup" and "output".
  """
  with session.Session(graph=ops.Graph()) as sess:
    input_vocabs_placeholder, lookup_tensor, output_tensor = (
        self._create_vocab_table_lookup_model_tf1(sess)
    )
    inputs = {'input_vocabs': input_vocabs_placeholder}
    outputs = {
        'lookup': lookup_tensor,
        'output': output_tensor,
    }
    # Save with the table initializer and the vocab asset file so the lookup
    # still works after the SavedModel is loaded.
    self._save_tf1_model(
        sess,
        output_path,
        signature_def_key,
        tags,
        inputs=inputs,
        outputs=outputs,
        init_op=lookup_ops.tables_initializer(),
        assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
    )
  return inputs, outputs
def _create_and_save_file_init_hash_table_model_tf1(
    self,
    output_path: str,
    tags: Collection[str],
    signature_def_key: str,
) -> Tuple[Mapping[str, core.Tensor], Mapping[str, core.Tensor]]:
  """Creates and saves a model that uses a file-initialized table.

  The asset file "vocab_file.txt" is used to initialize a hash table.

  Args:
    output_path: Path to the directory to save the created model.
    tags: Set of strings that identifies the saved meta graph.
    signature_def_key: Name of the SignatureDef. Used to identify the
      SignatureDef within the meta graph.

  Returns:
    inputs: A mapping of input_key -> input_tensor (placeholder). The input
      key is "input_vocabs".
    outputs: A mapping of output_key -> output_tensor. The output keys are
      "lookup" and "output".
  """
  with session.Session(graph=ops.Graph()) as sess:
    input_vocabs_placeholder, lookup_tensor, output_tensor = (
        self._create_table_init_from_file_model_tf1(sess)
    )
    inputs = {'input_vocabs': input_vocabs_placeholder}
    outputs = {
        'lookup': lookup_tensor,
        'output': output_tensor,
    }
    # Save with the table initializer and the asset file so the
    # file-initialized table still works after loading.
    self._save_tf1_model(
        sess,
        output_path,
        signature_def_key,
        tags,
        inputs=inputs,
        outputs=outputs,
        init_op=lookup_ops.tables_initializer(),
        assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
    )
  return inputs, outputs
def _create_table_init_from_file_model_tf1(
    self, sess: session.Session
) -> Tuple[core.Tensor, core.Tensor, core.Tensor]:
  """Creates a simple model that initializes a table from an asset file.

  This model creates an asset file at "vocab_file.txt" containing
  comma-separated vocabularies and uses it to initialize a
  `StaticVocabularyTable`. For inference, the model performs a lookup with a
  1D string tensor input vocabs.

  NOTE(review): `sess` is accepted for API symmetry with the other model
  builders but is not referenced directly; ops are created in the caller's
  default graph.

  Args:
    sess: Tensorflow Session to create the model in.

  Returns:
    (input_vocabs_placeholder, lookup_vals, output_tensor), where
    * input_vocabs_placeholder is a placeholder tensor of 1D strings
    * lookup_vals is an output tensor that is a direct result of table lookup
    * output_tensor is a float 2x2 matrix
  """
  # Creates and populates an asset file.
  asset_dir = self.create_tempdir('assets').full_path
  asset_file = os.path.join(asset_dir, 'vocab_file.txt')
  content = '\n'.join(['static', 'range', 'quantization'])
  file_io.write_string_to_file(filename=asset_file, file_content=content)
  # The resulting table looks like:
  # "static" -> 0
  # "range" -> 1
  # "quantization" -> 2
  # default -> -1
  init = lookup_ops.TextFileInitializer(
      filename=asset_file,
      key_dtype=dtypes.string,
      key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
      value_dtype=dtypes.int64,
      value_index=lookup_ops.TextFileIndex.LINE_NUMBER,
  )
  table = lookup_ops.StaticHashTable(init, default_value=-1)
  input_vocabs_placeholder = array_ops.placeholder(
      dtypes.string, shape=(None,), name='input_vocabs'
  )
  # Introduce a matmul op that takes the lookup values to observe the
  # effects of quantization.
  lookup_vals = math_ops.cast(
      table.lookup(input_vocabs_placeholder), dtypes.float32
  )
  # shape: (2, ?)
  matmul_input = array_ops_stack.stack([lookup_vals, lookup_vals])
  # Create a dummy weight matrix filled with ones.
  weight_row = array_ops.ones(
      shape=array_ops.shape(input_vocabs_placeholder), dtype=dtypes.float32
  )
  # shape: (?, 2)
  weight = array_ops.transpose_v2(
      array_ops_stack.stack([weight_row, weight_row])
  )
  # shape: (2, 2)
  output_tensor = math_ops.matmul(matmul_input, weight)
  return input_vocabs_placeholder, lookup_vals, output_tensor
def _create_vocab_table_lookup_model_tf1(
    self, sess: session.Session
) -> Tuple[core.Tensor, core.Tensor, core.Tensor]:
  """Creates a simple model that initializes and lookups a vocab table.

  This model creates an asset file at "vocab_file.txt" containing
  comma-separated vocabularies.  It also initializes a `StaticVocabularyTable`
  and performs a lookup with the input vocabs, which is a 1D tensor of
  strings.

  NOTE(review): `sess` is accepted for API symmetry with the other model
  builders but is not referenced directly; ops are created in the caller's
  default graph.

  Args:
    sess: Tensorflow Session to create the model in.

  Returns:
    (input_vocabs_placeholder, lookup_vals, output_tensor), where
    * input_vocabs_placeholder is a placeholder tensor of 1D strings
    * lookup_vals is an output tensor that is a direct result of table lookup
    * output_tensor is a float 2x2 matrix
  """
  # Creates and populates an asset file.
  asset_dir = self.create_tempdir('assets').full_path
  asset_file = os.path.join(asset_dir, 'vocab_file.txt')
  file_io.write_string_to_file(
      filename=asset_file, file_content='hello,model,quantization\n'
  )
  vocab_file = asset.Asset(asset_file)
  raw_vocab = io_ops.read_file(vocab_file)
  vocabs = ragged_string_ops.string_split_v2(
      string_ops.string_strip(raw_vocab), sep=','
  )
  # Initialize the vocab table. Each comma-separated word in vocab_file.txt
  # corresponds to the numeric identifiers in `values`.
  kv_init = lookup_ops.KeyValueTensorInitializer(
      keys=vocabs, values=np.array([0, 1, 2]), value_dtype=dtypes.int64
  )
  table = lookup_ops.StaticVocabularyTable(kv_init, num_oov_buckets=5)
  input_vocabs_placeholder = array_ops.placeholder(
      dtypes.string, shape=(None,), name='input_vocabs'
  )
  # Introduce a matmul op that takes the lookup values to observe the
  # effects of quantization.
  lookup_vals = math_ops.cast(
      table.lookup(input_vocabs_placeholder), dtypes.float32
  )
  # shape: (2, ?)
  matmul_input = array_ops_stack.stack([lookup_vals, lookup_vals])
  # Create a dummy weight matrix filled with ones.
  weight_row = array_ops.ones(
      shape=array_ops.shape(input_vocabs_placeholder), dtype=dtypes.float32
  )
  # shape: (?, 2)
  weight = array_ops.transpose_v2(
      array_ops_stack.stack([weight_row, weight_row])
  )
  # shape: (2, 2)
  output_tensor = math_ops.matmul(matmul_input, weight)
  return input_vocabs_placeholder, lookup_vals, output_tensor
def _create_and_save_vocab_table_lookup_qat_model_tf1(
    self,
    output_path: str,
    tags: Collection[str],
    signature_def_key: str,
) -> Tuple[Mapping[str, core.Tensor], Mapping[str, core.Tensor]]:
  """Creates and saves a simple QAT model that uses a vocab table.

  Args:
    output_path: Path to the directory to save the created model.
    tags: Set of strings that identifies the saved meta graph.
    signature_def_key: Name of the SignatureDef. Used to identify the
      SignatureDef within the meta graph.

  Returns:
    inputs: A mapping of input_key -> input_tensor (placeholder). The input
      key is "input_vocabs".
    outputs: A mapping of output_key -> output_tensor. The output keys are
      "lookup" and "output".
  """
  with session.Session(graph=ops.Graph()) as sess:
    input_vocabs_placeholder, lookup_tensor, output_tensor = (
        self._create_vocab_table_lookup_qat_model_tf1(sess)
    )
    inputs = {'input_vocabs': input_vocabs_placeholder}
    outputs = {
        'lookup': lookup_tensor,
        'output': output_tensor,
    }
    # Save with the table initializer and the vocab asset file so the lookup
    # still works after the SavedModel is loaded.
    self._save_tf1_model(
        sess,
        output_path,
        signature_def_key,
        tags,
        inputs=inputs,
        outputs=outputs,
        init_op=lookup_ops.tables_initializer(),
        assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
    )
  return inputs, outputs
def _create_vocab_table_lookup_qat_model_tf1(
    self, sess: session.Session
) -> Tuple[core.Tensor, core.Tensor, core.Tensor]:
    """Creates a simple QAT model that initializes and lookups a vocab table.

    This model creates an asset file at "vocab_file.txt" containing
    comma-separated vocabularies. It also initializes a `StaticVocabularyTable`
    and performs a lookup with the input vocabs, which is a 1D tensor of
    strings.

    NOTE(review): `sess` is not referenced in this body; the ops appear to be
    added to the caller's default graph — confirm whether the parameter is
    kept only for symmetry with the save helpers.

    Args:
      sess: Tensorflow Session to create the model in.

    Returns:
      (input_vocabs_placeholder, lookup_vals, output_tensor), where
      * input_vocabs_placeholder is a placeholder tensor of 1D strings
      * lookup_vals is an output tensor that is a direct result of table lookup
      * output_tensor is a float 2x2 matrix
    """
    # Creates and populates an asset file.
    asset_dir = self.create_tempdir('assets').full_path
    asset_file = os.path.join(asset_dir, 'vocab_file.txt')
    file_io.write_string_to_file(
        filename=asset_file, file_content='hello,model,quantization\n'
    )

    vocab_file = asset.Asset(asset_file)

    raw_vocab = io_ops.read_file(vocab_file)
    vocabs = ragged_string_ops.string_split_v2(
        string_ops.string_strip(raw_vocab), sep=','
    )

    # Initialize the vocab table. Each comma-separated word in vocab_file.txt
    # corresponds to the numeric identifiers in `values`.
    kv_init = lookup_ops.KeyValueTensorInitializer(
        keys=vocabs, values=np.array([0, 1, 2]), value_dtype=dtypes.int64
    )
    table = lookup_ops.StaticVocabularyTable(kv_init, num_oov_buckets=5)

    input_vocabs_placeholder = array_ops.placeholder(
        dtypes.string, shape=(None,), name='input_vocabs'
    )

    # Introduce a matmul op that takes the lookup values to observe the
    # effects of quantization.
    lookup_vals = math_ops.cast(
        table.lookup(input_vocabs_placeholder), dtypes.float32
    )
    # shape: (2, ?)
    matmul_input = array_ops_stack.stack([lookup_vals, lookup_vals])
    # Insert fake quant to simulate a QAT model.
    matmul_input = array_ops.fake_quant_with_min_max_args(
        matmul_input, min=-0.3, max=0.3, num_bits=8, narrow_range=False
    )

    # Create a dummy weight matrix filled with ones.
    weight_row = array_ops.ones(
        shape=array_ops.shape(input_vocabs_placeholder), dtype=dtypes.float32
    )
    # shape: (?, 2)
    weight = array_ops.transpose_v2(
        array_ops_stack.stack([weight_row, weight_row])
    )
    # Insert fake quant to simulate a QAT model.
    weight = array_ops.fake_quant_with_min_max_args(
        weight, min=-0.1, max=0.2, num_bits=8, narrow_range=False
    )

    # shape: (2, 2)
    output_tensor = math_ops.matmul(matmul_input, weight)
    # Insert fake quant to simulate a QAT model.
    output_tensor = array_ops.fake_quant_with_min_max_args(
        output_tensor, min=-0.2, max=0.2, num_bits=8, narrow_range=False
    )

    return input_vocabs_placeholder, lookup_vals, output_tensor
def _create_table_init_from_file_qat_model_tf1(
    self, sess: session.Session
) -> Tuple[core.Tensor, core.Tensor, core.Tensor]:
    """Creates a simple QAT model that initializes a table from an asset file.

    This model creates an asset file at "vocab_file.txt" containing
    comma-separated vocabularies and uses it to initialize a
    `StaticVocabularyTable`. For inference, the model performs a lookup with a
    1D string tensor input vocabs.

    NOTE(review): `sess` is not referenced in this body; the ops appear to be
    added to the caller's default graph — confirm whether the parameter is
    kept only for symmetry with the save helpers.

    Args:
      sess: Tensorflow Session to create the model in.

    Returns:
      (input_vocabs_placeholder, lookup_vals, output_tensor), where
      * input_vocabs_placeholder is a placeholder tensor of 1D strings
      * lookup_vals is an output tensor that is a direct result of table lookup
      * output_tensor is a float 2x2 matrix
    """
    # Creates and populates an asset file, one vocab entry per line.
    asset_dir = self.create_tempdir('assets').full_path
    asset_file = os.path.join(asset_dir, 'vocab_file.txt')
    content = '\n'.join(['static', 'range', 'quantization'])
    file_io.write_string_to_file(filename=asset_file, file_content=content)

    # The resulting table looks like:
    # "static" -> 0
    # "range" -> 1
    # "quantization" -> 2
    # default -> -1
    init = lookup_ops.TextFileInitializer(
        filename=asset_file,
        key_dtype=dtypes.string,
        key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
        value_dtype=dtypes.int64,
        value_index=lookup_ops.TextFileIndex.LINE_NUMBER,
    )
    table = lookup_ops.StaticHashTable(init, default_value=-1)

    input_vocabs_placeholder = array_ops.placeholder(
        dtypes.string, shape=(None,), name='input_vocabs'
    )

    # Introduce a matmul op that takes the lookup values to observe the
    # effects of quantization.
    lookup_vals = math_ops.cast(
        table.lookup(input_vocabs_placeholder), dtypes.float32
    )

    # shape: (2, ?)
    matmul_input = array_ops_stack.stack([lookup_vals, lookup_vals])
    # Fake quant simulates a QAT model (activation range).
    matmul_input = array_ops.fake_quant_with_min_max_args(
        matmul_input, min=-0.3, max=0.3, num_bits=8, narrow_range=False
    )

    # Create a dummy weight matrix filled with ones.
    weight_row = array_ops.ones(
        shape=array_ops.shape(input_vocabs_placeholder), dtype=dtypes.float32
    )
    # shape: (?, 2)
    weight = array_ops.transpose_v2(
        array_ops_stack.stack([weight_row, weight_row])
    )
    # Fake quant simulates a QAT model (weight range).
    weight = array_ops.fake_quant_with_min_max_args(
        weight, min=-0.1, max=0.2, num_bits=8, narrow_range=False
    )

    # shape: (2, 2)
    output_tensor = math_ops.matmul(matmul_input, weight)
    # Fake quant simulates a QAT model (output range).
    output_tensor = array_ops.fake_quant_with_min_max_args(
        output_tensor, min=-0.2, max=0.2, num_bits=8, narrow_range=False
    )

    return input_vocabs_placeholder, lookup_vals, output_tensor
def _create_and_save_file_init_hash_table_qat_model_tf1(
    self,
    output_path: str,
    tags: Collection[str],
    signature_def_key: str,
) -> Tuple[Mapping[str, core.Tensor], Mapping[str, core.Tensor]]:
    """Creates and saves a QAT model that uses a file-initialized table.

    The asset file "vocab_file.txt" is used to initialize a hash table.

    Args:
      output_path: Path to the directory to save the created model.
      tags: Set of strings that identifies the saved meta graph.
      signature_def_key: Name of the SignatureDef. Used to identify the
        SignatureDef within the meta graph.

    Returns:
      inputs: A mapping of input_key -> input_tensor (placeholder). The input
        key is "input_vocabs".
      outputs: A mapping of output_key -> output_tensor. The output keys are
        "lookup" and "output".
    """
    # Build the graph in a fresh TF1 (graph-mode) session so the table and
    # its initializer are captured in the exported meta graph.
    with session.Session(graph=ops.Graph()) as sess:
        input_vocabs_placeholder, lookup_tensor, output_tensor = (
            self._create_table_init_from_file_qat_model_tf1(sess)
        )

        inputs = {'input_vocabs': input_vocabs_placeholder}
        outputs = {
            'lookup': lookup_tensor,
            'output': output_tensor,
        }

        # `init_op` makes the table initializer run when the SavedModel is
        # loaded; `assets_collection` exports the vocab file with the model.
        self._save_tf1_model(
            sess,
            output_path,
            signature_def_key,
            tags,
            inputs=inputs,
            outputs=outputs,
            init_op=lookup_ops.tables_initializer(),
            assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
        )

    return inputs, outputs
def _create_data_generator(
    self,
    input_key: str,
    shape: Sequence[int],
    minval: float = -1.0,
    maxval: float = 1.0,
    dtype: dtypes.DType = dtypes.float32,
    num_examples: int = 8,
) -> repr_dataset.RepresentativeDataset:
    """Creates a data generator to be used as representative dataset.

    Each yielded sample maps `input_key` to a freshly drawn uniform-random
    tensor with the requested shape and dtype.

    Args:
      input_key: The string key that identifies the created tensor as an input.
      shape: Shape of the tensor data.
      minval: The lower bound of the generated input.
      maxval: The upper bound of the generated input.
      dtype: The type of the generated input - usually dtypes.float32 for float
        and dtypes.int64 for int.
      num_examples: Number of examples in the representative dataset.

    Yields:
      A `quantize_model._RepresentativeSample` filled with random values.
    """
    emitted = 0
    while emitted < num_examples:
        emitted += 1
        sample = random_ops.random_uniform(shape, minval, maxval, dtype)
        yield {input_key: sample}
def _save_tf1_model(
    self,
    sess: session.Session,
    saved_model_path: str,
    signature_key: str,
    tags: Collection[str],
    inputs: Mapping[str, core.Tensor],
    outputs: Mapping[str, core.Tensor],
    init_op: Optional[ops.Operation] = None,
    assets_collection: Optional[Sequence[core.Symbol]] = None,
) -> None:
    """Saves a TF1 model.

    Args:
      sess: Current tf.Session object.
      saved_model_path: Directory to save the model.
      signature_key: The key to the SignatureDef that inputs & outputs
        correspond to.
      tags: Set of tags associated with the model.
      inputs: Input name -> input tensor mapping.
      outputs: Output name -> output tensor mapping.
      init_op: Op for initialization.
      assets_collection: Assets collection. This collection is a list of string
        tensors. Each tensor contains the asset file names.
    """
    v1_builder = builder.SavedModelBuilder(saved_model_path)
    # Wrap the given input/output tensors into a PREDICT-method SignatureDef.
    sig_def = signature_def_utils_impl.predict_signature_def(
        inputs=inputs, outputs=outputs
    )

    v1_builder.add_meta_graph_and_variables(
        sess,
        tags,
        signature_def_map={signature_key: sig_def},
        # `main_op` runs when the SavedModel is loaded (e.g. table init).
        main_op=init_op,
        assets_collection=assets_collection,
    )
    v1_builder.save()
def _create_simple_gather_and_conv_model(
    self,
    input_type: dtypes.DType,
    filter_shape: Sequence[int],
    is_qat_model: bool = False,
) -> module.Module:
    """Creates a model chaining an embedding gather with two conv2d ops.

    Args:
      input_type: Dtype of the 1D index tensor fed to the gather.
      filter_shape: Shape of the first convolution's filter.
      is_qat_model: If True, fake-quant ops are inserted around the second
        conv to simulate a quantization-aware-trained model.

    Returns:
      A `module.Module` exposing a single `model` function.
    """

    class SimpleGatherAndConvModel(module.Module):
        """A simple model with a single gather and a conv2d."""

        def __init__(self):
            """Initializes a SimpleGatherAndConvModel."""
            self.embedding_w = np.random.randn(1024, 3, 4, 3).astype('f4')
            # Clip embedding values into [-4, 4].
            self.embedding_w = np.minimum(np.maximum(self.embedding_w, -4), 4)

            self.conv_filters = np.random.uniform(
                low=-10, high=10, size=filter_shape
            ).astype('f4')

            second_conv_filter_shape = (3, 3, filter_shape[-1], 1)
            self.second_conv_filters = np.random.uniform(
                low=-10, high=10, size=second_conv_filter_shape
            ).astype('f4')

        @def_function.function(
            input_signature=[
                tensor_spec.TensorSpec(
                    shape=[None], dtype=input_type, name='input_tensor'
                )
            ]
        )
        def model(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
            """Performs a gather and a 2D convolution operation.

            Args:
              input_tensor: Input tensor to perform operation on.

            Returns:
              A map of: output key -> output result.
            """
            out = array_ops.gather_v2(self.embedding_w, input_tensor)

            # One pure conv
            out = nn_ops.conv2d(
                out,
                self.conv_filters,
                strides=(1, 1, 2, 1),
                dilations=(1, 1, 1, 1),
                padding='SAME',
                data_format='NHWC',
            )

            # One fakequant attached conv
            if is_qat_model:
                out = array_ops.fake_quant_with_min_max_args(
                    out, min=-0.1, max=0.2, num_bits=8, narrow_range=False
                )
                # Weight fake-quant uses narrow_range=True (symmetric int8).
                second_conv_filters = array_ops.fake_quant_with_min_max_args(
                    self.second_conv_filters,
                    min=-0.1,
                    max=0.2,
                    num_bits=8,
                    narrow_range=True,
                )
            else:
                second_conv_filters = self.second_conv_filters

            out = nn_ops.conv2d(
                out,
                second_conv_filters,
                strides=(1, 1, 2, 1),
                dilations=(1, 1, 1, 1),
                padding='SAME',
                data_format='NHWC',
            )
            if is_qat_model:
                out = array_ops.fake_quant_with_min_max_args(
                    out, min=-0.1, max=0.2, num_bits=8, narrow_range=False
                )
            return {'output': out}

    return SimpleGatherAndConvModel()
def _create_and_save_tf1_gather_model(
    self,
    saved_model_path: str,
    signature_key: str,
    tags: Collection[str],
    input_key: str,
    output_key: str,
    input_type: dtypes.DType,
    use_variable=False,
) -> core.Tensor:
    """Creates and saves a simple gather model.

    This is intended to be used for TF1 (graph mode) tests.

    Args:
      saved_model_path: Directory to save the model.
      signature_key: The key to the SignatureDef that inputs & outputs
        correspond to.
      tags: Set of tags associated with the model.
      input_key: The key to the input tensor.
      output_key: The key to the output tensor.
      input_type: type of the input index tensor for gather operation.
      use_variable: Setting this to `True` makes the filter for the gather
        operation a `tf.Variable`.

    Returns:
      in_placeholder: The placeholder tensor used as an input to the model.
    """
    with ops.Graph().as_default(), session.Session() as sess:
        in_placeholder, output_tensor = self._create_simple_tf1_gather_model(
            input_type=input_type, use_variable_for_filter=use_variable
        )

        # Variables must be initialized before they can be saved.
        if use_variable:
            sess.run(variables.global_variables_initializer())

        self._save_tf1_model(
            sess,
            saved_model_path,
            signature_key,
            tags,
            inputs={input_key: in_placeholder},
            outputs={output_key: output_tensor},
        )

    return in_placeholder
def _create_gather_model(self, input_type, use_variable):
    """Creates an `AutoTrackable` model with a single gather op.

    Args:
      input_type: Dtype of the 1D index tensor (length 6) fed to the gather.
      use_variable: If True, the gathered weight is a `tf.Variable`.

    Returns:
      A `GatherModel` instance.
    """

    class GatherModel(autotrackable.AutoTrackable):
        """A simple model with a single gather."""

        def __init__(self, use_variable):
            """Initializes a GatherModel.

            Args:
              use_variable: If True, creates a variable for weight.
            """
            super(GatherModel, self).__init__()
            w_val = np.random.randn(128, 32).astype('f4')
            if use_variable:
                self.w = variables.Variable(w_val)
            else:
                self.w = w_val

        @def_function.function(
            input_signature=[
                tensor_spec.TensorSpec(
                    shape=[6], dtype=input_type, name='input_tensor'
                )
            ]
        )
        def __call__(
            self, input_tensor: core.Tensor
        ) -> Mapping[str, core.Tensor]:
            """Performs a gather operation."""
            out = array_ops.gather_v2(self.w, input_tensor)
            return {'output': out}

    return GatherModel(use_variable)
def _create_depthwise_conv2d_model(
    self,
    input_shape: Sequence[int],
    filter_shape: Sequence[int],
    has_bias: bool = False,
    has_batch_norm: bool = False,
    activation_fn: Optional[ops.Operation] = None,
    strides: Sequence[int] = (1, 2, 2, 1),
    dilations: Sequence[int] = (1, 1, 1, 1),
    padding: str = 'SAME',
):
    """Creates a model with a single depthwise conv2d (+ optional bias/BN/act).

    Args:
      input_shape: NHWC shape of the input tensor.
      filter_shape: Shape of the depthwise filter
        (height, width, in_channels, channel_multiplier).
      has_bias: If True, a bias is added after the conv.
      has_batch_norm: If True, a (non-training) fused batch norm is applied.
      activation_fn: Optional activation applied last.
      strides: Strides for the depthwise conv.
      dilations: Dilations for the depthwise conv.
      padding: Padding scheme for the depthwise conv.

    Returns:
      A `DepthwiseConvModel` instance.
    """

    class DepthwiseConvModel(module.Module):
        """A simple model with a single depthwise conv2d, bias and relu."""

        def __init__(self):
            # For a depthwise conv the output channel count is
            # in_channels * channel_multiplier (filter dims 2 and 3).
            self.out_channel_size = filter_shape[2] * filter_shape[3]

            # This ensures filters will have different value range per out channel
            self.filters = np.stack(
                [
                    np.random.uniform(
                        low=-(i + 1), high=(i + 1), size=filter_shape[:-2]
                    ).astype('f4')
                    for i in range(self.out_channel_size)
                ],
                axis=-1,
            )
            self.filters = self.filters.reshape(filter_shape)

            self.bias = np.random.uniform(
                low=0, high=10, size=(self.out_channel_size)
            ).astype('f4')

        @def_function.function(
            input_signature=[
                tensor_spec.TensorSpec(shape=input_shape, dtype=dtypes.float32)
            ]
        )
        def depthwise_conv(
            self, input_tensor: core.Tensor
        ) -> Mapping[str, core.Tensor]:
            """Performs a 2D depthwise convolution operation.

            Args:
              input_tensor: Input tensor to perform convolution on.

            Returns:
              A map of: output key -> output result.
            """
            scale = [1.0] * self.out_channel_size
            offset = [0.5] * self.out_channel_size
            mean, variance = scale, offset
            # BUG FIX: the conv previously hard-coded strides=[1, 2, 2, 1],
            # dilations=[1, 1, 1, 1] and padding='SAME', silently ignoring the
            # factory arguments (unlike `_create_conv2d_model`, which forwards
            # them). Forward the arguments; the defaults are identical to the
            # old hard-coded values, so default-argument callers are
            # unaffected.
            out = nn_ops.depthwise_conv2d_native(
                input_tensor,
                self.filters,
                strides=strides,
                dilations=dilations,
                padding=padding,
                data_format='NHWC',
            )
            if has_bias:
                out = nn_ops.bias_add(out, self.bias)
            if has_batch_norm:
                # Fusing is supported for non-training case.
                out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(
                    out, scale, offset, mean, variance, is_training=False
                )
            if activation_fn is not None:
                out = activation_fn(out)
            return {'output': out}

    return DepthwiseConvModel()
def _create_conv2d_model(
    self,
    input_shape: Sequence[int],
    filter_shape: Sequence[int],
    has_bias: bool = False,
    has_batch_norm: bool = False,
    activation_fn: Optional[ops.Operation] = None,
    strides: Sequence[int] = (1, 2, 2, 1),
    dilations: Sequence[int] = (1, 1, 1, 1),
    padding: str = 'SAME',
):
    """Creates a model with a single conv2d (+ optional bias/BN/activation).

    Args:
      input_shape: NHWC shape of the input tensor.
      filter_shape: Shape of the conv filter (h, w, in_ch, out_ch).
      has_bias: If True, a bias is added after the conv.
      has_batch_norm: If True, a (non-training) fused batch norm is applied.
      activation_fn: Optional activation applied last.
      strides: Strides forwarded to the conv op.
      dilations: Dilations forwarded to the conv op.
      padding: Padding scheme forwarded to the conv op.

    Returns:
      A `ConvModel` instance.
    """

    class ConvModel(module.Module):
        """A simple model with a single conv2d, bias and relu."""

        def __init__(self):
            self.out_channel_size = filter_shape[-1]

            # This ensures filters will have different value range per out channel
            self.filters = np.stack(
                [
                    np.random.uniform(
                        low=-(i + 1), high=(i + 1), size=filter_shape[:-1]
                    ).astype('f4')
                    for i in range(self.out_channel_size)
                ],
                axis=-1,
            )

            self.bias = np.random.uniform(
                low=0, high=10, size=(self.out_channel_size)
            ).astype('f4')

        @def_function.function(
            input_signature=[
                tensor_spec.TensorSpec(shape=input_shape, dtype=dtypes.float32)
            ]
        )
        def conv(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
            """Performs a 2D convolution operation.

            Args:
              input_tensor: Input tensor to perform convolution on.

            Returns:
              A map of: output key -> output result.
            """
            scale = [1.0] * self.out_channel_size
            offset = [0.5] * self.out_channel_size
            mean, variance = scale, offset
            out = nn_ops.conv2d(
                input_tensor,
                self.filters,
                strides=strides,
                dilations=dilations,
                padding=padding,
                data_format='NHWC',
            )
            if has_bias:
                out = nn_ops.bias_add(out, self.bias, data_format='NHWC')
            if has_batch_norm:
                # Fusing is supported for non-training case.
                out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(
                    out, scale, offset, mean, variance, is_training=False
                )
            if activation_fn is not None:
                out = activation_fn(out)
            return {'output': out}

    return ConvModel()
def _create_matmul_model(
    self,
    input_shape: Sequence[int],
    weight_shape: Sequence[int],
    saved_model_path: str,
    has_bias: bool = False,
    activation_fn: Optional[ops.Operation] = None,
    bias_size: Optional[int] = None,
    use_biasadd: bool = True,
) -> module.Module:
    """Creates and saves a model with a single matmul (+ optional bias/act).

    Args:
      input_shape: Shape of the matmul's left-hand-side input.
      weight_shape: Shape of the random weight tensor.
      saved_model_path: Directory to save the model to.
      has_bias: If True, a bias term is added after the matmul.
      activation_fn: Optional activation applied last.
      bias_size: Size of the bias; when None and `has_bias` is True it
        defaults to the weight's last dimension.
      use_biasadd: If True, bias is added with BiasAdd; otherwise with AddV2.

    Returns:
      The saved `MatmulModel` instance.
    """

    class MatmulModel(module.Module):
        """A simple model with a single matmul.

        Bias and activation function are optional.
        """

        def __init__(
            self,
            weight_shape: Sequence[int],
            bias_size: Optional[int] = None,
            activation_fn: Optional[ops.Operation] = None,
            use_biasadd: bool = True,
        ) -> None:
            """Initializes a MatmulModel.

            Args:
              weight_shape: Shape of the weight tensor.
              bias_size: If None, do not use bias. Else, use given size as bias.
              activation_fn: The activation function to be used. No activation
                function if None.
              use_biasadd: If True, use BiasAdd for adding bias, else use AddV2.
            """
            self.bias_size = bias_size
            self.activation_fn = activation_fn
            self.use_biasadd = use_biasadd
            self.filters = np.random.uniform(low=-1.0, high=1.0, size=weight_shape)

            if bias_size is not None:
                self.bias = np.random.uniform(low=-1.0, high=1.0, size=bias_size)

        def has_bias(self) -> bool:
            return self.bias_size is not None

        def has_reshape(self) -> bool:
            # A reshape is needed when the bias size does not match the
            # matmul's innermost output dimension.
            return self.has_bias() and self.bias_size != self.filters.shape[-1]

        @def_function.function
        def matmul(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
            """Performs a matrix multiplication.

            Depending on self.has_bias and self.activation_fn, it may add a bias
            term or
            go through the activaction function.

            Args:
              input_tensor: Input tensor to matmul with the filter.

            Returns:
              A map of: output key -> output result.
            """
            out = math_ops.matmul(input_tensor, self.filters, name='sample/matmul')

            if self.has_reshape():
                input_shape = input_tensor.shape
                if len(input_shape) == 3:
                    reshape_shape = (input_shape[0], -1, self.bias_size)
                else:
                    reshape_shape = (-1, self.bias_size)

                out = array_ops.reshape(out, reshape_shape)

            if self.has_bias():
                if self.use_biasadd:
                    out = nn_ops.bias_add(out, self.bias)
                else:
                    out = math_ops.add_v2(out, self.bias)

            if self.activation_fn is not None:
                out = self.activation_fn(out)

            return {'output': out}

    # If bias_size is not explicitly given, it should default to width of weight.
    if bias_size is None and has_bias:
        bias_size = weight_shape[-1]

    # Verify that when bias_size is not None, has_bias should be True.
    # And if bias_size is None, has_bias should be False using XNOR
    assert not ((bias_size is not None) ^ has_bias)

    # Verify that bias size is correct
    if bias_size:
        input_height = input_shape[0] if len(input_shape) == 2 else input_shape[1]
        assert input_height * weight_shape[-1] % bias_size == 0
    # BUG FIX: `use_biasadd` was accepted by this factory and by MatmulModel
    # but never forwarded, so `use_biasadd=False` silently still produced a
    # BiasAdd op. Forward it explicitly (default behavior is unchanged).
    model = MatmulModel(weight_shape, bias_size, activation_fn, use_biasadd)
    saved_model_save.save(
        model,
        saved_model_path,
        signatures=model.matmul.get_concrete_function(
            tensor_spec.TensorSpec(
                shape=input_shape, dtype=dtypes.float32, name='input_tensor'
            )
        ),
    )
    return model
# Prepares sample einsum input data shapes.
# This function returns:
# 1. Shape for input 1
# 2. Shape for input 2
# 3. Shape for bias
# 4. Signature for input 1 (Could contain None dimension)
# 5. Signature for input 2 (Could contain None dimension)
def _prepare_sample_einsum_datashapes(
self,
equation: str,
generate_unknown_shape_signature: bool = False,
use_bias: bool = False,
) -> Tuple[
List[Optional[int]],
List[Optional[int]],
Optional[List[Optional[int]]],
List[Optional[int]],
List[Optional[int]],
]:
# 1. Parse equation.
comma_pos = equation.find(',')
arrow_pos = equation.find('->')
x_labels = equation[0:comma_pos]
y_labels = equation[comma_pos + 1 : arrow_pos]
out_labels = equation[arrow_pos + 1 :]
# 2. Create sample shapes.
label_to_size = {'a': 4, 'b': 32, 'c': 64, 'd': 128, 'e': 8}
x_shape = [label_to_size.get(x_label) for x_label in x_labels]
y_shape = [label_to_size.get(y_label) for y_label in y_labels]
bias_shape = None
if use_bias:
bias_shape = [label_to_size.get(out_label) for out_label in out_labels]
bias_shape = bias_shape[-1:]
contracting_dims = set()
x_signature = list(x_shape)
y_signature = list(y_shape)
if generate_unknown_shape_signature:
for c in x_labels:
if c in y_labels:
contracting_dims.add(c)
x_signature = [
None if c not in contracting_dims else x_shape[cidx]
for cidx, c in enumerate(x_labels)
]
y_signature = [
None if c not in contracting_dims else y_shape[cidx]
for cidx, c in enumerate(y_labels)
]
return x_shape, y_shape, bias_shape, x_signature, y_signature
def _create_einsum_model(
    self,
    equation: str,
    y_shape: Sequence[int],
    x_signature: Sequence[Optional[int]],
    y_signature: Sequence[Optional[int]],
    bias_shape: Optional[Sequence[int]] = None,
    activation_fn: Optional[ops.Operation] = None,
    is_qat_model: bool = False,
) -> module.Module:
    """Creates a module with einsum functions, optionally in QAT form.

    Args:
      equation: Einsum equation string, e.g. "ab,bc->ac".
      y_shape: Shape of the built-in kernel used by `einsum_with_kernel`.
      x_signature: Input signature (may contain None dims) for `x`.
      y_signature: Input signature (may contain None dims) for `y`.
      bias_shape: If given, a bias of this shape is added to the output.
      activation_fn: Optional activation applied after bias.
      is_qat_model: If True, fake-quant ops wrap the inputs and output.

    Returns:
      An `EinsumModel` instance.
    """

    class EinsumModel(module.Module):
        """Einsum class."""

        def __init__(self):
            self._bias = None
            if bias_shape is not None:
                self._bias = array_ops.constant(
                    np.random.uniform(size=bias_shape), dtype=dtypes.float32
                )

            self._kernel = np.random.uniform(size=y_shape).astype('f4')
            # Fake-quant (min, max) ranges for (x, y, output) respectively.
            self._min = (-0.8, -0.8, -0.9)
            self._max = (0.9, 0.9, 1.0)

        @def_function.function(
            input_signature=[
                tensor_spec.TensorSpec(
                    name='x', shape=x_signature, dtype=dtypes.float32
                )
            ]
        )
        def einsum_with_kernel(self, x: core.Tensor) -> Mapping[str, core.Tensor]:
            return self._einsum(x, self._kernel)

        @def_function.function(
            input_signature=[
                tensor_spec.TensorSpec(
                    name='x', shape=x_signature, dtype=dtypes.float32
                ),
                tensor_spec.TensorSpec(
                    name='y', shape=y_signature, dtype=dtypes.float32
                ),
            ]
        )
        def einsum_without_kernel(
            self, x: core.Tensor, y: core.Tensor
        ) -> Mapping[str, core.Tensor]:
            return self._einsum(x, y)

        def _einsum(self, x, y):
            # In QAT mode, both operands are fake-quantized before the einsum.
            if is_qat_model:
                x = array_ops.fake_quant_with_min_max_vars(
                    x,
                    min=ops.convert_to_tensor(self._min[0]),
                    max=ops.convert_to_tensor(self._max[0]),
                    num_bits=8,
                    narrow_range=False,
                )
                y = array_ops.fake_quant_with_min_max_vars(
                    y,
                    min=ops.convert_to_tensor(self._min[1]),
                    max=ops.convert_to_tensor(self._max[1]),
                    num_bits=8,
                    narrow_range=False,
                )

            out = tensorflow.einsum(equation, x, y)
            if self._bias is not None:
                out = nn_ops.bias_add(out, self._bias)
            if activation_fn is not None:
                out = activation_fn(out)
            # In QAT mode, the final output is fake-quantized as well.
            if is_qat_model:
                out = array_ops.fake_quant_with_min_max_vars(
                    out,
                    min=ops.convert_to_tensor(self._min[2]),
                    max=ops.convert_to_tensor(self._max[2]),
                    num_bits=8,
                    narrow_range=False,
                )
            return {'output': out}

    return EinsumModel()
def _create_and_save_tf1_conv_model(
    self,
    saved_model_path: str,
    signature_key: str,
    tags: Collection[str],
    input_key: str,
    output_key: str,
    *,
    input_shape: Sequence[int] = (1, 3, 4, 3),
    filter_shape: Sequence[int] = (2, 3, 3, 2),
    use_variable: bool = False,
) -> core.Tensor:
    """Creates and saves a simple convolution model.

    This is intended to be used for TF1 (graph mode) tests.

    Args:
      saved_model_path: Directory to save the model.
      signature_key: The key to the SignatureDef that inputs & outputs
        correspond to.
      tags: Set of tags associated with the model.
      input_key: The key to the input tensor.
      output_key: The key to the output tensor.
      input_shape: Shape of the input tensor.
      filter_shape: Shape of the filter.
      use_variable: Setting this to `True` makes the filter for the conv
        operation a `tf.Variable`.

    Returns:
      in_placeholder: The placeholder tensor used as an input to the model.
    """
    with ops.Graph().as_default(), session.Session() as sess:
        in_placeholder, output_tensor = self._create_simple_tf1_conv_model(
            input_shape=input_shape,
            filter_shape=filter_shape,
            use_variable_for_filter=use_variable,
        )

        # Variables must be initialized before they can be saved.
        if use_variable:
            sess.run(variables.global_variables_initializer())

        self._save_tf1_model(
            sess,
            saved_model_path,
            signature_key,
            tags,
            inputs={input_key: in_placeholder},
            outputs={output_key: output_tensor},
        )

    return in_placeholder
def _create_while_model(self, input_shape: Sequence[int] = (1, 32, 32, 512)):
    """Creates a model whose main function contains a while-loop op.

    Args:
      input_shape: NHWC shape of the input tensor.

    Returns:
      A `WhileModel` instance.
    """

    class WhileModel(module.Module):
        """A model with a while op."""

        def __init__(self):
            w_shape = [3, 3] + [input_shape[-1], input_shape[-1]]
            self.w = np.random.uniform(low=-2, high=2, size=w_shape).astype('f4')

        @def_function.function
        def condition(self, x, w):
            # Keep looping while the total activation mass stays below 100.
            return math_ops.reduce_sum(x, keepdims=False) < 100

        @def_function.function
        def body(self, x, w):
            # Loop body re-applies the same conv weights each iteration.
            z = nn_ops.conv2d(x, w, padding='SAME')
            return z, w

        @def_function.function(
            input_signature=[
                tensor_spec.TensorSpec(
                    shape=input_shape, dtype=dtypes.float32, name='input_tensor'
                )
            ]
        )
        def main(self, x):
            x1 = nn_ops.conv2d(x, self.w, padding='SAME')
            x2, _ = while_loop_ops.while_loop(
                self.condition, self.body, [x, self.w]
            )
            result = x1 + x2
            return {'output': result}

    return WhileModel()
| QuantizedModelTest |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 74422,
"end": 77403
} | class ____:
def test_forward(self):
x = np.arange(-6, 5)
bins = np.arange(-5, 5)
assert_array_equal(digitize(x, bins), np.arange(11))
def test_reverse(self):
x = np.arange(5, -6, -1)
bins = np.arange(5, -5, -1)
assert_array_equal(digitize(x, bins), np.arange(11))
def test_random(self):
x = rand(10)
bin = np.linspace(x.min(), x.max(), 10)
assert_(np.all(digitize(x, bin) != 0))
def test_right_basic(self):
x = [1, 5, 4, 10, 8, 11, 0]
bins = [1, 5, 10]
default_answer = [1, 2, 1, 3, 2, 3, 0]
assert_array_equal(digitize(x, bins), default_answer)
right_answer = [0, 1, 1, 2, 2, 3, 0]
assert_array_equal(digitize(x, bins, True), right_answer)
def test_right_open(self):
x = np.arange(-6, 5)
bins = np.arange(-6, 4)
assert_array_equal(digitize(x, bins, True), np.arange(11))
def test_right_open_reverse(self):
x = np.arange(5, -6, -1)
bins = np.arange(4, -6, -1)
assert_array_equal(digitize(x, bins, True), np.arange(11))
def test_right_open_random(self):
x = rand(10)
bins = np.linspace(x.min(), x.max(), 10)
assert_(np.all(digitize(x, bins, True) != 10))
def test_monotonic(self):
x = [-1, 0, 1, 2]
bins = [0, 0, 1]
assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3])
assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3])
bins = [1, 1, 0]
assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0])
assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0])
bins = [1, 1, 1, 1]
assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4])
assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4])
bins = [0, 0, 1, 0]
assert_raises(ValueError, digitize, x, bins)
bins = [1, 1, 0, 1]
assert_raises(ValueError, digitize, x, bins)
def test_casting_error(self):
x = [1, 2, 3 + 1.j]
bins = [1, 2, 3]
assert_raises(TypeError, digitize, x, bins)
x, bins = bins, x
assert_raises(TypeError, digitize, x, bins)
def test_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
assert_(not isinstance(digitize(b, a, False), A))
assert_(not isinstance(digitize(b, a, True), A))
def test_large_integers_increasing(self):
# gh-11022
x = 2**54 # loses precision in a float
assert_equal(np.digitize(x, [x - 1, x + 1]), 1)
@pytest.mark.xfail(
reason="gh-11022: np._core.multiarray._monoticity loses precision")
def test_large_integers_decreasing(self):
# gh-11022
x = 2**54 # loses precision in a float
assert_equal(np.digitize(x, [x + 1, x - 1]), 1)
| TestDigitize |
python | charliermarsh__ruff | crates/ruff_python_parser/resources/valid/statement/class.py | {
"start": 119,
"end": 171
} | class ____:
def method():
a, b = data
| Test |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 57698,
"end": 58041
} | class ____(BaseModel):
usage: Optional["Usage"] = Field(default=None, description="")
time: Optional[float] = Field(default=None, description="Time spent to process this request")
status: Optional[str] = Field(default=None, description="")
result: Optional["CountResult"] = Field(default=None, description="")
| InlineResponse20019 |
python | kamyu104__LeetCode-Solutions | Python/maximum-balanced-subsequence-sum.py | {
"start": 2341,
"end": 4402
} | class ____(object):
def maxBalancedSubsequenceSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
NEG_INF = float("-inf")
# Range Maximum Query
class SegmentTree(object):
def __init__(self, N,
build_fn=lambda _: None,
query_fn=lambda x, y: max(x, y),
update_fn=lambda x, y: max(x, y)):
self.tree = [None]*(2*2**((N-1).bit_length()))
self.base = len(self.tree)//2
self.query_fn = query_fn
self.update_fn = update_fn
for i in xrange(self.base, self.base+N):
self.tree[i] = build_fn(i-self.base)
for i in reversed(xrange(1, self.base)):
self.tree[i] = query_fn(self.tree[2*i], self.tree[2*i+1])
def update(self, i, h):
x = self.base+i
self.tree[x] = self.update_fn(self.tree[x], h)
while x > 1:
x //= 2
self.tree[x] = self.query_fn(self.tree[x*2], self.tree[x*2+1])
def query(self, L, R):
if L > R:
return None
L += self.base
R += self.base
left = right = None
while L <= R:
if L & 1:
left = self.query_fn(left, self.tree[L])
L += 1
if R & 1 == 0:
right = self.query_fn(self.tree[R], right)
R -= 1
L //= 2
R //= 2
return self.query_fn(left, right)
val_to_idx = {x:i for i, x in enumerate(sorted({x-i for i, x in enumerate(nums)}))}
st = SegmentTree(len(val_to_idx))
for i, x in enumerate(nums):
v = max(st.query(0, val_to_idx[x-i]), 0)+x
st.update(val_to_idx[x-i], v)
return st.query(0, len(val_to_idx)-1)
| Solution3 |
python | kamyu104__LeetCode-Solutions | Python/second-minimum-time-to-reach-destination.py | {
"start": 1933,
"end": 3461
} | class ____(object):
def secondMinimum(self, n, edges, time, change):
"""
:type n: int
:type edges: List[List[int]]
:type time: int
:type change: int
:rtype: int
"""
INF = float("inf")
def bfs(adj, start):
q = [start]
dist = [INF]*len(adj)
dist[start] = 0
while q:
new_q = []
for u in q:
for v in adj[u]:
if dist[v] != INF:
continue
dist[v] = dist[u]+1
new_q.append(v)
q = new_q
return dist
def calc_time(time, change, dist):
result = 0
for _ in xrange(dist):
if result//change%2:
result = (result//change+1)*change
result += time
return result
adj = [[] for _ in xrange(n)]
for u, v in edges:
adj[u-1].append(v-1)
adj[v-1].append(u-1)
dist_to_end, dist_to_start = bfs(adj, 0), bfs(adj, n-1)
dist = dist_to_end[n-1]+2 # always exists
for i in xrange(n): # case of detour
if dist_to_end[i]+dist_to_start[i] == dist_to_end[n-1]:
continue
dist = min(dist, dist_to_end[i]+dist_to_start[i]) # find second min
if dist == dist_to_end[n-1]+1:
break
return calc_time(time, change, dist)
| Solution2 |
python | fluentpython__example-code | attic/iterables/vector.py | {
"start": 1354,
"end": 2546
} | class ____:
"""An n-dimensional vector"""
def __init__(self, *components): # <1>
self._components = tuple(components) # <2>
def __repr__(self):
return 'Vector' + (reprlib.repr(self._components)) # <3>
def __iter__(self):
return iter(self._components) # <4>
def __abs__(self):
return math.sqrt(sum(comp*comp for comp in self)) # <5>
def __len__(self):
return len(self._components) # <6>
def __add__(self, other):
if len(self) != len(other):
raise ValueError(EQ_DIMENSIONS_MSG % 'Addition')
return Vector(*(a+b for a, b in zip(self, other))) # <7>
def __mul__(self, other):
if isinstance(other, numbers.Number):
return Vector(*(comp*other for comp in self)) # <8>
else:
return self.elementwise_mul(other) # <9>
def elementwise_mul(self, other):
if len(self) != len(other):
raise ValueError(EQ_DIMENSIONS_MSG %
'Elementwise multiplication')
return Vector(*(a*b for a, b in zip(self, other))) # <10>
def __bool__(self):
return any(self) # <11>
# END VECTOR_ITER
| Vector |
python | PrefectHQ__prefect | tests/runtime/test_task_run.py | {
"start": 3385,
"end": 3818
} | class ____:
async def test_tags_is_attribute(self):
assert "tags" in dir(task_run)
async def test_tags_is_empty_when_not_set(self):
assert task_run.tags == []
async def test_tags_returns_tags_when_present_dynamically(self):
with TaskRunContext.model_construct(
task_run=TaskRun.model_construct(tags=["foo", "bar"])
):
assert task_run.tags == ["foo", "bar"]
| TestTags |
python | tensorflow__tensorflow | tensorflow/python/data/ops/readers.py | {
"start": 14741,
"end": 18690
} | class ____(dataset_ops.DatasetV2):
"""A `Dataset` comprising records from one or more TFRecord files.
This dataset loads TFRecords from the files as bytes, exactly as they were
written.`TFRecordDataset` does not do any parsing or decoding on its own.
Parsing and decoding can be done by applying `Dataset.map` transformations
after the `TFRecordDataset`.
A minimal example is given below:
>>> import tempfile
>>> example_path = os.path.join(tempfile.gettempdir(), "example.tfrecords")
>>> np.random.seed(0)
>>> # Write the records to a file.
... with tf.io.TFRecordWriter(example_path) as file_writer:
... for _ in range(4):
... x, y = np.random.random(), np.random.random()
...
... record_bytes = tf.train.Example(features=tf.train.Features(feature={
... "x": tf.train.Feature(float_list=tf.train.FloatList(value=[x])),
... "y": tf.train.Feature(float_list=tf.train.FloatList(value=[y])),
... })).SerializeToString()
... file_writer.write(record_bytes)
>>> # Read the data back out.
>>> def decode_fn(record_bytes):
... return tf.io.parse_single_example(
... # Data
... record_bytes,
...
... # Schema
... {"x": tf.io.FixedLenFeature([], dtype=tf.float32),
... "y": tf.io.FixedLenFeature([], dtype=tf.float32)}
... )
>>> for batch in tf.data.TFRecordDataset([example_path]).map(decode_fn):
... print("x = {x:.4f}, y = {y:.4f}".format(**batch))
x = 0.5488, y = 0.7152
x = 0.6028, y = 0.5449
x = 0.4237, y = 0.6459
x = 0.4376, y = 0.8918
"""
def __init__(self,
filenames,
compression_type=None,
buffer_size=None,
num_parallel_reads=None,
name=None):
"""Creates a `TFRecordDataset` to read one or more TFRecord files.
Each element of the dataset will contain a single TFRecord.
Args:
filenames: A `tf.string` tensor or `tf.data.Dataset` containing one or
more filenames.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`.
buffer_size: (Optional.) A `tf.int64` scalar representing the number of
bytes in the read buffer. If your input pipeline is I/O bottlenecked,
consider setting this parameter to a value 1-100 MBs. If `None`, a
sensible default for both local and remote file systems is used.
num_parallel_reads: (Optional.) A `tf.int64` scalar representing the
number of files to read in parallel. If greater than one, the records of
files read in parallel are outputted in an interleaved order. If your
input pipeline is I/O bottlenecked, consider setting this parameter to a
value greater than one to parallelize the I/O. If `None`, files will be
read sequentially.
name: (Optional.) A name for the tf.data operation.
Raises:
TypeError: If any argument does not have the expected type.
ValueError: If any argument does not have the expected shape.
"""
filenames = _create_or_validate_filenames_dataset(filenames, name=name)
self._filenames = filenames
self._compression_type = compression_type
self._buffer_size = buffer_size
self._num_parallel_reads = num_parallel_reads
def creator_fn(filename):
return _TFRecordDataset(
filename, compression_type, buffer_size, name=name)
self._impl = _create_dataset_reader(
creator_fn, filenames, num_parallel_reads, name=name)
variant_tensor = self._impl._variant_tensor # pylint: disable=protected-access
super(TFRecordDatasetV2, self).__init__(variant_tensor)
def _inputs(self):
return self._impl._inputs() # pylint: disable=protected-access
@property
def element_spec(self):
return tensor_spec.TensorSpec([], dtypes.string)
@tf_export(v1=["data.TFRecordDataset"])
| TFRecordDatasetV2 |
python | kamyu104__LeetCode-Solutions | Python/subarrays-with-xor-at-least-k.py | {
"start": 92,
"end": 2427
} | class ____(object):
def countXorSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
class Trie(object):
def __init__(self, bit_length):
self.__lefts = [-1]*(1+(1+len(nums))*bit_length) # preallocate to speed up performance
self.__rights = [-1]*(1+(1+len(nums))*bit_length)
self.__cnts = [0]*(1+(1+len(nums))*bit_length)
self.__i = 0
self.__new_node()
self.__bit_length = bit_length
def __new_node(self):
self.__i += 1
return self.__i-1
def add(self, num):
curr = 0
for i in reversed(xrange(self.__bit_length)):
x = (num>>i)&1
if x == 0:
if self.__lefts[curr] == -1:
self.__lefts[curr] = self.__new_node()
curr = self.__lefts[curr]
else:
if self.__rights[curr] == -1:
self.__rights[curr] = self.__new_node()
curr = self.__rights[curr]
self.__cnts[curr] += 1
def query(self, prefix, k):
result = curr = 0
for i in reversed(xrange(self.__bit_length)):
t = (k>>i)&1
x = (prefix>>i)&1
if t == 0:
tmp = self.__lefts[curr] if 1^x == 0 else self.__rights[curr]
if tmp != -1:
result += self.__cnts[tmp]
curr = self.__lefts[curr] if t^x == 0 else self.__rights[curr]
if curr == -1:
break
else:
result += self.__cnts[curr]
return result
result = prefix = 0
mx = max(max(nums), k, 1)
trie = Trie(mx.bit_length())
trie.add(prefix)
for x in nums:
prefix ^= x
result += trie.query(prefix, k)
trie.add(prefix)
return result
# Time: O(nlogr), r = max(max(nums), k, 1)
# Space: O(t)
# bitmasks, prefix sum, trie
| Solution |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-scrapegraph/examples/scrapegraph-smartscraper-lama-index.py | {
"start": 367,
"end": 661
} | class ____(BaseModel):
"""Schema for representing a company founder."""
name: str = Field(description="Name of the founder")
role: str = Field(description="Role of the founder")
social_media: str = Field(description="Social media URL of the founder", default="N/A")
| FounderSchema |
python | huggingface__transformers | src/transformers/processing_utils.py | {
"start": 24590,
"end": 24790
} | class ____(TypedDict, total=False):
processor_kwargs: ProcessingKwargs
mm_load_kwargs: ChatTemplateLoadKwargs
template_kwargs: ProcessorChatTemplateKwargs
@dataclass
| AllKwargsForChatTemplate |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/cloud_tasks.py | {
"start": 1274,
"end": 2125
} | class ____(BaseGoogleLink):
"""Helper class for constructing Cloud Task Queue Link."""
name = "Cloud Tasks Queue"
key = "cloud_task_queue"
format_str = CLOUD_TASKS_QUEUE_LINK
@staticmethod
def extract_parts(queue_name: str | None):
"""
Extract project_id, location and queue id from queue name.
projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID.
"""
if not queue_name:
return "", "", ""
parts = queue_name.split("/")
return parts[1], parts[3], parts[5]
@classmethod
def persist(cls, context: Context, **value):
queue_name = value.get("queue_name")
project_id, location, queue_id = cls.extract_parts(queue_name)
super().persist(context, project_id=project_id, location=location, queue_id=queue_id)
| CloudTasksQueueLink |
python | google__jax | jax/experimental/mosaic/gpu/constraints.py | {
"start": 2454,
"end": 2632
} | class ____:
expression: Expression
axes: tuple[int, ...]
def __str__(self):
return f"Reduce([{self.axes}], {self.expression})"
@dataclasses.dataclass(frozen=True)
| Reduce |
python | great-expectations__great_expectations | great_expectations/core/serdes.py | {
"start": 178,
"end": 322
} | class ____(BaseModel):
datasource: _IdentifierBundle
asset: _IdentifierBundle
batch_definition: _IdentifierBundle
| _EncodedValidationData |
python | FactoryBoy__factory_boy | tests/test_regression.py | {
"start": 208,
"end": 294
} | class ____(T.NamedTuple):
fullname: str
pseudonym: T.Optional[str] = None
| Author |
python | walkccc__LeetCode | solutions/2517. Maximum Tastiness of Candy Basket/2517.py | {
"start": 0,
"end": 510
} | class ____:
def maximumTastiness(self, price: list[int], k: int) -> int:
price.sort()
def numBaskets(m: int) -> int:
"""Returns the number of baskets we can pick for m tastiness."""
baskets = 0
prevPrice = -m
for p in price:
if p >= prevPrice + m:
prevPrice = p
baskets += 1
return baskets
l = bisect.bisect_left(range(max(price) - min(price) + 1), True,
key=lambda m: numBaskets(m) < k)
return l - 1
| Solution |
python | huggingface__transformers | tests/models/bart/test_modeling_bart.py | {
"start": 15788,
"end": 21979
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(BartModel, BartForConditionalGeneration, BartForSequenceClassification, BartForQuestionAnswering)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": BartModel,
"fill-mask": BartForConditionalGeneration,
"question-answering": BartForQuestionAnswering,
"summarization": BartForConditionalGeneration,
"text-classification": BartForSequenceClassification,
"text-generation": BartForCausalLM,
"text2text-generation": BartForConditionalGeneration,
"translation": BartForConditionalGeneration,
"zero-shot": BartForSequenceClassification,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
def setUp(self):
self.model_tester = BartModelTester(self)
self.config_tester = ConfigTester(self, config_class=BartConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], set())
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
# BartForSequenceClassification does not support inputs_embeds
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (BartModel, BartForConditionalGeneration, BartForQuestionAnswering):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
@unittest.skip("Bart no longer always uses self.shared so not working.")
def test_input_embeddings_support_forward_hook(self):
# Make sure that registering hooks on the input embeddings are indeed called
# in forward. This is necessary for gradient checkpointing in PEFT, see also #41821.
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
hook = unittest.mock.MagicMock(return_value=None)
model.get_input_embeddings().register_forward_hook(hook)
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
model(**inputs)
self.assertGreater(hook.call_count, 0)
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = BartForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
@unittest.skip(
reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245"
)
def test_load_save_without_tied_weights(self):
pass
def test_resize_embeddings_persists_embeddings_type(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
config.scale_embedding = True
model = BartForConditionalGeneration(config)
old_type = type(model.model.decoder.embed_tokens)
model.resize_token_embeddings(new_num_tokens=config.vocab_size)
new_type = type(model.model.decoder.embed_tokens)
self.assertIs(old_type, new_type)
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
@require_torch
@slow
| BartModelTest |
python | allegroai__clearml | clearml/datasets/dataset.py | {
"start": 2815,
"end": 3293
} | class ____(object):
link = attrib(default=None, type=str)
relative_path = attrib(default=None, type=str)
parent_dataset_id = attrib(default=None, type=str)
size = attrib(default=None, type=int)
hash = attrib(default=None, type=str)
def as_dict(self) -> Dict:
return dict(
link=self.link,
relative_path=self.relative_path,
parent_dataset_id=self.parent_dataset_id,
size=self.size,
)
| LinkEntry |
python | sphinx-doc__sphinx | sphinx/registry.py | {
"start": 2529,
"end": 24032
} | class ____:
def __init__(self) -> None:
#: special attrgetter for autodoc; class object -> attrgetter
self.autodoc_attrgetters: dict[type, Callable[[Any, str, Any], Any]] = {}
#: builders; a dict of builder name -> builder class
self.builders: dict[str, type[Builder]] = {}
#: autodoc documenters; a dict of documenter name -> documenter class
self.documenters: dict[str, type[Documenter]] = {}
#: css_files; a list of tuple of filename and attributes
self.css_files: list[tuple[str, dict[str, Any]]] = []
#: domains; a dict of domain name -> domain class
self.domains: dict[str, type[Domain]] = {}
#: additional directives for domains
#: a dict of domain name -> dict of directive name -> directive
self.domain_directives: dict[str, dict[str, type[Directive]]] = {}
#: additional indices for domains
#: a dict of domain name -> list of index class
self.domain_indices: dict[str, list[type[Index]]] = {}
#: additional object types for domains
#: a dict of domain name -> dict of objtype name -> objtype
self.domain_object_types: dict[str, dict[str, ObjType]] = {}
#: additional roles for domains
#: a dict of domain name -> dict of role name -> role impl.
self.domain_roles: dict[str, dict[str, RoleFunction | XRefRole]] = {}
#: additional enumerable nodes
#: a dict of node class -> tuple of figtype and title_getter function
self.enumerable_nodes: dict[type[Node], tuple[str, TitleGetter | None]] = {}
#: HTML inline and block math renderers
#: a dict of name -> tuple of visit function and depart function
self.html_inline_math_renderers: dict[
str,
_MathsInlineRenderers,
] = {}
self.html_block_math_renderers: dict[
str,
_MathsBlockRenderers,
] = {}
#: HTML assets
self.html_assets_policy: str = 'per_page'
#: HTML themes
self.html_themes: dict[str, _StrPath] = {}
#: js_files; list of JS paths or URLs
self.js_files: list[tuple[str | None, dict[str, Any]]] = []
#: LaTeX packages; list of package names and its options
self.latex_packages: list[tuple[str, str | None]] = []
self.latex_packages_after_hyperref: list[tuple[str, str | None]] = []
#: post transforms; list of transforms
self.post_transforms: list[type[Transform]] = []
#: source parsers; file type -> parser class
self.source_parsers: dict[str, type[Parser]] = {}
#: source suffix: suffix -> file type
self.source_suffix: dict[str, str] = {}
#: custom translators; builder name -> translator class
self.translators: dict[str, type[nodes.NodeVisitor]] = {}
#: custom handlers for translators
#: a dict of builder name -> dict of node name -> visitor and departure functions
self.translation_handlers: dict[str, dict[str, _NodeHandlerPair]] = {}
#: additional transforms; list of transforms
self.transforms: list[type[Transform]] = []
@property
def autodoc_attrgettrs(self) -> dict[type, Callable[[Any, str, Any], Any]]:
return self.autodoc_attrgetters
def add_builder(self, builder: type[Builder], override: bool = False) -> None:
logger.debug('[app] adding builder: %r', builder)
if not hasattr(builder, 'name'):
raise ExtensionError(
__('Builder class %s has no "name" attribute') % builder
)
if builder.name in self.builders and not override:
raise ExtensionError(
__('Builder %r already exists (in module %s)')
% (builder.name, self.builders[builder.name].__module__)
)
self.builders[builder.name] = builder
def preload_builder(self, app: Sphinx, name: str) -> None:
if name is None:
return
if name not in self.builders:
builder_entry_points = entry_points(group='sphinx.builders')
try:
entry_point = builder_entry_points[name]
except KeyError as exc:
raise SphinxError(
__(
'Builder name %s not registered or available'
' through entry point'
)
% name
) from exc
self.load_extension(app, entry_point.module)
def create_builder(self, app: Sphinx, name: str, env: BuildEnvironment) -> Builder:
if name not in self.builders:
raise SphinxError(__('Builder name %s not registered') % name)
return self.builders[name](app, env)
def add_domain(self, domain: type[Domain], override: bool = False) -> None:
logger.debug('[app] adding domain: %r', domain)
if domain.name in self.domains and not override:
raise ExtensionError(__('domain %s already registered') % domain.name)
self.domains[domain.name] = domain
def has_domain(self, domain: str) -> bool:
return domain in self.domains
def create_domains(self, env: BuildEnvironment) -> Iterator[Domain]:
for DomainClass in self.domains.values():
domain = DomainClass(env)
# transplant components added by extensions
domain.directives.update(self.domain_directives.get(domain.name, {}))
domain.roles.update(self.domain_roles.get(domain.name, {}))
domain.indices.extend(self.domain_indices.get(domain.name, []))
for name, objtype in self.domain_object_types.get(domain.name, {}).items():
domain.add_object_type(name, objtype)
yield domain
def add_directive_to_domain(
self, domain: str, name: str, cls: type[Directive], override: bool = False
) -> None:
logger.debug('[app] adding directive to domain: %r', (domain, name, cls))
if domain not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain)
directives: dict[str, type[Directive]] = self.domain_directives.setdefault(
domain, {}
)
if name in directives and not override:
raise ExtensionError(
__('The %r directive is already registered to domain %s')
% (name, domain)
)
directives[name] = cls
def add_role_to_domain(
self,
domain: str,
name: str,
role: RoleFunction | XRefRole,
override: bool = False,
) -> None:
logger.debug('[app] adding role to domain: %r', (domain, name, role))
if domain not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain)
roles = self.domain_roles.setdefault(domain, {})
if name in roles and not override:
raise ExtensionError(
__('The %r role is already registered to domain %s') % (name, domain)
)
roles[name] = role
def add_index_to_domain(
self, domain: str, index: type[Index], override: bool = False
) -> None:
logger.debug('[app] adding index to domain: %r', (domain, index))
if domain not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain)
indices = self.domain_indices.setdefault(domain, [])
if index in indices and not override:
raise ExtensionError(
__('The %r index is already registered to domain %s')
% (index.name, domain)
)
indices.append(index)
def add_object_type(
self,
directivename: str,
rolename: str,
indextemplate: str = '',
parse_node: Callable[[BuildEnvironment, str, addnodes.desc_signature], str]
| None = None,
ref_nodeclass: type[TextElement] | None = None,
objname: str = '',
doc_field_types: Sequence[Field] = (),
override: bool = False,
) -> None:
logger.debug(
'[app] adding object type: %r',
(
directivename,
rolename,
indextemplate,
parse_node,
ref_nodeclass,
objname,
doc_field_types,
),
)
# create a subclass of GenericObject as the new directive
directive = type(
directivename,
(GenericObject, object),
{
'indextemplate': indextemplate,
'parse_node': parse_node and staticmethod(parse_node),
'doc_field_types': doc_field_types,
},
)
self.add_directive_to_domain('std', directivename, directive)
self.add_role_to_domain('std', rolename, XRefRole(innernodeclass=ref_nodeclass))
object_types = self.domain_object_types.setdefault('std', {})
if directivename in object_types and not override:
raise ExtensionError(
__('The %r object_type is already registered') % directivename
)
object_types[directivename] = ObjType(objname or directivename, rolename)
def add_crossref_type(
self,
directivename: str,
rolename: str,
indextemplate: str = '',
ref_nodeclass: type[TextElement] | None = None,
objname: str = '',
override: bool = False,
) -> None:
logger.debug(
'[app] adding crossref type: %r',
(directivename, rolename, indextemplate, ref_nodeclass, objname),
)
# create a subclass of Target as the new directive
directive = type(
directivename,
(Target, object),
{'indextemplate': indextemplate},
)
self.add_directive_to_domain('std', directivename, directive)
self.add_role_to_domain('std', rolename, XRefRole(innernodeclass=ref_nodeclass))
object_types = self.domain_object_types.setdefault('std', {})
if directivename in object_types and not override:
raise ExtensionError(
__('The %r crossref_type is already registered') % directivename
)
object_types[directivename] = ObjType(objname or directivename, rolename)
def add_source_suffix(
self, suffix: str, filetype: str, override: bool = False
) -> None:
logger.debug('[app] adding source_suffix: %r, %r', suffix, filetype)
if suffix in self.source_suffix and not override:
raise ExtensionError(__('source_suffix %r is already registered') % suffix)
self.source_suffix[suffix] = filetype
def add_source_parser(self, parser: type[Parser], override: bool = False) -> None:
logger.debug('[app] adding search source_parser: %r', parser)
# create a map from filetype to parser
for filetype in parser.supported:
if filetype in self.source_parsers and not override:
raise ExtensionError(
__('source_parser for %r is already registered') % filetype
)
self.source_parsers[filetype] = parser
def get_source_parser(self, filetype: str) -> type[Parser]:
try:
return self.source_parsers[filetype]
except KeyError as exc:
raise SphinxError(
__('Source parser for %s not registered') % filetype
) from exc
def get_source_parsers(self) -> dict[str, type[Parser]]:
return self.source_parsers
def create_source_parser(
self, filename: str, *, config: Config, env: BuildEnvironment
) -> Parser:
parser_class = self.get_source_parser(filename)
parser = parser_class()
if isinstance(parser, SphinxParser):
parser._config = config
parser._env = env
return parser
def add_translator(
self, name: str, translator: type[nodes.NodeVisitor], override: bool = False
) -> None:
logger.debug('[app] Change of translator for the %s builder.', name)
if name in self.translators and not override:
raise ExtensionError(__('Translator for %r already exists') % name)
self.translators[name] = translator
def add_translation_handlers(
self, node: type[Element], **kwargs: _NodeHandlerPair
) -> None:
logger.debug('[app] adding translation_handlers: %r, %r', node, kwargs)
for builder_name, handlers in kwargs.items():
translation_handlers = self.translation_handlers.setdefault(
builder_name, {}
)
try:
visit, depart = handlers # unpack once for assertion
translation_handlers[node.__name__] = (visit, depart)
except ValueError as exc:
raise ExtensionError(
__(
'kwargs for add_node() must be a (visit, depart) '
'function tuple: %r=%r'
)
% (builder_name, handlers),
) from exc
def get_translator_class(
self, builder: type[Builder] | Builder
) -> type[nodes.NodeVisitor]:
try:
return self.translators[builder.name]
except KeyError:
try:
return builder.default_translator_class
except AttributeError as err:
msg = f'translator not found for {builder.name}'
raise AttributeError(msg) from err
def create_translator(
self, builder: type[Builder] | Builder, *args: Any
) -> nodes.NodeVisitor:
translator_class = self.get_translator_class(builder)
translator = translator_class(*args)
# transplant handlers for custom nodes to translator instance
handlers = self.translation_handlers.get(builder.name, None)
if handlers is None:
# retry with builder.format
handlers = self.translation_handlers.get(builder.format, {})
for name, (visit, depart) in handlers.items():
setattr(translator, 'visit_' + name, MethodType(visit, translator))
if depart:
setattr(translator, 'depart_' + name, MethodType(depart, translator))
return translator
def add_transform(self, transform: type[Transform]) -> None:
logger.debug('[app] adding transform: %r', transform)
self.transforms.append(transform)
def get_transforms(self) -> list[type[Transform]]:
return self.transforms
def add_post_transform(self, transform: type[Transform]) -> None:
logger.debug('[app] adding post transform: %r', transform)
self.post_transforms.append(transform)
def get_post_transforms(self) -> list[type[Transform]]:
return self.post_transforms
def add_documenter(self, objtype: str, documenter: type[Documenter]) -> None:
self.documenters[objtype] = documenter
def add_autodoc_attrgetter(
self, typ: type, attrgetter: Callable[[Any, str, Any], Any]
) -> None:
self.autodoc_attrgetters[typ] = attrgetter
def add_css_files(self, filename: str, **attributes: Any) -> None:
self.css_files.append((filename, attributes))
def add_js_file(self, filename: str | None, **attributes: Any) -> None:
logger.debug('[app] adding js_file: %r, %r', filename, attributes)
self.js_files.append((filename, attributes))
def has_latex_package(self, name: str) -> bool:
packages = self.latex_packages + self.latex_packages_after_hyperref
return bool([x for x in packages if x[0] == name])
def add_latex_package(
self, name: str, options: str | None, after_hyperref: bool = False
) -> None:
if self.has_latex_package(name):
logger.warning("latex package '%s' already included", name)
logger.debug('[app] adding latex package: %r', name)
if after_hyperref:
self.latex_packages_after_hyperref.append((name, options))
else:
self.latex_packages.append((name, options))
def add_enumerable_node(
self,
node: type[Node],
figtype: str,
title_getter: TitleGetter | None = None,
override: bool = False,
) -> None:
logger.debug(
'[app] adding enumerable node: (%r, %r, %r)', node, figtype, title_getter
)
if node in self.enumerable_nodes and not override:
raise ExtensionError(__('enumerable_node %r already registered') % node)
self.enumerable_nodes[node] = (figtype, title_getter)
def add_html_math_renderer(
self,
name: str,
inline_renderers: _MathsInlineRenderers | None,
block_renderers: _MathsBlockRenderers | None,
) -> None:
logger.debug(
'[app] adding html_math_renderer: %s, %r, %r',
name,
inline_renderers,
block_renderers,
)
if name in self.html_inline_math_renderers:
raise ExtensionError(__('math renderer %s is already registered') % name)
if inline_renderers is not None:
self.html_inline_math_renderers[name] = inline_renderers
if block_renderers is not None:
self.html_block_math_renderers[name] = block_renderers
def add_html_theme(self, name: str, theme_path: str | os.PathLike[str]) -> None:
self.html_themes[name] = _StrPath(theme_path)
def load_extension(self, app: Sphinx, extname: str) -> None:
"""Load a Sphinx extension."""
if extname in app.extensions: # already loaded
return
if extname in EXTENSION_BLACKLIST:
logger.warning(
__(
'the extension %r was already merged with Sphinx since '
'version %s; this extension is ignored.'
),
extname,
EXTENSION_BLACKLIST[extname],
)
return
# update loading context
prefix = __('while setting up extension %s:') % extname
with prefixed_warnings(prefix):
try:
mod = import_module(extname)
except ImportError as err:
logger.verbose(__('Original exception:\n') + traceback.format_exc())
raise ExtensionError(
__('Could not import extension %s') % extname, err
) from err
setup: _ExtensionSetupFunc | None = getattr(mod, 'setup', None)
if setup is None:
logger.warning(
__(
'extension %r has no setup() function; is it really '
'a Sphinx extension module?'
),
extname,
)
metadata: ExtensionMetadata = {}
else:
try:
metadata = setup(app)
except VersionRequirementError as err:
# add the extension name to the version required
raise VersionRequirementError(
__(
'The %s extension used by this project needs at least '
'Sphinx v%s; it therefore cannot be built with this '
'version.'
)
% (extname, err),
) from err
if metadata is None:
metadata = {}
elif not isinstance(metadata, dict):
logger.warning(
__(
'extension %r returned an unsupported object from '
'its setup() function; it should return None or a '
'metadata dictionary'
),
extname,
)
metadata = {}
app.extensions[extname] = Extension(extname, mod, **metadata)
def get_envversion(self, app: Sphinx) -> Mapping[str, int]:
from sphinx.environment import _get_env_version
return _get_env_version(app.extensions)
def merge_source_suffix(app: Sphinx, config: Config) -> None:
"""Merge any user-specified source_suffix with any added by extensions."""
for suffix, filetype in app.registry.source_suffix.items():
if suffix not in app.config.source_suffix:
app.config.source_suffix[suffix] = filetype
elif app.config.source_suffix[suffix] == 'restructuredtext':
# The filetype is not specified (default filetype).
# So it overrides default filetype by extensions setting.
app.config.source_suffix[suffix] = filetype
elif app.config.source_suffix[suffix] is None:
msg = __('`None` is not a valid filetype for %r.') % suffix
logger.warning(msg)
app.config.source_suffix[suffix] = filetype
# copy config.source_suffix to registry
app.registry.source_suffix = app.config.source_suffix
def setup(app: Sphinx) -> ExtensionMetadata:
app.connect('config-inited', merge_source_suffix, priority=800)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| SphinxComponentRegistry |
python | google__pytype | pytype/load_pytd_test.py | {
"start": 536,
"end": 1034
} | class ____(test_base.UnitTest):
"""Tests for load_pytd.Module."""
def test_is_package(self):
for filename, is_package in [
("foo/bar.pyi", False),
("foo/__init__.pyi", True),
("foo/__init__.pyi-1", True),
("foo/__init__.pickled", True),
(os.devnull, True),
]:
with self.subTest(filename=filename):
mod = load_pytd.Module(module_name=None, filename=filename, ast=None)
self.assertEqual(mod.is_package(), is_package)
| ModuleTest |
python | kamyu104__LeetCode-Solutions | Python/find-if-digit-game-can-be-won.py | {
"start": 56,
"end": 425
} | class ____(object):
def canAliceWin(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
total1 = total2 = 0
for x in nums:
if x < 10:
total1 += x
else:
total2 += x
return total1 != total2
# Time: O(n)
# Space: O(1)
# brute force, game theory
| Solution |
python | PrefectHQ__prefect | src/prefect/client/schemas/actions.py | {
"start": 22324,
"end": 22826
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to update a v2 concurrency limit."""
active: Optional[bool] = Field(default=None)
name: Optional[Name] = Field(default=None)
limit: Optional[NonNegativeInteger] = Field(default=None)
active_slots: Optional[NonNegativeInteger] = Field(default=None)
denied_slots: Optional[NonNegativeInteger] = Field(default=None)
slot_decay_per_second: Optional[NonNegativeFloat] = Field(default=None)
| ConcurrencyLimitV2Update |
python | doocs__leetcode | solution/1200-1299/1284.Minimum Number of Flips to Convert Binary Matrix to Zero Matrix/Solution.py | {
"start": 0,
"end": 1135
} | class ____:
def minFlips(self, mat: List[List[int]]) -> int:
m, n = len(mat), len(mat[0])
state = sum(1 << (i * n + j) for i in range(m) for j in range(n) if mat[i][j])
q = deque([state])
vis = {state}
ans = 0
dirs = [0, -1, 0, 1, 0, 0]
while q:
for _ in range(len(q)):
state = q.popleft()
if state == 0:
return ans
for i in range(m):
for j in range(n):
nxt = state
for k in range(5):
x, y = i + dirs[k], j + dirs[k + 1]
if not 0 <= x < m or not 0 <= y < n:
continue
if nxt & (1 << (x * n + y)):
nxt -= 1 << (x * n + y)
else:
nxt |= 1 << (x * n + y)
if nxt not in vis:
vis.add(nxt)
q.append(nxt)
ans += 1
return -1
| Solution |
python | streamlit__streamlit | lib/tests/streamlit/web/server/stats_handler_test.py | {
"start": 1090,
"end": 5801
} | class ____(tornado.testing.AsyncHTTPTestCase):
def get_app(self):
self.mock_stats = []
mock_stats_manager = MagicMock()
mock_stats_manager.get_stats = MagicMock(side_effect=lambda: self.mock_stats)
return tornado.web.Application(
[
(
rf"/{METRIC_ENDPOINT}",
StatsRequestHandler,
dict(stats_manager=mock_stats_manager),
)
]
)
def test_no_stats(self):
"""If we have no stats, we expect to see just the header and footer."""
response = self.fetch("/_stcore/metrics")
assert response.code == 200
expected_body = (
b"# TYPE cache_memory_bytes gauge\n"
b"# UNIT cache_memory_bytes bytes\n"
b"# HELP Total memory consumed by a cache.\n"
b"# EOF\n"
)
assert expected_body == response.body
def test_deprecated_endpoint(self):
response = self.fetch("/st-metrics")
assert response.code == 200
assert (
response.headers["link"]
== f'<http://127.0.0.1:{self.get_http_port()}/_stcore/metrics>; rel="alternate"'
)
assert response.headers["deprecation"] == "True"
def test_has_stats(self):
self.mock_stats = [
CacheStat(
category_name="st.singleton",
cache_name="foo",
byte_length=128,
),
CacheStat(
category_name="st.memo",
cache_name="bar",
byte_length=256,
),
]
response = self.fetch("/_stcore/metrics")
assert response.code == 200
assert response.headers.get("Content-Type") == "application/openmetrics-text"
expected_body = (
b"# TYPE cache_memory_bytes gauge\n"
b"# UNIT cache_memory_bytes bytes\n"
b"# HELP Total memory consumed by a cache.\n"
b'cache_memory_bytes{cache_type="st.singleton",cache="foo"} 128\n'
b'cache_memory_bytes{cache_type="st.memo",cache="bar"} 256\n'
b"# EOF\n"
)
assert expected_body == response.body
def test_new_metrics_endpoint_should_not_display_deprecation_warning(self):
response = self.fetch("/_stcore/metrics")
assert "link" not in response.headers
assert "deprecation" not in response.headers
def test_protobuf_stats(self):
"""Stats requests are returned in OpenMetrics protobuf format
if the request's Content-Type header is protobuf.
"""
self.mock_stats = [
CacheStat(
category_name="st.singleton",
cache_name="foo",
byte_length=128,
),
CacheStat(
category_name="st.memo",
cache_name="bar",
byte_length=256,
),
]
# Requests can have multiple Accept headers. Only one of them needs
# to specify protobuf in order to get back protobuf.
headers = HTTPHeaders()
headers.add("Accept", "application/openmetrics-text")
headers.add("Accept", "application/x-protobuf")
headers.add("Accept", "text/html")
response = self.fetch("/_stcore/metrics", headers=headers)
assert response.code == 200
assert response.headers.get("Content-Type") == "application/x-protobuf"
metric_set = MetricSetProto()
metric_set.ParseFromString(response.body)
expected = {
"metricFamilies": [
{
"name": "cache_memory_bytes",
"type": "GAUGE",
"unit": "bytes",
"help": "Total memory consumed by a cache.",
"metrics": [
{
"labels": [
{"name": "cache_type", "value": "st.singleton"},
{"name": "cache", "value": "foo"},
],
"metricPoints": [{"gaugeValue": {"intValue": "128"}}],
},
{
"labels": [
{"name": "cache_type", "value": "st.memo"},
{"name": "cache", "value": "bar"},
],
"metricPoints": [{"gaugeValue": {"intValue": "256"}}],
},
],
}
]
}
assert expected == MessageToDict(metric_set)
| StatsHandlerTest |
python | prabhupant__python-ds | data_structures/segment_tree/seg_tree_max.py | {
"start": 12,
"end": 1456
} | class ____():
def __init__(self, n, arr = None):
self._t = [-sys.maxsize - 1] * (4 * n)
self._n = n
if arr: self._build(arr, 1 , 0 , n - 1)
def _build(self, a, v, tl, tr):
if tl == tr:
self._t[v] = a[tl]
else:
tm = (tl + tr) // 2
self._build(a, v*2, tl, tm)
self._build(a, v*2+1, tm+1, tr)
self._t[v] = max(self._t[v*2] ,self._t[v*2+1])
def _max_util(self, v, tl, tr, l, r):
if l > r: return -sys.maxsize - 1
if l == tl and r == tr : return self._t[v]
tm = (tl + tr) // 2
return max(self._max_util(v*2, tl, tm, l, min(r, tm)) ,self._max_util(v*2+1, tm+1, tr, max(l, tm+1), r))
def _update_util(self, v, tl, tr, pos, new_val):
if tl == tr:
self._t[v] = new_val
else:
tm = (tl + tr) // 2
if pos <= tm: self._update_util(v*2, tl, tm, pos, new_val)
else: self._update_util(v*2+1, tm+1, tr, pos, new_val)
self._t[v] = max(self._t[v*2] , self._t[v*2+1])
def max_element(self, l, r):
return self._max_util(1 , 0 , self._n - 1 , l , r)
def update(self, pos, new_val):
self._update_util(1 , 0 , self._n - 1 , pos , new_val)
def add(self, pos, change):
value = self.max_element(pos , pos)
self._update_util(1 , 0 , self._n - 1 , pos , value + change) | MaxSegTree |
python | scikit-learn__scikit-learn | sklearn/utils/_param_validation.py | {
"start": 24032,
"end": 28589
} | class ____:
"""Class encapsulating a constraint not meant to be exposed to the user.
Parameters
----------
constraint : str or _Constraint instance
The constraint to be used internally.
"""
def __init__(self, constraint):
self.constraint = constraint
def generate_invalid_param_val(constraint):
"""Return a value that does not satisfy the constraint.
Raises a NotImplementedError if there exists no invalid value for this constraint.
This is only useful for testing purpose.
Parameters
----------
constraint : _Constraint instance
The constraint to generate a value for.
Returns
-------
val : object
A value that does not satisfy the constraint.
"""
if isinstance(constraint, StrOptions):
return f"not {' or '.join(constraint.options)}"
if isinstance(constraint, MissingValues):
return np.array([1, 2, 3])
if isinstance(constraint, _VerboseHelper):
return -1
if isinstance(constraint, HasMethods):
return type("HasNotMethods", (), {})()
if isinstance(constraint, _IterablesNotString):
return "a string"
if isinstance(constraint, _CVObjects):
return "not a cv object"
if isinstance(constraint, Interval) and constraint.type is Integral:
if constraint.left is not None:
return constraint.left - 1
if constraint.right is not None:
return constraint.right + 1
# There's no integer outside (-inf, +inf)
raise NotImplementedError
if isinstance(constraint, Interval) and constraint.type in (Real, RealNotInt):
if constraint.left is not None:
return constraint.left - 1e-6
if constraint.right is not None:
return constraint.right + 1e-6
# bounds are -inf, +inf
if constraint.closed in ("right", "neither"):
return -np.inf
if constraint.closed in ("left", "neither"):
return np.inf
# interval is [-inf, +inf]
return np.nan
raise NotImplementedError
def generate_valid_param(constraint):
"""Return a value that does satisfy a constraint.
This is only useful for testing purpose.
Parameters
----------
constraint : Constraint instance
The constraint to generate a value for.
Returns
-------
val : object
A value that does satisfy the constraint.
"""
if isinstance(constraint, _ArrayLikes):
return np.array([1, 2, 3])
if isinstance(constraint, _SparseMatrices):
return csr_matrix([[0, 1], [1, 0]])
if isinstance(constraint, _RandomStates):
return np.random.RandomState(42)
if isinstance(constraint, _Callables):
return lambda x: x
if isinstance(constraint, _NoneConstraint):
return None
if isinstance(constraint, _InstancesOf):
if constraint.type is np.ndarray:
# special case for ndarray since it can't be instantiated without arguments
return np.array([1, 2, 3])
if constraint.type in (Integral, Real):
# special case for Integral and Real since they are abstract classes
return 1
return constraint.type()
if isinstance(constraint, _Booleans):
return True
if isinstance(constraint, _VerboseHelper):
return 1
if isinstance(constraint, MissingValues) and constraint.numeric_only:
return np.nan
if isinstance(constraint, MissingValues) and not constraint.numeric_only:
return "missing"
if isinstance(constraint, HasMethods):
return type(
"ValidHasMethods", (), {m: lambda self: None for m in constraint.methods}
)()
if isinstance(constraint, _IterablesNotString):
return [1, 2, 3]
if isinstance(constraint, _CVObjects):
return 5
if isinstance(constraint, Options): # includes StrOptions
for option in constraint.options:
return option
if isinstance(constraint, Interval):
interval = constraint
if interval.left is None and interval.right is None:
return 0
elif interval.left is None:
return interval.right - 1
elif interval.right is None:
return interval.left + 1
else:
if interval.type is Real:
return (interval.left + interval.right) / 2
else:
return interval.left + 1
raise ValueError(f"Unknown constraint type: {constraint}")
| Hidden |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/datafusion.py | {
"start": 1667,
"end": 2092
} | class ____:
"""Data Fusion pipeline states."""
PENDING = "PENDING"
STARTING = "STARTING"
RUNNING = "RUNNING"
SUSPENDED = "SUSPENDED"
RESUMING = "RESUMING"
COMPLETED = "COMPLETED"
FAILED = "FAILED"
KILLED = "KILLED"
REJECTED = "REJECTED"
FAILURE_STATES = [PipelineStates.FAILED, PipelineStates.KILLED, PipelineStates.REJECTED]
SUCCESS_STATES = [PipelineStates.COMPLETED]
| PipelineStates |
python | mahmoud__boltons | boltons/statsutils.py | {
"start": 5764,
"end": 6396
} | class ____:
def __init__(self, name, func):
self.name = name
self.func = func
self.internal_name = '_' + name
doc = func.__doc__ or ''
pre_doctest_doc, _, _ = doc.partition('>>>')
self.__doc__ = pre_doctest_doc
def __get__(self, obj, objtype=None):
if obj is None:
return self
if not obj.data:
return obj.default
try:
return getattr(obj, self.internal_name)
except AttributeError:
setattr(obj, self.internal_name, self.func(obj))
return getattr(obj, self.internal_name)
| _StatsProperty |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/externalmodule/package.py | {
"start": 217,
"end": 431
} | class ____(Package):
homepage = "http://somewhere.com"
url = "http://somewhere.com/module-1.0.tar.gz"
version("1.0", md5="1234567890abcdef1234567890abcdef")
depends_on("externalprereq")
| Externalmodule |
python | walkccc__LeetCode | solutions/2673. Make Costs of Paths Equal in a Binary Tree/2673.py | {
"start": 0,
"end": 395
} | class ____:
def minIncrements(self, n: int, cost: list[int]) -> int:
ans = 0
for i in range(n // 2 - 1, -1, -1):
l = i * 2 + 1
r = i * 2 + 2
ans += abs(cost[l] - cost[r])
# Record the information in the parent from the children. So, there's need to actually
# update the values in the children.
cost[i] += max(cost[l], cost[r])
return ans
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/override1.py | {
"start": 1812,
"end": 1859
} | class ____:
def method1(self):
pass
| F |
python | getsentry__sentry | tests/sentry/rules/conditions/test_every_event.py | {
"start": 199,
"end": 473
} | class ____(RuleTestCase):
rule_cls = EveryEventCondition
def test_applies_correctly(self) -> None:
rule = self.get_rule()
self.assertPasses(rule, self.event, is_new=True)
self.assertPasses(rule, self.event, is_new=False)
| EveryEventConditionTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/streams.py | {
"start": 4553,
"end": 9307
} | class ____(IterableStream, CheckpointMixin, ABC):
"""
This stream utilize "export" Iterable api for getting large amount of data.
It can return data in form of new line separater strings each of each
representing json object.
Data could be windowed by date ranges by applying startDateTime and
endDateTime parameters. Single request could return large volumes of data
and request rate is limited by 4 requests per minute.
Details: https://api.iterable.com/api/docs#export_exportDataJson
"""
cursor_field = "createdAt"
primary_key = None
@property
def state(self) -> MutableMapping[str, Any]:
return self._state
@state.setter
def state(self, value: MutableMapping[str, Any]):
self._state = value
def __init__(self, start_date=None, end_date=None, **kwargs):
super().__init__(**kwargs)
self._start_date = pendulum.parse(start_date)
self._end_date = end_date and pendulum.parse(end_date)
self.stream_params = {"dataTypeName": self.data_field}
def path(self, **kwargs) -> str:
return "export/data.json"
@staticmethod
def _field_to_datetime(value: Union[int, str]) -> pendulum.datetime:
if isinstance(value, int):
value = pendulum.from_timestamp(value / 1000.0)
elif isinstance(value, str):
value = dateutil_parse(value)
else:
raise ValueError(f"Unsupported type of datetime field {type(value)}")
return value
def read_records(self, **kwargs) -> Iterable[Mapping[str, Any]]:
for record in super().read_records(**kwargs):
self.state = self._get_updated_state(self.state, record)
yield record
def _get_updated_state(
self,
current_stream_state: MutableMapping[str, Any],
latest_record: Mapping[str, Any],
) -> Mapping[str, Any]:
"""
Return the latest state by comparing the cursor value in the latest record with the stream's most recent state object
and returning an updated state object.
"""
latest_benchmark = latest_record[self.cursor_field]
if current_stream_state.get(self.cursor_field):
return {
self.cursor_field: str(
max(
latest_benchmark,
self._field_to_datetime(current_stream_state[self.cursor_field]),
)
)
}
return {self.cursor_field: str(latest_benchmark)}
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: StreamSlice,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state=stream_state)
params.update(
{
"startDateTime": stream_slice.start_date.strftime("%Y-%m-%d %H:%M:%S"),
"endDateTime": stream_slice.end_date.strftime("%Y-%m-%d %H:%M:%S"),
},
**self.stream_params,
)
return params
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
for obj in response.iter_lines():
record = json.loads(obj)
record[self.cursor_field] = self._field_to_datetime(record[self.cursor_field])
yield record
def request_kwargs(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Mapping[str, Any]:
"""
https://api.iterable.com/api/docs#export_exportDataJson
Sending those type of requests could download large piece of json
objects splitted with newline character.
Passing stream=True argument to requests.session.send method to avoid
loading whole analytics report content into memory.
"""
return {
**super().request_kwargs(stream_state, stream_slice, next_page_token),
"stream": True,
}
def get_start_date(self, stream_state: Mapping[str, Any]) -> DateTime:
stream_state = stream_state or {}
start_datetime = self._start_date
if stream_state.get(self.cursor_field):
start_datetime = pendulum.parse(stream_state[self.cursor_field])
return start_datetime
def stream_slices(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Optional[StreamSlice]]:
start_datetime = self.get_start_date(stream_state)
return [StreamSlice(start_datetime, self._end_date or pendulum.now("UTC"))]
| IterableExportStream |
python | conda__conda | conda/api.py | {
"start": 14511,
"end": 17516
} | class ____:
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
High-level management and usage of conda environment prefixes.
"""
def __init__(self, prefix_path):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Args:
prefix_path (str):
"""
self._internal = _PrefixData(prefix_path)
def get(self, package_ref, default=NULL):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Args:
package_ref (PackageRef):
A :obj:`PackageRef` instance representing the key for the
:obj:`PrefixRecord` being sought.
default: The default value to return if the record does not exist. If not
specified and no record exists, :exc:`KeyError` is raised.
Returns:
PrefixRecord
"""
return self._internal.get(package_ref.name, default)
def query(self, package_ref_or_match_spec):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Run a query against this specific prefix instance.
Args:
package_ref_or_match_spec (PackageRef or MatchSpec or str):
Either an exact :obj:`PackageRef` to match against, or a :obj:`MatchSpec`
query object. A :obj:`str` will be turned into a :obj:`MatchSpec` automatically.
Returns:
tuple[PrefixRecord]
"""
return tuple(self._internal.query(package_ref_or_match_spec))
def iter_records(self):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Returns:
Iterable[PrefixRecord]: A generator over all records contained in the prefix.
Warning: this is a generator that is exhausted on first use.
"""
return self._internal.iter_records()
@property
def is_writable(self):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Indicates if the prefix is writable or read-only.
Returns:
bool or None:
True if the prefix is writable. False if read-only. None if the prefix
does not exist as a conda environment.
"""
return self._internal.is_writable
def reload(self):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Update the instance with new information. Backing information (i.e. contents of
the conda-meta directory) is lazily loaded on first use by the other methods of this
class. You should only use this method if you are *sure* you have outdated data.
Returns:
PrefixData
"""
self._internal = self._internal.reload()
return self
| PrefixData |
python | optuna__optuna | optuna/trial/_fixed.py | {
"start": 758,
"end": 6419
} | class ____(BaseTrial):
"""A trial class which suggests a fixed value for each parameter.
This object has the same methods as :class:`~optuna.trial.Trial`, and it suggests pre-defined
parameter values. The parameter values can be determined at the construction of the
:class:`~optuna.trial.FixedTrial` object. In contrast to :class:`~optuna.trial.Trial`,
:class:`~optuna.trial.FixedTrial` does not depend on :class:`~optuna.study.Study`, and it is
useful for deploying optimization results.
Example:
Evaluate an objective function with parameter values given by a user.
.. testcode::
import optuna
def objective(trial):
x = trial.suggest_float("x", -100, 100)
y = trial.suggest_categorical("y", [-1, 0, 1])
return x**2 + y
assert objective(optuna.trial.FixedTrial({"x": 1, "y": 0})) == 1
.. note::
Please refer to :class:`~optuna.trial.Trial` for details of methods and properties.
Args:
params:
A dictionary containing all parameters.
number:
A trial number. Defaults to ``0``.
"""
def __init__(self, params: dict[str, Any], number: int = 0) -> None:
self._params = params
self._suggested_params: dict[str, Any] = {}
self._distributions: dict[str, BaseDistribution] = {}
self._user_attrs: dict[str, Any] = {}
self._system_attrs: dict[str, Any] = {}
self._datetime_start = datetime.datetime.now()
self._number = number
def suggest_float(
self,
name: str,
low: float,
high: float,
*,
step: float | None = None,
log: bool = False,
) -> float:
return self._suggest(name, FloatDistribution(low, high, log=log, step=step))
@deprecated_func("3.0.0", "6.0.0", text=_suggest_deprecated_msg.format(args=""))
def suggest_uniform(self, name: str, low: float, high: float) -> float:
return self.suggest_float(name, low, high)
@deprecated_func("3.0.0", "6.0.0", text=_suggest_deprecated_msg.format(args="(..., log=True)"))
def suggest_loguniform(self, name: str, low: float, high: float) -> float:
return self.suggest_float(name, low, high, log=True)
@deprecated_func("3.0.0", "6.0.0", text=_suggest_deprecated_msg.format(args="(..., step=...)"))
def suggest_discrete_uniform(self, name: str, low: float, high: float, q: float) -> float:
return self.suggest_float(name, low, high, step=q)
@convert_positional_args(
previous_positional_arg_names=_SUGGEST_INT_POSITIONAL_ARGS,
deprecated_version="3.5.0",
removed_version="5.0.0",
)
def suggest_int(
self, name: str, low: int, high: int, *, step: int = 1, log: bool = False
) -> int:
return int(self._suggest(name, IntDistribution(low, high, log=log, step=step)))
@overload
def suggest_categorical(self, name: str, choices: Sequence[None]) -> None: ...
@overload
def suggest_categorical(self, name: str, choices: Sequence[bool]) -> bool: ...
@overload
def suggest_categorical(self, name: str, choices: Sequence[int]) -> int: ...
@overload
def suggest_categorical(self, name: str, choices: Sequence[float]) -> float: ...
@overload
def suggest_categorical(self, name: str, choices: Sequence[str]) -> str: ...
@overload
def suggest_categorical(
self, name: str, choices: Sequence[CategoricalChoiceType]
) -> CategoricalChoiceType: ...
def suggest_categorical(
self, name: str, choices: Sequence[CategoricalChoiceType]
) -> CategoricalChoiceType:
return self._suggest(name, CategoricalDistribution(choices=choices))
def report(self, value: float, step: int) -> None:
pass
def should_prune(self) -> bool:
return False
def set_user_attr(self, key: str, value: Any) -> None:
self._user_attrs[key] = value
@deprecated_func("3.1.0", "5.0.0")
def set_system_attr(self, key: str, value: Any) -> None:
self._system_attrs[key] = value
def _suggest(self, name: str, distribution: BaseDistribution) -> Any:
if name not in self._params:
raise ValueError(
"The value of the parameter '{}' is not found. Please set it at "
"the construction of the FixedTrial object.".format(name)
)
value = self._params[name]
param_value_in_internal_repr = distribution.to_internal_repr(value)
if not distribution._contains(param_value_in_internal_repr):
optuna_warn(
"The value {} of the parameter '{}' is out of "
"the range of the distribution {}.".format(value, name, distribution)
)
if name in self._distributions:
distributions.check_distribution_compatibility(self._distributions[name], distribution)
self._suggested_params[name] = value
self._distributions[name] = distribution
return value
@property
def params(self) -> dict[str, Any]:
return self._suggested_params
@property
def distributions(self) -> dict[str, BaseDistribution]:
return self._distributions
@property
def user_attrs(self) -> dict[str, Any]:
return self._user_attrs
@property
def system_attrs(self) -> dict[str, Any]:
return self._system_attrs
@property
def datetime_start(self) -> datetime.datetime | None:
return self._datetime_start
@property
def number(self) -> int:
return self._number
| FixedTrial |
python | pallets__flask | src/flask/json/tag.py | {
"start": 3863,
"end": 4193
} | class ____(JSONTag):
__slots__ = ()
key = " t"
def check(self, value: t.Any) -> bool:
return isinstance(value, tuple)
def to_json(self, value: t.Any) -> t.Any:
return [self.serializer.tag(item) for item in value]
def to_python(self, value: t.Any) -> t.Any:
return tuple(value)
| TagTuple |
python | instagram__MonkeyType | monkeytype/db/base.py | {
"start": 1926,
"end": 2398
} | class ____(CallTraceLogger):
"""A CallTraceLogger that stores logged traces in a CallTraceStore."""
def __init__(self, store: CallTraceStore) -> None:
self.store = store
self.traces: List[CallTrace] = []
def log(self, trace: CallTrace) -> None:
if not trace.func.__module__ == "__main__":
self.traces.append(trace)
def flush(self) -> None:
self.store.add(self.traces)
self.traces = []
| CallTraceStoreLogger |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {
"start": 16506,
"end": 16666
} | class ____(Protocol):
async def start(self) -> None: ...
async def commit(self) -> None: ...
async def rollback(self) -> None: ...
| _AsyncpgTransaction |
python | crytic__slither | slither/slithir/variables/reference.py | {
"start": 313,
"end": 2153
} | class ____(Variable):
def __init__(self, node: "Node", index: Optional[int] = None) -> None:
super().__init__()
if index is None:
self._index = node.compilation_unit.counter_slithir_reference
node.compilation_unit.counter_slithir_reference += 1
else:
self._index = index
self._points_to = None
self._node = node
@property
def node(self) -> "Node":
return self._node
@property
def index(self):
return self._index
@index.setter
def index(self, idx):
self._index = idx
@property
def points_to(self):
"""
Return the variable pointer by the reference
It is the left member of a Index or Member operator
"""
return self._points_to
@points_to.setter
def points_to(self, points_to):
# Can only be a rvalue of
# Member or Index operator
# pylint: disable=import-outside-toplevel
from slither.slithir.utils.utils import is_valid_lvalue
assert is_valid_lvalue(points_to) or isinstance(
points_to, (SolidityVariable, Contract, Enum, TopLevelVariable)
)
self._points_to = points_to
@property
def points_to_origin(self):
points = self.points_to
while isinstance(points, ReferenceVariable):
points = points.points_to
return points
@property
def name(self) -> str:
return f"REF_{self.index}"
# overide of core.variables.variables
# reference can have Function has a type
# to handle the function selector
def set_type(self, t) -> None:
if not isinstance(t, Function):
super().set_type(t)
else:
self._type = t
def __str__(self) -> str:
return self.name
| ReferenceVariable |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/call_tip_widget.py | {
"start": 161,
"end": 10683
} | class ____(QtWidgets.QLabel):
""" Shows call tips by parsing the current text of Q[Plain]TextEdit.
"""
#--------------------------------------------------------------------------
# 'QObject' interface
#--------------------------------------------------------------------------
def __init__(self, text_edit):
""" Create a call tip manager that is attached to the specified Qt
text edit widget.
"""
assert isinstance(text_edit, (QtWidgets.QTextEdit, QtWidgets.QPlainTextEdit))
super().__init__(None, QtCore.Qt.ToolTip)
text_edit.destroyed.connect(self.deleteLater)
self._hide_timer = QtCore.QBasicTimer()
self._text_edit = text_edit
self.setFont(text_edit.document().defaultFont())
self.setForegroundRole(QtGui.QPalette.ToolTipText)
self.setBackgroundRole(QtGui.QPalette.ToolTipBase)
self.setPalette(QtWidgets.QToolTip.palette())
self.setAlignment(QtCore.Qt.AlignLeft)
self.setIndent(1)
self.setFrameStyle(QtWidgets.QFrame.NoFrame)
self.setMargin(1 + self.style().pixelMetric(
QtWidgets.QStyle.PM_ToolTipLabelFrameWidth, None, self))
self.setWindowOpacity(self.style().styleHint(
QtWidgets.QStyle.SH_ToolTipLabel_Opacity, None, self, None) / 255.0)
self.setWordWrap(True)
def eventFilter(self, obj, event):
""" Reimplemented to hide on certain key presses and on text edit focus
changes.
"""
if obj == self._text_edit:
etype = event.type()
if etype == QtCore.QEvent.KeyPress:
key = event.key()
if key in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
self.hide()
elif key == QtCore.Qt.Key_Escape:
self.hide()
return True
elif etype == QtCore.QEvent.FocusOut:
self.hide()
elif etype == QtCore.QEvent.Enter:
self._hide_timer.stop()
elif etype == QtCore.QEvent.Leave:
self._leave_event_hide()
return super().eventFilter(obj, event)
def timerEvent(self, event):
""" Reimplemented to hide the widget when the hide timer fires.
"""
if event.timerId() == self._hide_timer.timerId():
self._hide_timer.stop()
self.hide()
#--------------------------------------------------------------------------
# 'QWidget' interface
#--------------------------------------------------------------------------
def enterEvent(self, event):
""" Reimplemented to cancel the hide timer.
"""
super().enterEvent(event)
self._hide_timer.stop()
def hideEvent(self, event):
""" Reimplemented to disconnect signal handlers and event filter.
"""
super().hideEvent(event)
# This fixes issue jupyter/qtconsole#383
try:
self._text_edit.cursorPositionChanged.disconnect(
self._cursor_position_changed)
except TypeError:
pass
self._text_edit.removeEventFilter(self)
def leaveEvent(self, event):
""" Reimplemented to start the hide timer.
"""
super().leaveEvent(event)
self._leave_event_hide()
def paintEvent(self, event):
""" Reimplemented to paint the background panel.
"""
painter = QtWidgets.QStylePainter(self)
option = QtWidgets.QStyleOptionFrame()
option.initFrom(self)
painter.drawPrimitive(QtWidgets.QStyle.PE_PanelTipLabel, option)
painter.end()
super().paintEvent(event)
def setFont(self, font):
""" Reimplemented to allow use of this method as a slot.
"""
super().setFont(font)
def showEvent(self, event):
""" Reimplemented to connect signal handlers and event filter.
"""
super().showEvent(event)
self._text_edit.cursorPositionChanged.connect(
self._cursor_position_changed)
self._text_edit.installEventFilter(self)
def deleteLater(self):
""" Avoids an error when the widget has already been deleted.
Fixes jupyter/qtconsole#507.
"""
try:
return super().deleteLater()
except RuntimeError:
pass
#--------------------------------------------------------------------------
# 'CallTipWidget' interface
#--------------------------------------------------------------------------
def show_inspect_data(self, content, maxlines=20):
"""Show inspection data as a tooltip"""
data = content.get('data', {})
text = data.get('text/plain', '')
match = re.match("(?:[^\n]*\n){%i}" % maxlines, text)
if match:
text = text[:match.end()] + '\n[Documentation continues...]'
return self.show_tip(self._format_tooltip(text))
def show_tip(self, tip):
""" Attempts to show the specified tip at the current cursor location.
"""
# Attempt to find the cursor position at which to show the call tip.
text_edit = self._text_edit
document = text_edit.document()
cursor = text_edit.textCursor()
search_pos = cursor.position() - 1
self._start_position, _ = self._find_parenthesis(search_pos,
forward=False)
if self._start_position == -1:
return False
# Set the text and resize the widget accordingly.
self.setText(tip)
self.resize(self.sizeHint())
# Locate and show the widget. Place the tip below the current line
# unless it would be off the screen. In that case, decide the best
# location based trying to minimize the area that goes off-screen.
padding = 3 # Distance in pixels between cursor bounds and tip box.
cursor_rect = text_edit.cursorRect(cursor)
if QT6:
screen_rect = text_edit.screen().geometry()
else:
screen_rect = QtWidgets.QApplication.instance().desktop().screenGeometry(text_edit)
point = text_edit.mapToGlobal(cursor_rect.bottomRight())
point.setY(point.y() + padding)
tip_height = self.size().height()
tip_width = self.size().width()
vertical = 'bottom'
horizontal = 'Right'
if point.y() + tip_height > screen_rect.height() + screen_rect.y():
point_ = text_edit.mapToGlobal(cursor_rect.topRight())
# If tip is still off screen, check if point is in top or bottom
# half of screen.
if point_.y() - tip_height < padding:
# If point is in upper half of screen, show tip below it.
# otherwise above it.
if 2*point.y() < screen_rect.height():
vertical = 'bottom'
else:
vertical = 'top'
else:
vertical = 'top'
if point.x() + tip_width > screen_rect.width() + screen_rect.x():
point_ = text_edit.mapToGlobal(cursor_rect.topRight())
# If tip is still off-screen, check if point is in the right or
# left half of the screen.
if point_.x() - tip_width < padding:
if 2*point.x() < screen_rect.width():
horizontal = 'Right'
else:
horizontal = 'Left'
else:
horizontal = 'Left'
pos = getattr(cursor_rect, '%s%s' %(vertical, horizontal))
point = text_edit.mapToGlobal(pos())
point.setY(point.y() + padding)
if vertical == 'top':
point.setY(point.y() - tip_height)
if horizontal == 'Left':
point.setX(point.x() - tip_width - padding)
self.move(point)
self.show()
return True
#--------------------------------------------------------------------------
# Protected interface
#--------------------------------------------------------------------------
def _find_parenthesis(self, position, forward=True):
""" If 'forward' is True (resp. False), proceed forwards
(resp. backwards) through the line that contains 'position' until an
unmatched closing (resp. opening) parenthesis is found. Returns a
tuple containing the position of this parenthesis (or -1 if it is
not found) and the number commas (at depth 0) found along the way.
"""
commas = depth = 0
document = self._text_edit.document()
char = document.characterAt(position)
# Search until a match is found or a non-printable character is
# encountered.
while category(char) != 'Cc' and position > 0:
if char == ',' and depth == 0:
commas += 1
elif char == ')':
if forward and depth == 0:
break
depth += 1
elif char == '(':
if not forward and depth == 0:
break
depth -= 1
position += 1 if forward else -1
char = document.characterAt(position)
else:
position = -1
return position, commas
def _leave_event_hide(self):
""" Hides the tooltip after some time has passed (assuming the cursor is
not over the tooltip).
"""
if (not self._hide_timer.isActive() and
# If Enter events always came after Leave events, we wouldn't need
# this check. But on Mac OS, it sometimes happens the other way
# around when the tooltip is created.
QtWidgets.QApplication.instance().topLevelAt(QtGui.QCursor.pos()) != self):
self._hide_timer.start(300, self)
def _format_tooltip(self, doc):
doc = re.sub(r'\033\[(\d|;)+?m', '', doc)
return doc
#------ Signal handlers ----------------------------------------------------
def _cursor_position_changed(self):
""" Updates the tip based on user cursor movement.
"""
cursor = self._text_edit.textCursor()
if cursor.position() <= self._start_position:
self.hide()
else:
position, commas = self._find_parenthesis(self._start_position + 1)
if position != -1:
self.hide()
| CallTipWidget |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/basecamp/tests.py | {
"start": 244,
"end": 1538
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = BasecampProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"expires_at": "2012-03-22T16:56:48-05:00",
"identity": {
"id": 9999999,
"first_name": "Jason Fried",
"last_name": "Jason Fried",
"email_address": "jason@example.com"
},
"accounts": [
{
"product": "bcx",
"id": 88888888,
"name": "Wayne Enterprises, Ltd.",
"href": "https://basecamp.com/88888888/api/v1"
},
{
"product": "bcx",
"id": 77777777,
"name": "Veidt, Inc",
"href": "https://basecamp.com/77777777/api/v1"
},
{
"product": "campfire",
"id": 44444444,
"name": "Acme Shipping Co.",
"href": "https://acme4444444.campfirenow.com"
}
]
}""",
)
def get_expected_to_str(self):
return "jason@example.com"
| BasecampTests |
python | pytorch__pytorch | torch/_inductor/pattern_matcher.py | {
"start": 15275,
"end": 16341
} | class ____(ABC):
"""
Base class for types of patterns.
"""
@abstractmethod
def _match(self, node: torch.fx.Node, ctx: MatchContext) -> MatchResult: ...
def match(self, node: torch.fx.Node) -> MatchResult:
try:
return MatchContext([self], graph=node.graph).match(self, node)
except FailedMatch as e:
return e
def has_multiple_users(self) -> bool:
return False
def __repr__(self) -> str:
return self.__class__.__name__ + "()"
def find_anchor_nodes(
self, ctx: MatchContext, searched: OrderedSet[torch.fx.Node]
) -> Generator[Optional[torch.fx.Node], None, None]:
if self in ctx.pattern_to_node:
yield ctx.pattern_to_node[self]
def pattern_eq(self, other: Any) -> bool:
"""
Compare two `PatternExpr`s and return true if they are the
same. Note this is NOT matching a pattern - it is comparing the pattern
structures (for debugging).
"""
return isinstance(other, self.__class__)
| PatternExpr |
python | jazzband__django-oauth-toolkit | tests/test_scopes.py | {
"start": 4786,
"end": 11832
} | class ____(BaseTest):
def test_scopes_protection_valid(self):
"""
Test access to a scope protected resource with correct scopes provided
"""
self.oauth2_settings.PKCE_REQUIRED = False
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "scope1 scope2",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
authorization_code = query_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
access_token = content["access_token"]
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = ScopeResourceView.as_view()
response = view(request)
self.assertEqual(response, "This is a protected resource")
def test_scopes_protection_fail(self):
"""
Test access to a scope protected resource with wrong scopes provided
"""
self.oauth2_settings.PKCE_REQUIRED = False
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "scope2",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
authorization_code = query_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
access_token = content["access_token"]
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = ScopeResourceView.as_view()
response = view(request)
self.assertEqual(response.status_code, 403)
def test_multi_scope_fail(self):
"""
Test access to a multi-scope protected resource with wrong scopes provided
"""
self.oauth2_settings.PKCE_REQUIRED = False
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "scope1 scope3",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
authorization_code = query_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
access_token = content["access_token"]
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = MultiScopeResourceView.as_view()
response = view(request)
self.assertEqual(response.status_code, 403)
def test_multi_scope_valid(self):
"""
Test access to a multi-scope protected resource with correct scopes provided
"""
self.oauth2_settings.PKCE_REQUIRED = False
self.client.login(username="test_user", password="123456")
# retrieve a valid authorization code
authcode_data = {
"client_id": self.application.client_id,
"state": "random_state_string",
"scope": "scope1 scope2",
"redirect_uri": "http://example.org",
"response_type": "code",
"allow": True,
}
response = self.client.post(reverse("oauth2_provider:authorize"), data=authcode_data)
query_dict = parse_qs(urlparse(response["Location"]).query)
authorization_code = query_dict["code"].pop()
# exchange authorization code for a valid access token
token_request_data = {
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": "http://example.org",
}
auth_headers = get_basic_auth_header(self.application.client_id, CLEARTEXT_SECRET)
response = self.client.post(reverse("oauth2_provider:token"), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
access_token = content["access_token"]
# use token to access the resource
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = MultiScopeResourceView.as_view()
response = view(request)
self.assertEqual(response, "This is a protected resource")
| TestScopesProtection |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/errors.py | {
"start": 4344,
"end": 4412
} | class ____(OAuth2Error):
error = 'missing_token'
| MissingTokenError |
python | ApeWorX__ape | tests/functional/test_plugins.py | {
"start": 12722,
"end": 15248
} | class ____:
def test_version_range(self):
actual = ape_version.version_range
expected = f">=0.{ape_version[2]},<0.{int(ape_version[2]) + 1}"
assert actual == expected
def test_next_version_range(self):
actual = ape_version.next_version_range
expected = f">=0.{int(ape_version[2]) + 1},<0.{int(ape_version[2]) + 2}"
assert actual == expected
def test_previous_version_range(self):
actual = ape_version.previous_version_range
expected = f">=0.{int(ape_version[2]) - 2},<0.{int(ape_version[2]) - 1}"
assert actual == expected
@mark_specifiers_less_than_ape
def test_would_be_downgraded(self, specifier):
assert ape_version.would_get_downgraded(specifier)
def test_filter_plugins_from_dists_py39(mocker):
def make_dist(name: str):
mock_dist = mocker.MagicMock()
mock_dist.name = ""
mock_dist.metadata = {"Name": f"ape-{name}"}
return mock_dist
plugins = [make_dist("solidity"), make_dist("vyper"), make_dist("optimism")]
actual = set(_filter_plugins_from_dists(plugins))
expected = {"ape-solidity", "ape-vyper", "ape-optimism"}
assert actual == expected
def test_filter_plugins_from_dists_py310_and_greater(mocker):
def make_dist(name: str):
mock_dist = mocker.MagicMock()
mock_dist.name = f"ape-{name}"
return mock_dist
plugins = [make_dist("solidity"), make_dist("vyper"), make_dist("optimism")]
actual = set(_filter_plugins_from_dists(plugins))
expected = {"ape-solidity", "ape-vyper", "ape-optimism"}
assert actual == expected
@pytest.mark.parametrize("abstract_methods", [("method1", "method2"), {"method1": 0, "method2": 0}])
def test_get_unimplemented_methods_warning_list_containing_plugin(abstract_methods):
plugin_registration = ("foo", "bar", TransactionAPI)
actual = _get_unimplemented_methods_warning(plugin_registration, "p1")
expected = (
"'TransactionAPI' from 'p1' is not fully implemented. "
"Remaining abstract methods: 'serialize_transaction, txn_hash'."
)
assert actual == expected
def test_core_plugins():
# In case any of these happen to be installed, and this feature
# is broken, it will fail. If none are installed, the test will always pass.
non_core_plugins = ("ape_arbitrum", "ape_vyper", "ape_solidity", "ape_ens")
assert not any(p in CORE_PLUGINS_LIST for p in non_core_plugins)
assert "ape_ethereum" in CORE_PLUGINS_LIST
| TestApeVersion |
python | wireservice__csvkit | tests/test_utilities/test_csvformat.py | {
"start": 198,
"end": 2716
} | class ____(CSVKitTestCase, EmptyFileTests):
Utility = CSVFormat
def test_launch_new_instance(self):
with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/dummy.csv']):
launch_new_instance()
def test_skip_lines(self):
self.assertLines(['--skip-lines', '3', '-D', '|', 'examples/test_skip_lines.csv'], [
'a|b|c',
'1|2|3',
])
def test_skip_header(self):
self.assertLines(['--skip-header', 'examples/dummy.csv'], [
'1,2,3',
])
def test_skip_header_no_header_row(self):
self.assertLines(['--no-header-row', '--skip-header', 'examples/no_header_row.csv'], [
'1,2,3',
])
def test_no_header_row(self):
self.assertLines(['--no-header-row', 'examples/no_header_row.csv'], [
'a,b,c',
'1,2,3',
])
def test_linenumbers(self):
self.assertLines(['--linenumbers', 'examples/dummy.csv'], [
'line_number,a,b,c',
'1,1,2,3',
])
def test_delimiter(self):
self.assertLines(['-D', '|', 'examples/dummy.csv'], [
'a|b|c',
'1|2|3',
])
def test_tabs(self):
self.assertLines(['-T', 'examples/dummy.csv'], [
'a\tb\tc',
'1\t2\t3',
])
def test_asv(self):
self.assertLines(['-A', 'examples/dummy.csv'], [
'a\x1fb\x1fc\x1e1\x1f2\x1f3\x1e',
], newline_at_eof=False)
def test_quotechar(self):
input_file = io.BytesIO(b'a,b,c\n1#2,3,4\n')
with stdin_as_string(input_file):
self.assertLines(['-Q', '#'], [
'a,b,c',
'#1##2#,3,4',
])
input_file.close()
def test_doublequote(self):
input_file = io.BytesIO(b'a\n"a ""quoted"" string"')
with stdin_as_string(input_file):
self.assertLines(['-P', '#', '-B'], [
'a',
'a #"quoted#" string',
])
input_file.close()
def test_escapechar(self):
input_file = io.BytesIO(b'a,b,c\n1"2,3,4\n')
with stdin_as_string(input_file):
self.assertLines(['-P', '#', '-U', '3'], [
'a,b,c',
'1#"2,3,4',
])
input_file.close()
def test_lineterminator(self):
self.assertLines(['-M', 'XYZ', 'examples/dummy.csv'], [
'a,b,cXYZ1,2,3XYZ',
], newline_at_eof=False)
| TestCSVFormat |
python | gevent__gevent | src/greentest/3.13/test_queue.py | {
"start": 22523,
"end": 22601
} | class ____(LifoQueueTest, unittest.TestCase):
queue = c_queue
| CLifoQueueTest |
python | pytorch__pytorch | torch/nn/modules/flatten.py | {
"start": 1723,
"end": 5760
} | class ____(Module):
r"""
Unflattens a tensor dim expanding it to a desired shape. For use with :class:`~nn.Sequential`.
* :attr:`dim` specifies the dimension of the input tensor to be unflattened, and it can
be either `int` or `str` when `Tensor` or `NamedTensor` is used, respectively.
* :attr:`unflattened_size` is the new shape of the unflattened dimension of the tensor and it can be
a `tuple` of ints or a `list` of ints or `torch.Size` for `Tensor` input; a `NamedShape`
(tuple of `(name, size)` tuples) for `NamedTensor` input.
Shape:
- Input: :math:`(*, S_{\text{dim}}, *)`, where :math:`S_{\text{dim}}` is the size at
dimension :attr:`dim` and :math:`*` means any number of dimensions including none.
- Output: :math:`(*, U_1, ..., U_n, *)`, where :math:`U` = :attr:`unflattened_size` and
:math:`\prod_{i=1}^n U_i = S_{\text{dim}}`.
Args:
dim (Union[int, str]): Dimension to be unflattened
unflattened_size (Union[torch.Size, Tuple, List, NamedShape]): New shape of the unflattened dimension
Examples:
>>> input = torch.randn(2, 50)
>>> # With tuple of ints
>>> m = nn.Sequential(
>>> nn.Linear(50, 50),
>>> nn.Unflatten(1, (2, 5, 5))
>>> )
>>> output = m(input)
>>> output.size()
torch.Size([2, 2, 5, 5])
>>> # With torch.Size
>>> m = nn.Sequential(
>>> nn.Linear(50, 50),
>>> nn.Unflatten(1, torch.Size([2, 5, 5]))
>>> )
>>> output = m(input)
>>> output.size()
torch.Size([2, 2, 5, 5])
>>> # With namedshape (tuple of tuples)
>>> input = torch.randn(2, 50, names=("N", "features"))
>>> unflatten = nn.Unflatten("features", (("C", 2), ("H", 5), ("W", 5)))
>>> output = unflatten(input)
>>> output.size()
torch.Size([2, 2, 5, 5])
"""
NamedShape = tuple[tuple[str, int]]
__constants__ = ["dim", "unflattened_size"]
dim: int | str
unflattened_size: _size | NamedShape
def __init__(self, dim: int | str, unflattened_size: _size | NamedShape) -> None:
super().__init__()
if isinstance(dim, int):
self._require_tuple_int(unflattened_size)
elif isinstance(dim, str):
self._require_tuple_tuple(unflattened_size)
else:
raise TypeError("invalid argument type for dim parameter")
self.dim = dim
self.unflattened_size = unflattened_size
def _require_tuple_tuple(self, input) -> None:
if isinstance(input, tuple):
for idx, elem in enumerate(input):
if not isinstance(elem, tuple):
raise TypeError(
"unflattened_size must be tuple of tuples, "
+ f"but found element of type {type(elem).__name__} at pos {idx}"
)
return
raise TypeError(
"unflattened_size must be a tuple of tuples, "
+ f"but found type {type(input).__name__}"
)
def _require_tuple_int(self, input) -> None:
if isinstance(input, (tuple, list)):
for idx, elem in enumerate(input):
if not isinstance(elem, int):
raise TypeError(
"unflattened_size must be tuple of ints, "
+ f"but found element of type {type(elem).__name__} at pos {idx}"
)
return
raise TypeError(
f"unflattened_size must be a tuple of ints, but found type {type(input).__name__}"
)
def forward(self, input: Tensor) -> Tensor:
"""
Runs the forward pass.
"""
return input.unflatten(self.dim, self.unflattened_size)
def extra_repr(self) -> str:
"""
Returns the extra representation of the module.
"""
return f"dim={self.dim}, unflattened_size={self.unflattened_size}"
| Unflatten |
python | huggingface__transformers | src/transformers/models/prophetnet/modeling_prophetnet.py | {
"start": 52258,
"end": 65891
} | class ____(ProphetNetPreTrainedModel):
def __init__(self, config: ProphetNetConfig):
super().__init__(config)
self.ngram = config.ngram
self.num_buckets = config.num_buckets
self.relative_max_distance = config.relative_max_distance
self.dropout = config.dropout
self.max_target_positions = config.max_position_embeddings
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = ProphetNetPositionalEmbeddings(config)
self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None)
self.layers = nn.ModuleList(
[ProphetNetDecoderLayer(config, layer_idx=i) for i in range(config.num_decoder_layers)]
)
self.embeddings_layer_norm = LayerNorm(config.hidden_size)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, value):
self.word_embeddings = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, ProphetNetDecoderModelOutput]:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, ProphetNetDecoder
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
>>> model = ProphetNetDecoder.from_pretrained("microsoft/prophetnet-large-uncased", add_cross_attention=False)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is None and inputs_embeds is None:
raise ValueError("Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.")
elif input_ids is not None and inputs_embeds is not None:
raise ValueError("Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.")
elif input_ids is not None and inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
batch_size, sequence_length = inputs_embeds.shape[:2]
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if encoder_hidden_states is not None or self.config.is_encoder_decoder
else DynamicCache(config=self.config)
)
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
main_stream_pos_embed, position_ids = self.position_embeddings(
(batch_size, sequence_length),
device=inputs_embeds.device,
past_key_values=past_key_values,
)
if past_key_values_length != 0:
main_relative_position_buckets, predict_relative_position_buckets = None, None
else:
(
main_relative_position_buckets,
predict_relative_position_buckets,
) = self.compute_buffered_relative_buckets(position_ids)
predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1)
# add position embeddings
hidden_states = inputs_embeds + main_stream_pos_embed
ngram_embeddings = self.ngram_embeddings.weight
# prepare attention mask
if past_key_values_length != 0:
assert hidden_states.size(1) == 1, (
"At the moment `use_cache` is only supported for `decoder_input_ids` of length 1"
)
ngram_hidden_states = [
(ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).repeat(batch_size, 1, 1)
for ngram in range(self.ngram)
]
extended_attention_mask = None
extended_predict_attention_mask = None
else:
ngram_hidden_states = [
(ngram_embeddings[ngram - 1] + predicting_stream_pos_embed) for ngram in range(self.ngram)
]
extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask)
extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask)
# prepare encoder attention mask
if encoder_attention_mask is not None:
extended_encoder_attention_mask = (
1.0 - encoder_attention_mask[:, None, None, :].repeat(1, self.config.num_decoder_attention_heads, 1, 1)
) * torch.finfo(self.dtype).min
extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype)
else:
extended_encoder_attention_mask = None
hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 1)
if self.embeddings_layer_norm:
hidden_states = self.embeddings_layer_norm(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# init attentions, hidden_states and cache with empty tuples
all_main_stream_hidden_states = () if output_hidden_states else None
all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None
all_main_stream_attns = () if output_attentions else None
all_ngram_stream_attns = () if output_attentions else None
all_cross_attns = () if output_attentions and self.config.add_cross_attention else None
for idx, decoder_layer in enumerate(self.layers):
if output_hidden_states:
# grad cannot be kept because tensor is sliced
all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
if self.config.ngram > 0:
all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
layer_outputs = decoder_layer(
hidden_states,
extended_attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attn_mask=extended_encoder_attention_mask,
extended_predict_attention_mask=extended_predict_attention_mask,
main_relative_position_buckets=main_relative_position_buckets,
predict_relative_position_buckets=predict_relative_position_buckets,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_main_stream_attns += (layer_outputs[1],)
all_ngram_stream_attns += (layer_outputs[2],)
if self.config.add_cross_attention:
all_cross_attns += (layer_outputs[3],)
if output_hidden_states:
all_main_stream_hidden_states += (hidden_states[:, :sequence_length],)
if self.config.ngram > 0:
all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],)
# split last_hidden_state for return
last_hidden_state = hidden_states[:, :sequence_length]
last_hidden_state_ngram = hidden_states[:, sequence_length:] if self.config.ngram > 0 else None
if not return_dict:
return tuple(
v
for v in [
last_hidden_state,
last_hidden_state_ngram,
past_key_values,
all_main_stream_hidden_states,
all_ngram_stream_hidden_states,
all_main_stream_attns,
all_ngram_stream_attns,
all_cross_attns,
]
if v is not None
)
return ProphetNetDecoderModelOutput(
last_hidden_state=last_hidden_state,
last_hidden_state_ngram=last_hidden_state_ngram,
past_key_values=past_key_values,
hidden_states=all_main_stream_hidden_states,
hidden_states_ngram=all_ngram_stream_hidden_states,
attentions=all_main_stream_attns,
ngram_attentions=all_ngram_stream_attns,
cross_attentions=all_cross_attns,
)
def compute_buffered_relative_buckets(self, position_ids):
batch_size, sequence_length = position_ids.shape
position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1)
main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets(
self.num_buckets, self.relative_max_distance, position_ids
)
# buffer relative buckets
main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1)
predict_relative_buckets = torch.cat(
[
predict_relative_buckets[:, :sequence_length, :sequence_length],
predict_relative_buckets[
:, :sequence_length, self.max_target_positions : self.max_target_positions + sequence_length
],
],
2,
).repeat(batch_size, 1, 1)
return main_relative_buckets, predict_relative_buckets
def prepare_attention_mask(self, hidden_states, attention_mask):
batch_size, seq_length = hidden_states.shape[:2]
# get causal mask
causal_mask = torch.full(
(seq_length, seq_length),
torch.finfo(hidden_states.dtype).min,
dtype=hidden_states.dtype,
device=hidden_states.device,
)
causal_mask = torch.triu(causal_mask, 1)
extended_causal_mask = causal_mask[:seq_length, :seq_length][None, None, :, :].expand(
(batch_size, self.config.num_decoder_attention_heads) + causal_mask.shape
)
# add usual attention mask
if attention_mask is not None:
extended_attention_mask = (1.0 - attention_mask[:, None, None, :]) * torch.finfo(self.dtype).min
extended_attention_mask = extended_causal_mask + extended_attention_mask
else:
extended_attention_mask = extended_causal_mask
return extended_attention_mask.to(hidden_states.dtype)
def prepare_predict_attention_mask(self, hidden_states, attention_mask):
batch_size, seq_length = hidden_states.shape[:2]
# get causal mask
predict_causal_mask = ngram_attention_bias(
self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype
)
predict_causal_mask = torch.cat(
[
predict_causal_mask[:, :seq_length, :seq_length],
predict_causal_mask[
:, :seq_length, self.max_target_positions : self.max_target_positions + seq_length
],
],
dim=-1,
)
extended_predict_causal_mask = predict_causal_mask[None, None, :, :, :].expand(
(batch_size, self.config.num_decoder_attention_heads) + predict_causal_mask.shape
)
# add usual attention mask
if attention_mask is not None:
extended_attention_mask = (1.0 - attention_mask[:, None, None, None, :]) * torch.finfo(self.dtype).min
extended_attention_mask = extended_attention_mask.expand(
(batch_size, self.config.num_decoder_attention_heads, self.ngram, seq_length, seq_length)
)
# predicted stream attention_mask should always be 0
extended_attention_mask = torch.cat(
[extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1
)
extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask
else:
extended_predict_attention_mask = extended_predict_causal_mask
return extended_predict_attention_mask.to(hidden_states.dtype)
@auto_docstring
| ProphetNetDecoder |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-motherduck/destination_motherduck/destination.py | {
"start": 1665,
"end": 1854
} | class ____(AirbyteStateMessage):
"""Declare the `id` attribute that platform sends."""
id: int | None = None
"""Injected by the platform."""
@dataclass
| PatchedAirbyteStateMessage |
python | numba__numba | numba/core/untyped_passes.py | {
"start": 3294,
"end": 4069
} | class ____(FunctionPass):
_name = "fixup_args"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
state['nargs'] = state['func_ir'].arg_count
if not state['args'] and state['flags'].force_pyobject:
# Allow an empty argument types specification when object mode
# is explicitly requested.
state['args'] = (types.pyobject,) * state['nargs']
elif len(state['args']) != state['nargs']:
raise TypeError("Signature mismatch: %d argument types given, "
"but function takes %d arguments"
% (len(state['args']), state['nargs']))
return True
@register_pass(mutates_CFG=True, analysis_only=False)
| FixupArgs |
python | astropy__astropy | astropy/visualization/wcsaxes/tests/test_frame.py | {
"start": 1043,
"end": 4620
} | class ____(BaseImageTests):
@figure_test(tolerance=0.5)
def test_custom_frame(self):
wcs = WCS(self.msx_header)
fig = Figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=wcs, frame_class=HexagonalFrame)
fig.add_axes(ax)
ax.coords.grid(color="white")
im = ax.imshow(
np.ones((149, 149)),
vmin=0.0,
vmax=2.0,
origin="lower",
cmap="gist_heat",
)
minpad = {}
minpad["a"] = minpad["d"] = 1
minpad["b"] = minpad["c"] = minpad["e"] = minpad["f"] = 2.75
ax.coords["glon"].set_axislabel("Longitude", minpad=minpad)
ax.coords["glon"].set_axislabel_position("ad")
ax.coords["glat"].set_axislabel("Latitude", minpad=minpad)
ax.coords["glat"].set_axislabel_position("bcef")
ax.coords["glon"].set_ticklabel_position("ad")
ax.coords["glat"].set_ticklabel_position("bcef")
# Set limits so that no labels overlap
ax.set_xlim(5.5, 100.5)
ax.set_ylim(5.5, 110.5)
# Clip the image to the frame
im.set_clip_path(ax.coords.frame.patch)
return fig
@figure_test
def test_update_clip_path_rectangular(self, tmp_path):
fig = Figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect="equal")
fig.add_axes(ax)
ax.set_xlim(0.0, 2.0)
ax.set_ylim(0.0, 2.0)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmp_path / "nothing")
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
return fig
@figure_test
def test_update_clip_path_nonrectangular(self, tmp_path):
fig = Figure()
ax = WCSAxes(
fig, [0.1, 0.1, 0.8, 0.8], aspect="equal", frame_class=HexagonalFrame
)
fig.add_axes(ax)
ax.set_xlim(0.0, 2.0)
ax.set_ylim(0.0, 2.0)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmp_path / "nothing")
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
return fig
@figure_test
def test_update_clip_path_change_wcs(self, tmp_path):
# When WCS is changed, a new frame is created, so we need to make sure
# that the path is carried over to the new frame.
fig = Figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect="equal")
fig.add_axes(ax)
ax.set_xlim(0.0, 2.0)
ax.set_ylim(0.0, 2.0)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmp_path / "nothing")
ax.reset_wcs()
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
return fig
def test_copy_frame_properties_change_wcs(self):
# When WCS is changed, a new frame is created, so we need to make sure
# that the color and linewidth are transferred over
fig = Figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
ax.coords.frame.set_linewidth(5)
ax.coords.frame.set_color("purple")
ax.reset_wcs()
assert ax.coords.frame.get_linewidth() == 5
assert ax.coords.frame.get_color() == "purple"
| TestFrame |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_superfences.py | {
"start": 9063,
"end": 9951
} | class ____(util.MdCase):
"""Test highlight line wraps."""
extension = ['pymdownx.highlight', 'pymdownx.superfences']
extension_configs = {
'pymdownx.highlight': {
'line_anchors': '__my_span',
'linenums_style': 'pymdownx-inline'
}
}
def test_linespans(self):
"""Test wrapping a line in line spans."""
self.check_markdown(
r'''
```python linenums="2"
import test
```
''',
r'''
<div class="highlight"><pre><span></span><code><a id="__my_span-0-2" name="__my_span-0-2"></a><span class="linenos" data-linenos="2 "></span><span class="kn">import</span><span class="w"> </span><span class="nn">test</span>
</code></pre></div>
''', # noqa: E501
True
)
| TestHighlightLineAnchorsPymdownxInline |
python | pytest-dev__pytest | testing/test_assertion.py | {
"start": 44278,
"end": 53093
} | class ____:
# The number of lines in the truncation explanation message. Used
# to calculate that results have the expected length.
LINES_IN_TRUNCATION_MSG = 2
def test_doesnt_truncate_when_input_is_empty_list(self) -> None:
expl: list[str] = []
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)
assert result == expl
def test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars(self) -> None:
expl = ["a" * 100 for x in range(5)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80)
assert result == expl
def test_truncates_at_8_lines_when_given_list_of_empty_strings(self) -> None:
expl = ["" for x in range(50)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)
assert len(result) != len(expl)
assert result != expl
assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG
assert "Full output truncated" in result[-1]
assert "42 lines hidden" in result[-1]
last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]
assert last_line_before_trunc_msg.endswith("...")
def test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars(self) -> None:
total_lines = 100
expl = ["a" for x in range(total_lines)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80)
assert result != expl
assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG
assert "Full output truncated" in result[-1]
assert f"{total_lines - 8} lines hidden" in result[-1]
last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]
assert last_line_before_trunc_msg.endswith("...")
def test_truncates_at_8_lines_when_there_is_one_line_to_remove(self) -> None:
"""The number of line in the result is 9, the same number as if we truncated."""
expl = ["a" for x in range(9)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80)
assert result == expl
assert "truncated" not in result[-1]
def test_truncates_edgecase_when_truncation_message_makes_the_result_longer_for_chars(
self,
) -> None:
line = "a" * 10
expl = [line, line]
result = truncate._truncate_explanation(expl, max_lines=10, max_chars=10)
assert result == [line, line]
def test_truncates_edgecase_when_truncation_message_makes_the_result_longer_for_lines(
self,
) -> None:
line = "a" * 10
expl = [line, line]
result = truncate._truncate_explanation(expl, max_lines=1, max_chars=100)
assert result == [line, line]
def test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars(self) -> None:
expl = [chr(97 + x) * 80 for x in range(16)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80)
assert result != expl
assert len(result) == 16 - 8 + self.LINES_IN_TRUNCATION_MSG
assert "Full output truncated" in result[-1]
assert "8 lines hidden" in result[-1]
last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]
assert last_line_before_trunc_msg.endswith("...")
def test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars(self) -> None:
expl = ["a" * 250 for x in range(10)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=999)
assert result != expl
assert len(result) == 4 + self.LINES_IN_TRUNCATION_MSG
assert "Full output truncated" in result[-1]
assert "7 lines hidden" in result[-1]
last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]
assert last_line_before_trunc_msg.endswith("...")
def test_truncates_at_1_line_when_first_line_is_GT_max_chars(self) -> None:
expl = ["a" * 250 for x in range(1000)]
result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)
assert result != expl
assert len(result) == 1 + self.LINES_IN_TRUNCATION_MSG
assert "Full output truncated" in result[-1]
assert "1000 lines hidden" in result[-1]
last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]
assert last_line_before_trunc_msg.endswith("...")
def test_full_output_truncated(self, monkeypatch, pytester: Pytester) -> None:
"""Test against full runpytest() output."""
line_count = 7
line_len = 100
expected_truncated_lines = 2
pytester.makepyfile(
rf"""
def test_many_lines():
a = list([str(i)[0] * {line_len} for i in range({line_count})])
b = a[::2]
a = '\n'.join(map(str, a))
b = '\n'.join(map(str, b))
assert a == b
"""
)
monkeypatch.delenv("CI", raising=False)
result = pytester.runpytest()
# without -vv, truncate the message showing a few diff lines only
result.stdout.fnmatch_lines(
[
"*+ 1*",
"*+ 3*",
f"*truncated ({expected_truncated_lines} lines hidden)*use*-vv*",
]
)
result = pytester.runpytest("-vv")
result.stdout.fnmatch_lines(["* 6*"])
# Setting CI to empty string is same as having it undefined
monkeypatch.setenv("CI", "")
result = pytester.runpytest()
result.stdout.fnmatch_lines(
[
"*+ 1*",
"*+ 3*",
f"*truncated ({expected_truncated_lines} lines hidden)*use*-vv*",
]
)
monkeypatch.setenv("CI", "1")
result = pytester.runpytest()
result.stdout.fnmatch_lines(["* 6*"])
@pytest.mark.parametrize(
["truncation_lines", "truncation_chars", "expected_lines_hidden"],
(
(3, None, 3),
(4, None, 0),
(0, None, 0),
(None, 8, 6),
(None, 9, 0),
(None, 0, 0),
(0, 0, 0),
(0, 1000, 0),
(1000, 0, 0),
),
)
def test_truncation_with_ini(
self,
monkeypatch,
pytester: Pytester,
truncation_lines: int | None,
truncation_chars: int | None,
expected_lines_hidden: int,
) -> None:
pytester.makepyfile(
"""\
string_a = "123456789\\n23456789\\n3"
string_b = "123456789\\n23456789\\n4"
def test():
assert string_a == string_b
"""
)
# This test produces 6 lines of diff output or 79 characters
# So the effect should be when threshold is < 4 lines (considering 2 additional lines for explanation)
# Or < 9 characters (considering 70 additional characters for explanation)
monkeypatch.delenv("CI", raising=False)
ini = "[pytest]\n"
if truncation_lines is not None:
ini += f"truncation_limit_lines = {truncation_lines}\n"
if truncation_chars is not None:
ini += f"truncation_limit_chars = {truncation_chars}\n"
pytester.makeini(ini)
result = pytester.runpytest()
if expected_lines_hidden != 0:
result.stdout.fnmatch_lines(
[f"*truncated ({expected_lines_hidden} lines hidden)*"]
)
else:
result.stdout.no_fnmatch_line("*truncated*")
result.stdout.fnmatch_lines(
[
"*- 4*",
"*+ 3*",
]
)
def test_python25_compile_issue257(pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_rewritten():
assert 1 == 2
# some comment
"""
)
result = pytester.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(
"""
*E*assert 1 == 2*
*1 failed*
"""
)
def test_rewritten(pytester: Pytester) -> None:
pytester.makepyfile(
"""
def test_rewritten():
assert "@py_builtins" in globals()
"""
)
assert pytester.runpytest().ret == 0
def test_reprcompare_notin() -> None:
assert callop("not in", "foo", "aaafoobbb") == [
"'foo' not in 'aaafoobbb'",
"",
"'foo' is contained here:",
" aaafoobbb",
"? +++",
]
def test_reprcompare_whitespaces() -> None:
assert callequal("\r\n", "\n") == [
r"'\r\n' == '\n'",
"",
r"Strings contain only whitespace, escaping them using repr()",
r"- '\n'",
r"+ '\r\n'",
r"? ++",
]
| TestTruncateExplanation |
python | huggingface__transformers | src/transformers/models/xlm_roberta/modeling_xlm_roberta.py | {
"start": 39017,
"end": 42783
} | class ____(XLMRobertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.classifier = XLMRobertaClassificationHead(config)
self.roberta = XLMRobertaModel(config, add_pooling_layer=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
# move labels to correct device
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| XLMRobertaForSequenceClassification |
python | sympy__sympy | sympy/combinatorics/pc_groups.py | {
"start": 214,
"end": 1308
} | class ____(DefaultPrinting):
is_group = True
is_solvable = True
def __init__(self, pc_sequence, pc_series, relative_order, collector=None):
"""
Parameters
==========
pc_sequence : list
A sequence of elements whose classes generate the cyclic factor
groups of pc_series.
pc_series : list
A subnormal sequence of subgroups where each factor group is cyclic.
relative_order : list
The orders of factor groups of pc_series.
collector : Collector
By default, it is None. Collector class provides the
polycyclic presentation with various other functionalities.
"""
self.pcgs = pc_sequence
self.pc_series = pc_series
self.relative_order = relative_order
self.collector = Collector(self.pcgs, pc_series, relative_order) if not collector else collector
def is_prime_order(self):
return all(isprime(order) for order in self.relative_order)
def length(self):
return len(self.pcgs)
| PolycyclicGroup |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws_tests/ecs_tests/stubbed_ecs.py | {
"start": 1674,
"end": 2324
} | class ____:
def __init__(self, region_name):
storage = StubStorage()
self.stubs = defaultdict(
lambda: StubbedEcs(
# Hack: Build the client from the Session because we monkeypatch
# boto3.client elsewhere to return an instance of this class and
# we want to avoid infinite recursion errors.
boto3.Session().client("ecs", region_name=region_name),
storage=storage,
)
)
def __getattr__(self, name):
thread = threading.current_thread().name
return getattr(self.stubs[thread], name)
| ThreadsafeStubbedEcs |
python | django__django | tests/prefetch_related/tests.py | {
"start": 19024,
"end": 43487
} | class ____(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
Helper method that returns a list containing a list of the objects in
the obj_iter. Then for each object in the obj_iter, the path will be
recursively travelled and the found objects are added to the return
value.
"""
ret_val = []
if hasattr(obj_iter, "all"):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
try:
related = getattr(obj, part[0])
except ObjectDoesNotExist:
continue
if related is not None:
rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
@classmethod
def setUpTestData(cls):
cls.person1 = Person.objects.create(name="Joe")
cls.person2 = Person.objects.create(name="Mary")
# Set main_room for each house before creating the next one for
# databases where supports_nullable_unique_constraints is False.
cls.house1 = House.objects.create(
name="House 1", address="123 Main St", owner=cls.person1
)
cls.room1_1 = Room.objects.create(name="Dining room", house=cls.house1)
cls.room1_2 = Room.objects.create(name="Lounge", house=cls.house1)
cls.room1_3 = Room.objects.create(name="Kitchen", house=cls.house1)
cls.house1.main_room = cls.room1_1
cls.house1.save()
cls.person1.houses.add(cls.house1)
cls.house2 = House.objects.create(
name="House 2", address="45 Side St", owner=cls.person1
)
cls.room2_1 = Room.objects.create(name="Dining room", house=cls.house2)
cls.room2_2 = Room.objects.create(name="Lounge", house=cls.house2)
cls.room2_3 = Room.objects.create(name="Kitchen", house=cls.house2)
cls.house2.main_room = cls.room2_1
cls.house2.save()
cls.person1.houses.add(cls.house2)
cls.house3 = House.objects.create(
name="House 3", address="6 Downing St", owner=cls.person2
)
cls.room3_1 = Room.objects.create(name="Dining room", house=cls.house3)
cls.room3_2 = Room.objects.create(name="Lounge", house=cls.house3)
cls.room3_3 = Room.objects.create(name="Kitchen", house=cls.house3)
cls.house3.main_room = cls.room3_1
cls.house3.save()
cls.person2.houses.add(cls.house3)
cls.house4 = House.objects.create(
name="house 4", address="7 Regents St", owner=cls.person2
)
cls.room4_1 = Room.objects.create(name="Dining room", house=cls.house4)
cls.room4_2 = Room.objects.create(name="Lounge", house=cls.house4)
cls.room4_3 = Room.objects.create(name="Kitchen", house=cls.house4)
cls.house4.main_room = cls.room4_1
cls.house4.save()
cls.person2.houses.add(cls.house4)
def test_traverse_qs(self):
qs = Person.objects.prefetch_related("houses")
related_objs_normal = ([list(p.houses.all()) for p in qs],)
related_objs_from_traverse = [
[inner[0] for inner in o[1]] for o in self.traverse_qs(qs, [["houses"]])
]
self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
def test_ambiguous(self):
# Ambiguous: Lookup was already seen with a different queryset.
msg = (
"'houses' lookup was already seen with a different queryset. You "
"may need to adjust the ordering of your lookups."
)
# lookup.queryset shouldn't be evaluated.
with self.assertNumQueries(3):
with self.assertRaisesMessage(ValueError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
"houses__rooms",
Prefetch("houses", queryset=House.objects.all()),
),
[["houses", "rooms"]],
)
# Ambiguous: Lookup houses_lst doesn't yet exist when performing
# houses_lst__rooms.
msg = (
"Cannot find 'houses_lst' on Person object, 'houses_lst__rooms' is "
"an invalid parameter to prefetch_related()"
)
with self.assertRaisesMessage(AttributeError, msg):
self.traverse_qs(
Person.objects.prefetch_related(
"houses_lst__rooms",
Prefetch(
"houses", queryset=House.objects.all(), to_attr="houses_lst"
),
),
[["houses", "rooms"]],
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related("houses__rooms", "houses"),
[["houses", "rooms"]],
)
self.traverse_qs(
Person.objects.prefetch_related(
"houses__rooms",
Prefetch("houses", queryset=House.objects.all(), to_attr="houses_lst"),
),
[["houses", "rooms"]],
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related("houses"), [["houses"]]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch("houses")), [["houses"]]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch("houses", to_attr="houses_lst")
),
[["houses_lst"]],
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related("occupants"), [["occupants"]]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch("occupants")), [["occupants"]]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(
Prefetch("occupants", to_attr="occupants_lst")
),
[["occupants_lst"]],
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related("house__occupants"),
[["house", "occupants"]],
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch("house__occupants")),
[["house", "occupants"]],
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(
Prefetch("house__occupants", to_attr="occupants_lst")
),
[["house", "occupants_lst"]],
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag="houses").prefetch_related(
"content_object__rooms"
),
[["content_object", "rooms"]],
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch("content_object"),
Prefetch("content_object__rooms", to_attr="rooms_lst"),
),
[["content_object", "rooms_lst"]],
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related("houses", "houses__rooms"),
[["houses", "rooms"]],
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch("houses"), "houses__rooms"),
[["houses", "rooms"]],
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch("houses"), Prefetch("houses__rooms")
),
[["houses", "rooms"]],
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch("houses", to_attr="houses_lst"), "houses_lst__rooms"
),
[["houses_lst", "rooms"]],
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch("houses", to_attr="houses_lst"),
Prefetch("houses_lst__rooms", to_attr="rooms_lst"),
),
[["houses_lst", "rooms_lst"]],
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url="http://www.djangoproject.com/")
TaggedItem.objects.create(content_object=bookmark, tag="django")
TaggedItem.objects.create(
content_object=bookmark, favorite=bookmark, tag="python"
)
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related(
"tags", "tags__content_object", "favorite_tags"
),
[["tags", "content_object"], ["favorite_tags"]],
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch("tags", to_attr="tags_lst"),
Prefetch("tags_lst__content_object"),
Prefetch("favorite_tags"),
),
[["tags_lst", "content_object"], ["favorite_tags"]],
)
self.assertEqual(lst1, lst2)
def test_traverse_single_item_property(self):
# Control lookups.
with self.assertNumQueries(5):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
"houses__rooms",
"primary_house__occupants__houses",
),
[["primary_house", "occupants", "houses"]],
)
# Test lookups.
with self.assertNumQueries(5):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
"houses__rooms",
Prefetch("primary_house__occupants", to_attr="occupants_lst"),
"primary_house__occupants_lst__houses",
),
[["primary_house", "occupants_lst", "houses"]],
)
self.assertEqual(lst1, lst2)
def test_traverse_multiple_items_property(self):
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Person.objects.prefetch_related(
"houses",
"all_houses__occupants__houses",
),
[["all_houses", "occupants", "houses"]],
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
"houses",
Prefetch("all_houses__occupants", to_attr="occupants_lst"),
"all_houses__occupants_lst__houses",
),
[["all_houses", "occupants_lst", "houses"]],
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related("houses"))
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
"houses", queryset=House.objects.all(), to_attr="houses_lst"
)
)
)
self.assertEqual(
self.traverse_qs(lst1, [["houses"]]),
self.traverse_qs(lst2, [["houses_lst"]]),
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
"houses",
queryset=House.objects.filter(
pk__in=[self.house1.pk, self.house3.pk]
),
to_attr="houses_lst",
)
)
)
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related("houses__rooms"))
with self.assertNumQueries(3):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
"houses__rooms",
queryset=Room.objects.all(),
to_attr="rooms_lst",
)
)
)
self.assertEqual(
self.traverse_qs(lst1, [["houses", "rooms"]]),
self.traverse_qs(lst2, [["houses", "rooms_lst"]]),
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related("houses__owner"))
with self.assertNumQueries(2):
lst2 = list(
Person.objects.prefetch_related(
Prefetch("houses", queryset=House.objects.select_related("owner"))
)
)
self.assertEqual(
self.traverse_qs(lst1, [["houses", "owner"]]),
self.traverse_qs(lst2, [["houses", "owner"]]),
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch("rooms", queryset=inner_rooms_qs, to_attr="rooms_lst")
)
with self.assertNumQueries(4):
lst2 = list(
Person.objects.prefetch_related(
Prefetch(
"houses",
queryset=houses_qs_prf.filter(pk=self.house1.pk),
to_attr="houses_lst",
),
Prefetch("houses_lst__rooms_lst__main_room_of"),
)
)
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
self.assertEqual(len(lst2[1].houses_lst), 0)
# Test ForwardManyToOneDescriptor.
houses = House.objects.select_related("owner")
with self.assertNumQueries(6):
rooms = Room.objects.prefetch_related("house")
lst1 = self.traverse_qs(rooms, [["house", "owner"]])
with self.assertNumQueries(2):
rooms = Room.objects.prefetch_related(Prefetch("house", queryset=houses))
lst2 = self.traverse_qs(rooms, [["house", "owner"]])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
houses = House.objects.select_related("owner")
rooms = Room.objects.prefetch_related(
Prefetch("house", queryset=houses, to_attr="house_attr")
)
lst2 = self.traverse_qs(rooms, [["house_attr", "owner"]])
self.assertEqual(lst1, lst2)
room = Room.objects.prefetch_related(
Prefetch("house", queryset=houses.filter(address="DoesNotExist"))
).first()
with self.assertRaises(ObjectDoesNotExist):
getattr(room, "house")
room = Room.objects.prefetch_related(
Prefetch(
"house",
queryset=houses.filter(address="DoesNotExist"),
to_attr="house_attr",
)
).first()
self.assertIsNone(room.house_attr)
rooms = Room.objects.prefetch_related(
Prefetch("house", queryset=House.objects.only("name"))
)
with self.assertNumQueries(2):
getattr(rooms.first().house, "name")
with self.assertNumQueries(3):
getattr(rooms.first().house, "address")
# Test ReverseOneToOneDescriptor.
houses = House.objects.select_related("owner")
with self.assertNumQueries(6):
rooms = Room.objects.prefetch_related("main_room_of")
lst1 = self.traverse_qs(rooms, [["main_room_of", "owner"]])
with self.assertNumQueries(2):
rooms = Room.objects.prefetch_related(
Prefetch("main_room_of", queryset=houses)
)
lst2 = self.traverse_qs(rooms, [["main_room_of", "owner"]])
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
rooms = list(
Room.objects.prefetch_related(
Prefetch(
"main_room_of",
queryset=houses,
to_attr="main_room_of_attr",
)
)
)
lst2 = self.traverse_qs(rooms, [["main_room_of_attr", "owner"]])
self.assertEqual(lst1, lst2)
room = (
Room.objects.filter(main_room_of__isnull=False)
.prefetch_related(
Prefetch("main_room_of", queryset=houses.filter(address="DoesNotExist"))
)
.first()
)
with self.assertRaises(ObjectDoesNotExist):
getattr(room, "main_room_of")
room = (
Room.objects.filter(main_room_of__isnull=False)
.prefetch_related(
Prefetch(
"main_room_of",
queryset=houses.filter(address="DoesNotExist"),
to_attr="main_room_of_attr",
)
)
.first()
)
self.assertIsNone(room.main_room_of_attr)
# The custom queryset filters should be applied to the queryset
# instance returned by the manager.
person = Person.objects.prefetch_related(
Prefetch("houses", queryset=House.objects.filter(name="House 1")),
).get(pk=self.person1.pk)
self.assertEqual(
list(person.houses.all()),
list(person.houses.all().all()),
)
def test_nested_prefetch_related_are_not_overwritten(self):
# Regression test for #24873
houses_2 = House.objects.prefetch_related(Prefetch("rooms"))
persons = Person.objects.prefetch_related(Prefetch("houses", queryset=houses_2))
houses = House.objects.prefetch_related(Prefetch("occupants", queryset=persons))
list(houses) # queryset must be evaluated once to reproduce the bug.
self.assertEqual(
houses.all()[0].occupants.all()[0].houses.all()[1].rooms.all()[0],
self.room2_1,
)
def test_nested_prefetch_related_with_duplicate_prefetcher(self):
"""
Nested prefetches whose name clashes with descriptor names
(Person.houses here) are allowed.
"""
occupants = Person.objects.prefetch_related(
Prefetch("houses", to_attr="some_attr_name"),
Prefetch("houses", queryset=House.objects.prefetch_related("main_room")),
)
houses = House.objects.prefetch_related(
Prefetch("occupants", queryset=occupants)
)
with self.assertNumQueries(5):
self.traverse_qs(list(houses), [["occupants", "houses", "main_room"]])
def test_nested_prefetch_related_with_duplicate_prefetch_and_depth(self):
people = Person.objects.prefetch_related(
Prefetch(
"houses__main_room",
queryset=Room.objects.filter(name="Dining room"),
to_attr="dining_room",
),
"houses__main_room",
)
with self.assertNumQueries(4):
main_room = people[0].houses.all()[0]
people = Person.objects.prefetch_related(
"houses__main_room",
Prefetch(
"houses__main_room",
queryset=Room.objects.filter(name="Dining room"),
to_attr="dining_room",
),
)
with self.assertNumQueries(4):
main_room = people[0].houses.all()[0]
self.assertEqual(main_room.main_room, self.room1_1)
def test_values_queryset(self):
msg = "Prefetch querysets cannot use raw(), values(), and values_list()."
with self.assertRaisesMessage(ValueError, msg):
Prefetch("houses", House.objects.values("pk"))
with self.assertRaisesMessage(ValueError, msg):
Prefetch("houses", House.objects.values_list("pk"))
# That error doesn't affect managers with custom ModelIterable
# subclasses
self.assertIs(
Teacher.objects_custom.all()._iterable_class, ModelIterableSubclass
)
Prefetch("teachers", Teacher.objects_custom.all())
def test_raw_queryset(self):
msg = "Prefetch querysets cannot use raw(), values(), and values_list()."
with self.assertRaisesMessage(ValueError, msg):
Prefetch("houses", House.objects.raw("select pk from house"))
def test_to_attr_doesnt_cache_through_attr_as_list(self):
house = House.objects.prefetch_related(
Prefetch("rooms", queryset=Room.objects.all(), to_attr="to_rooms"),
).get(pk=self.house3.pk)
self.assertIsInstance(house.rooms.all(), QuerySet)
def test_to_attr_cached_property(self):
persons = Person.objects.prefetch_related(
Prefetch("houses", House.objects.all(), to_attr="cached_all_houses"),
)
for person in persons:
# To bypass caching at the related descriptor level, don't use
# person.houses.all() here.
all_houses = list(House.objects.filter(occupants=person))
with self.assertNumQueries(0):
self.assertEqual(person.cached_all_houses, all_houses)
def test_filter_deferred(self):
"""
Related filtering of prefetched querysets is deferred until necessary.
"""
add_q = Query.add_q
with mock.patch.object(
Query,
"add_q",
autospec=True,
side_effect=lambda self, q, reuse_all: add_q(self, q),
) as add_q_mock:
list(
House.objects.prefetch_related(
Prefetch("occupants", queryset=Person.objects.all())
)
)
self.assertEqual(add_q_mock.call_count, 1)
| CustomPrefetchTests |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 114470,
"end": 117267
} | class ____(SingleContinuousDistribution):
_argnames = ('n',)
@property
def set(self):
return Interval(0, self.n)
@staticmethod
def check(n):
_value_check((n > 0, n.is_integer),
"Parameter n must be positive integer.")
def pdf(self, x):
n = self.n
k = Dummy("k")
return 1/factorial(
n - 1)*Sum((-1)**k*binomial(n, k)*(x - k)**(n - 1), (k, 0, floor(x)))
def _cdf(self, x):
n = self.n
k = Dummy("k")
return Piecewise((S.Zero, x < 0),
(1/factorial(n)*Sum((-1)**k*binomial(n, k)*(x - k)**(n),
(k, 0, floor(x))), x <= n),
(S.One, True))
def _characteristic_function(self, t):
return ((exp(I*t) - 1) / (I*t))**self.n
def _moment_generating_function(self, t):
return ((exp(t) - 1) / t)**self.n
def UniformSum(name, n):
r"""
Create a continuous random variable with an Irwin-Hall distribution.
Explanation
===========
The probability distribution function depends on a single parameter
$n$ which is an integer.
The density of the Irwin-Hall distribution is given by
.. math ::
f(x) := \frac{1}{(n-1)!}\sum_{k=0}^{\left\lfloor x\right\rfloor}(-1)^k
\binom{n}{k}(x-k)^{n-1}
Parameters
==========
n : A positive integer, `n > 0`
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import UniformSum, density, cdf
>>> from sympy import Symbol, pprint
>>> n = Symbol("n", integer=True)
>>> z = Symbol("z")
>>> X = UniformSum("x", n)
>>> D = density(X)(z)
>>> pprint(D, use_unicode=False)
floor(z)
___
\ `
\ k n - 1 /n\
) (-1) *(-k + z) *| |
/ \k/
/__,
k = 0
--------------------------------
(n - 1)!
>>> cdf(X)(z)
Piecewise((0, z < 0), (Sum((-1)**_k*(-_k + z)**n*binomial(n, _k),
(_k, 0, floor(z)))/factorial(n), n >= z), (1, True))
Compute cdf with specific 'x' and 'n' values as follows :
>>> cdf(UniformSum("x", 5), evaluate=False)(2).doit()
9/40
The argument evaluate=False prevents an attempt at evaluation
of the sum for general n, before the argument 2 is passed.
References
==========
.. [1] https://en.wikipedia.org/wiki/Uniform_sum_distribution
.. [2] https://mathworld.wolfram.com/UniformSumDistribution.html
"""
return rv(name, UniformSumDistribution, (n, ))
#-------------------------------------------------------------------------------
# VonMises distribution --------------------------------------------------------
| UniformSumDistribution |
python | sympy__sympy | sympy/liealgebras/type_b.py | {
"start": 77,
"end": 4547
} | class ____(Standard_Cartan):
def __new__(cls, n):
if n < 2:
raise ValueError("n cannot be less than 2")
return Standard_Cartan.__new__(cls, "B", n)
def dimension(self):
"""Dimension of the vector space V underlying the Lie algebra
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("B3")
>>> c.dimension()
3
"""
return self.n
def basic_root(self, i, j):
"""
This is a method just to generate roots
with a 1 iin the ith position and a -1
in the jth position.
"""
root = [0]*self.n
root[i] = 1
root[j] = -1
return root
def simple_root(self, i):
"""
Every lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
In B_n the first n-1 simple roots are the same as the
roots in A_(n-1) (a 1 in the ith position, a -1 in
the (i+1)th position, and zeroes elsewhere). The n-th
simple root is the root with a 1 in the nth position
and zeroes elsewhere.
This method returns the ith simple root for the B series.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("B3")
>>> c.simple_root(2)
[0, 1, -1]
"""
n = self.n
if i < n:
return self.basic_root(i-1, i)
else:
root = [0]*self.n
root[n-1] = 1
return root
def positive_roots(self):
"""
This method generates all the positive roots of
A_n. This is half of all of the roots of B_n;
by multiplying all the positive roots by -1 we
get the negative roots.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
posroots = {}
k = 0
for i in range(0, n-1):
for j in range(i+1, n):
k += 1
posroots[k] = self.basic_root(i, j)
k += 1
root = self.basic_root(i, j)
root[j] = 1
posroots[k] = root
for i in range(0, n):
k += 1
root = [0]*n
root[i] = 1
posroots[k] = root
return posroots
def roots(self):
"""
Returns the total number of roots for B_n"
"""
n = self.n
return 2*(n**2)
def cartan_matrix(self):
"""
Returns the Cartan matrix for B_n.
The Cartan matrix matrix for a Lie algebra is
generated by assigning an ordering to the simple
roots, (alpha[1], ...., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('B4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -2],
[ 0, 0, -1, 2]])
"""
n = self.n
m = 2* eye(n)
for i in range(1, n - 1):
m[i, i+1] = -1
m[i, i-1] = -1
m[0, 1] = -1
m[n-2, n-1] = -2
m[n-1, n-2] = -1
return m
def basis(self):
"""
Returns the number of independent generators of B_n
"""
n = self.n
return (n**2 - n)/2
def lie_algebra(self):
"""
Returns the Lie algebra associated with B_n
"""
n = self.n
return "so(" + str(2*n) + ")"
def dynkin_diagram(self):
n = self.n
diag = "---".join("0" for i in range(1, n)) + "=>=0\n"
diag += " ".join(str(i) for i in range(1, n+1))
return diag
| TypeB |
python | huggingface__transformers | tests/models/udop/test_modeling_udop.py | {
"start": 21789,
"end": 22725
} | class ____(unittest.TestCase):
@cached_property
def image(self):
ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
return ds[1]["image"]
@cached_property
def processor(self):
return UdopProcessor.from_pretrained("microsoft/udop-large")
@cached_property
def model(self):
return UdopForConditionalGeneration.from_pretrained("microsoft/udop-large").to(torch_device)
def test_conditional_generation(self):
processor = self.processor
model = self.model
prompt = "Question answering. In which year is the report made?"
encoding = processor(images=self.image, text=prompt, return_tensors="pt").to(torch_device)
predicted_ids = model.generate(**encoding)
predicted_text = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
self.assertEqual(predicted_text, "2013")
| UdopModelIntegrationTests |
python | tensorflow__tensorflow | tensorflow/python/feature_column/sequence_feature_column_test.py | {
"start": 1771,
"end": 5822
} | class ____(test.TestCase, parameterized.TestCase):
"""Tests the utility fn concatenate_context_input."""
def test_concatenate_context_input(self):
seq_input = ops.convert_to_tensor(np.arange(12).reshape(2, 3, 2))
context_input = ops.convert_to_tensor(np.arange(10).reshape(2, 5))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
input_layer = sfc.concatenate_context_input(context_input, seq_input)
expected = np.array([
[[0, 1, 0, 1, 2, 3, 4], [2, 3, 0, 1, 2, 3, 4], [4, 5, 0, 1, 2, 3, 4]],
[[6, 7, 5, 6, 7, 8, 9], [8, 9, 5, 6, 7, 8, 9], [10, 11, 5, 6, 7, 8, 9]]
], dtype=np.float32)
output = self.evaluate(input_layer)
self.assertAllEqual(expected, output)
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_3',
'seq_input_arg': np.arange(100).reshape(10, 10)},
{'testcase_name': 'rank_gt_3',
'seq_input_arg': np.arange(100).reshape(5, 5, 2, 2)}
)
def test_sequence_input_throws_error(self, seq_input_arg):
seq_input = ops.convert_to_tensor(seq_input_arg)
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegex(ValueError, 'sequence_input must have rank 3'):
sfc.concatenate_context_input(context_input, seq_input)
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_2',
'context_input_arg': np.arange(100)},
{'testcase_name': 'rank_gt_2',
'context_input_arg': np.arange(100).reshape(5, 5, 4)}
)
def test_context_input_throws_error(self, context_input_arg):
context_input = ops.convert_to_tensor(context_input_arg)
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegex(ValueError, 'context_input must have rank 2'):
sfc.concatenate_context_input(context_input, seq_input)
def test_integer_seq_input_throws_error(self):
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegex(TypeError,
'sequence_input must have dtype float32'):
sfc.concatenate_context_input(context_input, seq_input)
def test_integer_context_input_throws_error(self):
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
with self.assertRaisesRegex(TypeError,
'context_input must have dtype float32'):
sfc.concatenate_context_input(context_input, seq_input)
def _assert_sparse_tensor_value(test_case, expected, actual):
_assert_sparse_tensor_indices_shape(test_case, expected, actual)
test_case.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
def _assert_sparse_tensor_indices_shape(test_case, expected, actual):
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
def _get_sequence_dense_tensor(column, features):
return column.get_sequence_dense_tensor(
fc.FeatureTransformationCache(features), None)
def _get_sparse_tensors(column, features):
return column.get_sparse_tensors(
fc.FeatureTransformationCache(features), None)
@test_util.run_all_in_graph_and_eager_modes
| ConcatenateContextInputTest |
python | simonw__sqlite-utils | sqlite_utils/db.py | {
"start": 5990,
"end": 6055
} | class ____(Exception):
"Specified table does not exist"
| NoTable |
python | allegroai__clearml | clearml/backend_api/services/v2_23/models.py | {
"start": 40910,
"end": 42624
} | class ____(Request):
"""
Delete a model.
:param model: Model ID
:type model: str
:param force: Force. Required if there are tasks that use the model as an
execution model, or if the model's creating task is published.
:type force: bool
"""
_service = "models"
_action = "delete"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"force": {
"description": "Force. Required if there are tasks that use the model as an execution model, or if the model's creating task is published.\n ",
"type": "boolean",
},
"model": {"description": "Model ID", "type": "string"},
},
"required": ["model"],
"type": "object",
}
def __init__(self, model: str, force: Optional[bool] = None, **kwargs: Any) -> None:
super(DeleteRequest, self).__init__(**kwargs)
self.model = model
self.force = force
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
| DeleteRequest |
python | jazzband__django-oauth-toolkit | tests/admin.py | {
"start": 35,
"end": 112
} | class ____(admin.ModelAdmin):
list_display = ("id",)
| CustomApplicationAdmin |
python | encode__django-rest-framework | rest_framework/fields.py | {
"start": 58488,
"end": 58776
} | class ____(Field):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.allow_blank = True
self.allow_null = True
def to_internal_value(self, data):
return data
def to_representation(self, value):
return value
| _UnvalidatedField |
python | getsentry__sentry | tests/sentry/snuba/test_metrics_enhanced_performance.py | {
"start": 630,
"end": 3727
} | class ____(MetricsEnhancedPerformanceTestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.snuba_params = SnubaParams(
organization=self.organization.id,
projects=[self.project],
start=before_now(days=1),
end=self.now,
)
@cached_property
def now(self):
return before_now()
@mock.patch("sentry.snuba.transactions.query")
def test_metrics_incompatible_query_redirects_to_transactions_when_flagged(
self, mock_transactions_query
):
self.store_transaction_metric(
33,
metric="measurements.datacenter_memory",
internal_metric="d:transactions/measurements.datacenter_memory@petabyte",
entity="metrics_distributions",
tags={"transaction": "foo_transaction"},
timestamp=before_now(days=1),
)
transaction_data = load_data("transaction", timestamp=before_now(days=1))
transaction_data["measurements"]["datacenter_memory"] = {
"value": 33,
"unit": "petabyte",
}
self.store_event(transaction_data, self.project.id)
metrics_enhanced_performance.query(
selected_columns=[
"transaction",
"measurements.datacenter_memory",
],
# Equations are not compatible with metrics in MEP, forces a fallback
equations=["measurements.datacenter_memory / 3"],
query="",
snuba_params=self.snuba_params,
referrer="test_query",
auto_fields=True,
fallback_to_transactions=True,
)
mock_transactions_query.assert_called_once()
@mock.patch("sentry.snuba.discover.query")
def test_metrics_incompatible_query_redirects_to_discover_when_not_flagged(
self, mock_discover_query
):
self.store_transaction_metric(
33,
metric="measurements.datacenter_memory",
internal_metric="d:transactions/measurements.datacenter_memory@petabyte",
entity="metrics_distributions",
tags={"transaction": "foo_transaction"},
timestamp=before_now(days=1),
)
transaction_data = load_data("transaction", timestamp=before_now(days=1))
transaction_data["measurements"]["datacenter_memory"] = {
"value": 33,
"unit": "petabyte",
}
self.store_event(transaction_data, self.project.id)
metrics_enhanced_performance.query(
selected_columns=[
"transaction",
"measurements.datacenter_memory",
],
# Equations are not compatible with metrics in MEP, forces a fallback
equations=["measurements.datacenter_memory / 3"],
query="",
snuba_params=self.snuba_params,
referrer="test_query",
auto_fields=True,
fallback_to_transactions=False,
)
mock_discover_query.assert_called_once()
| MetricsEnhancedPerformanceTest |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/tests/test_bases.py | {
"start": 240,
"end": 3208
} | class ____:
class DummyStep(steps.Step):
title = "Dummy step"
max_retries = 3
max_duration = timedelta(seconds=2)
async def _run(self, run_duration: timedelta) -> steps.StepResult:
await anyio.sleep(run_duration.total_seconds())
return steps.StepResult(step=self, status=steps.StepStatus.SUCCESS)
@pytest.fixture
def test_context(self, mocker):
return mocker.Mock(secrets_to_mask=[])
async def test_run_with_timeout(self, test_context):
step = self.DummyStep(test_context)
step_result = await step.run(run_duration=step.max_duration - timedelta(seconds=1))
assert step_result.status == steps.StepStatus.SUCCESS
assert step.retry_count == 0
step_result = await step.run(run_duration=step.max_duration + timedelta(seconds=1))
timed_out_step_result = step._get_timed_out_step_result()
assert step_result.status == timed_out_step_result.status
assert step_result.stdout == timed_out_step_result.stdout
assert step_result.stderr == timed_out_step_result.stderr
assert step_result.output == timed_out_step_result.output
assert step.retry_count == step.max_retries + 1
@pytest.mark.parametrize(
"step_status, exc_info, max_retries, max_dagger_error_retries, expect_retry",
[
(steps.StepStatus.SUCCESS, None, 0, 0, False),
(steps.StepStatus.SUCCESS, None, 3, 0, False),
(steps.StepStatus.SUCCESS, None, 0, 3, False),
(steps.StepStatus.SUCCESS, None, 3, 3, False),
(steps.StepStatus.SKIPPED, None, 0, 0, False),
(steps.StepStatus.SKIPPED, None, 3, 0, False),
(steps.StepStatus.SKIPPED, None, 0, 3, False),
(steps.StepStatus.SKIPPED, None, 3, 3, False),
(steps.StepStatus.FAILURE, DaggerError(), 0, 0, False),
(steps.StepStatus.FAILURE, DaggerError(), 0, 3, True),
(steps.StepStatus.FAILURE, None, 0, 0, False),
(steps.StepStatus.FAILURE, None, 0, 3, False),
(steps.StepStatus.FAILURE, None, 3, 0, True),
],
)
async def test_run_with_retries(self, mocker, test_context, step_status, exc_info, max_retries, max_dagger_error_retries, expect_retry):
step = self.DummyStep(test_context)
step.max_dagger_error_retries = max_dagger_error_retries
step.max_retries = max_retries
step.max_duration = timedelta(seconds=60)
step.retry_delay = timedelta(seconds=0)
step._run = mocker.AsyncMock(
side_effect=[steps.StepResult(step=step, status=step_status, exc_info=exc_info)]
* (max(max_dagger_error_retries, max_retries) + 1)
)
step_result = await step.run()
if expect_retry:
assert step.retry_count > 0
else:
assert step.retry_count == 0
assert step_result.status == step_status
| TestStep |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 9735,
"end": 9966
} | class ____:
@cached_property
def output_field(self):
return DistanceField(self.geo_field)
def source_is_geography(self):
return self.geo_field.geography and self.geo_field.srid == 4326
| DistanceResultMixin |
python | sqlalchemy__sqlalchemy | test/ext/asyncio/test_session.py | {
"start": 24347,
"end": 25541
} | class ____(AsyncFixture):
run_inserts = None
@classmethod
def setup_mappers(cls):
User, Address = cls.classes("User", "Address")
users, addresses = cls.tables("users", "addresses")
cls.mapper(
User,
users,
properties={
"addresses": relationship(
Address, cascade="all, delete-orphan"
)
},
)
cls.mapper(
Address,
addresses,
)
@async_test
async def test_delete_w_cascade(self, async_session):
User = self.classes.User
Address = self.classes.Address
async with async_session.begin():
u1 = User(id=1, name="u1", addresses=[Address(email_address="e1")])
async_session.add(u1)
async with async_session.begin():
u1 = (await async_session.execute(select(User))).scalar_one()
await async_session.delete(u1)
eq_(
(
await async_session.execute(
select(func.count()).select_from(Address)
)
).scalar(),
0,
)
| AsyncCascadesTest |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 79887,
"end": 81593
} | class ____(BaseDataset):
"""
Test the symmetry of operators, at least with the numpy types.
Issue: https://github.com/h5py/h5py/issues/1947
"""
def test_numpy_commutative(self,):
"""
Create a h5py dataset, extract one element convert to numpy
Check that it returns symmetric response to == and !=
"""
shape = (100,1)
dset = self.f.create_dataset(make_name(), shape, dtype=float,
data=np.random.rand(*shape))
# grab a value from the elements, ie dset[0, 0]
# check that mask arrays are commutative wrt ==, !=
val = np.float64(dset[0, 0])
assert np.all((val == dset) == (dset == val))
assert np.all((val != dset) == (dset != val))
# generate sample not in the dset, ie max(dset)+delta
# check that mask arrays are commutative wrt ==, !=
delta = 0.001
nval = np.nanmax(dset)+delta
assert np.all((nval == dset) == (dset == nval))
assert np.all((nval != dset) == (dset != nval))
def test_basetype_commutative(self,):
"""
Create a h5py dataset and check basetype compatibility.
Check that operation is symmetric, even if it is potentially
not meaningful.
"""
shape = (100,1)
dset = self.f.create_dataset(make_name(), shape, dtype=float,
data=np.random.rand(*shape))
# generate float type, sample float(0.)
# check that operation is symmetric (but potentially meaningless)
val = float(0.)
assert (val == dset) == (dset == val)
assert (val != dset) == (dset != val)
| TestCommutative |
python | coleifer__peewee | tests/models.py | {
"start": 1895,
"end": 2060
} | class ____(TestModel):
name = CharField()
city = ForeignKeyField(City, backref='venues')
city_n = ForeignKeyField(City, backref='venues_n', null=True)
| Venue |
python | PyCQA__pylint | tests/functional/a/async_functions.py | {
"start": 314,
"end": 387
} | class ____:
@staticmethod
def test():
return 42
| OtherClass |
python | jmcnamara__XlsxWriter | xlsxwriter/feature_property_bag.py | {
"start": 354,
"end": 4283
} | class ____(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX FeaturePropertyBag file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self) -> None:
"""
Constructor.
"""
super().__init__()
self.feature_property_bags = set()
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self) -> None:
# Assemble and write the XML file.
# Write the XML declaration.
self._xml_declaration()
# Write the FeaturePropertyBags element.
self._write_feature_property_bags()
# Write the Checkbox bag element.
self._write_checkbox_bag()
# Write the XFControls bag element.
self._write_xf_control_bag()
# Write the XFComplement bag element.
self._write_xf_compliment_bag()
# Write the XFComplements bag element.
self._write_xf_compliments_bag()
# Write the DXFComplements bag element.
if "DXFComplements" in self.feature_property_bags:
self._write_dxf_compliments_bag()
self._xml_end_tag("FeaturePropertyBags")
# Close the file.
self._xml_close()
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_feature_property_bags(self) -> None:
# Write the <FeaturePropertyBags> element.
xmlns = (
"http://schemas.microsoft.com/office/spreadsheetml/2022/featurepropertybag"
)
attributes = [("xmlns", xmlns)]
self._xml_start_tag("FeaturePropertyBags", attributes)
def _write_checkbox_bag(self) -> None:
# Write the Checkbox <bag> element.
attributes = [("type", "Checkbox")]
self._xml_empty_tag("bag", attributes)
def _write_xf_control_bag(self) -> None:
# Write the XFControls<bag> element.
attributes = [("type", "XFControls")]
self._xml_start_tag("bag", attributes)
# Write the bagId element.
self._write_bag_id("CellControl", 0)
self._xml_end_tag("bag")
def _write_xf_compliment_bag(self) -> None:
# Write the XFComplement <bag> element.
attributes = [("type", "XFComplement")]
self._xml_start_tag("bag", attributes)
# Write the bagId element.
self._write_bag_id("XFControls", 1)
self._xml_end_tag("bag")
def _write_xf_compliments_bag(self) -> None:
# Write the XFComplements <bag> element.
attributes = [
("type", "XFComplements"),
("extRef", "XFComplementsMapperExtRef"),
]
self._xml_start_tag("bag", attributes)
self._xml_start_tag("a", [("k", "MappedFeaturePropertyBags")])
self._write_bag_id("", 2)
self._xml_end_tag("a")
self._xml_end_tag("bag")
def _write_dxf_compliments_bag(self) -> None:
# Write the DXFComplements <bag> element.
attributes = [
("type", "DXFComplements"),
("extRef", "DXFComplementsMapperExtRef"),
]
self._xml_start_tag("bag", attributes)
self._xml_start_tag("a", [("k", "MappedFeaturePropertyBags")])
self._write_bag_id("", 2)
self._xml_end_tag("a")
self._xml_end_tag("bag")
def _write_bag_id(self, key, bag_id) -> None:
# Write the <bagId> element.
attributes = []
if key:
attributes = [("k", key)]
self._xml_data_element("bagId", bag_id, attributes)
| FeaturePropertyBag |
python | huggingface__transformers | src/transformers/models/qwen3_vl/modular_qwen3_vl.py | {
"start": 15594,
"end": 17855
} | class ____(LlamaRotaryEmbedding):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Qwen3VLTextConfig, device=None):
super().__init__(config, device=device)
self.mrope_section = config.rope_parameters.get("mrope_section", [24, 20, 20])
def apply_interleaved_mrope(self, freqs, mrope_section):
"""Apply interleaved MRoPE to 3D rotary embeddings.
Reorganizes frequency layout from chunked [TTT...HHH...WWW] to
interleaved [THWTHWTHW...TT], preserving frequency continuity.
args:
x: (3, bs, seq_len, head_dim // 2)
mrope_section: (3,)
returns:
x_t: (bs, seq_len, head_dim // 2)
"""
freqs_t = freqs[0] # just overwrite the first dimension T
for dim, offset in enumerate((1, 2), start=1): # H, W
length = mrope_section[dim] * 3
idx = slice(offset, length, 3)
freqs_t[..., idx] = freqs[dim, ..., idx]
return freqs_t
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
# In contrast to other models, Qwen3VL has different position ids for the grids
# So we expand the inv_freq to shape (3, ...)
if position_ids.ndim == 2:
position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions)
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
freqs = self.apply_interleaved_mrope(freqs, self.mrope_section)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
| Qwen3VLTextRotaryEmbedding |
python | gevent__gevent | src/gevent/resolver/dnspython.py | {
"start": 11053,
"end": 12975
} | class ____(object):
def __init__(self):
self.hosts_resolver = _HostsResolver()
self.network_resolver = resolver.get_default_resolver()
self.network_resolver.cache = resolver.LRUCache()
def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
tcp=False, source=None, raise_on_no_answer=True,
_hosts_rdtypes=(dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.PTR)):
# Query the resolver, using /etc/hosts
# Behavior:
# 1. if hosts is enabled and contains answer, return it now
# 2. query nameservers for qname
if qname is None:
qname = '0.0.0.0'
if not isinstance(qname, string_types):
if isinstance(qname, bytes):
qname = qname.decode("idna")
if isinstance(qname, string_types):
qname = dns.name.from_text(qname, None)
if isinstance(rdtype, string_types):
rdtype = dns.rdatatype.from_text(rdtype)
if rdclass == dns.rdataclass.IN and rdtype in _hosts_rdtypes:
try:
answer = self.hosts_resolver.query(qname, rdtype, raise_on_no_answer=False)
except Exception: # pylint: disable=broad-except
from gevent import get_hub
get_hub().handle_error(self, *sys.exc_info())
else:
if answer.rrset:
return answer
return self.network_resolver.query(qname, rdtype, rdclass,
tcp, source, raise_on_no_answer=raise_on_no_answer)
def _family_to_rdtype(family):
if family == socket.AF_INET:
rdtype = dns.rdatatype.A
elif family == socket.AF_INET6:
rdtype = dns.rdatatype.AAAA
else:
raise socket.gaierror(socket.EAI_FAMILY,
'Address family not supported')
return rdtype
| _DualResolver |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/update/tutorial001_py39.py | {
"start": 469,
"end": 2281
} | class ____(SQLModel):
name: Optional[str] = None
secret_name: Optional[str] = None
age: Optional[int] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(hero: HeroCreate):
with Session(engine) as session:
db_hero = Hero.model_validate(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=list[HeroPublic])
def read_heroes(offset: int = 0, limit: int = Query(default=100, le=100)):
with Session(engine) as session:
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroPublic)
def read_hero(hero_id: int):
with Session(engine) as session:
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
@app.patch("/heroes/{hero_id}", response_model=HeroPublic)
def update_hero(hero_id: int, hero: HeroUpdate):
with Session(engine) as session:
db_hero = session.get(Hero, hero_id)
if not db_hero:
raise HTTPException(status_code=404, detail="Hero not found")
hero_data = hero.model_dump(exclude_unset=True)
db_hero.sqlmodel_update(hero_data)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
| HeroUpdate |
python | weaviate__weaviate-python-client | weaviate/groups/async_.py | {
"start": 165,
"end": 312
} | class ____:
def __init__(self, connection: ConnectionAsync):
self.oidc = _GroupsOIDCAsync(connection)
@executor.wrap("sync")
| _GroupsAsync |
python | bokeh__bokeh | src/bokeh/models/formatters.py | {
"start": 13745,
"end": 28853
} | class ____(TickFormatter):
''' A ``TickFormatter`` for displaying datetime values nicely across a
range of scales.
``DatetimeTickFormatter`` has the following properties (listed together
with their default values) that can be used to control the formatting
of axis ticks at different scales:
{defaults}
Each scale property can be set to format or list of formats to use for
formatting datetime tick values that fall in that "time scale".
By default, only the first format string passed for each time scale
will be used. By default, all leading zeros are stripped away from
the formatted labels.
This list of supported `strftime`_ formats is reproduced below.
+----+------------------------------------------------------------------+
| %a | The abbreviated name of the day of the week according to the |
| | current locale. |
+----+------------------------------------------------------------------+
| %A | The full name of the day of the week according to the current |
| | locale. |
+----+------------------------------------------------------------------+
| %b | The abbreviated month name according to the current locale. |
+----+------------------------------------------------------------------+
| %B | The full month name according to the current locale. |
+----+------------------------------------------------------------------+
| %c | The preferred date and time representation for the current |
| | locale. |
+----+------------------------------------------------------------------+
| %C | The century number (year/100) as a 2-digit integer. |
+----+------------------------------------------------------------------+
| %d | The day of the month as a decimal number (range 01 to 31). |
+----+------------------------------------------------------------------+
| %D | Equivalent to **%m/%d/%y**. (Americans should note that in many |
| | other countries **%d/%m/%y** is rather common. This means that |
| | in international context this format is ambiguous and should not |
| | be used.) |
+----+------------------------------------------------------------------+
| %e | Like **%d**, the day of the month as a decimal number, but a |
| | leading zero is replaced by a space. |
+----+------------------------------------------------------------------+
| %f | Microsecond as a decimal number, zero-padded on the left (range |
| | 000000-999999). This is an extension to the set of directives |
| | available to `timezone`_. |
+----+------------------------------------------------------------------+
| %F | Equivalent to **%Y-%m-%d** (the ISO 8601 date format). |
+----+------------------------------------------------------------------+
| %G | The ISO 8601 week-based year with century as a decimal number. |
| | The 4-digit year corresponding to the ISO week number (see |
| | **%V**). This has the same format and value as **%Y**, except |
| | that if the ISO week number belongs to the previous or next |
| | year, that year is used instead. |
+----+------------------------------------------------------------------+
| %g | Like **%G**, but without century, that is, with a 2-digit year |
| | (00-99). |
+----+------------------------------------------------------------------+
| %h | Equivalent to **%b**. |
+----+------------------------------------------------------------------+
| %H | The hour as a decimal number using a 24-hour clock (range 00 to |
| | 23). |
+----+------------------------------------------------------------------+
| %I | The hour as a decimal number using a 12-hour clock (range 01 to |
| | 12). |
+----+------------------------------------------------------------------+
| %j | The day of the year as a decimal number (range 001 to 366). |
+----+------------------------------------------------------------------+
| %k | The hour (24-hour clock) as a decimal number (range 0 to 23). |
| | Single digits are preceded by a blank. See also **%H**. |
+----+------------------------------------------------------------------+
| %l | The hour (12-hour clock) as a decimal number (range 1 to 12). |
| | Single digits are preceded by a blank. See also **%I**. |
+----+------------------------------------------------------------------+
| %m | The month as a decimal number (range 01 to 12). |
+----+------------------------------------------------------------------+
| %M | The minute as a decimal number (range 00 to 59). |
+----+------------------------------------------------------------------+
| %n | A newline character. Bokeh text does not currently support |
| | newline characters. |
+----+------------------------------------------------------------------+
| %N | Nanosecond as a decimal number, zero-padded on the left (range |
| | 000000000-999999999). Supports a padding width specifier, i.e. |
| | **%3N** displays 3 leftmost digits. However, this is only |
| | accurate to the millisecond level of precision due to |
| | limitations of `timezone`_. |
+----+------------------------------------------------------------------+
| %p | Either "AM" or "PM" according to the given time value, or the |
| | corresponding strings for the current locale. Noon is treated |
| | as "PM" and midnight as "AM". |
+----+------------------------------------------------------------------+
| %P | Like **%p** but in lowercase: "am" or "pm" or a corresponding |
| | string for the current locale. |
+----+------------------------------------------------------------------+
| %r | The time in a.m. or p.m. notation. In the POSIX locale this is |
| | equivalent to **%I:%M:%S %p**. |
+----+------------------------------------------------------------------+
| %R | The time in 24-hour notation (**%H:%M**). For a version |
| | including the seconds, see **%T** below. |
+----+------------------------------------------------------------------+
| %s | The number of seconds since the Epoch, 1970-01-01 00:00:00+0000 |
| | (UTC). |
+----+------------------------------------------------------------------+
| %S | The second as a decimal number (range 00 to 60). (The range is |
| | up to 60 to allow for occasional leap seconds.) |
+----+------------------------------------------------------------------+
| %t | A tab character. Bokeh text does not currently support tab |
| | characters. |
+----+------------------------------------------------------------------+
| %T | The time in 24-hour notation (**%H:%M:%S**). |
+----+------------------------------------------------------------------+
| %u | The day of the week as a decimal, range 1 to 7, Monday being 1. |
| | See also **%w**. |
+----+------------------------------------------------------------------+
| %U | The week number of the current year as a decimal number, range |
| | 00 to 53, starting with the first Sunday as the first day of |
| | week 01. See also **%V** and **%W**. |
+----+------------------------------------------------------------------+
| %V | The ISO 8601 week number (see NOTES) of the current year as a |
| | decimal number, range 01 to 53, where week 1 is the first week |
| | that has at least 4 days in the new year. See also **%U** and |
| | **%W.** |
+----+------------------------------------------------------------------+
| %w | The day of the week as a decimal, range 0 to 6, Sunday being 0. |
| | See also **%u**. |
+----+------------------------------------------------------------------+
| %W | The week number of the current year as a decimal number, range |
| | 00 to 53, starting with the first Monday as the first day of |
| | week 01. |
+----+------------------------------------------------------------------+
| %x | The preferred date representation for the current locale without |
| | the time. |
+----+------------------------------------------------------------------+
| %X | The preferred time representation for the current locale without |
| | the date. |
+----+------------------------------------------------------------------+
| %y | The year as a decimal number without a century (range 00 to 99). |
+----+------------------------------------------------------------------+
| %Y | The year as a decimal number including the century. |
+----+------------------------------------------------------------------+
| %z | The +hhmm or -hhmm numeric timezone (that is, the hour and |
| | minute offset from UTC). |
+----+------------------------------------------------------------------+
| %Z | The timezone name or abbreviation. |
+----+------------------------------------------------------------------+
| %% | A literal '%' character. |
+----+------------------------------------------------------------------+
.. warning::
The client library BokehJS uses the `timezone`_ library to
format datetimes. The inclusion of the list below is based on the
claim that `timezone`_ makes to support "the full complement
of GNU date format specifiers." However, this claim has not
been tested exhaustively against this list. If you find formats
that do not function as expected, please submit a `GitHub issue`_,
so that the documentation can be updated appropriately.
.. _strftime: http://man7.org/linux/man-pages/man3/strftime.3.html
.. _timezone: http://bigeasy.github.io/timezone/
.. _github issue: https://github.com/bokeh/bokeh/issues
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
microseconds = String(help=_DATETIME_TICK_FORMATTER_HELP("``microseconds``"),
default="%fus")
milliseconds = String(help=_DATETIME_TICK_FORMATTER_HELP("``milliseconds``"),
default="%3Nms")
seconds = String(help=_DATETIME_TICK_FORMATTER_HELP("``seconds``"),
default="%Ss")
minsec = String(help=_DATETIME_TICK_FORMATTER_HELP("``minsec`` (for combined minutes and seconds)"),
default=":%M:%S")
minutes = String(help=_DATETIME_TICK_FORMATTER_HELP("``minutes``"),
default=":%M")
hourmin = String(help=_DATETIME_TICK_FORMATTER_HELP("``hourmin`` (for combined hours and minutes)"),
default="%H:%M")
hours = String(help=_DATETIME_TICK_FORMATTER_HELP("``hours``"),
default="%Hh")
days = String(help=_DATETIME_TICK_FORMATTER_HELP("``days``"),
default="%m/%d")
months = String(help=_DATETIME_TICK_FORMATTER_HELP("``months``"),
default="%m/%Y")
years = String(help=_DATETIME_TICK_FORMATTER_HELP("``years``"),
default="%Y")
strip_leading_zeros = Either(Bool, Seq(Enum(ResolutionType)), default=False, help="""
Whether to strip any leading zeros in the formatted ticks.
Valid values are:
* ``True`` or ``False`` (default) to set stripping across all resolutions.
* A sequence of resolution types, e.g. ``["microseconds", "milliseconds"]``, to enable
scale-dependent stripping of leading zeros.
""")
boundary_scaling = Bool(default=True, help="""
Whether to scale up ticks that are right at the boundary of the next higher resolution of time.
E.g. at the hours scale a tick sequence of ["00h", "06h", "12h", 18h", "00h"] will see a scale up of the "00h" ticks
as they are on boundary of the days scale: ["06/08", "06h", "12h", 18h", "06/09"]
""")
hide_repeats = Bool(default=False, help="""
Whether repeated formatted tick values will be suppressed.
For example, an initial set of ticks ``["06/07", "06/07", "06/07", "06/08",
"06/08"]`` will become ``["06/07", "", "", "06/08", ""]``. Only the base
label, without any additional context, is considered when determining
repeats. If the context itself is a ``DatetimeTickFormatter``, then this
property may also be set for the context separately, if desired.
""")
context = Nullable(Either(String, Instance("bokeh.models.formatters.DatetimeTickFormatter")), default=None, help="""
A format for adding context to the tick or ticks specified by ``context_which``.
Valid values are:
* None, no context is added
* A standard :class:`~bokeh.models.DatetimeTickFormatter` format string, the single format is
used across all scales
* Another :class:`~bokeh.models.DatetimeTickFormatter` instance, to have scale-dependent
context
""")
context_which = Enum(ContextWhich, default="start", help="""
Which tick or ticks to add a formatted context string to. Valid values are:
`"start"`, `"end"`, `"center"`, and `"all"`.
""")
context_location = Enum(Location, default="below", help="""
Relative to the tick label text baseline, where the context should be
rendered. Valid values are: `"below"`, `"above"`, `"left"`, and `"right"`.
""")
| DatetimeTickFormatter |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 26916,
"end": 27619
} | class ____(StructModel):
def __init__(self, dmm, fe_type):
payload_type = types.SetPayload(fe_type.container)
members = [
# The meminfo data points to a SetPayload (shared with the
# original set object)
('meminfo', types.MemInfoPointer(payload_type)),
# The index into the entries table
('index', types.EphemeralPointer(types.intp)),
]
super(SetIterModel, self).__init__(dmm, fe_type, members)
@register_default(types.Array)
@register_default(types.Buffer)
@register_default(types.ByteArray)
@register_default(types.Bytes)
@register_default(types.MemoryView)
@register_default(types.PyArray)
| SetIterModel |
python | allegroai__clearml | clearml/backend_api/services/v2_9/models.py | {
"start": 61832,
"end": 66505
} | class ____(Response):
"""
Response of models.get_by_id endpoint.
:param model: Model info
:type model: Model
"""
_service = "models"
_action = "get_by_id"
_version = "2.9"
_schema = {
"definitions": {
"model": {
"properties": {
"comment": {
"description": "Model comment",
"type": ["string", "null"],
},
"company": {
"description": "Company id",
"type": ["string", "null"],
},
"created": {
"description": "Model creation time",
"format": "date-time",
"type": ["string", "null"],
},
"design": {
"additionalProperties": True,
"description": "Json object representing the model design. Should be identical to the network design of the task which created the model",
"type": ["object", "null"],
},
"framework": {
"description": "Framework on which the model is based. Should be identical to the framework of the task which created the model",
"type": ["string", "null"],
},
"id": {"description": "Model id", "type": ["string", "null"]},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model. The keys are the layers' names and the values are the ids.",
"type": ["object", "null"],
},
"name": {"description": "Model name", "type": ["string", "null"]},
"parent": {
"description": "Parent model ID",
"type": ["string", "null"],
},
"project": {
"description": "Associated project ID",
"type": ["string", "null"],
},
"ready": {
"description": "Indication if the model is final and can be used by other tasks",
"type": ["boolean", "null"],
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task ID of task in which the model was created",
"type": ["string", "null"],
},
"ui_cache": {
"additionalProperties": True,
"description": "UI cache for this model",
"type": ["object", "null"],
},
"uri": {
"description": "URI for the model, pointing to the destination storage.",
"type": ["string", "null"],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"model": {
"description": "Model info",
"oneOf": [{"$ref": "#/definitions/model"}, {"type": "null"}],
}
},
"type": "object",
}
def __init__(self, model: Any = None, **kwargs: Any) -> None:
super(GetByIdResponse, self).__init__(**kwargs)
self.model = model
@schema_property("model")
def model(self) -> Any:
return self._property_model
@model.setter
def model(self, value: Any) -> None:
if value is None:
self._property_model = None
return
if isinstance(value, dict):
value = Model.from_dict(value)
else:
self.assert_isinstance(value, "model", Model)
self._property_model = value
| GetByIdResponse |
python | PrefectHQ__prefect | src/prefect/events/schemas/events.py | {
"start": 764,
"end": 2173
} | class ____(Labelled):
"""An observable business object of interest to the user"""
@model_validator(mode="after")
def enforce_maximum_labels(self) -> Self:
if len(self.root) > PREFECT_EVENTS_MAXIMUM_LABELS_PER_RESOURCE.value():
raise ValueError(
"The maximum number of labels per resource "
f"is {PREFECT_EVENTS_MAXIMUM_LABELS_PER_RESOURCE.value()}"
)
return self
@model_validator(mode="after")
def requires_resource_id(self) -> Self:
if "prefect.resource.id" not in self.root:
raise ValueError("Resources must include the prefect.resource.id label")
if not self.root["prefect.resource.id"]:
raise ValueError("The prefect.resource.id label must be non-empty")
return self
@property
def id(self) -> str:
return self["prefect.resource.id"]
@property
def name(self) -> Optional[str]:
return self.get("prefect.resource.name")
def prefect_object_id(self, kind: str) -> UUID:
"""Extracts the UUID from an event's resource ID if it's the expected kind
of prefect resource"""
prefix = f"{kind}." if not kind.endswith(".") else kind
if not self.id.startswith(prefix):
raise ValueError(f"Resource ID {self.id} does not start with {prefix}")
return UUID(self.id[len(prefix) :])
| Resource |
python | optuna__optuna | optuna/samplers/nsgaii/_crossovers/_blxalpha.py | {
"start": 295,
"end": 1675
} | class ____(BaseCrossover):
"""Blend Crossover operation used by :class:`~optuna.samplers.NSGAIISampler`.
Uniformly samples child individuals from the hyper-rectangles created
by the two parent individuals. For further information about BLX-alpha crossover,
please refer to the following paper:
- `Eshelman, L. and J. D. Schaffer.
Real-Coded Genetic Algorithms and Interval-Schemata. FOGA (1992).
<https://doi.org/10.1016/B978-0-08-094832-4.50018-0>`__
Args:
alpha:
Parametrizes blend operation.
"""
n_parents = 2
def __init__(self, alpha: float = 0.5) -> None:
self._alpha = alpha
def crossover(
self,
parents_params: np.ndarray,
rng: np.random.RandomState,
study: Study,
search_space_bounds: np.ndarray,
) -> np.ndarray:
# https://doi.org/10.1109/CEC.2001.934452
# Section 2 Crossover Operators for RCGA 2.1 Blend Crossover
parents_min = parents_params.min(axis=0)
parents_max = parents_params.max(axis=0)
diff = self._alpha * (parents_max - parents_min) # Equation (1).
low = parents_min - diff # Equation (1).
high = parents_max + diff # Equation (1).
r = rng.rand(len(search_space_bounds))
child_params = (high - low) * r + low
return child_params
| BLXAlphaCrossover |
python | tensorflow__tensorflow | tensorflow/python/keras/engine/base_layer_v1.py | {
"start": 3301,
"end": 96367
} | class ____(base_layer.Layer):
"""Base layer class.
This is the class from which all layers inherit.
A layer is a class implementing common neural networks operations, such
as convolution, batch norm, etc. These operations require managing weights,
losses, updates, and inter-layer connectivity.
Users will just instantiate a layer and then treat it as a callable.
We recommend that descendants of `Layer` implement the following methods:
* `__init__()`: Save configuration in member variables
* `build()`: Called once from `__call__`, when we know the shapes of inputs
and `dtype`. Should have the calls to `add_weight()`, and then
call the super's `build()` (which sets `self.built = True`, which is
nice in case the user wants to call `build()` manually before the
first `__call__`).
* `call()`: Called in `__call__` after making sure `build()` has been called
once. Should actually perform the logic of applying the layer to the
input tensors (which should be passed in as the first argument).
Args:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: The dtype of the layer's computations and weights (default of
`None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type
of the first input in TensorFlow 1).
dynamic: Set this to `True` if your layer should only be run eagerly, and
should not be used to generate a static computation graph.
This would be the case for a Tree-RNN or a recursive network,
for example, or generally for any layer that manipulates tensors
using Python control flow. If `False`, we assume that the layer can
safely be used to generate a static computation graph.
Attributes:
name: The name of the layer (string).
dtype: The dtype of the layer's computations and weights. If mixed
precision is used with a `tf.keras.mixed_precision.Policy`, this is
instead just the dtype of the layer's weights, as the computations are
done in a different dtype.
updates: List of update ops of this layer.
losses: List of losses added by this layer.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
trainable: Whether the layer should be trained (boolean).
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
Each layer has a dtype, which is typically the dtype of the layer's
computations and variables. A layer's dtype can be queried via the
`Layer.dtype` property. The dtype is specified with the `dtype` constructor
argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()`
if no dtype is passed. `floatx()` itself defaults to "float32". Additionally,
layers will cast their inputs to the layer's dtype in TensorFlow 2. When mixed
precision is used, layers may have different computation and variable dtypes.
See `tf.keras.mixed_precision.Policy` for details on layer dtypes.
"""
# See tf.Module for the usage of this property.
# The key for _obj_reference_counts_dict is a Trackable, which could be a
# variable or layer etc. tf.Module._flatten will fail to flatten the key
# since it is trying to convert Trackable to a string. This attribute can be
# ignored even after the fix of nest lib, since the trackable object should
# already been available as individual attributes. _obj_reference_counts_dict
# just contains a copy of them.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_obj_reference_counts_dict',),
module.Module._TF_MODULE_IGNORED_PROPERTIES
))
@trackable.no_automatic_dependency_tracking
def __init__(self, trainable=True, name=None, dtype=None, dynamic=False,
**kwargs):
self._instrument_layer_creation()
# These properties should be set by the user via keyword arguments.
# note that 'dtype', 'input_shape' and 'batch_input_shape'
# are only applicable to input layers: do not pass these keywords
# to non-input layers.
allowed_kwargs = {
'input_dim', 'input_shape', 'batch_input_shape', 'batch_size',
'weights', 'activity_regularizer', 'autocast', 'implementation'
}
# Validate optional keyword arguments.
generic_utils.validate_kwargs(kwargs, allowed_kwargs)
# Mutable properties
# Indicates whether the layer's weights are updated during training
# and whether the layer's updates are run during training.
self._trainable = trainable
# A stateful layer is a layer whose updates are run during inference too,
# for instance stateful RNNs.
self._stateful = False
# Indicates whether `build` needs to be called upon layer call, to create
# the layer's weights.
self.built = False
self._build_input_shape = None
# Provides information about which inputs are compatible with the layer.
self._input_spec = None
self.supports_masking = False
self._init_set_name(name)
self._activity_regularizer = regularizers.get(
kwargs.pop('activity_regularizer', None))
self._maybe_create_attribute('_trainable_weights', [])
self._maybe_create_attribute('_non_trainable_weights', [])
self._updates = []
# Object to store all thread local layer properties.
self._thread_local = threading.local()
# A list of zero-argument lambdas which return Tensors, used for variable
# regularizers.
self._callable_losses = []
# A list of symbolic Tensors containing activity regularizers and losses
# manually added through `add_loss` in graph-building mode.
self._losses = []
# A list of metric instances corresponding to the symbolic metric tensors
# added using the `add_metric` API.
self._metrics = []
# Both graph and subclassed networks have a dtype policy. For graph
# networks, the policy's compute and variable dtypes are ignored. Such
# networks only use the policy if it is a PolicyV1, in which case it uses
# the PolicyV1's loss_scale (Policy does not have a loss_scale). For
# subclassed networks, the compute and variable dtypes are used as like any
# ordinary layer.
self._set_dtype_policy(dtype)
# Boolean indicating whether the layer automatically casts its inputs to the
# layer's compute_dtype.
self._autocast = kwargs.get('autocast',
base_layer_utils.v2_dtype_behavior_enabled())
# Dependencies tracked via attribute assignment.
# All layers in order of horizontal graph traversal.
# Entries are unique. For models includes input and output layers.
self._maybe_create_attribute('_self_tracked_trackables', [])
# These lists will be filled via successive calls
# to self._add_inbound_node().
# Used in symbolic mode only, only in conjunction with graph-networks
self._inbound_nodes_value = []
self._outbound_nodes_value = []
self._init_call_fn_args()
# Whether the `call` method can be used to build a TF graph without issues.
# This attribute has no effect if the model is created using the Functional
# API. Instead, `model.dynamic` is determined based on the internal layers.
self._dynamic = dynamic
# Manage input shape information if passed.
if 'input_dim' in kwargs and 'input_shape' not in kwargs:
# Backwards compatibility: alias 'input_dim' to 'input_shape'.
kwargs['input_shape'] = (kwargs['input_dim'],)
if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
# In this case we will later create an input layer
# to insert before the current layer
if 'batch_input_shape' in kwargs:
batch_input_shape = tuple(kwargs['batch_input_shape'])
elif 'input_shape' in kwargs:
if 'batch_size' in kwargs:
batch_size = kwargs['batch_size']
else:
batch_size = None
batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
self._batch_input_shape = batch_input_shape
# Manage initial weight values if passed.
self._initial_weights = kwargs.get('weights', None)
# Whether the layer will track any layers that is set as attribute on itself
# as sub-layers, the weights from the sub-layers will be included in the
# parent layer's variables() as well.
# Default to True, which means auto tracking is turned on. Certain subclass
# might want to turn it off, like Sequential model.
self._auto_track_sub_layers = True
# Mark this layer as having been originally built as a tf1 layer/model
self._originally_built_as_v1 = True
# For backwards compat reasons, most built-in layers do not guarantee
# That they will 100% preserve the structure of input args when saving
# / loading configs. E.g. they may un-nest an arg that is
# a list with one element.
self._preserve_input_structure_in_config = False
@trackable.no_automatic_dependency_tracking
@generic_utils.default
def build(self, input_shape):
"""Creates the variables of the layer (optional, for subclass implementers).
This is a method that implementers of subclasses of `Layer` or `Model`
can override if they need a state-creation step in-between
layer instantiation and layer call.
This is typically used to create the weights of `Layer` subclasses.
Args:
input_shape: Instance of `TensorShape`, or list of instances of
`TensorShape` if the layer expects a list of inputs
(one instance per input).
"""
if not hasattr(self.build, '_is_default'):
self._build_input_shape = input_shape
self.built = True
@doc_controls.for_subclass_implementers
def call(self, inputs, **kwargs): # pylint: disable=unused-argument
"""This is where the layer's logic lives.
Args:
inputs: Input tensor, or list/tuple of input tensors.
**kwargs: Additional keyword arguments.
Returns:
A tensor or list/tuple of tensors.
"""
return inputs
@doc_controls.for_subclass_implementers
def _add_trackable(self, trackable_object, trainable):
"""Adds a Trackable object to this layer's state.
Args:
trackable_object: The tf.tracking.Trackable object to add.
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases) or
"non_trainable_variables" (e.g. BatchNorm mean and variance).
Returns:
The TrackableWeightHandler used to track this object.
"""
if isinstance(trackable_object, base_layer_utils.TrackableWeightHandler):
handler = trackable_object
else:
handler = base_layer_utils.TrackableWeightHandler(trackable_object)
if trainable:
self._trainable_weights.append(handler)
else:
self._non_trainable_weights.append(handler)
return handler
@doc_controls.for_subclass_implementers
def add_weight(self,
name=None,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=None,
constraint=None,
partitioner=None,
use_resource=None,
synchronization=tf_variables.VariableSynchronization.AUTO,
aggregation=tf_variables.VariableAggregation.NONE,
**kwargs):
"""Adds a new variable to the layer.
Args:
name: Variable name.
shape: Variable shape. Defaults to scalar if unspecified.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
initializer: Initializer instance (callable).
regularizer: Regularizer instance (callable).
trainable: Boolean, whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean and variance).
Note that `trainable` cannot be `True` if `synchronization`
is set to `ON_READ`.
constraint: Constraint instance (callable).
partitioner: Partitioner to be passed to the `Trackable` API.
use_resource: Whether to use `ResourceVariable`.
synchronization: Indicates when a distributed a variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
**kwargs: Additional keyword arguments. Accepted values are `getter`,
`collections`, `experimental_autocast` and `caching_device`.
Returns:
The created variable. Usually either a `Variable` or `ResourceVariable`
instance. If `partitioner` is not `None`, a `PartitionedVariable`
instance is returned.
Raises:
RuntimeError: If called with partitioned variable regularization and
eager execution is enabled.
ValueError: When giving unsupported dtype and no initializer or when
trainable has been set to True with synchronization set as `ON_READ`.
"""
if shape is None:
shape = ()
# Validate optional keyword arguments.
for kwarg in kwargs:
if kwarg not in ['getter', 'collections', 'experimental_autocast',
'caching_device']:
raise TypeError('Unknown keyword argument:', kwarg)
has_custom_getter = 'getter' in kwargs
getter = kwargs.pop('getter', base_layer_utils.make_variable)
collections_arg = kwargs.pop('collections', None)
# 'experimental_autocast' can be set to False by the caller to indicate an
# AutoCastVariable should never be created.
autocast = kwargs.pop('experimental_autocast', True)
# See the docstring for tf.Variable about the details for caching_device.
caching_device = kwargs.pop('caching_device', None)
if dtype is None:
dtype = self.dtype or backend.floatx()
dtype = dtypes.as_dtype(dtype)
if self._dtype_policy.variable_dtype is None:
# The policy is "_infer", so we infer the policy from the variable dtype.
self._set_dtype_policy(policy.Policy(dtype.base_dtype.name))
initializer = initializers.get(initializer)
regularizer = regularizers.get(regularizer)
constraint = constraints.get(constraint)
if synchronization == tf_variables.VariableSynchronization.ON_READ:
if trainable:
raise ValueError(
'Synchronization value can be set to '
'VariableSynchronization.ON_READ only for non-trainable variables. '
'You have specified trainable=True and '
'synchronization=VariableSynchronization.ON_READ.')
else:
# Set trainable to be false when variable is to be synced on read.
trainable = False
elif trainable is None:
trainable = True
# Initialize variable when no initializer provided
if initializer is None:
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = initializers.get('glorot_uniform')
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
initializer = initializers.zeros()
# NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
elif not has_custom_getter:
# When `getter` is specified, it's possibly fine for `initializer` to be
# None since it's up to the custom `getter` to raise error in case it
# indeed needs `initializer`.
raise ValueError('An initializer for variable %s of type %s is required'
' for layer %s' % (name, dtype.base_dtype, self.name))
if (autocast and
self._dtype_policy.compute_dtype != self._dtype_policy.variable_dtype
and dtype.is_floating):
# Wrap 'getter' with a version that returns an AutoCastVariable.
old_getter = getter
def getter(*args, **kwargs): # pylint: disable=function-redefined
variable = old_getter(*args, **kwargs)
return autocast_variable.create_autocast_variable(variable)
# Also the caching_device does not work with the mixed precision API,
# disable it if it is specified.
# TODO(b/142020079): Reenable it once the bug is fixed.
if caching_device is not None:
tf_logging.warning(
'`caching_device` does not work with mixed precision API. Ignoring '
'user specified `caching_device`.')
caching_device = None
variable = self._add_variable_with_custom_getter(
name=name,
shape=shape,
# TODO(allenl): a `make_variable` equivalent should be added as a
# `Trackable` method.
getter=getter,
# Manage errors in Layer rather than Trackable.
overwrite=True,
initializer=initializer,
dtype=dtype,
constraint=constraint,
trainable=trainable,
partitioner=partitioner,
use_resource=use_resource,
collections=collections_arg,
synchronization=synchronization,
aggregation=aggregation,
caching_device=caching_device)
if regularizer is not None:
# TODO(fchollet): in the future, this should be handled at the
# level of variable creation, and weight regularization losses
# should be variable attributes.
name_in_scope = variable.name[:variable.name.find(':')]
self._handle_weight_regularization(name_in_scope,
variable,
regularizer)
if base_layer_utils.is_split_variable(variable):
for v in variable:
backend.track_variable(v)
if trainable:
self._trainable_weights.append(v)
else:
self._non_trainable_weights.append(v)
else:
backend.track_variable(variable)
if trainable:
self._trainable_weights.append(variable)
else:
self._non_trainable_weights.append(variable)
return variable
@generic_utils.default
def get_config(self):
"""Returns the config of the layer.
A layer config is a Python dictionary (serializable)
containing the configuration of a layer.
The same layer can be reinstantiated later
(without its trained weights) from this configuration.
The config of a layer does not include connectivity
information, nor the layer class name. These are handled
by `Network` (one layer of abstraction above).
Returns:
Python dictionary.
"""
all_args = tf_inspect.getfullargspec(self.__init__).args
config = {'name': self.name, 'trainable': self.trainable}
if hasattr(self, '_batch_input_shape'):
config['batch_input_shape'] = self._batch_input_shape
config['dtype'] = policy.serialize(self._dtype_policy)
if hasattr(self, 'dynamic'):
# Only include `dynamic` in the `config` if it is `True`
if self.dynamic:
config['dynamic'] = self.dynamic
elif 'dynamic' in all_args:
all_args.remove('dynamic')
expected_args = config.keys()
# Finds all arguments in the `__init__` that are not in the config:
extra_args = [arg for arg in all_args if arg not in expected_args]
# Check that either the only argument in the `__init__` is `self`,
# or that `get_config` has been overridden:
if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):
raise NotImplementedError('Layers with arguments in `__init__` must '
'override `get_config`.')
return config
  @classmethod
  def from_config(cls, config):
    """Creates a layer from its config.

    This method is the reverse of `get_config`, capable of instantiating
    the same layer from the config dictionary. It does not handle layer
    connectivity (handled by Network), nor weights (handled by
    `set_weights`).

    Args:
      config: A Python dictionary, typically the output of `get_config`.

    Returns:
      A layer instance.
    """
    # The config keys are assumed to match the constructor's keyword
    # arguments; subclasses with richer configs override this method.
    return cls(**config)
  def compute_output_shape(self, input_shape):
    """Computes the output shape of the layer.

    If the layer has not been built, this method will call `build` on the
    layer. This assumes that the layer will later be used with inputs that
    match the input shape provided here.

    Args:
      input_shape: Shape tuple (tuple of integers) or list of shape tuples
        (one per output tensor of the layer). Shape tuples can include None
        for free dimensions, instead of an integer.

    Returns:
      An input shape tuple.

    Raises:
      NotImplementedError: In graph mode, or if shape inference by tracing
        fails and the subclass does not override this method.
    """
    if context.executing_eagerly():
      # In this case we build the model first in order to do shape inference.
      # This is acceptable because the framework only calls
      # `compute_output_shape` on shape values that the layer would later be
      # built for. It would however cause issues in case a user attempts to
      # use `compute_output_shape` manually with shapes that are incompatible
      # with the shape the Layer will be called on (these users will have to
      # implement `compute_output_shape` themselves).
      self._maybe_build(input_shape)
      with ops.get_default_graph().as_default():
        # Trace the layer call inside a scratch FuncGraph so that no ops
        # leak into the outer graph; only static output shapes are kept.
        graph = func_graph.FuncGraph('graph')
        with graph.as_default():
          input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
          inputs = nest.map_structure(
              base_layer_utils.generate_placeholders_from_shape, input_shape)
          try:
            outputs = self(inputs, training=False)
          except TypeError as e:
            raise NotImplementedError(
                'We could not automatically infer the static shape of the '
                'layer\'s output. Please implement the '
                '`compute_output_shape` method on your layer (%s).' %
                self.__class__.__name__) from e
      return nest.map_structure(lambda t: t.shape, outputs)
    # No default graph-mode implementation: subclasses must override.
    raise NotImplementedError
@doc_controls.for_subclass_implementers
def compute_output_signature(self, input_signature):
"""Compute the output tensor signature of the layer based on the inputs.
Unlike a TensorShape object, a TensorSpec object contains both shape
and dtype information for a tensor. This method allows layers to provide
output dtype information if it is different from the input dtype.
For any layer that doesn't implement this function,
the framework will fall back to use `compute_output_shape`, and will
assume that the output dtype matches the input dtype.
Args:
input_signature: Single TensorSpec or nested structure of TensorSpec
objects, describing a candidate input for the layer.
Returns:
Single TensorSpec or nested structure of TensorSpec objects, describing
how the layer would transform the provided input.
Raises:
TypeError: If input_signature contains a non-TensorSpec object.
"""
def check_type_return_shape(s):
if not isinstance(s, tensor.TensorSpec):
raise TypeError('Only TensorSpec signature types are supported, '
'but saw signature entry: {}.'.format(s))
return s.shape
input_shape = nest.map_structure(check_type_return_shape, input_signature)
output_shape = self.compute_output_shape(input_shape)
dtype = self._compute_dtype
if dtype is None:
input_dtypes = [s.dtype for s in nest.flatten(input_signature)]
# Default behavior when self.dtype is None, is to use the first input's
# dtype.
dtype = input_dtypes[0]
return nest.map_structure(
lambda s: tensor.TensorSpec(dtype=dtype, shape=s),
output_shape)
@generic_utils.default
def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument
"""Computes an output mask tensor.
Args:
inputs: Tensor or list of tensors.
mask: Tensor or list of tensors.
Returns:
None or a tensor (or list of tensors,
one per output tensor of the layer).
"""
if not self.supports_masking:
if any(m is not None for m in nest.flatten(mask)):
raise TypeError('Layer ' + self.name + ' does not support masking, '
'but was passed an input_mask: ' + str(mask))
# masking not explicitly supported: return None as mask.
return None
# if masking is explicitly supported, by default
# carry over the input mask
return mask
  def __call__(self, *args, **kwargs):
    """Wraps `call`, applying pre- and post-processing steps.

    Args:
      *args: Positional arguments to be passed to `self.call`.
      **kwargs: Keyword arguments to be passed to `self.call`.

    Returns:
      Output tensor(s).

    Note:
      - The following optional keyword arguments are reserved for specific uses:
        * `training`: Boolean scalar tensor of Python boolean indicating
          whether the `call` is meant for training or inference.
        * `mask`: Boolean input mask.
      - If the layer's `call` method takes a `mask` argument (as some Keras
        layers do), its default value will be set to the mask generated
        for `inputs` by the previous layer (if `input` did come from
        a layer that generated a corresponding mask, i.e. if it came from
        a Keras layer with masking support.

    Raises:
      ValueError: if the layer's `call` method returns None (an invalid value).
      RuntimeError: if `super().__init__()` was not called in the constructor.
    """
    self._assert_built_as_v1()

    if not hasattr(self, '_thread_local'):
      raise RuntimeError(
          'You must call `super().__init__()` in the layer constructor.')

    # Grab the first positional or keyword argument.
    if args:
      inputs = args[0]
      args = args[1:]
    elif self._call_fn_args[0] in kwargs:
      inputs = kwargs.pop(self._call_fn_args[0])
    else:
      raise ValueError(
          'The first argument to `Layer.call` must always be passed.')

    call_context = base_layer_utils.call_context()
    input_list = nest.flatten(inputs)

    # We will attempt to build a TF graph if & only if all inputs are symbolic.
    # This is always the case in graph mode. It can also be the case in eager
    # mode when all inputs can be traced back to `keras.Input()` (when building
    # models using the functional API).
    build_graph = tf_utils.are_all_symbolic_tensors(input_list)

    # Accept NumPy and scalar inputs by converting to Tensors.
    if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):
      def _convert_non_tensor(x):
        # Don't call `tensor_conversion.convert_to_tensor` on all `inputs`
        # because `SparseTensors` can't be converted to `Tensor`.
        if isinstance(x, (np.ndarray, float, int)):
          return tensor_conversion.convert_to_tensor_v2_with_dispatch(x)
        return x

      inputs = nest.map_structure(_convert_non_tensor, inputs)
      input_list = nest.flatten(inputs)

    # Handle `mask` propagation from previous layer to current layer. Masks can
    # be propagated explicitly via the `mask` argument, or implicitly via
    # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
    # explicitly take priority.
    mask_arg_passed_by_framework = False
    input_masks = self._collect_input_masks(inputs, args, kwargs)
    if (self._expects_mask_arg and input_masks is not None and
        not self._call_arg_was_passed('mask', args, kwargs)):
      mask_arg_passed_by_framework = True
      kwargs['mask'] = input_masks

    # If `training` argument is None or not explicitly passed,
    # propagate `training` value from this layer's calling layer.
    training_value = None
    training_arg_passed_by_framework = False
    # Priority 1: `training` was explicitly passed.
    if self._call_arg_was_passed('training', args, kwargs):
      training_value = self._get_call_arg_value('training', args, kwargs)
      if not self._expects_training_arg:
        kwargs.pop('training')

    if training_value is None:
      # Priority 2: `training` was passed to a parent layer.
      if call_context.training is not None:
        training_value = call_context.training
      # Priority 3a: `learning_phase()` has been set.
      elif backend.global_learning_phase_is_set():
        training_value = backend.learning_phase()
      # Priority 3b: Pass the `learning_phase()` if in the Keras FuncGraph.
      elif build_graph:
        with backend.get_graph().as_default():
          if base_layer_utils.is_in_keras_graph():
            training_value = backend.learning_phase()

      if self._expects_training_arg and training_value is not None:
        # Force the training_value to be bool type which matches to the contract
        # for layer/model call args.
        if tensor_util.is_tf_type(training_value):
          training_value = math_ops.cast(training_value, dtypes.bool)
        else:
          training_value = bool(training_value)
        args, kwargs = self._set_call_arg_value(
            'training', training_value, args, kwargs)
        training_arg_passed_by_framework = True

    # Only create Keras history if at least one tensor originates from a
    # `keras.Input`. Otherwise this Layer may be being used outside the Keras
    # framework.
    if build_graph and base_layer_utils.needs_keras_history(inputs):
      base_layer_utils.create_keras_history(inputs)

    with call_context.enter(self, inputs, build_graph, training_value):
      # Check input assumptions set after layer building, e.g. input shape.
      if build_graph:
        # Symbolic execution on symbolic tensors. We will attempt to build
        # the corresponding TF subgraph inside `backend.get_graph()`
        input_spec.assert_input_compatibility(self.input_spec, inputs,
                                              self.name)
        graph = backend.get_graph()
        with graph.as_default(), backend.name_scope(self._name_scope()):  # pylint: disable=not-callable
          # Build layer if applicable (if the `build` method has been
          # overridden).
          self._maybe_build(inputs)
          cast_inputs = self._maybe_cast_inputs(inputs)

          # Wrapping `call` function in autograph to allow for dynamic control
          # flow and control dependencies in call. We are limiting this to
          # subclassed layers as autograph is strictly needed only for
          # subclassed layers and models.
          # tf_convert will respect the value of autograph setting in the
          # enclosing tf.function, if any.
          if (base_layer_utils.is_subclassed(self) and
              not base_layer_utils.from_saved_model(self)):
            call_fn = autograph.tf_convert(
                self.call, ag_ctx.control_status_ctx())
          else:
            call_fn = self.call

          if not self.dynamic:
            try:
              with autocast_variable.enable_auto_cast_variables(
                  self._compute_dtype_object):
                outputs = call_fn(cast_inputs, *args, **kwargs)

            except errors.OperatorNotAllowedInGraphError as e:
              raise TypeError('You are attempting to use Python control '
                              'flow in a layer that was not declared to be '
                              'dynamic. Pass `dynamic=True` to the class '
                              'constructor.\nEncountered error:\n"""\n' +
                              str(e) + '\n"""')
          else:
            # We will use static shape inference to return symbolic tensors
            # matching the specifications of the layer outputs.
            # Since `self.dynamic` is True, we will never attempt to
            # run the underlying TF graph (which is disconnected).
            # TODO(fchollet): consider py_func as an alternative, which
            # would enable us to run the underlying graph if needed.
            outputs = self._symbolic_call(inputs)

          if outputs is None:
            raise ValueError('A layer\'s `call` method should return a '
                             'Tensor or a list of Tensors, not None '
                             '(layer: ' + self.name + ').')
          if base_layer_utils.have_all_keras_metadata(inputs):
            if training_arg_passed_by_framework:
              # Remove the framework-injected `training` arg before recording
              # connectivity so user-visible node metadata stays clean.
              args, kwargs = self._set_call_arg_value(
                  'training', None, args, kwargs, pop_kwarg_if_none=True)
            if mask_arg_passed_by_framework:
              kwargs.pop('mask')
            outputs = self._set_connectivity_metadata((inputs,) + args, kwargs,
                                                      outputs)
          self._handle_activity_regularization(inputs, outputs)
          self._set_mask_metadata(inputs, outputs, input_masks)
          if hasattr(self, '_set_inputs') and not self.inputs:
            # Subclassed network: explicitly set metadata normally set by
            # a call to self._set_inputs().
            # TODO(b/120997007): This should be done in Eager as well, but
            # causes garbage collection issues because of the placeholders
            # created on the default Keras graph.
            self._set_inputs(inputs, outputs)
      else:
        # Eager execution on data tensors.
        with backend.name_scope(self._name_scope()):  # pylint: disable=not-callable
          self._maybe_build(inputs)
          cast_inputs = self._maybe_cast_inputs(inputs)
          with autocast_variable.enable_auto_cast_variables(
              self._compute_dtype_object):
            outputs = self.call(cast_inputs, *args, **kwargs)
          self._handle_activity_regularization(inputs, outputs)
          self._set_mask_metadata(inputs, outputs, input_masks)

    return outputs
def _assert_built_as_v1(self):
if not hasattr(self, '_originally_built_as_v1'):
raise ValueError(
'Your Layer or Model is in an invalid state. '
'This can happen for the following cases:\n '
'1. You might be interleaving models/layers made in '
'tf.compat.v1.Graph.as_default() with models/layers created '
'outside of it.\n'
'Making a layer or a model inside a '
'a tf.compat.v1.Graph invalidates all layers/models you previously '
'made outside of the graph.\n'
'2. You might be using a custom keras layer implementation with '
' custom __init__ which didn\'t call super().__init__. '
' Please check the implementation of %s and its bases.' %
(type(self),))
@property
def dtype(self):
return self._dtype_policy.variable_dtype
@property
def name(self):
return self._name
@property
def dynamic(self):
return any(layer._dynamic for layer in self._flatten_layers())
@property
@doc_controls.do_not_generate_docs
def stateful(self):
return any(layer._stateful for layer in self._flatten_layers())
@stateful.setter
def stateful(self, value):
self._stateful = value
@property
def trainable(self):
return self._trainable
@trainable.setter
def trainable(self, value):
self._trainable = value
for layer in getattr(self, '_self_tracked_trackables', []):
layer.trainable = value
  @property
  def activity_regularizer(self):
    """Optional regularizer function for the output of this layer."""
    return self._activity_regularizer

  @activity_regularizer.setter
  def activity_regularizer(self, regularizer):
    """Optional regularizer function for the output of this layer."""
    # Stored on a private attribute; applied by
    # `_handle_activity_regularization` after each call.
    self._activity_regularizer = regularizer
@property
def input_spec(self):
return self._input_spec
@input_spec.setter
# Must be decorated to prevent tracking, since the input_spec can be nested
# InputSpec objects.
@trackable.no_automatic_dependency_tracking
def input_spec(self, value):
for v in nest.flatten(value):
if v is not None and not isinstance(v, base_layer.InputSpec):
raise TypeError('Layer input_spec must be an instance of InputSpec. '
'Got: {}'.format(v))
self._input_spec = value
  @property
  def updates(self):
    """Update ops of this layer and all sublayers (graph mode only).

    Callable (deferred) updates are evaluated here, inside the Keras
    backend graph. Frozen, stateless layers are skipped.
    """
    collected_updates = []
    all_layers = self._flatten_layers()
    with backend.get_graph().as_default():
      for layer in all_layers:
        # Frozen layers with no state have no meaningful updates to run.
        if not layer.trainable and not layer.stateful:
          continue
        for u in layer._updates:
          if callable(u):
            try:
              u = u()
            except ValueError as e:
              if 'InaccessibleTensorError' in type(e).__name__:
                # For one specific case of error we try to raise
                # a more meaningful error message about the graph if we can.
                # This error is an internal TF symbol that is not
                # publicly exposed, so we check the name directly rather
                # than using a direct import.
                base_layer_utils.check_graph_consistency(
                    method='add_update', force_raise=True)
              raise  # check_graph_consistency may not always raise.
          base_layer_utils.check_graph_consistency(u, method='add_update')
          collected_updates.append(u)
    return collected_updates
@property
def losses(self):
"""Losses which are associated with this `Layer`.
Variable regularization tensors are created when this property is accessed,
so it is eager safe: accessing `losses` under a `tf.GradientTape` will
propagate gradients back to the corresponding variables.
Returns:
A list of tensors.
"""
collected_losses = []
all_layers = self._flatten_layers()
for layer in all_layers:
# If any eager losses are present, we assume the model to be part of an
# eager training loop (either a custom one or the one used when
# `run_eagerly=True`) and so we always return just the eager losses.
collected_losses.extend(layer._losses)
for regularizer in layer._callable_losses:
loss_tensor = regularizer()
if loss_tensor is not None:
collected_losses.append(loss_tensor)
return collected_losses
  @doc_controls.for_subclass_implementers
  def add_loss(self, losses, inputs=None):
    """Add loss tensor(s), potentially dependent on layer inputs.

    Some losses (for instance, activity regularization losses) may be dependent
    on the inputs passed when calling a layer. Hence, when reusing the same
    layer on different inputs `a` and `b`, some entries in `layer.losses` may
    be dependent on `a` and some on `b`. This method automatically keeps track
    of dependencies.

    This method can be used inside a subclassed layer or model's `call`
    function, in which case `losses` should be a Tensor or list of Tensors.

    Example:

    ```python
    class MyLayer(tf.keras.layers.Layer):
      def call(inputs, self):
        self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True)
        return inputs
    ```

    This method can also be called directly on a Functional Model during
    construction. In this case, any loss Tensors passed to this Model must
    be symbolic and be able to be traced back to the model's `Input`s. These
    losses become part of the model's topology and are tracked in `get_config`.

    Example:

    ```python
    inputs = tf.keras.Input(shape=(10,))
    x = tf.keras.layers.Dense(10)(inputs)
    outputs = tf.keras.layers.Dense(1)(x)
    model = tf.keras.Model(inputs, outputs)
    # Activity regularization.
    model.add_loss(tf.abs(tf.reduce_mean(x)))
    ```

    If this is not the case for your loss (if, for example, your loss references
    a `Variable` of one of the model's layers), you can wrap your loss in a
    zero-argument lambda. These losses are not tracked as part of the model's
    topology since they can't be serialized.

    Example:

    ```python
    inputs = tf.keras.Input(shape=(10,))
    x = tf.keras.layers.Dense(10)(inputs)
    outputs = tf.keras.layers.Dense(1)(x)
    model = tf.keras.Model(inputs, outputs)
    # Weight regularization.
    model.add_loss(lambda: tf.reduce_mean(x.kernel))
    ```

    The `get_losses_for` method allows to retrieve the losses relevant to a
    specific set of inputs.

    Args:
      losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses
        may also be zero-argument callables which create a loss tensor.
      inputs: Ignored when executing eagerly. If anything other than None is
        passed, it signals the losses are conditional on some of the layer's
        inputs, and thus they should only be run where these inputs are
        available. This is the case for activity regularization losses, for
        instance. If `None` is passed, the losses are assumed
        to be unconditional, and will apply across all dataflows of the layer
        (e.g. weight regularization losses).
    """
    def _tag_unconditional(loss):
      """Process the loss and tag it by setting loss._unconditional_loss."""
      if callable(loss):
        # We run the loss without autocasting, as regularizers are often
        # numerically unstable in float16.
        with autocast_variable.enable_auto_cast_variables(None):
          loss = loss()
      if loss is None:
        return None  # Will be filtered out when computing the .losses property
      if not tensor_util.is_tf_type(loss):
        loss = tensor_conversion.convert_to_tensor_v2_with_dispatch(
            loss, dtype=backend.floatx()
        )
      loss._unconditional_loss = (inputs is None)  # pylint: disable=protected-access
      return loss

    losses = nest.flatten(losses)

    # Split incoming losses: callables are stored for deferred evaluation,
    # symbolic tensors are tagged and recorded now.
    callable_losses = []
    symbolic_losses = []
    for loss in losses:
      if callable(loss):
        callable_losses.append(functools.partial(_tag_unconditional, loss))
        continue
      if loss is None:
        continue
      if not tensor_util.is_tf_type(loss):
        loss = tensor_conversion.convert_to_tensor_v2_with_dispatch(
            loss, dtype=backend.floatx()
        )
      # TF Functions should take the eager path.
      if (tf_utils.is_symbolic_tensor(loss) and
          not base_layer_utils.is_in_tf_function()):
        symbolic_losses.append(_tag_unconditional(loss))
        base_layer_utils.check_graph_consistency(loss, method='add_loss')

    self._callable_losses.extend(callable_losses)

    in_call_context = base_layer_utils.call_context().in_call

    if in_call_context:
      for symbolic_loss in symbolic_losses:
        self._losses.append(symbolic_loss)
    else:
      for symbolic_loss in symbolic_losses:
        if getattr(self, '_is_graph_network', False):
          self._graph_network_add_loss(symbolic_loss)
        else:
          # Possible a loss was added in a Layer's `build`.
          self._losses.append(symbolic_loss)
@property
def metrics(self):
collected_metrics = []
for layer in self._flatten_layers():
collected_metrics.extend(layer._metrics)
return collected_metrics
  @doc_controls.for_subclass_implementers
  def add_metric(self, value, aggregation=None, name=None):
    """Adds metric tensor to the layer.

    Args:
      value: Metric tensor.
      aggregation: Sample-wise metric reduction function. If `aggregation=None`,
        it indicates that the metric tensor provided has been aggregated
        already. eg, `bin_acc = BinaryAccuracy(name='acc')` followed by
        `model.add_metric(bin_acc(y_true, y_pred))`. If aggregation='mean', the
        given metric tensor will be sample-wise reduced using `mean` function.
        eg, `model.add_metric(tf.reduce_sum(outputs), name='output_mean',
        aggregation='mean')`.
      name: String metric name.

    Raises:
      ValueError: If `aggregation` is anything other than None or `mean`.
    """
    if aggregation is not None and aggregation != 'mean':
      raise ValueError(
          'We currently support only `mean` sample-wise metric aggregation. '
          'You provided aggregation=`%s`' % aggregation)

    # `_metric_obj` marks values produced by calling a `Metric` instance.
    from_metric_obj = hasattr(value, '_metric_obj')
    is_symbolic = tf_utils.is_symbolic_tensor(value)
    in_call_context = base_layer_utils.call_context().in_call

    if name is None and not from_metric_obj:
      # Eg. `self.add_metric(math_ops.reduce_sum(x), aggregation='mean')`
      # In eager mode, we use metric name to lookup a metric. Without a name,
      # a new Mean metric wrapper will be created on every model/layer call.
      # So, we raise an error when no name is provided.
      # We will do the same for symbolic mode for consistency although a name
      # will be generated if no name is provided.
      # We will not raise this error in the foll use case for the sake of
      # consistency as name in provided in the metric constructor.
      # mean = metrics.Mean(name='my_metric')
      # model.add_metric(mean(outputs))
      raise ValueError('Please provide a name for your metric like '
                       '`self.add_metric(tf.reduce_sum(inputs), '
                       'name=\'mean_activation\', aggregation=\'mean\')`')
    elif from_metric_obj:
      name = value._metric_obj.name

    if in_call_context:
      # TF Function path should take the eager path.
      self._symbolic_add_metric(value, aggregation, name)
    else:
      if not is_symbolic:
        raise ValueError('Expected a symbolic Tensor for the metric value, '
                         'received: ' + str(value))

      # Possible a metric was added in a Layer's `build`.
      if not getattr(self, '_is_graph_network', False):
        with backend.get_graph().as_default():
          self._symbolic_add_metric(value, aggregation, name)
        return

      if from_metric_obj:
        raise ValueError('Using the result of calling a `Metric` object '
                         'when calling `add_metric` on a Functional '
                         'Model is not supported. Please pass the '
                         'Tensor to monitor directly.')

      # Insert layers into the Keras Graph Network.
      self._graph_network_add_metric(value, aggregation, name)
  @doc_controls.for_subclass_implementers
  def add_update(self, updates, inputs=None):
    """Add update op(s), potentially dependent on layer inputs.

    Weight updates (for instance, the updates of the moving mean and variance
    in a BatchNormalization layer) may be dependent on the inputs passed
    when calling a layer. Hence, when reusing the same layer on
    different inputs `a` and `b`, some entries in `layer.updates` may be
    dependent on `a` and some on `b`. This method automatically keeps track
    of dependencies.

    The `get_updates_for` method allows to retrieve the updates relevant to a
    specific set of inputs.

    This call is ignored when eager execution is enabled (in that case, variable
    updates are run on the fly and thus do not need to be tracked for later
    execution).

    Args:
      updates: Update op, or list/tuple of update ops, or zero-arg callable
        that returns an update op. A zero-arg callable should be passed in
        order to disable running the updates by setting `trainable=False`
        on this Layer, when executing in Eager mode.
      inputs: Deprecated, will be automatically inferred.
    """
    if inputs is not None:
      tf_logging.warning(
          '`add_update` `inputs` kwarg has been deprecated. You no longer need '
          'to pass a value to `inputs` as it is being automatically inferred.')
    call_context = base_layer_utils.call_context()

    if (distribute_lib.has_strategy() and
        distribute_lib.in_cross_replica_context() and
        # When saving the model, the distribution strategy context should be
        # ignored, following the default path for adding updates.
        not call_context.saving):
      # Updates don't need to be run in a cross-replica context.
      return

    updates = generic_utils.to_list(updates)

    # Inputs used to decide whether each update is conditional: the current
    # call's inputs when inside `call`, otherwise all recorded inbound inputs.
    if call_context.in_call:
      relevant_inputs = call_context.inputs
    else:
      inbound_nodes = getattr(self, '_inbound_nodes', [])
      relevant_inputs = [node.input_tensors for node in inbound_nodes]

    def process_update(x):
      """Standardize update ops.

      Args:
        x: Tensor, op, or callable.

      Returns:
        An update op.
      """
      if callable(x):
        update = lambda: process_update(x())
        return update()
      elif isinstance(x, ops.Operation):
        update = x
      elif hasattr(x, 'op'):
        update = x.op
      else:
        update = tensor_conversion.convert_to_tensor_v2_with_dispatch(x)

      # An update unreachable from the relevant inputs is unconditional.
      reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])
      update._unconditional_update = update not in reachable
      return update

    updates = [process_update(x) for x in updates]
    self._updates.extend(updates)
def set_weights(self, weights):
"""Sets the weights of the layer, from Numpy arrays.
The weights of a layer represent the state of the layer. This function
sets the weight values from numpy arrays. The weight values should be
passed in the order they are created by the layer. Note that the layer's
weights must be instantiated before calling this function by calling
the layer.
For example, a Dense layer returns a list of two values-- per-output
weights and the bias value. These can be used to set the weights of another
Dense layer:
>>> a = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(1.))
>>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> a.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
>>> b = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(2.))
>>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> b.get_weights()
[array([[2.],
[2.],
[2.]], dtype=float32), array([0.], dtype=float32)]
>>> b.set_weights(a.get_weights())
>>> b.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
Args:
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the layer (i.e. it should match the
output of `get_weights`).
Raises:
ValueError: If the provided weights list does not match the
layer's specifications.
"""
params = self.weights
expected_num_weights = 0
for param in params:
if isinstance(param, base_layer_utils.TrackableWeightHandler):
expected_num_weights += param.num_tensors
else:
expected_num_weights += 1
if expected_num_weights != len(weights):
raise ValueError(
'You called `set_weights(weights)` on layer "%s" '
'with a weight list of length %s, but the layer was '
'expecting %s weights. Provided weights: %s...' %
(self.name, len(weights), expected_num_weights, str(weights)[:50]))
weight_index = 0
weight_value_tuples = []
for param in params:
if isinstance(param, base_layer_utils.TrackableWeightHandler):
num_tensors = param.num_tensors
tensors = weights[weight_index:weight_index + num_tensors]
param.set_weights(tensors)
weight_index += num_tensors
else:
weight = weights[weight_index]
weight_shape = weight.shape if hasattr(weight, 'shape') else ()
ref_shape = param.shape
if not ref_shape.is_compatible_with(weight_shape):
raise ValueError(
'Layer weight shape %s not compatible with provided weight '
'shape %s' % (ref_shape, weight_shape))
weight_value_tuples.append((param, weight))
weight_index += 1
backend.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current weights of the layer.
The weights of a layer represent the state of the layer. This function
returns both trainable and non-trainable weight values associated with this
layer as a list of Numpy arrays, which can in turn be used to load state
into similarly parameterized layers.
For example, a Dense layer returns a list of two values-- per-output
weights and the bias value. These can be used to set the weights of another
Dense layer:
>>> a = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(1.))
>>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))
>>> a.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
>>> b = tf.keras.layers.Dense(1,
... kernel_initializer=tf.constant_initializer(2.))
>>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))
>>> b.get_weights()
[array([[2.],
[2.],
[2.]], dtype=float32), array([0.], dtype=float32)]
>>> b.set_weights(a.get_weights())
>>> b.get_weights()
[array([[1.],
[1.],
[1.]], dtype=float32), array([0.], dtype=float32)]
Returns:
Weights values as a list of numpy arrays.
"""
weights = self.weights
output_weights = []
for weight in weights:
if isinstance(weight, base_layer_utils.TrackableWeightHandler):
output_weights.extend(weight.get_tensors())
else:
output_weights.append(weight)
return backend.batch_get_value(output_weights)
def get_updates_for(self, inputs):
"""Retrieves updates relevant to a specific set of inputs.
Args:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of update ops of the layer that depend on `inputs`.
"""
if inputs is None:
# Requesting unconditional updates.
return [u for u in self.updates if u._unconditional_update]
# Requesting input-conditional updates.
updates = [u for u in self.updates if not u._unconditional_update]
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, updates)
return [u for u in updates if u in reachable]
def get_losses_for(self, inputs):
"""Retrieves losses relevant to a specific set of inputs.
Args:
inputs: Input tensor or list/tuple of input tensors.
Returns:
List of loss tensors of the layer that depend on `inputs`.
"""
if inputs is None:
# Requesting unconditional losses.
return [l for l in self.losses if l._unconditional_loss]
# Requesting input-conditional losses.
losses = [l for l in self.losses if not l._unconditional_loss]
inputs = nest.flatten(inputs)
reachable = tf_utils.get_reachable_from_inputs(inputs, losses)
return [l for l in losses if l in reachable]
def get_input_mask_at(self, node_index):
"""Retrieves the input mask tensor(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple inputs).
"""
inputs = self.get_input_at(node_index)
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
def get_output_mask_at(self, node_index):
"""Retrieves the output mask tensor(s) of a layer at a given node.
Args:
node_index: Integer, index of the node
from which to retrieve the attribute.
E.g. `node_index=0` will correspond to the
first time the layer was called.
Returns:
A mask tensor
(or list of tensors if the layer has multiple outputs).
"""
output = self.get_output_at(node_index)
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
@property
def input_mask(self):
"""Retrieves the input mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Input mask tensor (potentially None) or list of input
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layers.
"""
inputs = self.input
if isinstance(inputs, list):
return [getattr(x, '_keras_mask', None) for x in inputs]
else:
return getattr(inputs, '_keras_mask', None)
@property
def output_mask(self):
"""Retrieves the output mask tensor(s) of a layer.
Only applicable if the layer has exactly one inbound node,
i.e. if it is connected to one incoming layer.
Returns:
Output mask tensor (potentially None) or list of output
mask tensors.
Raises:
AttributeError: if the layer is connected to
more than one incoming layers.
"""
output = self.output
if isinstance(output, list):
return [getattr(x, '_keras_mask', None) for x in output]
else:
return getattr(output, '_keras_mask', None)
  def get_input_shape_at(self, node_index):
    """Retrieves the input shape(s) of a layer at a given node.

    Args:
      node_index: Integer, index of the node
        from which to retrieve the attribute.
        E.g. `node_index=0` will correspond to the
        first time the layer was called.

    Returns:
      A shape tuple
      (or list of shape tuples if the layer has multiple inputs).

    Raises:
      RuntimeError: If called in Eager mode.
    """
    # Delegates to the shared node-attribute lookup; 'input shape' is only
    # used to build human-readable error messages.
    return self._get_node_attribute_at_index(node_index, 'input_shapes',
                                             'input shape')
  def get_output_shape_at(self, node_index):
    """Retrieves the output shape(s) of a layer at a given node.

    Args:
      node_index: Integer, index of the node
        from which to retrieve the attribute.
        E.g. `node_index=0` will correspond to the
        first time the layer was called.

    Returns:
      A shape tuple
      (or list of shape tuples if the layer has multiple outputs).

    Raises:
      RuntimeError: If called in Eager mode.
    """
    # Delegates to the shared node-attribute lookup; 'output shape' is only
    # used to build human-readable error messages.
    return self._get_node_attribute_at_index(node_index, 'output_shapes',
                                             'output shape')
  def get_input_at(self, node_index):
    """Retrieves the input tensor(s) of a layer at a given node.

    Args:
      node_index: Integer, index of the node
        from which to retrieve the attribute.
        E.g. `node_index=0` will correspond to the
        first input node of the layer.

    Returns:
      A tensor (or list of tensors if the layer has multiple inputs).

    Raises:
      RuntimeError: If called in Eager mode.
    """
    # Delegates to the shared node-attribute lookup.
    return self._get_node_attribute_at_index(node_index, 'input_tensors',
                                             'input')
  def get_output_at(self, node_index):
    """Retrieves the output tensor(s) of a layer at a given node.

    Args:
      node_index: Integer, index of the node
        from which to retrieve the attribute.
        E.g. `node_index=0` will correspond to the
        first output node of the layer.

    Returns:
      A tensor (or list of tensors if the layer has multiple outputs).

    Raises:
      RuntimeError: If called in Eager mode.
    """
    # Delegates to the shared node-attribute lookup.
    return self._get_node_attribute_at_index(node_index, 'output_tensors',
                                             'output')
@property
def input(self):
"""Retrieves the input tensor(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If no inbound nodes are found.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name +
' is not connected, no input to return.')
return self._get_node_attribute_at_index(0, 'input_tensors', 'input')
@property
def output(self):
"""Retrieves the output tensor(s) of a layer.
Only applicable if the layer has exactly one output,
i.e. if it is connected to one incoming layer.
Returns:
Output tensor or list of output tensors.
Raises:
AttributeError: if the layer is connected to more than one incoming
layers.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
@property
def input_shape(self):
"""Retrieves the input shape(s) of a layer.
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer, or if all inputs
have the same shape.
Returns:
Input shape, as an integer shape tuple
(or list of shape tuples, one tuple per input tensor).
Raises:
AttributeError: if the layer has no defined input_shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined input shape.')
all_input_shapes = set(
[str(node.input_shapes) for node in self._inbound_nodes])
if len(all_input_shapes) == 1:
return self._inbound_nodes[0].input_shapes
else:
raise AttributeError('The layer "' + str(self.name) +
' has multiple inbound nodes, '
'with different input shapes. Hence '
'the notion of "input shape" is '
'ill-defined for the layer. '
'Use `get_input_shape_at(node_index)` '
'instead.')
def count_params(self):
"""Count the total number of scalars composing the weights.
Returns:
An integer count.
Raises:
ValueError: if the layer isn't yet built
(in which case its weights aren't yet defined).
"""
if not self.built:
if getattr(self, '_is_graph_network', False):
with tf_utils.maybe_init_scope(self):
self._maybe_build(self.inputs)
else:
raise ValueError('You tried to call `count_params` on ' + self.name +
', but the layer isn\'t built. '
'You can build it manually via: `' + self.name +
'.build(batch_input_shape)`.')
return layer_utils.count_params(self.weights)
@property
def output_shape(self):
"""Retrieves the output shape(s) of a layer.
Only applicable if the layer has one output,
or if all outputs have the same shape.
Returns:
Output shape, as an integer shape tuple
(or list of shape tuples, one tuple per output tensor).
Raises:
AttributeError: if the layer has no defined output shape.
RuntimeError: if called in Eager mode.
"""
if not self._inbound_nodes:
raise AttributeError('The layer has never been called '
'and thus has no defined output shape.')
all_output_shapes = set(
[str(node.output_shapes) for node in self._inbound_nodes])
if len(all_output_shapes) == 1:
return self._inbound_nodes[0].output_shapes
else:
raise AttributeError('The layer "%s"'
' has multiple inbound nodes, '
'with different output shapes. Hence '
'the notion of "output shape" is '
'ill-defined for the layer. '
'Use `get_output_shape_at(node_index)` '
'instead.' % self.name)
  @property
  @doc_controls.do_not_doc_inheritable
  def inbound_nodes(self):
    """Deprecated, do NOT use! Only for compatibility with external Keras."""
    # Public alias of the private `_inbound_nodes` tracking list.
    return self._inbound_nodes
  @property
  @doc_controls.do_not_doc_inheritable
  def outbound_nodes(self):
    """Deprecated, do NOT use! Only for compatibility with external Keras."""
    # Public alias of the private `_outbound_nodes` tracking list.
    return self._outbound_nodes
##############################################################################
# Methods & attributes below are public aliases of other methods. #
##############################################################################
@doc_controls.do_not_doc_inheritable
def apply(self, inputs, *args, **kwargs):
"""Deprecated, do NOT use!
This is an alias of `self.__call__`.
Args:
inputs: Input tensor(s).
*args: additional positional arguments to be passed to `self.call`.
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
warnings.warn('`layer.apply` is deprecated and '
'will be removed in a future version. '
'Please use `layer.__call__` method instead.')
return self.__call__(inputs, *args, **kwargs)
@doc_controls.do_not_doc_inheritable
def add_variable(self, *args, **kwargs):
"""Deprecated, do NOT use! Alias for `add_weight`."""
warnings.warn('`layer.add_variable` is deprecated and '
'will be removed in a future version. '
'Please use `layer.add_weight` method instead.')
return self.add_weight(*args, **kwargs)
  @property
  def variables(self):
    """Returns the list of all layer variables/weights.

    Alias of `self.weights`, kept for TF-style naming consistency.

    Returns:
      A list of variables.
    """
    return self.weights
  @property
  def trainable_variables(self):
    """Alias of `self.trainable_weights` (TF-style naming)."""
    return self.trainable_weights
  @property
  def non_trainable_variables(self):
    """Alias of `self.non_trainable_weights` (TF-style naming)."""
    return self.non_trainable_weights
##############################################################################
# Methods & attributes below are all private and only used by the framework. #
##############################################################################
  @property
  def _inbound_nodes(self):
    """`Node` list recording the calls that produced this layer's outputs."""
    return self._inbound_nodes_value
  @_inbound_nodes.setter
  @trackable.no_automatic_dependency_tracking
  def _inbound_nodes(self, value):
    # no_automatic_dependency_tracking keeps the node list out of the
    # checkpoint dependency graph.
    self._inbound_nodes_value = value
  @property
  def _outbound_nodes(self):
    """`Node` list recording the downstream calls consuming this layer."""
    return self._outbound_nodes_value
  @_outbound_nodes.setter
  @trackable.no_automatic_dependency_tracking
  def _outbound_nodes(self, value):
    # no_automatic_dependency_tracking keeps the node list out of the
    # checkpoint dependency graph.
    self._outbound_nodes_value = value
  def _set_dtype_policy(self, dtype):
    """Sets self._dtype_policy from a Policy, dict, dtype name, or None.

    Args:
      dtype: One of: a `Policy` instance (used as-is); a dict (a serialized
        `Policy`, deserialized); a mixed-precision policy name; anything
        convertible via `dtypes.as_dtype`; or a falsy value, in which case
        the global policy is used.

    Raises:
      ValueError: If the resulting policy is 'mixed_float16' but the current
        distribution strategy does not support loss scaling.
    """
    if isinstance(dtype, policy.Policy):
      self._dtype_policy = dtype
    elif isinstance(dtype, dict):
      self._dtype_policy = policy.deserialize(dtype)
    elif isinstance(dtype, str) and dtype in ('mixed_float16',
                                              'mixed_bfloat16'):
      # The isinstance check is required since np.dtype raises an error if
      # compared to a non-dtype string.
      self._dtype_policy = policy.Policy(dtype)
    elif dtype:
      self._dtype_policy = policy.Policy(dtypes.as_dtype(dtype).name)
    else:
      self._dtype_policy = policy.global_policy()
    if (self._dtype_policy.name == 'mixed_float16' and
        not loss_scale_optimizer.strategy_supports_loss_scaling()):
      # Although only loss scaling doesn't support certain strategies, to avoid
      # confusion, we disallow the 'mixed_float16' policy with unsupported
      # strategies. This is because 'mixed_float16' requires loss scaling for
      # numeric stability.
      strategy = distribute_lib.get_strategy()
      raise ValueError('Mixed precision is not supported with the '
                       'tf.distribute.Strategy: %s. Either stop using mixed '
                       'precision by removing the use of the "%s" policy or '
                       'use a different Strategy, e.g. a MirroredStrategy.' %
                       (strategy.__class__.__name__, self._dtype_policy.name))
    # Performance optimization: cache the compute dtype as a Dtype object or
    # None, so that str to Dtype conversion doesn't happen in Layer.__call__.
    if self._dtype_policy.compute_dtype:
      self._compute_dtype_object = dtypes.as_dtype(
          self._dtype_policy.compute_dtype)
    else:
      self._compute_dtype_object = None
  # TODO(reedwm): Expose this property?
  @property
  def _compute_dtype(self):
    """The layer's compute dtype.

    Unless mixed-precision is used, this is the same as `Layer.dtype`.

    If `self._autocast` is True, the layer will cast floating-point inputs
    to this dtype.

    Returns:
      The layer's compute dtype.
    """
    return self._dtype_policy.compute_dtype
def _maybe_cast_inputs(self, inputs):
"""Maybe casts the inputs to the compute dtype.
If self._compute_dtype is floating-point, and self_autocast is True,
floating-point inputs are casted to self._compute_dtype.
Args:
inputs: Input tensor, or structure of input tensors.
Returns:
`inputs`, but tensors may have been casted to self._compute_dtype
"""
compute_dtype = self._compute_dtype
if (self._autocast and compute_dtype and
dtypes.as_dtype(compute_dtype).is_floating):
def f(x):
"""Cast a single Tensor or TensorSpec to the compute dtype."""
cast_types = (tensor.Tensor, sparse_tensor.SparseTensor,
ragged_tensor.RaggedTensor)
if (isinstance(x, cast_types) and x.dtype.is_floating and
x.dtype.base_dtype.name != compute_dtype):
return math_ops.cast(x, compute_dtype)
elif isinstance(x, tensor.TensorSpec) and x.dtype.is_floating:
# Inputs may be TensorSpecs when this function is called from
# model._set_inputs.
return tensor.TensorSpec(x.shape, compute_dtype, x.name)
else:
return x
return nest.map_structure(f, inputs)
else:
return inputs
  # _dtype used to be an attribute set in the constructor. We still expose it
  # because some clients still use it.
  # TODO(reedwm): Deprecate, then remove the _dtype property.
  @property
  def _dtype(self):
    """Legacy dtype accessor; equals the policy's variable dtype."""
    # This is equivalent to returning self.dtype . We do not return self.dtype
    # as it would cause infinite recursion in a few subclasses, which override
    # "dtype" to return self._dtype.
    return self._dtype_policy.variable_dtype
  @_dtype.setter
  def _dtype(self, value):
    # Normalize to a canonical dtype name, then rebuild the dtype policy so
    # compute/variable dtypes stay consistent.
    value = dtypes.as_dtype(value).name
    self._set_dtype_policy(policy.Policy(value))
  def _name_scope(self):  # pylint: disable=method-hidden
    """Returns the name scope to create this layer's ops under."""
    return self.name
def _init_set_name(self, name, zero_based=True):
if not name:
self._name = backend.unique_object_name(
generic_utils.to_snake_case(self.__class__.__name__),
zero_based=zero_based)
else:
self._name = name
def _get_existing_metric(self, name=None):
match = [m for m in self._metrics if m.name == name]
if not match:
return
if len(match) > 1:
raise ValueError(
'Please provide different names for the metrics you have added. '
'We found {} metrics with the name: "{}"'.format(len(match), name))
return match[0]
  def _symbolic_add_metric(self, value, aggregation=None, name=None):
    """Tracks a metric result tensor in graph (symbolic) mode.

    Args:
      value: Metric result tensor. When `aggregation` is None, `value` must
        come from (or carry `_metric_obj` metadata of) a
        `tf.keras.metrics.Metric` instance; otherwise it is wrapped in a
        `Mean` metric.
      aggregation: None or 'mean'; how to aggregate an un-aggregated `value`.
      name: Optional metric name, used to find an already-tracked metric.

    Raises:
      ValueError: If an aggregated `value` has no associated metric instance.
    """
    base_layer_utils.check_graph_consistency(value, method='add_metric')
    match = self._get_existing_metric(name)
    if aggregation is None:
      # Iterate over the metrics and check if the given metric exists already.
      # This can happen when a metric instance is created in subclassed model
      # layer `__init__` and we have tracked that instance already in
      # model.__setattr__.
      if match:
        result_tensor = value
        metric_obj = match
      elif hasattr(value, '_metric_obj'):
        # We track the instance using the metadata on the result tensor.
        result_tensor = value
        metric_obj = result_tensor._metric_obj
        self._metrics.append(metric_obj)
      else:
        raise ValueError(
            'We do not support adding an aggregated metric result tensor that '
            'is not the output of a `tf.keras.metrics.Metric` metric instance. '
            'Without having access to the metric instance we cannot reset the '
            'state of a metric after every epoch during training. You can '
            'create a `tf.keras.metrics.Metric` instance and pass the result '
            'here or pass an un-aggregated result with `aggregation` parameter '
            'set as `mean`. For example: `self.add_metric(tf.reduce_sum(inputs)'
            ', name=\'mean_activation\', aggregation=\'mean\')`')
    else:
      # If a non-aggregated tensor is given as input (ie. `aggregation` is
      # explicitly set to `mean`), we wrap the tensor in `Mean` metric.
      if match:
        result_tensor = match(value)
        metric_obj = match
      else:
        metric_obj, result_tensor = base_layer_utils.create_mean_metric(
            value, name)
        self._metrics.append(metric_obj)
    # NOTE(review): result_tensor is assigned but not returned or used here —
    # presumably only the side effects on self._metrics matter; confirm.
def _handle_weight_regularization(self, name, variable, regularizer):
"""Create lambdas which compute regularization losses."""
def _loss_for_variable(v):
"""Creates a regularization loss `Tensor` for variable `v`."""
with backend.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
return regularization
if base_layer_utils.is_split_variable(variable):
for v in variable:
self.add_loss(functools.partial(_loss_for_variable, v))
else:
self.add_loss(functools.partial(_loss_for_variable, variable))
def _handle_activity_regularization(self, inputs, outputs):
# Apply activity regularization.
# Note that it should be applied every time the layer creates a new
# output, since it is output-specific.
if self._activity_regularizer:
output_list = nest.flatten(outputs)
with backend.name_scope('ActivityRegularizer'):
for output in output_list:
activity_loss = self._activity_regularizer(output)
batch_size = math_ops.cast(
array_ops.shape(output)[0], activity_loss.dtype)
# Make activity regularization strength batch-agnostic.
mean_activity_loss = activity_loss / batch_size
base_layer_utils.check_graph_consistency(
mean_activity_loss, method='activity_regularizer')
self.add_loss(mean_activity_loss, inputs=inputs)
  def _set_mask_metadata(self, inputs, outputs, previous_mask):
    """Computes and attaches `_keras_mask` metadata to each output tensor.

    Args:
      inputs: Input tensor(s) of the layer call.
      outputs: Output tensor(s), annotated in place.
      previous_mask: Mask(s) collected from the inputs; forwarded to
        `compute_mask` when a mask must be computed.
    """
    flat_outputs = nest.flatten(outputs)
    mask_already_computed = (
        getattr(self, '_compute_output_and_mask_jointly', False) or
        all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))
    # Only compute the mask if the Layer explicitly supports masking or has
    # overridden `compute_mask`.
    should_compute_mask = (
        hasattr(self, 'compute_mask') and
        (self.supports_masking or
         not getattr(self.compute_mask, '_is_default', False)))
    if mask_already_computed:
      flat_masks = [getattr(x, '_keras_mask', None) for x in flat_outputs]
    elif not should_compute_mask:
      flat_masks = [None for _ in flat_outputs]
    else:
      output_masks = self.compute_mask(inputs, previous_mask)
      # `compute_mask` can return a single `None` even when a Layer
      # has multiple outputs.
      if output_masks is None:
        flat_masks = [None for _ in flat_outputs]
      else:
        flat_masks = nest.flatten(output_masks)
    for output, mask in zip(flat_outputs, flat_masks):
      try:
        output._keras_mask = mask
      except AttributeError:
        # C Type such as np.ndarray.
        pass
    if tf_utils.are_all_symbolic_tensors(flat_outputs):
      for output in flat_outputs:
        if getattr(output, '_keras_mask', None) is not None:
          # Do not track masks for `TensorFlowOpLayer` construction.
          output._keras_mask._keras_history_checked = True
def _collect_input_masks(self, inputs, args, kwargs):
"""Checks if `mask` argument was passed, else gathers mask from inputs."""
if self._call_arg_was_passed('mask', args, kwargs):
return self._get_call_arg_value('mask', args, kwargs)
if not self._should_compute_mask:
return None
input_masks = nest.map_structure(lambda t: getattr(t, '_keras_mask', None),
inputs)
if generic_utils.is_all_none(input_masks):
return None
return input_masks
def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False):
if arg_name in kwargs:
return True
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
if arg_name in dict(zip(call_fn_args, args)):
return True
return False
def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False):
if arg_name in kwargs:
return kwargs[arg_name]
call_fn_args = self._call_fn_args
if not inputs_in_args:
# Ignore `inputs` arg.
call_fn_args = call_fn_args[1:]
args_dict = dict(zip(call_fn_args, args))
return args_dict[arg_name]
def _set_call_arg_value(
self, arg_name, new_value, args,
kwargs, inputs_in_args=False, pop_kwarg_if_none=False):
arg_pos = self._call_fn_arg_positions.get(arg_name, None)
if arg_pos is not None:
if not inputs_in_args:
# Ignore `inputs` arg.
arg_pos = arg_pos - 1
if len(args) > arg_pos:
args = list(args)
args[arg_pos] = new_value
return args, kwargs
if new_value is None and pop_kwarg_if_none:
kwargs.pop(arg_name, None)
else:
kwargs[arg_name] = new_value
return args, kwargs
def _get_node_attribute_at_index(self, node_index, attr, attr_name):
"""Private utility to retrieves an attribute (e.g. inputs) from a node.
This is used to implement the methods:
- get_input_shape_at
- get_output_shape_at
- get_input_at
etc...
Args:
node_index: Integer index of the node from which
to retrieve the attribute.
attr: Exact node attribute name.
attr_name: Human-readable attribute name, for error messages.
Returns:
The layer's attribute `attr` at the node of index `node_index`.
Raises:
RuntimeError: If the layer has no inbound nodes, or if called in Eager
mode.
ValueError: If the index provided does not match any node.
"""
if not self._inbound_nodes:
raise RuntimeError('The layer has never been called '
'and thus has no defined ' + attr_name + '.')
if not len(self._inbound_nodes) > node_index:
raise ValueError('Asked to get ' + attr_name + ' at node ' +
str(node_index) + ', but the layer has only ' +
str(len(self._inbound_nodes)) + ' inbound nodes.')
values = getattr(self._inbound_nodes[node_index], attr)
if isinstance(values, list) and len(values) == 1:
return values[0]
else:
return values
  def _maybe_build(self, inputs):
    """Builds the layer from `inputs` if it has not been built yet.

    Validates the input spec, infers the layer dtype from the first input
    when no dtype policy was set, calls a user-overridden `build` with the
    input shapes, marks the layer built, and loads any weights supplied at
    construction time.

    Args:
      inputs: Tensors the layer is about to be called on.
    """
    # Check input assumptions set before layer building, e.g. input rank.
    if not self.built:
      input_spec.assert_input_compatibility(
          self.input_spec, inputs, self.name)
      input_list = nest.flatten(inputs)
      if input_list and self._dtype_policy.compute_dtype is None:
        try:
          dtype = input_list[0].dtype.base_dtype.name
        except AttributeError:
          # First input has no usable dtype (e.g. a plain Python value):
          # keep the unset policy rather than guessing.
          pass
        else:
          self._set_dtype_policy(policy.Policy(dtype))
      input_shapes = None
      if all(hasattr(x, 'shape') for x in input_list):
        input_shapes = nest.map_structure(lambda x: x.shape, inputs)
      # Only call `build` if the user has manually overridden the build method.
      if not hasattr(self.build, '_is_default'):
        # Any setup work performed only once should happen in an `init_scope`
        # to avoid creating symbolic Tensors that will later pollute any eager
        # operations.
        with tf_utils.maybe_init_scope(self):
          self.build(input_shapes)
      # We must also ensure that the layer is marked as built, and the build
      # shape is stored since user defined build functions may not be calling
      # `super.build()`
      Layer.build(self, input_shapes)
    # Optionally load weight values specified at layer instantiation.
    if self._initial_weights is not None:
      self.set_weights(self._initial_weights)
      self._initial_weights = None
def _symbolic_call(self, inputs):
input_shapes = nest.map_structure(lambda x: x.shape, inputs)
output_shapes = self.compute_output_shape(input_shapes)
def _make_placeholder_like(shape):
ph = backend.placeholder(shape=shape, dtype=self.dtype)
ph._keras_mask = None
return ph
return nest.map_structure(_make_placeholder_like, output_shapes)
def _get_trainable_state(self):
"""Get the `trainable` state of each sublayer.
Returns:
A dict mapping all sublayers to their `trainable` value.
"""
layers = self._flatten_layers(include_self=False, recursive=False)
trainable_state = {self: self.trainable}
for l in layers:
trainable_state.update(l._get_trainable_state())
return trainable_state
def _set_trainable_state(self, trainable_state):
"""Set `trainable` state for each sublayer."""
if self in trainable_state:
self.trainable = trainable_state[self]
layers = self._flatten_layers(include_self=False, recursive=False)
for l in layers:
if l in trainable_state:
l._set_trainable_state(trainable_state)
  @property
  def _obj_reference_counts(self):
    """A dictionary counting the number of attributes referencing an object."""
    # Lazily created because __setattr__ may run before __init__ in
    # subclasses; keyed by object identity, not equality.
    self._maybe_create_attribute('_obj_reference_counts_dict',
                                 object_identity.ObjectIdentityDictionary())
    return self._obj_reference_counts_dict
  @trackable.no_automatic_dependency_tracking
  def _maybe_create_attribute(self, name, default_value):
    """Create the attribute with the default value if it hasn't been created.

    This is useful for fields that are used for tracking purposes,
    e.g. _trainable_weights or _layers. Note that a user could create a layer
    subclass and assign an internal field before invoking Layer.__init__();
    __setattr__() needs to create the tracking fields and __init__() must not
    override them.

    Args:
      name: String, the name of the attribute.
      default_value: Object, the default value of the attribute.
    """
    if not hasattr(self, name):
      self.__setattr__(name, default_value)
  def __delattr__(self, name):
    """Deletes `name` while keeping Keras tracking structures consistent.

    Reference counts are decremented first; only when the last attribute
    referencing an object is removed is the object also scrubbed from
    `_self_tracked_trackables` and the trainable/non-trainable weight lists.
    """
    # For any super.__delattr__() call, we will directly use the implementation
    # in Trackable and skip the behavior in AutoTrackable. The Layer was
    # originally use Trackable as base class, the change of using Module as base
    # class forced us to have AutoTrackable in the class hierarchy.
    #
    # TODO(b/180760306) Keeping the status quo of skipping _delattr__ and
    # __setattr__ in AutoTrackable may be unsustainable.
    existing_value = getattr(self, name, None)
    # If this value is replacing an existing object assigned to an attribute, we
    # should clean it out to avoid leaking memory. First we check if there are
    # other attributes referencing it.
    reference_counts = self._obj_reference_counts
    if existing_value not in reference_counts:
      super(autotrackable.AutoTrackable, self).__delattr__(name)  # pylint: disable=bad-super-call
      return
    reference_count = reference_counts[existing_value]
    if reference_count > 1:
      # There are other remaining references. We can't remove this object from
      # _layers etc.
      reference_counts[existing_value] = reference_count - 1
      super(autotrackable.AutoTrackable, self).__delattr__(name)  # pylint: disable=bad-super-call
      return
    else:
      # This is the last remaining reference.
      del reference_counts[existing_value]
    super(autotrackable.AutoTrackable, self).__delattr__(name)  # pylint: disable=bad-super-call
    if (isinstance(existing_value, Layer)
        or base_layer_utils.has_weights(existing_value)):
      super(autotrackable.AutoTrackable, self).__setattr__(  # pylint: disable=bad-super-call
          '_self_tracked_trackables',
          [l for l in self._self_tracked_trackables if l is not existing_value])
    if isinstance(existing_value, tf_variables.Variable):
      super(autotrackable.AutoTrackable, self).__setattr__(  # pylint: disable=bad-super-call
          '_trainable_weights',
          [w for w in self._trainable_weights if w is not existing_value])
      super(autotrackable.AutoTrackable, self).__setattr__(  # pylint: disable=bad-super-call
          '_non_trainable_weights',
          [w for w in self._non_trainable_weights if w is not existing_value])
  def __setattr__(self, name, value):
    """Sets `name` and auto-tracks any layers/variables/metrics in `value`.

    Besides the plain assignment, this maintains reference counts
    (`_obj_reference_counts`), metric tracking (`_metrics`), sublayer
    tracking (`_self_tracked_trackables`), and the trainable/non-trainable
    weight lists.
    """
    if (name == '_self_setattr_tracking' or
        not getattr(self, '_self_setattr_tracking', True) or
        # Exclude @property.setters from tracking
        hasattr(self.__class__, name)):
      try:
        super(autotrackable.AutoTrackable, self).__setattr__(name, value)  # pylint: disable=bad-super-call
      except AttributeError:
        raise AttributeError(
            ('Can\'t set the attribute "{}", likely because it conflicts with '
             'an existing read-only @property of the object. Please choose a '
             'different name.').format(name))
      return
    # Keep track of trackable objects, for the needs of `Network.save_weights`.
    value = data_structures.sticky_attribute_assignment(
        trackable=self, value=value, name=name)
    reference_counts = self._obj_reference_counts
    reference_counts[value] = reference_counts.get(value, 0) + 1
    # Clean out the old attribute, which clears _layers and _trainable_weights
    # if necessary.
    try:
      self.__delattr__(name)
    except AttributeError:
      pass
    # Keep track of metric instance created in subclassed layer.
    from tensorflow.python.keras import metrics as metrics_module  # pylint: disable=g-import-not-at-top
    for val in nest.flatten(value):
      if isinstance(val, metrics_module.Metric) and hasattr(self, '_metrics'):
        self._metrics.append(val)
    # TODO(scottzhu): Need to track Module object as well for weight tracking.
    # Be careful about metric if it becomes a Module in future.
    # Append value to self._layers if relevant
    if (getattr(self, '_auto_track_sub_layers', True) and
        (isinstance(value, Layer) or base_layer_utils.has_weights(value))):
      self._maybe_create_attribute('_self_tracked_trackables', [])
      # We need to check object identity to avoid de-duplicating empty
      # container types which compare equal.
      if not any((layer is value for layer in self._self_tracked_trackables)):
        self._self_tracked_trackables.append(value)
        if hasattr(value, '_use_resource_variables'):
          # Legacy layers (V1 tf.layers) must always use
          # resource variables.
          value._use_resource_variables = True
    # Append value to list of trainable / non-trainable weights if relevant
    # TODO(b/125122625): This won't pick up on any variables added to a
    # list/dict after creation.
    for val in nest.flatten(value):
      if not isinstance(val, tf_variables.Variable):
        continue
      # Users may add extra weights/variables
      # simply by assigning them to attributes (invalid for graph networks)
      self._maybe_create_attribute('_trainable_weights', [])
      self._maybe_create_attribute('_non_trainable_weights', [])
      if val.trainable:
        if any(val is w for w in self._trainable_weights):
          continue
        self._trainable_weights.append(val)
      else:
        if any(val is w for w in self._non_trainable_weights):
          continue
        self._non_trainable_weights.append(val)
      backend.track_variable(val)
    # TODO(b/180760306) Skip the auto trackable from tf.Module to keep status
    # quo. See the comment at __delattr__.
    super(autotrackable.AutoTrackable, self).__setattr__(name, value)  # pylint: disable=bad-super-call
  # This is a hack so that the is_layer (within
  # training/trackable/layer_utils.py) check doesn't get the weights attr.
  # TODO(b/110718070): Remove when fixed.
  def _is_layer(self):
    """Marker consulted by trackable layer_utils.is_layer; always True."""
    return True
  def _init_call_fn_args(self, expects_training_arg=None):
    """Recomputes cached `call` signature info after `call` may have changed.

    Args:
      expects_training_arg: Optional bool override, used when restoring from
        a SavedModel where the original signature is not inspectable.
    """
    # Clear cached call function arguments.
    self.__class__._call_full_argspec.fget.cache.pop(self, None)
    self.__class__._call_fn_args.fget.cache.pop(self, None)
    self.__class__._call_accepts_kwargs.fget.cache.pop(self, None)
    call_fn_args = self._call_fn_args
    if expects_training_arg is None:
      self._expects_training_arg = ('training' in call_fn_args or
                                    self._call_accepts_kwargs)
    else:
      # Use value encoded into the metadata when loading from the SavedModel.
      self._expects_training_arg = expects_training_arg
    self._expects_mask_arg = ('mask' in call_fn_args or
                              self._call_accepts_kwargs)
  @property
  @layer_utils.cached_per_instance
  def _call_full_argspec(self):
    """Cached `tf_inspect.getfullargspec` of `self.call`."""
    # Argspec inspection is expensive and the call spec is used often, so it
    # makes sense to cache the result.
    return tf_inspect.getfullargspec(self.call)
@property
@layer_utils.cached_per_instance
def _call_fn_args(self):
all_args = self._call_full_argspec.args
# Scrub `self` that appears if a decorator was applied.
if all_args and all_args[0] == 'self':
return all_args[1:]
return all_args
@property
@layer_utils.cached_per_instance
def _call_fn_arg_positions(self):
call_fn_arg_positions = dict()
for pos, arg in enumerate(self._call_fn_args):
call_fn_arg_positions[arg] = pos
return call_fn_arg_positions
  @property
  @layer_utils.cached_per_instance
  def _call_accepts_kwargs(self):
    """Whether `self.call` declares a `**kwargs` catch-all."""
    return self._call_full_argspec.varkw is not None
@property
@layer_utils.cached_per_instance
def _should_compute_mask(self):
return ('mask' in self._call_fn_args or
getattr(self, 'compute_mask', None) is not None)
def _dedup_weights(self, weights):
"""Dedupe weights while maintaining order as much as possible."""
output, seen_ids = [], set()
for w in weights:
if id(w) not in seen_ids:
output.append(w)
# Track the Variable's identity to avoid __eq__ issues.
seen_ids.add(id(w))
return output
  # SavedModel properties. Please see keras/saving/saved_model for details.
  @property
  def _trackable_saved_model_saver(self):
    """Serialization helper that saves this layer into a SavedModel."""
    return layer_serialization.LayerSavedModelSaver(self)
  @property
  def _object_identifier(self):
    """Identifier stored for this object in the SavedModel object graph."""
    return self._trackable_saved_model_saver.object_identifier
  @property
  def _tracking_metadata(self):
    """Metadata recorded for this layer when exporting a SavedModel."""
    return self._trackable_saved_model_saver.tracking_metadata
  def _trackable_children(self, save_type='checkpoint', **kwargs):
    """Returns trackable children, adding SavedModel-only children if needed.

    Args:
      save_type: 'checkpoint' or 'savedmodel'.
      **kwargs: Forwarded to the parent implementation; must contain 'cache'
        when `save_type` is 'savedmodel'.

    Returns:
      Dict mapping child names to trackable objects.
    """
    if save_type == 'savedmodel':
      cache = kwargs['cache']
      # TODO(b/213628533): This must be called before super() to ensure
      # that any input shape changes are applied before getting the config of
      # the model.
      children = self._trackable_saved_model_saver.trackable_children(cache)
    else:
      children = {}
    children.update(super()._trackable_children(save_type, **kwargs))
    return children
def __getstate__(self):
# Override to support `copy.deepcopy` and pickling.
# Thread-local objects cannot be copied in Python 3, so pop these.
# Thread-local objects are used to cache losses in MirroredStrategy, and
# so shouldn't be copied.
state = self.__dict__.copy()
state.pop('_thread_local', None)
return state
  def __setstate__(self, state):
    """Restores pickled state; recreates the dropped `_thread_local`."""
    # `_thread_local` was removed in __getstate__ (thread-local objects are
    # not copyable); install a fresh one here.
    state['_thread_local'] = threading.local()
    # Bypass Trackable logic as `__dict__` already contains this info.
    object.__setattr__(self, '__dict__', state)
| Layer |
python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-valid-strings-to-form-target-i.py | {
"start": 3316,
"end": 3786
} | class ____(object):
def minValidStrings(self, words, target):
"""
:type words: List[str]
:type target: str
:rtype: int
"""
trie = AhoTrie(words)
dp = [0]*(len(target)+1)
for i in xrange(len(target)):
l = trie.step(target[i])
if not l:
return -1
dp[i+1] = dp[(i-l)+1]+1
return dp[-1]
# Time: O(w * (l + n))
# Space: O(l + n)
# kmp, dp
| Solution2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.