column       type    stats
language     string  1 distinct value
repo         string  346 distinct values
path         string  length 6 to 201
class_span   dict    start/end character offsets of the class in the file
source       string  length 21 to 2.38M
target       string  length 1 to 96
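Each row pairs a Python class whose name has been replaced with the placeholder ____ (the source column) with the original class name (the target column); class_span gives the start/end character offsets of that class inside the file at path in repo. The sketch below shows one way to consume the rows and restore the masked definition; it is a minimal sketch that assumes the rows are loadable with the Hugging Face datasets library, and the dataset ID used here is a hypothetical placeholder.

# Minimal consumption sketch. Assumptions: the `datasets` library is installed
# and the dataset ID below is a hypothetical placeholder, not a real one.
from datasets import load_dataset

ds = load_dataset("org/python-class-name-prediction", split="train")  # hypothetical ID

for row in ds.select(range(3)):
    masked = row["source"]    # class body with its name replaced by "____"
    name = row["target"]      # the original class name
    span = row["class_span"]  # {"start": ..., "end": ...} offsets into the file at row["path"]

    # Restore the original definition by filling the mask back in
    # (only the definition site is masked, so replace the first occurrence).
    restored = masked.replace("____", name, 1)
    assert restored.startswith(f"class {name}")
    print(row["repo"], row["path"], span["start"], span["end"], name)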
python
huggingface__transformers
src/transformers/models/d_fine/modular_d_fine.py
{ "start": 32310, "end": 33384 }
class ____(nn.Module):
    """
    A static layer that calculates integral results from a distribution.

    This layer computes the target location using the formula: `sum{Pr(n) * W(n)}`,
    where Pr(n) is the softmax probability vector representing the discrete
    distribution, and W(n) is the non-uniform Weighting Function.

    Args:
        max_num_bins (int): Max number of the discrete bins. Default is 32.
            It can be adjusted based on the dataset or task requirements.
    """

    def __init__(self, config: DFineConfig):
        super().__init__()
        self.max_num_bins = config.max_num_bins

    def forward(self, pred_corners: torch.Tensor, project: torch.Tensor) -> torch.Tensor:
        batch_size, num_queries, _ = pred_corners.shape
        pred_corners = F.softmax(pred_corners.reshape(-1, self.max_num_bins + 1), dim=1)
        pred_corners = F.linear(pred_corners, project.to(pred_corners.device)).reshape(-1, 4)
        pred_corners = pred_corners.reshape(batch_size, num_queries, -1)
        return pred_corners
DFineIntegral
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/constructor7.py
{ "start": 393, "end": 565 }
class ____:
    def __new__(cls, func: Callable[_P, _R]) -> Callable[_P, _R]:
        return func


v2 = ClassB(func1)
reveal_type(v2, expected_text="(a: int) -> int")
ClassB
python
falconry__falcon
tests/test_utils.py
{ "start": 1297, "end": 1769 }
class ____(media.JSONHandler):
    def __init__(self):
        super().__init__()
        self.deserialize_count = 0

    def deserialize(self, *args, **kwargs):
        result = super().deserialize(*args, **kwargs)
        self.deserialize_count += 1
        return result

    async def deserialize_async(self, *args, **kwargs):
        result = await super().deserialize_async(*args, **kwargs)
        self.deserialize_count += 1
        return result
TrackingJSONHandler
python
kamyu104__LeetCode-Solutions
Python/k-inverse-pairs-array.py
{ "start": 2991, "end": 3537 }
class ____(object):
    def kInversePairs(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: int
        """
        MOD = 10**9+7
        dp = [[] for _ in xrange(k+1)]
        dp[0].append([])
        for i in xrange(n):
            dp = [[p[:len(p)-k]+[i]+p[len(p)-k:] for k in xrange(min(i+1, j+1)) for p in dp[j-k]] for j in xrange(len(dp))]
        assert(all(sum(int(p[j] > p[i]) for i in xrange(n) for j in xrange(i)) == len(dp)-1) for p in dp[-1])
        return len(dp[-1])%MOD
Solution_ConstructPermutation2
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_composite.py
{ "start": 3621, "end": 6386 }
class ____:
    @classmethod
    @st.composite
    def st_classmethod_then_composite(draw, cls):
        return draw(st.integers(0, 10))

    @st.composite
    @classmethod
    def st_composite_then_classmethod(draw, cls):
        return draw(st.integers(0, 10))

    @staticmethod
    @st.composite
    def st_staticmethod_then_composite(draw):
        return draw(st.integers(0, 10))

    @st.composite
    @staticmethod
    def st_composite_then_staticmethod(draw):
        return draw(st.integers(0, 10))

    @st.composite
    def st_composite_method(draw, self):
        return draw(st.integers(0, 10))


@given(st.data())
def test_applying_composite_decorator_to_methods(data):
    instance = ClsWithStrategyMethods()
    for strategy in [
        ClsWithStrategyMethods.st_classmethod_then_composite(),
        ClsWithStrategyMethods.st_composite_then_classmethod(),
        ClsWithStrategyMethods.st_staticmethod_then_composite(),
        ClsWithStrategyMethods.st_composite_then_staticmethod(),
        instance.st_classmethod_then_composite(),
        instance.st_composite_then_classmethod(),
        instance.st_staticmethod_then_composite(),
        instance.st_composite_then_staticmethod(),
        instance.st_composite_method(),
    ]:
        x = data.draw(strategy)
        assert isinstance(x, int)
        assert 0 <= x <= 10


def test_drawfn_cannot_be_instantiated():
    with pytest.raises(TypeError):
        st.DrawFn()


def test_warns_on_strategy_annotation():
    # TODO: print the stack on Python 3.10 and 3.11 to determine the appropriate
    # stack depth to use. Consider adding a debug-print if IN_COVERAGE_TESTS
    # and the relevant depth is_hypothesis_file(), for easier future fixing.
    #
    # Meanwhile, the test is not skipped on 3.10/3.11 as it is still required for
    # coverage of the warning-generating branch.
    with pytest.warns(HypothesisWarning, match="Return-type annotation") as w:

        @st.composite
        def my_integers(draw: st.DrawFn) -> st.SearchStrategy[int]:
            return draw(st.integers())

    if sys.version_info[:2] > (3, 11):  # TEMP: see PR #3961
        assert len(w.list) == 1
        assert w.list[0].filename == __file__  # check stacklevel points to user code


def test_composite_allows_overload_without_draw():
    # See https://github.com/HypothesisWorks/hypothesis/issues/3970
    @st.composite
    @typing.overload
    def overloaded(draw: st.DrawFn, *, x: int) -> typing.Literal[True]: ...

    @st.composite
    @typing.overload
    def overloaded(draw: st.DrawFn, *, x: str) -> typing.Literal[False]: ...

    @st.composite
    def overloaded(draw: st.DrawFn, *, x: int | str) -> bool:
        return draw(st.just(isinstance(x, int)))
ClsWithStrategyMethods
python
cython__cython
Cython/Compiler/ExprNodes.py
{ "start": 414150, "end": 414406 }
class ____(_TempModifierNode):
    """Sorts a newly created Python list in place.
    """
    type = list_type

    def generate_result_code(self, code):
        code.putln(code.error_goto_if_neg(f"PyList_Sort({self.arg.result()})", self.pos))
SortedListNode
python
pypa__setuptools
setup.py
{ "start": 151, "end": 1917 }
class ____(install):
    """
    Custom install command to install a .pth file for distutils patching.

    This hack is necessary because there's no standard way to install behavior
    on startup (and it's debatable if there should be one). This hack (ab)uses
    the `extra_path` behavior in Setuptools to install a `.pth` file with
    implicit behavior on startup to give higher precedence to the local version
    of `distutils` over the version from the standard library.

    Please do not replicate this behavior.
    """

    _pth_name = 'distutils-precedence'
    _pth_contents = (
        textwrap.dedent(
            """
            import os
            var = 'SETUPTOOLS_USE_DISTUTILS'
            enabled = os.environ.get(var, 'local') == 'local'
            enabled and __import__('_distutils_hack').add_shim()
            """
        )
        .lstrip()
        .replace('\n', '; ')
    )

    def initialize_options(self):
        install.initialize_options(self)
        self.extra_path = self._pth_name, self._pth_contents

    def finalize_options(self):
        install.finalize_options(self)
        self._restore_install_lib()

    def _restore_install_lib(self):
        """
        Undo secondary effect of `extra_path` adding to `install_lib`
        """
        suffix = os.path.relpath(self.install_lib, self.install_libbase)

        if suffix.strip() == self._pth_contents.strip():
            self.install_lib = self.install_libbase


setup_params = dict(
    cmdclass={'install': install_with_pth},
)

if __name__ == '__main__':
    # allow setup.py to run from another directory
    # TODO: Use a proper conditional statement here
    here and os.chdir(here)  # type: ignore[func-returns-value]
    dist = setuptools.setup(**setup_params)
install_with_pth
python
getsentry__sentry
src/sentry/integrations/repository/issue_alert.py
{ "start": 3044, "end": 7994 }
class ____: """ Repository class that is responsible for querying the data store for notification messages in relation to issue alerts. """ _model = NotificationMessage def __init__(self, logger: Logger) -> None: self._logger: Logger = logger @classmethod def default(cls) -> IssueAlertNotificationMessageRepository: return cls(logger=_default_logger) @classmethod def _parent_notification_message_base_filter(cls) -> Q: """ Returns the query used to filter the notification messages for parent notification messages. Parent notification messages are notification message instances without a parent notification message itself, and where the error code is null. """ return Q(parent_notification_message__isnull=True, error_code__isnull=True) def get_parent_notification_message( self, rule_id: int, group_id: int, rule_action_uuid: str, open_period_start: datetime | None = None, ) -> IssueAlertNotificationMessage | None: """ Returns the parent notification message for a metric rule if it exists, otherwise returns None. Will raise an exception if the query fails and logs the error with associated data. """ try: base_filter = self._parent_notification_message_base_filter() instance: NotificationMessage = ( self._model.objects.filter(base_filter) .filter( rule_fire_history__rule__id=rule_id, rule_fire_history__group__id=group_id, rule_action_uuid=rule_action_uuid, open_period_start=open_period_start, ) .latest("date_added") ) return IssueAlertNotificationMessage.from_model(instance=instance) except NotificationMessage.DoesNotExist: return None except Exception as e: self._logger.exception( "Failed to get parent notification for issue rule", exc_info=e, extra={ "rule_id": rule_id, "group_id": group_id, "rule_action_uuid": rule_action_uuid, }, ) raise def create_notification_message( self, data: NewIssueAlertNotificationMessage ) -> IssueAlertNotificationMessage: if (error := data.get_validation_error()) is not None: raise error try: new_instance = self._model.objects.create( error_details=data.error_details, error_code=data.error_code, message_identifier=data.message_identifier, parent_notification_message_id=data.parent_notification_message_id, rule_fire_history_id=data.rule_fire_history_id, rule_action_uuid=data.rule_action_uuid, open_period_start=data.open_period_start, ) return IssueAlertNotificationMessage.from_model(instance=new_instance) except Exception as e: self._logger.exception( "failed to create new issue alert notification alert", exc_info=e, extra=data.__dict__, ) raise def get_all_parent_notification_messages_by_filters( self, group_ids: list[int] | None = None, project_ids: list[int] | None = None, open_period_start: datetime | None = None, ) -> Generator[IssueAlertNotificationMessage]: """ If no filters are passed, then all parent notification objects are returned. Because an unbounded amount of parent notification objects can be returned, this method leverages generator to control the usage of memory in the application. It is up to the caller to iterate over all the data, or store in memory if they need all objects concurrently. 
""" group_id_filter = Q(rule_fire_history__group_id__in=group_ids) if group_ids else Q() project_id_filter = Q(rule_fire_history__project_id__in=project_ids) if project_ids else Q() open_period_start_filter = ( Q(open_period_start=open_period_start) if open_period_start else Q() ) query = self._model.objects.filter( group_id_filter & project_id_filter & open_period_start_filter ).filter(self._parent_notification_message_base_filter()) try: for instance in query: yield IssueAlertNotificationMessage.from_model(instance=instance) except Exception as e: self._logger.exception( "Failed to get parent notifications on filters", exc_info=e, extra=filter.__dict__, ) raise
IssueAlertNotificationMessageRepository
python
kamyu104__LeetCode-Solutions
Python/count-paths-that-can-form-a-palindrome-in-a-tree.py
{ "start": 78, "end": 1020 }
class ____(object):
    def countPalindromePaths(self, parent, s):
        """
        :type parent: List[int]
        :type s: str
        :rtype: int
        """
        def iter_dfs():
            result = 0
            cnt = collections.defaultdict(int)
            cnt[0] = 1
            stk = [(0, 0)]
            while stk:
                u, mask = stk.pop()
                if u:
                    mask ^= 1<<(ord(s[u])-ord('a'))
                result += cnt[mask]+sum(cnt[mask^(1<<i)] if mask^(1<<i) in cnt else 0 for i in xrange(26))
                cnt[mask] += 1
                for v in reversed(adj[u]):
                    stk.append((v, mask))
            return result

        adj = [[] for _ in xrange(len(parent))]
        for u, p in enumerate(parent):
            if p != -1:
                adj[p].append(u)
        return iter_dfs()


# Time: O(n)
# Space: O(n)

import collections


# dfs, freq table
Solution
python
jazzband__django-oauth-toolkit
oauth2_provider/models.py
{ "start": 24491, "end": 24664 }
class ____:
    # https://datatracker.ietf.org/doc/html/rfc8628#section-3.1
    # scope is optional
    client_id: str
    scope: Optional[str] = None


@dataclass
DeviceRequest
python
Unity-Technologies__ml-agents
ml-agents/mlagents/trainers/trajectory.py
{ "start": 4198, "end": 11646 }
class ____(NamedTuple): steps: List[AgentExperience] next_obs: List[ np.ndarray ] # Observation following the trajectory, for bootstrapping next_group_obs: List[List[np.ndarray]] agent_id: str behavior_id: str def to_agentbuffer(self) -> AgentBuffer: """ Converts a Trajectory to an AgentBuffer :param trajectory: A Trajectory :returns: AgentBuffer. Note that the length of the AgentBuffer will be one less than the trajectory, as the next observation need to be populated from the last step of the trajectory. """ agent_buffer_trajectory = AgentBuffer() obs = self.steps[0].obs for step, exp in enumerate(self.steps): is_last_step = step == len(self.steps) - 1 if not is_last_step: next_obs = self.steps[step + 1].obs else: next_obs = self.next_obs num_obs = len(obs) for i in range(num_obs): agent_buffer_trajectory[ObsUtil.get_name_at(i)].append(obs[i]) agent_buffer_trajectory[ObsUtil.get_name_at_next(i)].append(next_obs[i]) # Take care of teammate obs and actions teammate_continuous_actions, teammate_discrete_actions, teammate_rewards = ( [], [], [], ) for group_status in exp.group_status: teammate_rewards.append(group_status.reward) teammate_continuous_actions.append(group_status.action.continuous) teammate_discrete_actions.append(group_status.action.discrete) # Team actions agent_buffer_trajectory[BufferKey.GROUP_CONTINUOUS_ACTION].append( teammate_continuous_actions ) agent_buffer_trajectory[BufferKey.GROUP_DISCRETE_ACTION].append( teammate_discrete_actions ) agent_buffer_trajectory[BufferKey.GROUPMATE_REWARDS].append( teammate_rewards ) agent_buffer_trajectory[BufferKey.GROUP_REWARD].append(exp.group_reward) # Next actions teammate_cont_next_actions = [] teammate_disc_next_actions = [] if not is_last_step: next_exp = self.steps[step + 1] for group_status in next_exp.group_status: teammate_cont_next_actions.append(group_status.action.continuous) teammate_disc_next_actions.append(group_status.action.discrete) else: for group_status in exp.group_status: teammate_cont_next_actions.append(group_status.action.continuous) teammate_disc_next_actions.append(group_status.action.discrete) agent_buffer_trajectory[BufferKey.GROUP_NEXT_CONT_ACTION].append( teammate_cont_next_actions ) agent_buffer_trajectory[BufferKey.GROUP_NEXT_DISC_ACTION].append( teammate_disc_next_actions ) for i in range(num_obs): ith_group_obs = [] for _group_status in exp.group_status: # Assume teammates have same obs space ith_group_obs.append(_group_status.obs[i]) agent_buffer_trajectory[GroupObsUtil.get_name_at(i)].append( ith_group_obs ) ith_group_obs_next = [] if is_last_step: for _obs in self.next_group_obs: ith_group_obs_next.append(_obs[i]) else: next_group_status = self.steps[step + 1].group_status for _group_status in next_group_status: # Assume teammates have same obs space ith_group_obs_next.append(_group_status.obs[i]) agent_buffer_trajectory[GroupObsUtil.get_name_at_next(i)].append( ith_group_obs_next ) if exp.memory is not None: agent_buffer_trajectory[BufferKey.MEMORY].append(exp.memory) agent_buffer_trajectory[BufferKey.MASKS].append(1.0) agent_buffer_trajectory[BufferKey.DONE].append(exp.done) agent_buffer_trajectory[BufferKey.GROUP_DONES].append( [_status.done for _status in exp.group_status] ) # Adds the log prob and action of continuous/discrete separately agent_buffer_trajectory[BufferKey.CONTINUOUS_ACTION].append( exp.action.continuous ) agent_buffer_trajectory[BufferKey.DISCRETE_ACTION].append( exp.action.discrete ) if not is_last_step: next_action = self.steps[step + 1].action cont_next_actions = 
next_action.continuous disc_next_actions = next_action.discrete else: cont_next_actions = np.zeros_like(exp.action.continuous) disc_next_actions = np.zeros_like(exp.action.discrete) agent_buffer_trajectory[BufferKey.NEXT_CONT_ACTION].append( cont_next_actions ) agent_buffer_trajectory[BufferKey.NEXT_DISC_ACTION].append( disc_next_actions ) agent_buffer_trajectory[BufferKey.CONTINUOUS_LOG_PROBS].append( exp.action_probs.continuous ) agent_buffer_trajectory[BufferKey.DISCRETE_LOG_PROBS].append( exp.action_probs.discrete ) # Store action masks if necessary. Note that 1 means active, while # in AgentExperience False means active. if exp.action_mask is not None: mask = 1 - np.concatenate(exp.action_mask) agent_buffer_trajectory[BufferKey.ACTION_MASK].append( mask, padding_value=1 ) else: # This should never be needed unless the environment somehow doesn't supply the # action mask in a discrete space. action_shape = exp.action.discrete.shape agent_buffer_trajectory[BufferKey.ACTION_MASK].append( np.ones(action_shape, dtype=np.float32), padding_value=1 ) agent_buffer_trajectory[BufferKey.PREV_ACTION].append(exp.prev_action) agent_buffer_trajectory[BufferKey.ENVIRONMENT_REWARDS].append(exp.reward) # Store the next visual obs as the current obs = next_obs return agent_buffer_trajectory @property def done_reached(self) -> bool: """ Returns true if trajectory is terminated with a Done. """ return self.steps[-1].done @property def all_group_dones_reached(self) -> bool: """ Returns true if all other agents in this trajectory are done at the end of the trajectory. Combine with done_reached to check if the whole team is done. """ return all(_status.done for _status in self.steps[-1].group_status) @property def interrupted(self) -> bool: """ Returns true if trajectory was terminated because max steps was reached. """ return self.steps[-1].interrupted
Trajectory
python
getsentry__sentry
src/sentry/conf/types/uptime.py
{ "start": 44, "end": 381 }
class ____:
    """
    Defines a region which uptime checks can be run in.
    """

    slug: str
    name: str
    # Temporarily defaulted for backwards compat
    config_redis_cluster: str = "default"
    # Prefix we'll add to keys in the redis config. Currently just used in tests
    config_redis_key_prefix: str = ""
UptimeRegionConfig
python
getsentry__sentry
tests/sentry/api/endpoints/test_relay_register.py
{ "start": 369, "end": 19048 }
class ____(APITestCase): def setUp(self) -> None: super().setUp() self.key_pair = generate_key_pair() self.public_key = self.key_pair[1] settings.SENTRY_RELAY_WHITELIST_PK.append(str(self.public_key)) self.private_key = self.key_pair[0] self.relay_id = str(uuid4()) self.path = reverse("sentry-api-0-relay-register-challenge") def register_relay( self, key_pair: tuple[SecretKey, PublicKey], version: str, relay_id: str | int ) -> None: private_key = key_pair[0] public_key = key_pair[1] data = {"public_key": str(public_key), "relay_id": relay_id, "version": version} raw_json, signature = private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content result = orjson.loads(resp.content) data = { "token": str(result.get("token")), "relay_id": relay_id, "version": version, } raw_json, signature = private_key.pack(data) resp = self.client.post( reverse("sentry-api-0-relay-register-response"), data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content def test_valid_register(self) -> None: data = {"public_key": str(self.public_key), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content def test_register_missing_relay_id(self) -> None: data = {"public_key": str(self.public_key)} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 400, resp.content def test_register_missing_public_key(self) -> None: data = {"relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 400, resp.content def test_register_invalid_body(self) -> None: resp = self.client.post( self.path, data="a", content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, ) assert resp.status_code == 400, resp.content def test_register_missing_header(self) -> None: data = {"public_key": str(self.public_key), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, ) assert resp.status_code == 400, resp.content def test_register_missing_header2(self) -> None: data = {"public_key": str(self.public_key), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 400, resp.content def test_register_wrong_sig(self) -> None: data = {"public_key": str(self.public_key), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature + "a", ) assert resp.status_code == 
400, resp.content def test_valid_register_response(self) -> None: data = {"public_key": str(self.public_key), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content result = orjson.loads(resp.content) raw_json, signature = self.private_key.pack(result) resp = self.client.post( reverse("sentry-api-0-relay-register-response"), data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content relay = Relay.objects.get(relay_id=self.relay_id) assert relay assert relay.relay_id == self.relay_id def test_forge_public_key(self) -> None: data = {"public_key": str(self.public_key), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content result = orjson.loads(resp.content) raw_json, signature = self.private_key.pack(result) self.client.post( reverse("sentry-api-0-relay-register-response"), data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) keys = generate_key_pair() settings.SENTRY_RELAY_WHITELIST_PK.append(str(keys[1])) data = {"public_key": str(keys[1]), "relay_id": self.relay_id} raw_json, signature = keys[0].pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 400, resp.content def test_public_key_mismatch(self) -> None: data = {"public_key": str(self.public_key), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content result = orjson.loads(resp.content) raw_json, signature = self.private_key.pack(result) self.client.post( reverse("sentry-api-0-relay-register-response"), data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) keys = generate_key_pair() data = {"token": str(result.get("token")), "relay_id": self.relay_id} raw_json, signature = keys[0].pack(data) resp = self.client.post( reverse("sentry-api-0-relay-register-response"), data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 400, resp.content def test_forge_public_key_on_register(self) -> None: data = {"public_key": str(self.public_key), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) result = orjson.loads(resp.content) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content keys = generate_key_pair() data = 
{"token": str(result.get("token")), "relay_id": self.relay_id} raw_json, signature = keys[0].pack(data) resp = self.client.post( reverse("sentry-api-0-relay-register-response"), data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 400, resp.content def test_invalid_json_response(self) -> None: data = {"public_key": str(self.public_key), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content result = orjson.loads(resp.content) _, signature = self.private_key.pack(result) resp = self.client.post( reverse("sentry-api-0-relay-register-response"), data="a", content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 400, resp.content def test_missing_token_response(self) -> None: data = {"public_key": str(self.public_key), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content result = orjson.loads(resp.content) del result["token"] raw_json, signature = self.private_key.pack(result) resp = self.client.post( reverse("sentry-api-0-relay-register-response"), data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 400, resp.content def test_missing_sig_response(self) -> None: data = {"public_key": str(self.public_key), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content result = orjson.loads(resp.content) raw_json, signature = self.private_key.pack(result) resp = self.client.post( reverse("sentry-api-0-relay-register-response"), data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, ) assert resp.status_code == 400, resp.content def test_relay_id_mismatch_response(self) -> None: data = {"public_key": str(self.public_key), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content result = orjson.loads(resp.content) raw_json, signature = self.private_key.pack(result) resp = self.client.post( reverse("sentry-api-0-relay-register-response"), data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=str(uuid4()), HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 400, resp.content def test_valid_register_response_twice(self) -> None: self.test_valid_register_response() self.test_valid_register_response() def test_old_relays_can_register(self) -> None: """ Test that an old Relay that does not send version information in the challenge response is still able to register. 
""" data = { "public_key": str(self.public_key), "relay_id": self.relay_id, "version": "1.0.0", } raw_json, signature = self.private_key.pack(data) resp = self.client.post( self.path, data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content result = orjson.loads(resp.content) raw_json, signature = self.private_key.pack(result) self.client.post( reverse("sentry-api-0-relay-register-response"), data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) data = {"token": str(result.get("token")), "relay_id": self.relay_id} raw_json, signature = self.private_key.pack(data) resp = self.client.post( reverse("sentry-api-0-relay-register-response"), data=raw_json, content_type="application/json", HTTP_X_SENTRY_RELAY_ID=self.relay_id, HTTP_X_SENTRY_RELAY_SIGNATURE=signature, ) assert resp.status_code == 200, resp.content def test_multiple_relay_versions_tracked(self) -> None: """ Test that updating the relay version would properly be reflected in the relay analytics. Also that tests that multiple relays """ key_pair = generate_key_pair() relay_id = str(uuid4()) before_registration = timezone.now() self.register_relay(key_pair, "1.1.1", relay_id) after_first_relay = timezone.now() self.register_relay(key_pair, "2.2.2", relay_id) after_second_relay = timezone.now() v1 = Relay.objects.get(relay_id=relay_id) assert v1 is not None rv1 = RelayUsage.objects.get(relay_id=relay_id, version="1.1.1") assert rv1 is not None rv2 = RelayUsage.objects.get(relay_id=relay_id, version="2.2.2") assert rv2 is not None assert rv1.first_seen > before_registration assert rv1.last_seen > before_registration assert rv1.first_seen < after_first_relay assert rv1.last_seen < after_first_relay assert rv2.first_seen > after_first_relay assert rv2.last_seen > after_first_relay assert rv2.first_seen < after_second_relay assert rv2.last_seen < after_second_relay def test_relay_usage_is_updated_at_registration(self) -> None: """ Tests that during registration the proper relay usage information is updated """ key_pair = generate_key_pair() relay_id = str(uuid4()) before_registration = timezone.now() # register one relay self.register_relay(key_pair, "1.1.1", relay_id) after_first_relay = timezone.now() # register another one that should not be updated after this self.register_relay(key_pair, "2.2.2", relay_id) after_second_relay = timezone.now() # re register the first one in order to update the last used time self.register_relay(key_pair, "1.1.1", relay_id) after_re_register = timezone.now() rv1 = RelayUsage.objects.get(relay_id=relay_id, version="1.1.1") assert rv1 is not None rv2 = RelayUsage.objects.get(relay_id=relay_id, version="2.2.2") assert rv2 is not None # check first seen is not modified by re register assert rv1.first_seen > before_registration assert rv1.first_seen < after_first_relay # check last seen shows the time at re-registration assert rv1.last_seen > after_second_relay assert rv1.last_seen < after_re_register # check version 2.2.2 is not affected by version 1.1.1 assert rv2.first_seen > after_first_relay assert rv2.last_seen > after_first_relay assert rv2.first_seen < after_second_relay assert rv2.last_seen < after_second_relay def test_no_db_for_static_relays(self) -> None: """ Tests that statically authenticated relays do not access the database during registration """ key_pair = generate_key_pair() relay_id = str(uuid4()) 
public_key = key_pair[1] static_auth = {relay_id: {"internal": True, "public_key": str(public_key)}} with self.assertNumQueries(0): with override_options( { "relay.static_auth": static_auth, # XXX: Temporary; remove it once the endpoint is removed "issues.browser_reporting.reporting_endpoints_header_enabled": False, } ): self.register_relay(key_pair, "1.1.1", relay_id)
RelayRegisterTest
python
kamyu104__LeetCode-Solutions
Python/two-city-scheduling.py
{ "start": 96, "end": 1535 }
class ____(object):
    def twoCitySchedCost(self, costs):
        """
        :type costs: List[List[int]]
        :rtype: int
        """
        def kthElement(nums, k, compare):
            def PartitionAroundPivot(left, right, pivot_idx, nums, compare):
                new_pivot_idx = left
                nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
                for i in xrange(left, right):
                    if compare(nums[i], nums[right]):
                        nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
                        new_pivot_idx += 1
                nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
                return new_pivot_idx

            left, right = 0, len(nums) - 1
            while left <= right:
                pivot_idx = random.randint(left, right)
                new_pivot_idx = PartitionAroundPivot(left, right, pivot_idx, nums, compare)
                if new_pivot_idx == k:
                    return
                elif new_pivot_idx > k:
                    right = new_pivot_idx - 1
                else:  # new_pivot_idx < k.
                    left = new_pivot_idx + 1

        kthElement(costs, len(costs)//2, lambda a, b: a[0]-a[1] < b[0]-b[1])
        result = 0
        for i in xrange(len(costs)):
            result += costs[i][0] if i < len(costs)//2 else costs[i][1]
        return result
Solution
python
davidhalter__jedi
jedi/inference/gradual/type_var.py
{ "start": 3882, "end": 4139 }
class ____(ValueWrapper):
    def __init__(self, wrapped_value, original_value):
        super().__init__(wrapped_value)
        self._original_value = original_value

    def execute_annotation(self):
        return ValueSet({self._original_value})
TypeWrapper
python
walkccc__LeetCode
solutions/3395. Subsequences with a Unique Middle Mode I/3395.py
{ "start": 0, "end": 2388 }
class ____: def __init__(self): self.MOD = 1_000_000_007 def subsequencesWithMiddleMode(self, nums: list[int]) -> int: n = len(nums) ans = 0 left = collections.Counter() right = collections.Counter() for i in range(2): left[nums[i]] += 1 for i in range(2, n): right[nums[i]] += 1 for i in range(2, n - 2): num = nums[i] right[num] -= 1 if right[num] == 0: del right[num] leftCount = left[num] rightCount = right[num] leftOther = i - leftCount rightOther = n - 1 - i - rightCount # count[mode] = 5 -- [a a] a [a a] ans += math.comb(leftCount, 2) * math.comb(rightCount, 2) # count[mode] = 4 -- [a a] a [a ?] ans += math.comb(leftCount, 2) * rightCount * rightOther # count[mode] = 4 -- [a ?] a [a a] ans += leftCount * leftOther * math.comb(rightCount, 2) # count[mode] = 3 -- [a a] a [? ?] ans += math.comb(leftCount, 2) * math.comb(rightOther, 2) # count[mode] = 3 -- [? ?] a [a a] ans += math.comb(leftOther, 2) * math.comb(rightCount, 2) # count[mode] = 3 -- [a ?] a [a ?] ans += leftCount * leftOther * rightCount * rightOther # count[mode] = 2 -- [a ?] a [? ?] ans += leftCount * self._calc(num, leftOther, rightOther, left, right) # count[mode] = 2 -- [? ?] a [a ?] ans += rightCount * self._calc(num, rightOther, leftOther, right, left) ans %= self.MOD left[num] += 1 return ans def _calc( self, a: int, other1: int, other2: int, count1: dict[int, int], count2: dict[int, int] ) -> int: """ Returns the count of subsequences that have `a` as the middle number, where invalid subsequences are excluded. """ # [a ?] a [? ?] res = (other1 * math.comb(other2, 2)) % self.MOD for b, b1 in count1.items(): if b == a: continue b2 = count2[b] # Exclude triples -- [a b] a [b b]. res = (res - b1 * math.comb(b2, 2)) % self.MOD # Exclude doubles -- [a b] a [b ?]. res = (res - b1 * b2 * (other2 - b2)) % self.MOD for b, b2 in count2.items(): if b == a: continue b1 = count1[b] # Exclude doubles -- [a ?] a [b b]. res = (res - (other1 - b1) * math.comb(b2, 2)) % self.MOD return (res + self.MOD) % self.MOD
Solution
python
apache__thrift
test/py/TestClient.py
{ "start": 16700, "end": 16872 }
class ____(MultiplexedOptionalTest):
    def get_protocol(self, transport):
        return make_pedantic(TJSONProtocol.TJSONProtocolFactory().getProtocol(transport))
JSONTest
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/operators/test_dms.py
{ "start": 21304, "end": 24175 }
class ____: TASK_DATA = { "ReplicationConfigIdentifier": "test-config", "SourceEndpointArn": "arn:aws:dms:us-east-1:123456789012:endpoint:RZZK4EZW5UANC7Y3P4E776WHBE", "TargetEndpointArn": "arn:aws:dms:us-east-1:123456789012:endpoint:GVBUJQXJZASXWHTWCLN2WNT57E", "ComputeConfig": { "MaxCapacityUnits": 2, "MinCapacityUnits": 4, }, "ReplicationType": "full-load", "TableMappings": json.dumps( { "TableMappings": [ { "Type": "Selection", "RuleId": 123, "RuleName": "test-rule", "SourceSchema": "/", "SourceTable": "/", } ] } ), "ReplicationSettings": "string", "SupplementalSettings": "string", "ResourceIdentifier": "string", } MOCK_REPLICATION_CONFIG_RESP: dict[str, Any] = { "ReplicationConfig": { "ReplicationConfigIdentifier": "test-config", "ReplicationConfigArn": "arn:aws:dms:us-east-1:123456789012:replication-config/test-config", "SourceEndpointArn": "arn:aws:dms:us-east-1:123456789012:endpoint:RZZK4EZW5UANC7Y3P4E776WHBE", "TargetEndpointArn": "arn:aws:dms:us-east-1:123456789012:endpoint:GVBUJQXJZASXWHTWCLN2WNT57E", "ReplicationType": "full-load", } } def test_init(self): DmsCreateReplicationConfigOperator( task_id="create_replication_config", replication_config_id=self.TASK_DATA["ReplicationConfigIdentifier"], source_endpoint_arn=self.TASK_DATA["SourceEndpointArn"], target_endpoint_arn=self.TASK_DATA["TargetEndpointArn"], replication_type=self.TASK_DATA["ReplicationType"], table_mappings=self.TASK_DATA["TableMappings"], compute_config=self.TASK_DATA["ComputeConfig"], ) @mock.patch.object(DmsHook, "conn") def test_operator(self, mock_hook): mock_hook.create_replication_config.return_value = self.MOCK_REPLICATION_CONFIG_RESP op = DmsCreateReplicationConfigOperator( task_id="create_replication_config", replication_config_id=self.TASK_DATA["ReplicationConfigIdentifier"], source_endpoint_arn=self.TASK_DATA["SourceEndpointArn"], target_endpoint_arn=self.TASK_DATA["TargetEndpointArn"], replication_type=self.TASK_DATA["ReplicationType"], table_mappings=self.TASK_DATA["TableMappings"], compute_config=self.TASK_DATA["ComputeConfig"], ) resp = op.execute(None) assert resp == self.MOCK_REPLICATION_CONFIG_RESP["ReplicationConfig"]["ReplicationConfigArn"]
TestDmsCreateReplicationConfigOperator
python
joerick__pyinstrument
pyinstrument/context_manager.py
{ "start": 646, "end": 3350 }
class ____: options: ProfileContextOptions def __init__( self, **kwargs: Unpack[ProfileContextOptions], ): profiler_options = { "interval": kwargs.get("interval", 0.001), # note- different async mode from the default, because it's easy # to run multiple profilers at once using the decorator/context # manager "async_mode": kwargs.get("async_mode", "disabled"), "use_timing_thread": kwargs.get("use_timing_thread", None), } self.profiler = Profiler(**profiler_options) self.options = kwargs @typing.overload def __call__(self, func: CallableVar, /) -> CallableVar: ... @typing.overload def __call__(self, /, **kwargs: Unpack[ProfileContextOptions]) -> "ProfileContext": ... def __call__( self, func: typing.Callable | None = None, /, **kwargs: Unpack[ProfileContextOptions] ): if func is not None: @functools.wraps(func) def wrapper(*args, **kwargs): target_description = self.options.get("target_description") if target_description is None: target_description = f"Function {func.__qualname__} at {func.__code__.co_filename}:{func.__code__.co_firstlineno}" with self(target_description=target_description): return func(*args, **kwargs) return typing.cast(typing.Callable, wrapper) else: return ProfileContext(**{**self.options, **kwargs}) def __enter__(self): if self.profiler.is_running: raise RuntimeError( "This profiler is already running - did you forget the brackets on pyinstrument.profile() ?" ) caller_frame = inspect.currentframe().f_back # type: ignore assert caller_frame is not None target_description = self.options.get("target_description") if target_description is None: target_description = "Block at {}:{}".format( caller_frame.f_code.co_filename, caller_frame.f_lineno ) self.profiler.start( caller_frame=caller_frame, target_description=target_description, ) def __exit__(self, exc_type, exc_value, traceback): session = self.profiler.stop() renderer = self.options.get("renderer") f = sys.stderr if renderer is None: renderer = ConsoleRenderer( color=file_supports_color(f), unicode=file_supports_unicode(f), short_mode=True, ) f.write(renderer.render(session))
ProfileContext
python
huggingface__transformers
src/transformers/models/detr/modeling_detr.py
{ "start": 72936, "end": 76276 }
class ____(nn.Module): """ Simple convolutional head, using group norm. Upsampling is done using a FPN approach """ def __init__(self, dim, fpn_dims, context_dim): super().__init__() if dim % 8 != 0: raise ValueError( "The hidden_size + number of attention heads must be divisible by 8 as the number of groups in" " GroupNorm is set to 8" ) inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] self.lay1 = nn.Conv2d(dim, dim, 3, padding=1) self.gn1 = nn.GroupNorm(8, dim) self.lay2 = nn.Conv2d(dim, inter_dims[1], 3, padding=1) self.gn2 = nn.GroupNorm(min(8, inter_dims[1]), inter_dims[1]) self.lay3 = nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) self.gn3 = nn.GroupNorm(min(8, inter_dims[2]), inter_dims[2]) self.lay4 = nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) self.gn4 = nn.GroupNorm(min(8, inter_dims[3]), inter_dims[3]) self.lay5 = nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) self.gn5 = nn.GroupNorm(min(8, inter_dims[4]), inter_dims[4]) self.out_lay = nn.Conv2d(inter_dims[4], 1, 3, padding=1) self.dim = dim self.adapter1 = nn.Conv2d(fpn_dims[0], inter_dims[1], 1) self.adapter2 = nn.Conv2d(fpn_dims[1], inter_dims[2], 1) self.adapter3 = nn.Conv2d(fpn_dims[2], inter_dims[3], 1) for m in self.modules(): if isinstance(m, nn.Conv2d): init.kaiming_uniform_(m.weight, a=1) init.constant_(m.bias, 0) def forward(self, x: Tensor, bbox_mask: Tensor, fpns: list[Tensor]): # here we concatenate x, the projected feature map, of shape (batch_size, d_model, height/32, width/32) with # the bbox_mask = the attention maps of shape (batch_size, n_queries, n_heads, height/32, width/32). # We expand the projected feature map to match the number of heads. x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) x = self.lay1(x) x = self.gn1(x) x = nn.functional.relu(x) x = self.lay2(x) x = self.gn2(x) x = nn.functional.relu(x) cur_fpn = self.adapter1(fpns[0]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay3(x) x = self.gn3(x) x = nn.functional.relu(x) cur_fpn = self.adapter2(fpns[1]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay4(x) x = self.gn4(x) x = nn.functional.relu(x) cur_fpn = self.adapter3(fpns[2]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay5(x) x = self.gn5(x) x = nn.functional.relu(x) x = self.out_lay(x) return x
DetrMaskHeadSmallConv
python
doocs__leetcode
solution/1000-1099/1092.Shortest Common Supersequence/Solution.py
{ "start": 0, "end": 1026 }
class ____:
    def shortestCommonSupersequence(self, str1: str, str2: str) -> str:
        m, n = len(str1), len(str2)
        f = [[0] * (n + 1) for _ in range(m + 1)]
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if str1[i - 1] == str2[j - 1]:
                    f[i][j] = f[i - 1][j - 1] + 1
                else:
                    f[i][j] = max(f[i - 1][j], f[i][j - 1])
        ans = []
        i, j = m, n
        while i or j:
            if i == 0:
                j -= 1
                ans.append(str2[j])
            elif j == 0:
                i -= 1
                ans.append(str1[i])
            else:
                if f[i][j] == f[i - 1][j]:
                    i -= 1
                    ans.append(str1[i])
                elif f[i][j] == f[i][j - 1]:
                    j -= 1
                    ans.append(str2[j])
                else:
                    i, j = i - 1, j - 1
                    ans.append(str1[i])
        return ''.join(ans[::-1])
Solution
python
gevent__gevent
src/greentest/3.10/test_socket.py
{ "start": 19438, "end": 20518 }
class ____(SocketListeningTestMixin, ThreadedSocketTestMixin):
    """Mixin to allow client/server stream tests with connected client.

    Server's socket representing connection to client is self.cli_conn
    and client's connection to server is self.serv_conn. (Based on
    SocketConnectedTest.)
    """

    def setUp(self):
        super().setUp()
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        super().tearDown()

    def clientSetUp(self):
        super().clientSetUp()
        self.cli.connect(self.serv_addr)
        self.serv_conn = self.cli

    def clientTearDown(self):
        try:
            self.serv_conn.close()
            self.serv_conn = None
        except AttributeError:
            pass
        super().clientTearDown()
ConnectedStreamTestMixin
python
mlflow__mlflow
mlflow/sagemaker/__init__.py
{ "start": 131065, "end": 132153 }
class ____:
    STATE_SUCCEEDED = "succeeded"
    STATE_FAILED = "failed"
    STATE_IN_PROGRESS = "in progress"
    STATE_TIMED_OUT = "timed_out"

    def __init__(self, state, message):
        self.state = state
        self.message = message

    @classmethod
    def in_progress(cls, message=None):
        if message is None:
            message = "The operation is still in progress."
        return cls(_SageMakerOperationStatus.STATE_IN_PROGRESS, message)

    @classmethod
    def timed_out(cls, duration_seconds):
        return cls(
            _SageMakerOperationStatus.STATE_TIMED_OUT,
            f"Timed out after waiting {duration_seconds} seconds for the operation to"
            " complete. This operation may still be in progress. Please check the AWS"
            " console for more information.",
        )

    @classmethod
    def failed(cls, message):
        return cls(_SageMakerOperationStatus.STATE_FAILED, message)

    @classmethod
    def succeeded(cls, message):
        return cls(_SageMakerOperationStatus.STATE_SUCCEEDED, message)
_SageMakerOperationStatus
python
pypa__hatch
tests/backend/version/scheme/test_standard.py
{ "start": 2898, "end": 3400 }
class ____:
    @pytest.mark.parametrize("key", ["post", "rev", "r"])
    def test_begin(self, isolation, key):
        scheme = StandardScheme(str(isolation), {})

        assert scheme.update(key, "9000.0.0-rc.3.dev5", {}) == "9000.0.0rc3.post0"

    @pytest.mark.parametrize("key", ["post", "rev", "r"])
    def test_continue(self, isolation, key):
        scheme = StandardScheme(str(isolation), {})

        assert scheme.update(key, f"9000.0.0-rc.3-{key}7.dev5", {}) == "9000.0.0rc3.post8"
TestPost
python
pennersr__django-allauth
allauth/idp/oidc/views.py
{ "start": 13273, "end": 14796 }
class ____(View):
    def post(self, request):
        if request.POST.get("grant_type") == Client.GrantType.DEVICE_CODE:
            return self._post_device_token(request)
        return self._create_token_response(request)

    def _create_token_response(
        self,
        request,
        *,
        user: Optional[AbstractBaseUser] = None,
        data: Optional[dict] = None,
    ):
        orequest = extract_params(request)
        oresponse = get_server(
            pre_token=[lambda orequest: self._pre_token(orequest, user, data)]
        ).create_token_response(*orequest)
        return convert_response(*oresponse)

    def _pre_token(
        self, orequest, user: Optional[AbstractBaseUser], data: Optional[dict]
    ):
        if orequest.grant_type == Client.GrantType.DEVICE_CODE:
            assert user is not None  # nosec
            assert data is not None  # nosec
            if scope := data.get("scope"):
                orequest.scope = scope
            orequest.user = user

    def _post_device_token(self, request):
        try:
            user, data = device_codes.poll_device_code(request)
        except OAuth2Error as e:
            return HttpResponse(
                e.json, content_type="application/json", status=e.status_code
            )
        else:
            return self._create_token_response(request, user=user, data=data)


token = TokenView.as_view()


@method_decorator(csrf_exempt, name="dispatch")
@method_decorator(login_not_required, name="dispatch")
TokenView
python
ray-project__ray
python/ray/tests/unit/test_runtime_env_validation.py
{ "start": 7557, "end": 9573 }
class ____:
    def test_empty(self):
        assert RuntimeEnv() == {}

    def test_serialization(self):
        env1 = RuntimeEnv(pip=["requests"], env_vars={"hi1": "hi1", "hi2": "hi2"})
        env2 = RuntimeEnv(env_vars={"hi2": "hi2", "hi1": "hi1"}, pip=["requests"])

        assert env1 == env2

        serialized_env1 = env1.serialize()
        serialized_env2 = env2.serialize()

        # Key ordering shouldn't matter.
        assert serialized_env1 == serialized_env2

        deserialized_env1 = RuntimeEnv.deserialize(serialized_env1)
        deserialized_env2 = RuntimeEnv.deserialize(serialized_env2)

        assert env1 == deserialized_env1 == env2 == deserialized_env2

    def test_reject_pip_and_conda(self):
        with pytest.raises(ValueError):
            RuntimeEnv(pip=["requests"], conda="env_name")

    def test_ray_commit_injection(self):
        # Should not be injected if no pip and conda.
        result = RuntimeEnv(env_vars={"hi": "hi"})
        assert "_ray_commit" not in result

        # Should be injected if pip or conda present.
        result = RuntimeEnv(pip=["requests"])
        assert "_ray_commit" in result

        result = RuntimeEnv(conda="env_name")
        assert "_ray_commit" in result

        # Should not override if passed.
        result = RuntimeEnv(conda="env_name", _ray_commit="Blah")
        assert result["_ray_commit"] == "Blah"

    def test_inject_current_ray(self):
        # Should not be injected if not provided by env var.
        result = RuntimeEnv(env_vars={"hi": "hi"})
        assert "_inject_current_ray" not in result

        os.environ["RAY_RUNTIME_ENV_LOCAL_DEV_MODE"] = "1"

        # Should be injected if provided by env var.
        result = RuntimeEnv()
        assert result["_inject_current_ray"]

        # Should be preserved if passed.
        result = RuntimeEnv(_inject_current_ray=False)
        assert not result["_inject_current_ray"]

        del os.environ["RAY_RUNTIME_ENV_LOCAL_DEV_MODE"]
TestParsedRuntimeEnv
python
python__mypy
mypyc/ir/ops.py
{ "start": 23682, "end": 24974 }
class ____(RegisterOp):
    """A higher-level primitive operation.

    Some of these have special compiler support. These will be lowered
    (transformed) into lower-level IR ops before code generation, and after
    reference counting op insertion. Others will be transformed into CallC
    ops.

    Tagged integer equality is a typical primitive op with non-trivial
    lowering. It gets transformed into a tag check, followed by different
    code paths for short and long representations.
    """

    def __init__(self, args: list[Value], desc: PrimitiveDescription, line: int = -1) -> None:
        self.args = args
        self.type = desc.return_type
        self.error_kind = desc.error_kind
        self.desc = desc

    def sources(self) -> list[Value]:
        return self.args

    def set_sources(self, new: list[Value]) -> None:
        self.args = new[:]

    def stolen(self) -> list[Value]:
        steals = self.desc.steals
        if isinstance(steals, list):
            assert len(steals) == len(self.args)
            return [arg for arg, steal in zip(self.args, steals) if steal]
        else:
            return [] if not steals else self.sources()

    def accept(self, visitor: OpVisitor[T]) -> T:
        return visitor.visit_primitive_op(self)


@final
PrimitiveOp
python
pytorch__pytorch
test/distributed/test_dist2.py
{ "start": 1422, "end": 8091 }
class ____(MultiProcessTestCase): @property def device(self) -> torch.device: raise NotImplementedError # @device.setter # def device(self, value: torch.device) -> None: # self._device = value @property def world_size(self) -> int: return 2 def setUp(self): super().setUp() self._spawn_processes() def new_group(self) -> torch.distributed.ProcessGroup: raise unittest.SkipTest("new_group() must be implemented by subclasses") def test_allreduce(self) -> None: pg = self.new_group() t = torch.ones(10, device=self.device) pg.allreduce(t, timeout=timedelta(seconds=30)).wait() synchronize_accelerator() self.assertEqual(t, torch.full_like(t, self.world_size)) pg.shutdown() def test_barrier(self) -> None: pg = self.new_group() pg.barrier(timeout=timedelta(seconds=30)).wait() synchronize_accelerator() pg.shutdown() def test_broadcast(self) -> None: pg = self.new_group() t = torch.full((10,), self.rank, device=self.device) pg.broadcast(t, root=0, timeout=timedelta(seconds=30)).wait() synchronize_accelerator() self.assertEqual(t, torch.full_like(t, 0)) pg.shutdown() def test_allgather(self) -> None: pg = self.new_group() t = torch.full((10,), self.rank + 1, device=self.device, dtype=torch.float32) out = [torch.zeros(10, device=self.device) for _ in range(self.world_size)] pg.allgather(out, t, timeout=timedelta(seconds=30)).wait() synchronize_accelerator() for i in range(self.world_size): self.assertEqual(out[i], torch.full_like(t, i + 1)) pg.shutdown() def test_gather(self) -> None: pg = self.new_group() inp = torch.full((10,), self.rank + 1, device=self.device, dtype=torch.float32) out = ( [torch.zeros(10, device=self.device) for _ in range(self.world_size)] if self.rank == 0 else [] ) pg.gather(out, inp, root=0, timeout=timedelta(seconds=30)).wait() synchronize_accelerator() if self.rank == 0: for i in range(self.world_size): self.assertEqual(out[i], torch.full_like(inp, i + 1)) pg.shutdown() def test_scatter(self) -> None: pg = self.new_group() inp = ( [ torch.torch.full((10,), i + 1, device=self.device, dtype=torch.float32) for i in range(self.world_size) ] if self.rank == 0 else [] ) out = torch.zeros(10, device=self.device) pg.scatter(out, inp, root=0, timeout=timedelta(seconds=30)).wait() synchronize_accelerator() self.assertEqual(out, torch.full_like(out, self.rank + 1)) pg.shutdown() def test_reduce(self) -> None: pg = self.new_group() t = torch.full((10,), 1, device=self.device, dtype=torch.float32) pg.reduce( t, root=0, op=dist2.ReduceOp.SUM, timeout=timedelta(seconds=30) ).wait() synchronize_accelerator() if self.rank == 0: self.assertEqual(t, torch.full_like(t, self.world_size)) pg.shutdown() def test_reduce_scatter(self) -> None: pg = self.new_group() inp = [ torch.full((10,), i + 1, device=self.device, dtype=torch.float32) for i in range(self.world_size) ] out = torch.zeros(10, device=self.device) pg.reduce_scatter( out, inp, op=dist2.ReduceOp.SUM, timeout=timedelta(seconds=30) ).wait() synchronize_accelerator() self.assertEqual(out, torch.full_like(out, self.world_size * (self.rank + 1))) pg.shutdown() def test_alltoall_base(self) -> None: pg = self.new_group() out = torch.zeros(self.world_size * 10, device=self.device) inp = torch.full( (self.world_size * 10,), self.rank + 1, device=self.device, dtype=torch.float32, ) split_sizes = [10 for _ in range(self.world_size)] pg.alltoall_base( out, inp, split_sizes, split_sizes, timeout=timedelta(seconds=30) ).wait() synchronize_accelerator() for i in range(self.world_size): out_range = out[i * 10 : (i + 1) * 10] self.assertEqual(out_range, 
torch.full_like(out_range, i + 1)) def test_group_split(self) -> None: group = self.new_group() subgroup = group.split_group( [0], timeout=timedelta(seconds=30), group_name="subgroup_1" ) if self.rank == 0: assert subgroup is not None self.assertEqual(subgroup.size(), 1) backend = subgroup._get_backend(self.device) self.assertEqual(backend.options._timeout, timedelta(seconds=30)) self.assertEqual(subgroup.group_name, "subgroup_1") else: self.assertEqual(subgroup, None) def test_remote_group_merge(self) -> None: group = self.new_group() subgroup_1 = group.split_group([0], timeout=timedelta(seconds=30)) subgroup_2 = group.split_group([1], timeout=timedelta(seconds=30)) if self.rank == 0: assert subgroup_1 is not None tcp_store = dist.TCPStore( host_name=os.environ["MASTER_ADDR"], port=29781, world_size=2, is_master=True, ) merged_pg = subgroup_1.merge_remote_group( tcp_store, 2, timedelta(seconds=40), "merged_pg" ) self.assertEqual(merged_pg.size(), 2) backend = merged_pg._get_backend(self.device) self.assertEqual(backend.options._timeout, timedelta(seconds=40)) self.assertEqual(merged_pg.group_name, "merged_pg") else: assert subgroup_2 is not None tcp_store = dist.TCPStore( host_name=os.environ["MASTER_ADDR"], port=29781, world_size=2, is_master=False, ) merged_pg = subgroup_2.merge_remote_group( tcp_store, 2, timedelta(seconds=40), "merged_pg" ) self.assertEqual(merged_pg.size(), 2) backend = merged_pg._get_backend(self.device) self.assertEqual(backend.options._timeout, timedelta(seconds=40)) self.assertEqual(merged_pg.group_name, "merged_pg")
Dist2MultiProcessTestCase
python
pdm-project__pdm
src/pdm/models/setup.py
{ "start": 1171, "end": 13700 }
class ____: """ Class that reads a setup.py file without executing it. """ @classmethod def read_from_directory(cls, directory: Path) -> Setup: result = Setup() for filename, file_reader in [ ("pyproject.toml", cls.read_pyproject_toml), ("setup.cfg", cls.read_setup_cfg), ("setup.py", cls.read_setup_py), ]: filepath = directory / filename if not filepath.exists(): continue new_result = file_reader(filepath) result.update(new_result) return result @staticmethod def read_pyproject_toml(file: Path) -> Setup: from pdm import termui from pdm.exceptions import ProjectError from pdm.formats import MetaConvertError from pdm.project.project_file import PyProject try: metadata = PyProject(file, ui=termui.UI()).metadata except ProjectError: return Setup() except MetaConvertError as e: termui.logger.warning("Error parsing pyproject.toml, metadata may be incomplete. %s", e) metadata = e.data return Setup( name=metadata.get("name"), summary=metadata.get("description"), version=metadata.get("version"), install_requires=metadata.get("dependencies", []), extras_require=metadata.get("optional-dependencies", {}), python_requires=metadata.get("requires-python"), ) @no_type_check @classmethod def read_setup_py(cls, file: Path) -> Setup: with file.open(encoding="utf-8") as f: content = f.read() body = ast.parse(content).body setup_call, body = cls._find_setup_call(body) if not setup_call: return Setup() return Setup( name=cls._find_single_string(setup_call, body, "name"), version=cls._find_single_string(setup_call, body, "version") or "0.0.0", install_requires=cls._find_install_requires(setup_call, body), extras_require=cls._find_extras_require(setup_call, body), python_requires=cls._find_single_string(setup_call, body, "python_requires"), ) @staticmethod def read_setup_cfg(file: Path) -> Setup: parser = ConfigParser() parser.read(str(file)) name = None version = "0.0.0" if parser.has_option("metadata", "name"): name = parser.get("metadata", "name") if parser.has_option("metadata", "version"): meta_version = parser.get("metadata", "version") if not meta_version.startswith("attr:"): version = meta_version install_requires = [] extras_require: dict[str, list[str]] = {} python_requires = None if parser.has_section("options"): if parser.has_option("options", "install_requires"): for dep in parser.get("options", "install_requires").split("\n"): dep = dep.strip() if not dep: continue install_requires.append(dep) if parser.has_option("options", "python_requires"): python_requires = parser.get("options", "python_requires") if parser.has_section("options.extras_require"): for group in parser.options("options.extras_require"): extras_require[group] = [] deps = parser.get("options.extras_require", group) for dep in deps.split("\n"): dep = dep.strip() if not dep: continue extras_require[group].append(dep) return Setup( name=name, version=version, install_requires=install_requires, extras_require=extras_require, python_requires=python_requires, ) @classmethod def _find_setup_call(cls, elements: list[Any]) -> tuple[ast.Call | None, list[Any | None]]: funcdefs = [] for i, element in enumerate(elements): if isinstance(element, ast.If) and i == len(elements) - 1: # Checking if the last element is an if statement # and if it is 'if __name__ == "__main__"' which # could contain the call to setup() test = element.test if not isinstance(test, ast.Compare): continue left = test.left if not isinstance(left, ast.Name): continue if left.id != "__name__": continue setup_call, body = cls._find_sub_setup_call([element]) if not setup_call: 
continue return setup_call, body + elements if not isinstance(element, ast.Expr): if isinstance(element, ast.FunctionDef): funcdefs.append(element) continue value = element.value if not isinstance(value, ast.Call): continue func = value.func if not (isinstance(func, ast.Name) and func.id == "setup") and not ( isinstance(func, ast.Attribute) and isinstance(func.value, ast.Name) and func.value.id == "setuptools" and func.attr == "setup" ): continue return value, elements # Nothing, we inspect the function definitions return cls._find_sub_setup_call(funcdefs) @no_type_check @classmethod def _find_sub_setup_call(cls, elements: list[Any]) -> tuple[ast.Call | None, list[Any | None]]: for element in elements: if not isinstance(element, (ast.FunctionDef, ast.If)): continue setup_call = cls._find_setup_call(element.body) if setup_call != (None, None): setup_call, body = setup_call body = elements + body return setup_call, body return None, None @no_type_check @classmethod def _find_install_requires(cls, call: ast.Call, body: Iterable[Any]) -> list[str]: install_requires: list[str] = [] value = cls._find_in_call(call, "install_requires") if value is None: # Trying to find in kwargs kwargs = cls._find_call_kwargs(call) if kwargs is None or not isinstance(kwargs, ast.Name): return install_requires variable = cls._find_variable_in_body(body, kwargs.id) if not isinstance(variable, (ast.Dict, ast.Call)): return install_requires if isinstance(variable, ast.Call): if not isinstance(variable.func, ast.Name): return install_requires if variable.func.id != "dict": return install_requires value = cls._find_in_call(variable, "install_requires") else: value = cls._find_in_dict(variable, "install_requires") if value is None: return install_requires if isinstance(value, ast.List): install_requires.extend( [el.value for el in value.elts if isinstance(el, ast.Constant) and isinstance(el.value, str)] ) elif isinstance(value, ast.Name): variable = cls._find_variable_in_body(body, value.id) if variable is not None and isinstance(variable, ast.List): install_requires.extend( [el.value for el in variable.elts if isinstance(el, ast.Constant) and isinstance(el.value, str)] ) return install_requires @no_type_check @classmethod def _find_extras_require(cls, call: ast.Call, body: Iterable[Any]) -> dict[str, list[str]]: extras_require: dict[str, list[str]] = {} value = cls._find_in_call(call, "extras_require") if value is None: # Trying to find in kwargs kwargs = cls._find_call_kwargs(call) if kwargs is None or not isinstance(kwargs, ast.Name): return extras_require variable = cls._find_variable_in_body(body, kwargs.id) if not isinstance(variable, (ast.Dict, ast.Call)): return extras_require if isinstance(variable, ast.Call): if not isinstance(variable.func, ast.Name): return extras_require if variable.func.id != "dict": return extras_require value = cls._find_in_call(variable, "extras_require") else: value = cls._find_in_dict(variable, "extras_require") if value is None: return extras_require if isinstance(value, ast.Dict): for key, val in zip(value.keys, value.values): if isinstance(val, ast.Name): val = cls._find_variable_in_body(body, val.id) if isinstance(val, ast.List): extras_require[key.value] = [ e.value for e in val.elts if isinstance(e, ast.Constant) and isinstance(e.value, str) ] elif isinstance(value, ast.Name): variable = cls._find_variable_in_body(body, value.id) if variable is None or not isinstance(variable, ast.Dict): return extras_require for key, val in zip(variable.keys, variable.values): if 
isinstance(val, ast.Name): val = cls._find_variable_in_body(body, val.id) if isinstance(val, ast.List): extras_require[key.value] = [ e.value for e in val.elts if isinstance(e, ast.Constant) and isinstance(e.value, str) ] return extras_require @classmethod def _find_single_string(cls, call: ast.Call, body: list[Any], name: str) -> str | None: value = cls._find_in_call(call, name) if value is None: # Trying to find in kwargs kwargs = cls._find_call_kwargs(call) if kwargs is None or not isinstance(kwargs, ast.Name): return None variable = cls._find_variable_in_body(body, kwargs.id) if not isinstance(variable, (ast.Dict, ast.Call)): return None if isinstance(variable, ast.Call): if not isinstance(variable.func, ast.Name): return None if variable.func.id != "dict": return None value = cls._find_in_call(variable, name) else: value = cls._find_in_dict(variable, name) if value is None: return None if isinstance(value, ast.Constant) and isinstance(value.value, str): return value.value elif isinstance(value, ast.Name): variable = cls._find_variable_in_body(body, value.id) if variable is not None and isinstance(variable, ast.Constant) and isinstance(variable.value, str): return variable.value return None @staticmethod def _find_in_call(call: ast.Call, name: str) -> Any | None: for keyword in call.keywords: if keyword.arg == name: return keyword.value return None @staticmethod def _find_call_kwargs(call: ast.Call) -> Any | None: kwargs = None for keyword in call.keywords: if keyword.arg is None: kwargs = keyword.value return kwargs @staticmethod def _find_variable_in_body(body: Iterable[Any], name: str) -> Any | None: for elem in body: if not isinstance(elem, ast.Assign): continue for target in elem.targets: if not isinstance(target, ast.Name): continue if target.id == name: return elem.value return None @staticmethod def _find_in_dict(dict_: ast.Dict, name: str) -> Any | None: for key, val in zip(dict_.keys, dict_.values): if isinstance(key, ast.Constant) and key.value == name: return val return None
_SetupReader
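A minimal sketch (not pdm's implementation) of the idea `_SetupReader` relies on: parse `setup.py` with `ast` and read keyword arguments off the `setup(...)` call without ever executing the file. `find_setup_kwargs` and `SETUP_PY` are illustrative names introduced here.

```python
import ast

SETUP_PY = '''
from setuptools import setup

setup(name="demo", version="1.0", install_requires=["requests>=2.0"])
'''

def find_setup_kwargs(source: str) -> dict:
    """Return constant-valued keyword arguments of the first setup(...) call."""
    for node in ast.walk(ast.parse(source)):
        if (
            isinstance(node, ast.Call)
            and isinstance(node.func, ast.Name)
            and node.func.id == "setup"
        ):
            # literal_eval only succeeds for constant-valued keywords,
            # which is exactly the safe subset an AST reader can recover.
            return {
                kw.arg: ast.literal_eval(kw.value)
                for kw in node.keywords
                if kw.arg is not None
            }
    return {}

print(find_setup_kwargs(SETUP_PY))
# {'name': 'demo', 'version': '1.0', 'install_requires': ['requests>=2.0']}
```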
python
gevent__gevent
src/gevent/tests/test__hub.py
{ "start": 1798, "end": 2657 }
class ____(greentest.TestCase): def test_sleep(self): # even if there was an error in the mainloop, the hub should continue to work start = time.time() gevent.sleep(DELAY) delay = time.time() - start delay_range = DELAY * 0.9 self.assertTimeWithinRange(delay, DELAY - delay_range, DELAY + delay_range) error = greentest.ExpectedException('TestExceptionInMainloop.test_sleep/fail') def fail(): raise error with get_hub().loop.timer(0.001) as t: t.start(fail) self.expect_one_error() start = time.time() gevent.sleep(DELAY) delay = time.time() - start self.assert_error(value=error) self.assertTimeWithinRange(delay, DELAY - delay_range, DELAY + delay_range)
TestExceptionInMainloop
python
google__jax
jax/experimental/mosaic/gpu/core.py
{ "start": 8986, "end": 9603 }
class ____: shape: tuple[int, int] dtype: Any _: dataclasses.KW_ONLY layout: tcgen05.TMEMLayout | None = None collective: bool = False packing: int | None = None def __post_init__(self): if self.layout is not None: self.layout.check_type( self.shape, utils.bitwidth(utils.dtype_to_ir_type(self.dtype)) ) if self.packing is not None: raise ValueError("Cannot specify both layout and packing") def _count_buffer_bytes(shape_dtype: jax.ShapeDtypeStruct) -> int: return math.prod(shape_dtype.shape) * dtypes.itemsize_bits(dtypes.dtype(shape_dtype.dtype)) // 8
TMEM
python
getsentry__sentry
src/sentry/hybridcloud/services/replica/service.py
{ "start": 1525, "end": 3732 }
class ____(RpcService): key = "region_replica" local_mode = SiloMode.REGION @regional_rpc_method(resolve=ByRegionName()) @abc.abstractmethod def upsert_replicated_auth_provider( self, *, auth_provider: RpcAuthProvider, region_name: str ) -> None: pass @regional_rpc_method(resolve=ByRegionName()) @abc.abstractmethod def upsert_replicated_auth_identity( self, *, auth_identity: RpcAuthIdentity, region_name: str ) -> None: pass @regional_rpc_method(resolve=ByRegionName()) @abc.abstractmethod def upsert_replicated_api_key(self, *, api_key: RpcApiKey, region_name: str) -> None: pass @regional_rpc_method(resolve=ByRegionName()) @abc.abstractmethod def upsert_replicated_api_token(self, *, api_token: RpcApiToken, region_name: str) -> None: pass @regional_rpc_method(resolve=ByRegionName()) @abc.abstractmethod def delete_replicated_api_token(self, *, apitoken_id: int, region_name: str) -> None: pass @regional_rpc_method(resolve=ByRegionName()) @abc.abstractmethod def upsert_replicated_org_auth_token(self, *, token: RpcOrgAuthToken, region_name: str) -> None: pass @regional_rpc_method(resolve=ByRegionName()) @abc.abstractmethod def upsert_replicated_org_slug_reservation( self, *, slug_reservation: RpcOrganizationSlugReservation, region_name: str ) -> None: pass @regional_rpc_method(resolve=ByRegionName()) @abc.abstractmethod def delete_replicated_org_slug_reservation( self, *, organization_slug_reservation_id: int, region_name: str ) -> None: pass @regional_rpc_method(resolve=ByRegionName()) @abc.abstractmethod def delete_replicated_auth_provider(self, *, auth_provider_id: int, region_name: str) -> None: pass @classmethod def get_local_implementation(cls) -> RpcService: from .impl import DatabaseBackedRegionReplicaService return DatabaseBackedRegionReplicaService() region_replica_service = RegionReplicaService.create_delegation() control_replica_service = ControlReplicaService.create_delegation()
RegionReplicaService
python
apache__airflow
providers/cncf/kubernetes/tests/unit/cncf/kubernetes/test_callbacks.py
{ "start": 946, "end": 1078 }
class ____: mock_callbacks = MagicMock() @classmethod def reset(cls): cls.mock_callbacks.reset_mock()
MockWrapper
python
pytorch__pytorch
benchmarks/operator_benchmark/benchmark_pytorch.py
{ "start": 5822, "end": 10724 }
class ____:
    """This class includes all the information needed to benchmark an operator.

    op_bench: it's a user-defined class (child of TorchBenchmarkBase)
    which includes input and operator, etc.
    test_config: a namedtuple that includes test_name, input_shape, tag, run_backward.
    When run_backward is false, the run_forward method will be executed.
    When run_backward is true, run_forward and _output_mean will be executed
    to generate output. Then, run_backward will be executed.
    """

    def __init__(self, op_bench, test_config):
        self.test_config = test_config
        self.op_bench = op_bench
        self.place_holder_tensor = torch.ones(1)
        self.framework = "PyTorch"
        self.time_series = []
        self._jit_forward_graph = None
        self._compile_forward_graph = None

    def _generate_jit_forward_graph(self):
        """generate a graph for the forward function via scripting"""
        scripted_op_bench = torch.jit.script(self.op_bench)
        return scripted_op_bench.forward_consume

    def _generate_compile_forward_graph(self):
        """generate a compiled graph for the forward function via torch.compile"""
        compiled_forward_consume = torch.compile(
            self.op_bench.forward_consume_eager, backend="inductor"
        )
        return compiled_forward_consume

    def run_jit_forward(self, num_runs, print_per_iter=False, cuda_sync=False):
        """Run the forward path of an op with JIT mode"""
        if self._jit_forward_graph is None:
            self._jit_forward_graph = self._generate_jit_forward_graph()
        self._jit_forward_graph(num_runs)

    def run_compile_forward(self, num_runs, print_per_iter=False, cuda_sync=False):
        """Run the forward path of an op with compile mode"""
        if self._compile_forward_graph is None:
            self._compile_forward_graph = self._generate_compile_forward_graph()
        self._compile_forward_graph(num_runs)
        if cuda_sync:
            torch.cuda.synchronize(torch.cuda.current_device())

    def _print_per_iter(self):
        # print last 50 values
        length = min(len(self.time_series), 50)
        for i in range(length):
            print(
                "PyTorchObserver "
                + json.dumps(
                    {
                        "type": self.test_config.test_name,
                        "metric": "latency",
                        "unit": "ms",
                        "value": str(self.time_series[length - i - 1]),
                    }
                )
            )

    def run_forward(self, num_runs, print_per_iter, cuda_sync):
        """Run the forward path of an op with eager mode"""
        if print_per_iter:
            for _ in range(num_runs):
                start_time = time.time()
                self.output = self.op_bench.forward_impl_eager()
                if cuda_sync:
                    torch.cuda.synchronize(torch.cuda.current_device())
                end_time = time.time()
                self.time_series.append((end_time - start_time) * 1e3)
        else:
            for _ in range(num_runs):
                self.output = self.op_bench.forward_impl_eager()
            if cuda_sync:
                torch.cuda.synchronize(torch.cuda.current_device())

    def _output_mean(self):
        """TODO (mingzhe): it is not necessary to sum up everything by myself,
        torch.autograd.backward does take a gradient tensor. By default, it is
        the same shape as your output tensor, with all 1s.
        Mathematically, it is the same as if the output is summed together.
        So we should be able to get rid of this method.
        dummy function for gradient calculation
        """
        self.mean = self.output.mean()

    def run_backward(self, num_runs, print_per_iter=False):
        """Run the backward path of an op in many iterations"""
        # TODO: can we use JIT here to reduce python overhead?
        for _ in range(num_runs):
            self.mean.backward(retain_graph=True)


def create_pytorch_op_test_case(op_bench, test_config):
    """This method is used to generate a test. func_name is a globally unique string.
For PyTorch add operator with M=8, N=2, K=1, tag = long, here are the values for the members in test_case: op.module_name: add framework: PyTorch test_config: TestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1', tag='long', run_backward=False) func_name: addPyTorchTestConfig(test_name='add_M8_N2_K1', input_config='M: 8, N: 2, K: 1', tag='long', run_backward=False) """ test_case = PyTorchOperatorTestCase(op_bench, test_config) test_config = test_case.test_config op = test_case.op_bench func_name = f"{op.module_name()}{test_case.framework}{str(test_config)}" return (func_name, test_case)
PyTorchOperatorTestCase
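A hedged sketch of the eager timing pattern `run_forward` follows: time a callable over `num_runs` iterations and record per-iteration latency in milliseconds, optionally synchronizing CUDA before stopping the clock. `time_eager` is an illustrative helper, not part of the benchmark harness.

```python
import time

import torch


def time_eager(fn, num_runs: int, cuda_sync: bool = False) -> list:
    """Return per-iteration latencies in milliseconds."""
    latencies_ms = []
    for _ in range(num_runs):
        start = time.time()
        fn()
        if cuda_sync:
            # Wait for queued GPU kernels; otherwise the clock stops too early.
            torch.cuda.synchronize(torch.cuda.current_device())
        latencies_ms.append((time.time() - start) * 1e3)
    return latencies_ms


x = torch.ones(64, 64)
print(time_eager(lambda: x @ x, num_runs=3))
```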
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/generic1.py
{ "start": 260, "end": 327 }
class ____(Generic[]): ... # This should generate an error.
Class2
python
apache__airflow
airflow-core/src/airflow/callbacks/database_callback_sink.py
{ "start": 1180, "end": 1528 }
class ____(BaseCallbackSink): """Sends callbacks to database.""" @provide_session def send(self, callback: CallbackRequest, session: Session = NEW_SESSION) -> None: """Send callback for execution.""" db_callback = DbCallbackRequest(callback=callback, priority_weight=1) session.add(db_callback)
DatabaseCallbackSink
python
spyder-ide__spyder
spyder/plugins/editor/extensions/snippets.py
{ "start": 1415, "end": 2217 }
class ____: """Traverse and extract information from snippets AST.""" def __init__(self, line, column, node_number=0): self.line = line self.column = column self.line_offset = 0 self.column_offset = 0 self.node_number = node_number self.snippet_map = {} self.position_map = {} def visit(self, node): if isinstance(node, nodes.TabstopSnippetNode): snippet_number = node.number number_snippets = self.snippet_map.get(snippet_number, []) number_snippets.append(node) self.snippet_map[snippet_number] = number_snippets if node.mark_for_position: self.position_map[self.node_number] = (node.position, node) self.node_number += 1
SnippetSearcherVisitor
python
conda__conda
conda/core/solve.py
{ "start": 57248, "end": 62328 }
class ____: # A mutable container with defined attributes to help keep method signatures clean # and also keep track of important state variables. def __init__( self, prefix, update_modifier, deps_modifier, prune, ignore_pinned, force_remove, should_retry_solve, ): # prefix, channels, subdirs, specs_to_add, specs_to_remove # self.prefix = prefix # self.channels = channels # self.subdirs = subdirs # self.specs_to_add = specs_to_add # self.specs_to_remove = specs_to_remove # Group 1. Behavior flags self.update_modifier = update_modifier self.deps_modifier = deps_modifier self.prune = prune self.ignore_pinned = ignore_pinned self.force_remove = force_remove self.should_retry_solve = should_retry_solve # Group 2. System state self.prefix = prefix # self.prefix_data = None # self.specs_from_history_map = None # self.track_features_specs = None # self.pinned_specs = None # Group 3. Repository metadata self.index = None self.r = None # Group 4. Mutable working containers self.specs_map = {} self.solution_precs = None self._init_solution_precs() self.add_back_map = {} # name: (prec, spec) self.final_environment_specs = None @memoizedproperty def prefix_data(self): return PrefixData(self.prefix) @memoizedproperty def specs_from_history_map(self): return History(self.prefix).get_requested_specs_map() @memoizedproperty def track_features_specs(self): return tuple(MatchSpec(x + "@") for x in context.track_features) @memoizedproperty def pinned_specs(self): return () if self.ignore_pinned else get_pinned_specs(self.prefix) def set_repository_metadata(self, index, r): self.index, self.r = index, r def _init_solution_precs(self): if self.prune: # DO NOT add existing prefix data to solution on prune self.solution_precs = tuple() else: self.solution_precs = tuple(self.prefix_data.iter_records()) def working_state_reset(self): self.specs_map = {} self._init_solution_precs() self.add_back_map = {} # name: (prec, spec) self.final_environment_specs = None def get_pinned_specs(prefix: str) -> tuple[MatchSpec]: """Find pinned specs from file and return a tuple of MatchSpec.""" context_pinned_packages = tuple( MatchSpec(spec, optional=True) for spec in context.pinned_packages ) prefix_data = PrefixData(prefix_path=prefix) return context_pinned_packages + prefix_data.get_pinned_specs() def diff_for_unlink_link_precs( prefix, final_precs, specs_to_add=(), force_reinstall=NULL, ) -> tuple[tuple[PackageRecord, ...], tuple[PackageRecord, ...]]: # Ensure final_precs supports the IndexedSet interface if not isinstance(final_precs, IndexedSet): if not hasattr(final_precs, "__getitem__"): raise TypeError("final_precs must support list indexing") if not hasattr(final_precs, "__sub__"): raise TypeError("final_precs must support set difference") previous_records = IndexedSet(PrefixGraph(PrefixData(prefix).iter_records()).graph) force_reinstall = ( context.force_reinstall if force_reinstall is NULL else force_reinstall ) unlink_precs = previous_records - final_precs link_precs = final_precs - previous_records def _add_to_unlink_and_link(rec): link_precs.add(rec) if prec in previous_records: unlink_precs.add(rec) # If force_reinstall is enabled, make sure any package in specs_to_add is unlinked then # re-linked if force_reinstall: for spec in specs_to_add: prec = next((rec for rec in final_precs if spec.match(rec)), None) if not prec: raise RuntimeError(f"Could not find record for spec {spec}") _add_to_unlink_and_link(prec) # add back 'noarch: python' packages to unlink and link if python version changes python_spec = 
MatchSpec("python") prev_python = next( (rec for rec in previous_records if python_spec.match(rec)), None ) curr_python = next((rec for rec in final_precs if python_spec.match(rec)), None) gmm = get_major_minor_version if ( prev_python and curr_python and gmm(prev_python.version) != gmm(curr_python.version) ): noarch_python_precs = (p for p in final_precs if p.noarch == NoarchType.python) for prec in noarch_python_precs: _add_to_unlink_and_link(prec) unlink_precs = IndexedSet( reversed(sorted(unlink_precs, key=lambda x: previous_records.index(x))) ) link_precs = IndexedSet(sorted(link_precs, key=lambda x: final_precs.index(x))) return tuple(unlink_precs), tuple(link_precs)
SolverStateContainer
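The unlink/link diff in `diff_for_unlink_link_precs` is, at its core, an ordered set difference: records present before but not after are unlinked (in reverse of their install order), and records present after but not before are linked. A toy version with plain lists standing in for `IndexedSet`:

```python
previous = ["python-3.10", "numpy-1.24", "requests-2.28"]
final = ["python-3.10", "numpy-1.26", "requests-2.28", "pandas-2.1"]

# Unlink in reverse of the previous install order; link in final order.
unlink = [rec for rec in reversed(previous) if rec not in final]
link = [rec for rec in final if rec not in previous]
print(unlink)  # ['numpy-1.24']
print(link)    # ['numpy-1.26', 'pandas-2.1']
```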
python
wandb__wandb
wandb/sdk/artifacts/_generated/registry_versions.py
{ "start": 747, "end": 961 }
class ____(GQLResult): page_info: PageInfoFragment = Field(alias="pageInfo") edges: List[RegistryVersionsOrganizationOrgEntityArtifactMembershipsEdges]
RegistryVersionsOrganizationOrgEntityArtifactMemberships
python
getsentry__sentry
src/sentry/api/serializers/rest_framework/group_notes.py
{ "start": 647, "end": 2280 }
class ____(serializers.Serializer[None]): text = serializers.CharField() mentions = ListField(child=ActorField(), required=False) external_id = serializers.CharField(allow_null=True, required=False) def validate_mentions(self, mentions: list[Actor]) -> list[Actor]: if mentions and "projects" in self.context: separated_actors = separate_actors(mentions) # Validate that all mentioned users exist and are on the project. users = separated_actors["users"] mentioned_user_ids = {user.id for user in users} projects = self.context["projects"] user_ids = list( OrganizationMember.objects.filter( teams__projectteam__project__in=[p.id for p in projects], user_id__in=mentioned_user_ids, ).values_list("user_id", flat=True) ) if len(mentioned_user_ids) > len(user_ids): raise serializers.ValidationError("Cannot mention a non team member") # Validate that all mentioned teams exist and are on the project. teams = separated_actors["teams"] mentioned_team_ids = {team.id for team in teams} if ( len(mentioned_team_ids) > Team.objects.filter( id__in=mentioned_team_ids, projectteam__project__in=projects ).count() ): raise serializers.ValidationError( "Mentioned team not found or not associated with project" ) return mentions
NoteSerializer
python
google__pytype
pytype/rewrite/abstract/functions.py
{ "start": 19649, "end": 21403 }
class ____(SimpleFunction[_FrameT]): """Function with a code object.""" def __init__( self, ctx: base.ContextType, name: str, code: blocks.OrderedCode, enclosing_scope: tuple[str, ...], parent_frame: _FrameT, ): super().__init__( ctx=ctx, name=name, signatures=(Signature.from_code(ctx, name, code),), ) self.code = code self.enclosing_scope = enclosing_scope # A function saves a pointer to the frame it's defined in so that it has all # the context needed to call itself. self._parent_frame = parent_frame self._call_cache = {} def __repr__(self): return f'InterpreterFunction({self.name})' @property def _attrs(self): return (self.name, self.code) def call_with_mapped_args(self, mapped_args: MappedArgs[_FrameT]) -> _FrameT: log.info('Calling function %s:\n Sig: %s\n Args: %s', self.full_name, mapped_args.signature, mapped_args.argdict) parent_frame = mapped_args.frame or self._parent_frame if parent_frame.final_locals is None: k = None else: # If the parent frame has finished running, then the context of this call # will not change, so we can cache the return value. k = (parent_frame.name, datatypes.immutabledict(mapped_args.argdict)) if k in self._call_cache: log.info('Reusing cached return value of function %s', self.name) return self._call_cache[k] frame = parent_frame.make_child_frame(self, mapped_args.argdict) frame.run() if k: self._call_cache[k] = frame return frame def bind_to(self, callself: base.BaseValue) -> 'BoundFunction[_FrameT]': return BoundFunction(self._ctx, callself, self)
InterpreterFunction
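A toy analogue of `InterpreterFunction`'s call cache: memoize a result only when the calling context can no longer change, keyed on an immutable view of the arguments. `CachedCall` is an illustrative name, not pytype API.

```python
class CachedCall:
    """Memoize calls only when the calling context is final."""

    def __init__(self, fn):
        self.fn = fn
        self._cache = {}

    def call(self, argdict: dict, context_final: bool):
        # Only a finished (immutable) context yields a usable cache key.
        key = tuple(sorted(argdict.items())) if context_final else None
        if key is not None and key in self._cache:
            return self._cache[key]
        result = self.fn(**argdict)
        if key is not None:
            self._cache[key] = result
        return result


add = CachedCall(lambda x, y: x + y)
print(add.call({"x": 1, "y": 2}, context_final=True))  # computed: 3
print(add.call({"x": 1, "y": 2}, context_final=True))  # served from cache: 3
```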
python
ray-project__ray
rllib/env/tests/test_multi_agent_env.py
{ "start": 5306, "end": 7919 }
class ____(MultiAgentEnv): """Env of independent agents, each of which exits after n steps.""" def __init__(self): super().__init__() self.envs = {} self.agents = [] self.possible_agents = list(range(10000)) # Absolute max. number of agents. self.agentID = 0 self.terminateds = set() self.truncateds = set() # All agents have the exact same spaces. self.observation_space = gym.spaces.Discrete(2) self.action_space = gym.spaces.Discrete(2) self.resetted = False def spawn(self): # Spawn a new agent into the current episode. agentID = self.agentID self.envs[agentID] = MockEnv(25) self.agents.append(agentID) self.agentID += 1 return agentID def kill(self, agent_id): del self.envs[agent_id] self.agents.remove(agent_id) def reset(self, *, seed=None, options=None): self.envs = {} self.agents.clear() self.spawn() self.resetted = True self.terminateds = set() self.truncateds = set() obs = {} infos = {} for i, a in self.envs.items(): obs[i], infos[i] = a.reset() return obs, infos def step(self, action_dict): obs, rew, terminated, truncated, info = {}, {}, {}, {}, {} # Apply the actions. for i, action in action_dict.items(): obs[i], rew[i], terminated[i], truncated[i], info[i] = self.envs[i].step( action ) if terminated[i]: self.terminateds.add(i) if truncated[i]: self.truncateds.add(i) # Sometimes, add a new agent to the episode. if random.random() > 0.75 and len(action_dict) > 0: aid = self.spawn() obs[aid], rew[aid], terminated[aid], truncated[aid], info[aid] = self.envs[ aid ].step(action) if terminated[aid]: self.terminateds.add(aid) if truncated[aid]: self.truncateds.add(aid) # Sometimes, kill an existing agent. if len(self.envs) > 1 and random.random() > 0.25: keys = list(self.envs.keys()) aid = random.choice(keys) self.kill(aid) terminated[aid] = True self.terminateds.add(aid) terminated["__all__"] = len(self.terminateds) == len(self.envs) truncated["__all__"] = len(self.truncateds) == len(self.envs) return obs, rew, terminated, truncated, info
FlexAgentsMultiAgent
python
PyCQA__pylint
doc/data/messages/a/arguments-differ/bad.py
{ "start": 0, "end": 93 }
class ____: def mix(self, fluid_one, fluid_two): return fluid_one + fluid_two
Drink
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/paramInference1.py
{ "start": 239, "end": 906 }
class ____(Parent): def __init__(self, a, b): reveal_type(a, expected_text="int") reveal_type(b, expected_text="str") def func1(self, a, b): reveal_type(self, expected_text="Self@Child") reveal_type(a, expected_text="int") reveal_type(b, expected_text="str") return a def func2(a, b=0, c=None): reveal_type(a, expected_text="Unknown") reveal_type(b, expected_text="int") reveal_type(c, expected_text="Unknown | None") def func3(a=(1, 2), b=[1, 2], c={1: 2}): reveal_type(a, expected_text="Unknown") reveal_type(b, expected_text="Unknown") reveal_type(c, expected_text="Unknown")
Child
python
vyperlang__vyper
vyper/builtins/functions.py
{ "start": 49750, "end": 49856 }
class ____(_AddMulMod): _id = "uint256_mulmod" _eval_fn = operator.mul _opcode = "mulmod"
MulMod
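`uint256_mulmod` computes `(a * b) % m` without materializing a full 512-bit product on the EVM; Python's unbounded ints give the reference semantics directly, relying only on modular reduction distributing over multiplication:

```python
a, b, m = 2**200 + 3, 2**100 + 7, 10**9 + 7
assert (a * b) % m == ((a % m) * (b % m)) % m  # reduction commutes with *
print((a * b) % m)
```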
python
lazyprogrammer__machine_learning_examples
rl/grid_world.py
{ "start": 3742, "end": 9756 }
class ____: def __init__(self, rows, cols, start): self.rows = rows self.cols = cols self.i = start[0] self.j = start[1] def set(self, rewards, actions, probs): # rewards should be a dict of: (i, j): r (row, col): reward # actions should be a dict of: (i, j): A (row, col): list of possible actions self.rewards = rewards self.actions = actions self.probs = probs def set_state(self, s): self.i = s[0] self.j = s[1] def current_state(self): return (self.i, self.j) def is_terminal(self, s): return s not in self.actions def move(self, action): s = (self.i, self.j) a = action next_state_probs = self.probs[(s, a)] next_states = list(next_state_probs.keys()) next_probs = list(next_state_probs.values()) next_state_idx = np.random.choice(len(next_states), p=next_probs) s2 = next_states[next_state_idx] # update the current state self.i, self.j = s2 # return a reward (if any) return self.rewards.get(s2, 0) def game_over(self): # returns true if game is over, else false # true if we are in a state where no actions are possible return (self.i, self.j) not in self.actions def all_states(self): # possibly buggy but simple way to get all states # either a position that has possible next actions # or a position that yields a reward return set(self.actions.keys()) | set(self.rewards.keys()) def windy_grid(): g = WindyGrid(3, 4, (2, 0)) rewards = {(0, 3): 1, (1, 3): -1} actions = { (0, 0): ('D', 'R'), (0, 1): ('L', 'R'), (0, 2): ('L', 'D', 'R'), (1, 0): ('U', 'D'), (1, 2): ('U', 'D', 'R'), (2, 0): ('U', 'R'), (2, 1): ('L', 'R'), (2, 2): ('L', 'R', 'U'), (2, 3): ('L', 'U'), } # p(s' | s, a) represented as: # KEY: (s, a) --> VALUE: {s': p(s' | s, a)} probs = { ((2, 0), 'U'): {(1, 0): 1.0}, ((2, 0), 'D'): {(2, 0): 1.0}, ((2, 0), 'L'): {(2, 0): 1.0}, ((2, 0), 'R'): {(2, 1): 1.0}, ((1, 0), 'U'): {(0, 0): 1.0}, ((1, 0), 'D'): {(2, 0): 1.0}, ((1, 0), 'L'): {(1, 0): 1.0}, ((1, 0), 'R'): {(1, 0): 1.0}, ((0, 0), 'U'): {(0, 0): 1.0}, ((0, 0), 'D'): {(1, 0): 1.0}, ((0, 0), 'L'): {(0, 0): 1.0}, ((0, 0), 'R'): {(0, 1): 1.0}, ((0, 1), 'U'): {(0, 1): 1.0}, ((0, 1), 'D'): {(0, 1): 1.0}, ((0, 1), 'L'): {(0, 0): 1.0}, ((0, 1), 'R'): {(0, 2): 1.0}, ((0, 2), 'U'): {(0, 2): 1.0}, ((0, 2), 'D'): {(1, 2): 1.0}, ((0, 2), 'L'): {(0, 1): 1.0}, ((0, 2), 'R'): {(0, 3): 1.0}, ((2, 1), 'U'): {(2, 1): 1.0}, ((2, 1), 'D'): {(2, 1): 1.0}, ((2, 1), 'L'): {(2, 0): 1.0}, ((2, 1), 'R'): {(2, 2): 1.0}, ((2, 2), 'U'): {(1, 2): 1.0}, ((2, 2), 'D'): {(2, 2): 1.0}, ((2, 2), 'L'): {(2, 1): 1.0}, ((2, 2), 'R'): {(2, 3): 1.0}, ((2, 3), 'U'): {(1, 3): 1.0}, ((2, 3), 'D'): {(2, 3): 1.0}, ((2, 3), 'L'): {(2, 2): 1.0}, ((2, 3), 'R'): {(2, 3): 1.0}, ((1, 2), 'U'): {(0, 2): 0.5, (1, 3): 0.5}, ((1, 2), 'D'): {(2, 2): 1.0}, ((1, 2), 'L'): {(1, 2): 1.0}, ((1, 2), 'R'): {(1, 3): 1.0}, } g.set(rewards, actions, probs) return g def windy_grid_no_wind(): g = windy_grid() g.probs[((1, 2), 'U')] = {(0, 2): 1.0} return g def windy_grid_penalized(step_cost=-0.1): g = WindyGrid(3, 4, (2, 0)) rewards = { (0, 0): step_cost, (0, 1): step_cost, (0, 2): step_cost, (1, 0): step_cost, (1, 2): step_cost, (2, 0): step_cost, (2, 1): step_cost, (2, 2): step_cost, (2, 3): step_cost, (0, 3): 1, (1, 3): -1 } actions = { (0, 0): ('D', 'R'), (0, 1): ('L', 'R'), (0, 2): ('L', 'D', 'R'), (1, 0): ('U', 'D'), (1, 2): ('U', 'D', 'R'), (2, 0): ('U', 'R'), (2, 1): ('L', 'R'), (2, 2): ('L', 'R', 'U'), (2, 3): ('L', 'U'), } # p(s' | s, a) represented as: # KEY: (s, a) --> VALUE: {s': p(s' | s, a)} probs = { ((2, 0), 'U'): {(1, 0): 1.0}, ((2, 0), 'D'): {(2, 0): 1.0}, ((2, 0), 'L'): {(2, 0): 1.0}, 
((2, 0), 'R'): {(2, 1): 1.0}, ((1, 0), 'U'): {(0, 0): 1.0}, ((1, 0), 'D'): {(2, 0): 1.0}, ((1, 0), 'L'): {(1, 0): 1.0}, ((1, 0), 'R'): {(1, 0): 1.0}, ((0, 0), 'U'): {(0, 0): 1.0}, ((0, 0), 'D'): {(1, 0): 1.0}, ((0, 0), 'L'): {(0, 0): 1.0}, ((0, 0), 'R'): {(0, 1): 1.0}, ((0, 1), 'U'): {(0, 1): 1.0}, ((0, 1), 'D'): {(0, 1): 1.0}, ((0, 1), 'L'): {(0, 0): 1.0}, ((0, 1), 'R'): {(0, 2): 1.0}, ((0, 2), 'U'): {(0, 2): 1.0}, ((0, 2), 'D'): {(1, 2): 1.0}, ((0, 2), 'L'): {(0, 1): 1.0}, ((0, 2), 'R'): {(0, 3): 1.0}, ((2, 1), 'U'): {(2, 1): 1.0}, ((2, 1), 'D'): {(2, 1): 1.0}, ((2, 1), 'L'): {(2, 0): 1.0}, ((2, 1), 'R'): {(2, 2): 1.0}, ((2, 2), 'U'): {(1, 2): 1.0}, ((2, 2), 'D'): {(2, 2): 1.0}, ((2, 2), 'L'): {(2, 1): 1.0}, ((2, 2), 'R'): {(2, 3): 1.0}, ((2, 3), 'U'): {(1, 3): 1.0}, ((2, 3), 'D'): {(2, 3): 1.0}, ((2, 3), 'L'): {(2, 2): 1.0}, ((2, 3), 'R'): {(2, 3): 1.0}, ((1, 2), 'U'): {(0, 2): 0.5, (1, 3): 0.5}, ((1, 2), 'D'): {(2, 2): 1.0}, ((1, 2), 'L'): {(1, 2): 1.0}, ((1, 2), 'R'): {(1, 3): 1.0}, } g.set(rewards, actions, probs) return g def grid_5x5(step_cost=-0.1): g = Grid(5, 5, (4, 0)) rewards = {(0, 4): 1, (1, 4): -1} actions = { (0, 0): ('D', 'R'), (0, 1): ('L', 'R'), (0, 2): ('L', 'R'), (0, 3): ('L', 'D', 'R'), (1, 0): ('U', 'D', 'R'), (1, 1): ('U', 'D', 'L'), (1, 3): ('U', 'D', 'R'), (2, 0): ('U', 'D', 'R'), (2, 1): ('U', 'L', 'R'), (2, 2): ('L', 'R', 'D'), (2, 3): ('L', 'R', 'U'), (2, 4): ('L', 'U', 'D'), (3, 0): ('U', 'D'), (3, 2): ('U', 'D'), (3, 4): ('U', 'D'), (4, 0): ('U', 'R'), (4, 1): ('L', 'R'), (4, 2): ('L', 'R', 'U'), (4, 3): ('L', 'R'), (4, 4): ('L', 'U'), } g.set(rewards, actions) # non-terminal states visitable_states = actions.keys() for s in visitable_states: g.rewards[s] = step_cost return g
WindyGrid
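`WindyGrid.move` reduces to sampling the next state from the categorical distribution `p(s' | s, a)`. A standalone version using the one stochastic cell from `windy_grid()` (helper names here are illustrative):

```python
import numpy as np

# The single windy cell: going 'U' from (1, 2) lands on (0, 2) or (1, 3)
# with equal probability.
probs = {((1, 2), "U"): {(0, 2): 0.5, (1, 3): 0.5}}
rng = np.random.default_rng(0)

def sample_next_state(s, a):
    dist = probs[(s, a)]
    states, ps = list(dist.keys()), list(dist.values())
    return states[rng.choice(len(states), p=ps)]

print(sample_next_state((1, 2), "U"))
```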
python
rapidsai__cudf
python/cudf/cudf/core/column/decimal.py
{ "start": 14559, "end": 17080 }
class ____(DecimalBaseColumn): _VALID_PLC_TYPES = {plc.TypeId.DECIMAL32} def __init__( self, plc_column: plc.Column, size: int, dtype: Decimal32Dtype, offset: int, null_count: int, exposed: bool, ) -> None: if not isinstance(dtype, Decimal32Dtype): raise ValueError(f"{dtype=} must be a Decimal32Dtype instance") super().__init__( plc_column=plc_column, size=size, dtype=dtype, offset=offset, null_count=null_count, exposed=exposed, ) @classmethod def from_arrow(cls, data: pa.Array | pa.ChunkedArray) -> Self: return cls._from_32_64_arrow( data, view_type="int32", plc_type=plc.TypeId.DECIMAL32, step=4 ) def to_arrow(self) -> pa.Array: data_buf_32 = np.array(self.base_data.memoryview()).view("int32") # type: ignore[union-attr] data_buf_128: np.ndarray = np.empty( len(data_buf_32) * 4, dtype="int32" ) # use striding to set the first 32 bits of each 128-bit chunk: data_buf_128[::4] = data_buf_32 # use striding again to set the remaining bits of each 128-bit chunk: # 0 for non-negative values, -1 for negative values: data_buf_128[1::4] = np.piecewise( data_buf_32, [data_buf_32 < 0], [-1, 0] ) data_buf_128[2::4] = np.piecewise( data_buf_32, [data_buf_32 < 0], [-1, 0] ) data_buf_128[3::4] = np.piecewise( data_buf_32, [data_buf_32 < 0], [-1, 0] ) data_buf = pa.py_buffer(data_buf_128) mask_buf = ( self.base_mask if self.base_mask is None else pa.py_buffer(self.base_mask.memoryview()) ) return pa.Array.from_buffers( type=self.dtype.to_arrow(), # type: ignore[union-attr] offset=self._offset, length=self.size, # PyArrow stubs are too strict - from_buffers should accept None for missing buffers buffers=[mask_buf, data_buf], # type: ignore[list-item] ) def _with_type_metadata(self: Self, dtype: DtypeObj) -> Self: if isinstance(dtype, Decimal32Dtype): self.dtype.precision = dtype.precision # type: ignore[union-attr] if cudf.get_option("mode.pandas_compatible"): self._dtype = get_dtype_of_same_type(dtype, self.dtype) return self
Decimal32Column
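The striding trick in `Decimal32Column.to_arrow`, in isolation: sign-extend int32 words into 128-bit values stored as four int32 limbs, filling the upper limbs with 0 for non-negative values and -1 (all ones) for negative ones. This sketch assumes the same little-endian limb layout as the source:

```python
import numpy as np

src = np.array([5, -7], dtype="int32")
out = np.empty(len(src) * 4, dtype="int32")
out[::4] = src  # low 32 bits of each 128-bit value
fill = np.piecewise(src, [src < 0], [-1, 0])  # -1 for negatives, else 0
out[1::4] = fill
out[2::4] = fill
out[3::4] = fill
print(out.reshape(-1, 4))
# [[ 5  0  0  0]
#  [-7 -1 -1 -1]]
```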
python
wandb__wandb
wandb/apis/public/artifacts.py
{ "start": 23944, "end": 28430 }
class ____(SizedRelayPaginator["ArtifactFragment", "Artifact"]): """An iterable collection of artifact versions associated with a project. Optionally pass in filters to narrow down the results based on specific criteria. Args: client: The client instance to use for querying W&B. entity: The entity (user or team) that owns the project. project: The name of the project to query for artifacts. collection_name: The name of the artifact collection to query. type: The type of the artifacts to query. Common examples include "dataset" or "model". filters: Optional mapping of filters to apply to the query. order: Optional string to specify the order of the results. per_page: The number of artifact versions to fetch per page. Default is 50. tags: Optional string or list of strings to filter artifacts by tags. <!-- lazydoc-ignore-init: internal --> """ QUERY: Document # Must be set per-instance # Loosely-annotated to avoid importing heavy types at module import time. last_response: _ArtifactConnectionGeneric | None def __init__( self, client: Client, entity: str, project: str, collection_name: str, type: str, filters: Mapping[str, Any] | None = None, order: str | None = None, per_page: int = 50, tags: str | list[str] | None = None, ): from wandb.sdk.artifacts._generated import PROJECT_ARTIFACTS_GQL omit_fields = omit_artifact_fields(client) self.QUERY = gql_compat(PROJECT_ARTIFACTS_GQL, omit_fields=omit_fields) self.entity = entity self.collection_name = collection_name self.type = type self.project = project self.filters = {"state": "COMMITTED"} if filters is None else filters self.tags = always_list(tags or []) self.order = order variables = { "project": self.project, "entity": self.entity, "order": self.order, "type": self.type, "collection": self.collection_name, "filters": json.dumps(self.filters), } super().__init__(client, variables=variables, per_page=per_page) @override def _update_response(self) -> None: from wandb.sdk.artifacts._generated import ArtifactFragment, ProjectArtifacts data = self.client.execute(self.QUERY, variable_values=self.variables) result = ProjectArtifacts.model_validate(data) # Extract the inner `*Connection` result for faster/easier access. if not ( (proj := result.project) and (type_ := proj.artifact_type) and (collection := type_.artifact_collection) and (conn := collection.artifacts) ): raise ValueError(f"Unable to parse {nameof(type(self))!r} response data") self.last_response = _ArtifactConnectionGeneric[ ArtifactFragment ].model_validate(conn) # FIXME: For now, we deliberately override the signatures of: # - `_convert()` # - `convert_objects()` # ... since the prior implementation must get `version` from the GQL edge # (i.e. `edge.version`), which lives outside of the GQL node (`edge.node`). # # In the future, we should move to fetching artifacts via (GQL) artifactMemberships, # not (GQL) artifacts, so we don't have to deal with this hack. @override def _convert(self, edge: _ArtifactEdgeGeneric[ArtifactFragment]) -> Artifact: from wandb.sdk.artifacts._validators import FullArtifactPath from wandb.sdk.artifacts.artifact import Artifact return Artifact._from_attrs( path=FullArtifactPath( prefix=self.entity, project=self.project, name=f"{self.collection_name}:{edge.version}", ), attrs=edge.node, client=self.client, ) @override def convert_objects(self) -> list[Artifact]: """Convert the raw response data into a list of wandb.Artifact objects. 
<!-- lazydoc-ignore: internal --> """ if (conn := self.last_response) is None: return [] artifacts = (self._convert(edge) for edge in conn.edges if edge.node) required_tags = set(self.tags or []) return [art for art in artifacts if required_tags.issubset(art.tags)]
Artifacts
python
mahmoud__boltons
boltons/tbutils.py
{ "start": 25012, "end": 30474 }
class ____: """Stores a parsed traceback and exception as would be typically output by :func:`sys.excepthook` or :func:`traceback.print_exception`. .. note: Does not currently store SyntaxError details such as column. """ def __init__(self, exc_type_name, exc_msg, frames=None): self.exc_type = exc_type_name self.exc_msg = exc_msg self.frames = list(frames or []) @property def source_file(self): """ The file path of module containing the function that raised the exception, or None if not available. """ try: return self.frames[-1]['filepath'] except IndexError: return None def to_dict(self): "Get a copy as a JSON-serializable :class:`dict`." return {'exc_type': self.exc_type, 'exc_msg': self.exc_msg, 'frames': list(self.frames)} def __repr__(self): cn = self.__class__.__name__ return ('%s(%r, %r, frames=%r)' % (cn, self.exc_type, self.exc_msg, self.frames)) def to_string(self): """Formats the exception and its traceback into the standard format, as returned by the traceback module. ``ParsedException.from_string(text).to_string()`` should yield ``text``. .. note:: Note that this method does not output "anchors" (e.g., ``~~~~~^^``), as were added in Python 3.13. See the built-in ``traceback`` module if these are necessary. """ lines = ['Traceback (most recent call last):'] for frame in self.frames: lines.append(' File "{}", line {}, in {}'.format(frame['filepath'], frame['lineno'], frame['funcname'])) source_line = frame.get('source_line') if source_line: lines.append(f' {source_line}') if self.exc_msg: lines.append(f'{self.exc_type}: {self.exc_msg}') else: lines.append(f'{self.exc_type}') return '\n'.join(lines) @classmethod def from_string(cls, tb_str): """Parse a traceback and exception from the text *tb_str*. This text is expected to have been decoded, otherwise it will be interpreted as UTF-8. This method does not search a larger body of text for tracebacks. If the first line of the text passed does not match one of the known patterns, a :exc:`ValueError` will be raised. This method will ignore trailing text after the end of the first traceback. Args: tb_str (str): The traceback text (:class:`unicode` or UTF-8 bytes) """ if not isinstance(tb_str, str): tb_str = tb_str.decode('utf-8') tb_lines = tb_str.lstrip().splitlines() # First off, handle some ignored exceptions. 
These can be the # result of exceptions raised by __del__ during garbage # collection while tb_lines: cl = tb_lines[-1] if cl.startswith('Exception ') and cl.endswith('ignored'): tb_lines.pop() else: break if tb_lines and tb_lines[0].strip() == 'Traceback (most recent call last):': start_line = 1 frame_re = _frame_re elif len(tb_lines) > 1 and tb_lines[-2].lstrip().startswith('^'): # This is to handle the slight formatting difference # associated with SyntaxErrors, which also don't really # have tracebacks start_line = 0 frame_re = _se_frame_re else: raise ValueError('unrecognized traceback string format') frames = [] line_no = start_line while True: frame_line = tb_lines[line_no].strip() frame_match = frame_re.match(frame_line) if frame_match: frame_dict = frame_match.groupdict() try: next_line = tb_lines[line_no + 1] except IndexError: # We read what we could next_line = '' next_line_stripped = next_line.strip() if ( frame_re.match(next_line_stripped) or # The exception message will not be indented # This check is to avoid overrunning on eval-like # tracebacks where the last frame doesn't have source # code in the traceback not next_line.startswith(' ') ): frame_dict['source_line'] = '' else: frame_dict['source_line'] = next_line_stripped line_no += 1 if _underline_re.match(tb_lines[line_no + 1]): # To deal with anchors line_no += 1 else: break line_no += 1 frames.append(frame_dict) try: exc_line = '\n'.join(tb_lines[line_no:]) exc_type, _, exc_msg = exc_line.partition(': ') except Exception: exc_type, exc_msg = '', '' return cls(exc_type, exc_msg, frames) ParsedTB = ParsedException # legacy alias
ParsedException
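A quick exercise of the round-trip contract documented above (`from_string(text).to_string()` should yield `text`), assuming boltons is installed and the traceback uses the standard format:

```python
from boltons.tbutils import ParsedException

tb_text = '''Traceback (most recent call last):
  File "app.py", line 3, in main
    1 / 0
ZeroDivisionError: division by zero'''

parsed = ParsedException.from_string(tb_text)
print(parsed.exc_type, "-", parsed.exc_msg)  # ZeroDivisionError - division by zero
print(parsed.frames[0]["funcname"])          # main
assert parsed.to_string() == tb_text         # documented round-trip
```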
python
apache__airflow
airflow-ctl/src/airflowctl/api/datamodels/generated.py
{ "start": 52222, "end": 52586 }
class ____(BaseModel): """ DAG Warning serializer for responses. """ dag_id: Annotated[str, Field(title="Dag Id")] warning_type: DagWarningType message: Annotated[str, Field(title="Message")] timestamp: Annotated[datetime, Field(title="Timestamp")] dag_display_name: Annotated[str, Field(title="Dag Display Name")]
DAGWarningResponse
python
google__jax
jax/_src/stages.py
{ "start": 23867, "end": 33645 }
class ____(Stage): """Compiled representation of a function specialized to types/values. A compiled computation is associated with an executable and the remaining information needed to execute it. It also provides a common API for querying properties of compiled computations across JAX's various compilation paths and backends. """ __slots__ = ["args_info", "out_tree", "_executable", "_no_kwargs", "_params"] args_info: Any # PyTree of ArgInfo, not including const_args out_tree: tree_util.PyTreeDef _executable: Executable _no_kwargs: bool _params: CompiledCallParams def __init__(self, executable, const_args: list[ArrayLike], args_info, out_tree, no_kwargs=False, in_types=None, out_types=None): self._executable = executable self._no_kwargs = no_kwargs self.args_info = args_info self.out_tree = out_tree self._params = CompiledCallParams( self._executable, self._no_kwargs, self.in_tree, self.out_tree, const_args, in_types, out_types) self._call = None def as_text(self) -> str | None: """A human-readable text representation of this executable. Intended for visualization and debugging purposes. This is not a valid nor reliable serialization. Returns ``None`` if unavailable, e.g. based on backend, compiler, or runtime. """ try: return self._executable.as_text() except NotImplementedError: return None def cost_analysis(self) -> Any | None: """A summary of execution cost estimates. Intended for visualization and debugging purposes. The object output by this is some simple data structure that can easily be printed or serialized (e.g. nested dicts, lists, and tuples with numeric leaves). However, its structure can be arbitrary: it may be inconsistent across versions of JAX and jaxlib, or even across invocations. Returns ``None`` if unavailable, e.g. based on backend, compiler, or runtime. """ # TODO(frostig): improve annotation (basic pytree of arbitrary structure) try: return self._executable.cost_analysis() except NotImplementedError: return None def memory_analysis(self) -> Any | None: """A summary of estimated memory requirements. Intended for visualization and debugging purposes. The object output by this is some simple data structure that can easily be printed or serialized (e.g. nested dicts, lists, and tuples with numeric leaves). However, its structure can be arbitrary: it may be inconsistent across versions of JAX and jaxlib, or even across invocations. Returns ``None`` if unavailable, e.g. based on backend, compiler, or runtime. """ # TODO(frostig): improve annotation (basic pytree of arbitrary structure) try: return self._executable.memory_analysis() except NotImplementedError: return None @property def out_info(self): # PyTree of jax.ShapeDtypeStruct out_avals = self._executable.out_avals out_formats_flat = self._output_formats_flat return self.out_tree.unflatten( [core.ShapeDtypeStruct(o.shape, o.dtype, sharding=f) for o, f in zip(out_avals, out_formats_flat)]) def runtime_executable(self) -> Any | None: """An arbitrary object representation of this executable. Intended for debugging purposes. This is not valid nor reliable serialization. The output has no guarantee of consistency across invocations. Returns ``None`` if unavailable, e.g. based on backend, compiler, or runtime. 
""" return self._executable.runtime_executable() def _input_shardings_flat(self): shardings_flat = self._executable._in_shardings # Some input shardings got DCE'd if self.in_tree.num_leaves > len(shardings_flat): iter_shardings_flat = iter(shardings_flat) shardings_flat = [next(iter_shardings_flat) if i in self._executable._kept_var_idx else None for i in range(self.in_tree.num_leaves)] return shardings_flat @property def input_shardings(self): # -> PyTree[sharding.Sharding] shardings_flat = self._input_shardings_flat() return tree_util.tree_unflatten(self.in_tree, shardings_flat) # pytype: disable=attribute-error @property def output_shardings(self): # -> PyTree[sharding.Sharding] shardings_flat = self._executable._out_shardings return tree_util.tree_unflatten(self.out_tree, shardings_flat) # pytype: disable=attribute-error def _input_layouts_flat(self): layouts_flat = self._executable._xla_in_layouts # Some input layouts got DCE'd if self.in_tree.num_leaves > len(layouts_flat): iter_layouts_flat = iter(layouts_flat) layouts_flat = [next(iter_layouts_flat) if i in self._executable._kept_var_idx else None for i in range(self.in_tree.num_leaves)] return layouts_flat @property def input_formats(self): layouts_flat = self._input_layouts_flat() shardings_flat = self._input_shardings_flat() formats_flat = [Format(l, s) for l, s in zip(layouts_flat, shardings_flat)] return tree_util.tree_unflatten(self.in_tree, formats_flat) # pytype: disable=attribute-error @property def _output_formats_flat(self): layouts_flat = self._executable._xla_out_layouts shardings_flat = self._executable._out_shardings assert all(isinstance(l, Layout) for l in layouts_flat) return [Format(l, s) for l, s in zip(layouts_flat, shardings_flat)] @property def output_formats(self): formats_flat = self._output_formats_flat return tree_util.tree_unflatten(self.out_tree, formats_flat) # pytype: disable=attribute-error @staticmethod def call(*args, **kwargs): util.test_event("stages_compiled_call") # This is because `__call__` passes in `self._params` as the first argument. # Instead of making the call signature `call(params, *args, **kwargs)` # extract it from args because `params` can be passed as a kwarg by users # which might conflict here. params = args[0] args = args[1:] # Not including const_args if config.dynamic_shapes.value: raise NotImplementedError if params.no_kwargs and kwargs: kws = ', '.join(kwargs.keys()) raise NotImplementedError( "function was compiled by a transformation that does not support " f"keyword arguments, but called with keyword arguments: {kws}") if params.is_high: hi_args_flat, in_hi_tree = tree_util.tree_flatten((args, kwargs)) in_hi_tree_, final_qdds = params.in_types args_flat = [a.read_loval(core.cur_qdd(x), x) if (a := typeof(x)).has_qdd else a.lower_val(x) for x in hi_args_flat] args_flat, in_tree = \ tree_util.tree_flatten(tree_util.tree_unflatten(in_hi_tree, args_flat)) else: args_flat, in_tree = tree_util.tree_flatten((args, kwargs)) if in_tree != params.in_tree: errs = list(tree_util.equality_errors_pytreedef(in_tree, params.in_tree)) msg = [] msg.append( "Function compiled with input pytree does not match the input pytree" f" it was called with. 
There are {len(errs)} mismatches, including:") for path, thing1, thing2, explanation in errs: fst, *rest = path base = ['args', 'kwargs'][fst.idx] msg.append( f" * at {base}{tree_util.keystr(tuple(rest))}, seen {thing2} but now" f" given {thing1}, so {explanation}") raise TypeError('\n'.join(msg)) if not core.trace_state_clean(): # We check for tracers when we are under a transformation, and skip the # check in the common path. We can't transform ahead-of-time compiled # calls, since we've lowered and compiled for a fixed function signature, # and JAX transformations change signatures. for arg in args_flat: if isinstance(arg, core.Tracer): raise TypeError( "Cannot apply JAX transformations to a function lowered and " "compiled for a particular signature. Detected argument of " f"Tracer type {type(arg)}.") lo_outs = params.executable.call(*params.const_args, *args_flat) if params.is_high: out_mut, lo_outs = util.split_list(lo_outs, [_num_himuts_out(final_qdds)]) _apply_himut(final_qdds, hi_args_flat, out_mut) out_hi_tree, out_hi_types = params.out_types out_flat = _raise_lo_outs(out_hi_types, lo_outs) outs = tree_util.tree_unflatten(out_hi_tree, out_flat) else: out_flat = lo_outs outs = tree_util.tree_unflatten(params.out_tree, out_flat) return outs, out_flat, args_flat def __call__(self, *args, **kwargs): if self._call is None: self._call = self._executable.create_cpp_call(self._params) if self._call is None: params = self._params def cpp_call_fallback(*args, **kwargs): outs, _, _ = Compiled.call(params, *args, **kwargs) return outs self._call = cpp_call_fallback return self._call(*args, **kwargs) def _raise_lo_outs(avals, lo_outs): from jax._src.interpreters import partial_eval as pe # type: ignore return pe.raise_lo_outs(avals, lo_outs) # TODO(mattjj): de-dup with partial_eval.py def _num_himuts_out(final_qdds): return sum(len(a.lo_ty()) for a in final_qdds if a.has_qdd) # TODO(mattjj): de-dup with partial_eval.py def _apply_himut(final_qdds, hi_args, out_mut): out_mut_ = iter(out_mut) for i, a in enumerate(final_qdds): if isinstance(a, core.AvalQDD): lo_vals = it.islice(out_mut_, len(a.aval.lo_ty_qdd(a.qdd))) a.aval.update_from_loval(a.qdd, hi_args[i], *lo_vals) # type: ignore assert next(out_mut_, None) is None @runtime_checkable
Compiled
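The pytree structure check in `Compiled.call` compares the call-time argument tree against the one captured at compile time; `jax.tree_util` makes that comparison concrete in a few lines:

```python
import jax

# (args, kwargs) trees with different arity flatten to different treedefs.
_, tree_a = jax.tree_util.tree_flatten(((1, 2), {}))
_, tree_b = jax.tree_util.tree_flatten(((1, 2, 3), {}))
print(tree_a == tree_b)  # False: the compiled signature no longer matches
```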
python
django__django
tests/custom_managers/models.py
{ "start": 579, "end": 717 }
class ____(models.Manager): def get_queryset(self): return super().get_queryset().filter(is_published=True)
PublishedBookManager
python
tensorflow__tensorflow
tensorflow/python/checkpoint/tensor_callable_test.py
{ "start": 1695, "end": 2534 }
class ____(test.TestCase): def test_callable(self): trackable = IncrementWhenSave() ckpt = checkpoint.Checkpoint(attr=trackable) prefix = os.path.join(self.get_temp_dir(), "ckpt") save_path = ckpt.save(prefix) self.assertEqual(1, self.evaluate(trackable.read_counter)) ckpt.save(prefix) self.assertEqual(2, self.evaluate(trackable.read_counter)) ckpt.restore(save_path) self.assertEqual(0, self.evaluate(trackable.read_counter)) def test_callable_saved_model_compatibility(self): trackable = IncrementWhenSave() trackable.read_counter.assign(15) save_path = os.path.join(self.get_temp_dir(), "saved_model") with self.assertRaisesRegex(NotImplementedError, "returns a Callable"): saved_model_save.save(trackable, save_path) if __name__ == "__main__": test.main()
CallableTest
python
tensorflow__tensorflow
tensorflow/python/trackable/base_delegate_test.py
{ "start": 1238, "end": 1509 }
class ____(base.Trackable): def __init__(self, v): self.v = v self._track_trackable(v, "v") def _copy_trackable_to_cpu(self, object_map): if self not in object_map: object_map[self] = Inner(self.v) self.v._copy_trackable_to_cpu(object_map)
Inner
python
run-llama__llama_index
llama-index-core/llama_index/core/postprocessor/node_recency.py
{ "start": 1368, "end": 3000 }
class ____(BaseNodePostprocessor):
    """
    Fixed Recency post-processor.

    This post-processor orders nodes by date and keeps the top_k most recent.

    Assumes the date_key corresponds to a date field in the metadata.
    """

    top_k: int = 1
    date_key: str = "date"

    @classmethod
    def class_name(cls) -> str:
        return "FixedRecencyPostprocessor"

    def _postprocess_nodes(
        self,
        nodes: List[NodeWithScore],
        query_bundle: Optional[QueryBundle] = None,
    ) -> List[NodeWithScore]:
        """Postprocess nodes."""
        try:
            import pandas as pd
        except ImportError:
            raise ImportError(
                "pandas is required for this function. Please install it with `pip install pandas`."
            )

        if query_bundle is None:
            raise ValueError("Missing query bundle in extra info.")

        # sort nodes by date
        node_dates = pd.to_datetime(
            [node.node.metadata[self.date_key] for node in nodes]
        )
        sorted_node_idxs = np.flip(node_dates.argsort())
        sorted_nodes = [nodes[idx] for idx in sorted_node_idxs]

        return sorted_nodes[: self.top_k]


DEFAULT_QUERY_EMBEDDING_TMPL = (
    "The current document is provided.\n"
    "----------------\n"
    "{context_str}\n"
    "----------------\n"
    "Given the document, we wish to find documents that contain \n"
    "similar context. Note that these documents are older "
    "than the current document, meaning that certain details may be changed. \n"
    "However, the high-level context should be similar.\n"
)
FixedRecencyPostprocessor
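The ordering step of `FixedRecencyPostprocessor`, stripped of llama-index types: parse dates, argsort, flip to newest-first, slice `top_k`:

```python
import numpy as np
import pandas as pd

dates = ["2023-01-05", "2024-03-01", "2022-11-20"]
order = np.flip(pd.to_datetime(dates).argsort())  # newest first
top_k = 1
print([dates[i] for i in order[:top_k]])  # ['2024-03-01']
```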
python
sympy__sympy
sympy/plotting/series.py
{ "start": 67507, "end": 67754 }
class ____(Line2DBaseSeries): """A base class for 3D lines. Most of the stuff is derived from Line2DBaseSeries.""" is_2Dline = False is_3Dline = True _dim = 3 def __init__(self): super().__init__()
Line3DBaseSeries
python
getsentry__sentry
tests/sentry/incidents/endpoints/serializers/test_alert_rule.py
{ "start": 8853, "end": 10788 }
class ____(BaseAlertRuleSerializerTest, TestCase): def test_simple(self) -> None: projects = [self.project, self.create_project()] alert_rule = self.create_alert_rule(projects=projects) result = serialize(alert_rule, serializer=DetailedAlertRuleSerializer()) self.assert_alert_rule_serialized(alert_rule, result) assert sorted(result["projects"]) == sorted(p.slug for p in projects) assert result["eventTypes"] == [SnubaQueryEventType.EventType.ERROR.name.lower()] def test_triggers(self) -> None: alert_rule = self.create_alert_rule() other_alert_rule = self.create_alert_rule() trigger = create_alert_rule_trigger(alert_rule, "test", 1000) result = serialize([alert_rule, other_alert_rule], serializer=DetailedAlertRuleSerializer()) assert result[0]["triggers"] == [serialize(trigger)] assert result[1]["triggers"] == [] @patch( "sentry.incidents.logic.get_target_identifier_display_for_integration", return_value=AlertTarget(123, "test"), ) def test_trigger_actions(self, mock_get: MagicMock) -> None: alert_rule = self.create_alert_rule() other_alert_rule = self.create_alert_rule() trigger = create_alert_rule_trigger(alert_rule, "test", 1000) trigger_action = create_alert_rule_trigger_action( trigger, AlertRuleTriggerAction.Type.PAGERDUTY, AlertRuleTriggerAction.TargetType.SPECIFIC, target_identifier="123", integration_id=self.integration.id, priority="error", ) result = serialize([alert_rule, other_alert_rule], serializer=DetailedAlertRuleSerializer()) assert result[0]["triggers"] == [serialize(trigger)] assert result[0]["triggers"][0]["actions"] == [serialize(trigger_action)] assert result[1]["triggers"] == []
DetailedAlertRuleSerializerTest
python
scipy__scipy
scipy/stats/_stats_py.py
{ "start": 418533, "end": 418915 }
class ____: # A very simple, array-API compatible chi-squared distribution for use in # hypothesis tests. May be replaced by new infrastructure chi-squared # distribution in due time. def __init__(self, df): self.df = df def cdf(self, x): return special.chdtr(self.df, x) def sf(self, x): return special.chdtrc(self.df, x)
_SimpleChi2
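`_SimpleChi2` delegates to `scipy.special`'s chi-squared helpers; `cdf` and `sf` are complements of each other, which a quick check confirms:

```python
from scipy import special

df, x = 3, 2.5
cdf, sf = special.chdtr(df, x), special.chdtrc(df, x)
print(round(cdf, 4), round(sf, 4))  # ~0.5248 and ~0.4752
assert abs(cdf + sf - 1.0) < 1e-12  # cdf and sf sum to one
```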
python
bokeh__bokeh
src/bokeh/sphinxext/_internal/bokeh_autodoc.py
{ "start": 2918, "end": 3243 }
class ____(AttributeDocumenter): directivetype = "bokeh-prop" objtype = "prop" priority = 20 member_order = -100 # This puts properties first in the docs @classmethod def can_document_member(cls, member, membername, isattr, parent): return isinstance(member, PropertyDescriptor)
PropDocumenter
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP050.py
{ "start": 329, "end": 378 }
class ____( A, metaclass=type, ): ...
B
python
huggingface__transformers
src/transformers/models/edgetam_video/modeling_edgetam_video.py
{ "start": 36094, "end": 45979 }
class ____: r""" Manages video inference session parameters, state and cache. Args: video (`torch.FloatTensor`, *optional*): The video to process. No need to provide when streaming. video_height (`int`, *optional*): The height of the video. video_width (`int`, *optional*): The width of the video. inference_device (`torch.device`, *optional*, defaults to `"cpu"`): The device to use for inference. inference_state_device (`torch.device`, *optional*, defaults to `"cpu"`): The device to store the inference state on. video_storage_device (`torch.device`, *optional*, defaults to `"cpu"`): The device to store the video on. dtype (`torch.dtype`, *optional*, defaults to `"float32"`): The dtype to use for the video. max_vision_features_cache_size (`int`, *optional*, defaults to 1): The maximum number of vision features to cache. """ def __init__( self, video: Optional[torch.FloatTensor] = None, video_height: Optional[int] = None, video_width: Optional[int] = None, inference_device: Union[torch.device, str] = "cpu", inference_state_device: Union[torch.device, str] = "cpu", video_storage_device: Union[torch.device, str] = "cpu", dtype: Union[torch.dtype, str] = "float32", max_vision_features_cache_size: int = 1, ): # store as a dictionary to avoid double memory allocation with torch.cat when adding new frames self.processed_frames = ( dict(enumerate(video.to(video_storage_device, dtype=dtype))) if video is not None else None ) self.video_height = video_height self.video_width = video_width self.inference_device = inference_device self.inference_state_device = inference_state_device self.video_storage_device = video_storage_device self.dtype = dtype self.max_vision_features_cache_size = max_vision_features_cache_size # Cache for computed features self.cache = EdgeTamVideoInferenceCache( inference_device=self.inference_device, inference_state_device=self.inference_state_device, max_vision_features_cache_size=self.max_vision_features_cache_size, ) # Persistent object tracking state self._obj_id_to_idx = OrderedDict() self._obj_idx_to_id = OrderedDict() self.obj_ids = [] # Persistent user inputs self.point_inputs_per_obj = {} self.mask_inputs_per_obj = {} # Persistent model outputs/history self.output_dict_per_obj = {} self.frames_tracked_per_obj = {} # Session state flags self.obj_with_new_inputs = [] @property def num_frames(self) -> Optional[int]: return len(self.processed_frames) if self.processed_frames is not None else None # Object management def obj_id_to_idx(self, obj_id: int) -> int: """Map object ID to index, creating new entry if needed.""" obj_idx = self._obj_id_to_idx.get(obj_id, None) if obj_idx is not None: return obj_idx obj_idx = len(self._obj_id_to_idx) self._obj_id_to_idx[obj_id] = obj_idx self._obj_idx_to_id[obj_idx] = obj_id self.obj_ids = list(self._obj_id_to_idx) self.point_inputs_per_obj[obj_idx] = {} self.mask_inputs_per_obj[obj_idx] = {} self.output_dict_per_obj[obj_idx] = { "cond_frame_outputs": {}, "non_cond_frame_outputs": {}, } self.frames_tracked_per_obj[obj_idx] = {} return obj_idx # Video Inference specific functions def obj_idx_to_id(self, obj_idx: int) -> int: """Map model-side object index to client-side object id.""" return self._obj_idx_to_id[obj_idx] def get_obj_num(self) -> int: """Get the total number of unique object ids received so far in this session.""" return len(self._obj_idx_to_id) # Input management with device handling def add_point_inputs(self, obj_idx: int, frame_idx: int, inputs: dict): """Add point inputs with automatic device placement.""" 
device_inputs = {} for key, value in inputs.items(): if isinstance(value, torch.Tensor): device_inputs[key] = value.to(self.inference_device, non_blocking=True) else: device_inputs[key] = value self.point_inputs_per_obj[obj_idx][frame_idx] = device_inputs def remove_point_inputs(self, obj_idx: int, frame_idx: int): """Remove point inputs.""" self.point_inputs_per_obj[obj_idx].pop(frame_idx, None) def add_mask_inputs(self, obj_idx: int, frame_idx: int, inputs: torch.Tensor): """Add mask inputs with automatic device placement.""" self.mask_inputs_per_obj[obj_idx][frame_idx] = inputs.to( self.inference_device, dtype=self.dtype, non_blocking=True ) def remove_mask_inputs(self, obj_idx: int, frame_idx: int): """Remove mask inputs.""" self.mask_inputs_per_obj[obj_idx].pop(frame_idx, None) # Output management with smart device placement def store_output( self, obj_idx: int, frame_idx: int, output_key: Optional[str] = None, output_value: Optional[Union[torch.Tensor, dict]] = None, is_conditioning_frame: bool = True, ): """ Store output with smart device management. If output_key is None, the output is stored as a dictionary. Args: obj_idx (int): The index of the object. frame_idx (int): The index of the frame. output_key (Optional[str]): The key of the output. If None, the output is stored as a dictionary. output_value (Optional[Union[torch.Tensor, dict]]): The value of the output. is_conditioning_frame (bool): Whether the output is for a conditioning frame. """ storage_key = "cond_frame_outputs" if is_conditioning_frame else "non_cond_frame_outputs" if output_key is None and isinstance(output_value, dict): self.output_dict_per_obj[obj_idx][storage_key][frame_idx] = {} for key, value in output_value.items(): self.store_output(obj_idx, frame_idx, key, value, is_conditioning_frame) return # Device placement: small tensors stay on inference device, large ones go to inference state device if output_key in ["object_pointer", "object_score_logits"]: # Small tensors self.output_dict_per_obj[obj_idx][storage_key][frame_idx][output_key] = output_value elif isinstance(output_value, torch.Tensor): # Large tensors like masks, features self.output_dict_per_obj[obj_idx][storage_key][frame_idx][output_key] = output_value.to( self.inference_state_device, non_blocking=True ) else: self.output_dict_per_obj[obj_idx][storage_key][frame_idx][output_key] = output_value def get_output( self, obj_idx: int, frame_idx: int, output_key: str, is_conditioning_frame: bool = True, ): """ Get output with smart device management. Args: obj_idx (int): The index of the object. frame_idx (int): The index of the frame. output_key (str): The key of the output. is_conditioning_frame (bool): Whether the output is for a conditioning frame. 
""" storage_key = "cond_frame_outputs" if is_conditioning_frame else "non_cond_frame_outputs" out = self.output_dict_per_obj[obj_idx][storage_key].get(frame_idx, None) # move to inference device if needed if out is None: return None value = out[output_key] if isinstance(value, torch.Tensor): value = value.to(self.inference_device, non_blocking=True) return value # Video frame management def add_new_frame(self, pixel_values: torch.Tensor, frame_idx: Optional[int] = None) -> int: """Add new frame with automatic device placement.""" pixel_values = pixel_values.to(self.video_storage_device, dtype=self.dtype, non_blocking=True) if pixel_values.dim() == 4: pixel_values = pixel_values.squeeze(0) if frame_idx is None: frame_idx = len(self.processed_frames) if self.processed_frames is not None else 0 if self.processed_frames is None: self.processed_frames = {frame_idx: pixel_values} else: self.processed_frames[frame_idx] = pixel_values return frame_idx def get_frame(self, frame_idx: int) -> torch.Tensor: """Get frame from video.""" return self.processed_frames[frame_idx].to(self.inference_device, non_blocking=True) def reset_tracking_data(self): """Reset tracking data but keep cache.""" self._obj_id_to_idx.clear() self._obj_idx_to_id.clear() self.obj_ids.clear() self.point_inputs_per_obj.clear() self.mask_inputs_per_obj.clear() self.output_dict_per_obj.clear() self.frames_tracked_per_obj.clear() self.obj_with_new_inputs = [] # Note: cache and video data are preserved def reset_inference_session(self): """Reset tracking data and cache.""" self._obj_id_to_idx.clear() self._obj_idx_to_id.clear() self.obj_ids.clear() self.point_inputs_per_obj.clear() self.mask_inputs_per_obj.clear() self.output_dict_per_obj.clear() self.frames_tracked_per_obj.clear() self.obj_with_new_inputs = [] self.cache.clear_all()
EdgeTamVideoInferenceSession
python
rapidsai__cudf
python/cudf/cudf/core/udf/masked_typing.py
{ "start": 20277, "end": 21729 }
class ____(AttributeTemplate): key = MaskedType(string_view) def resolve_replace(self, mod): return types.BoundFunction( MaskedStringViewReplace, MaskedType(string_view) ) def resolve_count(self, mod): return types.BoundFunction( MaskedStringViewCount, MaskedType(string_view) ) def resolve_value(self, mod): return string_view def resolve_valid(self, mod): return types.boolean # Build attributes for `MaskedType(string_view)` for func in bool_binary_funcs: setattr( MaskedStringViewAttrs, f"resolve_{func}", create_masked_binary_attr(f"MaskedType.{func}", types.boolean), ) for func in int_binary_funcs: setattr( MaskedStringViewAttrs, f"resolve_{func}", create_masked_binary_attr(f"MaskedType.{func}", size_type), ) for func in string_return_attrs: setattr( MaskedStringViewAttrs, f"resolve_{func}", create_masked_binary_attr(f"MaskedType.{func}", managed_udf_string), ) for func in id_unary_funcs: setattr( MaskedStringViewAttrs, f"resolve_{func}", create_masked_unary_attr(f"MaskedType.{func}", types.boolean), ) for func in string_unary_funcs: setattr( MaskedStringViewAttrs, f"resolve_{func}", create_masked_unary_attr(f"MaskedType.{func}", managed_udf_string), )
MaskedStringViewAttrs
python
scikit-learn__scikit-learn
sklearn/tests/test_base.py
{ "start": 17338, "end": 18678 }
class ____(DontPickleAttributeMixin, BaseEstimator): def __init__(self, attribute_pickled=5): self.attribute_pickled = attribute_pickled self._attribute_not_pickled = None def test_pickling_when_getstate_is_overwritten_by_mixin(): estimator = MultiInheritanceEstimator() estimator._attribute_not_pickled = "this attribute should not be pickled" serialized = pickle.dumps(estimator) estimator_restored = pickle.loads(serialized) assert estimator_restored.attribute_pickled == 5 assert estimator_restored._attribute_not_pickled is None assert estimator_restored._restored def test_pickling_when_getstate_is_overwritten_by_mixin_outside_of_sklearn(): try: estimator = MultiInheritanceEstimator() text = "this attribute should not be pickled" estimator._attribute_not_pickled = text old_mod = type(estimator).__module__ type(estimator).__module__ = "notsklearn" serialized = estimator.__getstate__() assert serialized == {"_attribute_not_pickled": None, "attribute_pickled": 5} serialized["attribute_pickled"] = 4 estimator.__setstate__(serialized) assert estimator.attribute_pickled == 4 assert estimator._restored finally: type(estimator).__module__ = old_mod
MultiInheritanceEstimator
python
huggingface__transformers
src/transformers/models/dinat/modeling_dinat.py
{ "start": 1703, "end": 2653 }
class ____(ModelOutput): r""" reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of shape `(batch_size, hidden_size, height, width)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to include the spatial dimensions. """ last_hidden_state: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None attentions: Optional[tuple[torch.FloatTensor, ...]] = None reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass @auto_docstring( custom_intro=""" Dinat model's outputs that also contains a pooling of the last hidden states. """ )
DinatEncoderOutput
python
sympy__sympy
sympy/solvers/ode/single.py
{ "start": 19549, "end": 21968 }
class ____(SinglePatternODESolver): r""" Solves 1st order linear differential equations. These are differential equations of the form .. math:: dy/dx + P(x) y = Q(x)\text{.} These kinds of differential equations can be solved in a general way. The integrating factor `e^{\int P(x) \,dx}` will turn the equation into a separable equation. The general solution is:: >>> from sympy import Function, dsolve, Eq, pprint, diff, sin >>> from sympy.abc import x >>> f, P, Q = map(Function, ['f', 'P', 'Q']) >>> genform = Eq(f(x).diff(x) + P(x)*f(x), Q(x)) >>> pprint(genform) d P(x)*f(x) + --(f(x)) = Q(x) dx >>> pprint(dsolve(genform, f(x), hint='1st_linear_Integral')) / / \ | | | | | / | / | | | | | | | | P(x) dx | - | P(x) dx | | | | | | | / | / f(x) = |C1 + | Q(x)*e dx|*e | | | \ / / Examples ======== >>> f = Function('f') >>> pprint(dsolve(Eq(x*diff(f(x), x) - f(x), x**2*sin(x)), ... f(x), '1st_linear')) f(x) = x*(C1 - cos(x)) References ========== - https://en.wikipedia.org/wiki/Linear_differential_equation#First-order_equation_with_variable_coefficients - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations", Dover 1963, pp. 92 # indirect doctest """ hint = '1st_linear' has_integral = True order = [1] def _wilds(self, f, x, order): P = Wild('P', exclude=[f(x)]) Q = Wild('Q', exclude=[f(x), f(x).diff(x)]) return P, Q def _equation(self, fx, x, order): P, Q = self.wilds() return fx.diff(x) + P*fx - Q def _get_general_solution(self, *, simplify_flag: bool = True): P, Q = self.wilds_match() fx = self.ode_problem.func x = self.ode_problem.sym (C1,) = self.ode_problem.get_numbered_constants(num=1) gensol = Eq(fx, ((C1 + Integral(Q*exp(Integral(P, x)), x)) * exp(-Integral(P, x)))) return [gensol]
FirstLinear
python
GoogleCloudPlatform__python-docs-samples
appengine/standard/ndb/async/app_sync.py
{ "start": 738, "end": 1091 }
class ____(webapp2.RequestHandler): def get(self): acct = Account.get_by_id(users.get_current_user().user_id()) acct.view_counter += 1 acct.put() # ...read something else from Datastore... self.response.out.write("Content of the page") app = webapp2.WSGIApplication([("/", MyRequestHandler)])
MyRequestHandler
python
getsentry__sentry
src/sentry/integrations/github/repository.py
{ "start": 853, "end": 6398 }
class ____(IntegrationRepositoryProvider): name = "GitHub" repo_provider = IntegrationProviderSlug.GITHUB.value def _validate_repo(self, client: Any, installation: IntegrationInstallation, repo: str) -> Any: try: repo_data = client.get_repo(repo) except Exception as e: raise installation.raise_error(e) try: # make sure installation has access to this specific repo # use hooks endpoint since we explicitly ask for those permissions # when installing the app (commits can be accessed for public repos) # https://docs.github.com/en/rest/webhooks/repo-config#list-hooks client.repo_hooks(repo) except ApiError: raise IntegrationError(f"You must grant Sentry access to {repo}") return repo_data def get_repository_data( self, organization: Organization, config: MutableMapping[str, Any] ) -> Mapping[str, Any]: installation = self.get_installation(config.get("installation"), organization.id) client = installation.get_client() repo = self._validate_repo(client, installation, config["identifier"]) config["external_id"] = str(repo["id"]) config["integration_id"] = installation.model.id return config def build_repository_config( self, organization: RpcOrganization, data: dict[str, Any] ) -> RepositoryConfig: return { "name": data["identifier"], "external_id": data["external_id"], "url": "https://github.com/{}".format(data["identifier"]), "config": {"name": data["identifier"]}, "integration_id": data["integration_id"], } def compare_commits( self, repo: Repository, start_sha: str | None, end_sha: str ) -> Sequence[Mapping[str, Any]]: def eval_commits(client: Any) -> Sequence[Mapping[str, Any]]: # use config name because that is kept in sync via webhooks name = repo.config["name"] if start_sha is None: res = client.get_last_commits(name, end_sha) return self._format_commits(client, name, res[:20]) else: res = client.compare_commits(name, start_sha, end_sha) return self._format_commits(client, name, res["commits"]) integration_id = repo.integration_id if integration_id is None: raise NotImplementedError("GitHub apps requires an integration id to fetch commits") integration = integration_service.get_integration( integration_id=integration_id, status=ObjectStatus.ACTIVE ) if integration is None: raise NotImplementedError( "GitHub apps requires a valid active integration to fetch commits" ) installation = integration.get_installation(organization_id=repo.organization_id) client = installation.get_client() try: return eval_commits(client) except Exception as e: installation.raise_error(e) def _format_commits( self, client: Any, repo_name: str, commit_list: Any, ) -> Sequence[Mapping[str, Any]]: """Convert GitHub commits into our internal format For each commit in the list we have to fetch patch data, as the compare API gives us all of the files changed in the commit range but not which files changed in each commit. Without this we cannot know which specific commit changed a given file. 
See sentry.models.Release.set_commits """ return [ { "id": c["sha"], "repository": repo_name, "author_email": c["commit"]["author"].get("email"), "author_name": c["commit"]["author"].get("name"), "message": c["commit"]["message"], "timestamp": self.format_date(c["commit"]["author"].get("date")), "patch_set": self._get_patchset(client, repo_name, c["sha"]), } for c in commit_list ] def _get_patchset(self, client: Any, repo_name: str, sha: str) -> Sequence[Mapping[str, Any]]: """Get the modified files for a commit""" commit = client.get_commit(repo_name, sha) return self._transform_patchset(commit["files"]) def _transform_patchset(self, diff: Sequence[Mapping[str, Any]]) -> Sequence[Mapping[str, Any]]: """Convert the patch data from GitHub into our internal format See sentry.models.Release.set_commits """ changes = [] for change in diff: if change["status"] == "modified": changes.append({"path": change["filename"], "type": "M"}) if change["status"] == "added": changes.append({"path": change["filename"], "type": "A"}) if change["status"] == "removed": changes.append({"path": change["filename"], "type": "D"}) if change["status"] == "renamed": changes.append({"path": change["previous_filename"], "type": "D"}) changes.append({"path": change["filename"], "type": "A"}) return changes def pull_request_url(self, repo: Repository, pull_request: PullRequest) -> str: return f"{repo.url}/pull/{pull_request.key}" def repository_external_slug(self, repo: Repository) -> str: return repo.name
GitHubRepositoryProvider
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/mssql/base.py
{ "start": 45924, "end": 46130 }
class ____(sqltypes.Boolean): """MSSQL BIT type. Both pyodbc and pymssql return values from BIT columns as Python <class 'bool'> so just subclass Boolean. """ __visit_name__ = "BIT"
BIT
python
pytorch__pytorch
test/inductor/test_efficient_conv_bn_eval.py
{ "start": 2656, "end": 7226 }
class ____(TestCase): @tf32_on_and_off(0.003) @inductor_config.patch({"efficient_conv_bn_eval_fx_passes": True}) def test_basic(self): def test_conv_bn_eval( test_class, use_bias, module, sync_bn, decompose_nn_module ): from functorch import make_fx from torch._dispatch.python import enable_python_dispatcher kwargs = {"kernel_size": 3, "stride": 2} if module[0] != nn.Linear else {} mod_eager = test_class( module[0], module[1], use_bias, 3, 32, self.device, **kwargs, ).eval() # Copy module to test backward mod_optimized = copy.deepcopy(mod_eager) if sync_bn: mod_eager = nn.SyncBatchNorm.convert_sync_batchnorm(mod_eager).eval() mod_optimized = nn.SyncBatchNorm.convert_sync_batchnorm( mod_optimized ).eval() torch._dynamo.reset() inps = [4, 3] # Conv shape goes from big to small, and ConvTranspose shape goes from small to big spatial_d = ( 4 if issubclass(module[0], nn.modules.conv._ConvTransposeNd) else 96 ) if module[0] is nn.Conv1d or module[0] is nn.ConvTranspose1d: inps += [spatial_d] * 1 if module[0] is nn.Conv2d or module[0] is nn.ConvTranspose2d: inps += [spatial_d] * 2 if module[0] is nn.Conv3d or module[0] is nn.ConvTranspose3d: inps += [spatial_d] * 3 inp = torch.rand(inps).to(self.device) if decompose_nn_module: with enable_python_dispatcher(): mod_optimized = make_fx(mod_optimized, pre_dispatch=True)(inp) mod_optimized = torch.compile(mod_optimized) original_value = counters["inductor"]["efficient_conv_bn_eval"] optim_eager = torch.optim.SGD(mod_eager.parameters(), lr=1e-3) optim_optimized = torch.optim.SGD(mod_optimized.parameters(), lr=1e-3) optim_eager.zero_grad() optim_optimized.zero_grad() # test forward out_eager = mod_eager(inp) out_optimized = mod_optimized(inp) self.assertEqual(out_optimized, out_eager) out_eager.mean().backward() out_optimized.mean().backward() optim_eager.step() optim_optimized.step() # test forward (by testing forward again after one training iteration) inp_bw = torch.rand_like(inp) out_eager_bw = mod_eager(inp_bw) out_optimized_bw = mod_optimized(inp_bw) self.assertEqual(out_eager_bw, out_optimized_bw) current_value = counters["inductor"]["efficient_conv_bn_eval"] self.assertEqual( current_value - original_value, test_class.expected_optimization_count ) conv_bias = [True, False] modules = [ (nn.Linear, nn.BatchNorm1d), (nn.Conv1d, nn.BatchNorm1d), (nn.Conv2d, nn.BatchNorm2d), (nn.Conv3d, nn.BatchNorm3d), (nn.ConvTranspose1d, nn.BatchNorm1d), (nn.ConvTranspose2d, nn.BatchNorm2d), (nn.ConvTranspose3d, nn.BatchNorm3d), ] test_classes = [ConvOp, MultiUserConvOp] sync_bns = [False, True] decompose_nn_modules = [False, True] for ( test_class, use_bias, module, sync_bn, decompose_nn_module, ) in itertools.product( test_classes, conv_bias, modules, sync_bns, decompose_nn_modules, ): test_conv_bn_eval( test_class, use_bias, module, sync_bn, decompose_nn_module ) if HAS_CPU and not torch.backends.mps.is_available(): class EfficientConvBNEvalCpuTests(TestCase): device = "cpu" copy_tests(EfficientConvBNEvalTemplate, EfficientConvBNEvalCpuTests, "cpu") if HAS_GPU: class EfficientConvBNEvalGpuTests(TestCase): device = GPU_TYPE copy_tests(EfficientConvBNEvalTemplate, EfficientConvBNEvalGpuTests, GPU_TYPE) del EfficientConvBNEvalTemplate if __name__ == "__main__": from torch._inductor.test_case import run_tests if HAS_CPU or HAS_GPU: run_tests(needs="filelock")
EfficientConvBNEvalTemplate
python
MongoEngine__mongoengine
tests/queryset/test_field_list.py
{ "start": 2518, "end": 16553 }
class ____(unittest.TestCase): def setUp(self): connect(db="mongoenginetest") class Person(Document): name = StringField() age = IntField() meta = {"allow_inheritance": True} Person.drop_collection() self.Person = Person def test_mixing_only_exclude(self): class MyDoc(Document): a = StringField() b = StringField() c = StringField() d = StringField() e = StringField() f = StringField() include = ["a", "b", "c", "d", "e"] exclude = ["d", "e"] only = ["b", "c"] qs = MyDoc.objects.fields(**{i: 1 for i in include}) assert qs._loaded_fields.as_dict() == {"a": 1, "b": 1, "c": 1, "d": 1, "e": 1} qs = qs.only(*only) assert qs._loaded_fields.as_dict() == {"b": 1, "c": 1} qs = qs.exclude(*exclude) assert qs._loaded_fields.as_dict() == {"b": 1, "c": 1} qs = MyDoc.objects.fields(**{i: 1 for i in include}) qs = qs.exclude(*exclude) assert qs._loaded_fields.as_dict() == {"a": 1, "b": 1, "c": 1} qs = qs.only(*only) assert qs._loaded_fields.as_dict() == {"b": 1, "c": 1} qs = MyDoc.objects.exclude(*exclude) qs = qs.fields(**{i: 1 for i in include}) assert qs._loaded_fields.as_dict() == {"a": 1, "b": 1, "c": 1} qs = qs.only(*only) assert qs._loaded_fields.as_dict() == {"b": 1, "c": 1} def test_slicing(self): class MyDoc(Document): a = ListField() b = ListField() c = ListField() d = ListField() e = ListField() f = ListField() include = ["a", "b", "c", "d", "e"] exclude = ["d", "e"] only = ["b", "c"] qs = MyDoc.objects.fields(**{i: 1 for i in include}) qs = qs.exclude(*exclude) qs = qs.only(*only) qs = qs.fields(slice__b=5) assert qs._loaded_fields.as_dict() == {"b": {"$slice": 5}, "c": 1} qs = qs.fields(slice__c=[5, 1]) assert qs._loaded_fields.as_dict() == { "b": {"$slice": 5}, "c": {"$slice": [5, 1]}, } qs = qs.exclude("c") assert qs._loaded_fields.as_dict() == {"b": {"$slice": 5}} def test_mix_slice_with_other_fields(self): class MyDoc(Document): a = ListField() b = ListField() c = ListField() qs = MyDoc.objects.fields(a=1, b=0, slice__c=2) assert qs._loaded_fields.as_dict() == {"c": {"$slice": 2}, "a": 1} def test_only(self): """Ensure that QuerySet.only only returns the requested fields.""" person = self.Person(name="test", age=25) person.save() obj = self.Person.objects.only("name").get() assert obj.name == person.name assert obj.age is None obj = self.Person.objects.only("age").get() assert obj.name is None assert obj.age == person.age obj = self.Person.objects.only("name", "age").get() assert obj.name == person.name assert obj.age == person.age obj = self.Person.objects.only(*("id", "name")).get() assert obj.name == person.name assert obj.age is None # Check polymorphism still works class Employee(self.Person): salary = IntField(db_field="wage") employee = Employee(name="test employee", age=40, salary=30000) employee.save() obj = self.Person.objects(id=employee.id).only("age").get() assert isinstance(obj, Employee) # Check field names are looked up properly obj = Employee.objects(id=employee.id).only("salary").get() assert obj.salary == employee.salary assert obj.name is None def test_only_with_subfields(self): class User(EmbeddedDocument): name = StringField() email = StringField() class Comment(EmbeddedDocument): title = StringField() text = StringField() class VariousData(EmbeddedDocument): some = BooleanField() class BlogPost(Document): content = StringField() author = EmbeddedDocumentField(User) comments = ListField(EmbeddedDocumentField(Comment)) various = MapField(field=EmbeddedDocumentField(VariousData)) BlogPost.drop_collection() post = BlogPost( content="Had a good coffee today...", 
various={"test_dynamic": {"some": True}}, ) post.author = User(name="Test User") post.comments = [ Comment(title="I aggree", text="Great post!"), Comment(title="Coffee", text="I hate coffee"), ] post.save() obj = BlogPost.objects.only("author.name").get() assert obj.content is None assert obj.author.email is None assert obj.author.name == "Test User" assert obj.comments == [] obj = BlogPost.objects.only("various.test_dynamic.some").get() assert obj.various["test_dynamic"].some is True obj = BlogPost.objects.only("content", "comments.title").get() assert obj.content == "Had a good coffee today..." assert obj.author is None assert obj.comments[0].title == "I aggree" assert obj.comments[1].title == "Coffee" assert obj.comments[0].text is None assert obj.comments[1].text is None obj = BlogPost.objects.only("comments").get() assert obj.content is None assert obj.author is None assert obj.comments[0].title == "I aggree" assert obj.comments[1].title == "Coffee" assert obj.comments[0].text == "Great post!" assert obj.comments[1].text == "I hate coffee" BlogPost.drop_collection() def test_exclude(self): class User(EmbeddedDocument): name = StringField() email = StringField() class Comment(EmbeddedDocument): title = StringField() text = StringField() class BlogPost(Document): content = StringField() author = EmbeddedDocumentField(User) comments = ListField(EmbeddedDocumentField(Comment)) BlogPost.drop_collection() post = BlogPost(content="Had a good coffee today...") post.author = User(name="Test User") post.comments = [ Comment(title="I aggree", text="Great post!"), Comment(title="Coffee", text="I hate coffee"), ] post.save() obj = BlogPost.objects.exclude("author", "comments.text").get() assert obj.author is None assert obj.content == "Had a good coffee today..." 
assert obj.comments[0].title == "I aggree" assert obj.comments[0].text is None BlogPost.drop_collection() def test_exclude_only_combining(self): class Attachment(EmbeddedDocument): name = StringField() content = StringField() class Email(Document): sender = StringField() to = StringField() subject = StringField() body = StringField() content_type = StringField() attachments = ListField(EmbeddedDocumentField(Attachment)) Email.drop_collection() email = Email( sender="me", to="you", subject="From Russia with Love", body="Hello!", content_type="text/plain", ) email.attachments = [ Attachment(name="file1.doc", content="ABC"), Attachment(name="file2.doc", content="XYZ"), ] email.save() obj = Email.objects.exclude("content_type").exclude("body").get() assert obj.sender == "me" assert obj.to == "you" assert obj.subject == "From Russia with Love" assert obj.body is None assert obj.content_type is None obj = Email.objects.only("sender", "to").exclude("body", "sender").get() assert obj.sender is None assert obj.to == "you" assert obj.subject is None assert obj.body is None assert obj.content_type is None obj = ( Email.objects.exclude("attachments.content") .exclude("body") .only("to", "attachments.name") .get() ) assert obj.attachments[0].name == "file1.doc" assert obj.attachments[0].content is None assert obj.sender is None assert obj.to == "you" assert obj.subject is None assert obj.body is None assert obj.content_type is None Email.drop_collection() def test_all_fields(self): class Email(Document): sender = StringField() to = StringField() subject = StringField() body = StringField() content_type = StringField() Email.drop_collection() email = Email( sender="me", to="you", subject="From Russia with Love", body="Hello!", content_type="text/plain", ) email.save() obj = ( Email.objects.exclude("content_type", "body") .only("to", "body") .all_fields() .get() ) assert obj.sender == "me" assert obj.to == "you" assert obj.subject == "From Russia with Love" assert obj.body == "Hello!" 
assert obj.content_type == "text/plain" Email.drop_collection() def test_slicing_fields(self): """Ensure that query slicing an array works.""" class Numbers(Document): n = ListField(IntField()) Numbers.drop_collection() numbers = Numbers(n=[0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1]) numbers.save() # first three numbers = Numbers.objects.fields(slice__n=3).get() assert numbers.n == [0, 1, 2] # last three numbers = Numbers.objects.fields(slice__n=-3).get() assert numbers.n == [-3, -2, -1] # skip 2, limit 3 numbers = Numbers.objects.fields(slice__n=[2, 3]).get() assert numbers.n == [2, 3, 4] # skip to fifth from last, limit 4 numbers = Numbers.objects.fields(slice__n=[-5, 4]).get() assert numbers.n == [-5, -4, -3, -2] # skip to fifth from last, limit 10 numbers = Numbers.objects.fields(slice__n=[-5, 10]).get() assert numbers.n == [-5, -4, -3, -2, -1] # skip to fifth from last, limit 10 dict method numbers = Numbers.objects.fields(n={"$slice": [-5, 10]}).get() assert numbers.n == [-5, -4, -3, -2, -1] def test_slicing_nested_fields(self): """Ensure that query slicing an embedded array works.""" class EmbeddedNumber(EmbeddedDocument): n = ListField(IntField()) class Numbers(Document): embedded = EmbeddedDocumentField(EmbeddedNumber) Numbers.drop_collection() numbers = Numbers() numbers.embedded = EmbeddedNumber(n=[0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1]) numbers.save() # first three numbers = Numbers.objects.fields(slice__embedded__n=3).get() assert numbers.embedded.n == [0, 1, 2] # last three numbers = Numbers.objects.fields(slice__embedded__n=-3).get() assert numbers.embedded.n == [-3, -2, -1] # skip 2, limit 3 numbers = Numbers.objects.fields(slice__embedded__n=[2, 3]).get() assert numbers.embedded.n == [2, 3, 4] # skip to fifth from last, limit 4 numbers = Numbers.objects.fields(slice__embedded__n=[-5, 4]).get() assert numbers.embedded.n == [-5, -4, -3, -2] # skip to fifth from last, limit 10 numbers = Numbers.objects.fields(slice__embedded__n=[-5, 10]).get() assert numbers.embedded.n == [-5, -4, -3, -2, -1] # skip to fifth from last, limit 10 dict method numbers = Numbers.objects.fields(embedded__n={"$slice": [-5, 10]}).get() assert numbers.embedded.n == [-5, -4, -3, -2, -1] def test_exclude_from_subclasses_docs(self): class Base(Document): username = StringField() meta = {"allow_inheritance": True} class Anon(Base): anon = BooleanField() class User(Base): password = StringField() wibble = StringField() Base.drop_collection() User(username="mongodb", password="secret").save() user = Base.objects().exclude("password", "wibble").first() assert user.password is None with pytest.raises(LookUpError): Base.objects.exclude("made_up") def test_gt_gte_lt_lte_ne_operator_with_list(self): class Family(Document): ages = ListField(field=FloatField()) Family.drop_collection() Family(ages=[1.0, 2.0]).save() Family(ages=[]).save() qs = list(Family.objects(ages__gt=[1.0])) assert len(qs) == 1 assert qs[0].ages == [1.0, 2.0] qs = list(Family.objects(ages__gt=[1.0, 1.99])) assert len(qs) == 1 assert qs[0].ages == [1.0, 2.0] qs = list(Family.objects(ages__gt=[])) assert len(qs) == 1 assert qs[0].ages == [1.0, 2.0] qs = list(Family.objects(ages__gte=[1.0, 2.0])) assert len(qs) == 1 assert qs[0].ages == [1.0, 2.0] qs = list(Family.objects(ages__lt=[1.0])) assert len(qs) == 1 assert qs[0].ages == [] qs = list(Family.objects(ages__lte=[5.0])) assert len(qs) == 2 qs = list(Family.objects(ages__ne=[5.0])) assert len(qs) == 2 qs = list(Family.objects(ages__ne=[])) assert len(qs) == 1 assert qs[0].ages == [1.0, 2.0] if 
__name__ == "__main__": unittest.main()
TestOnlyExcludeAll
python
pypa__pipenv
pipenv/vendor/plette/lockfiles.py
{ "start": 129, "end": 1552 }
class ____(json.JSONEncoder):
    """A specialized JSON encoder to convert loaded data into a lock file.

    This adds a few characteristics to the encoder:

    * The JSON is always prettified with indents and spaces.
    * The output is always UTF-8-encoded text, never binary, even on Python 2.
    """
    def __init__(self):
        super(_LockFileEncoder, self).__init__(
            indent=4, separators=(",", ": "), sort_keys=True,
        )

    def encode(self, obj):
        content = super(_LockFileEncoder, self).encode(obj)
        if not isinstance(content, str):
            content = content.decode("utf-8")
        content += "\n"
        return content

    def iterencode(self, obj):
        for chunk in super(_LockFileEncoder, self).iterencode(obj):
            if not isinstance(chunk, str):
                chunk = chunk.decode("utf-8")
            yield chunk
        yield "\n"


PIPFILE_SPEC_CURRENT = 6


def _copy_jsonsafe(value):
    """Deep-copy a value into JSON-safe types.
    """
    if isinstance(value, (str, numbers.Number)):
        return value
    if isinstance(value, collections_abc.Mapping):
        return {str(k): _copy_jsonsafe(v) for k, v in value.items()}
    if isinstance(value, collections_abc.Iterable):
        return [_copy_jsonsafe(v) for v in value]
    if value is None:   # This doesn't happen often for us.
        return None
    return str(value)
_LockFileEncoder
python
walkccc__LeetCode
solutions/1537. Get the Maximum Score/1537.py
{ "start": 0, "end": 1069 }
class ____:
  def maxSum(self, nums1: list[int], nums2: list[int]) -> int:
    # Keep the running sum of `nums1` and `nums2` before the next rendezvous.
    # Since `nums1` and `nums2` are increasing, move forward on the smaller one
    # to ensure we don't miss any rendezvous. When a rendezvous is reached,
    # choose the better path.
    ans = 0
    sum1 = 0  # sum(nums1) in (the previous rendezvous, the next rendezvous)
    sum2 = 0  # sum(nums2) in (the previous rendezvous, the next rendezvous)
    i = 0  # nums1's index
    j = 0  # nums2's index

    while i < len(nums1) and j < len(nums2):
      if nums1[i] < nums2[j]:
        sum1 += nums1[i]
        i += 1
      elif nums1[i] > nums2[j]:
        sum2 += nums2[j]
        j += 1
      else:  # A rendezvous happens.
        ans += max(sum1, sum2) + nums1[i]
        sum1 = 0
        sum2 = 0
        i += 1
        j += 1

    while i < len(nums1):
      sum1 += nums1[i]
      i += 1

    while j < len(nums2):
      sum2 += nums2[j]
      j += 1

    return (ans + max(sum1, sum2)) % (10**9 + 7)
Solution
python
tiangolo__fastapi
docs_src/response_model/tutorial003.py
{ "start": 241, "end": 450 }
class ____(BaseModel): username: str email: EmailStr full_name: Union[str, None] = None @app.post("/user/", response_model=UserOut) async def create_user(user: UserIn) -> Any: return user
UserOut
python
dagster-io__dagster
python_modules/libraries/dagster-airlift/dagster_airlift/in_airflow/dagster_run_utils.py
{ "start": 828, "end": 1535 }
class ____(NamedTuple): status: str tags: Mapping[str, Any] @property def run_will_automatically_retry(self) -> bool: return ( not self.succeeded and get_boolean_tag_value(self.tags.get(WILL_RETRY_TAG), False) is True ) @property def retried_run_id(self) -> Optional[str]: return self.tags.get(AUTO_RETRY_RUN_ID_TAG) @property def succeeded(self) -> bool: return self.status == SUCCESS_STATUS def get_boolean_tag_value(tag_value: Optional[str], default_value: bool = False) -> bool: if tag_value is None: return default_value return tag_value.lower() not in {"false", "none", "0", ""}
DagsterRunResult
python
pytorch__pytorch
test/test_compile_benchmark_util.py
{ "start": 491, "end": 1526 }
class ____(TestCase): def test_training_and_inference(self): class ToyModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.weight = torch.nn.Parameter(torch.Tensor(2, 2)) def forward(self, x): return x * self.weight torchdynamo.reset() model = ToyModel().cuda() inference_table = bench_all(model, torch.ones(1024, 2, 2).cuda(), 5) self.assertTrue( "Inference" in inference_table and "Eager" in inference_table and "-" in inference_table ) training_table = bench_all( model, torch.ones(1024, 2, 2).cuda(), 5, optimizer=torch.optim.SGD(model.parameters(), lr=0.01), ) self.assertTrue( "Train" in training_table and "Eager" in training_table and "-" in training_table ) if __name__ == "__main__": run_tests()
TestCompileBenchmarkUtil
python
kamyu104__LeetCode-Solutions
Python/next-greater-numerically-balanced-number.py
{ "start": 3743, "end": 4626 }
class ____(object):
    def nextBeautifulNumber(self, n):
        """
        :type n: int
        :rtype: int
        """
        # obtained by manually enumerating the smallest permutation of each
        # balanced digit combination for every length
        balanced = [1, 22, 122, 333, 1333, 4444, 14444, 22333, 55555, 122333, 155555, 224444, 666666]
        s = tuple(str(n))
        result = 1224444
        for x in balanced:
            x = tuple(str(x))
            if len(x) < len(s):
                continue
            if len(x) > len(s):
                result = min(result, int("".join(x)))
                continue
            for perm in itertools.permutations(x):  # not distinct permutations
                if perm > s:
                    result = min(result, int("".join(perm)))
        return result
Solution3
python
matplotlib__matplotlib
lib/matplotlib/hatch.py
{ "start": 5620, "end": 5825 }
class ____(Circles): size = 0.1 filled = True def __init__(self, hatch, density): self.num_rows = (hatch.count('.')) * density super().__init__(hatch, density)
SmallFilledCircles
python
wandb__wandb
wandb/vendor/promise-2.3.0/wandb_promise/async_.py
{ "start": 253, "end": 3959 }
class ____(local): def __init__(self, trampoline_enabled=True): self.is_tick_used = False self.late_queue = deque() # type: ignore self.normal_queue = deque() # type: ignore self.have_drained_queues = False self.trampoline_enabled = trampoline_enabled def enable_trampoline(self): self.trampoline_enabled = True def disable_trampoline(self): self.trampoline_enabled = False def have_items_queued(self): return self.is_tick_used or self.have_drained_queues def _async_invoke_later(self, fn, scheduler): self.late_queue.append(fn) self.queue_tick(scheduler) def _async_invoke(self, fn, scheduler): # type: (Callable, Any) -> None self.normal_queue.append(fn) self.queue_tick(scheduler) def _async_settle_promise(self, promise): # type: (Promise) -> None self.normal_queue.append(promise) self.queue_tick(promise.scheduler) def invoke_later(self, fn): if self.trampoline_enabled: self._async_invoke_later(fn, scheduler) else: scheduler.call_later(0.1, fn) def invoke(self, fn, scheduler): # type: (Callable, Any) -> None if self.trampoline_enabled: self._async_invoke(fn, scheduler) else: scheduler.call(fn) def settle_promises(self, promise): # type: (Promise) -> None if self.trampoline_enabled: self._async_settle_promise(promise) else: promise.scheduler.call(promise._settle_promises) def throw_later(self, reason, scheduler): # type: (Exception, Any) -> None def fn(): # type: () -> None raise reason scheduler.call(fn) fatal_error = throw_later def drain_queue(self, queue): # type: (deque) -> None from .promise import Promise while queue: fn = queue.popleft() if isinstance(fn, Promise): fn._settle_promises() continue fn() def drain_queue_until_resolved(self, promise): # type: (Promise) -> None from .promise import Promise queue = self.normal_queue while queue: if not promise.is_pending: return fn = queue.popleft() if isinstance(fn, Promise): fn._settle_promises() continue fn() self.reset() self.have_drained_queues = True self.drain_queue(self.late_queue) def wait(self, promise, timeout=None): # type: (Promise, Optional[float]) -> None if not promise.is_pending: # We return if the promise is already # fulfilled or rejected return target = promise._target() if self.trampoline_enabled: if self.is_tick_used: self.drain_queue_until_resolved(target) if not promise.is_pending: # We return if the promise is already # fulfilled or rejected return target.scheduler.wait(target, timeout) def drain_queues(self): # type: () -> None assert self.is_tick_used self.drain_queue(self.normal_queue) self.reset() self.have_drained_queues = True self.drain_queue(self.late_queue) def queue_tick(self, scheduler): # type: (Any) -> None if not self.is_tick_used: self.is_tick_used = True scheduler.call(self.drain_queues) def reset(self): # type: () -> None self.is_tick_used = False
Async
python
joke2k__faker
faker/providers/bank/it_IT/__init__.py
{ "start": 42, "end": 197 }
class ____(BankProvider): """Implement bank provider for ``it_IT`` locale.""" bban_format = "?######################" country_code = "IT"
Provider
python
pytorch__pytorch
torch/distributed/flight_recorder/components/types.py
{ "start": 749, "end": 783 }
class ____(Generic[T]): pass
Ref
python
getsentry__sentry
src/sentry/workflow_engine/processors/schedule.py
{ "start": 3182, "end": 11632 }
class ____: """ ProjectChooser assists in determining which projects to process based on the cohort updates. """ def __init__( self, buffer_client: DelayedWorkflowClient, num_cohorts: int, min_scheduling_age: timedelta ): self.client = buffer_client assert num_cohorts > 0 and num_cohorts <= 255 self.num_cohorts = num_cohorts self.min_scheduling_age = min_scheduling_age def _project_id_to_cohort(self, project_id: int) -> int: return hashlib.sha256(project_id.to_bytes(8)).digest()[0] % self.num_cohorts def project_ids_to_process( self, now: float, cohort_updates: CohortUpdates, all_project_ids: list[int] ) -> list[int]: """ Given the time, the cohort update history, and the list of project ids in need of processing, determine which project ids should be processed. """ must_process = set[int]() may_process = set[int]() cohort_to_elapsed = dict[int, timedelta]() long_ago = now - 1000 target_max_age = timedelta(minutes=1) # must run min_scheduling_age = self.min_scheduling_age # can run # The cohort choice algorithm is essentially: # 1. Any cohort that hasn't been run recently enough (based on target_max_age) # must be run. # 2. If no cohort _must_ be run, pick the most stale cohort that hasn't # been run too recently (based on min_scheduling_age). To guarantee even distribution, # min_scheduling_age should be <= the scheduling interval. # # With this, we distribute cohorts across runs, but ensure we don't process them # too frequently or too late, while not being too dependent on number of cohorts or # frequency of scheduling. if len(cohort_updates.values) != self.num_cohorts: logger.info( "%s cohort_updates.values, but num_cohorts is %s. Resetting.", len(cohort_updates.values), self.num_cohorts, extra={"cohort_updates": cohort_updates.values}, ) # When cohort counts change, we accept that we'll be potentially running some # projects a bit too early. Previous cohorts are no longer valid, so the timestamps # associated with them are only accurate for setting bounds on the whole project space. # But, we can still use that to avoid running projects too early by giving them all the # eldest timestamp. eldest = min(cohort_updates.values.values(), default=long_ago) # This also ensures that if we downsize cohorts, we don't let data from now-dead cohorts # linger. cohort_updates.values = {co: eldest for co in range(self.num_cohorts)} for co in range(self.num_cohorts): last_run = cohort_updates.values.get(co) if last_run is None: last_run = long_ago # It's a bug if the cohort doesn't exist at this point. metrics.incr( "workflow_engine.schedule.cohort_not_found", tags={"cohort": co}, sample_rate=1.0, ) elapsed = timedelta(seconds=now - last_run) if last_run != long_ago: # Only track duration if we know the last run. 
metrics.distribution( "workflow_engine.schedule.cohort_freshness", elapsed.total_seconds(), sample_rate=1.0, ) cohort_to_elapsed[co] = elapsed if elapsed >= target_max_age: must_process.add(co) elif elapsed >= min_scheduling_age: may_process.add(co) if may_process and not must_process: choice = min(may_process, key=lambda c: (cohort_updates.values.get(c, long_ago), c)) must_process.add(choice) cohort_updates.values.update({cohort_id: now for cohort_id in must_process}) for cohort_id, elapsed in cohort_to_elapsed.items(): if cohort_id in must_process: metrics.distribution( "workflow_engine.schedule.processed_cohort_freshness", elapsed.total_seconds(), sample_rate=1.0, ) metrics.incr( "workflow_engine.schedule.scheduled_cohort", tags={"cohort": cohort_id}, sample_rate=1.0, ) logger.info( "schedule.selected_cohorts", extra={"selected": sorted(must_process), "may_process": sorted(may_process)}, ) return [ project_id for project_id in all_project_ids if self._project_id_to_cohort(project_id) in must_process ] @contextmanager def chosen_projects( project_chooser: ProjectChooser, fetch_time: float, all_project_ids: list[int], ) -> Generator[list[int]]: """ Context manager that yields the project ids to be processed, and manages the cohort state after the processing is complete. """ cohort_updates = project_chooser.client.fetch_updates() project_ids_to_process = project_chooser.project_ids_to_process( fetch_time, cohort_updates, all_project_ids ) yield project_ids_to_process project_chooser.client.persist_updates(cohort_updates) def process_buffered_workflows(buffer_client: DelayedWorkflowClient) -> None: option_name = buffer_client.option if option_name and not options.get(option_name): logger.info("delayed_workflow.disabled", extra={"option": option_name}) return with metrics.timer("workflow_engine.schedule.process_all_conditions.duration", sample_rate=1.0): fetch_time = datetime.now(tz=timezone.utc).timestamp() all_project_ids_and_timestamps = buffer_client.get_project_ids( min=0, max=fetch_time, ) project_chooser = ProjectChooser( buffer_client, num_cohorts=options.get("workflow_engine.num_cohorts", 1), min_scheduling_age=timedelta( seconds=options.get( "workflow_engine.schedule.min_cohort_scheduling_age_seconds", 50 ) ), ) with chosen_projects( project_chooser, fetch_time, list(all_project_ids_and_timestamps.keys()) ) as project_ids_to_process: metrics.distribution("workflow_engine.schedule.projects", len(project_ids_to_process)) logger.info( "delayed_workflow.project_id_list", extra={"project_ids": sorted(project_ids_to_process)}, ) for project_id in project_ids_to_process: process_in_batches(buffer_client.for_project(project_id)) mark_projects_processed( buffer_client, project_ids_to_process, all_project_ids_and_timestamps ) def mark_projects_processed( buffer_client: DelayedWorkflowClient, processed_project_ids: list[int], all_project_ids_and_timestamps: dict[int, list[float]], ) -> None: if not all_project_ids_and_timestamps: return with metrics.timer("workflow_engine.scheduler.mark_projects_processed"): processed_member_maxes = [ (project_id, max(timestamps)) for project_id, timestamps in all_project_ids_and_timestamps.items() if project_id in processed_project_ids ] deleted_project_ids = set[int]() # The conditional delete can be slow, so we break it into chunks that probably # aren't big enough to hold onto the main redis thread for too long. 
for chunk in chunked(processed_member_maxes, 500): with metrics.timer( "workflow_engine.conditional_delete_from_sorted_sets.chunk_duration" ): deleted = buffer_client.mark_project_ids_as_processed(dict(chunk)) deleted_project_ids.update(deleted) logger.info( "process_buffered_workflows.project_ids_deleted", extra={ "deleted_project_ids": sorted(deleted_project_ids), "processed_project_ids": sorted(processed_project_ids), }, )
ProjectChooser
python
PrefectHQ__prefect
tests/server/models/test_filters.py
{ "start": 27808, "end": 33220 }
class ____: params = [ [{}, 3], [ dict(deployment_filter=filters.DeploymentFilter(name=dict(any_=["d-1-1"]))), 1, ], [ dict(deployment_filter=filters.DeploymentFilter(name=dict(like_="d-"))), 3, ], [ dict(deployment_filter=filters.DeploymentFilter(name=dict(like_="-1-2"))), 1, ], [ dict( deployment_filter=filters.DeploymentFilter( name=dict(any_=["d-1-1", "d-1-2"]) ) ), 2, ], [ dict( deployment_filter=filters.DeploymentFilter(name=dict(any_=["zaphod"])) ), 0, ], [dict(flow_filter=filters.FlowFilter(name=dict(any_=["f-1", "f-2"]))), 2], [dict(flow_filter=filters.FlowFilter(name=dict(any_=["f-1", "f-100"]))), 2], [dict(flow_filter=filters.FlowFilter(name=dict(any_=["f-3"]))), 1], [dict(flow_filter=filters.FlowFilter(name=dict(like_="f-"))), 3], [dict(flow_filter=filters.FlowFilter(name=dict(like_="f-2"))), 0], [dict(flow_filter=filters.FlowFilter(tags=dict(all_=["db"]))), 2], [dict(flow_filter=filters.FlowFilter(tags=dict(all_=["db", "red"]))), 0], [dict(flow_run_filter=filters.FlowRunFilter(name=dict(like_="test-happy"))), 1], [dict(flow_run_filter=filters.FlowRunFilter(name=dict(like_="nothing!"))), 0], [ dict( flow_run_filter=filters.FlowRunFilter( deployment_id=dict(any_=[d_1_1_id, d_1_2_id, uuid4()]) ) ), 2, ], [ dict( task_run_filter=filters.TaskRunFilter( state=dict(type=dict(any_=["FAILED"])) ) ), 1, ], # next two check that filters are applied as an intersection not a union [ dict( deployment_filter=filters.DeploymentFilter(name=dict(any_=["d-1-1"])), flow_filter=filters.FlowFilter(name=dict(any_=["f-1", "f-2"])), ), 1, ], [ dict( deployment_filter=filters.DeploymentFilter(name=dict(any_=["d-1-1"])), flow_filter=filters.FlowFilter(name=dict(any_=["f-2"])), ), 0, ], [ dict( work_pool_filter=filters.WorkPoolFilter(name=dict(any_=["Test Pool"])) ), 1, ], [ dict( work_queue_filter=filters.WorkQueueFilter(name=dict(any_=["default"])) ), 1, ], [ dict( work_pool_filter=filters.WorkPoolFilter(name=dict(any_=["Test Pool"])), work_queue_filter=filters.WorkQueueFilter(name=dict(any_=["default"])), ), 1, ], [ dict( work_pool_filter=filters.WorkPoolFilter( name=dict(any_=["A pool that doesn't exist"]) ), work_queue_filter=filters.WorkQueueFilter(name=dict(any_=["default"])), ), 0, ], [ dict( work_pool_filter=filters.WorkPoolFilter(name=dict(any_=["Test Pool"])), work_queue_filter=filters.WorkQueueFilter( name=dict(any_=["a queue that doesn't exist"]) ), ), 0, ], # empty filter [dict(flow_filter=filters.FlowFilter()), 3], # multiple empty filters [ dict( flow_filter=filters.FlowFilter(), flow_run_filter=filters.FlowRunFilter(), ), 3, ], ] @pytest.mark.parametrize("kwargs,expected", params) async def test_python_client_filter(self, kwargs, expected): async with get_client() as client: deployments = await client.read_deployments(**kwargs) assert len(deployments) == expected @pytest.mark.parametrize("kwargs,expected", params) async def test_models_count(self, session, kwargs, expected): count = await models.deployments.count_deployments(session=session, **kwargs) assert count == expected @pytest.mark.parametrize("kwargs,expected", params) async def test_models_read(self, session, kwargs, expected): read = await models.deployments.read_deployments(session=session, **kwargs) assert len({r.id for r in read}) == expected @pytest.mark.parametrize("kwargs,expected", params) async def test_api_count(self, client, kwargs, expected): adjusted_kwargs = adjust_kwargs_for_client(kwargs) response = await client.post( "/deployments/count", json=adjusted_kwargs, ) assert response.json() == expected 
@pytest.mark.parametrize("kwargs,expected", params) async def test_api_read(self, client, kwargs, expected): adjusted_kwargs = adjust_kwargs_for_client(kwargs) response = await client.post( "/deployments/filter", json=adjusted_kwargs, ) assert len({r["id"] for r in response.json()}) == expected
TestCountDeploymentModels
python
run-llama__llama_index
llama-index-core/llama_index/core/indices/keyword_table/retrievers.py
{ "start": 1003, "end": 4539 }
class ____(BaseRetriever): """ Base Keyword Table Retriever. Arguments are shared among subclasses. Args: keyword_extract_template (Optional[BasePromptTemplate]): A Keyword Extraction Prompt (see :ref:`Prompt-Templates`). query_keyword_extract_template (Optional[BasePromptTemplate]): A Query Keyword Extraction Prompt (see :ref:`Prompt-Templates`). refine_template (Optional[BasePromptTemplate]): A Refinement Prompt (see :ref:`Prompt-Templates`). text_qa_template (Optional[BasePromptTemplate]): A Question Answering Prompt (see :ref:`Prompt-Templates`). max_keywords_per_query (int): Maximum number of keywords to extract from query. num_chunks_per_query (int): Maximum number of text chunks to query. """ def __init__( self, index: BaseKeywordTableIndex, keyword_extract_template: Optional[BasePromptTemplate] = None, query_keyword_extract_template: Optional[BasePromptTemplate] = None, max_keywords_per_query: int = 10, num_chunks_per_query: int = 10, callback_manager: Optional[CallbackManager] = None, object_map: Optional[dict] = None, verbose: bool = False, **kwargs: Any, ) -> None: """Initialize params.""" self._index = index self._index_struct = index.index_struct self._docstore = index.docstore self.max_keywords_per_query = max_keywords_per_query self.num_chunks_per_query = num_chunks_per_query self.keyword_extract_template = ( keyword_extract_template or DEFAULT_KEYWORD_EXTRACT_TEMPLATE ) self.query_keyword_extract_template = query_keyword_extract_template or DQKET super().__init__( callback_manager=callback_manager or Settings.callback_manager, object_map=object_map, verbose=verbose, ) @abstractmethod def _get_keywords(self, query_str: str) -> List[str]: """Extract keywords.""" def _retrieve( self, query_bundle: QueryBundle, ) -> List[NodeWithScore]: """Get nodes for response.""" logger.info(f"> Starting query: {query_bundle.query_str}") keywords = self._get_keywords(query_bundle.query_str) logger.info(f"query keywords: {keywords}") # go through text chunks in order of most matching keywords chunk_indices_count: Dict[str, int] = defaultdict(int) keywords = [k for k in keywords if k in self._index_struct.keywords] logger.info(f"> Extracted keywords: {keywords}") for k in keywords: for node_id in self._index_struct.table[k]: chunk_indices_count[node_id] += 1 sorted_chunk_indices = sorted( chunk_indices_count.keys(), key=lambda x: chunk_indices_count[x], reverse=True, ) sorted_chunk_indices = sorted_chunk_indices[: self.num_chunks_per_query] sorted_nodes = self._docstore.get_nodes(sorted_chunk_indices) if logging.getLogger(__name__).getEffectiveLevel() == logging.DEBUG: for chunk_idx, node in zip(sorted_chunk_indices, sorted_nodes): logger.debug( f"> Querying with idx: {chunk_idx}: " f"{truncate_text(node.get_content(), 50)}" ) return [NodeWithScore(node=node) for node in sorted_nodes]
BaseKeywordTableRetriever
python
eventlet__eventlet
tests/greendns_test.py
{ "start": 35055, "end": 39327 }
class ____(tests.LimitedTestCase): def test_raise_new_error(self): # https://github.com/eventlet/eventlet/issues/810 # Raise exception multiple times for _ in range(3): with self.assertRaises(socket.gaierror) as error: greendns._raise_new_error(greendns.EAI_EAGAIN_ERROR) self.assertIsNone(error.exception.__traceback__) # Check no memory leak of exception instance self.assertIsNone(greendns.EAI_EAGAIN_ERROR.__traceback__) def test_reverse_name(): tests.run_isolated('greendns_from_address_203.py') def test_proxy_resolve_unqualified(): # https://github.com/eventlet/eventlet/issues/363 rp = greendns.ResolverProxy(filename=None) rp._resolver.search.append(dns.name.from_text('example.com')) with tests.mock.patch('dns.resolver.Resolver.query', side_effect=dns.resolver.NoAnswer) as m: try: rp.query('machine') assert False, 'Expected NoAnswer exception' except dns.resolver.NoAnswer: pass assert any(call[0][0] == dns.name.from_text('machine') for call in m.call_args_list) assert any(call[0][0] == dns.name.from_text('machine.') for call in m.call_args_list) def test_hosts_priority(): name = 'example.com' addr_from_ns = '1.0.2.0' hr = _make_host_resolver() rp = greendns.ResolverProxy(hosts_resolver=hr, filename=None) base = _make_mock_base_resolver() base.rr.address = addr_from_ns rp._resolver = base() # Default behavior rrns = greendns.resolve(name, _proxy=rp).rrset[0] assert rrns.address == addr_from_ns # Hosts result must shadow that from nameservers hr.hosts.write(b'1.2.3.4 example.com\ndead:beef::1 example.com\n') hr.hosts.flush() hr._load() rrs4 = greendns.resolve(name, family=socket.AF_INET, _proxy=rp).rrset assert len(rrs4) == 1 assert rrs4[0].address == '1.2.3.4', rrs4[0].address rrs6 = greendns.resolve(name, family=socket.AF_INET6, _proxy=rp).rrset assert len(rrs6) == 1 assert rrs6[0].address == 'dead:beef::1', rrs6[0].address def test_hosts_no_network(): name = 'example.com' addr_from_ns = '1.0.2.0' addr6_from_ns = 'dead:beef::1' hr = _make_host_resolver() rp = greendns.ResolverProxy(hosts_resolver=hr, filename=None) base = _make_mock_base_resolver() base.rr.address = addr_from_ns base.rr6.address = addr6_from_ns rp._resolver = base() with tests.mock.patch.object(greendns, 'resolver', new_callable=tests.mock.PropertyMock(return_value=rp)): res = greendns.getaddrinfo('example.com', 'domain', socket.AF_UNSPEC) # Default behavior addr = (addr_from_ns, 53) tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr) udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr) addr = (addr6_from_ns, 53, 0, 0) tcp6 = (socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr) udp6 = (socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr) filt_res = [ai[:3] + (ai[4],) for ai in res] assert tcp in filt_res assert udp in filt_res assert tcp6 in filt_res assert udp6 in filt_res # Hosts result must shadow that from nameservers hr = _make_host_resolver() hr.hosts.write(b'1.2.3.4 example.com') hr.hosts.flush() hr._load() greendns.resolver._hosts = hr res = greendns.getaddrinfo('example.com', 'domain', socket.AF_UNSPEC) filt_res = [ai[:3] + (ai[4],) for ai in res] # Make sure that only IPv4 entry from hosts is present. 
assert tcp not in filt_res assert udp not in filt_res assert tcp6 not in filt_res assert udp6 not in filt_res addr = ('1.2.3.4', 53) tcp = (socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, addr) udp = (socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP, addr) assert tcp in filt_res assert udp in filt_res def test_import_rdtypes_then_eventlet(): # https://github.com/eventlet/eventlet/issues/479 tests.run_isolated('greendns_import_rdtypes_then_eventlet.py')
TestRaiseErrors
python
zarr-developers__zarr-python
src/zarr/core/buffer/core.py
{ "start": 3044, "end": 9024 }
class ____(ABC): """A flat contiguous memory block We use Buffer throughout Zarr to represent a contiguous block of memory. A Buffer is backed by an underlying array-like instance that represents the memory. The memory type is unspecified; can be regular host memory, CUDA device memory, or something else. The only requirement is that the array-like instance can be copied/converted to a regular Numpy array (host memory). Notes ----- This buffer is untyped, so all indexing and sizes are in bytes. Parameters ---------- array_like array-like object that must be 1-dim, contiguous, and byte dtype. """ def __init__(self, array_like: ArrayLike) -> None: if array_like.ndim != 1: raise ValueError("array_like: only 1-dim allowed") if array_like.dtype != np.dtype("B"): raise ValueError("array_like: only byte dtype allowed") self._data = array_like @classmethod @abstractmethod def create_zero_length(cls) -> Self: """Create an empty buffer with length zero Returns ------- New empty 0-length buffer """ if cls is Buffer: raise NotImplementedError("Cannot call abstract method on the abstract class 'Buffer'") return cls( cast("ArrayLike", None) ) # This line will never be reached, but it satisfies the type checker @classmethod def from_array_like(cls, array_like: ArrayLike) -> Self: """Create a new buffer of an array-like object Parameters ---------- array_like array-like object that must be 1-dim, contiguous, and byte dtype. Returns ------- New buffer representing `array_like` """ return cls(array_like) @classmethod @abstractmethod def from_buffer(cls, buffer: Buffer) -> Self: """Create a new buffer of an existing Buffer This is useful if you want to ensure that an existing buffer is of the correct subclass of Buffer. E.g., MemoryStore uses this to return a buffer instance of the subclass specified by its BufferPrototype argument. Typically, this only copies data if the data has to be moved between memory types, such as from host to device memory. Parameters ---------- buffer buffer object. Returns ------- A new buffer representing the content of the input buffer Notes ----- Subclasses of `Buffer` must override this method to implement more optimal conversions that avoid copies where possible """ if cls is Buffer: raise NotImplementedError("Cannot call abstract method on the abstract class 'Buffer'") return cls( cast("ArrayLike", None) ) # This line will never be reached, but it satisfies the type checker @classmethod @abstractmethod def from_bytes(cls, bytes_like: BytesLike) -> Self: """Create a new buffer of a bytes-like object (host memory) Parameters ---------- bytes_like bytes-like object Returns ------- New buffer representing `bytes_like` """ if cls is Buffer: raise NotImplementedError("Cannot call abstract method on the abstract class 'Buffer'") return cls( cast("ArrayLike", None) ) # This line will never be reached, but it satisfies the type checker def as_array_like(self) -> ArrayLike: """Returns the underlying array (host or device memory) of this buffer This will never copy data. Returns ------- The underlying 1d array such as a NumPy or CuPy array. """ return self._data @abstractmethod def as_numpy_array(self) -> npt.NDArray[Any]: """Returns the buffer as a NumPy array (host memory). Notes ----- Might have to copy data, consider using `.as_array_like()` instead. Returns ------- NumPy array of this buffer (might be a data copy) """ ... def as_buffer_like(self) -> BytesLike: """Returns the buffer as an object that implements the Python buffer protocol. 
Notes ----- Might have to copy data, since the implementation uses `.as_numpy_array()`. Returns ------- An object that implements the Python buffer protocol """ return memoryview(self.as_numpy_array()) # type: ignore[arg-type] def to_bytes(self) -> bytes: """Returns the buffer as `bytes` (host memory). Warnings -------- Will always copy data, only use this method for small buffers such as metadata buffers. If possible, use `.as_numpy_array()` or `.as_array_like()` instead. Returns ------- `bytes` of this buffer (data copy) """ return bytes(self.as_numpy_array()) def __getitem__(self, key: slice) -> Self: check_item_key_is_1d_contiguous(key) return self.__class__(self._data.__getitem__(key)) def __setitem__(self, key: slice, value: Any) -> None: check_item_key_is_1d_contiguous(key) self._data.__setitem__(key, value) def __len__(self) -> int: return self._data.size @abstractmethod def combine(self, others: Iterable[Buffer]) -> Self: """Concatenate many buffers""" ... def __add__(self, other: Buffer) -> Self: """Concatenate two buffers""" return self.combine([other]) def __eq__(self, other: object) -> bool: # Another Buffer class can override this to choose a more efficient path return isinstance(other, Buffer) and np.array_equal( self.as_numpy_array(), other.as_numpy_array() )
Buffer
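For context, a minimal host-memory subclass of the abstract Buffer above might look like the sketch below. The class name NumpyBuffer is hypothetical (zarr ships its own CPU and GPU buffer classes), and the import path is assumed from the file path shown in this entry.

import numpy as np
from zarr.core.buffer.core import Buffer  # assumed import path, matching the file above


class NumpyBuffer(Buffer):  # hypothetical name, illustrative only
    @classmethod
    def create_zero_length(cls):
        return cls(np.empty(0, dtype="B"))

    @classmethod
    def from_buffer(cls, buffer):
        # Host memory only, so reuse the other buffer's NumPy view without copying.
        return cls.from_array_like(buffer.as_numpy_array())

    @classmethod
    def from_bytes(cls, bytes_like):
        return cls.from_array_like(np.frombuffer(bytes_like, dtype="B"))

    def as_numpy_array(self):
        return np.asanyarray(self._data)

    def combine(self, others):
        parts = [self.as_numpy_array()] + [o.as_numpy_array() for o in others]
        return self.__class__(np.concatenate(parts))


buf = NumpyBuffer.from_bytes(b"abc") + NumpyBuffer.from_bytes(b"def")
assert buf.to_bytes() == b"abcdef"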
python
huggingface__transformers
src/transformers/models/time_series_transformer/modeling_time_series_transformer.py
{ "start": 32843, "end": 42179 }
class ____(TimeSeriesTransformerPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TimeSeriesTransformerDecoderLayer`] Args: config: TimeSeriesTransformerConfig """ def __init__(self, config: TimeSeriesTransformerConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop if config.prediction_length is None: raise ValueError("The `prediction_length` config needs to be specified.") self.value_embedding = TimeSeriesValueEmbedding(feature_size=config.feature_size, d_model=config.d_model) self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding( config.context_length + config.prediction_length, config.d_model ) self.layers = nn.ModuleList( [TimeSeriesTransformerDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)] ) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" Args: attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_shape = inputs_embeds.size()[:-1] # initialize `past_key_values` if use_cache and past_key_values is None: past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + input_shape[1], device=inputs_embeds.device ) attention_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, ) encoder_attention_mask = create_bidirectional_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, ) hidden_states = self.value_embedding(inputs_embeds) embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length) hidden_states = self.layernorm_embedding(hidden_states + embed_pos) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer( hidden_states, attention_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @auto_docstring
TimeSeriesTransformerDecoder
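The key behavior in the forward pass above is the masking split: self-attention over the decoder sequence is causal, while cross-attention over encoder_hidden_states is limited only by encoder padding. A standalone sketch of the additive causal bias (not the transformers mask utilities themselves):

import torch

# Row i has -inf strictly above the diagonal, so after softmax position i assigns zero
# weight to future positions.
seq_len = 4
causal_bias = torch.triu(torch.full((seq_len, seq_len), float("-inf")), diagonal=1)
print(causal_bias)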
python
apache__airflow
providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_asb.py
{ "start": 18310, "end": 20614 }
class ____: def test_init(self): """ Test init by creating AzureServiceBusUpdateSubscriptionOperator with task id, subscription name, topic name and asserting with values """ asb_update_subscription_operator = AzureServiceBusUpdateSubscriptionOperator( task_id="asb_update_subscription", topic_name=TOPIC_NAME, subscription_name=SUBSCRIPTION_NAME, max_delivery_count=10, ) assert asb_update_subscription_operator.task_id == "asb_update_subscription" assert asb_update_subscription_operator.topic_name == TOPIC_NAME assert asb_update_subscription_operator.subscription_name == SUBSCRIPTION_NAME assert asb_update_subscription_operator.max_delivery_count == 10 @mock.patch("azure.servicebus.management.SubscriptionProperties") @mock.patch("airflow.providers.microsoft.azure.hooks.asb.AdminClientHook.get_conn") def test_update_subscription(self, mock_get_conn, mock_subscription_properties): """ Test AzureServiceBusUpdateSubscriptionOperator passed with the subscription name, topic name mocking the connection details, hook update_subscription function """ mock_subscription_properties.name = SUBSCRIPTION_NAME mock_subscription_properties.max_delivery_count = 20 mock_get_conn.return_value.__enter__.return_value.get_subscription.return_value = ( mock_subscription_properties ) asb_update_subscription = AzureServiceBusUpdateSubscriptionOperator( task_id="asb_update_subscription", topic_name=TOPIC_NAME, subscription_name=SUBSCRIPTION_NAME, max_delivery_count=20, ) asb_update_subscription.execute(None) mock_get_conn.return_value.__enter__.return_value.get_subscription.assert_has_calls( [ mock.call(TOPIC_NAME, SUBSCRIPTION_NAME), # before update mock.call(TOPIC_NAME, SUBSCRIPTION_NAME), # after update ] ) mock_get_conn.return_value.__enter__.return_value.update_subscription.assert_called_once_with( TOPIC_NAME, mock_subscription_properties, )
TestAzureServiceBusUpdateSubscriptionOperator
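Outside the test, the operator exercised above would typically sit in a DAG. The sketch below is illustrative only: the dag_id, schedule, topic/subscription names, and the implied Azure Service Bus connection are placeholders, and the import path is assumed from the provider layout.

from datetime import datetime

from airflow import DAG
from airflow.providers.microsoft.azure.operators.asb import (
    AzureServiceBusUpdateSubscriptionOperator,
)

with DAG(dag_id="asb_subscription_maintenance", start_date=datetime(2024, 1, 1), schedule=None):
    update_subscription = AzureServiceBusUpdateSubscriptionOperator(
        task_id="asb_update_subscription",
        topic_name="sb_mgmt_topic",                 # placeholder
        subscription_name="sb_mgmt_subscription",   # placeholder
        max_delivery_count=10,
    )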
python
Netflix__metaflow
metaflow/plugins/aws/step_functions/step_functions.py
{ "start": 53822, "end": 54340 }
class ____(object): def __init__(self): tree = lambda: defaultdict(tree) self.payload = tree() self.payload["ReaderConfig"] = {"InputType": "JSON", "MaxItems": 1} def resource(self, resource): self.payload["Resource"] = resource return self def parameter(self, name, value): self.payload["Parameters"][name] = value return self def output_path(self, output_path): self.payload["OutputPath"] = output_path return self
JSONItemReader
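The class above is a small builder for the ItemReader block of a Step Functions distributed-map state. A usage sketch, where the resource ARN and parameter values are illustrative rather than taken from the sample:

reader = (
    JSONItemReader()
    .resource("arn:aws:states:::s3:getObject")   # illustrative resource ARN
    .parameter("Bucket", "my-bucket")
    .parameter("Key", "input.json")
    .output_path("$.Items")
)
# reader.payload now resembles:
# {
#   "ReaderConfig": {"InputType": "JSON", "MaxItems": 1},
#   "Resource": "arn:aws:states:::s3:getObject",
#   "Parameters": {"Bucket": "my-bucket", "Key": "input.json"},
#   "OutputPath": "$.Items",
# }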
python
pytorch__pytorch
torch/testing/_internal/distributed/_tensor/common_dtensor.py
{ "start": 11823, "end": 12372 }
class ____(MultiProcContinuousTest): @classmethod def device_type(cls) -> str: # if enough GPU/XPU/HPU we can use those devices, otherwise we fallback to CPU if ( not (TEST_CUDA or TEST_XPU or TEST_HPU or TEST_PRIVATEUSE1) or DEVICE_COUNT < cls.world_size ): return "cpu" else: return DEVICE_TYPE @classmethod def backend_str(cls) -> str: backend = dist.get_default_backend_for_device(DEVICE_TYPE) return backend
DTensorContinuousTestBase
python
pypa__setuptools
setuptools/tests/test_editable_install.py
{ "start": 37806, "end": 38892 }
class ____: def install_custom_build_wheel(self, dist): bdist_wheel_cls = dist.get_command_class("bdist_wheel") class MyBdistWheel(bdist_wheel_cls): def get_tag(self): # In issue #3513, we can see that some extensions may try to access # the `plat_name` property in bdist_wheel if self.plat_name.startswith("macosx-"): _ = "macOS platform" return super().get_tag() dist.cmdclass["bdist_wheel"] = MyBdistWheel def test_access_plat_name(self, tmpdir_cwd): # Even when a custom bdist_wheel tries to access plat_name the build should # be successful jaraco.path.build({"module.py": "x = 42"}) dist = Distribution() dist.script_name = "setup.py" dist.set_defaults() self.install_custom_build_wheel(dist) cmd = editable_wheel(dist) cmd.ensure_finalized() cmd.run() wheel_file = str(next(Path().glob('dist/*.whl'))) assert "editable" in wheel_file
TestCustomBuildWheel
python
getsentry__sentry
tests/sentry/seer/explorer/test_explorer_client.py
{ "start": 12993, "end": 16697 }
class ____(TestCase): """Test artifact schema passing and reconstruction""" def setUp(self): super().setUp() self.user = self.create_user() self.organization = self.create_organization(owner=self.user) @patch("sentry.seer.explorer.client.has_seer_explorer_access_with_detail") @patch("sentry.seer.explorer.client.requests.post") @patch("sentry.seer.explorer.client.collect_user_org_context") def test_start_run_with_artifact_schema(self, mock_collect_context, mock_post, mock_access): """Test that artifact schema is serialized and sent to API""" mock_access.return_value = (True, None) mock_collect_context.return_value = {"user_id": self.user.id} mock_response = MagicMock() mock_response.json.return_value = {"run_id": 123} mock_post.return_value = mock_response class IssueAnalysis(BaseModel): issue_count: int severity: str client = SeerExplorerClient(self.organization, self.user, artifact_schema=IssueAnalysis) run_id = client.start_run("Analyze errors") assert run_id == 123 # Verify artifact_schema was included in payload body = orjson.loads(mock_post.call_args[1]["data"]) assert "artifact_schema" in body assert body["artifact_schema"]["type"] == "object" assert "issue_count" in body["artifact_schema"]["properties"] assert "severity" in body["artifact_schema"]["properties"] @patch("sentry.seer.explorer.client.has_seer_explorer_access_with_detail") @patch("sentry.seer.explorer.client.fetch_run_status") def test_get_run_reconstructs_artifact(self, mock_fetch, mock_access): """Test that artifact is automatically reconstructed from dict""" mock_access.return_value = (True, None) class BugReport(BaseModel): bug_count: int severity: str # Mock API returns dict artifact mock_state = SeerRunState( run_id=123, blocks=[], status="completed", updated_at="2024-01-01T00:00:00Z", raw_artifact={"bug_count": 5, "severity": "high"}, # Raw dict from API artifact_reason="Successfully generated", ) mock_fetch.return_value = mock_state client = SeerExplorerClient(self.organization, self.user, artifact_schema=BugReport) result = client.get_run(123) # Verify artifact was reconstructed as Pydantic model assert isinstance(result.artifact, BugReport) assert result.artifact.bug_count == 5 assert result.artifact.severity == "high" assert result.artifact_reason == "Successfully generated" @patch("sentry.seer.explorer.client.has_seer_explorer_access_with_detail") @patch("sentry.seer.explorer.client.fetch_run_status") def test_get_run_with_none_artifact(self, mock_fetch, mock_access): """Test that None artifact is handled gracefully""" mock_access.return_value = (True, None) class MySchema(BaseModel): field: str mock_state = SeerRunState( run_id=123, blocks=[], status="completed", updated_at="2024-01-01T00:00:00Z", raw_artifact=None, artifact_reason="Generation failed", ) mock_fetch.return_value = mock_state client = SeerExplorerClient(self.organization, self.user, artifact_schema=MySchema) result = client.get_run(123) # Verify None artifact is preserved assert result.artifact is None assert result.artifact_reason == "Generation failed"
TestSeerExplorerClientArtifacts
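The first test asserts on the JSON-schema shape of the artifact_schema payload ("type": "object" plus per-field "properties"). As a standalone illustration of that shape, independent of the Sentry client (Pydantic v2 syntax shown; v1 exposes the same structure via .schema()):

from pydantic import BaseModel


class IssueAnalysis(BaseModel):
    issue_count: int
    severity: str


schema = IssueAnalysis.model_json_schema()
assert schema["type"] == "object"
assert set(schema["properties"]) == {"issue_count", "severity"}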
python
joke2k__faker
tests/providers/test_date_time.py
{ "start": 35163, "end": 40996 }
class ____(unittest.TestCase): num_sample_runs = 50 def setUp(self): self.fake = Faker("th_TH") Faker.seed(0) def test_day(self): day = self.fake.day_of_week() assert isinstance(day, str) assert day.startswith("วัน") def test_month(self): month = self.fake.month_name() assert isinstance(month, str) def test_date(self): # default format is "%-d %b %Y" # (date with no padding, abbreviated month, full year [4 digits]) date = self.fake.date(thai_digit=True) for _ in range(self.num_sample_runs): assert re.fullmatch( r"[๐-๙]{1,2} " r"(ม\.ค\.|ก\.พ\.|มี\.ค\.|เม\.ย\.|พ\.ค\.|มิ\.ย\.|ก\.ค\.|ส\.ค\.|ก\.ย\.|ต\.ค\.|พ\.ย\.|ธ\.ค\.) " r"[๐-๙]{4}", date, ) def test_time(self): time = self.fake.time(thai_digit=True) for _ in range(self.num_sample_runs): assert re.fullmatch(r"[๐-๙]{2}:[๐-๙]{2}:[๐-๙]{2}", time) def test_century(self): century = self.fake.century() assert isinstance(century, str) assert len(century) <= 2 century = self.fake.century(thai_digit=True) for _ in range(self.num_sample_runs): assert re.fullmatch(r"[๑-๒]?[๐-๙]", century) def test_date_pattern(self): # unsupported or incomplete directive date = self.fake.date("%Q") assert date == "Q" date = self.fake.date("%%") assert date == "%" date = self.fake.date("%-") assert date == "-" date = self.fake.date("% ") assert date == " " date = self.fake.date("%0") assert date == "0" date = self.fake.date("%") assert date == "%" # may be not supported on Windows, try for coverage date = self.fake.date("%s") date = self.fake.date("%f") # National representation of the full weekday name date = self.fake.date("%A") assert isinstance(date, str) # National representation of the abbreviated weekday date = self.fake.date("%a") assert isinstance(date, str) assert len(date) <= 2 # National representation of the full month name date = self.fake.date("%B") assert isinstance(date, str) assert "." not in date # National representation of the abbreviated month name date = self.fake.date("%b") assert isinstance(date, str) assert "." in date # Century as decimal number date = self.fake.date("%C") assert isinstance(date, str) # Locale’s appropriate date and time representation # พ 6 ต.ค. 01:40:00 2519 <-- left-aligned weekday, right-aligned day date = self.fake.date("%c") assert isinstance(date, str) # Equivalent to ``%m/%d/%y'' date = self.fake.date("%D") assert isinstance(date, str) # Equivalent to ``%Y-%m-%d'' date = self.fake.date("%F") assert isinstance(date, str) # ISO 8601 year with century representing the year that contains # the greater part of the ISO week (%V). Monday as the first day # of the week. date = self.fake.date("%G") assert isinstance(date, str) # Same year as in ``%G'', # but as a decimal number without century (00-99). date = self.fake.date("%g") assert isinstance(date, str) assert len(date) <= 2 # BSD extension, ' 6-ต.ค.-2519' date = self.fake.date("%v") assert isinstance(date, str) # Locale’s appropriate time representation. date = self.fake.date("%X") assert isinstance(date, str) # Locale’s appropriate date representation. 
date = self.fake.date("%x") assert isinstance(date, str) # Year with century date = self.fake.date("%Y") assert isinstance(date, str) # Year without century date = self.fake.date("%y") assert isinstance(date, str) assert len(date) <= 2 # National representation of the date and time # (the format is similar to that produced by date(1)) # Wed 6 Oct 1976 01:40:00 date = self.fake.date("%+") assert isinstance(date, str) # GNU libc extension, # no padding for _ in range(self.num_sample_runs): date = self.fake.date("%-d") assert isinstance(date, str) assert date[0] != "0" # GNU libc extension, # explicitly specify space (" ") for padding for _ in range(self.num_sample_runs): date = self.fake.date("%_d") assert isinstance(date, str) assert date[0] != "0" # GNU libc extension, # explicitly specify zero ("0") for padding for _ in range(self.num_sample_runs): date = self.fake.date("%0v") assert isinstance(date, str) assert date[0] != " " # GNU libc extension, # convert to upper case date = self.fake.date("%^p") assert isinstance(date, str) assert date.isupper() # GNU libc extension, # swap case - useful for %Z date = self.fake.date("%#p") assert isinstance(date, str) assert date.islower() # POSIX extension, # uses the locale's alternative representation # Not implemented yet # swap case - useful for %Z date = self.fake.date("%Ed") assert isinstance(date, str) # POSIX extension, # uses the locale's alternative numeric symbols date = self.fake.date("%Od") assert isinstance(date, str) assert date[0] not in "0123456789"
TestThTh
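A short usage sketch of the th_TH provider the tests exercise; the commented outputs are illustrative examples of the expected shape, not fixed values.

from faker import Faker

fake = Faker("th_TH")
Faker.seed(0)

fake.day_of_week()           # Thai weekday name, always prefixed with "วัน"
fake.month_name()            # Thai month name
fake.date(thai_digit=True)   # e.g. "๖ ต.ค. ๒๕๑๙" -- Thai digits, abbreviated month, Buddhist-era year
fake.time(thai_digit=True)   # e.g. "๐๑:๔๐:๐๐"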
python
miyuchina__mistletoe
mistletoe/block_token.py
{ "start": 24725, "end": 27704 }
class ____(BlockToken): """ Table token. See its GFM definition at <https://github.github.com/gfm/#tables-extension->. This is a container block token. Its children are TableRow tokens. Class attributes: interrupt_paragraph: indicates whether tables should interrupt paragraphs during parsing. The default is true. Attributes: header: header row (TableRow). column_align (list): align options for each column (default to [None]). """ repr_attributes = BlockToken.repr_attributes + ("column_align",) interrupt_paragraph = True _column_align = r':?-+:?' column_align_pattern = re.compile(_column_align) delimiter_row_pattern = re.compile(r'\s*\|?\s*' + _column_align + r'\s*(\|\s*' + _column_align + r'\s*)*\|?\s*') def __init__(self, match): lines, start_line = match # note: the following condition is currently always true, because read() guarantees the presence of the delimiter row if '-' in lines[1]: self.column_align = [self.parse_align(column) for column in self.split_delimiter(lines[1])] self.header = TableRow(lines[0], self.column_align, start_line) self.children = [TableRow(line, self.column_align, start_line + offset) for offset, line in enumerate(lines[2:], start=2)] else: self.column_align = [None] self.children = [TableRow(line, line_number=start_line + offset) for offset, line in enumerate(lines)] @classmethod def split_delimiter(cls, delimiter_row): """ Helper function; returns a list of align options. Args: delimiter (str): e.g.: "| :--- | :---: | ---: |\n" Returns: a list of align options (None, 0 or 1). """ return cls.column_align_pattern.findall(delimiter_row) @staticmethod def parse_align(column): """ Helper function; returns align option from cell content. Returns: None if align = left; 0 if align = center; 1 if align = right. """ return (0 if column[0] == ':' else 1) if column[-1] == ':' else None @staticmethod def start(line): return '|' in line @classmethod def check_interrupts_paragraph(cls, lines): if not cls.interrupt_paragraph: return False anchor = lines.get_pos() result = cls.read(lines) lines.set_pos(anchor) return result @classmethod def read(cls, lines): anchor = lines.get_pos() line_buffer = [next(lines)] start_line = lines.line_number() while lines.peek() is not None and '|' in lines.peek(): line_buffer.append(next(lines)) if len(line_buffer) < 2 or not cls.delimiter_row_pattern.fullmatch(line_buffer[1]): lines.set_pos(anchor) return None return line_buffer, start_line
Table
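A worked example of the two static helpers above on a GFM delimiter row:

row = "| :--- | :---: | ---: |\n"
cells = Table.split_delimiter(row)            # [':---', ':---:', '---:']
[Table.parse_align(cell) for cell in cells]   # [None, 0, 1] -> left, center, right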
python
kamyu104__LeetCode-Solutions
Python/global-and-local-inversions.py
{ "start": 29, "end": 222 }
class ____(object): def isIdealPermutation(self, A): """ :type A: List[int] :rtype: bool """ return all(abs(v-i) <= 1 for i,v in enumerate(A))
Solution
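Worked examples for the one-liner above. The permutation is "ideal" (every global inversion is also a local inversion) exactly when no element sits more than one position away from its index, which is what the all(...) check tests.

sol = Solution()
assert sol.isIdealPermutation([1, 0, 2]) is True    # one local inversion, one global inversion
assert sol.isIdealPermutation([1, 2, 0]) is False   # value 0 is two positions away from index 2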
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/class_interval.py
{ "start": 2410, "end": 2632 }
class ____(C5): def m2(self, x): _test_sink(x) # No issue def sink_two_hops(b: B5): b.m0(_test_source()) """ A6: [1,8] / \ / \ B6: [2,5] C6: [6,7] | | D6: [3,4] E6: [9,10] """
D5
python
dask__distributed
distributed/actor.py
{ "start": 8643, "end": 8734 }
class ____: _e: Exception def unwrap(self) -> NoReturn: raise self._e
_Error
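A hedged sketch of the wrapper's intent: carry an exception as an ordinary value and re-raise it only when the caller unwraps the result. The snippet shows only the annotation, so the positional construction below assumes dataclass-style init in the real module.

wrapped = _Error(ValueError("actor call failed"))   # assumed dataclass-style constructor
try:
    wrapped.unwrap()
except ValueError as exc:
    print(exc)   # actor call failed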
python
numba__numba
numba/tests/test_generators.py
{ "start": 14500, "end": 16627 }
class ____(MemoryLeakMixin, TestCase): def test_issue_1254(self): """ Missing environment for returning array """ @jit(nopython=True) def random_directions(n): for i in range(n): vec = np.empty(3) vec[:] = 12 yield vec outputs = list(random_directions(5)) self.assertEqual(len(outputs), 5) expect = np.empty(3) expect[:] = 12 for got in outputs: np.testing.assert_equal(expect, got) def test_issue_1265(self): """ Double-free for locally allocated, non escaping NRT objects """ def py_gen(rmin, rmax, nr): a = np.linspace(rmin, rmax, nr) yield a[0] yield a[1] c_gen = jit(nopython=True)(py_gen) py_res = list(py_gen(-2, 2, 100)) c_res = list(c_gen(-2, 2, 100)) self.assertEqual(py_res, c_res) def py_driver(args): rmin, rmax, nr = args points = np.empty(nr, dtype=np.complex128) for i, c in enumerate(py_gen(rmin, rmax, nr)): points[i] = c return points @jit(nopython=True) def c_driver(args): rmin, rmax, nr = args points = np.empty(nr, dtype=np.complex128) for i, c in enumerate(c_gen(rmin, rmax, nr)): points[i] = c return points n = 2 patches = (-2, -1, n) py_res = py_driver(patches) # The error will cause a segfault here c_res = c_driver(patches) np.testing.assert_equal(py_res, c_res) def test_issue_1808(self): """ Incorrect return data model """ magic = 0xdeadbeef @njit def generator(): yield magic @njit def get_generator(): return generator() @njit def main(): out = 0 for x in get_generator(): out += x return out self.assertEqual(main(), magic)
TestGeneratorWithNRT
python
ApeWorX__ape
src/ape/contracts/base.py
{ "start": 35722, "end": 39716 }
class ____(ManagerAccessMixin): contract_type: "ContractType" base_path: Optional[Path] = None @property def selector_identifiers(self) -> dict[str, str]: """ Provides a mapping of function signatures (pre-hashed selectors) to selector identifiers. """ return self.contract_type.selector_identifiers @property def identifier_lookup(self) -> dict[str, "ABI_W_SELECTOR_T"]: """ Provides a mapping of method, error, and event selector identifiers to ABI Types. """ return self.contract_type.identifier_lookup @property def source_path(self) -> Optional[Path]: """ Returns the path to the local contract if determined that this container belongs to the active project by cross-checking source_id. """ if not (source_id := self.contract_type.source_id): return None base = self.base_path or self.local_project.path path = base / source_id return path if path.is_file() else None def decode_input(self, calldata: bytes) -> tuple[str, dict[str, Any]]: """ Decode the given calldata using this contract. If the calldata has a method ID prefix, Ape will detect it and find the corresponding method, else it will error. Args: calldata (bytes): The calldata to decode. Returns: tuple[str, dict[str, Any]]: A tuple containing the method selector along a mapping of input names to their decoded values. If an input does not have a number, it will have the stringified index as its key. """ ecosystem = self.provider.network.ecosystem if calldata in self.contract_type.mutable_methods: method = self.contract_type.mutable_methods[calldata] elif calldata in self.contract_type.view_methods: method = self.contract_type.view_methods[calldata] else: method = None if not method: raise ContractDataError( f"Unable to find method ABI from calldata '{to_hex(calldata)}'. " "Try prepending the method ID to the beginning of the calldata." ) method_id = ecosystem.get_method_selector(method) cutoff = len(method_id) rest_calldata = calldata[cutoff:] input_dict = ecosystem.decode_calldata(method, rest_calldata) return method.selector, input_dict def _create_custom_error_type(self, abi: "ErrorABI", **kwargs) -> type[CustomError]: def exec_body(namespace): namespace["abi"] = abi namespace["contract"] = self for key, val in kwargs.items(): namespace[key] = val error_type = types.new_class(abi.name, (CustomError,), {}, exec_body) natspecs = self.contract_type.natspecs def _get_info(enrich: bool = False) -> str: if not (natspec := natspecs.get(abi.selector)): return "" elif enrich: natspec = _enrich_natspec(natspec) return f"{abi.signature}\n {natspec}" def _repr_pretty_(cls, *args, **kwargs): console = get_rich_console() output = _get_info(enrich=True) or repr(cls) console.print(output) def repr_pretty_for_assignment(cls, *args, **kwargs): return _repr_pretty_(error_type, *args, **kwargs) info = _get_info() error_type.info = error_type.__doc__ = info # type: ignore if info: # perf: Avoid forcing everyone to import from IPython. from IPython.lib.pretty import for_type error_type._repr_pretty_ = repr_pretty_for_assignment # type: ignore # Register the dynamically-created type with IPython so it integrates. for_type(type(error_type), _repr_pretty_) return error_type
ContractTypeWrapper
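decode_input() above expects the full calldata: a 4-byte method ID followed by the ABI-encoded arguments. A standalone illustration of that layout, with hashing and ABI decoding omitted:

# 0xa9059cbb is the well-known selector for transfer(address,uint256); the argument bytes
# below are zero-filled placeholders rather than a real encoded call.
calldata = bytes.fromhex("a9059cbb") + b"\x00" * 64
method_id, encoded_args = calldata[:4], calldata[4:]
assert (len(method_id), len(encoded_args)) == (4, 64)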
python
Unity-Technologies__ml-agents
ml-agents-trainer-plugin/mlagents_trainer_plugin/a2c/a2c_trainer.py
{ "start": 986, "end": 8183 }
class ____(OnPolicyTrainer): """The A2CTrainer is an implementation of the A2C algorithm.""" def __init__( self, behavior_name: str, reward_buff_cap: int, trainer_settings: TrainerSettings, training: bool, load: bool, seed: int, artifact_path: str, ): """ Responsible for collecting experiences and training A2C model. :param behavior_name: The name of the behavior associated with trainer config :param reward_buff_cap: Max reward history to track in the reward buffer :param trainer_settings: The parameters for the trainer. :param training: Whether the trainer is set for training. :param load: Whether the model should be loaded. :param seed: The seed the model will be initialized with :param artifact_path: The directory within which to store artifacts from this trainer. """ super().__init__( behavior_name, reward_buff_cap, trainer_settings, training, load, seed, artifact_path, ) self.hyperparameters: A2CSettings = cast( A2CSettings, self.trainer_settings.hyperparameters ) self.shared_critic = self.hyperparameters.shared_critic self.policy: TorchPolicy = None # type: ignore def _process_trajectory(self, trajectory: Trajectory) -> None: """ Takes a trajectory and processes it, putting it into the update buffer. Processing involves calculating value and advantage targets for model updating step. :param trajectory: The Trajectory tuple containing the steps to be processed. """ super()._process_trajectory(trajectory) agent_id = trajectory.agent_id # All the agents should have the same ID agent_buffer_trajectory = trajectory.to_agentbuffer() # Check if we used group rewards, warn if so. self._warn_if_group_reward(agent_buffer_trajectory) # Update the normalization if self.is_training: self.policy.actor.update_normalization(agent_buffer_trajectory) self.optimizer.critic.update_normalization(agent_buffer_trajectory) # Get all value estimates ( value_estimates, value_next, value_memories, ) = self.optimizer.get_trajectory_value_estimates( agent_buffer_trajectory, trajectory.next_obs, trajectory.done_reached and not trajectory.interrupted, ) if value_memories is not None: agent_buffer_trajectory[BufferKey.CRITIC_MEMORY].set(value_memories) for name, v in value_estimates.items(): agent_buffer_trajectory[RewardSignalUtil.value_estimates_key(name)].extend( v ) self._stats_reporter.add_stat( f"Policy/{self.optimizer.reward_signals[name].name.capitalize()} Value Estimate", np.mean(v), ) # Evaluate all reward functions self.collected_rewards["environment"][agent_id] += np.sum( agent_buffer_trajectory[BufferKey.ENVIRONMENT_REWARDS] ) for name, reward_signal in self.optimizer.reward_signals.items(): evaluate_result = ( reward_signal.evaluate(agent_buffer_trajectory) * reward_signal.strength ) agent_buffer_trajectory[RewardSignalUtil.rewards_key(name)].extend( evaluate_result ) # Report the reward signals self.collected_rewards[name][agent_id] += np.sum(evaluate_result) # Compute GAE and returns tmp_advantages = [] tmp_returns = [] for name in self.optimizer.reward_signals: bootstrap_value = value_next[name] local_rewards = agent_buffer_trajectory[ RewardSignalUtil.rewards_key(name) ].get_batch() local_value_estimates = agent_buffer_trajectory[ RewardSignalUtil.value_estimates_key(name) ].get_batch() local_advantage = get_gae( rewards=local_rewards, value_estimates=local_value_estimates, value_next=bootstrap_value, gamma=self.optimizer.reward_signals[name].gamma, lambd=self.hyperparameters.lambd, ) local_return = local_advantage + local_value_estimates # This is later use as target for the different value 
estimates agent_buffer_trajectory[RewardSignalUtil.returns_key(name)].set( local_return ) agent_buffer_trajectory[RewardSignalUtil.advantage_key(name)].set( local_advantage ) tmp_advantages.append(local_advantage) tmp_returns.append(local_return) # Get global advantages global_advantages = list( np.mean(np.array(tmp_advantages, dtype=np.float32), axis=0) ) global_returns = list(np.mean(np.array(tmp_returns, dtype=np.float32), axis=0)) agent_buffer_trajectory[BufferKey.ADVANTAGES].set(global_advantages) agent_buffer_trajectory[BufferKey.DISCOUNTED_RETURNS].set(global_returns) self._append_to_update_buffer(agent_buffer_trajectory) # If this was a terminal trajectory, append stats and reset reward collection if trajectory.done_reached: self._update_end_episode_stats(agent_id, self.optimizer) def create_optimizer(self) -> TorchOptimizer: """ Creates an Optimizer object """ return A2COptimizer( # type: ignore cast(TorchPolicy, self.policy), self.trainer_settings # type: ignore ) # type: ignore def create_policy( self, parsed_behavior_id: BehaviorIdentifiers, behavior_spec: BehaviorSpec ) -> TorchPolicy: """ Creates a policy with a PyTorch backend and PPO hyperparameters :param parsed_behavior_id: :param behavior_spec: specifications for policy construction :return policy """ actor_cls = SimpleActor actor_kwargs = {"conditional_sigma": False, "tanh_squash": False} if self.shared_critic: reward_signal_configs = self.trainer_settings.reward_signals reward_signal_names = [ key.value for key, _ in reward_signal_configs.items() ] actor_cls = SharedActorCritic actor_kwargs.update({"stream_names": reward_signal_names}) policy = TorchPolicy( self.seed, behavior_spec, self.trainer_settings.network_settings, actor_cls, actor_kwargs, ) return policy @staticmethod def get_settings_type(): return A2CSettings @staticmethod def get_trainer_name() -> str: return TRAINER_NAME def get_type_and_setting(): return {A2CTrainer.get_trainer_name(): A2CTrainer}, { A2CTrainer.get_trainer_name(): A2CSettings }
A2CTrainer
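The advantage targets above come from get_gae(). A standalone sketch of generalized advantage estimation -- the standard recurrence, not ml-agents' implementation:

import numpy as np


def gae(rewards, values, value_next, gamma=0.99, lambd=0.95):
    """delta_t = r_t + gamma * V(s_{t+1}) - V(s_t); advantages are a lambda-discounted sum of deltas."""
    values = np.append(np.asarray(values, dtype=np.float64), value_next)
    deltas = np.asarray(rewards, dtype=np.float64) + gamma * values[1:] - values[:-1]
    advantages = np.zeros_like(deltas)
    running = 0.0
    for t in reversed(range(len(deltas))):
        running = deltas[t] + gamma * lambd * running
        advantages[t] = running
    return advantages


adv = gae(rewards=[1.0, 0.0, 1.0], values=[0.5, 0.4, 0.6], value_next=0.0)
returns = adv + np.array([0.5, 0.4, 0.6])   # mirrors local_return = local_advantage + local_value_estimates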
python
apache__avro
lang/py/avro/test/mock_tether_parent.py
{ "start": 1058, "end": 1804 }
class ____(avro.ipc.Responder): """ The responder for the mocked parent """ def __init__(self) -> None: super().__init__(avro.tether.tether_task.outputProtocol) def invoke(self, message: avro.protocol.Message, request: Mapping[str, str]) -> None: response = f"MockParentResponder: Received '{message.name}'" responses = { "configure": f"{response}': inputPort={request.get('port')}", "status": f"{response}: message={request.get('message')}", "fail": f"{response}: message={request.get('message')}", } print(responses.get(message.name, response)) sys.stdout.flush() # flush the output so it shows up in the parent process
MockParentResponder