Columns:
- language: stringclasses (1 value)
- repo: stringclasses (346 values)
- path: stringlengths (6 to 201)
- class_span: dict
- source: stringlengths (21 to 2.38M)
- target: stringlengths (1 to 96)
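Each row pairs a Python class body whose name has been masked with `____` (the `source` column) with the original class name (the `target` column); `class_span` records the start and end character offsets of the class within the file at `path`. As a minimal sketch of how a consumer might restore a row — the JSON Lines layout, the file name, and the helper names below are assumptions, since the preview does not state how the data is published:

```python
# Hypothetical reader for rows shaped like the preview above; assumes the
# records are stored as JSON Lines with the columns listed in the schema
# (language, repo, path, class_span, source, target). The file name is made up.
import json


def restore_class_name(source: str, target: str) -> str:
    """Fill the masked class name back into the class body."""
    # Each source field carries a single ____ placeholder for the name.
    return source.replace("____", target, 1)


def iter_rows(jsonl_path: str = "train.jsonl"):
    with open(jsonl_path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            span = row["class_span"]  # character offsets into the file at row["path"]
            yield {
                "repo": row["repo"],
                "path": row["path"],
                "span_chars": span["end"] - span["start"],
                "restored": restore_class_name(row["source"], row["target"]),
            }
```

On the first row below, for instance, `restore_class_name` would turn `class ____:` back into `class TestAwsAuthManager:`.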
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/auth_manager/test_aws_auth_manager.py
{ "start": 3242, "end": 25793 }
class ____: def test_avp_facade(self, auth_manager): assert hasattr(auth_manager, "avp_facade") @pytest.mark.parametrize( ("details", "user", "expected_user", "expected_entity_id"), [ (None, mock, ANY, None), (ConfigurationDetails(section="test"), mock, mock, "test"), ], ) @patch.object(AwsAuthManager, "avp_facade") def test_is_authorized_configuration( self, mock_avp_facade, details, user, expected_user, expected_entity_id, auth_manager, ): is_authorized = Mock(return_value=True) mock_avp_facade.is_authorized = is_authorized method: ResourceMethod = "GET" result = auth_manager.is_authorized_configuration(method=method, details=details, user=user) is_authorized.assert_called_once_with( method=method, entity_type=AvpEntities.CONFIGURATION, user=expected_user, entity_id=expected_entity_id, ) assert result @pytest.mark.parametrize( ("details", "user", "expected_user", "expected_entity_id"), [ (None, mock, ANY, None), (ConnectionDetails(conn_id="conn_id"), mock, mock, "conn_id"), ], ) @patch.object(AwsAuthManager, "avp_facade") def test_is_authorized_connection( self, mock_avp_facade, details, user, expected_user, expected_entity_id, auth_manager, ): is_authorized = Mock(return_value=True) mock_avp_facade.is_authorized = is_authorized method: ResourceMethod = "GET" result = auth_manager.is_authorized_connection(method=method, details=details, user=user) is_authorized.assert_called_once_with( method=method, entity_type=AvpEntities.CONNECTION, user=expected_user, entity_id=expected_entity_id, ) assert result @pytest.mark.parametrize( ("access_entity", "details", "user", "expected_user", "expected_entity_id", "expected_context"), [ (None, None, mock, ANY, None, None), (None, DagDetails(id="dag_1"), mock, mock, "dag_1", None), ( DagAccessEntity.CODE, DagDetails(id="dag_1"), mock, mock, "dag_1", { "dag_entity": { "string": "CODE", }, }, ), ], ) @patch.object(AwsAuthManager, "avp_facade") def test_is_authorized_dag( self, mock_avp_facade, access_entity, details, user, expected_user, expected_entity_id, expected_context, auth_manager, ): is_authorized = Mock(return_value=True) mock_avp_facade.is_authorized = is_authorized method: ResourceMethod = "GET" result = auth_manager.is_authorized_dag( method=method, access_entity=access_entity, details=details, user=user ) is_authorized.assert_called_once_with( method=method, entity_type=AvpEntities.DAG, user=expected_user, entity_id=expected_entity_id, context=expected_context, ) assert result @pytest.mark.parametrize( ("details", "user", "expected_user", "expected_entity_id"), [ (None, mock, ANY, None), (BackfillDetails(id=1), mock, mock, 1), ], ) @patch.object(AwsAuthManager, "avp_facade") def test_is_authorized_backfill( self, mock_avp_facade, details, user, expected_user, expected_entity_id, auth_manager, ): is_authorized = Mock(return_value=True) mock_avp_facade.is_authorized = is_authorized method: ResourceMethod = "GET" result = auth_manager.is_authorized_backfill(method=method, details=details, user=user) is_authorized.assert_called_once_with( method=method, entity_type=AvpEntities.BACKFILL, user=expected_user, entity_id=expected_entity_id ) assert result @pytest.mark.parametrize( ("details", "user", "expected_user", "expected_entity_id"), [ (None, mock, ANY, None), (AssetDetails(id="1"), mock, mock, "1"), ], ) @patch.object(AwsAuthManager, "avp_facade") def test_is_authorized_asset( self, mock_avp_facade, details, user, expected_user, expected_entity_id, auth_manager, ): is_authorized = Mock(return_value=True) mock_avp_facade.is_authorized = is_authorized method: ResourceMethod = "GET" result = auth_manager.is_authorized_asset(method=method, details=details, user=user) is_authorized.assert_called_once_with( method=method, entity_type=AvpEntities.ASSET, user=expected_user, entity_id=expected_entity_id ) assert result @pytest.mark.parametrize( ("details", "user", "expected_user", "expected_entity_id"), [ (None, mock, ANY, None), (AssetAliasDetails(id="1"), mock, mock, "1"), ], ) @patch.object(AwsAuthManager, "avp_facade") def test_is_authorized_asset_alias( self, mock_avp_facade, details, user, expected_user, expected_entity_id, auth_manager, ): is_authorized = Mock(return_value=True) mock_avp_facade.is_authorized = is_authorized method: ResourceMethod = "GET" result = auth_manager.is_authorized_asset_alias(method=method, details=details, user=user) is_authorized.assert_called_once_with( method=method, entity_type=AvpEntities.ASSET_ALIAS, user=expected_user, entity_id=expected_entity_id, ) assert result @pytest.mark.parametrize( ("details", "user", "expected_user", "expected_entity_id"), [ (None, mock, ANY, None), (PoolDetails(name="pool1"), mock, mock, "pool1"), ], ) @patch.object(AwsAuthManager, "avp_facade") def test_is_authorized_pool( self, mock_avp_facade, details, user, expected_user, expected_entity_id, auth_manager, ): is_authorized = Mock(return_value=True) mock_avp_facade.is_authorized = is_authorized method: ResourceMethod = "GET" result = auth_manager.is_authorized_pool(method=method, details=details, user=user) is_authorized.assert_called_once_with( method=method, entity_type=AvpEntities.POOL, user=expected_user, entity_id=expected_entity_id ) assert result @pytest.mark.parametrize( ("details", "user", "expected_user", "expected_entity_id"), [ (None, mock, ANY, None), (VariableDetails(key="var1"), mock, mock, "var1"), ], ) @patch.object(AwsAuthManager, "avp_facade") def test_is_authorized_variable( self, mock_avp_facade, details, user, expected_user, expected_entity_id, auth_manager, ): is_authorized = Mock(return_value=True) mock_avp_facade.is_authorized = is_authorized method: ResourceMethod = "GET" result = auth_manager.is_authorized_variable(method=method, details=details, user=user) is_authorized.assert_called_once_with( method=method, entity_type=AvpEntities.VARIABLE, user=expected_user, entity_id=expected_entity_id ) assert result @pytest.mark.parametrize( ("access_view", "user", "expected_user"), [ (AccessView.CLUSTER_ACTIVITY, mock, ANY), (AccessView.PLUGINS, mock, mock), ], ) @patch.object(AwsAuthManager, "avp_facade") def test_is_authorized_view(self, mock_avp_facade, access_view, user, expected_user, auth_manager): is_authorized = Mock(return_value=True) mock_avp_facade.is_authorized = is_authorized result = auth_manager.is_authorized_view(access_view=access_view, user=user) is_authorized.assert_called_once_with( method="GET", entity_type=AvpEntities.VIEW, user=expected_user, entity_id=access_view.value ) assert result def test_filter_authorized_menu_items(self, auth_manager): batch_is_authorized_output = [ { "request": { "principal": {"entityType": "Airflow::User", "entityId": "test_user_id"}, "action": {"actionType": "Airflow::Action", "actionId": "Menu.MENU"}, "resource": {"entityType": "Airflow::Menu", "entityId": MenuItem.CONNECTIONS.value}, }, "decision": "DENY", }, { "request": { "principal": {"entityType": "Airflow::User", "entityId": "test_user_id"}, "action": {"actionType": "Airflow::Action", "actionId": "Menu.MENU"}, "resource": {"entityType": "Airflow::Menu", "entityId": MenuItem.VARIABLES.value}, }, "decision": "ALLOW", }, { "request": { "principal": {"entityType": "Airflow::User", "entityId": "test_user_id"}, "action": {"actionType": "Airflow::Action", "actionId": "Menu.MENU"}, "resource": {"entityType": "Airflow::Menu", "entityId": MenuItem.ASSETS.value}, }, "decision": "DENY", }, { "request": { "principal": {"entityType": "Airflow::User", "entityId": "test_user_id"}, "action": {"actionType": "Airflow::Action", "actionId": "Menu.MENU"}, "resource": {"entityType": "Airflow::Menu", "entityId": MenuItem.DAGS.value}, }, "decision": "ALLOW", }, ] auth_manager.avp_facade.get_batch_is_authorized_results = Mock( return_value=batch_is_authorized_output ) result = auth_manager.filter_authorized_menu_items( [MenuItem.CONNECTIONS, MenuItem.VARIABLES, MenuItem.ASSETS, MenuItem.DAGS], user=AwsAuthManagerUser(user_id="test_user_id", groups=[]), ) auth_manager.avp_facade.get_batch_is_authorized_results.assert_called_once_with( requests=[ { "method": "MENU", "entity_type": AvpEntities.MENU, "entity_id": MenuItem.CONNECTIONS.value, }, { "method": "MENU", "entity_type": AvpEntities.MENU, "entity_id": MenuItem.VARIABLES.value, }, { "method": "MENU", "entity_type": AvpEntities.MENU, "entity_id": MenuItem.ASSETS.value, }, { "method": "MENU", "entity_type": AvpEntities.MENU, "entity_id": MenuItem.DAGS.value, }, ], user=ANY, ) assert result == [MenuItem.VARIABLES, MenuItem.DAGS] @patch.object(AwsAuthManager, "avp_facade") def test_batch_is_authorized_connection( self, mock_avp_facade, auth_manager, ): batch_is_authorized = Mock(return_value=True) mock_avp_facade.batch_is_authorized = batch_is_authorized result = auth_manager.batch_is_authorized_connection( requests=[ {"method": "GET"}, {"method": "PUT", "details": ConnectionDetails(conn_id="test")}, ], user=mock, ) batch_is_authorized.assert_called_once_with( requests=[ { "method": "GET", "entity_type": AvpEntities.CONNECTION, "entity_id": None, }, { "method": "PUT", "entity_type": AvpEntities.CONNECTION, "entity_id": "test", }, ], user=ANY, ) assert result @patch.object(AwsAuthManager, "avp_facade") def test_batch_is_authorized_dag( self, mock_avp_facade, auth_manager, ): batch_is_authorized = Mock(return_value=True) mock_avp_facade.batch_is_authorized = batch_is_authorized result = auth_manager.batch_is_authorized_dag( requests=[ {"method": "GET"}, {"method": "GET", "details": DagDetails(id="dag_1")}, ] + [ {"method": "GET", "details": DagDetails(id="dag_1"), "access_entity": dag_access_entity} for dag_access_entity in ( DagAccessEntity.AUDIT_LOG, DagAccessEntity.CODE, DagAccessEntity.DEPENDENCIES, DagAccessEntity.RUN, DagAccessEntity.TASK, DagAccessEntity.TASK_INSTANCE, DagAccessEntity.TASK_LOGS, DagAccessEntity.VERSION, DagAccessEntity.WARNING, DagAccessEntity.XCOM, ) ], user=mock, ) batch_is_authorized.assert_called_once_with( requests=[ { "method": "GET", "entity_type": AvpEntities.DAG, "entity_id": None, "context": None, }, { "method": "GET", "entity_type": AvpEntities.DAG, "entity_id": "dag_1", "context": None, }, ] + [ { "method": "GET", "entity_type": AvpEntities.DAG, "entity_id": "dag_1", "context": {"dag_entity": {"string": dag_entity}}, } for dag_entity in ( DagAccessEntity.AUDIT_LOG.value, DagAccessEntity.CODE.value, DagAccessEntity.DEPENDENCIES.value, DagAccessEntity.RUN.value, DagAccessEntity.TASK.value, DagAccessEntity.TASK_INSTANCE.value, DagAccessEntity.TASK_LOGS.value, DagAccessEntity.VERSION.value, DagAccessEntity.WARNING.value, DagAccessEntity.XCOM.value, ) ], user=ANY, ) assert result @patch.object(AwsAuthManager, "avp_facade") def test_batch_is_authorized_pool( self, mock_avp_facade, auth_manager, ): batch_is_authorized = Mock(return_value=True) mock_avp_facade.batch_is_authorized = batch_is_authorized result = auth_manager.batch_is_authorized_pool( requests=[ {"method": "GET"}, {"method": "PUT", "details": PoolDetails(name="test")}, ], user=mock, ) batch_is_authorized.assert_called_once_with( requests=[ { "method": "GET", "entity_type": AvpEntities.POOL, "entity_id": None, }, { "method": "PUT", "entity_type": AvpEntities.POOL, "entity_id": "test", }, ], user=ANY, ) assert result @patch.object(AwsAuthManager, "avp_facade") def test_batch_is_authorized_variable( self, mock_avp_facade, auth_manager, ): batch_is_authorized = Mock(return_value=True) mock_avp_facade.batch_is_authorized = batch_is_authorized result = auth_manager.batch_is_authorized_variable( requests=[ {"method": "GET"}, {"method": "PUT", "details": VariableDetails(key="test")}, ], user=mock, ) batch_is_authorized.assert_called_once_with( requests=[ { "method": "GET", "entity_type": AvpEntities.VARIABLE, "entity_id": None, }, { "method": "PUT", "entity_type": AvpEntities.VARIABLE, "entity_id": "test", }, ], user=ANY, ) assert result @pytest.mark.parametrize( ("get_authorized_method", "avp_entity", "entities_parameter"), [ ("filter_authorized_connections", AvpEntities.CONNECTION.value, "conn_ids"), ("filter_authorized_dag_ids", AvpEntities.DAG.value, "dag_ids"), ("filter_authorized_pools", AvpEntities.POOL.value, "pool_names"), ("filter_authorized_variables", AvpEntities.VARIABLE.value, "variable_keys"), ], ) @pytest.mark.parametrize( ("method", "user", "expected_result"), [ ("GET", AwsAuthManagerUser(user_id="test_user_id1", groups=[]), {"entity_1"}), ("PUT", AwsAuthManagerUser(user_id="test_user_id1", groups=[]), set()), ("GET", AwsAuthManagerUser(user_id="test_user_id2", groups=[]), set()), ("PUT", AwsAuthManagerUser(user_id="test_user_id2", groups=[]), {"entity_2"}), ], ) def test_filter_authorized( self, get_authorized_method, avp_entity, entities_parameter, method, user, auth_manager, test_user, expected_result, ): entity_ids = {"entity_1", "entity_2"} # test_user_id1 has GET permissions on entity_1 # test_user_id2 has PUT permissions on entity_2 batch_is_authorized_output = [ { "request": { "principal": {"entityType": "Airflow::User", "entityId": "test_user_id1"}, "action": {"actionType": "Airflow::Action", "actionId": f"{avp_entity}.GET"}, "resource": {"entityType": f"Airflow::{avp_entity}", "entityId": "entity_1"}, }, "decision": "ALLOW", }, { "request": { "principal": {"entityType": "Airflow::User", "entityId": "test_user_id1"}, "action": {"actionType": "Airflow::Action", "actionId": f"{avp_entity}.PUT"}, "resource": {"entityType": f"Airflow::{avp_entity}", "entityId": "entity_1"}, }, "decision": "DENY", }, { "request": { "principal": {"entityType": "Airflow::User", "entityId": "test_user_id1"}, "action": {"actionType": "Airflow::Action", "actionId": f"{avp_entity}.GET"}, "resource": {"entityType": f"Airflow::{avp_entity}", "entityId": "entity_2"}, }, "decision": "DENY", }, { "request": { "principal": {"entityType": "Airflow::User", "entityId": "test_user_id1"}, "action": {"actionType": "Airflow::Action", "actionId": f"{avp_entity}.PUT"}, "resource": {"entityType": f"Airflow::{avp_entity}", "entityId": "entity_2"}, }, "decision": "DENY", }, { "request": { "principal": {"entityType": "Airflow::User", "entityId": "test_user_id2"}, "action": {"actionType": "Airflow::Action", "actionId": f"{avp_entity}.GET"}, "resource": {"entityType": f"Airflow::{avp_entity}", "entityId": "entity_1"}, }, "decision": "DENY", }, { "request": { "principal": {"entityType": "Airflow::User", "entityId": "test_user_id2"}, "action": {"actionType": "Airflow::Action", "actionId": f"{avp_entity}.PUT"}, "resource": {"entityType": f"Airflow::{avp_entity}", "entityId": "entity_1"}, }, "decision": "DENY", }, { "request": { "principal": {"entityType": "Airflow::User", "entityId": "test_user_id2"}, "action": {"actionType": "Airflow::Action", "actionId": f"{avp_entity}.GET"}, "resource": {"entityType": f"Airflow::{avp_entity}", "entityId": "entity_2"}, }, "decision": "DENY", }, { "request": { "principal": {"entityType": "Airflow::User", "entityId": "test_user_id2"}, "action": {"actionType": "Airflow::Action", "actionId": f"{avp_entity}.PUT"}, "resource": {"entityType": f"Airflow::{avp_entity}", "entityId": "entity_2"}, }, "decision": "ALLOW", }, ] auth_manager.avp_facade.get_batch_is_authorized_results = Mock( return_value=batch_is_authorized_output ) params = { entities_parameter: entity_ids, "method": method, "user": user, } result = getattr(auth_manager, get_authorized_method)(**params) auth_manager.avp_facade.get_batch_is_authorized_results.assert_called() assert result == expected_result def test_get_url_login(self, auth_manager): result = auth_manager.get_url_login() assert result == f"{AUTH_MANAGER_FASTAPI_APP_PREFIX}/login" def test_get_cli_commands_return_cli_commands(self, auth_manager): assert len(auth_manager.get_cli_commands()) > 0
TestAwsAuthManager
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/external.py
{ "start": 3339, "end": 5171 }
class ____(graphene.ObjectType): id = graphene.NonNull(graphene.ID) name = graphene.NonNull(graphene.String) is_reload_supported = graphene.NonNull(graphene.Boolean) environment_path = graphene.String() repositories = non_null_list(lambda: GrapheneRepository) server_id = graphene.String() dagsterLibraryVersions = graphene.List(graphene.NonNull(GrapheneDagsterLibraryVersion)) class Meta: name = "RepositoryLocation" def __init__(self, name: str, location: Optional[CodeLocation] = None): self._location = location super().__init__( name=name, ) def resolve_id(self, _) -> str: return self.name def get_location(self, graphene_info: ResolveInfo) -> CodeLocation: if self._location is None: self._location = graphene_info.context.get_code_location(self.name) return self._location def resolve_repositories(self, graphene_info: ResolveInfo): return [ GrapheneRepository(repository.handle) for repository in self.get_location(graphene_info).get_repositories().values() ] def resolve_dagsterLibraryVersions(self, graphene_info: ResolveInfo): libs = self.get_location(graphene_info).get_dagster_library_versions() if libs is None: return None return [GrapheneDagsterLibraryVersion(name, ver) for name, ver in libs.items()] def resolve_server_id(self, graphene_info: ResolveInfo): location = self.get_location(graphene_info) return location.server_id if isinstance(location, GrpcServerCodeLocation) else None def resolve_is_reload_supported(self, graphene_info: ResolveInfo): location = self.get_location(graphene_info) return location.is_reload_supported
GrapheneRepositoryLocation
python
dagster-io__dagster
python_modules/libraries/dagster-deltalake-polars/dagster_deltalake_polars/deltalake_polars_type_handler.py
{ "start": 2299, "end": 2592 }
class ____(DeltaLakeIOManager): @staticmethod def type_handlers() -> Sequence[DbTypeHandler]: return [DeltaLakePolarsTypeHandler(), DeltaLakePyArrowTypeHandler()] @staticmethod def default_load_type() -> Optional[type]: return pl.DataFrame
DeltaLakePolarsIOManager
python
doocs__leetcode
solution/2300-2399/2368.Reachable Nodes With Restrictions/Solution2.py
{ "start": 0, "end": 516 }
class ____: def reachableNodes( self, n: int, edges: List[List[int]], restricted: List[int] ) -> int: g = defaultdict(list) for a, b in edges: g[a].append(b) g[b].append(a) vis = set(restricted + [0]) q = deque([0]) ans = 0 while q: i = q.popleft() ans += 1 for j in g[i]: if j not in vis: q.append(j) vis.add(j) return ans
Solution
python
django__django
django/core/mail/backends/console.py
{ "start": 171, "end": 1427 }
class ____(BaseEmailBackend): def __init__(self, *args, **kwargs): self.stream = kwargs.pop("stream", sys.stdout) self._lock = threading.RLock() super().__init__(*args, **kwargs) def write_message(self, message): msg = message.message() msg_data = msg.as_bytes() charset = ( msg.get_charset().get_output_charset() if msg.get_charset() else "utf-8" ) msg_data = msg_data.decode(charset) self.stream.write("%s\n" % msg_data) self.stream.write("-" * 79) self.stream.write("\n") def send_messages(self, email_messages): """Write all messages to the stream in a thread-safe way.""" if not email_messages: return msg_count = 0 with self._lock: try: stream_created = self.open() for message in email_messages: self.write_message(message) self.stream.flush() # flush after each message msg_count += 1 if stream_created: self.close() except Exception: if not self.fail_silently: raise return msg_count
EmailBackend
python
ray-project__ray
rllib/core/learner/learner_group.py
{ "start": 2503, "end": 3070 }
class ____(BackendExecutor): # Override `BackendExecutor` placement group creation logic. We need to pass our own # to make sure the one of the Algorithm (Trainable) is used for all the # Algorithm's actors. def _create_placement_group(self): pass # TODO (sven): Change this once there is a better (public) API for this in the # superclass. def set_placement_group(self, placement_group): if placement_group is not None: self._placement_group = placement_group @PublicAPI(stability="alpha")
RLlibBackendExecutor
python
airbytehq__airbyte
airbyte-integrations/connectors/source-facebook-marketing/unit_tests/integration/config.py
{ "start": 533, "end": 2272 }
class ____: def __init__(self) -> None: self._config: MutableMapping[str, Any] = { "account_ids": [ACCOUNT_ID], "access_token": ACCESS_TOKEN, "credentials": { "auth_type": "Service", "access_token": ACCESS_TOKEN, }, "start_date": START_DATE, "end_date": END_DATE, "include_deleted": True, "fetch_thumbnail_images": True, "custom_insights": [], "page_size": 100, "insights_lookback_window": 28, "insights_job_timeout": 60, "action_breakdowns_allow_empty": True, "client_id": CLIENT_ID, "client_secret": CLIENT_SECRET, } def with_account_ids(self, account_ids: List[str]) -> ConfigBuilder: self._config["account_ids"] = account_ids return self def with_start_date(self, start_date: datetime) -> ConfigBuilder: self._config["start_date"] = start_date.strftime(DATE_TIME_FORMAT) return self def with_end_date(self, end_date: datetime) -> ConfigBuilder: self._config["end_date"] = end_date.strftime(DATE_TIME_FORMAT) return self def with_ad_statuses(self, statuses: List[str]) -> ConfigBuilder: self._config["ad_statuses"] = statuses return self def with_campaign_statuses(self, statuses: List[str]) -> ConfigBuilder: self._config["campaign_statuses"] = statuses return self def with_ad_set_statuses(self, statuses: List[str]) -> ConfigBuilder: self._config["adset_statuses"] = statuses return self def build(self) -> MutableMapping[str, Any]: return self._config
ConfigBuilder
python
pandas-dev__pandas
pandas/tests/series/methods/test_to_frame.py
{ "start": 107, "end": 1992 }
class ____: def test_to_frame_respects_name_none(self): # GH#44212 if we explicitly pass name=None, then that should be respected, # not changed to 0 # GH-45448 this is first deprecated & enforced in 2.0 ser = Series(range(3)) result = ser.to_frame(None) exp_index = Index([None], dtype=object) tm.assert_index_equal(result.columns, exp_index) result = ser.rename("foo").to_frame(None) exp_index = Index([None], dtype=object) tm.assert_index_equal(result.columns, exp_index) def test_to_frame(self, datetime_series): datetime_series.name = None rs = datetime_series.to_frame() xp = DataFrame(datetime_series.values, index=datetime_series.index) tm.assert_frame_equal(rs, xp) datetime_series.name = "testname" rs = datetime_series.to_frame() xp = DataFrame( {"testname": datetime_series.values}, index=datetime_series.index ) tm.assert_frame_equal(rs, xp) rs = datetime_series.to_frame(name="testdifferent") xp = DataFrame( {"testdifferent": datetime_series.values}, index=datetime_series.index ) tm.assert_frame_equal(rs, xp) @pytest.mark.filterwarnings( "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" ) def test_to_frame_expanddim(self): # GH#9762 class SubclassedSeries(Series): @property def _constructor_expanddim(self): return SubclassedFrame class SubclassedFrame(DataFrame): pass ser = SubclassedSeries([1, 2, 3], name="X") result = ser.to_frame() assert isinstance(result, SubclassedFrame) expected = SubclassedFrame({"X": [1, 2, 3]}) tm.assert_frame_equal(result, expected)
TestToFrame
python
kamyu104__LeetCode-Solutions
Python/find-the-winner-of-an-array-game.py
{ "start": 29, "end": 446 }
class ____(object): def getWinner(self, arr, k): """ :type arr: List[int] :type k: int :rtype: int """ result = arr[0] count = 0 for i in xrange(1, len(arr)): if arr[i] > result: result = arr[i] count = 0 count += 1 if (count == k): break return result
Solution
python
ipython__ipython
IPython/utils/_process_win32_controller.py
{ "start": 1769, "end": 5124 }
class ____(ctypes.Structure): _fields_ = [("hProcess", HANDLE), ("hThread", HANDLE), ("dwProcessId", DWORD), ("dwThreadId", DWORD)] LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION) # Win32 API constants needed ERROR_HANDLE_EOF = 38 ERROR_BROKEN_PIPE = 109 ERROR_NO_DATA = 232 HANDLE_FLAG_INHERIT = 0x0001 STARTF_USESTDHANDLES = 0x0100 CREATE_SUSPENDED = 0x0004 CREATE_NEW_CONSOLE = 0x0010 CREATE_NO_WINDOW = 0x08000000 STILL_ACTIVE = 259 WAIT_TIMEOUT = 0x0102 WAIT_FAILED = 0xFFFFFFFF INFINITE = 0xFFFFFFFF DUPLICATE_SAME_ACCESS = 0x00000002 ENABLE_ECHO_INPUT = 0x0004 ENABLE_LINE_INPUT = 0x0002 ENABLE_PROCESSED_INPUT = 0x0001 # Win32 API functions needed GetLastError = ctypes.windll.kernel32.GetLastError GetLastError.argtypes = [] GetLastError.restype = DWORD CreateFile = ctypes.windll.kernel32.CreateFileW CreateFile.argtypes = [LPCWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE] CreateFile.restype = HANDLE CreatePipe = ctypes.windll.kernel32.CreatePipe CreatePipe.argtypes = [POINTER(HANDLE), POINTER(HANDLE), LPSECURITY_ATTRIBUTES, DWORD] CreatePipe.restype = BOOL CreateProcess = ctypes.windll.kernel32.CreateProcessW CreateProcess.argtypes = [LPCWSTR, LPCWSTR, LPSECURITY_ATTRIBUTES, LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCWSTR, LPSTARTUPINFO, LPPROCESS_INFORMATION] CreateProcess.restype = BOOL GetExitCodeProcess = ctypes.windll.kernel32.GetExitCodeProcess GetExitCodeProcess.argtypes = [HANDLE, LPDWORD] GetExitCodeProcess.restype = BOOL GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess GetCurrentProcess.argtypes = [] GetCurrentProcess.restype = HANDLE ResumeThread = ctypes.windll.kernel32.ResumeThread ResumeThread.argtypes = [HANDLE] ResumeThread.restype = DWORD ReadFile = ctypes.windll.kernel32.ReadFile ReadFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID] ReadFile.restype = BOOL WriteFile = ctypes.windll.kernel32.WriteFile WriteFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID] WriteFile.restype = BOOL GetConsoleMode = ctypes.windll.kernel32.GetConsoleMode GetConsoleMode.argtypes = [HANDLE, LPDWORD] GetConsoleMode.restype = BOOL SetConsoleMode = ctypes.windll.kernel32.SetConsoleMode SetConsoleMode.argtypes = [HANDLE, DWORD] SetConsoleMode.restype = BOOL FlushConsoleInputBuffer = ctypes.windll.kernel32.FlushConsoleInputBuffer FlushConsoleInputBuffer.argtypes = [HANDLE] FlushConsoleInputBuffer.restype = BOOL WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject WaitForSingleObject.argtypes = [HANDLE, DWORD] WaitForSingleObject.restype = DWORD DuplicateHandle = ctypes.windll.kernel32.DuplicateHandle DuplicateHandle.argtypes = [HANDLE, HANDLE, HANDLE, LPHANDLE, DWORD, BOOL, DWORD] DuplicateHandle.restype = BOOL SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation SetHandleInformation.argtypes = [HANDLE, DWORD, DWORD] SetHandleInformation.restype = BOOL CloseHandle = ctypes.windll.kernel32.CloseHandle CloseHandle.argtypes = [HANDLE] CloseHandle.restype = BOOL CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(ctypes.c_int)] CommandLineToArgvW.restype = POINTER(LPCWSTR) LocalFree = ctypes.windll.kernel32.LocalFree LocalFree.argtypes = [HLOCAL] LocalFree.restype = HLOCAL
PROCESS_INFORMATION
python
openai__openai-python
src/openai/types/audio/transcription_text_done_event.py
{ "start": 1323, "end": 1940 }
class ____(BaseModel): text: str """The text that was transcribed.""" type: Literal["transcript.text.done"] """The type of the event. Always `transcript.text.done`.""" logprobs: Optional[List[Logprob]] = None """The log probabilities of the individual tokens in the transcription. Only included if you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `include[]` parameter set to `logprobs`. """ usage: Optional[Usage] = None """Usage statistics for models billed by token usage."""
TranscriptionTextDoneEvent
python
huggingface__transformers
src/transformers/models/phimoe/modeling_phimoe.py
{ "start": 9178, "end": 12328 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: PhimoeConfig, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) self.rotary_fn = apply_rotary_pos_emb def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights
PhimoeAttention
python
pypa__setuptools
setuptools/_vendor/autocommand/autoparse.py
{ "start": 1464, "end": 11642 }
class ____(DocstringError): ''' The docstring had too many ---- section splits. Currently we only support using up to a single split, to split the docstring into description and epilog parts. ''' def _get_type_description(annotation): ''' Given an annotation, return the (type, description) for the parameter. If you provide an annotation that is somehow both a string and a callable, the behavior is undefined. ''' if annotation is _empty: return None, None elif callable(annotation): return annotation, None elif isinstance(annotation, str): return None, annotation elif isinstance(annotation, tuple): try: arg1, arg2 = annotation except ValueError as e: raise AnnotationError(annotation) from e else: if callable(arg1) and isinstance(arg2, str): return arg1, arg2 elif isinstance(arg1, str) and callable(arg2): return arg2, arg1 raise AnnotationError(annotation) def _add_arguments(param, parser, used_char_args, add_nos): ''' Add the argument(s) to an ArgumentParser (using add_argument) for a given parameter. used_char_args is the set of -short options currently already in use, and is updated (if necessary) by this function. If add_nos is True, this will also add an inverse switch for all boolean options. For instance, for the boolean parameter "verbose", this will create --verbose and --no-verbose. ''' # Impl note: This function is kept separate from make_parser because it's # already very long and I wanted to separate out as much as possible into # its own call scope, to prevent even the possibility of subtle mutation # bugs. if param.kind is param.POSITIONAL_ONLY: raise PositionalArgError(param) elif param.kind is param.VAR_KEYWORD: raise KWArgError(param) # These are the kwargs for the add_argument function. arg_spec = {} is_option = False # Get the type and default from the annotation. arg_type, description = _get_type_description(param.annotation) # Get the default value default = param.default # If there is no explicit type, and the default is present and not None, # infer the type from the default. if arg_type is None and default not in {_empty, None}: arg_type = type(default) # Add default. The presence of a default means this is an option, not an # argument. if default is not _empty: arg_spec['default'] = default is_option = True # Add the type if arg_type is not None: # Special case for bool: make it just a --switch if arg_type is bool: if not default or default is _empty: arg_spec['action'] = 'store_true' else: arg_spec['action'] = 'store_false' # Switches are always options is_option = True # Special case for file types: make it a string type, for filename elif isinstance(default, IOBase): arg_spec['type'] = str # TODO: special case for list type. # - How to specify type of list members? # - param: [int] # - param: int =[] # - action='append' vs nargs='*' else: arg_spec['type'] = arg_type # nargs: if the signature includes *args, collect them as trailing CLI # arguments in a list. *args can't have a default value, so it can never be # an option. if param.kind is param.VAR_POSITIONAL: # TODO: consider depluralizing metavar/name here. arg_spec['nargs'] = '*' # Add description. if description is not None: arg_spec['help'] = description # Get the --flags flags = [] name = param.name if is_option: # Add the first letter as a -short option. for letter in name[0], name[0].swapcase(): if letter not in used_char_args: used_char_args.add(letter) flags.append('-{}'.format(letter)) break # If the parameter is a --long option, or is a -short option that # somehow failed to get a flag, add it. if len(name) > 1 or not flags: flags.append('--{}'.format(name)) arg_spec['dest'] = name else: flags.append(name) parser.add_argument(*flags, **arg_spec) # Create the --no- version for boolean switches if add_nos and arg_type is bool: parser.add_argument( '--no-{}'.format(name), action='store_const', dest=name, const=default if default is not _empty else False) def make_parser(func_sig, description, epilog, add_nos): ''' Given the signature of a function, create an ArgumentParser ''' parser = ArgumentParser(description=description, epilog=epilog) used_char_args = {'h'} # Arrange the params so that single-character arguments are first. This # ensures they don't have to get --long versions. sorted is stable, so the # parameters will otherwise still be in relative order. params = sorted( func_sig.parameters.values(), key=lambda param: len(param.name) > 1) for param in params: _add_arguments(param, parser, used_char_args, add_nos) return parser _DOCSTRING_SPLIT = compile_regex(r'\n\s*-{4,}\s*\n') def parse_docstring(docstring): ''' Given a docstring, parse it into a description and epilog part ''' if docstring is None: return '', '' parts = _DOCSTRING_SPLIT.split(docstring) if len(parts) == 1: return docstring, '' elif len(parts) == 2: return parts[0], parts[1] else: raise TooManySplitsError() def autoparse( func=None, *, description=None, epilog=None, add_nos=False, parser=None): ''' This decorator converts a function that takes normal arguments into a function which takes a single optional argument, argv, parses it using an argparse.ArgumentParser, and calls the underlying function with the parsed arguments. If it is not given, sys.argv[1:] is used. This is so that the function can be used as a setuptools entry point, as well as a normal main function. sys.argv[1:] is not evaluated until the function is called, to allow injecting different arguments for testing. It uses the argument signature of the function to create an ArgumentParser. Parameters without defaults become positional parameters, while parameters *with* defaults become --options. Use annotations to set the type of the parameter. The `description` and `epilog` parameters correspond to the same respective argparse parameters. If no description is given, it defaults to the decorated function's docstring, if present. If add_nos is True, every boolean option (that is, every parameter with a default of True/False or a type of bool) will have a --no- version created as well, which inverts the option. For instance, the --verbose option will have a --no-verbose counterpart. These are not mutually exclusive; whichever one appears last in the argument list will have precedence. If a parser is given, it is used instead of one generated from the function signature. In this case, no parser is created; instead, the given parser is used to parse the argv argument. The parser's results' argument names must match up with the parameter names of the decorated function. The decorated function is attached to the result as the `func` attribute, and the parser is attached as the `parser` attribute. ''' # If @autoparse(...) is used instead of @autoparse if func is None: return lambda f: autoparse( f, description=description, epilog=epilog, add_nos=add_nos, parser=parser) func_sig = signature(func) docstr_description, docstr_epilog = parse_docstring(getdoc(func)) if parser is None: parser = make_parser( func_sig, description or docstr_description, epilog or docstr_epilog, add_nos) @wraps(func) def autoparse_wrapper(argv=None): if argv is None: argv = sys.argv[1:] # Get empty argument binding, to fill with parsed arguments. This # object does all the heavy lifting of turning named arguments # into correctly bound *args and **kwargs. parsed_args = func_sig.bind_partial() parsed_args.arguments.update(vars(parser.parse_args(argv))) return func(*parsed_args.args, **parsed_args.kwargs) # TODO: attach an updated __signature__ to autoparse_wrapper, just in case. # Attach the wrapped function and parser, and return the wrapper. autoparse_wrapper.func = func autoparse_wrapper.parser = parser return autoparse_wrapper @contextmanager def smart_open(filename_or_file, *args, **kwargs): ''' This context manager allows you to open a filename, if you want to default to some already-existing file object, like sys.stdout, which shouldn't be closed at the end of the context. If the filename argument is a str, bytes, or int, the file object is created via a call to open with the given *args and **kwargs, sent to the context, and closed at the end of the context, just like "with open(filename) as f:". If it isn't one of the openable types, the object is simply sent to the context unchanged, and left unclosed at the end of the context. Example: def work_with_file(name=sys.stdout): with smart_open(name) as f: # Works correctly if name is a str filename or sys.stdout print("Some stuff", file=f) # If it was a filename, f is closed at the end here. ''' if isinstance(filename_or_file, (str, bytes, int)): with open(filename_or_file, *args, **kwargs) as file: yield file else: yield filename_or_file
TooManySplitsError
python
apache__airflow
airflow-core/src/airflow/utils/event_scheduler.py
{ "start": 948, "end": 1654 }
class ____(scheduler, LoggingMixin): """General purpose event scheduler.""" def call_regular_interval( self, delay: float, action: Callable, arguments=(), kwargs=None, ): """Call a function at (roughly) a given interval.""" def repeat(*args, **kwargs): self.log.debug("Calling %s", action) action(*args, **kwargs) # This is not perfect. If we want a timer every 60s, but action # takes 10s to run, this will run it every 70s. # Good enough for now self.enter(delay, 1, repeat, args, kwargs) self.enter(delay, 1, repeat, arguments, kwargs or {})
EventScheduler
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/metrics_test.py
{ "start": 61017, "end": 66188 }
class ____(test.TestCase): def setUp(self): np.random.seed(1) ops.reset_default_graph() @test_util.run_deprecated_v1 def testVars(self): metrics.sensitivity_at_specificity( predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)), specificity=0.7) _assert_metric_variables(self, ('sensitivity_at_specificity/true_positives:0', 'sensitivity_at_specificity/false_negatives:0', 'sensitivity_at_specificity/false_positives:0', 'sensitivity_at_specificity/true_negatives:0')) @test_util.run_deprecated_v1 def testMetricsCollection(self): my_collection_name = '__metrics__' mean, _ = metrics.sensitivity_at_specificity( predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)), specificity=0.7, metrics_collections=[my_collection_name]) self.assertListEqual(ops.get_collection(my_collection_name), [mean]) @test_util.run_deprecated_v1 def testUpdatesCollection(self): my_collection_name = '__updates__' _, update_op = metrics.sensitivity_at_specificity( predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)), specificity=0.7, updates_collections=[my_collection_name]) self.assertListEqual(ops.get_collection(my_collection_name), [update_op]) @test_util.run_deprecated_v1 def testValueTensorIsIdempotent(self): predictions = random_ops.random_uniform( (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1) labels = random_ops.random_uniform( (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1) sensitivity, update_op = metrics.sensitivity_at_specificity( labels, predictions, specificity=0.7) with self.cached_session(): self.evaluate(variables.local_variables_initializer()) # Run several updates. for _ in range(10): self.evaluate(update_op) # Then verify idempotency. initial_sensitivity = self.evaluate(sensitivity) for _ in range(10): self.assertAlmostEqual(initial_sensitivity, self.evaluate(sensitivity), 5) @test_util.run_deprecated_v1 def testAllCorrect(self): inputs = np.random.randint(0, 2, size=(100, 1)) predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32) labels = constant_op.constant(inputs) specificity, update_op = metrics.sensitivity_at_specificity( labels, predictions, specificity=0.7) with self.cached_session(): self.evaluate(variables.local_variables_initializer()) self.assertAlmostEqual(1.0, self.evaluate(update_op), 6) self.assertAlmostEqual(1.0, self.evaluate(specificity), 6) @test_util.run_deprecated_v1 def testSomeCorrectHighSpecificity(self): predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9] labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1] predictions = constant_op.constant( predictions_values, dtype=dtypes_lib.float32) labels = constant_op.constant(labels_values) specificity, update_op = metrics.sensitivity_at_specificity( labels, predictions, specificity=0.8) with self.cached_session(): self.evaluate(variables.local_variables_initializer()) self.assertAlmostEqual(0.8, self.evaluate(update_op)) self.assertAlmostEqual(0.8, self.evaluate(specificity)) @test_util.run_deprecated_v1 def testSomeCorrectLowSpecificity(self): predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26] labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1] predictions = constant_op.constant( predictions_values, dtype=dtypes_lib.float32) labels = constant_op.constant(labels_values) specificity, update_op = metrics.sensitivity_at_specificity( labels, predictions, specificity=0.4) with self.cached_session(): self.evaluate(variables.local_variables_initializer()) self.assertAlmostEqual(0.6, self.evaluate(update_op)) self.assertAlmostEqual(0.6, self.evaluate(specificity)) @test_util.run_deprecated_v1 def testWeighted_multipleLabelDtypes(self): for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32): predictions_values = [ 0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26] labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1] weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] predictions = constant_op.constant( predictions_values, dtype=dtypes_lib.float32) labels = math_ops.cast(labels_values, dtype=label_dtype) weights = constant_op.constant(weights_values) specificity, update_op = metrics.sensitivity_at_specificity( labels, predictions, weights=weights, specificity=0.4) with self.cached_session(): self.evaluate(variables.local_variables_initializer()) self.assertAlmostEqual(0.675, self.evaluate(update_op)) self.assertAlmostEqual(0.675, self.evaluate(specificity)) # TODO(nsilberman): Break this up into two sets of tests.
SensitivityAtSpecificityTest
python
networkx__networkx
networkx/classes/tests/test_reportviews.py
{ "start": 5362, "end": 6647 }
class ____: @classmethod def setup_class(cls): cls.G = nx.path_graph(9) cls.G.nodes[3]["foo"] = "bar" cls.nv = cls.G.nodes def n_its(self, nodes): return set(nodes) def test_len(self): G = self.G.copy() nv = G.nodes assert len(nv) == 9 G.remove_node(7) assert len(nv) == 8 G.add_node(9) assert len(nv) == 9 def test_and(self): nv = self.nv some_nodes = self.n_its(range(5, 12)) assert nv & some_nodes == self.n_its(range(5, 9)) assert some_nodes & nv == self.n_its(range(5, 9)) def test_or(self): nv = self.nv some_nodes = self.n_its(range(5, 12)) assert nv | some_nodes == self.n_its(range(12)) assert some_nodes | nv == self.n_its(range(12)) def test_xor(self): nv = self.nv some_nodes = self.n_its(range(5, 12)) nodes = {0, 1, 2, 3, 4, 9, 10, 11} assert nv ^ some_nodes == self.n_its(nodes) assert some_nodes ^ nv == self.n_its(nodes) def test_sub(self): nv = self.nv some_nodes = self.n_its(range(5, 12)) assert nv - some_nodes == self.n_its(range(5)) assert some_nodes - nv == self.n_its(range(9, 12))
TestNodeViewSetOps
python
django-extensions__django-extensions
django_extensions/management/commands/admin_generator.py
{ "start": 3505, "end": 9569 }
class ____(UnicodeMixin): PRINTABLE_PROPERTIES = ( "list_display", "list_filter", "raw_id_fields", "search_fields", "prepopulated_fields", "date_hierarchy", ) def __init__( self, model, raw_id_threshold=RAW_ID_THRESHOLD, list_filter_threshold=LIST_FILTER_THRESHOLD, search_field_names=SEARCH_FIELD_NAMES, date_hierarchy_names=DATE_HIERARCHY_NAMES, prepopulated_field_names=PREPOPULATED_FIELD_NAMES, **options, ): self.model = model self.list_display = [] self.list_filter = [] self.raw_id_fields = [] self.search_fields = [] self.prepopulated_fields = {} self.date_hierarchy = None self.search_field_names = search_field_names self.raw_id_threshold = raw_id_threshold self.list_filter_threshold = list_filter_threshold self.date_hierarchy_names = date_hierarchy_names self.prepopulated_field_names = prepopulated_field_names def __repr__(self): return "<%s[%s]>" % ( self.__class__.__name__, self.name, ) @property def name(self): return self.model.__name__ def _process_many_to_many(self, meta): raw_id_threshold = self.raw_id_threshold for field in meta.local_many_to_many: if hasattr(field, "remote_field"): related_model = getattr( field.remote_field, "related_model", field.remote_field.model ) else: raise CommandError("Unable to process ManyToMany relation") related_objects = related_model.objects.all() if related_objects[:raw_id_threshold].count() < raw_id_threshold: yield field.name def _process_fields(self, meta): parent_fields = meta.parents.values() for field in meta.fields: name = self._process_field(field, parent_fields) if name: yield name def _process_foreign_key(self, field): raw_id_threshold = self.raw_id_threshold list_filter_threshold = self.list_filter_threshold max_count = max(list_filter_threshold, raw_id_threshold) if hasattr(field, "remote_field"): related_model = getattr( field.remote_field, "related_model", field.remote_field.model ) else: raise CommandError("Unable to process ForeignKey relation") related_count = related_model.objects.all() related_count = related_count[:max_count].count() if related_count >= raw_id_threshold: self.raw_id_fields.append(field.name) elif related_count < list_filter_threshold: self.list_filter.append(field.name) else: # pragma: no cover pass # Do nothing :) def _process_field(self, field, parent_fields): if field in parent_fields: return field_name = str(field.name) self.list_display.append(field_name) if isinstance(field, LIST_FILTER): if isinstance(field, models.ForeignKey): self._process_foreign_key(field) else: self.list_filter.append(field_name) if field.name in self.search_field_names: self.search_fields.append(field_name) return field_name def __unicode__(self): return "".join(self._unicode_generator()) def _yield_value(self, key, value): if isinstance(value, (list, set, tuple)): return self._yield_tuple(key, tuple(value)) elif isinstance(value, dict): return self._yield_dict(key, value) elif isinstance(value, str): return self._yield_string(key, value) else: # pragma: no cover raise TypeError("%s is not supported in %r" % (type(value), value)) def _yield_string(self, key, value, converter=repr): return PRINT_ADMIN_PROPERTY % dict( key=key, value=converter(value), ) def _yield_dict(self, key, value): row_parts = [] row = self._yield_string(key, value) if len(row) > MAX_LINE_WIDTH: row_parts.append(self._yield_string(key, "{", str)) for k, v in value.items(): row_parts.append("%s%r: %r" % (2 * INDENT_WIDTH * " ", k, v)) row_parts.append(INDENT_WIDTH * " " + "}") row = "\n".join(row_parts) return row def _yield_tuple(self, key, value): row_parts = [] row = self._yield_string(key, value) if len(row) > MAX_LINE_WIDTH: row_parts.append(self._yield_string(key, "(", str)) for v in value: row_parts.append(2 * INDENT_WIDTH * " " + repr(v) + ",") row_parts.append(INDENT_WIDTH * " " + ")") row = "\n".join(row_parts) return row def _unicode_generator(self): self._process() for key in self.PRINTABLE_PROPERTIES: value = getattr(self, key) if value: yield self._yield_value(key, value) def _process(self): meta = self.model._meta self.raw_id_fields += list(self._process_many_to_many(meta)) field_names = list(self._process_fields(meta)) for field_name in self.date_hierarchy_names[::-1]: if field_name in field_names and not self.date_hierarchy: self.date_hierarchy = field_name break for k in sorted(self.prepopulated_field_names): k, vs = k.split("=", 1) vs = vs.split(",") if k in field_names: incomplete = False for v in vs: if v not in field_names: incomplete = True break if not incomplete: self.prepopulated_fields[k] = vs self.processed = True
AdminModel
python
networkx__networkx
networkx/classes/tests/test_multidigraph.py
{ "start": 15272, "end": 16342 }
class ____(TestMultiDiGraph): def setup_method(self): self.Graph = MultiDiGraphSubClass # build K3 self.k3edges = [(0, 1), (0, 2), (1, 2)] self.k3nodes = [0, 1, 2] self.K3 = self.Graph() self.K3._succ = self.K3.adjlist_outer_dict_factory( { 0: self.K3.adjlist_inner_dict_factory(), 1: self.K3.adjlist_inner_dict_factory(), 2: self.K3.adjlist_inner_dict_factory(), } ) # K3._adj is synced with K3._succ self.K3._pred = {0: {}, 1: {}, 2: {}} for u in self.k3nodes: for v in self.k3nodes: if u == v: continue d = {0: {}} self.K3._succ[u][v] = d self.K3._pred[v][u] = d self.K3._node = self.K3.node_dict_factory() self.K3._node[0] = self.K3.node_attr_dict_factory() self.K3._node[1] = self.K3.node_attr_dict_factory() self.K3._node[2] = self.K3.node_attr_dict_factory()
TestMultiDiGraphSubclass
python
huggingface__transformers
src/transformers/models/fuyu/image_processing_fuyu.py
{ "start": 2002, "end": 2563 }
class ____(ImagesKwargs, total=False): r""" patch_size (`dict[str, int]`, *optional*, defaults to `{"height": 30, "width": 30}`): Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches. padding_value (`float`, *optional*, defaults to 1.0): The value to pad the image with. padding_mode (`str`, *optional*, defaults to "constant"): The padding mode to use when padding the image. """ patch_size: Optional[SizeDict] padding_value: float padding_mode: str
FuyuImagesKwargs
python
apache__airflow
providers/common/sql/src/airflow/providers/common/sql/operators/sql.py
{ "start": 42490, "end": 46308 }
class ____(BaseSQLOperator): """ Performs a value check using sql code against a minimum threshold and a maximum threshold. Thresholds can be in the form of a numeric value OR a sql statement that results in a numeric value. :param sql: the sql to be executed. (templated) :param conn_id: the connection ID used to connect to the database. :param database: name of database which overwrites the defined one in connection :param min_threshold: numerical value or min threshold sql to be executed (templated) :param max_threshold: numerical value or max threshold sql to be executed (templated) .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:SQLThresholdCheckOperator` """ template_fields: Sequence[str] = ( "sql", "min_threshold", "max_threshold", *BaseSQLOperator.template_fields, ) template_ext: Sequence[str] = ( ".hql", ".sql", ) template_fields_renderers: ClassVar[dict] = {"sql": "sql"} def __init__( self, *, sql: str, min_threshold: Any, max_threshold: Any, conn_id: str | None = None, database: str | None = None, **kwargs, ): super().__init__(conn_id=conn_id, database=database, **kwargs) self.sql = sql self.min_threshold = min_threshold self.max_threshold = max_threshold def execute(self, context: Context): hook = self.get_db_hook() result = hook.get_first(self.sql) # if the query returns 0 rows result will be None so cannot be indexed into # also covers indexing out of bounds on empty list, tuple etc. if returned try: result = result[0] except (TypeError, IndexError): self._raise_exception(f"The following query returned zero rows: {self.sql}") min_threshold = _convert_to_float_if_possible(self.min_threshold) max_threshold = _convert_to_float_if_possible(self.max_threshold) if isinstance(min_threshold, float): lower_bound = min_threshold else: lower_bound = hook.get_first(min_threshold)[0] if isinstance(max_threshold, float): upper_bound = max_threshold else: upper_bound = hook.get_first(max_threshold)[0] meta_data = { "result": result, "task_id": self.task_id, "min_threshold": lower_bound, "max_threshold": upper_bound, "within_threshold": lower_bound <= result <= upper_bound, } self.push(meta_data) if not meta_data["within_threshold"]: result = ( round(meta_data.get("result"), 2) # type: ignore[arg-type] if meta_data.get("result") is not None else "<None>" ) error_msg = ( f'Threshold Check: "{meta_data.get("task_id")}" failed.\n' f"DAG: {self.dag_id}\nTask_id: {meta_data.get('task_id')}\n" f"Check description: {meta_data.get('description')}\n" f"SQL: {self.sql}\n" f"Result: {result} is not within thresholds " f"{meta_data.get('min_threshold')} and {meta_data.get('max_threshold')}" ) self._raise_exception(error_msg) self.log.info("Test %s Successful.", self.task_id) def push(self, meta_data): """ Send data check info and metadata to an external database. Default functionality will log metadata. """ info = "\n".join(f"""{key}: {item}""" for key, item in meta_data.items()) self.log.info("Log from %s:\n%s", self.dag_id, info)
SQLThresholdCheckOperator
python
mlflow__mlflow
tests/llama_index/sample_code/simple_workflow.py
{ "start": 208, "end": 860 }
class ____(Workflow): llm = OpenAI() @step async def generate_joke(self, ev: StartEvent) -> JokeEvent: topic = ev.topic prompt = f"Write your best joke about {topic}." response = await self.llm.acomplete(prompt) return JokeEvent(joke=str(response)) @step async def critique_joke(self, ev: JokeEvent) -> StopEvent: joke = ev.joke prompt = f"Give a thorough analysis and critique of the following joke: {joke}" response = await self.llm.acomplete(prompt) return StopEvent(result=str(response)) w = JokeFlow(timeout=10, verbose=False) mlflow.models.set_model(w)
JokeFlow
python
rushter__MLAlgorithms
mla/neuralnet/constraints.py
{ "start": 501, "end": 584 }
class ____(object): def clip(self, p): return np.clip(p, -5, 5)
SmallNorm
python
sqlalchemy__sqlalchemy
test/ext/asyncio/test_session.py
{ "start": 2670, "end": 8557 }
class ____(AsyncFixture): def test_requires_async_engine(self, async_engine): testing.assert_raises_message( exc.ArgumentError, "AsyncEngine expected, got Engine", AsyncSession, bind=async_engine.sync_engine, ) def test_info(self, async_session): async_session.info["foo"] = "bar" eq_(async_session.sync_session.info, {"foo": "bar"}) def test_init(self, async_engine): ss = AsyncSession(bind=async_engine) is_(ss.bind, async_engine) binds = {Table: async_engine} ss = AsyncSession(binds=binds) is_(ss.binds, binds) @async_test @testing.combinations((True,), (False,), argnames="use_scalar") @testing.requires.sequences async def test_sequence_execute( self, async_session: AsyncSession, metadata, use_scalar ): seq = normalize_sequence( config, Sequence("some_sequence", metadata=metadata) ) sync_connection = (await async_session.connection()).sync_connection await (await async_session.connection()).run_sync(metadata.create_all) if use_scalar: eq_( await async_session.scalar(seq), sync_connection.dialect.default_sequence_base, ) else: with expect_deprecated( r"Using the .execute\(\) method to invoke a " r"DefaultGenerator object is deprecated; please use " r"the .scalar\(\) method." ): eq_( await async_session.execute(seq), sync_connection.dialect.default_sequence_base, ) @async_test async def test_close_all(self, async_engine): User = self.classes.User s1 = AsyncSession(async_engine) u1 = User() s1.add(u1) s2 = AsyncSession(async_engine) u2 = User() s2.add(u2) in_(u1, s1) in_(u2, s2) await close_all_sessions() not_in(u1, s1) not_in(u2, s2) @async_test async def test_session_close_all_deprecated(self, async_engine): User = self.classes.User s1 = AsyncSession(async_engine) u1 = User() s1.add(u1) s2 = AsyncSession(async_engine) u2 = User() s2.add(u2) in_(u1, s1) in_(u2, s2) with expect_deprecated( r"The AsyncSession.close_all\(\) method is deprecated and will " "be removed in a future release. " ): await AsyncSession.close_all() not_in(u1, s1) not_in(u2, s2) @async_test @testing.variation("session_type", ["plain", "sessionmaker"]) @testing.variation("merge", [True, False]) @testing.variation("method", ["scalar", "execute", "scalars", "get"]) @testing.variation("add_statement_options", [True, False]) async def test_execution_options( self, async_engine, session_type: testing.Variation, merge: testing.Variation, method: testing.Variation, add_statement_options: testing.Variation, ): User = self.classes.User session_execution_options = { "populate_existing": True, "autoflush": False, "opt1": "z", "opt5": "q", } expected_opts = session_execution_options if add_statement_options: statement_options = {"opt2": "w", "opt4": "y", "opt5": "w"} expected_opts = {**expected_opts, **statement_options} else: statement_options = {} if merge: query_opts = { "compiled_cache": {}, "opt1": "q", "opt2": "p", "opt3": "r", "populate_existing": False, } expected_opts = {**expected_opts, **query_opts} else: query_opts = {} if session_type.plain: sess = AsyncSession( async_engine, execution_options=session_execution_options ) elif session_type.sessionmaker: maker = async_sessionmaker( async_engine, execution_options=session_execution_options ) sess = maker() else: session_type.fail() gather_options = {} @event.listens_for(sess.sync_session, "do_orm_execute") def check(ctx) -> None: assert not gather_options gather_options.update(ctx.execution_options) if method.scalar: statement = select(User).limit(1) if add_statement_options: statement = statement.execution_options(**statement_options) await sess.scalar(statement, execution_options=query_opts) elif method.execute: statement = select(User).limit(1) if add_statement_options: statement = statement.execution_options(**statement_options) await sess.execute(statement, execution_options=query_opts) elif method.scalars: statement = select(User).limit(1) if add_statement_options: statement = statement.execution_options(**statement_options) await sess.scalars(statement, execution_options=query_opts) elif method.get: if add_statement_options: await sess.get( User, 1, execution_options={**statement_options, **query_opts}, ) else: await sess.get(User, 1, execution_options=query_opts) else: method.fail() await sess.close() for key, value in expected_opts.items(): eq_(gather_options[key], value)
AsyncSessionTest
python
pypa__setuptools
setuptools/_distutils/tests/test_install_data.py
{ "start": 220, "end": 2464 }
class ____( support.TempdirManager, ): def test_simple_run(self): pkg_dir, dist = self.create_dist() cmd = install_data(dist) cmd.install_dir = inst = os.path.join(pkg_dir, 'inst') # data_files can contain # - simple files # - a Path object # - a tuple with a path, and a list of files one = os.path.join(pkg_dir, 'one') self.write_file(one, 'xxx') inst2 = os.path.join(pkg_dir, 'inst2') two = os.path.join(pkg_dir, 'two') self.write_file(two, 'xxx') three = pathlib.Path(pkg_dir) / 'three' self.write_file(three, 'xxx') cmd.data_files = [one, (inst2, [two]), three] assert cmd.get_inputs() == [one, (inst2, [two]), three] # let's run the command cmd.ensure_finalized() cmd.run() # let's check the result assert len(cmd.get_outputs()) == 3 rthree = os.path.split(three)[-1] assert os.path.exists(os.path.join(inst, rthree)) rtwo = os.path.split(two)[-1] assert os.path.exists(os.path.join(inst2, rtwo)) rone = os.path.split(one)[-1] assert os.path.exists(os.path.join(inst, rone)) cmd.outfiles = [] # let's try with warn_dir on cmd.warn_dir = True cmd.ensure_finalized() cmd.run() # let's check the result assert len(cmd.get_outputs()) == 3 assert os.path.exists(os.path.join(inst, rthree)) assert os.path.exists(os.path.join(inst2, rtwo)) assert os.path.exists(os.path.join(inst, rone)) cmd.outfiles = [] # now using root and empty dir cmd.root = os.path.join(pkg_dir, 'root') inst5 = os.path.join(pkg_dir, 'inst5') four = os.path.join(cmd.install_dir, 'four') self.write_file(four, 'xx') cmd.data_files = [one, (inst2, [two]), three, ('inst5', [four]), (inst5, [])] cmd.ensure_finalized() cmd.run() # let's check the result assert len(cmd.get_outputs()) == 5 assert os.path.exists(os.path.join(inst, rthree)) assert os.path.exists(os.path.join(inst2, rtwo)) assert os.path.exists(os.path.join(inst, rone))
TestInstallData
python
apache__airflow
helm-tests/tests/helm_tests/airflow_aux/test_cleanup_pods.py
{ "start": 914, "end": 2392 }
class ____: """Tests cleanup pods deployments.""" def test_should_have_a_schedule_with_defaults(self): doc = render_chart( values={ "cleanup": {"enabled": True}, }, show_only=["templates/cleanup/cleanup-cronjob.yaml"], )[0] assert doc["spec"]["schedule"] == "*/15 * * * *" cron_tests = [ ("release-name", "*/5 * * * *", "*/5 * * * *"), ("something-else", "@hourly", "@hourly"), ( "custom-name", '{{- add 3 (regexFind ".$" (adler32sum .Release.Name)) -}}-59/15 * * * *', "7-59/15 * * * *", ), ( "airflow-rules", '{{- add 3 (regexFind ".$" (adler32sum .Release.Name)) -}}-59/15 * * * *', "10-59/15 * * * *", ), ] @pytest.mark.parametrize( ("release_name", "schedule_value", "schedule_result"), cron_tests, ids=[x[0] for x in cron_tests], ) def test_should_work_with_custom_schedule_string(self, release_name, schedule_value, schedule_result): doc = render_chart( name=release_name, values={ "cleanup": { "enabled": True, "schedule": schedule_value, }, }, show_only=["templates/cleanup/cleanup-cronjob.yaml"], )[0] assert doc["spec"]["schedule"] == schedule_result
TestCleanupDeployment
python
ray-project__ray
python/ray/air/tests/test_integration_wandb.py
{ "start": 3397, "end": 22405 }
class ____: def test_wandb_logger_project_group(self, monkeypatch): monkeypatch.setenv(WANDB_PROJECT_ENV_VAR, "test_project_from_env_var") monkeypatch.setenv(WANDB_GROUP_ENV_VAR, "test_group_from_env_var") # Read project and group name from environment variable logger = WandbTestExperimentLogger(api_key="1234") logger.setup() assert logger.project == "test_project_from_env_var" assert logger.group == "test_group_from_env_var" def test_wandb_logger_api_key_config(self, monkeypatch): # No API key with pytest.raises(ValueError): logger = WandbTestExperimentLogger(project="test_project") logger.setup() # Fetch API key from argument even if external hook and WANDB_ENV_VAR set monkeypatch.setenv( WANDB_SETUP_API_KEY_HOOK, "ray._private.test_utils.wandb_setup_api_key_hook" ) monkeypatch.setenv( WANDB_ENV_VAR, "abcde", ) # API Key in config logger = WandbTestExperimentLogger(project="test_project", api_key="1234") logger.setup() assert os.environ[WANDB_ENV_VAR] == "1234" def test_wandb_logger_api_key_file(self, monkeypatch): # Fetch API key from file even if external hook and WANDB_ENV_VAR set monkeypatch.setenv( WANDB_SETUP_API_KEY_HOOK, "ray._private.test_utils.wandb_setup_api_key_hook" ) monkeypatch.setenv( WANDB_ENV_VAR, "abcde", ) # API Key file with tempfile.NamedTemporaryFile("wt") as fp: fp.write("5678") fp.flush() logger = WandbTestExperimentLogger( project="test_project", api_key_file=fp.name ) logger.setup() assert os.environ[WANDB_ENV_VAR] == "5678" def test_wandb_logger_api_key_env_var(self, monkeypatch): # API Key from env var takes precedence over external hook and # logged in W&B API key monkeypatch.setenv( WANDB_SETUP_API_KEY_HOOK, "ray._private.test_utils.wandb_setup_api_key_hook" ) monkeypatch.setenv( WANDB_ENV_VAR, "1234", ) mock_wandb = Mock(api=Mock(api_key="efgh")) with patch.multiple("ray.air.integrations.wandb", wandb=mock_wandb): logger = WandbTestExperimentLogger(project="test_project") logger.setup() assert os.environ[WANDB_ENV_VAR] == "1234" mock_wandb.ensure_configured.assert_not_called() def test_wandb_logger_api_key_external_hook(self, monkeypatch): # API Key from external hook if API key not provided through # argument or WANDB_ENV_VAR and user not already logged in to W&B monkeypatch.setenv( WANDB_SETUP_API_KEY_HOOK, "ray._private.test_utils.wandb_setup_api_key_hook" ) mock_wandb = Mock(api=Mock(api_key=None)) with patch.multiple("ray.air.integrations.wandb", wandb=mock_wandb): logger = WandbTestExperimentLogger(project="test_project") logger.setup() assert os.environ[WANDB_ENV_VAR] == "abcd" mock_wandb.ensure_configured.assert_called_once() mock_wandb = Mock(ensure_configured=Mock(side_effect=AttributeError())) with patch.multiple("ray.air.integrations.wandb", wandb=mock_wandb): logger = WandbTestExperimentLogger(project="test_project") logger.setup() assert os.environ[WANDB_ENV_VAR] == "abcd" def test_wandb_logger_api_key_from_wandb_login(self, monkeypatch): # No API key should get set if user is already logged in to W&B # and they didn't pass API key through argument or env var. # External hook should not be called because user already logged # in takes precedence. 
monkeypatch.setenv( WANDB_SETUP_API_KEY_HOOK, "ray._private.test_utils.wandb_setup_api_key_hook" ) mock_wandb = Mock() with patch.multiple("ray.air.integrations.wandb", wandb=mock_wandb): logger = WandbTestExperimentLogger(project="test_project") logger.setup() assert os.environ.get(WANDB_ENV_VAR) is None mock_wandb.ensure_configured.assert_called_once() def test_wandb_logger_run_location_external_hook(self, monkeypatch): with patch.dict(os.environ): # No project with pytest.raises(ValueError): logger = WandbTestExperimentLogger(api_key="1234") logger.setup() # Project and group env vars from external hook monkeypatch.setenv( WANDB_POPULATE_RUN_LOCATION_HOOK, FAKE_WANDB_POPULATE_RUN_LOCATION_HOOK_IMPORT_PATH, ) logger = WandbTestExperimentLogger(api_key="1234") logger.setup() assert os.environ[WANDB_PROJECT_ENV_VAR] == "test_project" assert os.environ[WANDB_GROUP_ENV_VAR] == "test_group" def test_wandb_logger_start(self, monkeypatch, trial): monkeypatch.setenv(WANDB_ENV_VAR, "9012") # API Key in env logger = WandbTestExperimentLogger(project="test_project") logger.setup() # From now on, the API key is in the env variable. logger.log_trial_start(trial) logger.log_trial_end(trial) logger.on_experiment_end(trials=[trial]) logger_state = logger.trial_logging_actor_states[trial] assert logger_state.kwargs["project"] == "test_project" assert logger_state.kwargs["id"] == trial.trial_id assert logger_state.kwargs["name"] == trial.trial_name assert logger_state.kwargs["group"] == trial.experiment_dir_name assert "config" in logger_state.exclude del logger # log config. logger = WandbTestExperimentLogger(project="test_project", log_config=True) logger.log_trial_start(trial) logger.log_trial_end(trial) logger.on_experiment_end(trials=[trial]) logger_state = logger.trial_logging_actor_states[trial] assert "config" not in logger_state.exclude assert "metric" not in logger_state.exclude del logger # Exclude metric. logger = WandbTestExperimentLogger(project="test_project", excludes=["metric"]) logger.log_trial_start(trial) logger.log_trial_end(trial) logger.on_experiment_end(trials=[trial]) logger_state = logger.trial_logging_actor_states[trial] assert "config" in logger_state.exclude assert "metric" in logger_state.exclude del logger def test_wandb_logger_reporting(self, trial): logger = WandbTestExperimentLogger( project="test_project", api_key="1234", excludes=["metric2"] ) logger.on_trial_start(0, [], trial) r1 = { "metric1": 0.8, "metric2": 1.4, "metric3": np.asarray(32.0), "metric4": np.float32(32.0), "const": "text", "config": trial.config, } logger.on_trial_result(0, [], trial, r1) logger.on_trial_complete(0, [], trial) logger.on_experiment_end(trials=[trial]) logged = logger.trial_logging_actor_states[trial].logs[0] assert "metric1" in logged assert "metric2" not in logged assert "metric3" in logged assert "metric4" in logged assert "const" not in logged assert "config" not in logged def test_wandb_logger_auto_config_keys(self, trial): logger = WandbTestExperimentLogger(project="test_project", api_key="1234") logger.on_trial_start(iteration=0, trials=[], trial=trial) result = {key: 0 for key in WandbLoggerCallback.AUTO_CONFIG_KEYS} logger.on_trial_result(0, [], trial, result) logger.on_trial_complete(0, [], trial) logger.on_experiment_end(trials=[trial]) config = logger.trial_logging_actor_states[trial].config # The results in `AUTO_CONFIG_KEYS` should be saved as training configuration # instead of output metrics. 
assert set(WandbLoggerCallback.AUTO_CONFIG_KEYS) < set(config) def test_wandb_logger_exclude_config(self): trial = Trial( config={"param1": 0, "param2": 0}, trial_id=0, trial_name="trial_0", experiment_dir_name="trainable", placement_group_factory=PlacementGroupFactory([{"CPU": 1}]), local_path=tempfile.gettempdir(), ) logger = WandbTestExperimentLogger( project="test_project", api_key="1234", excludes=(["param2"] + WandbLoggerCallback.AUTO_CONFIG_KEYS), ) logger.on_trial_start(iteration=0, trials=[], trial=trial) # We need to test that `excludes` also applies to `AUTO_CONFIG_KEYS`. result = {key: 0 for key in WandbLoggerCallback.AUTO_CONFIG_KEYS} logger.on_trial_result(0, [], trial, result) logger.on_trial_complete(0, [], trial) logger.on_experiment_end(trials=[trial]) config = logger.trial_logging_actor_states[trial].config assert set(config) == {"param1"} def test_set_serializability_result(self, trial): """Tests that objects that contain sets can be serialized by wandb.""" logger = WandbTestExperimentLogger( project="test_project", api_key="1234", excludes=["metric2"] ) logger.on_trial_start(0, [], trial) # Testing for https://github.com/ray-project/ray/issues/28541 rllib_result = { "env": "simple_spread", "framework": "torch", "num_gpus": 1, "num_workers": 20, "num_envs_per_env_runner": 1, "compress_observations": True, "lambda": 0.99, "train_batch_size": 512, "sgd_minibatch_size": 32, "num_sgd_iter": 5, "batch_mode": "truncate_episodes", "entropy_coeff": 0.01, "lr": 2e-05, "multiagent": { "policies": {"shared_policy"}, "policy_mapping_fn": lambda x: x, }, } logger.on_trial_result(0, [], trial, rllib_result) logger.on_trial_complete(0, [], trial) logger.on_experiment_end(trials=[trial]) logged = logger.trial_logging_actor_states[trial].logs[0] assert logged != "serialization error" def test_wandb_logging_actor_api_key(self, trial, monkeypatch): """Tests that the wandb API key get propagated as an environment variable to the remote logging actors.""" def mock_run(actor_cls): return os.environ.get(WANDB_ENV_VAR) monkeypatch.setattr(_MockWandbLoggingActor, "run", mock_run) logger = WandbLoggerCallback( project="test_project", api_key="1234", excludes=["metric2"] ) logger._logger_actor_cls = _MockWandbLoggingActor logger.setup() logger.log_trial_start(trial) actor_env_var = ray.get(logger._trial_logging_futures[trial]) assert actor_env_var == "1234" def test_wandb_finish(self, trial, tmp_path): """Test that logging actors are cleaned up upon experiment completion.""" marker = tmp_path / "hang_marker" marker.write_text("") class HangingFinishMockWandbAPI(_MockWandbAPI): def finish(self): while marker.exists(): time.sleep(0.1) logger = get_mock_wandb_logger( mock_api_cls=HangingFinishMockWandbAPI, upload_timeout=1.0, ) logger.setup() logger.on_trial_start(0, [], trial) logger.on_trial_complete(0, [], trial) # Signalling stop will not cleanup fully due to the hanging finish assert logger._trial_logging_actors marker.unlink() # wandb.finish has ended -> experiment end hook should cleanup actors fully logger.on_experiment_end(trials=[trial]) assert not logger._trial_logging_actors def test_wandb_kill_hanging_actor(self, trial): """Test that logging actors are killed if exceeding the upload timeout upon experiment completion.""" class HangingFinishMockWandbAPI(_MockWandbAPI): def finish(self): time.sleep(5) logger = get_mock_wandb_logger( mock_api_cls=HangingFinishMockWandbAPI, upload_timeout=0.1, ) logger.setup() logger.on_trial_start(0, [], trial) logger.on_trial_complete(0, [], trial) # 
Signalling stop will not cleanup fully due to the hanging finish assert logger._trial_logging_actors actor = logger._trial_logging_actors[trial] # Experiment end hook should kill actors since upload_timeout < 5 logger.on_experiment_end(trials=[trial]) assert not logger._trial_logging_actors gc.collect() with pytest.raises(RayActorError): ray.get(actor.get_state.remote()) def test_wandb_destructor(self, trial): """Test that the WandbLoggerCallback destructor forcefully cleans up logging actors.""" class SlowFinishMockWandbAPI(_MockWandbAPI): def finish(self): time.sleep(5) logger = get_mock_wandb_logger( mock_api_cls=SlowFinishMockWandbAPI, upload_timeout=1.0, ) logger.setup() # Triggers logging actor run loop logger.on_trial_start(0, [], trial) actor = logger._trial_logging_actors[trial] del logger gc.collect() with pytest.raises(RayActorError): ray.get(actor.get_state.remote()) def test_wandb_logging_actor_fault_tolerance(self, trial): """Tests that failing wandb logging actors are restarted""" with tempfile.TemporaryDirectory() as tempdir: fail_marker = Path(tempdir) / "fail_marker" class _FailingWandbLoggingActor(_MockWandbLoggingActor): def _handle_result(self, result): if ( result.get("training_iteration") == 3 and not fail_marker.exists() ): fail_marker.write_text("Ok") raise SystemExit return super()._handle_result(result) logger = WandbLoggerCallback( project="test_project", api_key="1234", excludes=["metric2"] ) logger._logger_actor_cls = _FailingWandbLoggingActor logger.setup() logger.log_trial_start(trial) actor = logger._trial_logging_actors[trial] queue = logger._trial_queues[trial] logger.log_trial_result(1, trial, result={"training_iteration": 1}) logger.log_trial_result(2, trial, result={"training_iteration": 2}) logger.log_trial_result(3, trial, result={"training_iteration": 3}) logger.log_trial_result(4, trial, result={"training_iteration": 4}) logger.log_trial_result(5, trial, result={"training_iteration": 5}) queue.put((_QueueItem.END, None)) # Wait for the actor's run method to complete ray.get(logger._trial_logging_futures[trial]) state = ray.get(actor.get_state.remote()) assert [metrics["training_iteration"] for metrics in state.logs] == [4, 5] def test_wandb_restart(self, trial): """Test that the WandbLoggerCallback reuses actors for trial restarts.""" logger = WandbLoggerCallback(project="test_project", api_key="1234") logger._logger_actor_cls = _MockWandbLoggingActor logger.setup() assert len(logger._trial_logging_futures) == 0 assert len(logger._logging_future_to_trial) == 0 logger.log_trial_start(trial) assert len(logger._trial_logging_futures) == 1 assert len(logger._logging_future_to_trial) == 1 logger.log_trial_start(trial) assert len(logger._trial_logging_futures) == 1 assert len(logger._logging_future_to_trial) == 1 def test_wandb_logging_process_run_info_hook(monkeypatch): """ Test WANDB_PROCESS_RUN_INFO_HOOK in _WandbLoggingActor is correctly called by calling _WandbLoggingActor.run() mocking out calls to wandb. 
""" mock_queue = Mock(get=Mock(return_value=(_QueueItem.END, None))) monkeypatch.setenv( "WANDB_PROCESS_RUN_INFO_HOOK", "mock_wandb_process_run_info_hook" ) with patch.object(ray.air.integrations.wandb, "load_class") as mock_load_class: logging_process = _WandbLoggingActor( logdir="/tmp", queue=mock_queue, exclude=[], to_config=[] ) logging_process._wandb = Mock() logging_process.run() logging_process._wandb.init.assert_called_once() run = logging_process._wandb.init.return_value mock_load_class.assert_called_once_with("mock_wandb_process_run_info_hook") external_hook = mock_load_class.return_value external_hook.assert_called_once_with(run) logging_process._wandb.finish.assert_called_once() def test_wandb_logger_rank_zero_only(trial, monkeypatch): """Test that logging is disabled for non-rank-0 workers when rank_zero_only is True.""" monkeypatch.setenv( WANDB_ENV_VAR, "abcde", ) mock_session = Mock() mock_session.experiment_name = "test_project" mock_session.trial_name = "trial_0" mock_session.trial_id = "trial_0" # Test case 1: rank_zero_only=True, rank 0 mock_session.world_rank = 0 with patch("ray.air.integrations.wandb.get_session", return_value=mock_session): run = setup_wandb(project="test_project", rank_zero_only=True, _wandb=Mock()) assert not isinstance(run, RunDisabled) # Test case 2: rank_zero_only=True, non-rank-0 mock_session.world_rank = 1 with patch("ray.air.integrations.wandb.get_session", return_value=mock_session): run = setup_wandb(project="test_project", rank_zero_only=True, _wandb=Mock()) assert isinstance(run, RunDisabled) # Test case 3: rank_zero_only=False, any rank mock_session.world_rank = 1 with patch("ray.air.integrations.wandb.get_session", return_value=mock_session): run = setup_wandb(project="test_project", rank_zero_only=False, _wandb=Mock()) assert not isinstance(run, RunDisabled) # Test case 4: rank_zero_only=True, no session with patch("ray.air.integrations.wandb.get_session", return_value=None): run = setup_wandb(project="test_project", rank_zero_only=True, _wandb=Mock()) assert not isinstance(run, RunDisabled) if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(["-v", __file__]))
TestWandbLogger
python
getsentry__sentry
src/sentry/plugins/bases/notify.py
{ "start": 698, "end": 7129 }
class ____(Plugin): slug = "" description = ( "Notify project members when a new event is seen for the first time, or when an " "already resolved event has changed back to unresolved." ) project_conf_form: type[forms.Form] = NotificationConfigurationForm def get_plugin_type(self) -> str: return "notification" def notify(self, notification: Notification, raise_exception: bool = False) -> None: """ This calls the notify_users method of the plugin. Normally this method eats the error and logs it but if we set raise_exception=True like we do for the test plugin button, the exception is raised """ event = notification.event try: self.notify_users( group=event.group, event=event, triggering_rules=[r.label for r in notification.rules], ) except ( ApiError, HTTPError, InvalidIdentity, PluginError, SSLError, UrllibHTTPError, ) as err: self.logger.info( "notification-plugin.notify-failed", extra={ "error": str(err), "plugin": self.slug, "project_id": event.group.project_id, "organization_id": event.group.project.organization_id, }, ) if raise_exception: raise def rule_notify(self, event, futures): rules = [] extra = {"event_id": event.event_id, "group_id": event.group_id, "plugin": self.slug} for future in futures: rules.append(future.rule) extra["rule_id"] = future.rule.id if not future.kwargs: continue raise NotImplementedError( "The default behavior for notification de-duplication does not support args" ) project = event.group.project extra["project_id"] = project.id notification = Notification(event=event, rules=rules) try: self.notify(notification) # Because plugins are deprecated, we want to ignore any errors that occur except Exception: pass def notify_users(self, group, event, triggering_rules) -> None: raise NotImplementedError def notify_about_activity(self, activity): pass def get_notification_recipients(self, project, user_option: str) -> set[int]: from sentry.users.models.user_option import UserOption alert_settings = { o.user_id: int(o.value) for o in UserOption.objects.filter(project_id=project.id, key=user_option) if o.value is not None } disabled = {u for u, v in alert_settings.items() if v == 0} member_set = set( project.member_set.exclude(user__in=disabled).values_list("user", flat=True) ) # determine members default settings members_to_check = {u for u in member_set if u not in alert_settings} if members_to_check: disabled = { uo.user_id for uo in UserOption.objects.filter( key="subscribe_by_default", user__in=members_to_check ) if str(uo.value) == "0" } member_set -= disabled return member_set def get_sendable_user_objects(self, project): """ Return a collection of user IDs that are eligible to receive notifications for the provided project. 
""" if self.get_conf_key() == "mail": user_ids = list(project.member_set.values_list("user_id", flat=True)) actors = [Actor(id=uid, actor_type=ActorType.USER) for uid in user_ids] recipients = notifications_service.get_notification_recipients( recipients=actors, type=NotificationSettingEnum.ISSUE_ALERTS, project_ids=[project.id], organization_id=project.organization_id, actor_type=ActorType.USER, ) return recipients.get(ExternalProviders.EMAIL.name) return self.get_notification_recipients(project, f"{self.get_conf_key()}:alert") def __is_rate_limited(self, group, event): return ratelimits.backend.is_limited( project=group.project, key=self.get_conf_key(), limit=10 ) def is_configured(self, project) -> bool: raise NotImplementedError def should_notify(self, group, event): project = event.project if not self.is_configured(project=project): return False # If the plugin doesn't support digests or they are not enabled, # perform rate limit checks to support backwards compatibility with # older plugins. if not ( hasattr(self, "notify_digest") and digests.backend.enabled(project) ) and self.__is_rate_limited(group, event): logger = logging.getLogger(f"sentry.plugins.{self.get_conf_key()}") logger.info("notification.rate_limited", extra={"project_id": project.id}) return False return True def test_configuration(self, project) -> None: from sentry.utils.samples import create_sample_event event = create_sample_event(project, platform="python") notification = Notification(event=event) self.notify(notification, raise_exception=True) def test_configuration_and_get_test_results(self, project): try: self.test_configuration(project) except Exception as exc: if isinstance(exc, HTTPError) and hasattr(exc.response, "text"): test_results = f"{exc}\n{exc.response.text[:256]}" elif hasattr(exc, "read") and callable(exc.read): test_results = f"{exc}\n{exc.read()[:256]}" else: if str(exc).lower().startswith("error communicating with"): test_results = str(exc)[:256] else: test_results = ( "There was an internal error with the Plugin, %s" % str(exc)[:256] ) else: test_results = "No errors returned" return test_results
NotificationPlugin
python
spyder-ide__spyder
spyder/plugins/editor/widgets/status.py
{ "start": 2125, "end": 5094 }
class ____(StatusBarWidget): """Status bar widget for system vcs.""" ID = "vcs_status" def __init__(self, parent): super().__init__(parent) self._worker_manager = WorkerManager(max_threads=1) self._git_is_working = None self._git_job_queue = None self._last_git_job = None # ---- Qt reimplemented def closeEvent(self, event): super().closeEvent(event) self._worker_manager.terminate_all() def update_vcs_state(self, idx, fname, fname2): """Update vcs status.""" self.update_vcs(fname, None) def update_vcs(self, fname, index, force=False): """Update vcs status.""" if self._last_git_job == (fname, index) and not force: self._git_job_queue = None return if self._git_is_working: self._git_job_queue = (fname, index) else: self._worker_manager.terminate_all() worker = self._worker_manager.create_python_worker( self.get_git_refs, fname) worker.sig_finished.connect(self.process_git_data) self._last_git_job = (fname, index) self._git_job_queue = None self._git_is_working = True worker.start() def get_git_refs(self, fname): """Get Git active branch, state, branches (plus tags).""" return get_git_refs(osp.dirname(fname)) def process_git_data(self, worker, output, error): """Receive data from git and update gui.""" # Output can be None under some circumstances, so we need to deal with # it here. # Fixes spyder-ide/spyder#21865 if output is None: branch, files_modified = None, [] else: __, branch, files_modified = output text = branch if branch else '' if len(files_modified): text = text + ' [{}]'.format(len(files_modified)) self.setVisible(bool(branch)) self.set_value(text) self._git_is_working = False if self._git_job_queue: self.update_vcs(*self._git_job_queue) def change_branch(self): """Change current branch.""" pass def get_tooltip(self): """Return localized tool tip for widget.""" return _("Git branch") def get_icon(self): return self.create_icon('code_fork') def test(): from qtpy.QtWidgets import QMainWindow from spyder.utils.qthelpers import qapplication app = qapplication(test_time=5) win = QMainWindow() win.setWindowTitle("Status widgets test") win.resize(900, 300) statusbar = win.statusBar() status_widgets = [] for status_class in (ReadWriteStatus, EOLStatus, EncodingStatus, CursorPositionStatus): status_widget = status_class(win, statusbar) status_widgets.append(status_widget) win.show() app.exec_() if __name__ == "__main__": test()
VCSStatus
python
mlflow__mlflow
tests/utils/test_async_artifacts_logging_queue.py
{ "start": 259, "end": 6334 }
class ____: def __init__(self, throw_exception_on_artifact_number=None): if throw_exception_on_artifact_number is None: throw_exception_on_artifact_number = [] self.received_run_id = "" self.received_artifacts = [] self.received_filenames = [] self.received_artifact_paths = [] self.artifact_count = 0 self.throw_exception_on_artifact_number = throw_exception_on_artifact_number or [] def consume_queue_data(self, filename, artifact_path, artifact): self.artifact_count += 1 if self.artifact_count in self.throw_exception_on_artifact_number: raise MlflowException("Failed to log run data") self.received_artifacts.append(artifact) self.received_filenames.append(filename) self.received_artifact_paths.append(artifact_path) def _get_run_artifacts(total_artifacts=TOTAL_ARTIFACTS): for num in range(0, total_artifacts): filename = f"image_{num}.png" artifact_path = f"images/artifact_{num}" artifact = Image.new("RGB", (100, 100), color="red") yield filename, artifact_path, artifact def _assert_sent_received_artifacts( filenames_sent, artifact_paths_sent, artifacts_sent, received_filenames, received_artifact_paths, received_artifacts, ): for num in range(1, len(filenames_sent)): assert filenames_sent[num] == received_filenames[num] for num in range(1, len(artifact_paths_sent)): assert artifact_paths_sent[num] == received_artifact_paths[num] for num in range(1, len(artifacts_sent)): assert artifacts_sent[num] == received_artifacts[num] def test_single_thread_publish_consume_queue(): run_artifacts = RunArtifacts() async_logging_queue = AsyncArtifactsLoggingQueue(run_artifacts.consume_queue_data) async_logging_queue.activate() filenames_sent = [] artifact_paths_sent = [] artifacts_sent = [] for filename, artifact_path, artifact in _get_run_artifacts(): async_logging_queue.log_artifacts_async( filename=filename, artifact_path=artifact_path, artifact=artifact ) filenames_sent.append(filename) artifact_paths_sent.append(artifact_path) artifacts_sent.append(artifact) async_logging_queue.flush() _assert_sent_received_artifacts( filenames_sent, artifact_paths_sent, artifacts_sent, run_artifacts.received_filenames, run_artifacts.received_artifact_paths, run_artifacts.received_artifacts, ) def test_queue_activation(): run_artifacts = RunArtifacts() async_logging_queue = AsyncArtifactsLoggingQueue(run_artifacts.consume_queue_data) assert not async_logging_queue._is_activated for filename, artifact_path, artifact in _get_run_artifacts(1): with pytest.raises(MlflowException, match="AsyncArtifactsLoggingQueue is not activated."): async_logging_queue.log_artifacts_async( filename=filename, artifact_path=artifact_path, artifact=artifact ) async_logging_queue.activate() assert async_logging_queue._is_activated def test_partial_logging_failed(): run_data = RunArtifacts(throw_exception_on_artifact_number=[3, 4]) async_logging_queue = AsyncArtifactsLoggingQueue(run_data.consume_queue_data) async_logging_queue.activate() filenames_sent = [] artifact_paths_sent = [] artifacts_sent = [] run_operations = [] batch_id = 1 for filename, artifact_path, artifact in _get_run_artifacts(): if batch_id in [3, 4]: with pytest.raises(MlflowException, match="Failed to log run data"): async_logging_queue.log_artifacts_async( filename=filename, artifact_path=artifact_path, artifact=artifact ).wait() else: run_operations.append( async_logging_queue.log_artifacts_async( filename=filename, artifact_path=artifact_path, artifact=artifact ) ) filenames_sent.append(filename) artifact_paths_sent.append(artifact_path) artifacts_sent.append(artifact) 
batch_id += 1 for run_operation in run_operations: run_operation.wait() _assert_sent_received_artifacts( filenames_sent, artifact_paths_sent, artifacts_sent, run_data.received_filenames, run_data.received_artifact_paths, run_data.received_artifacts, ) def test_publish_multithread_consume_single_thread(): run_data = RunArtifacts(throw_exception_on_artifact_number=[]) async_logging_queue = AsyncArtifactsLoggingQueue(run_data.consume_queue_data) async_logging_queue.activate() def _send_artifact(run_data_queueing_processor, run_operations=None): if run_operations is None: run_operations = [] filenames_sent = [] artifact_paths_sent = [] artifacts_sent = [] for filename, artifact_path, artifact in _get_run_artifacts(): run_operations.append( run_data_queueing_processor.log_artifacts_async( filename=filename, artifact_path=artifact_path, artifact=artifact ) ) time.sleep(random.randint(1, 3)) filenames_sent.append(filename) artifact_paths_sent.append(artifact_path) artifacts_sent.append(artifact) run_operations = [] t1 = threading.Thread(target=_send_artifact, args=(async_logging_queue, run_operations)) t2 = threading.Thread(target=_send_artifact, args=(async_logging_queue, run_operations)) t1.start() t2.start() t1.join() t2.join() for run_operation in run_operations: run_operation.wait() assert len(run_data.received_filenames) == 2 * TOTAL_ARTIFACTS assert len(run_data.received_artifact_paths) == 2 * TOTAL_ARTIFACTS assert len(run_data.received_artifacts) == 2 * TOTAL_ARTIFACTS
RunArtifacts
python
great-expectations__great_expectations
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_general_zipcode.py
{ "start": 984, "end": 2062 }
class ____(ColumnMapMetricProvider): # This is the id string that will be used to reference your metric. condition_metric_name = "column_values.valid_general_zipcode" condition_value_keys = ("country_code",) # This method implements the core logic for the PandasExecutionEngine @column_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column, country_code, **kwargs): return column.apply(lambda x: is_zipcode_valid(x, country_code)) # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine # @column_condition_partial(engine=SqlAlchemyExecutionEngine) # def _sqlalchemy(cls, column, _dialect, **kwargs): # raise NotImplementedError # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine # @column_condition_partial(engine=SparkDFExecutionEngine) # def _spark(cls, column, **kwargs): # raise NotImplementedError # This class defines the Expectation itself
ColumnValuesToBeValidGeneralZipcode
python
numba__numba
numba/core/annotations/pretty_annotate.py
{ "start": 7757, "end": 9540 }
class ____: """ Construct syntax highlighted annotation for a given jitted function: Example: >>> import numba >>> from numba.pretty_annotate import Annotate >>> @numba.jit ... def test(q): ... res = 0 ... for i in range(q): ... res += i ... return res ... >>> test(10) 45 >>> Annotate(test) The last line will return an HTML and/or ANSI representation that will be displayed accordingly in Jupyter/IPython. Function annotations persist across compilation for newly encountered type signatures and as a result annotations are shown for all signatures by default. Annotations for a specific signature can be shown by using the ``signature`` parameter. >>> @numba.jit ... def add(x, y): ... return x + y ... >>> add(1, 2) 3 >>> add(1.3, 5.7) 7.0 >>> add.signatures [(int64, int64), (float64, float64)] >>> Annotate(add, signature=add.signatures[1]) # annotation for (float64, float64) """ def __init__(self, function, signature=None, **kwargs): style = kwargs.get('style', 'default') if not function.signatures: raise ValueError('function needs to be jitted for at least one signature') ann = function.get_annotation_info(signature=signature) self.ann = ann for k,v in ann.items(): res = hllines(reform_code(v), style) rest = htlines(reform_code(v), style) v['pygments_lines'] = [(a,b,c, d) for (a,b),c, d in zip(v['python_lines'], res, rest)] def _repr_html_(self): return get_html_template().render(func_data=self.ann) def __repr__(self): return get_ansi_template().render(func_data=self.ann)
Annotate
python
kamyu104__LeetCode-Solutions
Python/remove-one-element-to-make-the-array-strictly-increasing.py
{ "start": 29, "end": 503 }
class ____(object): def canBeIncreasing(self, nums): """ :type nums: List[int] :rtype: bool """ deleted = False for i in xrange(1, len(nums)): if nums[i] > nums[i-1]: continue if deleted: return False deleted = True if i >= 2 and nums[i-2] > nums[i]: # delete nums[i] or nums[i-1] nums[i] = nums[i-1] return True
Solution
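Two checks against the greedy single-deletion logic above (the solution targets Python 2, hence xrange):

sol = Solution()
print(sol.canBeIncreasing([1, 2, 10, 5, 7]))  # True: removing the 10 fixes the only dip
print(sol.canBeIncreasing([2, 3, 1, 2]))      # False: no single removal suffices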
python
dask__dask
dask/dataframe/dask_expr/io/parquet.py
{ "start": 7862, "end": 8471 }
class ____(Blockwise): _parameters = ToParquet._parameters @property def io_func(self): return ToParquetFunctionWrapper( self.engine, self.path, self.fs, self.partition_on, self.write_metadata_file, self.offset, self.name_function, self.write_kwargs, ) def _divisions(self): return (None,) * (self.frame.npartitions + 1) def _task(self, name: Key, index: int) -> Task: return Task(name, self.io_func, TaskRef((self.frame._name, index)), (index,))
ToParquetData
python
graphql-python__graphene
graphene/relay/tests/test_mutation.py
{ "start": 403, "end": 647 }
class ____(ClientIDMutation): class Input: what = String() phrase = String() @staticmethod def mutate_and_get_payload(self, info, what, client_mutation_id=None): return SaySomething(phrase=str(what))
SaySomething
python
google__jax
jax/_src/errors.py
{ "start": 1394, "end": 4724 }
class ____(JAXTypeError): """ This error occurs when a JAX Tracer object is used in a context where a concrete value is required (see :ref:`faq-different-kinds-of-jax-values` for more on what a Tracer is). In some situations, it can be easily fixed by marking problematic values as static; in others, it may indicate that your program is doing operations that are not directly supported by JAX's JIT compilation model. Examples: Traced value where static value is expected One common cause of this error is using a traced value where a static value is required. For example: >>> from functools import partial >>> from jax import jit >>> import jax.numpy as jnp >>> @jit ... def func(x, axis): ... return x.min(axis) >>> func(jnp.arange(4), 0) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ConcretizationTypeError: Abstract tracer value encountered where concrete value is expected: axis argument to jnp.min(). This can often be fixed by marking the problematic argument as static:: >>> @partial(jit, static_argnums=1) ... def func(x, axis): ... return x.min(axis) >>> func(jnp.arange(4), 0) Array(0, dtype=int32) Shape depends on Traced Value Such an error may also arise when a shape in your JIT-compiled computation depends on the values within a traced quantity. For example:: >>> @jit ... def func(x): ... return jnp.where(x < 0) >>> func(jnp.arange(4)) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ConcretizationTypeError: Abstract tracer value encountered where concrete value is expected: The error arose in jnp.nonzero. This is an example of an operation that is incompatible with JAX's JIT compilation model, which requires array sizes to be known at compile-time. Here the size of the returned array depends on the contents of `x`, and such code cannot be JIT compiled. In many cases it is possible to work around this by modifying the logic used in the function; for example here is code with a similar issue:: >>> @jit ... def func(x): ... indices = jnp.where(x > 1) ... return x[indices].sum() >>> func(jnp.arange(4)) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ConcretizationTypeError: Abstract tracer value encountered where concrete value is expected: The error arose in jnp.nonzero. And here is how you might express the same operation in a way that avoids creation of a dynamically-sized index array:: >>> @jit ... def func(x): ... return jnp.where(x > 1, x, 0).sum() >>> func(jnp.arange(4)) Array(5, dtype=int32) To understand more subtleties having to do with tracers vs. regular values, and concrete vs. abstract values, you may want to read :ref:`faq-different-kinds-of-jax-values`. """ def __init__(self, tracer: core.Tracer, context: str = ""): super().__init__( "Abstract tracer value encountered where concrete value is expected: " f"{tracer._error_repr()}\n{context}{tracer._origin_msg()}\n") @export
ConcretizationTypeError
python
django__django
tests/gis_tests/geo3d/models.py
{ "start": 638, "end": 722 }
class ____(NamedModel): line = models.LineStringField(srid=32140)
InterstateProj2D
python
getsentry__sentry
src/sentry/integrations/slack/message_builder/disconnected.py
{ "start": 282, "end": 844 }
class ____(BlockSlackMessageBuilder): def get_docs_block(self) -> SlackBlock: return self.get_action_block( [ ( "Sentry Docs", "https://docs.sentry.io/product/alerts-notifications/alerts/", "sentry_docs_link_clicked", ) ] ) def build(self) -> SlackBlock: return self._build_blocks( self.get_markdown_block(DISCONNECTED_MESSAGE), self.get_docs_block(), )
SlackDisconnectedMessageBuilder
python
wandb__wandb
wandb/vendor/graphql-core-1.1/wandb_graphql/validation/validation.py
{ "start": 985, "end": 1435 }
class ____(Visitor): __slots__ = 'usages', 'type_info' def __init__(self, usages, type_info): self.usages = usages self.type_info = type_info def enter_VariableDefinition(self, node, key, parent, path, ancestors): return False def enter_Variable(self, node, key, parent, path, ancestors): usage = VariableUsage(node, type=self.type_info.get_input_type()) self.usages.append(usage)
UsageVisitor
python
getsentry__sentry
src/sentry/core/endpoints/team_members.py
{ "start": 1408, "end": 2538 }
class ____(Serializer): def __init__(self, *args, **kwargs): self.team = kwargs.pop("team", None) super().__init__(*args, **kwargs) def get_attrs(self, item_list, user, **kwargs): prefetch_related_objects(item_list, "organizationmember") org_member_set = serialize( { org_member_team.organizationmember for org_member_team in item_list if org_member_team.organizationmember } ) org_member_dict = {om["id"]: om for om in org_member_set} attrs = {} for org_member_team in item_list: attrs[org_member_team] = { "org_member": org_member_dict[f"{org_member_team.organizationmember_id}"] } return attrs def serialize(self, obj, attrs, user, **kwargs) -> OrganizationMemberOnTeamResponse: org_member = attrs["org_member"] org_member["teamRole"] = obj.role org_member["teamSlug"] = self.team.slug return org_member @extend_schema(tags=["Teams"]) @region_silo_endpoint
DetailedOrganizationMemberTeamSerializer
python
pypa__warehouse
warehouse/cache/services.py
{ "start": 317, "end": 1418 }
class ____: """ A Redis-based query results cache. Anything using this service must assume that the key results may be empty, and handle the case where the key is not found in the cache. The key is a string, and the value is a JSON-serialized object as a string. """ def __init__(self, redis_client): self.redis_client = redis_client @classmethod def create_service(cls, _context, request: Request) -> RedisQueryResults: redis_url = request.registry.settings["db_results_cache.url"] redis_client = redis.StrictRedis.from_url(redis_url) return cls(redis_client) def get(self, key: str) -> list | dict | None: """Get a cached result by key.""" result = self.redis_client.get(key) # deserialize the value as a JSON object return orjson.loads(result) if result else None def set(self, key: str, value) -> None: """Set a cached result by key.""" # serialize the value as a JSON string value = orjson.dumps(value) self.redis_client.set(key, value)
RedisQueryResults
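A round-trip sketch for the cache above; the Redis URL is a placeholder. Values are stored as JSON strings, so structured data comes back as plain lists and dicts, and a missing key yields None exactly as the docstring warns.

import redis

cache = RedisQueryResults(redis.StrictRedis.from_url("redis://localhost:6379/0"))
cache.set("top-projects", [{"name": "pip", "downloads": 123}])
assert cache.get("top-projects") == [{"name": "pip", "downloads": 123}]
assert cache.get("missing-key") is None  # cache miss, not an error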
python
openai__openai-python
src/openai/types/responses/tool_choice_function_param.py
{ "start": 223, "end": 450 }
class ____(TypedDict, total=False): name: Required[str] """The name of the function to call.""" type: Required[Literal["function"]] """For function calling, the type is always `function`."""
ToolChoiceFunctionParam
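Because the TypedDict above is declared total=False but marks both keys Required, any instance must carry both name and type. A construction sketch, assuming the type is re-exported from the responses types package as is usual in this codebase:

from openai.types.responses import ToolChoiceFunctionParam

# Forces the model to call the (hypothetical) get_weather function.
choice: ToolChoiceFunctionParam = {"type": "function", "name": "get_weather"}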
python
huggingface__transformers
src/transformers/integrations/tensor_parallel.py
{ "start": 19309, "end": 20747 }
class ____: """ General tensor parallel layer for transformers. """ use_dtensor = True device_mesh = None rank = None # Used to compare the shape of the original tensor empty_param = None # Used to init the corresponding DTensor shard = None def __init__(self, device_mesh=None, rank=None, empty_param=None): self.rank = rank self.device_mesh = device_mesh self.empty_param = empty_param @staticmethod def _prepare_input_fn(input_layouts, desired_input_layouts, mod, inputs, device_mesh): ... @staticmethod def _prepare_output_fn(output_layouts, use_local_output, mod, outputs, device_mesh): ... def partition_tensor(self, param, empty_param, param_type, param_casting_dtype, to_contiguous, rank, device_mesh): raise NotImplementedError def prepare_module_tp(self, module: nn.Module, device_mesh) -> nn.Module: if self.use_dtensor: distribute_module( module, device_mesh, partial(self._prepare_input_fn, self.input_layouts, self.desired_input_layouts), partial(self._prepare_output_fn, self.output_layouts, self.use_local_output), ) # use_dtensor needs to be set to false for nn.Parameter when you want to view, chunk, slice # you name it. Whatever you want to do that is a bit unconventional, you need local tensors
TensorParallelLayer
python
huggingface__transformers
src/transformers/models/ovis2/modular_ovis2.py
{ "start": 1797, "end": 1870 }
class ____(LlavaNextModelOutputWithPast): pass
Ovis2ModelOutputWithPast
python
airbytehq__airbyte
airbyte-ci/connectors/erd/src/erd/relationships.py
{ "start": 160, "end": 364 }
class ____(TypedDict): name: str relations: dict[str, str] false_positives: NotRequired[dict[str, str]] Relationships = TypedDict("Relationships", {"streams": List[Relationship]})
Relationship
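A sketch of populating the TypedDicts above; the stream and column names are invented. false_positives may be omitted because it is NotRequired.

rel: Relationship = {
    "name": "users",
    "relations": {"account_id": "accounts.id"},
}
all_rels: Relationships = {"streams": [rel]}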
python
pytorch__pytorch
torch/_inductor/codegen/cpp.py
{ "start": 182582, "end": 183985 }
class ____(CppKernel): def __init__(self, kernel_group): super().__init__(kernel_group.args, kernel_group.ws.num_threads) self.inner: list[LoopNest] = [] def decide_parallel_depth(self, max_parallel_depth, threads): kernels_parallel_depth = [] nested_kernels: list[CppKernel] = [ loop_nest.get_kernel() for loop_nest in self.inner ] # TODO(leslie-fang-intel): only enable parallel within all outer loop levels. for kernel in nested_kernels: # For any ScalarKernel, VecKernel, or Tile2DKernel, # they should all have the same call_ranges call_ranges = kernel.call_ranges assert call_ranges is not None kernels_parallel_depth.append( kernel.decide_parallel_depth( ParallelDepth( parallel_depth=( len(call_ranges) - max_parallel_depth.start_depth ), start_depth=max_parallel_depth.start_depth, ), threads, ).parallel_depth ) return ParallelDepth( parallel_depth=min( max_parallel_depth.parallel_depth, max(kernels_parallel_depth) ), start_depth=max_parallel_depth.start_depth, )
OuterLoopFusedKernel
python
sympy__sympy
sympy/printing/cxx.py
{ "start": 2850, "end": 4211 }
class ____: printmethod = "_cxxcode" language = 'C++' _ns = 'std::' # namespace def __init__(self, settings=None): super().__init__(settings or {}) @requires(headers={'algorithm'}) def _print_Max(self, expr): from sympy.functions.elementary.miscellaneous import Max if len(expr.args) == 1: return self._print(expr.args[0]) return "%smax(%s, %s)" % (self._ns, self._print(expr.args[0]), self._print(Max(*expr.args[1:]))) @requires(headers={'algorithm'}) def _print_Min(self, expr): from sympy.functions.elementary.miscellaneous import Min if len(expr.args) == 1: return self._print(expr.args[0]) return "%smin(%s, %s)" % (self._ns, self._print(expr.args[0]), self._print(Min(*expr.args[1:]))) def _print_using(self, expr): if expr.alias == none: return 'using %s' % expr.type else: raise ValueError("C++98 does not support type aliases") def _print_Raise(self, rs): arg, = rs.args return 'throw %s' % self._print(arg) @requires(headers={'stdexcept'}) def _print_RuntimeError_(self, re): message, = re.args return "%sruntime_error(%s)" % (self._ns, self._print(message))
_CXXCodePrinterBase
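The Max/Min handling above can be exercised through sympy's public cxxcode helper, whose standard-specific printers build on this base class. A sketch (the exact printed string depends on sympy's canonical argument ordering):

from sympy import Max, cxxcode, symbols

x, y, z = symbols("x y z")
print(cxxcode(Max(x, y, z)))  # e.g. std::max(x, std::max(y, z))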
python
Textualize__textual
tests/notifications/test_all_levels_notifications.py
{ "start": 215, "end": 387 }
class ____(Screen): def on_mount(self) -> None: self.notify("test", timeout=60) def compose(self) -> ComposeResult: yield NotifyWidget()
NotifyScreen
python
pandas-dev__pandas
pandas/tests/io/parser/conftest.py
{ "start": 230, "end": 2058 }
class ____: engine: str | None = None low_memory = True float_precision_choices: list[str | None] = [] def update_kwargs(self, kwargs): kwargs = kwargs.copy() kwargs.update({"engine": self.engine, "low_memory": self.low_memory}) return kwargs def read_csv(self, *args, **kwargs): kwargs = self.update_kwargs(kwargs) return read_csv(*args, **kwargs) def read_csv_check_warnings( self, warn_type: type[Warning], warn_msg: str, *args, raise_on_extra_warnings=True, check_stacklevel: bool = True, **kwargs, ): # We need to check the stacklevel here instead of in the tests # since this is where read_csv is called and where the warning # should point to. kwargs = self.update_kwargs(kwargs) with tm.assert_produces_warning( warn_type, match=warn_msg, raise_on_extra_warnings=raise_on_extra_warnings, check_stacklevel=check_stacklevel, ): return read_csv(*args, **kwargs) def read_table(self, *args, **kwargs): kwargs = self.update_kwargs(kwargs) return read_table(*args, **kwargs) def read_table_check_warnings( self, warn_type: type[Warning], warn_msg: str, *args, raise_on_extra_warnings=True, **kwargs, ): # We need to check the stacklevel here instead of in the tests # since this is where read_table is called and where the warning # should point to. kwargs = self.update_kwargs(kwargs) with tm.assert_produces_warning( warn_type, match=warn_msg, raise_on_extra_warnings=raise_on_extra_warnings ): return read_table(*args, **kwargs)
BaseParser
python
python-visualization__folium
folium/elements.py
{ "start": 4520, "end": 5025 }
class ____(MacroElement): """Generate an include statement on a class.""" _template = Template( """ {{ this.leaflet_class_name }}.include( {{ this.options | tojavascript }} ) """ ) def __init__(self, leaflet_class_name: str, **kwargs): super().__init__() self.leaflet_class_name = leaflet_class_name self.options = kwargs def render(self, *args, **kwargs): return super().render(*args, **kwargs)
IncludeStatement
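A hedged sketch of attaching the macro above to a map. The Leaflet class name and option are illustrative only; at render time the template emits an L.Marker.include({...}) statement built from the keyword arguments.

import folium

m = folium.Map(location=[45.0, 3.0], zoom_start=6)
m.add_child(IncludeStatement("L.Marker", custom_flag=True))
html = m.get_root().render()  # the include statement appears in the rendered page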
python
huggingface__transformers
src/transformers/models/deepseek_vl_hybrid/image_processing_deepseek_vl_hybrid.py
{ "start": 1997, "end": 3734 }
class ____(ImagesKwargs, total=False): r""" min_size (`int`, *optional*, defaults to 14): The minimum allowed size for the resized image. Ensures that neither the height nor width falls below this value after resizing. high_res_size (`dict`, *optional*, defaults to `{"height": 1024, "width": 1024}`): Size of the high resolution output image after resizing. Can be overridden by the `high_res_size` parameter in the `preprocess` method. high_res_resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`): Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`. Can be overridden by the `high_res_resample` parameter in the `preprocess` method. high_res_image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`): Mean to use if normalizing the high resolution image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `high_res_image_mean` parameter in the `preprocess` method. high_res_image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`): Standard deviation to use if normalizing the high resolution image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `high_res_image_std` parameter in the `preprocess` method. """ min_size: int high_res_size: dict high_res_resample: Union["PILImageResampling", int] high_res_image_mean: Union[float, list[float], tuple[float, ...]] high_res_image_std: Union[float, list[float], tuple[float, ...]]
DeepseekVLHybridImageProcessorKwargs
python
readthedocs__readthedocs.org
readthedocs/organizations/tests/test_access.py
{ "start": 8045, "end": 8563 }
class ____(OrganizationAccessMixin, TestCase): """Test organization paths with authed but non-org user.""" url_responses = { "/organizations/": {"status_code": 200}, } def assertResponse(self, path, method=None, data=None, **kwargs): kwargs["status_code"] = 404 super().assertResponse(path, method, data, **kwargs) def login(self): return self.client.login(username="tester", password="test") def is_admin(self): return False
OrganizationNonmemberAccess
python
apache__airflow
providers/standard/src/airflow/providers/standard/operators/smooth.py
{ "start": 1005, "end": 1400 }
class ____(BaseOperator): """Operator that logs a YouTube link to Sade song "Smooth Operator".""" ui_color = "#e8f7e4" yt_link: str = "https://www.youtube.com/watch?v=4TYv2PhG89A" def __init__(self, **kwargs) -> None: super().__init__(**kwargs) def execute(self, context: Context): self.log.info("Enjoy Sade - Smooth Operator: %s", self.yt_link)
SmoothOperator
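Using the operator above takes nothing beyond a task_id; the dag_id and schedule below are placeholders, and the import path mirrors this record's module path.

from airflow import DAG
from airflow.providers.standard.operators.smooth import SmoothOperator

with DAG(dag_id="smooth_demo", schedule=None) as dag:
    SmoothOperator(task_id="sing")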
python
pytorch__pytorch
torch/distributed/_tools/mem_tracker.py
{ "start": 1252, "end": 1397 }
class ____(str, Enum): """Base Class for defining memory reference types, categorizing tensors based on their usage within a model."""
_RefType
python
pyca__cryptography
src/cryptography/x509/extensions.py
{ "start": 57521, "end": 58875 }
class ____(ExtensionType): oid = ExtensionOID.SIGNED_CERTIFICATE_TIMESTAMPS def __init__( self, signed_certificate_timestamps: Iterable[SignedCertificateTimestamp], ) -> None: signed_certificate_timestamps = list(signed_certificate_timestamps) if not all( isinstance(sct, SignedCertificateTimestamp) for sct in signed_certificate_timestamps ): raise TypeError( "Every item in the signed_certificate_timestamps list must be " "a SignedCertificateTimestamp" ) self._signed_certificate_timestamps = signed_certificate_timestamps __len__, __iter__, __getitem__ = _make_sequence_methods( "_signed_certificate_timestamps" ) def __repr__(self) -> str: return f"<SignedCertificateTimestamps({list(self)})>" def __hash__(self) -> int: return hash(tuple(self._signed_certificate_timestamps)) def __eq__(self, other: object) -> bool: if not isinstance(other, SignedCertificateTimestamps): return NotImplemented return ( self._signed_certificate_timestamps == other._signed_certificate_timestamps ) def public_bytes(self) -> bytes: return rust_x509.encode_extension_value(self)
SignedCertificateTimestamps
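A sketch of reading the extension above from a parsed certificate. The PEM file name is hypothetical, and the certificate must actually embed SCTs, otherwise get_extension_for_class raises ExtensionNotFound.

from cryptography import x509

with open("leaf.pem", "rb") as f:  # hypothetical certificate file
    cert = x509.load_pem_x509_certificate(f.read())
ext = cert.extensions.get_extension_for_class(x509.SignedCertificateTimestamps)
for sct in ext.value:  # iteration comes from _make_sequence_methods above
    print(sct.log_id.hex(), sct.timestamp)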
python
openai__openai-python
src/openai/resources/completions.py
{ "start": 58448, "end": 58705 }
class ____: def __init__(self, completions: AsyncCompletions) -> None: self._completions = completions self.create = _legacy_response.async_to_raw_response_wrapper( completions.create, )
AsyncCompletionsWithRawResponse
python
pypa__pip
src/pip/_internal/metadata/pkg_resources.py
{ "start": 2247, "end": 8415 }
class ____(BaseDistribution): def __init__(self, dist: pkg_resources.Distribution) -> None: self._dist = dist # This is populated lazily, to avoid loading metadata for all possible # distributions eagerly. self.__extra_mapping: Mapping[NormalizedName, str] | None = None @property def _extra_mapping(self) -> Mapping[NormalizedName, str]: if self.__extra_mapping is None: self.__extra_mapping = { canonicalize_name(extra): extra for extra in self._dist.extras } return self.__extra_mapping @classmethod def from_directory(cls, directory: str) -> BaseDistribution: dist_dir = directory.rstrip(os.sep) # Build a PathMetadata object, from path to metadata. :wink: base_dir, dist_dir_name = os.path.split(dist_dir) metadata = pkg_resources.PathMetadata(base_dir, dist_dir) # Determine the correct Distribution object type. if dist_dir.endswith(".egg-info"): dist_cls = pkg_resources.Distribution dist_name = os.path.splitext(dist_dir_name)[0] else: assert dist_dir.endswith(".dist-info") dist_cls = pkg_resources.DistInfoDistribution dist_name = os.path.splitext(dist_dir_name)[0].split("-")[0] dist = dist_cls(base_dir, project_name=dist_name, metadata=metadata) return cls(dist) @classmethod def from_metadata_file_contents( cls, metadata_contents: bytes, filename: str, project_name: str, ) -> BaseDistribution: metadata_dict = { "METADATA": metadata_contents, } dist = pkg_resources.DistInfoDistribution( location=filename, metadata=InMemoryMetadata(metadata_dict, filename), project_name=project_name, ) return cls(dist) @classmethod def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution: try: with wheel.as_zipfile() as zf: info_dir, _ = parse_wheel(zf, name) metadata_dict = { path.split("/", 1)[-1]: read_wheel_metadata_file(zf, path) for path in zf.namelist() if path.startswith(f"{info_dir}/") } except zipfile.BadZipFile as e: raise InvalidWheel(wheel.location, name) from e except UnsupportedWheel as e: raise UnsupportedWheel(f"{name} has an invalid wheel, {e}") dist = pkg_resources.DistInfoDistribution( location=wheel.location, metadata=InMemoryMetadata(metadata_dict, wheel.location), project_name=name, ) return cls(dist) @property def location(self) -> str | None: return self._dist.location @property def installed_location(self) -> str | None: egg_link = egg_link_path_from_location(self.raw_name) if egg_link: location = egg_link elif self.location: location = self.location else: return None return normalize_path(location) @property def info_location(self) -> str | None: return self._dist.egg_info @property def installed_by_distutils(self) -> bool: # A distutils-installed distribution is provided by FileMetadata. This # provider has a "path" attribute not present anywhere else. Not the # best introspection logic, but pip has been doing this for a long time. 
try: return bool(self._dist._provider.path) except AttributeError: return False @property def canonical_name(self) -> NormalizedName: return canonicalize_name(self._dist.project_name) @property def version(self) -> Version: return parse_version(self._dist.version) @property def raw_version(self) -> str: return self._dist.version def is_file(self, path: InfoPath) -> bool: return self._dist.has_metadata(str(path)) def iter_distutils_script_names(self) -> Iterator[str]: yield from self._dist.metadata_listdir("scripts") def read_text(self, path: InfoPath) -> str: name = str(path) if not self._dist.has_metadata(name): raise FileNotFoundError(name) content = self._dist.get_metadata(name) if content is None: raise NoneMetadataError(self, name) return content def iter_entry_points(self) -> Iterable[BaseEntryPoint]: for group, entries in self._dist.get_entry_map().items(): for name, entry_point in entries.items(): name, _, value = str(entry_point).partition("=") yield EntryPoint(name=name.strip(), value=value.strip(), group=group) def _metadata_impl(self) -> email.message.Message: """ :raises NoneMetadataError: if the distribution reports `has_metadata()` True but `get_metadata()` returns None. """ if isinstance(self._dist, pkg_resources.DistInfoDistribution): metadata_name = "METADATA" else: metadata_name = "PKG-INFO" try: metadata = self.read_text(metadata_name) except FileNotFoundError: if self.location: displaying_path = display_path(self.location) else: displaying_path = repr(self.location) logger.warning("No metadata found in %s", displaying_path) metadata = "" feed_parser = email.parser.FeedParser() feed_parser.feed(metadata) return feed_parser.close() def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]: if extras: relevant_extras = set(self._extra_mapping) & set( map(canonicalize_name, extras) ) extras = [self._extra_mapping[extra] for extra in relevant_extras] return self._dist.requires(extras) def iter_provided_extras(self) -> Iterable[NormalizedName]: return self._extra_mapping.keys()
Distribution
python
pytorch__pytorch
torch/_dynamo/debug_utils.py
{ "start": 18253, "end": 18852 }
class ____: def __init__(self) -> None: self.total = 0 def storage( self, storage_hash: Optional[str], nbytes: int, *, device: Optional[torch._prims_common.DeviceLikeType] = None, dtype_hint: Optional[torch.dtype] = None, ) -> None: self.total += 1 def tensor(self, *args: Any, **kwargs: Any) -> Optional[torch.Tensor]: pass def symint(self, *args: Any, **kwargs: Any) -> Optional[int]: pass # TODO: Support bundling the entire repro into a zip file for ease of # transferring around
NopInputReader
python
readthedocs__readthedocs.org
readthedocs/api/v3/tests/test_remoteorganizations.py
{ "start": 313, "end": 2133 }
class ____(APIEndpointMixin): def setUp(self): super().setUp() self.remote_organization = fixture.get( RemoteOrganization, created=self.created, modified=self.modified, avatar_url="https://avatars.githubusercontent.com/u/366329?v=4", name="Read the Docs", slug="readthedocs", url="https://github.com/readthedocs", vcs_provider=GITHUB, ) social_account = fixture.get(SocialAccount, user=self.me, provider=GITHUB) fixture.get( RemoteOrganizationRelation, remote_organization=self.remote_organization, user=self.me, account=social_account, ) def test_remote_organization_list(self): url = reverse("remoteorganizations-list") self.client.logout() response = self.client.get(url) self.assertEqual(response.status_code, 401) self.client.credentials(HTTP_AUTHORIZATION=f"Token {self.token.key}") response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertDictEqual( response.json(), self._get_response_dict("remoteorganizations-list"), ) def test_remote_organization_list_name_filter(self): self.client.credentials(HTTP_AUTHORIZATION=f"Token {self.token.key}") response = self.client.get( reverse("remoteorganizations-list"), {"name": "Read"} ) self.assertEqual(response.status_code, 200) response_data = response.json() self.assertEqual(len(response_data["results"]), 1) self.assertDictEqual( response_data, self._get_response_dict("remoteorganizations-list"), )
RemoteOrganizationEndpointTests
python
getsentry__sentry
src/sentry/workflow_engine/endpoints/organization_workflow_details.py
{ "start": 1373, "end": 4973 }
class ____(OrganizationWorkflowEndpoint): publish_status = { "GET": ApiPublishStatus.EXPERIMENTAL, "PUT": ApiPublishStatus.EXPERIMENTAL, "DELETE": ApiPublishStatus.EXPERIMENTAL, } owner = ApiOwner.ALERTS_NOTIFICATIONS @extend_schema( operation_id="Fetch a Workflow", parameters=[ GlobalParams.ORG_ID_OR_SLUG, WorkflowParams.WORKFLOW_ID, ], responses={ 201: WorkflowSerializer, 400: RESPONSE_BAD_REQUEST, 401: RESPONSE_UNAUTHORIZED, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, ) def get(self, request: Request, organization: Organization, workflow: Workflow): """ Returns a workflow """ serialized_workflow = serialize( workflow, request.user, WorkflowSerializer(), ) return Response(serialized_workflow) @extend_schema( operation_id="Update a Workflow", parameters=[ GlobalParams.ORG_ID_OR_SLUG, WorkflowParams.WORKFLOW_ID, ], responses={ 201: WorkflowSerializer, 400: RESPONSE_BAD_REQUEST, 401: RESPONSE_UNAUTHORIZED, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, ) def put(self, request: Request, organization: Organization, workflow: Workflow): """ Updates a workflow """ validator = WorkflowValidator( data=request.data, context={ "organization": organization, "request": request, "workflow": workflow, }, ) validator.is_valid(raise_exception=True) with transaction.atomic(router.db_for_write(Workflow)): validator.update(workflow, validator.validated_data) detector_ids = request.data.get("detectorIds") if detector_ids is not None: bulk_validator = BulkWorkflowDetectorsValidator( data={ "workflow_id": workflow.id, "detector_ids": detector_ids, }, context={ "organization": organization, "request": request, }, ) if not bulk_validator.is_valid(): raise ValidationError({"detectorIds": bulk_validator.errors}) bulk_validator.save() create_audit_entry( request=request, organization=organization, target_object=workflow.id, event=audit_log.get_event_id("WORKFLOW_EDIT"), data=workflow.get_audit_log_data(), ) return Response( serialize(workflow, request.user, WorkflowSerializer()), status=200, ) def delete(self, request: Request, organization: Organization, workflow: Workflow): """ Delete a workflow """ RegionScheduledDeletion.schedule(workflow, days=0, actor=request.user) workflow.update(status=ObjectStatus.PENDING_DELETION) create_audit_entry( request=request, organization=organization, target_object=workflow.id, event=audit_log.get_event_id("WORKFLOW_REMOVE"), data=workflow.get_audit_log_data(), ) return Response(status=204)
OrganizationWorkflowDetailsEndpoint
python
PyCQA__pyflakes
pyflakes/messages.py
{ "start": 6163, "end": 6343 }
class ____(Message): """ Two or more starred expressions in an assignment (a, *b, *c = d). """ message = 'two starred expressions in assignment'
TwoStarredExpressions
python
neetcode-gh__leetcode
python/0235-lowest-common-ancestor-of-a-binary-search-tree.py
{ "start": 164, "end": 535 }
class ____: def lowestCommonAncestor( self, root: "TreeNode", p: "TreeNode", q: "TreeNode" ) -> "TreeNode": while True: if root.val < p.val and root.val < q.val: root = root.right elif root.val > p.val and root.val > q.val: root = root.left else: return root
Solution
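A worked check of the iterative BST walk above. `TreeNode` is not part of the record, so a minimal stand-in matching the usual LeetCode definition is assumed here.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

#         6
#        / \
#       2   8
#      / \ / \
#     0  4 7  9
root = TreeNode(6, TreeNode(2, TreeNode(0), TreeNode(4)), TreeNode(8, TreeNode(7), TreeNode(9)))
assert Solution().lowestCommonAncestor(root, root.left, root.right).val == 6       # 2 and 8 split at the root
assert Solution().lowestCommonAncestor(root, root.left, root.left.right).val == 2  # 2 is an ancestor of 4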
python
getsentry__sentry
src/sentry/integrations/api/endpoints/organization_code_mapping_codeowners.py
{ "start": 1389, "end": 2679 }
class ____(OrganizationEndpoint): owner = ApiOwner.ISSUES publish_status = { "GET": ApiPublishStatus.PRIVATE, } permission_classes = (OrganizationIntegrationsPermission,) def convert_args(self, request: Request, organization_id_or_slug, config_id, *args, **kwargs): args, kwargs = super().convert_args( request, organization_id_or_slug, config_id, *args, **kwargs ) organization = kwargs["organization"] try: kwargs["config"] = RepositoryProjectPathConfig.objects.get( id=config_id, organization_id=organization.id, ) except RepositoryProjectPathConfig.DoesNotExist: raise Http404 return (args, kwargs) def get(self, request: Request, config_id, organization, config) -> Response: try: codeowner_contents = get_codeowner_contents(config) except ApiError as e: return self.respond({"detail": str(e)}, status=status.HTTP_400_BAD_REQUEST) if not codeowner_contents: return self.respond(status=status.HTTP_404_NOT_FOUND) return self.respond( codeowner_contents, status=status.HTTP_200_OK, )
OrganizationCodeMappingCodeOwnersEndpoint
python
fastapi__sqlmodel
docs_src/tutorial/fastapi/update/tutorial002_py310.py
{ "start": 478, "end": 2810 }
class ____(SQLModel): name: str | None = None secret_name: str | None = None age: int | None = None password: str | None = None sqlite_file_name = "database.db" sqlite_url = f"sqlite:///{sqlite_file_name}" connect_args = {"check_same_thread": False} engine = create_engine(sqlite_url, echo=True, connect_args=connect_args) def create_db_and_tables(): SQLModel.metadata.create_all(engine) def hash_password(password: str) -> str: # Use something like passlib here return f"not really hashed {password} hehehe" app = FastAPI() @app.on_event("startup") def on_startup(): create_db_and_tables() @app.post("/heroes/", response_model=HeroPublic) def create_hero(hero: HeroCreate): hashed_password = hash_password(hero.password) with Session(engine) as session: extra_data = {"hashed_password": hashed_password} db_hero = Hero.model_validate(hero, update=extra_data) session.add(db_hero) session.commit() session.refresh(db_hero) return db_hero @app.get("/heroes/", response_model=list[HeroPublic]) def read_heroes(offset: int = 0, limit: int = Query(default=100, le=100)): with Session(engine) as session: heroes = session.exec(select(Hero).offset(offset).limit(limit)).all() return heroes @app.get("/heroes/{hero_id}", response_model=HeroPublic) def read_hero(hero_id: int): with Session(engine) as session: hero = session.get(Hero, hero_id) if not hero: raise HTTPException(status_code=404, detail="Hero not found") return hero @app.patch("/heroes/{hero_id}", response_model=HeroPublic) def update_hero(hero_id: int, hero: HeroUpdate): with Session(engine) as session: db_hero = session.get(Hero, hero_id) if not db_hero: raise HTTPException(status_code=404, detail="Hero not found") hero_data = hero.model_dump(exclude_unset=True) extra_data = {} if "password" in hero_data: password = hero_data["password"] hashed_password = hash_password(password) extra_data["hashed_password"] = hashed_password db_hero.sqlmodel_update(hero_data, update=extra_data) session.add(db_hero) session.commit() session.refresh(db_hero) return db_hero
HeroUpdate
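A short sketch of why the PATCH handler above calls `model_dump(exclude_unset=True)`: it distinguishes fields the client actually sent from fields that merely defaulted to None, which is what makes partial updates safe.

update = HeroUpdate(name="Deadpond")
print(update.model_dump())                    # every field, unset ones as None
print(update.model_dump(exclude_unset=True))  # {'name': 'Deadpond'} -- only what the client sent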
python
lazyprogrammer__machine_learning_examples
rl3/a2c/subproc_vec_env.py
{ "start": 952, "end": 1322 }
class ____(): """ Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle) """ def __init__(self, x): self.x = x def __getstate__(self): import cloudpickle return cloudpickle.dumps(self.x) def __setstate__(self, ob): import pickle self.x = pickle.loads(ob)
CloudpickleWrapper
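A hedged round-trip sketch (cloudpickle must be installed): a bare lambda would make `pickle.dumps` raise, but the wrapper's `__getstate__`/`__setstate__` pair serializes it via cloudpickle instead.

import pickle

wrapped = CloudpickleWrapper(lambda: "hello from a lambda")
restored = pickle.loads(pickle.dumps(wrapped))
print(restored.x())   # -> "hello from a lambda"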
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/auto_materialize_asset_evaluations.py
{ "start": 1054, "end": 1201 }
class ____(graphene.ObjectType): text = graphene.String() class Meta: name = "TextRuleEvaluationData"
GrapheneTextRuleEvaluationData
python
airbytehq__airbyte
airbyte-integrations/connectors/source-amplitude/components.py
{ "start": 1542, "end": 2198 }
class ____(RecordExtractor): """ Create records from complex response structure Issue: https://github.com/airbytehq/airbyte/issues/23145 """ def extract_records(self, response: requests.Response) -> List[Record]: response_data = response.json().get("data", []) if response_data: series = list(zip(*response_data["series"])) if series: return [ {"date": date, "statistics": dict(zip(response_data["seriesLabels"], users))} for date, users in zip(response_data["xValues"], series) ] return []
ActiveUsersRecordExtractor
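A hand-built payload, shaped like the Amplitude Active Users response the extractor expects (the values are illustrative), showing the transpose-and-zip reshaping it performs.

data = {
    "xValues": ["2023-01-01", "2023-01-02"],
    "seriesLabels": ["US", "DE"],
    "series": [[10, 20], [3, 4]],             # one row per label, one column per date
}
series = list(zip(*data["series"]))           # [(10, 3), (20, 4)] -- one tuple per date
records = [
    {"date": date, "statistics": dict(zip(data["seriesLabels"], users))}
    for date, users in zip(data["xValues"], series)
]
print(records[0])  # {'date': '2023-01-01', 'statistics': {'US': 10, 'DE': 3}}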
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 1145091, "end": 1145604 }
class ____(ScaleInvalidDataShowAsstrokeDash): """ ScaleInvalidDataShowAsValuestrokeDash schema wrapper. Parameters ---------- value : Sequence[float] An array of alternating stroke, space lengths for creating dashed or dotted lines. """ _schema = {"$ref": '#/definitions/ScaleInvalidDataShowAsValue<"strokeDash">'} def __init__(self, value: Optional[Sequence[float]] = Undefined, **kwds): super().__init__(value=value, **kwds)
ScaleInvalidDataShowAsValuestrokeDash
python
getsentry__sentry
src/sentry/spans/buffer.py
{ "start": 6008, "end": 23108 }
class ____: def __init__(self, assigned_shards: list[int], slice_id: int | None = None): self.assigned_shards = list(assigned_shards) self.slice_id = slice_id self.add_buffer_sha: str | None = None self.any_shard_at_limit = False self._current_compression_level = None self._zstd_compressor: zstandard.ZstdCompressor | None = None self._zstd_decompressor = zstandard.ZstdDecompressor() @cached_property def client(self) -> RedisCluster[bytes] | StrictRedis[bytes]: return get_redis_client() # make it pickleable def __reduce__(self): return (SpansBuffer, (self.assigned_shards, self.slice_id)) def _get_span_key(self, project_and_trace: str, span_id: str) -> bytes: return f"span-buf:z:{{{project_and_trace}}}:{span_id}".encode("ascii") @metrics.wraps("spans.buffer.process_spans") def process_spans(self, spans: Sequence[Span], now: int): """ :param spans: List of to-be-ingested spans. :param now: The current time to be used for setting expiration/flush deadlines. Used for unit-testing and managing backlogging behavior. """ compression_level = options.get("spans.buffer.compression.level") if compression_level != self._current_compression_level: self._current_compression_level = compression_level if compression_level == -1: self._zstd_compressor = None else: self._zstd_compressor = zstandard.ZstdCompressor(level=compression_level) redis_ttl = options.get("spans.buffer.redis-ttl") timeout = options.get("spans.buffer.timeout") root_timeout = options.get("spans.buffer.root-timeout") max_segment_bytes = options.get("spans.buffer.max-segment-bytes") result_meta = [] is_root_span_count = 0 min_redirect_depth = float("inf") max_redirect_depth = float("-inf") with metrics.timer("spans.buffer.process_spans.push_payloads"): trees = self._group_by_parent(spans) with self.client.pipeline(transaction=False) as p: for (project_and_trace, parent_span_id), subsegment in trees.items(): set_key = self._get_span_key(project_and_trace, parent_span_id) prepared = self._prepare_payloads(subsegment) p.zadd(set_key, prepared) p.execute() with metrics.timer("spans.buffer.process_spans.insert_spans"): # Workaround to make `evalsha` work in pipelines. We load ensure the # script is loaded just before calling it below. This calls `SCRIPT # EXISTS` once per batch. add_buffer_sha = self._ensure_script() with self.client.pipeline(transaction=False) as p: for (project_and_trace, parent_span_id), subsegment in trees.items(): p.execute_command( "EVALSHA", add_buffer_sha, 1, project_and_trace, len(subsegment), parent_span_id, "true" if any(span.is_segment_span for span in subsegment) else "false", redis_ttl, max_segment_bytes, *[span.span_id for span in subsegment], ) is_root_span_count += sum(span.is_segment_span for span in subsegment) result_meta.append((project_and_trace, parent_span_id)) results = p.execute() with metrics.timer("spans.buffer.process_spans.update_queue"): queue_deletes: dict[bytes, set[bytes]] = {} queue_adds: dict[bytes, MutableMapping[str | bytes, int]] = {} assert len(result_meta) == len(results) for (project_and_trace, parent_span_id), result in zip(result_meta, results): redirect_depth, set_key, has_root_span = result shard = self.assigned_shards[ int(project_and_trace.split(":")[1], 16) % len(self.assigned_shards) ] queue_key = self._get_queue_key(shard) min_redirect_depth = min(min_redirect_depth, redirect_depth) max_redirect_depth = max(max_redirect_depth, redirect_depth) # if the currently processed span is a root span, OR the buffer # already had a root span inside, use a different timeout than # usual. 
if has_root_span: offset = root_timeout else: offset = timeout zadd_items = queue_adds.setdefault(queue_key, {}) zadd_items[set_key] = now + offset subsegment_spans = trees[project_and_trace, parent_span_id] delete_set = queue_deletes.setdefault(queue_key, set()) delete_set.update( self._get_span_key(project_and_trace, span.span_id) for span in subsegment_spans ) delete_set.discard(set_key) with self.client.pipeline(transaction=False) as p: for queue_key, adds in queue_adds.items(): if adds: p.zadd(queue_key, adds) p.expire(queue_key, redis_ttl) for queue_key, deletes in queue_deletes.items(): if deletes: p.zrem(queue_key, *deletes) p.execute() metrics.timing("spans.buffer.process_spans.num_spans", len(spans)) # This incr metric is needed to get a rate overall. metrics.incr("spans.buffer.process_spans.count_spans", amount=len(spans)) metrics.timing("spans.buffer.process_spans.num_is_root_spans", is_root_span_count) metrics.timing("spans.buffer.process_spans.num_subsegments", len(trees)) metrics.gauge("spans.buffer.min_redirect_depth", min_redirect_depth) metrics.gauge("spans.buffer.max_redirect_depth", max_redirect_depth) def _ensure_script(self): if self.add_buffer_sha is not None: if self.client.script_exists(self.add_buffer_sha)[0]: return self.add_buffer_sha self.add_buffer_sha = self.client.script_load(add_buffer_script.script) return self.add_buffer_sha def _get_queue_key(self, shard: int) -> bytes: if self.slice_id is not None: return f"span-buf:q:{self.slice_id}-{shard}".encode("ascii") else: return f"span-buf:q:{shard}".encode("ascii") def _group_by_parent(self, spans: Sequence[Span]) -> dict[tuple[str, str], list[Span]]: """ Groups partial trees of spans by their top-most parent span ID in the provided list. The result is a dictionary where the keys identify a top-most known parent, and the value is a flat list of all its transitive children. For spans with a known segment_id, the grouping is done by the segment_id instead of the parent_span_id. This is the case for spans extracted from transaction events, or if in the future SDKs provide segment IDs. :param spans: List of spans to be grouped. :return: Dictionary of grouped spans. The key is a tuple of the `project_and_trace`, and the `parent_span_id`. 
""" trees: dict[tuple[str, str], list[Span]] = {} redirects: dict[str, dict[str, str]] = {} for span in spans: project_and_trace = f"{span.project_id}:{span.trace_id}" parent = span.effective_parent_id() trace_redirects = redirects.setdefault(project_and_trace, {}) while redirect := trace_redirects.get(parent): parent = redirect subsegment = trees.setdefault((project_and_trace, parent), []) if parent != span.span_id: subsegment.extend(trees.pop((project_and_trace, span.span_id), [])) trace_redirects[span.span_id] = parent subsegment.append(span) return trees def _prepare_payloads(self, spans: list[Span]) -> dict[str | bytes, float]: if self._zstd_compressor is None: return {span.payload: span.end_timestamp for span in spans} combined = b"\x00".join(span.payload for span in spans) original_size = len(combined) with metrics.timer("spans.buffer.compression.cpu_time"): compressed = self._zstd_compressor.compress(combined) compressed_size = len(compressed) compression_ratio = compressed_size / original_size if original_size > 0 else 0 metrics.timing("spans.buffer.compression.original_size", original_size) metrics.timing("spans.buffer.compression.compressed_size", compressed_size) metrics.timing("spans.buffer.compression.compression_ratio", compression_ratio) min_timestamp = min(span.end_timestamp for span in spans) return {compressed: min_timestamp} def _decompress_batch(self, compressed_data: bytes) -> list[bytes]: # Check for zstd magic header (0xFD2FB528 in little-endian) -- # backwards compat with code that did not write compressed payloads. with metrics.timer("spans.buffer.decompression.cpu_time"): if not compressed_data.startswith(b"\x28\xb5\x2f\xfd"): return [compressed_data] decompressed_buffer = self._zstd_decompressor.decompress(compressed_data) return decompressed_buffer.split(b"\x00") def record_stored_segments(self): with metrics.timer("spans.buffer.get_stored_segments"): with self.client.pipeline(transaction=False) as p: for shard in self.assigned_shards: key = self._get_queue_key(shard) p.zcard(key) result = p.execute() assert len(result) == len(self.assigned_shards) for shard_i, queue_size in zip(self.assigned_shards, result): metrics.timing( "spans.buffer.flush_segments.queue_size", queue_size, tags={"shard_i": shard_i}, ) def get_memory_info(self) -> Generator[ServiceMemory]: return iter_cluster_memory_usage(self.client) def flush_segments(self, now: int) -> dict[SegmentKey, FlushedSegment]: cutoff = now queue_keys = [] shard_factor = max(1, len(self.assigned_shards)) max_flush_segments = options.get("spans.buffer.max-flush-segments") max_segments_per_shard = math.ceil(max_flush_segments / shard_factor) with metrics.timer("spans.buffer.flush_segments.load_segment_ids"): with self.client.pipeline(transaction=False) as p: for shard in self.assigned_shards: key = self._get_queue_key(shard) p.zrangebyscore(key, 0, cutoff, start=0, num=max_segments_per_shard) queue_keys.append(key) result = p.execute() segment_keys: list[tuple[int, QueueKey, SegmentKey]] = [] for shard, queue_key, keys in zip(self.assigned_shards, queue_keys, result): for segment_key in keys: segment_keys.append((shard, queue_key, segment_key)) with metrics.timer("spans.buffer.flush_segments.load_segment_data"): segments = self._load_segment_data([k for _, _, k in segment_keys]) return_segments = {} num_has_root_spans = 0 any_shard_at_limit = False for shard, queue_key, segment_key in segment_keys: segment_span_id = _segment_key_to_span_id(segment_key).decode("ascii") segment = segments.get(segment_key, []) if 
len(segment) >= max_segments_per_shard: any_shard_at_limit = True output_spans = [] has_root_span = False metrics.timing("spans.buffer.flush_segments.num_spans_per_segment", len(segment)) # This incr metric is needed to get a rate overall. metrics.incr("spans.buffer.flush_segments.count_spans_per_segment", amount=len(segment)) for payload in segment: span = orjson.loads(payload) if not attribute_value(span, "sentry.segment.id"): span.setdefault("attributes", {})["sentry.segment.id"] = { "type": "string", "value": segment_span_id, } is_segment = segment_span_id == span["span_id"] span["is_segment"] = is_segment if is_segment: has_root_span = True output_spans.append(OutputSpan(payload=span)) metrics.incr( "spans.buffer.flush_segments.num_segments_per_shard", tags={"shard_i": shard} ) return_segments[segment_key] = FlushedSegment(queue_key=queue_key, spans=output_spans) num_has_root_spans += int(has_root_span) metrics.timing("spans.buffer.flush_segments.num_segments", len(return_segments)) metrics.timing("spans.buffer.flush_segments.has_root_span", num_has_root_spans) self.any_shard_at_limit = any_shard_at_limit return return_segments def _load_segment_data(self, segment_keys: list[SegmentKey]) -> dict[SegmentKey, list[bytes]]: """ Loads the segments from Redis, given a list of segment keys. Segments exceeding a certain size are skipped, and an error is logged. :param segment_keys: List of segment keys to load. :return: Dictionary mapping segment keys to lists of span payloads. """ page_size = options.get("spans.buffer.segment-page-size") max_segment_bytes = options.get("spans.buffer.max-segment-bytes") payloads: dict[SegmentKey, list[bytes]] = {key: [] for key in segment_keys} cursors = {key: 0 for key in segment_keys} sizes = {key: 0 for key in segment_keys} while cursors: with self.client.pipeline(transaction=False) as p: current_keys = [] for key, cursor in cursors.items(): if key.startswith(b"span-buf:z:"): p.zscan(key, cursor=cursor, count=page_size) else: p.sscan(key, cursor=cursor, count=page_size) current_keys.append(key) results = p.execute() for key, (cursor, scan_values) in zip(current_keys, results): decompressed_spans = [] for scan_value in scan_values: span_data = scan_value[0] if isinstance(scan_value, tuple) else scan_value decompressed_spans.extend(self._decompress_batch(span_data)) sizes[key] += sum(len(span) for span in decompressed_spans) if sizes[key] > max_segment_bytes: metrics.incr("spans.buffer.flush_segments.segment_size_exceeded") logger.warning("Skipping too large segment, byte size %s", sizes[key]) del payloads[key] del cursors[key] continue payloads[key].extend(decompressed_spans) if cursor == 0: del cursors[key] else: cursors[key] = cursor for key, spans in payloads.items(): if not spans: # This is a bug, most likely the input topic is not # partitioned by trace_id so multiple consumers are writing # over each other. The consequence is duplicated segments, # worst-case. 
metrics.incr("spans.buffer.empty_segments") return payloads def done_flush_segments(self, segment_keys: dict[SegmentKey, FlushedSegment]): metrics.timing("spans.buffer.done_flush_segments.num_segments", len(segment_keys)) with metrics.timer("spans.buffer.done_flush_segments"): with self.client.pipeline(transaction=False) as p: for segment_key, flushed_segment in segment_keys.items(): p.delete(b"span-buf:hrs:" + segment_key) p.unlink(segment_key) p.zrem(flushed_segment.queue_key, segment_key) project_id, trace_id, _ = parse_segment_key(segment_key) redirect_map_key = b"span-buf:sr:{%s:%s}" % (project_id, trace_id) for span_batch in itertools.batched(flushed_segment.spans, 100): p.hdel( redirect_map_key, *[output_span.payload["span_id"] for output_span in span_batch], ) p.execute()
SpansBuffer
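A hedged sketch of the grouping idea in `_group_by_parent`, using bare span ids instead of real Span objects: each processed span records a redirect to its top-most known parent, so parent chains collapse into a single subsegment.

spans = [("b", "a"), ("c", "b"), ("d", "c")]   # (span_id, parent_id)
trees, redirects = {}, {}
for span_id, parent in spans:
    while parent in redirects:                 # follow redirects to the top-most parent
        parent = redirects[parent]
    bucket = trees.setdefault(parent, [])
    if parent != span_id:
        bucket.extend(trees.pop(span_id, []))  # re-parent anything already grouped under us
        redirects[span_id] = parent
    bucket.append(span_id)
print(trees)  # {'a': ['b', 'c', 'd']} -- the whole chain lands under "a"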
python
PrefectHQ__prefect
tests/server/orchestration/api/test_flow_run_states.py
{ "start": 961, "end": 1476 }
class ____: async def test_read_flow_run_state( self, flow_run, flow_run_states, client, session, ): response = await client.get( "/flow_run_states/", params=dict(flow_run_id=str(flow_run.id)) ) assert response.status_code == status.HTTP_200_OK response_state_ids = {state["id"] for state in response.json()} assert response_state_ids == set([str(state.id) for state in flow_run_states])
TestReadFlowRunStateByFlowRunId
python
dagster-io__dagster
python_modules/libraries/dagster-airlift/dagster_airlift/core/serialization/compute.py
{ "start": 1076, "end": 4887 }
class ____: asset_specs: Iterable[AssetSpec] @cached_property def mapped_task_asset_specs(self) -> list[AssetSpec]: return [spec for spec in self.asset_specs if is_task_mapped_asset_spec(spec)] @cached_property def mapped_dag_asset_specs(self) -> list[AssetSpec]: return [spec for spec in self.asset_specs if is_dag_mapped_asset_spec(spec)] @cached_property def dag_ids(self) -> set[str]: return set(self.all_mapped_asset_keys_by_dag_id.keys()) @cached_property def task_id_map(self) -> dict[str, set[str]]: """Mapping of dag_id to set of task_ids in that dag. This only contains task ids mapped to assets in this object.""" task_id_map_data = { dag_id: set(ta_map.keys()) for dag_id, ta_map in self.asset_keys_by_mapped_task_id.items() } return defaultdict(set, task_id_map_data) @cached_property def all_mapped_asset_keys_by_dag_id(self) -> dict[str, set[AssetKey]]: """Mapping of dag_id to set of asset_keys which are materialized by that dag. If assets within the dag are mapped to individual tasks, all of those assets will be included in this set. If the dag itself is mapped to a set of assets, those assets will be included in this set. """ asset_keys_in_dag_by_id = defaultdict(set) for dag_id, task_to_asset_map in self.asset_keys_by_mapped_task_id.items(): for asset_keys in task_to_asset_map.values(): asset_keys_in_dag_by_id[dag_id].update(asset_keys) for dag_id, asset_keys in self.asset_keys_by_mapped_dag_id.items(): asset_keys_in_dag_by_id[dag_id].update(asset_keys) return defaultdict(set, asset_keys_in_dag_by_id) @cached_property def asset_keys_by_mapped_task_id(self) -> dict[str, dict[str, set[AssetKey]]]: """Mapping of dag_id to task_id to set of asset_keys mapped from that task.""" asset_key_map: dict[str, dict[str, set[AssetKey]]] = defaultdict(lambda: defaultdict(set)) for spec in self.asset_specs: if is_task_mapped_asset_spec(spec): for task_handle in task_handles_for_spec(spec): asset_key_map[task_handle.dag_id][task_handle.task_id].add(spec.key) return asset_key_map @cached_property def asset_keys_by_mapped_dag_id(self) -> dict[str, set[AssetKey]]: """Mapping of dag_id to set of asset_keys mapped from that dag.""" asset_key_map: dict[str, set[AssetKey]] = defaultdict(set) for spec in self.asset_specs: if is_dag_mapped_asset_spec(spec): for dag_handle in dag_handles_for_spec(spec): asset_key_map[dag_handle.dag_id].add(spec.key) return asset_key_map @cached_property def task_handle_map(self) -> dict[AssetKey, set[TaskHandle]]: task_handle_map = defaultdict(set) for dag_id, asset_key_by_task_id in self.asset_keys_by_mapped_task_id.items(): for task_id, asset_keys in asset_key_by_task_id.items(): for asset_key in asset_keys: task_handle_map[asset_key].add(TaskHandle(dag_id=dag_id, task_id=task_id)) return task_handle_map @cached_property def downstream_deps(self) -> dict[AssetKey, set[AssetKey]]: downstreams = defaultdict(set) for spec in self.asset_specs: for dep in spec.deps: downstreams[dep.asset_key].add(spec.key) return downstreams def build_airlift_metadata_mapping_info( mapped_assets: Iterable["MappedAsset"], ) -> AirliftMetadataMappingInfo: asset_specs = list(spec_iterator(mapped_assets)) return AirliftMetadataMappingInfo(asset_specs=asset_specs) @record
AirliftMetadataMappingInfo
python
optuna__optuna
optuna/storages/journal/_file.py
{ "start": 5035, "end": 8336 }
class ____(BaseJournalFileLock): """Lock class for synchronizing processes for NFSv2 or later. On acquiring the lock, link system call is called to create an exclusive file. The file is deleted when the lock is released. In NFS environments prior to NFSv3, use this instead of :class:`~optuna.storages.journal.JournalFileOpenLock`. Args: filepath: The path of the file whose race condition must be protected. grace_period: Grace period before an existing lock is forcibly released. """ def __init__(self, filepath: str, grace_period: int | None = 30) -> None: self._lock_target_file = filepath self._lock_file = filepath + LOCK_FILE_SUFFIX if grace_period is not None: if grace_period <= 0: raise ValueError("The value of `grace_period` should be a positive integer.") if grace_period < 3: optuna_warn("The value of `grace_period` might be too small. ") self.grace_period = grace_period def acquire(self) -> bool: """Acquire a lock in a blocking way by creating a symbolic link of a file. Returns: :obj:`True` if it succeeded in creating a symbolic link of ``self._lock_target_file``. """ sleep_secs = 0.001 last_update_monotonic_time = time.monotonic() mtime = None while True: try: os.symlink(self._lock_target_file, self._lock_file) return True except OSError as err: if err.errno == errno.EEXIST: if self.grace_period is not None: try: current_mtime = os.stat(self._lock_file).st_mtime except OSError: continue if current_mtime != mtime: mtime = current_mtime last_update_monotonic_time = time.monotonic() if time.monotonic() - last_update_monotonic_time > self.grace_period: optuna_warn( "The existing lock file has not been released " "for an extended period. Forcibly releasing the lock file." ) try: self.release() sleep_secs = 0.001 except RuntimeError: continue time.sleep(sleep_secs) sleep_secs = min(sleep_secs * 2, 1) continue raise err except BaseException: self.release() raise def release(self) -> None: """Release a lock by removing the symbolic link.""" lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX try: os.rename(self._lock_file, lock_rename_file) os.unlink(lock_rename_file) except OSError: raise RuntimeError("Error: did not possess lock") except BaseException: os.unlink(lock_rename_file) raise
JournalFileSymlinkLock
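A minimal usage sketch for the lock above; the path is illustrative and must be writable. The lock file is the protected path plus LOCK_FILE_SUFFIX, created as a symlink on acquire and removed on release.

lock = JournalFileSymlinkLock("/tmp/journal.log", grace_period=30)
if lock.acquire():            # blocks until the symlink can be created
    try:
        pass                  # critical section: mutate the journal file
    finally:
        lock.release()        # rename + unlink removes the symlink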
python
TheAlgorithms__Python
data_structures/queues/linked_queue.py
{ "start": 334, "end": 3708 }
class ____: """ >>> queue = LinkedQueue() >>> queue.is_empty() True >>> queue.put(5) >>> queue.put(9) >>> queue.put('python') >>> queue.is_empty() False >>> queue.get() 5 >>> queue.put('algorithms') >>> queue.get() 9 >>> queue.get() 'python' >>> queue.get() 'algorithms' >>> queue.is_empty() True >>> queue.get() Traceback (most recent call last): ... IndexError: dequeue from empty queue """ def __init__(self) -> None: self.front: Node | None = None self.rear: Node | None = None def __iter__(self) -> Iterator[Any]: node = self.front while node: yield node.data node = node.next def __len__(self) -> int: """ >>> queue = LinkedQueue() >>> for i in range(1, 6): ... queue.put(i) >>> len(queue) 5 >>> for i in range(1, 6): ... assert len(queue) == 6 - i ... _ = queue.get() >>> len(queue) 0 """ return len(tuple(iter(self))) def __str__(self) -> str: """ >>> queue = LinkedQueue() >>> for i in range(1, 4): ... queue.put(i) >>> queue.put("Python") >>> queue.put(3.14) >>> queue.put(True) >>> str(queue) '1 <- 2 <- 3 <- Python <- 3.14 <- True' """ return " <- ".join(str(item) for item in self) def is_empty(self) -> bool: """ >>> queue = LinkedQueue() >>> queue.is_empty() True >>> for i in range(1, 6): ... queue.put(i) >>> queue.is_empty() False """ return len(self) == 0 def put(self, item: Any) -> None: """ >>> queue = LinkedQueue() >>> queue.get() Traceback (most recent call last): ... IndexError: dequeue from empty queue >>> for i in range(1, 6): ... queue.put(i) >>> str(queue) '1 <- 2 <- 3 <- 4 <- 5' """ node = Node(item) if self.is_empty(): self.front = self.rear = node else: assert isinstance(self.rear, Node) self.rear.next = node self.rear = node def get(self) -> Any: """ >>> queue = LinkedQueue() >>> queue.get() Traceback (most recent call last): ... IndexError: dequeue from empty queue >>> queue = LinkedQueue() >>> for i in range(1, 6): ... queue.put(i) >>> for i in range(1, 6): ... assert queue.get() == i >>> len(queue) 0 """ if self.is_empty(): raise IndexError("dequeue from empty queue") assert isinstance(self.front, Node) node = self.front self.front = self.front.next if self.front is None: self.rear = None return node.data def clear(self) -> None: """ >>> queue = LinkedQueue() >>> for i in range(1, 6): ... queue.put(i) >>> queue.clear() >>> len(queue) 0 >>> str(queue) '' """ self.front = self.rear = None if __name__ == "__main__": from doctest import testmod testmod()
LinkedQueue
python
huggingface__transformers
src/transformers/models/deformable_detr/modeling_deformable_detr.py
{ "start": 44589, "end": 51139 }
class ____(DeformableDetrPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a [`DeformableDetrEncoderLayer`]. The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers. Args: config: DeformableDetrConfig """ def __init__(self, config: DeformableDetrConfig): super().__init__(config) self.gradient_checkpointing = False self.dropout = config.dropout self.layers = nn.ModuleList([DeformableDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) # Initialize weights and apply final processing self.post_init() @staticmethod def get_reference_points(spatial_shapes, valid_ratios, device): """ Get reference points for each feature map. Used in decoder. Args: spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Valid ratios of each feature map. device (`torch.device`): Device on which to create the tensors. Returns: `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)` """ reference_points_list = [] for level, (height, width) in enumerate(spatial_shapes): ref_y, ref_x = meshgrid( torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device), torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device), indexing="ij", ) # TODO: valid_ratios could be useless here. check https://github.com/fundamentalvision/Deformable-DETR/issues/36 ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height) ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width) ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) reference_points = reference_points[:, :, None] * valid_ratios[:, None] return reference_points def forward( self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Position embeddings that are added to the queries and keys in each self-attention layer. spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`): Starting index of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) spatial_shapes_tuple = tuple(spatial_shapes_list) reference_points = self.get_reference_points(spatial_shapes_tuple, valid_ratios, device=inputs_embeds.device) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions, )
DeformableDetrEncoder
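A numeric check of `get_reference_points` on a single 2x2 feature level with batch size one and fully valid ratios. It assumes the module's own imports (torch and its `meshgrid` helper) are in scope; since the method is static, no config or model instance is needed.

import torch

spatial_shapes = [(2, 2)]
valid_ratios = torch.ones(1, 1, 2)   # (batch, num_levels, 2)
ref = DeformableDetrEncoder.get_reference_points(spatial_shapes, valid_ratios, device="cpu")
print(ref.shape)       # torch.Size([1, 4, 1, 2])
print(ref[0, :, 0])    # pixel centers normalized to 0.25 / 0.75 on each axis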
python
getsentry__sentry
src/sentry/integrations/vsts/integration.py
{ "start": 29857, "end": 30256 }
class ____(forms.Form): def __init__(self, accounts: Sequence[Mapping[str, str]], *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) self.fields["account"] = forms.ChoiceField( choices=[(acct["accountId"], acct["accountName"]) for acct in accounts], label="Account", help_text="Azure DevOps organization.", )
AccountForm
python
mwaskom__seaborn
seaborn/_base.py
{ "start": 2821, "end": 10211 }
class ____(SemanticMapping): """Mapping that sets artist colors according to data values.""" # A specification of the colors that should appear in the plot palette = None # An object that normalizes data values to [0, 1] range for color mapping norm = None # A continuous colormap object for interpolating in a numeric context cmap = None def __init__( self, plotter, palette=None, order=None, norm=None, saturation=1, ): """Map the levels of the `hue` variable to distinct colors. Parameters ---------- # TODO add generic parameters """ super().__init__(plotter) data = plotter.plot_data.get("hue", pd.Series(dtype=float)) if isinstance(palette, np.ndarray): msg = ( "Numpy array is not a supported type for `palette`. " "Please convert your palette to a list. " "This will become an error in v0.14" ) warnings.warn(msg, stacklevel=4) palette = palette.tolist() if data.isna().all(): if palette is not None: msg = "Ignoring `palette` because no `hue` variable has been assigned." warnings.warn(msg, stacklevel=4) else: map_type = self.infer_map_type( palette, norm, plotter.input_format, plotter.var_types["hue"] ) # Our goal is to end up with a dictionary mapping every unique # value in `data` to a color. We will also keep track of the # metadata about this mapping we will need for, e.g., a legend # --- Option 1: numeric mapping with a matplotlib colormap if map_type == "numeric": data = pd.to_numeric(data) levels, lookup_table, norm, cmap = self.numeric_mapping( data, palette, norm, ) # --- Option 2: categorical mapping using seaborn palette elif map_type == "categorical": cmap = norm = None levels, lookup_table = self.categorical_mapping( data, palette, order, ) # --- Option 3: datetime mapping else: # TODO this needs actual implementation cmap = norm = None levels, lookup_table = self.categorical_mapping( # Casting data to list to handle differences in the way # pandas and numpy represent datetime64 data list(data), palette, order, ) self.saturation = saturation self.map_type = map_type self.lookup_table = lookup_table self.palette = palette self.levels = levels self.norm = norm self.cmap = cmap def _lookup_single(self, key): """Get the color for a single value, using colormap to interpolate.""" try: # Use a value that's in the original data vector value = self.lookup_table[key] except KeyError: if self.norm is None: # Currently we only get here in scatterplot with hue_order, # because scatterplot does not consider hue a grouping variable # So unused hue levels are in the data, but not the lookup table return (0, 0, 0, 0) # Use the colormap to interpolate between existing datapoints # (e.g. 
in the context of making a continuous legend) try: normed = self.norm(key) except TypeError as err: if np.isnan(key): value = (0, 0, 0, 0) else: raise err else: if np.ma.is_masked(normed): normed = np.nan value = self.cmap(normed) if self.saturation < 1: value = desaturate(value, self.saturation) return value def infer_map_type(self, palette, norm, input_format, var_type): """Determine how to implement the mapping.""" if palette in QUAL_PALETTES: map_type = "categorical" elif norm is not None: map_type = "numeric" elif isinstance(palette, (dict, list)): map_type = "categorical" elif input_format == "wide": map_type = "categorical" else: map_type = var_type return map_type def categorical_mapping(self, data, palette, order): """Determine colors when the hue mapping is categorical.""" # -- Identify the order and name of the levels levels = categorical_order(data, order) n_colors = len(levels) # -- Identify the set of colors to use if isinstance(palette, dict): missing = set(levels) - set(palette) if any(missing): err = "The palette dictionary is missing keys: {}" raise ValueError(err.format(missing)) lookup_table = palette else: if palette is None: if n_colors <= len(get_color_cycle()): colors = color_palette(None, n_colors) else: colors = color_palette("husl", n_colors) elif isinstance(palette, list): colors = self._check_list_length(levels, palette, "palette") else: colors = color_palette(palette, n_colors) lookup_table = dict(zip(levels, colors)) return levels, lookup_table def numeric_mapping(self, data, palette, norm): """Determine colors when the hue variable is quantitative.""" if isinstance(palette, dict): # The presence of a norm object overrides a dictionary of hues # in specifying a numeric mapping, so we need to process it here. levels = list(sorted(palette)) colors = [palette[k] for k in sorted(palette)] cmap = mpl.colors.ListedColormap(colors) lookup_table = palette.copy() else: # The levels are the sorted unique values in the data levels = list(np.sort(remove_na(data.unique()))) # --- Sort out the colormap to use from the palette argument # Default numeric palette is our default cubehelix palette # TODO do we want to do something complicated to ensure contrast? palette = "ch:" if palette is None else palette if isinstance(palette, mpl.colors.Colormap): cmap = palette else: cmap = color_palette(palette, as_cmap=True) # Now sort out the data normalization if norm is None: norm = mpl.colors.Normalize() elif isinstance(norm, tuple): norm = mpl.colors.Normalize(*norm) elif not isinstance(norm, mpl.colors.Normalize): err = "``hue_norm`` must be None, tuple, or Normalize object." raise ValueError(err) if not norm.scaled(): norm(np.asarray(data.dropna())) lookup_table = dict(zip(levels, cmap(norm(levels)))) return levels, lookup_table, norm, cmap
HueMapping
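A hedged sketch of the two main branches from the user's side: a string column takes the categorical path, while a numeric hue with an explicit norm is forced down the numeric (colormap) path by `infer_map_type`.

import seaborn as sns

tips = sns.load_dataset("tips")   # requires network access on first call
sns.scatterplot(data=tips, x="total_bill", y="tip", hue="day")                    # categorical mapping
sns.scatterplot(data=tips, x="total_bill", y="tip", hue="size", hue_norm=(1, 6))  # numeric mapping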
python
spyder-ide__spyder
spyder/utils/svg_colorizer.py
{ "start": 399, "end": 11717 }
class ____: """ A class for modifying SVG files by changing the fill colors of elements with specific class attributes. This implementation uses lxml for XML parsing and XPath for element selection, providing a reliable and maintainable way to manipulate SVG files. The main purpose of this class is to allow theme-based colorization of SVG icons in Spyder. Icons can contain elements with special class names that correspond to theme color variables, allowing them to adapt to different UI themes. """ # Define the SVG namespace SVG_NAMESPACE = {"svg": "http://www.w3.org/2000/svg"} def __init__(self, svg_path): """ Initialize the instance with the path to an SVG file. Parameters ---------- svg_path : str Path to the SVG file to be colorized """ self.tree = None self.root = None try: log.debug(f"Parsing SVG file: {svg_path}") self.tree = etree.parse(svg_path) self.root = self.tree.getroot() except etree.XMLSyntaxError as e: log.error(f"Error parsing SVG file {svg_path}: {str(e)}") except FileNotFoundError as e: log.error(f"SVG file not found {svg_path}: {str(e)}") except Exception as e: log.error(f"Unexpected error with SVG file {svg_path}: {str(e)}") def _find_elements_by_class(self, class_name): """ Find all SVG elements with the specified class. Parameters ---------- class_name : str The class attribute value to search for Returns ------- list List of elements with the specified class """ if self.root is None: return [] try: return self.root.xpath( f"//svg:*[@class='{class_name}']", namespaces=self.SVG_NAMESPACE ) except Exception as e: log.error( f"Error finding elements with class '{class_name}': {str(e)}" ) return [] def save_to_string(self): """ Convert the modified SVG to a string. Returns ------- str or None The SVG as a string, or None if there was an error """ if self.root is None: log.warning("No SVG data to save to string.") return None try: # Use XML declaration to ensure proper rendering xml_declaration = ( '<?xml version="1.0" encoding="UTF-8" standalone="no"?>' ) svg_string = etree.tostring( self.root, encoding="utf-8", pretty_print=True ).decode("utf-8") # Add XML declaration if not present if not svg_string.startswith('<?xml'): svg_string = f"{xml_declaration}\n{svg_string}" return svg_string except Exception as e: log.error(f"Error converting SVG to string: {str(e)}") return None def save_to_file(self, output_path): """ Save the modified SVG to a file. Parameters ---------- output_path : str Path where the modified SVG will be saved Returns ------- bool True if file was saved successfully, False otherwise """ if self.tree is None: log.warning("No SVG data to save to file.") return False if not output_path: log.warning("Empty output path provided.") return False try: self.tree.write(output_path, pretty_print=True, encoding="utf-8") log.debug(f"SVG file saved to {output_path}") return True except Exception as e: log.error(f"Error saving SVG to file {output_path}: {str(e)}") return False def extract_colored_paths(self, theme_colors): """ Extract SVG paths with their associated colors from the theme. Instead of modifying the SVG in place, this method returns a structured representation of the SVG with paths and their colors for external rendering. This approach enables direct rendering of SVG elements with proper colorization without modifying the original SVG file. Parameters ---------- theme_colors : dict Dictionary mapping color names (class attributes) to hex color values. 
Example: {'ICON_1': '#FF0000', 'ICON_2': '#00FF00'} Returns ------- dict or None A dictionary containing SVG metadata and colored paths: { 'viewbox': str or None, # SVG viewBox attribute 'width': int, # SVG width 'height': int, # SVG height 'paths': [ # List of paths with their colors { 'path_data': str, # The SVG path data 'color': str, # The hex color for this path 'attrs': dict # Original attributes except 'class' }, ... ] } Returns None if there was an error """ if self.root is None: log.warning("No SVG data to extract paths from.") return None if not theme_colors: log.warning("Empty theme colors dictionary provided.") return None try: # Get SVG dimensions width = int(float(self.root.get('width', '24'))) height = int(float(self.root.get('height', '24'))) viewbox = self.root.get('viewBox') # Result structure result = { 'viewbox': viewbox, 'width': width, 'height': height, 'paths': [] } # Find all path elements paths = self.root.xpath("//svg:path", namespaces=self.SVG_NAMESPACE) # Default color if no match default_color = theme_colors.get('ICON_1', '#FAFAFA') # Process each path for path in paths: # Get path data path_data = path.get('d', '') if not path_data: continue # Extract class to determine color class_attr = path.get('class') # Determine color based on class color = default_color if class_attr and class_attr in theme_colors: color = theme_colors[class_attr] # Get all attributes except class attrs = {k: v for k, v in path.items() if k != 'class'} # Add to result result['paths'].append({ 'path_data': path_data, 'color': color, 'attrs': attrs }) return result except Exception as e: log.error(f"Error extracting colored paths: {str(e)}") return None @classmethod def get_colored_paths(cls, icon_path, theme_colors, debug=False): """ Class method to extract colored paths from an SVG icon. This method provides a structured representation of the SVG with paths and their colors based on the theme, which can be used for direct rendering without modifying the original SVG. Parameters ---------- icon_path : str Path to the SVG file theme_colors : dict Dictionary mapping color names (class attributes) to hex color values. Example: {'ICON_1': '#FF0000', 'ICON_2': '#00FF00'} Returns ------- dict or None A dictionary containing SVG metadata and colored paths. See extract_colored_paths for the structure. """ if debug: log.debug(f"Extracting colored paths from SVG: {icon_path}") # Create a new colorizer instance icon = cls(icon_path) if icon.root is None: return None return icon.extract_colored_paths(theme_colors) def render_colored_svg(self, paths, size, width, height, viewbox=None): """ Render colored SVG paths to a pixmap. 
Parameters ---------- paths : list List of path dictionaries with 'path_data' and 'color' size : int Size of the pixmap to create (used as the maximum dimension) width : int Original SVG width height : int Original SVG height viewbox : str or None SVG viewBox attribute if available Returns ------- QPixmap A pixmap with all paths rendered with their respective colors """ # Calculate proper dimensions preserving aspect ratio aspect_ratio = width / height if width > height: # Width is larger, use size as width pixmap_width = size pixmap_height = int(size / aspect_ratio) else: # Height is larger or equal, use size as height pixmap_height = size pixmap_width = int(size * aspect_ratio) # Create transparent pixmap for the icon with proper aspect ratio pixmap = QPixmap(pixmap_width, pixmap_height) pixmap.fill(QColor(0, 0, 0, 0)) # Transparent # Painter for compositing all parts painter = QPainter(pixmap) painter.setRenderHint(QPainter.Antialiasing) # Process each path for path_data in paths: path_d = path_data.get('path_data', '') color = QColor(path_data.get('color', '#FAFAFA')) if not path_d: continue # Create a temporary SVG with just this path svg_template = ( f'<svg xmlns="http://www.w3.org/2000/svg" ' f'width="{width}" height="{height}"' ) # Add viewBox if available if viewbox: svg_template += f' viewBox="{viewbox}"' svg_template += f'><path d="{path_d}"/></svg>' # Render the path and apply color temp_bytes = QByteArray(svg_template.encode('utf-8')) temp_pixmap = QPixmap(pixmap_width, pixmap_height) temp_pixmap.fill(QColor(0, 0, 0, 0)) # Transparent # Render the path temp_renderer = QSvgRenderer(temp_bytes) temp_painter = QPainter(temp_pixmap) temp_renderer.render(temp_painter) temp_painter.end() # Apply color to the path temp_painter = QPainter(temp_pixmap) temp_painter.setCompositionMode(QPainter.CompositionMode_SourceIn) temp_painter.fillRect(temp_pixmap.rect(), color) temp_painter.end() # Composite this path onto the main pixmap painter.drawPixmap(0, 0, temp_pixmap) # Finish compositing painter.end() return pixmap
SVGColorize
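A minimal sketch of the class-method entry point above; "icon.svg" is a placeholder whose path elements carry class="ICON_1" / class="ICON_2" attributes as the docstrings describe.

theme = {"ICON_1": "#FF0000", "ICON_2": "#00FF00"}
info = SVGColorize.get_colored_paths("icon.svg", theme)
if info is not None:
    print(info["width"], info["height"], info["viewbox"])
    for path in info["paths"]:
        print(path["color"], path["path_data"][:40])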
python
anthropics__anthropic-sdk-python
src/anthropic/types/message_delta_usage.py
{ "start": 230, "end": 816 }
class ____(BaseModel): cache_creation_input_tokens: Optional[int] = None """The cumulative number of input tokens used to create the cache entry.""" cache_read_input_tokens: Optional[int] = None """The cumulative number of input tokens read from the cache.""" input_tokens: Optional[int] = None """The cumulative number of input tokens which were used.""" output_tokens: int """The cumulative number of output tokens which were used.""" server_tool_use: Optional[ServerToolUsage] = None """The number of server tool requests."""
MessageDeltaUsage
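A hedged sketch of where this model surfaces: `message_delta` events in a streaming response carry cumulative usage in this shape. The model name is illustrative.

from anthropic import Anthropic

client = Anthropic()
with client.messages.stream(
    model="claude-3-5-sonnet-latest",
    max_tokens=64,
    messages=[{"role": "user", "content": "Hello"}],
) as stream:
    for event in stream:
        if event.type == "message_delta":
            print(event.usage.output_tokens)   # cumulative output tokens so far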
python
pypa__warehouse
tests/unit/email/test_init.py
{ "start": 63295, "end": 66868 }
class ____: def test_send_new_organization_moreinformationneeded_email( self, pyramid_request, pyramid_config, monkeypatch ): initiator_user = pretend.stub( id="id", username="username", name="", email="email@example.com", primary_email=pretend.stub(email="email@example.com", verified=True), ) organization_name = "example" organization_application_id = "deadbeef-dead-beef-dead-beefdeadbeef" message = "example message" subject_renderer = pyramid_config.testing_add_renderer( "email/new-organization-moreinformationneeded/subject.txt" ) subject_renderer.string_response = "Email Subject" body_renderer = pyramid_config.testing_add_renderer( "email/new-organization-moreinformationneeded/body.txt" ) body_renderer.string_response = "Email Body" html_renderer = pyramid_config.testing_add_renderer( "email/new-organization-moreinformationneeded/body.html" ) html_renderer.string_response = "Email HTML Body" send_email = pretend.stub( delay=pretend.call_recorder(lambda *args, **kwargs: None) ) pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email) monkeypatch.setattr(email, "send_email", send_email) pyramid_request.db = pretend.stub( query=lambda a: pretend.stub( filter=lambda *a: pretend.stub( one=lambda: pretend.stub(user_id=initiator_user.id) ) ), ) pyramid_request.user = initiator_user pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"} result = email.send_new_organization_moreinformationneeded_email( pyramid_request, initiator_user, organization_name=organization_name, organization_application_id=organization_application_id, message=message, ) assert result == { "organization_name": organization_name, "organization_application_id": organization_application_id, "message": message, } subject_renderer.assert_( organization_name=organization_name, message=message, ) body_renderer.assert_( organization_name=organization_name, message=message, ) html_renderer.assert_( organization_name=organization_name, message=message, ) assert send_email.delay.calls == [ pretend.call( f"{initiator_user.username} <{initiator_user.email}>", { "sender": None, "subject": "Email Subject", "body_text": "Email Body", "body_html": ( "<html>\n<head></head>\n" "<body><p>Email HTML Body</p></body>\n</html>\n" ), }, { "tag": "account:email:sent", "user_id": initiator_user.id, "additional": { "from_": "noreply@example.com", "to": initiator_user.email, "subject": "Email Subject", "redact_ip": False, }, }, ) ]
TestSendNewOrganizationRequestMoreInfoEmail
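The test above builds everything from `pretend` stubs rather than Mock objects. The core recorder pattern it relies on, in isolation:

import pretend

# call_recorder wraps a callable and logs each invocation as a pretend.call
send_email = pretend.stub(delay=pretend.call_recorder(lambda *a, **kw: None))
send_email.delay("user <user@example.com>", subject="Hi")
assert send_email.delay.calls == [pretend.call("user <user@example.com>", subject="Hi")]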
python
streamlit__streamlit
lib/streamlit/config.py
{ "start": 2811, "end": 3558 }
class ____(str, Enum): """Valid options for the "client.showErrorDetails" config.""" FULL = "full" STACKTRACE = "stacktrace" TYPE = "type" NONE = "none" @staticmethod def is_true_variation(val: str | bool) -> bool: return val in ["true", "True", True] @staticmethod def is_false_variation(val: str | bool) -> bool: return val in ["false", "False", False] # Config options can be set from several places including the command-line and # the user's script. Legacy config options (true/false) will have type string # when set via command-line and bool when set via user script # (e.g. st.set_option("client.showErrorDetails", False)).
ShowErrorDetailsConfigOptions
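Because the enum above subclasses `str`, members compare equal to raw config strings, and the two helpers normalize the legacy boolean spellings that arrive as strings from the CLI or as bools from user code. With the class above in scope:

assert ShowErrorDetailsConfigOptions.FULL == "full"  # str subclass: plain comparison works
assert ShowErrorDetailsConfigOptions("stacktrace") is ShowErrorDetailsConfigOptions.STACKTRACE

# Legacy true/false values: str from the command line, bool from st.set_option
assert ShowErrorDetailsConfigOptions.is_true_variation("True")
assert ShowErrorDetailsConfigOptions.is_false_variation(False)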
python
airbytehq__airbyte
airbyte-integrations/connectors/source-gcs/source_gcs/config.py
{ "start": 1626, "end": 2831 }
class ____(AbstractFileBasedSpec, BaseModel): """ NOTE: When this Spec is changed, legacy_config_transformer.py must also be modified to pick up the changes, because it is responsible for converting legacy GCS configs into file-based configs using the File-Based CDK. """ credentials: Union[OAuthCredentials, ServiceAccountCredentials] = Field( title="Authentication", description="Credentials for connecting to the Google Cloud Storage API", type="object", discriminator="auth_type", order=0, ) bucket: str = Field(title="Bucket", description="Name of the GCS bucket where the file(s) exist.", order=2) delivery_method: Union[DeliverRecords, DeliverRawFiles] = Field( title="Delivery Method", discriminator="delivery_type", type="object", order=3, display_type="radio", group="advanced", default="use_records_transfer", airbyte_hidden=True, ) @classmethod def documentation_url(cls) -> AnyUrl: """ Returns the documentation URL. """ return AnyUrl("https://docs.airbyte.com/integrations/sources/gcs", scheme="https")
Config
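The `credentials` and `delivery_method` fields above rely on pydantic's discriminated unions: the `discriminator` key routes parsing to the matching model. A minimal sketch of the pattern with hypothetical models (not the connector's real classes; pydantic v1-style API):

from typing import Literal, Union
from pydantic import BaseModel, Field

class OAuth(BaseModel):
    auth_type: Literal["oauth"] = "oauth"
    access_token: str

class ServiceAccount(BaseModel):
    auth_type: Literal["service"] = "service"
    service_account_json: str

class Spec(BaseModel):
    # pydantic inspects "auth_type" in the payload to pick the submodel
    credentials: Union[OAuth, ServiceAccount] = Field(discriminator="auth_type")

spec = Spec.parse_obj({"credentials": {"auth_type": "service", "service_account_json": "{}"}})
assert isinstance(spec.credentials, ServiceAccount)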
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/methodOverride4.py
{ "start": 349, "end": 561 }
class ____(Generic[_TSource]): @abstractmethod def method1( self, mapper: Callable[[_TSource, _T1], _TResult], other: "BaseA[_T1]" ) -> "BaseA[_TResult]": raise NotImplementedError
BaseA
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/pipelines/models/steps.py
{ "start": 923, "end": 1645 }
class ____: path: Union[Path, str] optional: bool = False def _cast_fields(self) -> None: self.path = Path(self.path) self.optional = bool(self.optional) def _check_exists(self) -> None: if not self.get_path().exists(): message = f"{self.path} does not exist." if self.optional: main_logger.warning(message) else: raise FileNotFoundError(message) def get_path(self) -> Path: return Path(self.path) def __post_init__(self) -> None: self._cast_fields() self._check_exists() def __str__(self) -> str: return str(self.path) @dataclass(kw_only=True, frozen=True)
MountPath
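Validation happens eagerly in `__post_init__`, so a bad mount fails at construction time unless it is marked optional. A hedged usage sketch against the class above:

import tempfile

existing = tempfile.mkdtemp()

mp = MountPath(path=existing)        # str is cast to pathlib.Path
print(mp.get_path(), mp.optional)    # /tmp/... False

MountPath(path=existing + "/missing", optional=True)   # only logs a warning
try:
    MountPath(path=existing + "/missing")              # required: raises
except FileNotFoundError as exc:
    print(exc)  # ".../missing does not exist."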
python
allegroai__clearml
clearml/backend_api/services/v2_20/events.py
{ "start": 127146, "end": 128363 }
class ____(Request): """ Get single value metrics for the passed tasks :param tasks: List of task Task IDs :type tasks: Sequence[str] """ _service = "events" _action = "get_task_single_value_metrics" _version = "2.20" _schema = { "definitions": {}, "properties": { "tasks": { "description": "List of task Task IDs", "items": {"description": "Task ID", "type": "string"}, "type": "array", } }, "required": ["tasks"], "type": "object", } def __init__(self, tasks: List[str], **kwargs: Any) -> None: super(GetTaskSingleValueMetricsRequest, self).__init__(**kwargs) self.tasks = tasks @schema_property("tasks") def tasks(self) -> List[str]: return self._property_tasks @tasks.setter def tasks(self, value: List[str]) -> None: if value is None: self._property_tasks = None return self.assert_isinstance(value, "tasks", (list, tuple)) self.assert_isinstance(value, "tasks", six.string_types, is_array=True) self._property_tasks = value
GetTaskSingleValueMetricsRequest
python
encode__django-rest-framework
tests/test_fields.py
{ "start": 69055, "end": 69717 }
class ____(FieldValues): """ Valid and invalid values for a `Choice` field that uses a single paired or grouped. """ valid_inputs = { 'poor': 'poor', 'medium': 'medium', 'good': 'good', } invalid_inputs = { 'awful': ['"awful" is not a valid choice.'] } outputs = { 'good': 'good' } field = serializers.ChoiceField( choices=[ ( 'Category', ( ('poor', 'Poor quality'), ), ), 'medium', ('good', 'Good quality'), ] )
TestChoiceFieldWithMixedChoices
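Grouped and paired entries flatten into one key set for validation, which is what the fixtures above assert. The same behavior, reproduced directly:

from rest_framework import serializers

field = serializers.ChoiceField(
    choices=[
        ("Category", (("poor", "Poor quality"),)),  # grouped
        "medium",                                   # plain
        ("good", "Good quality"),                   # paired
    ]
)
assert field.run_validation("poor") == "poor"
try:
    field.run_validation("awful")
except serializers.ValidationError as exc:
    print(exc.detail)  # ['"awful" is not a valid choice.']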
python
scikit-learn__scikit-learn
sklearn/tests/test_common.py
{ "start": 2141, "end": 13101 }
class ____(BaseEstimator): """Dummy development stub for an estimator. This is to make sure a callable estimator passes common tests. """ def __call__(self): pass # pragma: nocover @pytest.mark.parametrize( "val, expected", [ (partial(_sample_func, y=1), "_sample_func(y=1)"), (_sample_func, "_sample_func"), (partial(_sample_func, "world"), "_sample_func"), (LogisticRegression(C=2.0), "LogisticRegression(C=2.0)"), ( LogisticRegression( random_state=1, solver="newton-cg", class_weight="balanced", warm_start=True, ), ( "LogisticRegression(class_weight='balanced',random_state=1," "solver='newton-cg',warm_start=True)" ), ), (CallableEstimator(), "CallableEstimator()"), ], ) def test_get_check_estimator_ids(val, expected): assert _get_check_estimator_ids(val) == expected @parametrize_with_checks( list(_tested_estimators()), expected_failed_checks=_get_expected_failed_checks ) def test_estimators(estimator, check, request): # Common tests for estimator instances with ignore_warnings( category=(FutureWarning, ConvergenceWarning, UserWarning, LinAlgWarning) ): check(estimator) @pytest.mark.filterwarnings( "ignore:Since version 1.0, it is not needed to import " "enable_hist_gradient_boosting anymore" ) @pytest.mark.thread_unsafe # import side-effects def test_import_all_consistency(): sklearn_path = [os.path.dirname(sklearn.__file__)] # Smoke test to check that any name in a __all__ list is actually defined # in the namespace of the module or package. pkgs = pkgutil.walk_packages( path=sklearn_path, prefix="sklearn.", onerror=lambda _: None ) submods = [modname for _, modname, _ in pkgs] for modname in submods + ["sklearn"]: if ".tests." in modname or "sklearn.externals" in modname: continue # Avoid test suite depending on build dependencies, for example Cython if "sklearn._build_utils" in modname: continue package = __import__(modname, fromlist="dummy") for name in getattr(package, "__all__", ()): assert hasattr(package, name), "Module '{0}' has no attribute '{1}'".format( modname, name ) def test_root_import_all_completeness(): sklearn_path = [os.path.dirname(sklearn.__file__)] EXCEPTIONS = ("utils", "tests", "base", "conftest") for _, modname, _ in pkgutil.walk_packages( path=sklearn_path, onerror=lambda _: None ): if "." in modname or modname.startswith("_") or modname in EXCEPTIONS: continue assert modname in sklearn.__all__ @pytest.mark.thread_unsafe # import side-effects def test_all_tests_are_importable(): # Ensure that for each contentful subpackage, there is a test directory # within it that is also a subpackage (i.e. a directory with __init__.py) HAS_TESTS_EXCEPTIONS = re.compile( r"""(?x) \.externals(\.|$)| \.tests(\.|$)| \._ """ ) resource_modules = { "sklearn.datasets.data", "sklearn.datasets.descr", "sklearn.datasets.images", } sklearn_path = [os.path.dirname(sklearn.__file__)] lookup = { name: ispkg for _, name, ispkg in pkgutil.walk_packages(sklearn_path, prefix="sklearn.") } missing_tests = [ name for name, ispkg in lookup.items() if ispkg and name not in resource_modules and not HAS_TESTS_EXCEPTIONS.search(name) and name + ".tests" not in lookup ] assert missing_tests == [], ( "{0} do not have `tests` subpackages. 
" "Perhaps they require " "__init__.py or a meson.build " "in the parent " "directory".format(missing_tests) ) def test_class_support_removed(): # Make sure passing classes to check_estimator or parametrize_with_checks # raises an error msg = "Passing a class was deprecated.* isn't supported anymore" with pytest.raises(TypeError, match=msg): check_estimator(LogisticRegression) with pytest.raises(TypeError, match=msg): parametrize_with_checks([LogisticRegression]) def _estimators_that_predict_in_fit(): for estimator in _tested_estimators(): est_params = set(estimator.get_params()) if "oob_score" in est_params: yield estimator.set_params(oob_score=True, bootstrap=True) elif "early_stopping" in est_params: est = estimator.set_params(early_stopping=True, n_iter_no_change=1) if est.__class__.__name__ in {"MLPClassifier", "MLPRegressor"}: # TODO: FIX MLP to not check validation set during MLP yield pytest.param( est, marks=pytest.mark.xfail(msg="MLP still validates in fit") ) else: yield est elif "n_iter_no_change" in est_params: yield estimator.set_params(n_iter_no_change=1) # NOTE: When running `check_dataframe_column_names_consistency` on a meta-estimator that # delegates validation to a base estimator, the check is testing that the base estimator # is checking for column name consistency. column_name_estimators = list( chain( _tested_estimators(), [make_pipeline(LogisticRegression(C=1))], _estimators_that_predict_in_fit(), ) ) @pytest.mark.parametrize( "estimator_orig", column_name_estimators, ids=_get_check_estimator_ids ) def test_pandas_column_name_consistency(estimator_orig): if isinstance(estimator_orig, ColumnTransformer): pytest.skip("ColumnTransformer is not tested here") if "check_dataframe_column_names_consistency" in _get_expected_failed_checks( estimator_orig ): pytest.skip( "Estimator does not support check_dataframe_column_names_consistency" ) for estimator in _yield_instances_for_check( check_dataframe_column_names_consistency, estimator_orig ): with ignore_warnings(category=(FutureWarning)): with warnings.catch_warnings(record=True) as record: check_dataframe_column_names_consistency( estimator.__class__.__name__, estimator ) for warning in record: assert "was fitted without feature names" not in str(warning.message) # TODO: As more modules support get_feature_names_out they should be removed # from this list to be tested GET_FEATURES_OUT_MODULES_TO_IGNORE = [ "ensemble", "kernel_approximation", ] def _include_in_get_feature_names_out_check(transformer): if hasattr(transformer, "get_feature_names_out"): return True module = transformer.__module__.split(".")[1] return module not in GET_FEATURES_OUT_MODULES_TO_IGNORE GET_FEATURES_OUT_ESTIMATORS = [ est for est in _tested_estimators("transformer") if _include_in_get_feature_names_out_check(est) ] @pytest.mark.parametrize( "transformer", GET_FEATURES_OUT_ESTIMATORS, ids=_get_check_estimator_ids ) def test_transformers_get_feature_names_out(transformer): with ignore_warnings(category=(FutureWarning)): check_transformer_get_feature_names_out( transformer.__class__.__name__, transformer ) check_transformer_get_feature_names_out_pandas( transformer.__class__.__name__, transformer ) ESTIMATORS_WITH_GET_FEATURE_NAMES_OUT = [ est for est in _tested_estimators() if hasattr(est, "get_feature_names_out") ] @pytest.mark.parametrize( "estimator", ESTIMATORS_WITH_GET_FEATURE_NAMES_OUT, ids=_get_check_estimator_ids ) def test_estimators_get_feature_names_out_error(estimator): estimator_name = estimator.__class__.__name__ 
check_get_feature_names_out_error(estimator_name, estimator) @pytest.mark.parametrize( "estimator", list(_tested_estimators()), ids=_get_check_estimator_ids ) def test_check_param_validation(estimator): if isinstance(estimator, FeatureUnion): pytest.skip("FeatureUnion is not tested here") name = estimator.__class__.__name__ check_param_validation(name, estimator) SET_OUTPUT_ESTIMATORS = list( chain( _tested_estimators("transformer"), [ make_pipeline(StandardScaler(), MinMaxScaler()), OneHotEncoder(sparse_output=False), FunctionTransformer(feature_names_out="one-to-one"), ], ) ) @pytest.mark.parametrize( "estimator_orig", SET_OUTPUT_ESTIMATORS, ids=_get_check_estimator_ids ) def test_set_output_transform(estimator_orig): name = estimator_orig.__class__.__name__ if not hasattr(estimator_orig, "set_output"): pytest.skip( f"Skipping check_set_output_transform for {name}: Does not support" " set_output API" ) for estimator in _yield_instances_for_check( check_set_output_transform, estimator_orig ): with ignore_warnings(category=(FutureWarning)): check_set_output_transform(estimator.__class__.__name__, estimator) @pytest.mark.parametrize( "estimator_orig", SET_OUTPUT_ESTIMATORS, ids=_get_check_estimator_ids ) @pytest.mark.parametrize( "check_func", [ check_set_output_transform_pandas, check_global_output_transform_pandas, check_set_output_transform_polars, check_global_set_output_transform_polars, ], ) def test_set_output_transform_configured(estimator_orig, check_func): name = estimator_orig.__class__.__name__ if not hasattr(estimator_orig, "set_output"): pytest.skip( f"Skipping {check_func.__name__} for {name}: Does not support" " set_output API yet" ) for estimator in _yield_instances_for_check(check_func, estimator_orig): with ignore_warnings(category=(FutureWarning)): check_func(estimator.__class__.__name__, estimator) @pytest.mark.parametrize( "estimator", _tested_estimators(), ids=_get_check_estimator_ids ) def test_check_inplace_ensure_writeable(estimator): name = estimator.__class__.__name__ if hasattr(estimator, "copy"): estimator.set_params(copy=False) elif hasattr(estimator, "copy_X"): estimator.set_params(copy_X=False) else: raise SkipTest(f"{name} doesn't require writeable input.") # The following estimators can work inplace only with certain settings if name == "HDBSCAN": estimator.set_params(metric="precomputed", algorithm="brute") if name == "PCA": estimator.set_params(svd_solver="full") if name == "KernelPCA": estimator.set_params(kernel="precomputed") check_inplace_ensure_writeable(name, estimator)
CallableEstimator
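The `parametrize_with_checks` harness used above is public API and works for third-party estimators too. Note it takes instances, not classes, since passing classes raises (as `test_class_support_removed` asserts):

from sklearn.linear_model import LogisticRegression
from sklearn.utils.estimator_checks import parametrize_with_checks

@parametrize_with_checks([LogisticRegression(max_iter=1000)])
def test_sklearn_compatible_estimator(estimator, check):
    check(estimator)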
python
bottlepy__bottle
test/test_multipart.py
{ "start": 37595, "end": 38333 }
class ____(BaseMultipartTest): def test_werkzeug_examples(self): """Tests multipart parsing against data collected from webbrowsers""" for name in browser_test_cases: self.reset() self.data = BytesIO(browser_test_cases[name]['data']) boundary = browser_test_cases[name]['boundary'] files = browser_test_cases[name]['files'] forms = browser_test_cases[name]['forms'] self.parse('multipart/form-data; boundary=%s'%boundary, clen=-1) for name, file in files.items(): self.assertFile(name, file[0], file[1], file[2]) for name, form in forms.items(): self.assertForm(name, form)
TestWerkzeugExamples
python
rapidsai__cudf
python/cudf/cudf/core/udf/masked_typing.py
{ "start": 11079, "end": 12159 }
class ____(AbstractTemplate): def generic(self, args, kws): """ Typing for `Masked` <op> a scalar (and vice-versa). Handles situations like `x + 1`. """ # In the case of op(Masked, scalar), we resolve the type between # the Masked value_type and the scalar's type directly to_resolve_types = None if isinstance(args[0], MaskedType) and isinstance( args[1], SUPPORTED_NUMBA_TYPES ): to_resolve_types = (args[0].value_type, args[1]) elif isinstance(args[0], SUPPORTED_NUMBA_TYPES) and isinstance( args[1], MaskedType ): to_resolve_types = (args[1].value_type, args[0]) else: # fail typing return None return_type = self.context.resolve_function_type( self.key, to_resolve_types, kws ).return_type return nb_signature( MaskedType(return_type), args[0], args[1], ) @cuda_decl_registry.register_global(operator.is_)
MaskedScalarScalarOp
python
celery__celery
t/unit/fixups/test_django.py
{ "start": 643, "end": 5671 }
class ____(FixupCase): Fixup = DjangoFixup def test_setting_default_app(self): from celery import _state prev, _state.default_app = _state.default_app, None try: app = Mock(name='app') DjangoFixup(app) app.set_default.assert_called_with() finally: _state.default_app = prev @patch('celery.fixups.django.DjangoWorkerFixup') def test_worker_fixup_property(self, DjangoWorkerFixup): f = DjangoFixup(self.app) f._worker_fixup = None assert f.worker_fixup is DjangoWorkerFixup() assert f.worker_fixup is DjangoWorkerFixup() def test_on_import_modules(self): f = DjangoFixup(self.app) f.worker_fixup = Mock(name='worker_fixup') f.on_import_modules() f.worker_fixup.validate_models.assert_called_with() def test_autodiscover_tasks(self, patching): patching.modules('django.apps') from django.apps import apps f = DjangoFixup(self.app) configs = [Mock(name='c1'), Mock(name='c2')] apps.get_app_configs.return_value = configs assert f.autodiscover_tasks() == [c.name for c in configs] @pytest.mark.masked_modules('django') def test_fixup_no_django(self, patching, mask_modules): with patch('celery.fixups.django.DjangoFixup') as Fixup: patching.setenv('DJANGO_SETTINGS_MODULE', '') fixup(self.app) Fixup.assert_not_called() patching.setenv('DJANGO_SETTINGS_MODULE', 'settings') with pytest.warns(FixupWarning): fixup(self.app) Fixup.assert_not_called() def test_fixup(self, patching): with patch('celery.fixups.django.DjangoFixup') as Fixup: patching.setenv('DJANGO_SETTINGS_MODULE', '') fixup(self.app) Fixup.assert_not_called() patching.setenv('DJANGO_SETTINGS_MODULE', 'settings') with conftest.module_exists('django'): import django django.VERSION = (1, 11, 1) fixup(self.app) Fixup.assert_called() def test_maybe_close_fd(self): with patch('os.close'): _maybe_close_fd(Mock()) _maybe_close_fd(object()) def test_init(self): with self.fixup_context(self.app) as (f, importmod, sym): assert f @pytest.mark.patched_module( 'django', 'django.db', 'django.db.transaction', ) def test_install(self, patching, module): self.app.loader = Mock() self.cw = patching('os.getcwd') self.p = patching('sys.path') self.sigs = patching('celery.fixups.django.signals') with self.fixup_context(self.app) as (f, _, _): self.cw.return_value = '/opt/vandelay' f.install() self.sigs.worker_init.connect.assert_called_with(f.on_worker_init) assert self.app.loader.now == f.now # Specialized DjangoTask class is used assert self.app.task_cls == 'celery.contrib.django.task:DjangoTask' from celery.contrib.django.task import DjangoTask assert issubclass(f.app.Task, DjangoTask) assert hasattr(f.app.Task, 'delay_on_commit') assert hasattr(f.app.Task, 'apply_async_on_commit') self.p.insert.assert_called_with(0, '/opt/vandelay') def test_install_custom_user_task(self, patching): patching('celery.fixups.django.signals') self.app.task_cls = 'myapp.celery.tasks:Task' self.app._custom_task_cls_used = True with self.fixup_context(self.app) as (f, _, _): f.install() # Specialized DjangoTask class is NOT used, # The one from the user's class is assert self.app.task_cls == 'myapp.celery.tasks:Task' def test_install_custom_user_task_as_class_attribute(self, patching): patching('celery.fixups.django.signals') from celery.app import Celery class MyCeleryApp(Celery): task_cls = 'myapp.celery.tasks:Task' app = MyCeleryApp('mytestapp') with self.fixup_context(app) as (f, _, _): f.install() # Specialized DjangoTask class is NOT used, # The one from the user's class is assert app.task_cls == 'myapp.celery.tasks:Task' def test_now(self): with self.fixup_context(self.app) as (f, _, 
_): assert f.now(utc=True) f._now.assert_not_called() assert f.now(utc=False) f._now.assert_called() def test_on_worker_init(self): with self.fixup_context(self.app) as (f, _, _): with patch('celery.fixups.django.DjangoWorkerFixup') as DWF: f.on_worker_init() DWF.assert_called_with(f.app) DWF.return_value.install.assert_called_with() assert f._worker_fixup is DWF.return_value
test_DjangoFixup
python
gevent__gevent
src/gevent/select.py
{ "start": 7332, "end": 7973 }
class ____(object): __slots__ = ('events', 'event') def __init__(self): self.events = set() self.event = Event() def add_event(self, events, fd): if events < 0: result_flags = POLLNVAL else: result_flags = 0 if events & _EV_READ: result_flags = POLLIN if events & _EV_WRITE: result_flags |= POLLOUT self.events.add((fd, result_flags)) self.event.set() def add_error_before_io(self, fd): # This is before we do any IO, don't set the event self.events.add((fd, POLLNVAL))
PollResult
python
pytorch__pytorch
torch/testing/_internal/common_dtype.py
{ "start": 412, "end": 5106 }
class ____(tuple): __slots__ = () def __add__(self, other): assert isinstance(other, tuple) return _dispatch_dtypes(tuple.__add__(self, other)) _empty_types = _dispatch_dtypes(()) def empty_types(): return _empty_types _floating_types = _dispatch_dtypes((torch.float32, torch.float64)) def floating_types(): return _floating_types _floating_types_and_half = _floating_types + (torch.half,) def floating_types_and_half(): return _floating_types_and_half def floating_types_and(*dtypes): return _floating_types + _validate_dtypes(*dtypes) _floating_and_complex_types = _floating_types + (torch.cfloat, torch.cdouble) def floating_and_complex_types(): return _floating_and_complex_types def floating_and_complex_types_and(*dtypes): return _floating_and_complex_types + _validate_dtypes(*dtypes) _double_types = _dispatch_dtypes((torch.float64, torch.complex128)) def double_types(): return _double_types # NB: Does not contain uint16/uint32/uint64 for BC reasons _integral_types = _dispatch_dtypes( (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64) ) def integral_types(): return _integral_types def integral_types_and(*dtypes): return _integral_types + _validate_dtypes(*dtypes) _all_types = _floating_types + _integral_types def all_types(): return _all_types def all_types_and(*dtypes): return _all_types + _validate_dtypes(*dtypes) _complex_types = _dispatch_dtypes((torch.cfloat, torch.cdouble)) def complex_types(): return _complex_types def complex_types_and(*dtypes): return _complex_types + _validate_dtypes(*dtypes) _all_types_and_complex = _all_types + _complex_types def all_types_and_complex(): return _all_types_and_complex def all_types_and_complex_and(*dtypes): return _all_types_and_complex + _validate_dtypes(*dtypes) _all_types_and_half = _all_types + (torch.half,) def all_types_and_half(): return _all_types_and_half _all_mps_types = ( _dispatch_dtypes({torch.float, torch.half, torch.bfloat16}) + _integral_types ) def all_mps_types(): return _all_mps_types def all_mps_types_and(*dtypes): return _all_mps_types + _validate_dtypes(*dtypes) _float8_types = _dispatch_dtypes( ( torch.float8_e4m3fn, torch.float8_e4m3fnuz, torch.float8_e5m2, torch.float8_e5m2fnuz, ) ) def float8_types(): return _float8_types def float8_types_and(*dtypes): return _float8_types + _validate_dtypes(*dtypes) def all_types_complex_float8_and(*dtypes): return _all_types + _complex_types + _float8_types + _validate_dtypes(*dtypes) def custom_types(*dtypes): """Create a list of arbitrary dtypes""" return _empty_types + _validate_dtypes(*dtypes) # The functions below are used for convenience in our test suite and thus have no corresponding C++ dispatch macro # See AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS. 
def get_all_dtypes( include_half=True, include_bfloat16=True, include_bool=True, include_complex=True, include_complex32=False, include_qint=False, ) -> list[torch.dtype]: dtypes = get_all_int_dtypes() + get_all_fp_dtypes( include_half=include_half, include_bfloat16=include_bfloat16 ) if include_bool: dtypes.append(torch.bool) if include_complex: dtypes += get_all_complex_dtypes(include_complex32) if include_qint: dtypes += get_all_qint_dtypes() return dtypes def get_all_math_dtypes(device) -> list[torch.dtype]: return ( get_all_int_dtypes() + get_all_fp_dtypes( include_half=device.startswith("cuda"), include_bfloat16=False ) + get_all_complex_dtypes() ) def get_all_complex_dtypes(include_complex32=False) -> list[torch.dtype]: return ( [torch.complex32, torch.complex64, torch.complex128] if include_complex32 else [torch.complex64, torch.complex128] ) def get_all_int_dtypes() -> list[torch.dtype]: return [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64] def get_all_fp_dtypes(include_half=True, include_bfloat16=True) -> list[torch.dtype]: dtypes = [torch.float32, torch.float64] if include_half: dtypes.append(torch.float16) if include_bfloat16: dtypes.append(torch.bfloat16) return dtypes def get_all_qint_dtypes() -> list[torch.dtype]: return [torch.qint8, torch.quint8, torch.qint32, torch.quint4x2, torch.quint2x4] float_to_corresponding_complex_type_map = { torch.float16: torch.complex32, torch.float32: torch.complex64, torch.float64: torch.complex128, }
_dispatch_dtypes
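Since `_dispatch_dtypes` overrides `__add__` to stay closed under concatenation, the `*_and` helpers compose freely. A quick check against the definitions above, with that module in scope:

import torch

fp = floating_types_and(torch.half, torch.bfloat16)
assert torch.float32 in fp and torch.bfloat16 in fp
assert isinstance(fp, tuple)  # _dispatch_dtypes is a tuple subclass

# get_all_dtypes flattens the int/float/complex/bool families into a list:
dts = get_all_dtypes(include_half=False, include_complex=False)
assert torch.float16 not in dts and torch.bool in dts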
python
airbytehq__airbyte
airbyte-integrations/connectors/source-intercom/components.py
{ "start": 10334, "end": 14355 }
class ____(SimpleRetriever): """ Custom retriever for Intercom's companies stream with reset handling. Only compatible with streams that sync using a single date time window instead of multiple windows when the step is defined. This is okay for the companies stream since it only allows for single-threaded processing. For the companies stream, we need to implement a custom retriever since we cannot simply retry on HTTP 500 errors. Instead, the stream must restart from the beginning to ensure data integrity. See Docs: https://developers.intercom.com/docs/references/2.1/rest-api/companies/iterating-over-all-companies We need to implement a 'RESTART' action to restart the stream from the beginning in the CDK, which is tracked here: https://github.com/airbytehq/airbyte-internal-issues/issues/12107. However, the team does not have the bandwidth to implement this at the moment, so this custom component provides a workaround by resetting the cursor on errors. """ RESET_TOKEN = {"_ab_reset": True} def __post_init__(self, parameters: Mapping[str, Any]) -> None: super().__post_init__(parameters) self.reset_signal = ResetCursorSignal() def _next_page_token( self, response: requests.Response, last_page_size: int, last_record: Optional[Record], last_page_token_value: Optional[Any], ) -> Optional[Mapping[str, Any]]: """ Determines the next page token or signals a reset. """ if self.reset_signal.is_reset_triggered(): self.reset_signal.clear_reset() return self.RESET_TOKEN next_token = self._paginator.next_page_token( response=response, last_page_size=last_page_size, last_record=last_record, last_page_token_value=last_page_token_value, ) return next_token def _read_pages( self, records_generator_fn: Callable[[Optional[requests.Response]], Iterable[Record]], stream_slice: StreamSlice, ) -> Iterable[Record]: """ Reads pages with pagination and reset handling using _next_page_token. """ pagination_complete = False initial_token = self._paginator.get_initial_token() next_page_token = {"next_page_token": initial_token} if initial_token is not None else None while not pagination_complete: # Needed for _next_page_token response = self.requester.send_request( path=self._paginator_path(next_page_token=next_page_token), stream_state=None, # stream_state as an interpolation context is deprecated stream_slice=stream_slice, next_page_token=next_page_token, request_headers=self._request_headers(next_page_token=next_page_token), request_params=self._request_params(next_page_token=next_page_token), request_body_data=self._request_body_data(next_page_token=next_page_token), request_body_json=self._request_body_json(next_page_token=next_page_token), ) for record in records_generator_fn(response): yield record if not response: pagination_complete = True else: next_page_token = self._next_page_token( response=response, last_page_size=0, # Simplified, not tracking size here last_record=None, # Not needed for reset logic last_page_token_value=(next_page_token.get("next_page_token") if next_page_token else None), ) if next_page_token == self.RESET_TOKEN: next_page_token = {"next_page_token": initial_token} if initial_token is not None else None elif not next_page_token: pagination_complete = True yield from []
IntercomScrollRetriever
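`ResetCursorSignal` is not part of this excerpt. A minimal, thread-safe sketch of the interface the retriever calls (`is_reset_triggered` / `clear_reset`, plus something for the error handler to trip); this is a hypothetical stand-in, not the connector's actual implementation:

import threading

class ResetCursorSignal:
    """Hypothetical stand-in: latches a reset request across threads."""

    def __init__(self):
        self._event = threading.Event()

    def trigger_reset(self) -> None:
        # Called by the HTTP 500 error handler to request a restart.
        self._event.set()

    def is_reset_triggered(self) -> bool:
        return self._event.is_set()

    def clear_reset(self) -> None:
        self._event.clear()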
python
sqlalchemy__sqlalchemy
test/dialect/oracle/test_compiler.py
{ "start": 65279, "end": 69586 }
class ____(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = "oracle" def setup_test(self): self.table = table( "mytable", column("myid", String), column("name", String) ) def test_regexp_match(self): self.assert_compile( self.table.c.myid.regexp_match("pattern"), "REGEXP_LIKE(mytable.myid, :myid_1)", checkparams={"myid_1": "pattern"}, ) def test_regexp_match_column(self): self.assert_compile( self.table.c.myid.regexp_match(self.table.c.name), "REGEXP_LIKE(mytable.myid, mytable.name)", checkparams={}, ) def test_regexp_match_str(self): self.assert_compile( literal("string").regexp_match(self.table.c.name), "REGEXP_LIKE(:param_1, mytable.name)", checkparams={"param_1": "string"}, ) def test_regexp_match_flags(self): self.assert_compile( self.table.c.myid.regexp_match("pattern", flags="ig"), "REGEXP_LIKE(mytable.myid, :myid_1, 'ig')", checkparams={"myid_1": "pattern"}, ) def test_regexp_match_flags_safestring(self): self.assert_compile( self.table.c.myid.regexp_match("pattern", flags="i'g"), "REGEXP_LIKE(mytable.myid, :myid_1, 'i''g')", checkparams={"myid_1": "pattern"}, ) def test_not_regexp_match(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern"), "NOT REGEXP_LIKE(mytable.myid, :myid_1)", checkparams={"myid_1": "pattern"}, ) def test_not_regexp_match_column(self): self.assert_compile( ~self.table.c.myid.regexp_match(self.table.c.name), "NOT REGEXP_LIKE(mytable.myid, mytable.name)", checkparams={}, ) def test_not_regexp_match_str(self): self.assert_compile( ~literal("string").regexp_match(self.table.c.name), "NOT REGEXP_LIKE(:param_1, mytable.name)", checkparams={"param_1": "string"}, ) def test_not_regexp_match_flags(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern", flags="ig"), "NOT REGEXP_LIKE(mytable.myid, :myid_1, 'ig')", checkparams={"myid_1": "pattern"}, ) def test_regexp_replace(self): self.assert_compile( self.table.c.myid.regexp_replace("pattern", "replacement"), "REGEXP_REPLACE(mytable.myid, :myid_1, :myid_2)", checkparams={"myid_1": "pattern", "myid_2": "replacement"}, ) def test_regexp_replace_column(self): self.assert_compile( self.table.c.myid.regexp_replace("pattern", self.table.c.name), "REGEXP_REPLACE(mytable.myid, :myid_1, mytable.name)", checkparams={"myid_1": "pattern"}, ) def test_regexp_replace_column2(self): self.assert_compile( self.table.c.myid.regexp_replace(self.table.c.name, "replacement"), "REGEXP_REPLACE(mytable.myid, mytable.name, :myid_1)", checkparams={"myid_1": "replacement"}, ) def test_regexp_replace_string(self): self.assert_compile( literal("string").regexp_replace("pattern", self.table.c.name), "REGEXP_REPLACE(:param_1, :param_2, mytable.name)", checkparams={"param_2": "pattern", "param_1": "string"}, ) def test_regexp_replace_flags(self): self.assert_compile( self.table.c.myid.regexp_replace( "pattern", "replacement", flags="ig" ), "REGEXP_REPLACE(mytable.myid, :myid_1, :myid_2, 'ig')", checkparams={ "myid_1": "pattern", "myid_2": "replacement", }, ) def test_regexp_replace_flags_safestring(self): self.assert_compile( self.table.c.myid.regexp_replace( "pattern", "replacement", flags="i'g" ), "REGEXP_REPLACE(mytable.myid, :myid_1, :myid_2, 'i''g')", checkparams={ "myid_1": "pattern", "myid_2": "replacement", }, )
RegexpTest
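Each assertion above corresponds to compiling the expression against the Oracle dialect; one case reproduced outside the test harness:

from sqlalchemy import column
from sqlalchemy.dialects import oracle

expr = column("myid").regexp_match("pattern", flags="ig")
print(expr.compile(dialect=oracle.dialect()))
# REGEXP_LIKE(myid, :myid_1, 'ig')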
python
pypa__warehouse
warehouse/cli/db/dbml.py
{ "start": 1696, "end": 1873 }
class ____(TypedDict): type: NotRequired[Literal["1-1", "1-n", "n-n"]] table_from: str table_from_field: str table_to: str table_to_field: str
RelationshipInfo
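Only `type` is optional here; the four endpoint keys are mandatory for the dict to type-check. Illustrative usage with the class above (the table names are invented):

rel: RelationshipInfo = {
    "table_from": "release",
    "table_from_field": "project_id",
    "table_to": "project",
    "table_to_field": "id",
}
rel["type"] = "1-n"  # NotRequired: may be attached later or omitted entirely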
python
sympy__sympy
sympy/physics/mechanics/joint.py
{ "start": 53058, "end": 67550 }
class ____(Joint): """Planar Joint. .. raw:: html :file: ../../../doc/src/modules/physics/mechanics/api/PlanarJoint.svg Explanation =========== A planar joint is defined such that the child body translates over a fixed plane of the parent body as well as rotate about the rotation axis, which is perpendicular to that plane. The origin of this plane is the ``parent_point`` and the plane is spanned by two nonparallel planar vectors. The location of the ``child_point`` is based on the planar vectors ($\\vec{v}_1$, $\\vec{v}_2$) and generalized coordinates ($q_1$, $q_2$), i.e. $\\vec{r} = q_1 \\hat{v}_1 + q_2 \\hat{v}_2$. The direction cosine matrix between the ``child_interframe`` and ``parent_interframe`` is formed using a simple rotation ($q_0$) about the rotation axis. In order to simplify the definition of the ``PlanarJoint``, the ``rotation_axis`` and ``planar_vectors`` are set to be the unit vectors of the ``parent_interframe`` according to the table below. This ensures that you can only define these vectors by creating a separate frame and supplying that as the interframe. If you however would only like to supply the normals of the plane with respect to the parent and child bodies, then you can also supply those to the ``parent_interframe`` and ``child_interframe`` arguments. An example of both of these cases is in the examples section below and the page on the joints framework provides a more detailed explanation of the intermediate frames. .. list-table:: * - ``rotation_axis`` - ``parent_interframe.x`` * - ``planar_vectors[0]`` - ``parent_interframe.y`` * - ``planar_vectors[1]`` - ``parent_interframe.z`` Parameters ========== name : string A unique name for the joint. parent : Particle or RigidBody The parent body of joint. child : Particle or RigidBody The child body of joint. rotation_coordinate : dynamicsymbol, optional Generalized coordinate corresponding to the rotation angle. The default value is ``dynamicsymbols(f'q0_{joint.name}')``. planar_coordinates : iterable of dynamicsymbols, optional Two generalized coordinates used for the planar translation. The default value is ``dynamicsymbols(f'q1_{joint.name} q2_{joint.name}')``. rotation_speed : dynamicsymbol, optional Generalized speed corresponding to the angular velocity. The default value is ``dynamicsymbols(f'u0_{joint.name}')``. planar_speeds : dynamicsymbols, optional Two generalized speeds used for the planar translation velocity. The default value is ``dynamicsymbols(f'u1_{joint.name} u2_{joint.name}')``. parent_point : Point or Vector, optional Attachment point where the joint is fixed to the parent body. If a vector is provided, then the attachment point is computed by adding the vector to the body's mass center. The default value is the parent's mass center. child_point : Point or Vector, optional Attachment point where the joint is fixed to the child body. If a vector is provided, then the attachment point is computed by adding the vector to the body's mass center. The default value is the child's mass center. parent_interframe : ReferenceFrame, optional Intermediate frame of the parent body with respect to which the joint transformation is formulated. If a Vector is provided then an interframe is created which aligns its X axis with the given vector. The default value is the parent's own frame. child_interframe : ReferenceFrame, optional Intermediate frame of the child body with respect to which the joint transformation is formulated. 
If a Vector is provided then an interframe is created which aligns its X axis with the given vector. The default value is the child's own frame. Attributes ========== name : string The joint's name. parent : Particle or RigidBody The joint's parent body. child : Particle or RigidBody The joint's child body. rotation_coordinate : dynamicsymbol Generalized coordinate corresponding to the rotation angle. planar_coordinates : Matrix Two generalized coordinates used for the planar translation. rotation_speed : dynamicsymbol Generalized speed corresponding to the angular velocity. planar_speeds : Matrix Two generalized speeds used for the planar translation velocity. coordinates : Matrix Matrix of the joint's generalized coordinates. speeds : Matrix Matrix of the joint's generalized speeds. parent_point : Point Attachment point where the joint is fixed to the parent body. child_point : Point Attachment point where the joint is fixed to the child body. parent_interframe : ReferenceFrame Intermediate frame of the parent body with respect to which the joint transformation is formulated. child_interframe : ReferenceFrame Intermediate frame of the child body with respect to which the joint transformation is formulated. kdes : Matrix Kinematical differential equations of the joint. rotation_axis : Vector The axis about which the rotation occurs. planar_vectors : list The vectors that describe the planar translation directions. Examples ========= A single planar joint is created between two bodies and has the following basic attributes: >>> from sympy.physics.mechanics import RigidBody, PlanarJoint >>> parent = RigidBody('P') >>> parent P >>> child = RigidBody('C') >>> child C >>> joint = PlanarJoint('PC', parent, child) >>> joint PlanarJoint: PC parent: P child: C >>> joint.name 'PC' >>> joint.parent P >>> joint.child C >>> joint.parent_point P_masscenter >>> joint.child_point C_masscenter >>> joint.rotation_axis P_frame.x >>> joint.planar_vectors [P_frame.y, P_frame.z] >>> joint.rotation_coordinate q0_PC(t) >>> joint.planar_coordinates Matrix([ [q1_PC(t)], [q2_PC(t)]]) >>> joint.coordinates Matrix([ [q0_PC(t)], [q1_PC(t)], [q2_PC(t)]]) >>> joint.rotation_speed u0_PC(t) >>> joint.planar_speeds Matrix([ [u1_PC(t)], [u2_PC(t)]]) >>> joint.speeds Matrix([ [u0_PC(t)], [u1_PC(t)], [u2_PC(t)]]) >>> child.frame.ang_vel_in(parent.frame) u0_PC(t)*P_frame.x >>> child.frame.dcm(parent.frame) Matrix([ [1, 0, 0], [0, cos(q0_PC(t)), sin(q0_PC(t))], [0, -sin(q0_PC(t)), cos(q0_PC(t))]]) >>> joint.child_point.pos_from(joint.parent_point) q1_PC(t)*P_frame.y + q2_PC(t)*P_frame.z >>> child.masscenter.vel(parent.frame) u1_PC(t)*P_frame.y + u2_PC(t)*P_frame.z To further demonstrate the use of the planar joint, the kinematics of a block sliding on a slope, can be created as follows. >>> from sympy import symbols >>> from sympy.physics.mechanics import PlanarJoint, RigidBody, ReferenceFrame >>> a, d, h = symbols('a d h') First create bodies to represent the slope and the block. >>> ground = RigidBody('G') >>> block = RigidBody('B') To define the slope you can either define the plane by specifying the ``planar_vectors`` or/and the ``rotation_axis``. However it is advisable to create a rotated intermediate frame, so that the ``parent_vectors`` and ``rotation_axis`` will be the unit vectors of this intermediate frame. >>> slope = ReferenceFrame('A') >>> slope.orient_axis(ground.frame, ground.y, a) The planar joint can be created using these bodies and intermediate frame. 
We can specify the origin of the slope to be ``d`` above the slope's center of mass and the block's center of mass to be a distance ``h`` above the slope's surface. Note that we can specify the normal of the plane using the rotation axis argument. >>> joint = PlanarJoint('PC', ground, block, parent_point=d * ground.x, ... child_point=-h * block.x, parent_interframe=slope) Once the joint is established the kinematics of the bodies can be accessed. First the ``rotation_axis``, which is normal to the plane and the ``plane_vectors``, can be found. >>> joint.rotation_axis A.x >>> joint.planar_vectors [A.y, A.z] The direction cosine matrix of the block with respect to the ground can be found with: >>> block.frame.dcm(ground.frame) Matrix([ [ cos(a), 0, -sin(a)], [sin(a)*sin(q0_PC(t)), cos(q0_PC(t)), sin(q0_PC(t))*cos(a)], [sin(a)*cos(q0_PC(t)), -sin(q0_PC(t)), cos(a)*cos(q0_PC(t))]]) The angular velocity of the block can be computed with respect to the ground. >>> block.frame.ang_vel_in(ground.frame) u0_PC(t)*A.x The position of the block's center of mass can be found with: >>> block.masscenter.pos_from(ground.masscenter) d*G_frame.x + h*B_frame.x + q1_PC(t)*A.y + q2_PC(t)*A.z Finally, the linear velocity of the block's center of mass can be computed with respect to the ground. >>> block.masscenter.vel(ground.frame) u1_PC(t)*A.y + u2_PC(t)*A.z In some cases it could be your preference to only define the normals of the plane with respect to both bodies. This can most easily be done by supplying vectors to the ``interframe`` arguments. What will happen in this case is that an interframe will be created with its ``x`` axis aligned with the provided vector. For a further explanation of how this is done see the notes of the ``Joint`` class. In the code below, the above example (with the block on the slope) is recreated by supplying vectors to the interframe arguments. Note that the previously described option is however more computationally efficient, because the algorithm now has to compute the rotation angle between the provided vector and the 'x' axis. >>> from sympy import symbols, cos, sin >>> from sympy.physics.mechanics import PlanarJoint, RigidBody >>> a, d, h = symbols('a d h') >>> ground = RigidBody('G') >>> block = RigidBody('B') >>> joint = PlanarJoint( ... 'PC', ground, block, parent_point=d * ground.x, ... child_point=-h * block.x, child_interframe=block.x, ... 
parent_interframe=cos(a) * ground.x + sin(a) * ground.z) >>> block.frame.dcm(ground.frame).simplify() Matrix([ [ cos(a), 0, sin(a)], [-sin(a)*sin(q0_PC(t)), cos(q0_PC(t)), sin(q0_PC(t))*cos(a)], [-sin(a)*cos(q0_PC(t)), -sin(q0_PC(t)), cos(a)*cos(q0_PC(t))]]) """ def __init__(self, name, parent, child, rotation_coordinate=None, planar_coordinates=None, rotation_speed=None, planar_speeds=None, parent_point=None, child_point=None, parent_interframe=None, child_interframe=None): # A ready to merge implementation of setting the planar_vectors and # rotation_axis was added and removed in PR #24046 coordinates = (rotation_coordinate, planar_coordinates) speeds = (rotation_speed, planar_speeds) super().__init__(name, parent, child, coordinates, speeds, parent_point, child_point, parent_interframe=parent_interframe, child_interframe=child_interframe) def __str__(self): return (f'PlanarJoint: {self.name} parent: {self.parent} ' f'child: {self.child}') @property def rotation_coordinate(self): """Generalized coordinate corresponding to the rotation angle.""" return self.coordinates[0] @property def planar_coordinates(self): """Two generalized coordinates used for the planar translation.""" return self.coordinates[1:, 0] @property def rotation_speed(self): """Generalized speed corresponding to the angular velocity.""" return self.speeds[0] @property def planar_speeds(self): """Two generalized speeds used for the planar translation velocity.""" return self.speeds[1:, 0] @property def rotation_axis(self): """The axis about which the rotation occurs.""" return self.parent_interframe.x @property def planar_vectors(self): """The vectors that describe the planar translation directions.""" return [self.parent_interframe.y, self.parent_interframe.z] def _generate_coordinates(self, coordinates): rotation_speed = self._fill_coordinate_list(coordinates[0], 1, 'q', number_single=True) planar_speeds = self._fill_coordinate_list(coordinates[1], 2, 'q', 1) return rotation_speed.col_join(planar_speeds) def _generate_speeds(self, speeds): rotation_speed = self._fill_coordinate_list(speeds[0], 1, 'u', number_single=True) planar_speeds = self._fill_coordinate_list(speeds[1], 2, 'u', 1) return rotation_speed.col_join(planar_speeds) def _orient_frames(self): self.child_interframe.orient_axis( self.parent_interframe, self.rotation_axis, self.rotation_coordinate) def _set_angular_velocity(self): self.child_interframe.set_ang_vel( self.parent_interframe, self.rotation_speed * self.rotation_axis) def _set_linear_velocity(self): self.child_point.set_pos( self.parent_point, self.planar_coordinates[0] * self.planar_vectors[0] + self.planar_coordinates[1] * self.planar_vectors[1]) self.parent_point.set_vel(self.parent_interframe, 0) self.child_point.set_vel(self.child_interframe, 0) self.child_point.set_vel( self._parent_frame, self.planar_speeds[0] * self.planar_vectors[0] + self.planar_speeds[1] * self.planar_vectors[1]) self.child.masscenter.v2pt_theory(self.child_point, self._parent_frame, self._child_frame)
PlanarJoint
python
readthedocs__readthedocs.org
readthedocs/projects/migrations/0018_fix-translation-model.py
{ "start": 133, "end": 674 }
class ____(migrations.Migration): safe = Safe.after_deploy() dependencies = [ ("projects", "0017_add_domain_https"), ] operations = [ migrations.AlterField( model_name="project", name="main_language_project", field=models.ForeignKey( related_name="translations", on_delete=django.db.models.deletion.SET_NULL, blank=True, to="projects.Project", null=True, ), ), ]
Migration
python
kamyu104__LeetCode-Solutions
Python/plus-one.py
{ "start": 29, "end": 453 }
class ____(object): def plusOne(self, digits): """ :type digits: List[int] :rtype: List[int] """ for i in reversed(xrange(len(digits))): if digits[i] == 9: digits[i] = 0 else: digits[i] += 1 return digits digits[0] = 1 digits.append(0) return digits # Time: O(n) # Space: O(n)
Solution
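The early return handles any digit below 9; only an all-nines input falls through to the overflow branch, which reuses the zeroed list. Tracing both paths (Python 2, matching the `xrange` in the source):

s = Solution()
print(s.plusOne([1, 2, 9]))  # [1, 3, 0]   early return inside the loop
print(s.plusOne([9, 9]))     # [1, 0, 0]   falls through: digits[0] = 1, append 0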
python
dask__dask
dask/dataframe/dask_expr/io/io.py
{ "start": 959, "end": 1059 }
class ____(Expr): def __str__(self): return f"{type(self).__name__}({self._name[-7:]})"
IO
python
scipy__scipy
benchmarks/benchmarks/stats.py
{ "start": 2861, "end": 3290 }
class ____(Benchmark): param_names = ['alternative'] params = [ ['two-sided', 'less', 'greater'] ] def setup(self, alternative): rng = np.random.default_rng(0xb6acd7192d6e5da0f68b5d8ab8ce7af2) self.u1 = rng.uniform(-1, 1, 200) self.u2 = rng.uniform(-0.5, 1.5, 300) def time_ranksums(self, alternative): stats.ranksums(self.u1, self.u2, alternative=alternative)
RankSums
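The statement being timed, runnable on its own with the same seeded RNG as the benchmark's setup:

import numpy as np
from scipy import stats

rng = np.random.default_rng(0xb6acd7192d6e5da0f68b5d8ab8ce7af2)
u1 = rng.uniform(-1, 1, 200)
u2 = rng.uniform(-0.5, 1.5, 300)
res = stats.ranksums(u1, u2, alternative="less")
print(res.statistic, res.pvalue)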
python
ray-project__ray
rllib/env/tests/test_multi_agent_env.py
{ "start": 7919, "end": 11106 }
class ____(MultiAgentEnv): """Multi-agent env in which sometimes, no agent acts. At each timestep, we determine, which agents emit observations (and thereby request actions). This set of observing (and action-requesting) agents could be anything from the empty set to the full set of all agents. For simplicity, all agents terminate after n timesteps. """ def __init__(self, num=3): super().__init__() self.agents = list(range(num)) self.envs = [MockEnv(25) for _ in range(self.num_agents)] self._observations = {} self._infos = {} self.terminateds = set() self.truncateds = set() self.observation_space = gym.spaces.Discrete(2) self.action_space = gym.spaces.Discrete(2) def reset(self, *, seed=None, options=None): self.terminateds = set() self.truncateds = set() self._observations = {} self._infos = {} for aid in self._get_random_agents(): self._observations[aid], self._infos[aid] = self.envs[aid].reset() return self._observations, self._infos def step(self, action_dict): rew, terminated, truncated = {}, {}, {} # Step those agents, for which we have actions from RLlib. for aid, action in action_dict.items(): ( self._observations[aid], rew[aid], terminated[aid], truncated[aid], self._infos[aid], ) = self.envs[aid].step(action) if terminated[aid]: self.terminateds.add(aid) if truncated[aid]: self.truncateds.add(aid) # Must add the __all__ flag. terminated["__all__"] = len(self.terminateds) == self.num_agents truncated["__all__"] = len(self.truncateds) == self.num_agents # Select some of our observations to be published next (randomly). obs = {} infos = {} for aid in self._get_random_agents(): if aid not in self._observations: self._observations[aid] = self.observation_space.sample() self._infos[aid] = {"fourty-two": 42} obs[aid] = self._observations.pop(aid) infos[aid] = self._infos.pop(aid) # Override some of the rewards. Rewards and dones should be always publishable, # even if no observation/action for an agent was sent/received. # An agent might get a reward because of the action of another agent. In this # case, the rewards for that agent are accumulated over the in-between timesteps # (in which the other agents step, but not this agent). for aid in self._get_random_agents(): rew[aid] = np.random.rand() return obs, rew, terminated, truncated, infos def _get_random_agents(self): num_observing_agents = np.random.randint(self.num_agents) aids = np.random.permutation(self.num_agents)[:num_observing_agents] return { aid for aid in aids if aid not in self.terminateds and aid not in self.truncateds }
SometimesZeroAgentsMultiAgent
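A driver loop against this env has to key its action dict off the observations it actually received, since any subset of agents (possibly none) may request actions on a given step. A hedged sketch, assuming the class above and its `MockEnv` dependency are importable:

env = SometimesZeroAgentsMultiAgent(num=3)
obs, infos = env.reset()
for _ in range(50):
    # Only agents that emitted an observation get an action back.
    actions = {aid: env.action_space.sample() for aid in obs}
    obs, rewards, terminateds, truncateds, infos = env.step(actions)
    if terminateds["__all__"] or truncateds["__all__"]:
        break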
python
viewflow__viewflow
viewflow/fsm/admin.py
{ "start": 873, "end": 7886 }
class ____(object): """ A Mixin for providing Finite State Machine (FSM) management support in Django admin. """ flow_state: State change_list_template = "admin/fsm_change_list.html" change_form_template = "admin/fsm_change_form.html" transition_form_template = None def get_flow_state(self, request) -> State: return self.flow_state def get_object_flow(self, request, obj): """ Returns the flow object associated with the specified model object. This function retrieves the flow object associated with the given model object. Override this function if your flow class does not have a constructor that accepts the model object as the only argument. """ try: return self.get_flow_state()._owner(obj) except TypeError: raise ValueError( f"{self.flow_state._owner} does not have a constructor that accepts" " a single argument. Please redefine 'get_object_flow' on the model" " admin." ) def get_transition_fields(self, request, obj, slug): """ Override this method to return a list of editable fields for the form associated with the specified transition slug. If no fields are editable for the transition, this function should return None. """ return None def save_model(self, request, obj, form, change): if not change: state = self.get_flow_state(request) flow = self.get_object_flow(request, obj) state.set(flow, state.get(flow)) super().save_model(request, obj, form, change) @contextmanager def create_revision(self, request): """ Save wrapper to use in state transition views, in case of subclass of django-reversion VersionAdmin. """ if hasattr(super(), "create_revision"): with super().create_revision(request): yield else: yield def get_urls(self): info = self.model._meta.app_label, self.model._meta.model_name return [ path( "<path:object_id>/transition/<slug:slug>/", self.admin_site.admin_view(self.transition_view), name="%s_%s_transition" % info, ), ] + super().get_urls() @property def media(self): extra = ".min" # '' if settings.DEBUG else '.min' return super().media + Media( css={ "screen": ( "viewflow/css/vis-network%s.css" % extra, "viewflow/css/viewflow%s.css" % extra, ) }, js=[ "viewflow/js/vis-network%s.js" % extra, "viewflow/js/viewflow%s.js" % extra, ], ) def changelist_view(self, request, extra_context=None): state = self.get_flow_state(request) flow_chart = fsm.chart(state) return super().changelist_view( request, extra_context={"state": state, "flow_chart": flow_chart} ) def render_change_form( self, request, context, add=False, change=False, form_url="", obj=None ): if change: state = self.get_flow_state(request) flow = self.get_object_flow(request, obj) context.update( { "state": state, "flow": flow, "transitions": [ ( transition, transition.conditions_met(flow), transition.has_perm(flow, request.user), ) for transition in state.get_outgoing_transitions( state.get(flow) ) ], } ) return super().render_change_form( request, context, add=add, change=change, form_url=form_url, obj=obj ) def transition_view(self, request, object_id, slug): opts = self.model._meta obj = self.get_object(request, object_id) if obj is None: return self._get_obj_does_not_exist_redirect(request, opts, object_id) flow = self.get_object_flow(request, obj) transition = getattr(flow, slug, None) if not slug.startswith("_") else None if not transition or not isinstance(transition, TransitionBoundMethod): raise DisallowedModelAdminLookup if not transition.has_perm(request.user): raise PermissionDenied if not transition.can_proceed(): raise PermissionDenied(_("Transition is not allowed")) # build form fields = 
self.get_transition_fields(request, obj, slug) or [] readonly_fields = [ field for field in flatten_fieldsets(self.get_fieldsets(request, obj)) if field not in fields ] ModelForm = modelform_factory( self.model, **{ "form": self.form, "fields": fields, "formfield_callback": partial( self.formfield_for_dbfield, request=request ), }, ) form = ModelForm(request.POST or None, instance=obj) adminForm = helpers.AdminForm( form, list(self.get_fieldsets(request, obj)), {}, readonly_fields, model_admin=self, ) media = self.media + adminForm.media if form.is_valid(): # perform transition self.save_model(request, obj, form, change=True) transition() obj_url = reverse( "admin:%s_%s_change" % (opts.app_label, opts.model_name), args=(quote(obj.pk),), current_app=self.admin_site.name, ) return HttpResponseRedirect(obj_url) context = { **self.admin_site.each_context(request), "title": _("%(label)s %(name)s") % ({"label": transition.label, "name": opts.verbose_name}), "adminform": adminForm, "object_id": object_id, "original": obj, "media": media, "preserved_filters": self.get_preserved_filters(request), "transition": transition, "opts": opts, "has_view_permission": self.has_view_permission(request, obj), } return render( request, self.transition_form_template or [ "admin/%s/%s/fsm_transition_form.html" % (opts.app_label, opts.model_name), "admin/%s/fsm_transition_form.html" % opts.app_label, "admin/fsm_transition_form.html", ], context, )
FlowAdminMixin
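Wiring the mixin into a concrete admin takes a `flow_state` pointing at the FSM state descriptor of a flow class whose constructor accepts the model instance (per `get_object_flow` above). A hypothetical sketch (`Review` and `ReviewFlow` are placeholders, not viewflow API):

from django.contrib import admin

@admin.register(Review)  # placeholder model with a matching ReviewFlow(obj) flow class
class ReviewAdmin(FlowAdminMixin, admin.ModelAdmin):
    flow_state = ReviewFlow.state  # the fsm.State descriptor on the flow class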