language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
sqlalchemy__sqlalchemy
test/sql/test_delete.py
{ "start": 1126, "end": 4497 }
class ____(_DeleteTestBase, fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = "default" def test_delete_literal_binds(self): table1 = self.tables.mytable stmt = table1.delete().where(table1.c.name == "jill") self.assert_compile( stmt, "DELETE FROM mytable WHERE mytable.name = 'jill'", literal_binds=True, ) def test_delete(self): table1 = self.tables.mytable self.assert_compile( delete(table1).where(table1.c.myid == 7), "DELETE FROM mytable WHERE mytable.myid = :myid_1", ) self.assert_compile( table1.delete().where(table1.c.myid == 7), "DELETE FROM mytable WHERE mytable.myid = :myid_1", ) self.assert_compile( table1.delete() .where(table1.c.myid == 7) .where(table1.c.name == "somename"), "DELETE FROM mytable " "WHERE mytable.myid = :myid_1 " "AND mytable.name = :name_1", ) def test_where_empty(self): table1 = self.tables.mytable with expect_deprecated( r"Invoking and_\(\) without arguments is deprecated" ): self.assert_compile( table1.delete().where(and_()), "DELETE FROM mytable" ) with expect_deprecated( r"Invoking or_\(\) without arguments is deprecated" ): self.assert_compile( table1.delete().where(or_()), "DELETE FROM mytable" ) def test_prefix_with(self): table1 = self.tables.mytable stmt = ( table1.delete() .prefix_with("A", "B", dialect="mysql") .prefix_with("C", "D") ) self.assert_compile(stmt, "DELETE C D FROM mytable") self.assert_compile( stmt, "DELETE A B C D FROM mytable", dialect=mysql.dialect() ) def test_alias(self): table1 = self.tables.mytable talias1 = table1.alias("t1") stmt = delete(talias1).where(talias1.c.myid == 7) self.assert_compile( stmt, "DELETE FROM mytable AS t1 WHERE t1.myid = :myid_1" ) def test_non_correlated_select(self): table1, table2 = self.tables.mytable, self.tables.myothertable # test a non-correlated WHERE clause s = select(table2.c.othername).where(table2.c.otherid == 7) self.assert_compile( delete(table1).where(table1.c.name == s.scalar_subquery()), "DELETE FROM mytable " "WHERE mytable.name = (" "SELECT 
myothertable.othername " "FROM myothertable " "WHERE myothertable.otherid = :otherid_1" ")", ) def test_correlated_select(self): table1, table2 = self.tables.mytable, self.tables.myothertable # test one that is actually correlated... s = select(table2.c.othername).where(table2.c.otherid == table1.c.myid) self.assert_compile( table1.delete().where(table1.c.name == s.scalar_subquery()), "DELETE FROM mytable " "WHERE mytable.name = (" "SELECT myothertable.othername " "FROM myothertable " "WHERE myothertable.otherid = mytable.myid" ")", )
DeleteTest
python
getsentry__sentry
src/sentry/workflow_engine/migrations/0068_migrate_anomaly_detection_alerts.py
{ "start": 5111, "end": 31794 }
class ____: """ OnCallDataBlob is a specific type that represents the data blob for a PagerDuty or Opsgenie notification action. """ priority: str = "" action_schema_mapping: dict[str, ActionSchemas] = { ActionType.EMAIL: ActionSchemas( config_schema={ "$schema": "https://json-schema.org/draft/2020-12/schema", "description": "The configuration schema for an email Action", "type": "object", "properties": { "target_identifier": {"type": ["string", "null"]}, "target_display": {"type": ["null"]}, "target_type": { "type": ["integer"], "enum": [*ActionTarget], }, }, "required": ["target_type"], "additionalProperties": False, }, data_schema={}, ), ActionType.PAGERDUTY: ActionSchemas( config_schema={ "$schema": "https://json-schema.org/draft/2020-12/schema", "description": "The configuration schema for a on-call Action", "type": "object", "properties": { "target_identifier": {"type": ["string"]}, "target_display": {"type": ["string", "null"]}, "target_type": {"type": ["integer"], "enum": [0]}, }, "required": ["target_identifier", "target_type"], "additionalProperties": False, }, data_schema={ "$schema": "https://json-schema.org/draft/2020-12/schema", "type": "object", "properties": { "priority": { "type": "string", "description": "The priority of the pagerduty action", "enum": [*PagerdutySeverity], }, "additionalProperties": False, }, }, ), ActionType.SLACK: ActionSchemas( config_schema={ "$schema": "https://json-schema.org/draft/2020-12/schema", "description": "The configuration schema for a Messaging Action", "type": "object", "properties": { "target_identifier": {"type": ["string"]}, "target_display": {"type": ["string"]}, "target_type": { "type": ["integer"], "enum": [ActionTarget.SPECIFIC.value], }, }, "required": ["target_identifier", "target_display", "target_type"], "additionalProperties": False, }, data_schema={}, ), ActionType.MSTEAMS: ActionSchemas( config_schema={ "$schema": "https://json-schema.org/draft/2020-12/schema", "description": "The configuration 
schema for a Messaging Action", "type": "object", "properties": { "target_identifier": {"type": ["string"]}, "target_display": {"type": ["string"]}, "target_type": { "type": ["integer"], "enum": [ActionTarget.SPECIFIC.value], }, }, "required": ["target_identifier", "target_display", "target_type"], "additionalProperties": False, }, data_schema={}, ), ActionType.SENTRY_APP: ActionSchemas( config_schema={ "$schema": "https://json-schema.org/draft/2020-12/schema", "description": "The configuration schema for a Sentry App Action", "type": "object", "properties": { "target_identifier": {"type": ["string"]}, "target_display": {"type": ["string", "null"]}, "target_type": {"type": ["integer"], "enum": [3]}, "sentry_app_identifier": {"type": ["string"], "enum": [*SentryAppIdentifier]}, }, "required": ["target_type", "target_identifier", "sentry_app_identifier"], "additionalProperties": False, }, data_schema={ "$schema": "https://json-schema.org/draft/2020-12/schema", "type": "object", "properties": {"settings": {"type": ["array", "object"]}}, "additionalProperties": False, }, ), ActionType.OPSGENIE: ActionSchemas( config_schema={ "$schema": "https://json-schema.org/draft/2020-12/schema", "description": "The configuration schema for a on-call Action", "type": "object", "properties": { "target_identifier": {"type": ["string"]}, "target_display": {"type": ["string", "null"]}, "target_type": { "type": ["integer"], "enum": [ActionTarget.SPECIFIC.value], }, }, "required": ["target_identifier", "target_type"], "additionalProperties": False, }, data_schema={ "$schema": "https://json-schema.org/draft/2020-12/schema", "type": "object", "properties": { "priority": { "type": "string", "description": "The priority of the opsgenie action", "enum": ["P1", "P2", "P3", "P4", "P5"], }, "additionalProperties": False, }, }, ), ActionType.DISCORD: ActionSchemas( config_schema={ "$schema": "https://json-schema.org/draft/2020-12/schema", "description": "The configuration schema for a Discord 
Action", "type": "object", "properties": { "target_identifier": {"type": "string"}, "target_display": { "type": ["string", "null"], }, "target_type": { "type": ["integer"], "enum": [ActionTarget.SPECIFIC.value], }, }, "required": ["target_identifier", "target_type"], "additionalProperties": False, }, data_schema={}, ), } def _enforce_action_json_schema(action: Any) -> None: schemas = action_schema_mapping.get(action.type) if not schemas: logger.error( "No schema found for action type", extra={"action_type": action.type}, ) return config_schema = schemas.config_schema data_schema = schemas.data_schema if config_schema is not None: try: validate(action.config, config_schema) except ValidationError as e: raise ValidationError(f"Invalid config: {e.message}") if data_schema is not None: try: validate(action.data, data_schema) except ValidationError as e: raise ValidationError(f"Invalid data: {e.message}") def _get_trigger_action_target(apps: Apps, trigger_action: Any) -> Any: OrganizationMember = apps.get_model("sentry", "OrganizationMember") Team = apps.get_model("sentry", "Team") if trigger_action.target_identifier is None: return None if trigger_action.target_type == ActionTarget.USER.value: try: return OrganizationMember.objects.get( user_id=int(trigger_action.target_identifier), organization=trigger_action.alert_rule_trigger.alert_rule.organization_id, ) except OrganizationMember.DoesNotExist: pass elif trigger_action.target_type == ActionTarget.TEAM.value: try: return Team.objects.get(id=int(trigger_action.target_identifier)) except Team.DoesNotExist: pass elif trigger_action.target_type == ActionTarget.SPECIFIC.value: return trigger_action.target_identifier return None def _get_action_description(apps: Apps, action: Any) -> str: """ Returns a human readable action description """ if action.type == AlertRuleTriggerActionType.EMAIL.value: target = _get_trigger_action_target(apps, action) if target: if action.target_type == ActionTarget.USER.value: 
action_target_user = target return "Email " + action_target_user.user_email elif action.target_type == ActionTarget.TEAM.value: action_target_team = target return "Email #" + action_target_team.slug elif action.type == AlertRuleTriggerActionType.SENTRY_APP.value: return f"Notify {action.target_display}" return f"Notify {action.target_display} via {ACTION_TYPE_TO_STRING[action.type]}" def _get_workflow_name(apps: Apps, alert_rule: Any) -> str: """ Generate a workflow name like 'Slack @michelle.fu, Email michelle.fu@sentry.io...(+3)' if there is only a critical trigger or with priority label: 'Critical - Slack @michelle.fu, Warning email michelle.fu@sentry.io...(+3)'' """ AlertRuleTrigger = apps.get_model("sentry", "AlertRuleTrigger") AlertRuleTriggerAction = apps.get_model("sentry", "AlertRuleTriggerAction") name = "" triggers = AlertRuleTrigger.objects.filter(alert_rule_id=alert_rule.id).order_by("label") include_label = False if triggers.count() == 1 else True actions = AlertRuleTriggerAction.objects.filter( alert_rule_trigger_id__in=[trigger.id for trigger in triggers], status=0 ) actions_counter = 0 for trigger in triggers: name += f"{trigger.label.title()} - " if include_label else "" for action in actions.filter(alert_rule_trigger_id=trigger.id): description = _get_action_description(apps, action) + ", " if actions_counter < MAX_ACTIONS: name += description actions_counter += 1 else: remaining_actions = actions.count() - actions_counter name = name[:-2] name += f"...(+{remaining_actions})" break if name[-2:] == ", ": name = name[:-2] # chop off the trailing comma return name def _migrate_trigger(apps: Apps, trigger: Any, detector: Any, workflow: Any) -> None: AlertRuleTriggerAction = apps.get_model("sentry", "AlertRuleTriggerAction") DataCondition = apps.get_model("workflow_engine", "DataCondition") DataConditionGroup = apps.get_model("workflow_engine", "DataConditionGroup") WorkflowDataConditionGroup = apps.get_model("workflow_engine", 
"WorkflowDataConditionGroup") DataConditionAlertRuleTrigger = apps.get_model( "workflow_engine", "DataConditionAlertRuleTrigger" ) alert_rule = trigger.alert_rule condition_result = PRIORITY_MAP.get(trigger.label, DetectorPriorityLevel.HIGH) # create detector trigger detector_trigger = DataCondition.objects.create( type=Condition.ANOMALY_DETECTION, comparison={ "sensitivity": alert_rule.sensitivity, "seasonality": alert_rule.seasonality, "threshold_type": alert_rule.threshold_type, }, condition_result=condition_result, condition_group=detector.workflow_condition_group, ) DataConditionAlertRuleTrigger.objects.create( data_condition=detector_trigger, alert_rule_trigger_id=trigger.id, ) # create action filter data_condition_group = DataConditionGroup.objects.create( organization_id=alert_rule.organization_id ) WorkflowDataConditionGroup.objects.create( condition_group=data_condition_group, workflow=workflow, ) action_filter = DataCondition.objects.create( comparison=PRIORITY_MAP.get(trigger.label, DetectorPriorityLevel.HIGH), condition_result=True, type=Condition.ISSUE_PRIORITY_GREATER_OR_EQUAL, condition_group=data_condition_group, ) # resolve action filter DataCondition.objects.create( comparison=PRIORITY_MAP.get(trigger.label, DetectorPriorityLevel.HIGH), condition_result=True, type=Condition.ISSUE_PRIORITY_DEESCALATING, condition_group=data_condition_group, ) trigger_actions = AlertRuleTriggerAction.objects.filter(alert_rule_trigger=trigger) for trigger_action in trigger_actions: # 0 is active status if trigger_action.status != 0: continue _migrate_trigger_action(apps, trigger_action, action_filter.condition_group.id) def _migrate_trigger_action(apps: Apps, trigger_action: Any, condition_group_id: int) -> None: DataConditionGroupAction = apps.get_model("workflow_engine", "DataConditionGroupAction") Action = apps.get_model("workflow_engine", "Action") ActionAlertRuleTriggerAction = apps.get_model("workflow_engine", "ActionAlertRuleTriggerAction") if 
trigger_action.sentry_app_id: action_type = ActionType.SENTRY_APP elif trigger_action.integration_id: try: action_type = ActionType(TYPE_TO_PROVIDER[trigger_action.type]) except Exception: logger.info( "could not find a matching action type for the trigger action", extra={"trigger_action_id": trigger_action.id}, ) raise else: action_type = ActionType.EMAIL # build data blob if action_type == ActionType.SENTRY_APP: if not trigger_action.sentry_app_config: data = {} else: settings = ( [trigger_action.sentry_app_config] if isinstance(trigger_action.sentry_app_config, dict) else trigger_action.sentry_app_config ) data = dataclasses.asdict(SentryAppDataBlob.from_list(settings)) elif action_type in (ActionType.OPSGENIE, ActionType.PAGERDUTY): default_priority = ( OPSGENIE_DEFAULT_PRIORITY if action_type == ActionType.OPSGENIE else PAGERDUTY_DEFAULT_SEVERITY ) if not trigger_action.sentry_app_config: data = {"priority": default_priority} else: # Ensure sentry_app_config is a dict before accessing config = trigger_action.sentry_app_config if not isinstance(config, dict): data = {"priority": default_priority} else: priority = config.get("priority", default_priority) data = dataclasses.asdict(OnCallDataBlob(priority=priority)) else: data = {} # get target identifier if action_type == ActionType.SENTRY_APP: if not trigger_action.sentry_app_id: logger.info( "trigger action missing sentry app ID", extra={"trigger_action_id": trigger_action.id}, ) raise Exception("Trigger action missing Sentry app ID") target_identifier = str(trigger_action.sentry_app_id) else: target_identifier = trigger_action.target_identifier # build config target_type = trigger_action.target_type config = { "target_display": trigger_action.target_display, "target_identifier": target_identifier, "target_type": target_type, } if target_type == ActionTarget.SENTRY_APP.value: config["sentry_app_identifier"] = SentryAppIdentifier.SENTRY_APP_ID # create the models action = Action.objects.create( type=action_type, 
data=data, integration_id=trigger_action.integration_id, config=config, ) _enforce_action_json_schema(action) DataConditionGroupAction.objects.create( condition_group_id=condition_group_id, action_id=action.id, ) ActionAlertRuleTriggerAction.objects.create( action_id=action.id, alert_rule_trigger_action_id=trigger_action.id, ) def _create_data_source(apps: Apps, alert_rule: Any) -> Any: DataSource = apps.get_model("workflow_engine", "DataSource") QuerySubscription = apps.get_model("sentry", "QuerySubscription") snuba_query = alert_rule.snuba_query if not snuba_query: logger.info("alert rule missing snuba query", extra={"alert_rule_id": alert_rule.id}) raise Exception("Alert rule missing snuba query") try: query_subscription = QuerySubscription.objects.get(snuba_query=snuba_query.id) except QuerySubscription.DoesNotExist: logger.info( "query subscription does not exist", extra={"snuba_query_id": snuba_query.id}, ) raise Exception("Query subscription does not exist") data_source = DataSource.objects.create( organization_id=alert_rule.organization_id, source_id=str(query_subscription.id), type="snuba_query_subscription", ) return data_source def _create_detector( apps: Apps, alert_rule: Any, project: Any, data_condition_group: Any, create_activity: Any, enabled: bool, ) -> Any: Detector = apps.get_model("workflow_engine", "Detector") detector = Detector.objects.create( project_id=project.id, enabled=enabled, created_by_id=create_activity.user_id if create_activity else None, name=alert_rule.name if len(alert_rule.name) < 200 else alert_rule.name[:197] + "...", workflow_condition_group=data_condition_group, type="metric_issue", description=alert_rule.description, owner_user_id=alert_rule.user_id, owner_team=alert_rule.team, config={ "threshold_period": alert_rule.threshold_period, "sensitivity": alert_rule.sensitivity, "seasonality": alert_rule.seasonality, "comparison_delta": alert_rule.comparison_delta, "detection_type": alert_rule.detection_type, }, ) 
Detector.objects.filter(id=detector.id).update(date_added=alert_rule.date_added) return detector def _create_detector_state(apps: Apps, alert_rule: Any, project: Any, detector: Any) -> None: Incident = apps.get_model("sentry", "Incident") DetectorState = apps.get_model("workflow_engine", "DetectorState") incident_query = Incident.objects.filter( type=IncidentType.ALERT_TRIGGERED.value, alert_rule=alert_rule, projects=project, ) open_incident = ( incident_query.exclude(status=IncidentStatus.CLOSED.value).order_by("-date_added").first() ) if open_incident: state = ( DetectorPriorityLevel.MEDIUM if open_incident.status == IncidentStatus.WARNING.value else DetectorPriorityLevel.HIGH ) else: state = DetectorPriorityLevel.OK # create detector state DetectorState.objects.create( detector=detector, is_triggered=True if open_incident else False, state=state, ) def _update_migrated_detector_triggers(apps: Apps, alert_rule: Any, detector: Any) -> None: Detector = apps.get_model("workflow_engine", "Detector") DataCondition = apps.get_model("workflow_engine", "DataCondition") if DataCondition.objects.filter( condition_group=detector.workflow_condition_group, type=Condition.ANOMALY_DETECTION ).exists(): # data conditions are correctly formed return try: with transaction.atomic(router.db_for_write(Detector)): critical_detector_trigger = DataCondition.objects.get( condition_group=detector.workflow_condition_group, condition_result=DetectorPriorityLevel.HIGH, ) critical_detector_trigger.type = Condition.ANOMALY_DETECTION critical_detector_trigger.comparison = { "sensitivity": alert_rule.sensitivity, "seasonality": alert_rule.seasonality, "threshold_type": alert_rule.threshold_type, } critical_detector_trigger.save() resolve_detector_trigger = DataCondition.objects.get( condition_group=detector.workflow_condition_group, condition_result=DetectorPriorityLevel.OK, ) resolve_detector_trigger.delete() logger.info( "Updated migrated detector triggers", extra={"alert_rule_id": 
alert_rule.id} ) except Exception as e: logger.info( "error when updating detector triggers", extra={"error": str(e), "alert_rule_id": alert_rule.id}, ) sentry_sdk.capture_exception(e) def migrate_anomaly_detection_alerts(apps: Apps, schema_editor: BaseDatabaseSchemaEditor) -> None: AlertRule = apps.get_model("sentry", "AlertRule") AlertRuleTrigger = apps.get_model("sentry", "AlertRuleTrigger") AlertRuleActivity = apps.get_model("sentry", "AlertRuleActivity") Organization = apps.get_model("sentry", "Organization") RuleSnooze = apps.get_model("sentry", "RuleSnooze") AlertRuleDetector = apps.get_model("workflow_engine", "AlertRuleDetector") AlertRuleWorkflow = apps.get_model("workflow_engine", "AlertRuleWorkflow") DataConditionGroup = apps.get_model("workflow_engine", "DataConditionGroup") DetectorWorkflow = apps.get_model("workflow_engine", "DetectorWorkflow") Workflow = apps.get_model("workflow_engine", "Workflow") # MAIN MIGRATION LOOP STARTS HERE for organization in RangeQuerySetWrapper(Organization.objects.all()): organization_id = organization.id with transaction.atomic(router.db_for_write(AlertRule)): alert_rules = AlertRule.objects_with_snapshots.select_for_update().filter( organization=organization, status__in=[AlertRuleStatus.PENDING.value, AlertRuleStatus.NOT_ENOUGH_DATA.value], detection_type="dynamic", ) for alert_rule in RangeQuerySetWrapper(alert_rules): if AlertRuleDetector.objects.filter(alert_rule_id=alert_rule.id).exists(): # this alert was changed to an anomaly detection alert after the initial migration # we should update this rule's detector triggers logger.info( "Alert rule already migrated", extra={"alert_rule_id": alert_rule.id}, ) detector = AlertRuleDetector.objects.get(alert_rule_id=alert_rule.id).detector _update_migrated_detector_triggers(apps, alert_rule, detector) continue try: with transaction.atomic(router.db_for_write(AlertRule)): project = alert_rule.projects.first() if not project: logger.info( "alert rule missing project, 
skipping", extra={"alert_rule_id": alert_rule.id}, ) continue snoozed = None try: snoozed = RuleSnooze.objects.get( alert_rule_id=alert_rule.id, user_id=None ) except RuleSnooze.DoesNotExist: pass enabled = True if snoozed is None else False create_activity = AlertRuleActivity.objects.filter( alert_rule_id=alert_rule.id, type=AlertRuleActivityType.CREATED.value ).first() # create data source data_source = _create_data_source(apps, alert_rule) # create detector DCG data_condition_group = DataConditionGroup.objects.create( organization_id=organization_id, ) # create detector detector = _create_detector( apps, alert_rule, project, data_condition_group, create_activity, enabled, ) # create workflow workflow = Workflow.objects.create( name=_get_workflow_name(apps, alert_rule), organization_id=organization_id, when_condition_group=None, enabled=True, created_by_id=create_activity.user_id if create_activity else None, owner_user_id=alert_rule.user_id, owner_team=alert_rule.team, config={}, ) Workflow.objects.filter(id=workflow.id).update( date_added=alert_rule.date_added ) data_source.detectors.set([detector]) # create detector state _create_detector_state(apps, alert_rule, project, detector) # create lookup tables AlertRuleDetector.objects.create( alert_rule_id=alert_rule.id, detector=detector ) AlertRuleWorkflow.objects.create( alert_rule_id=alert_rule.id, workflow=workflow ) DetectorWorkflow.objects.create(detector=detector, workflow=workflow) # migrate triggers triggers = AlertRuleTrigger.objects.filter(alert_rule_id=alert_rule.id) for trigger in triggers: # anomaly detection alerts only have critical triggers # migrates the trigger and its associated actions _migrate_trigger(apps, trigger, detector, workflow) logger.info( "Successfully migrated alert rule", extra={"alert_rule_id": alert_rule.id}, ) except Exception as e: logger.info( "error when migrating alert rule", extra={"error": str(e), "alert_rule_id": alert_rule.id}, ) sentry_sdk.capture_exception(e)
OnCallDataBlob
python
getsentry__sentry
src/sentry/grouping/enhancer/matchers.py
{ "start": 11403, "end": 11825 }
class ____(FrameMatch): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._ref_val = bool_from_string(self.pattern) def _positive_frame_match( self, match_frame: MatchFrame, exception_data: dict[str, Any], cache: ReturnValueCache ) -> bool: ref_val = self._ref_val return ref_val is not None and ref_val == bool(match_frame["in_app"])
InAppMatch
python
getsentry__sentry
src/sentry/remote_subscriptions/consumers/result_consumer.py
{ "start": 3765, "end": 12065 }
class ____(ProcessingStrategyFactory[KafkaPayload], Generic[T, U]): parallel_executor: ThreadPoolExecutor | None = None batched_parallel = False """ Does the consumer process unrelated messages in parallel? """ max_batch_size = 500 """ How many messages will be batched at once when in parallel mode. """ max_batch_time = 10 """ The maximum time in seconds to accumulate a bach of check-ins. """ parallel = False """ Does the consumer process all messages in parallel. """ multiprocessing_pool: MultiprocessingPool | None = None input_block_size: int | None = None output_block_size: int | None = None def __init__( self, consumer_group: str, mode: Literal["batched-parallel", "parallel", "serial"] = "serial", max_batch_size: int | None = None, max_batch_time: int | None = None, max_workers: int | None = None, num_processes: int | None = None, input_block_size: int | None = None, output_block_size: int | None = None, ) -> None: self.mode = mode self.consumer_group = consumer_group metric_tags = {"identifier": self.identifier, "mode": self.mode} self.result_processor = self.result_processor_cls(use_subscription_lock=mode == "parallel") if mode == "batched-parallel": self.batched_parallel = True self.parallel_executor = ThreadPoolExecutor(max_workers=max_workers) if max_workers is None: metric_tags["workers"] = "default" else: metric_tags["workers"] = str(max_workers) if mode == "parallel": self.parallel = True if num_processes is None: num_processes = multiprocessing.cpu_count() self.multiprocessing_pool = MultiprocessingPool(num_processes) metrics.incr( "remote_subscriptions.result_consumer.start", 1, tags=metric_tags, ) if max_batch_size is not None: self.max_batch_size = max_batch_size if max_batch_time is not None: self.max_batch_time = max_batch_time if input_block_size is not None: self.input_block_size = input_block_size if output_block_size is not None: self.output_block_size = output_block_size @property @abc.abstractmethod def topic_for_codec(self) -> Topic: pass 
@property @abc.abstractmethod def result_processor_cls(self) -> type[ResultProcessor[T, U]]: pass @abc.abstractmethod def build_payload_grouping_key(self, result: T) -> str: """ Used in parallel processing mode. This method should return a string used to group related results together for serial processing. """ pass @property @abc.abstractmethod def identifier(self) -> str: """ A unique identifier for this consumer - used to differentiate it in stats """ pass def shutdown(self) -> None: if self.parallel_executor: self.parallel_executor.shutdown() def create_with_partitions( self, commit: Commit, partitions: Mapping[Partition, int], ) -> ProcessingStrategy[KafkaPayload]: if self.batched_parallel: return self.create_thread_parallel_worker(commit) if self.parallel: return self.create_multiprocess_worker(commit) else: return self.create_serial_worker(commit) def create_serial_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]: return RunTask( function=partial( process_single, self.result_processor, self.topic_for_codec, self.identifier ), next_step=CommitOffsets(commit), ) def create_multiprocess_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]: assert self.multiprocessing_pool is not None return run_task_with_multiprocessing( function=partial( process_single, self.result_processor, self.topic_for_codec, self.identifier ), next_step=CommitOffsets(commit), max_batch_size=self.max_batch_size, max_batch_time=self.max_batch_time, pool=self.multiprocessing_pool, input_block_size=self.input_block_size, output_block_size=self.output_block_size, ) def create_thread_parallel_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]: assert self.parallel_executor is not None batch_processor = RunTask( function=self.process_batch, next_step=CommitOffsets(commit), ) return BatchStep( max_batch_size=self.max_batch_size, max_batch_time=self.max_batch_time, next_step=batch_processor, ) def partition_message_batch(self, message: 
Message[ValuesBatch[KafkaPayload]]) -> list[list[T]]: """ Takes a batch of messages and partitions them based on the `build_payload_grouping_key` method. Returns a generator that yields each partitioned list of messages. """ batch = message.payload batch_mapping: Mapping[str, list[T]] = defaultdict(list) for item in batch: assert isinstance(item, BrokerValue) result = decode_payload(self.topic_for_codec, item.payload, self.result_processor) if result is None: continue key = self.build_payload_grouping_key(result) batch_mapping[key].append(result) # Number of messages that are being processed in this batch metrics.gauge( "remote_subscriptions.result_consumer.parallel_batch_count", len(batch), tags={"identifier": self.identifier, "mode": self.mode}, ) # Number of groups we've collected to be processed in parallel metrics.gauge( "remote_subscriptions.result_consumer.parallel_batch_groups", len(batch_mapping), tags={"identifier": self.identifier, "mode": self.mode}, ) return list(batch_mapping.values()) def process_batch(self, message: Message[ValuesBatch[KafkaPayload]]): """ Receives batches of messages. This function will take the batch and group them together using `build_payload_grouping_key`, which ensures order is preserved. Each group is then executed using a ThreadPoolWorker. By batching we're able to process messages in parallel while guaranteeing that no messages are processed out of order. """ assert self.parallel_executor is not None partitioned_values = self.partition_message_batch(message) # Submit groups for processing with sentry_sdk.start_transaction( op="process_batch", name=f"monitors.{self.identifier}.result_consumer" ): futures = [ self.parallel_executor.submit(self.process_group, group) for group in partitioned_values ] wait(futures) def process_group(self, items: list[T]): """ Process a group of related messages serially. 
""" for item in items: self.result_processor(self.identifier, item) def decode_payload( topic_for_codec: Topic, payload: KafkaPayload | FilteredPayload, result_processor: ResultProcessor[T, U] | None = None, ) -> T | None: assert not isinstance(payload, FilteredPayload) try: codec = get_topic_codec(topic_for_codec) return codec.decode(payload.value) except Exception: logger.exception( "Failed to decode message payload", extra={"payload": payload.value}, ) return None def process_single( result_processor: ResultProcessor[T, U], topic: Topic, identifier: str, message: Message[KafkaPayload | FilteredPayload], ) -> None: result = decode_payload(topic, message.payload, result_processor) if result is not None: result_processor(identifier, result)
ResultsStrategyFactory
python
streamlit__streamlit
lib/tests/streamlit/elements/vega_charts_test.py
{ "start": 19606, "end": 29352 }
class ____(DeltaGeneratorTestCase): """Test altair_chart width parameter functionality.""" @parameterized.expand( [ # width, expected_width_spec, expected_width_value ("stretch", "use_stretch", True), ("content", "use_content", True), (500, "pixel_width", 500), (None, "use_stretch", True), # Default to stretch when None ] ) def test_altair_chart_width_combinations( self, width: str | int | None, expected_width_spec: str, expected_width_value: bool | int, ): """Test altair_chart with various width combinations.""" df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T chart = ( alt.Chart(df) .mark_bar() .encode( x=alt.X("a:O"), y=alt.Y("b:Q"), ) ) if width is None: st.altair_chart(chart) else: st.altair_chart(chart, width=width) el = self.get_delta_from_queue().new_element # Check width configuration assert el.width_config.WhichOneof("width_spec") == expected_width_spec assert getattr(el.width_config, expected_width_spec) == expected_width_value @parameterized.expand( [ # Test parameters: use_container_width, width, expected_width_spec, expected_width_value ( True, None, "use_stretch", True, ), # use_container_width=True -> width="stretch" ( False, None, "use_content", True, ), # use_container_width=False -> width="content" ( True, 500, "use_stretch", True, ), # use_container_width=True overrides integer width ( True, "content", "use_stretch", True, ), # use_container_width=True overrides string width ( False, "content", "use_content", True, ), # use_container_width=False, width="content" ( False, 500, "pixel_width", 500, ), # use_container_width=False, integer width -> respect integer ] ) @patch("streamlit.elements.vega_charts.show_deprecation_warning") def test_altair_chart_use_container_width_deprecation( self, use_container_width: bool, width: int | str | None, expected_width_spec: str, expected_width_value: bool | int, mock_warning: Mock, ): """Test that use_container_width shows deprecation warning and is correctly translated to the 
new width parameter.""" df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T chart = ( alt.Chart(df) .mark_bar() .encode( x=alt.X("a:O"), y=alt.Y("b:Q"), ) ) kwargs = {"use_container_width": use_container_width} if width is not None: kwargs["width"] = width st.altair_chart(chart, **kwargs) mock_warning.assert_called_once() el = self.get_delta_from_queue().new_element # Should be translated to the correct width configuration assert el.width_config.WhichOneof("width_spec") == expected_width_spec assert getattr(el.width_config, expected_width_spec) == expected_width_value @parameterized.expand( [ ("width", "invalid_width"), ("width", 0), # width must be positive ("width", -100), # negative width ] ) def test_altair_chart_validation_errors( self, param_name: str, invalid_value: str | int ): """Test that invalid width values raise validation errors.""" df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T chart = ( alt.Chart(df) .mark_bar() .encode( x=alt.X("a:O"), y=alt.Y("b:Q"), ) ) kwargs = {param_name: invalid_value} with pytest.raises(StreamlitAPIException): st.altair_chart(chart, **kwargs) @pytest.mark.skipif( is_altair_version_less_than("5.0.0"), reason="This test only runs if altair is >= 5.0.0", ) def test_altair_chart_width_with_selections(self): """Test that width works correctly with selections enabled.""" df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T chart = ( alt.Chart(df) .mark_bar() .encode( x=alt.X("a:O"), y=alt.Y("b:Q"), ) .add_params(alt.selection_point("my_param")) ) result = st.altair_chart( chart, width=600, on_select="rerun", key="test_altair_chart" ) # Check that the chart element has the correct width configuration el = self.get_delta_from_queue().new_element assert el.width_config.WhichOneof("width_spec") == "pixel_width" assert el.width_config.pixel_width == 600 # Check that selections are still working assert hasattr(result, "selection") assert 
result.selection.my_param == {} @parameterized.expand( [ # Test name, chart description, chart creation function ( "regular_chart", "Regular charts", lambda df: alt.Chart(df) .mark_bar() .encode( x=alt.X("a:O"), y=alt.Y("b:Q"), ), ), ( "vconcat_chart", "Vertical concatenation charts", lambda df: alt.vconcat( alt.Chart(df) .mark_bar() .encode( x=alt.X("a:O"), y=alt.Y("b:Q"), ), alt.Chart(df) .mark_point() .encode( x=alt.X("a:O"), y=alt.Y("b:Q"), ), ), ), ] ) def test_altair_chart_default_width_stretch_charts( self, test_name: str, chart_description: str, chart_func: Callable ): """Test that certain Altair chart types default to 'stretch' width.""" df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T chart = chart_func(df) st.altair_chart(chart) el = self.get_delta_from_queue().new_element assert el.width_config.WhichOneof("width_spec") == "use_stretch" assert el.width_config.use_stretch is True @parameterized.expand( [ # Test name, chart description, chart creation function ( "facet_chart", "Facet charts", lambda df: alt.Chart(df) .mark_bar() .encode( x=alt.X("a:O"), y=alt.Y("b:Q"), facet=alt.Facet("a:O"), ), ), ( "facet_chart_row", "Charts with row faceting", lambda df: alt.Chart(df) .mark_bar() .encode( x=alt.X("a:O"), y=alt.Y("b:Q"), row=alt.Row("a:O"), ), ), ( "facet_chart_column", "Charts with column faceting", lambda df: alt.Chart(df) .mark_bar() .encode( x=alt.X("a:O"), y=alt.Y("b:Q"), column=alt.Column("a:O"), ), ), ( "hconcat_chart", "Horizontal concatenation charts", lambda df: alt.hconcat( alt.Chart(df) .mark_bar() .encode( x=alt.X("a:O"), y=alt.Y("b:Q"), ), alt.Chart(df) .mark_point() .encode( x=alt.X("a:O"), y=alt.Y("b:Q"), ), ), ), ( "repeat_chart", "Repeat charts", lambda df: alt.Chart(df) .mark_bar() .encode( x=alt.X(alt.repeat("row"), type="ordinal"), y=alt.Y("b:Q"), ) .repeat(row=["a", "b"]), ), ] ) def test_altair_chart_default_width_content_charts( self, test_name: str, chart_description: str, chart_func: Callable ): 
"""Test that certain Altair chart types default to 'content' width.""" df = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T chart = chart_func(df) st.altair_chart(chart) el = self.get_delta_from_queue().new_element assert el.width_config.WhichOneof("width_spec") == "use_content" assert el.width_config.use_content is True
AltairChartWidthTest
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/sagemaker.py
{ "start": 20567, "end": 29136 }
class ____(SageMakerBaseOperator): """ When you create a serverless endpoint, SageMaker provisions and manages the compute resources for you. Then, you can make inference requests to the endpoint and receive model predictions in response. SageMaker scales the compute resources up and down as needed to handle your request traffic. Requires an Endpoint Config. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:SageMakerEndpointOperator` :param config: The configuration necessary to create an endpoint. If you need to create a SageMaker endpoint based on an existed SageMaker model and an existed SageMaker endpoint config:: config = endpoint_configuration If you need to create all of SageMaker model, SageMaker endpoint-config and SageMaker endpoint:: config = { "Model": model_configuration, "EndpointConfig": endpoint_config_configuration, "Endpoint": endpoint_configuration, } For details of the configuration parameter of model_configuration see :py:meth:`SageMaker.Client.create_model` For details of the configuration parameter of endpoint_config_configuration see :py:meth:`SageMaker.Client.create_endpoint_config` For details of the configuration parameter of endpoint_configuration see :py:meth:`SageMaker.Client.create_endpoint` :param wait_for_completion: Whether the operator should wait until the endpoint creation finishes. :param check_interval: If wait is set to True, this is the time interval, in seconds, that this operation waits before polling the status of the endpoint creation. :param max_ingestion_time: If wait is set to True, this operation fails if the endpoint creation doesn't finish within max_ingestion_time seconds. If you set this parameter to None it never times out. :param operation: Whether to create an endpoint or update an endpoint. Must be either 'create or 'update'. :param aws_conn_id: The Airflow connection used for AWS credentials. 
If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether or not to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param deferrable: Will wait asynchronously for completion. :return Dict: Returns The ARN of the endpoint created in Amazon SageMaker. """ def __init__( self, *, config: dict, wait_for_completion: bool = True, check_interval: int = CHECK_INTERVAL_SECOND, max_ingestion_time: int | None = None, operation: str = "create", deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), **kwargs, ): super().__init__(config=config, **kwargs) self.wait_for_completion = wait_for_completion self.check_interval = check_interval self.max_ingestion_time = max_ingestion_time or 3600 * 10 self.operation = operation.lower() if self.operation not in ["create", "update"]: raise ValueError('Invalid value! 
Argument operation has to be one of "create" and "update"') self.deferrable = deferrable def _create_integer_fields(self) -> None: """Set fields which should be cast to integers.""" if "EndpointConfig" in self.config: self.integer_fields: list[list[str]] = [ ["EndpointConfig", "ProductionVariants", "InitialInstanceCount"] ] def expand_role(self) -> None: """Expand an IAM role name into an ARN.""" if "Model" not in self.config: return hook = AwsBaseHook(self.aws_conn_id, client_type="iam") config = self.config["Model"] if "ExecutionRoleArn" in config: config["ExecutionRoleArn"] = hook.expand_role(config["ExecutionRoleArn"]) def execute(self, context: Context) -> dict: self.preprocess_config() model_info = self.config.get("Model") endpoint_config_info = self.config.get("EndpointConfig") endpoint_info = self.config.get("Endpoint", self.config) if model_info: self.log.info("Creating SageMaker model %s.", model_info["ModelName"]) self.hook.create_model(model_info) if endpoint_config_info: self.log.info("Creating endpoint config %s.", endpoint_config_info["EndpointConfigName"]) self.hook.create_endpoint_config(endpoint_config_info) if self.operation == "create": sagemaker_operation = self.hook.create_endpoint log_str = "Creating" elif self.operation == "update": sagemaker_operation = self.hook.update_endpoint log_str = "Updating" else: raise ValueError('Invalid value! 
Argument operation has to be one of "create" and "update"') self.log.info("%s SageMaker endpoint %s.", log_str, endpoint_info["EndpointName"]) try: response = sagemaker_operation( endpoint_info, wait_for_completion=False, # waiting for completion is handled here in the operator ) except ClientError as ce: if self.operation == "create" and ce.response["Error"]["Message"].startswith( "Cannot create already existing endpoint" ): # if we get an error because the endpoint already exists, we try to update it instead self.operation = "update" sagemaker_operation = self.hook.update_endpoint self.log.warning( "cannot create already existing endpoint %s, updating it with the given config instead", endpoint_info["EndpointName"], ) if "Tags" in endpoint_info: self.log.warning( "Provided tags will be ignored in the update operation " "(tags on the existing endpoint will be unchanged)" ) endpoint_info.pop("Tags") response = sagemaker_operation( endpoint_info, wait_for_completion=False, ) else: raise if response["ResponseMetadata"]["HTTPStatusCode"] != 200: raise AirflowException(f"Sagemaker endpoint creation failed: {response}") if self.deferrable: self.defer( trigger=SageMakerTrigger( job_name=endpoint_info["EndpointName"], job_type="endpoint", poke_interval=self.check_interval, aws_conn_id=self.aws_conn_id, ), method_name="execute_complete", timeout=datetime.timedelta(seconds=self.max_ingestion_time), ) elif self.wait_for_completion: self.hook.get_waiter("endpoint_in_service").wait( EndpointName=endpoint_info["EndpointName"], WaiterConfig={"Delay": self.check_interval, "MaxAttempts": self.max_ingestion_time}, ) return { "EndpointConfig": serialize( self.hook.describe_endpoint_config(endpoint_info["EndpointConfigName"]) ), "Endpoint": serialize(self.hook.describe_endpoint(endpoint_info["EndpointName"])), } def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> dict[str, dict]: validated_event = validate_execute_complete_event(event) if 
validated_event["status"] != "success": raise AirflowException(f"Error while running job: {validated_event}") response = self.hook.describe_endpoint(validated_event["job_name"]) return { "EndpointConfig": serialize(self.hook.describe_endpoint_config(response["EndpointConfigName"])), "Endpoint": serialize(self.hook.describe_endpoint(response["EndpointName"])), }
SageMakerEndpointOperator
python
ipython__ipython
IPython/core/oinspect.py
{ "start": 4299, "end": 10943 }
class ____: """Data passed to the mime hook""" obj: Any info: Optional[OInfo] info_dict: InfoDict detail_level: int omit_sections: list[str] @undoc def object_info( *, name: str, found: bool, isclass: bool = False, isalias: bool = False, ismagic: bool = False, **kw, ) -> InfoDict: """Make an object info dict with all fields present.""" infodict = dict(kw) infodict.update({k: None for k in _info_fields if k not in infodict}) infodict["name"] = name # type: ignore infodict["found"] = found # type: ignore infodict["isclass"] = isclass # type: ignore infodict["isalias"] = isalias # type: ignore infodict["ismagic"] = ismagic # type: ignore return InfoDict(**infodict) # type:ignore def get_encoding(obj): """Get encoding for python source file defining obj Returns None if obj is not defined in a sourcefile. """ ofile = find_file(obj) # run contents of file through pager starting at line where the object # is defined, as long as the file isn't binary and is actually on the # filesystem. if ofile is None: return None elif ofile.endswith(('.so', '.dll', '.pyd')): return None elif not os.path.isfile(ofile): return None else: # Print only text files, not extension binaries. Note that # getsourcelines returns lineno with 1-offset and page() uses # 0-offset, so we must adjust. with stdlib_io.open(ofile, 'rb') as buffer: # Tweaked to use io.open for Python 2 encoding, _lines = openpy.detect_encoding(buffer.readline) return encoding def getdoc(obj) -> Union[str, None]: """Stable wrapper around inspect.getdoc. This can't crash because of attribute problems. It also attempts to call a getdoc() method on the given object. This allows objects which provide their docstrings via non-standard mechanisms (like Pyro proxies) to still be inspected by ipython's ? system. 
""" # Allow objects to offer customized documentation via a getdoc method: try: ds = obj.getdoc() except Exception: pass else: if isinstance(ds, str): return inspect.cleandoc(ds) docstr = inspect.getdoc(obj) return docstr def getsource(obj, oname='') -> Union[str,None]: """Wrapper around inspect.getsource. This can be modified by other projects to provide customized source extraction. Parameters ---------- obj : object an object whose source code we will attempt to extract oname : str (optional) a name under which the object is known Returns ------- src : unicode or None """ if isinstance(obj, property): sources = [] for attrname in ['fget', 'fset', 'fdel']: fn = getattr(obj, attrname) if fn is not None: oname_prefix = ('%s.' % oname) if oname else '' sources.append(''.join(('# ', oname_prefix, attrname))) if inspect.isfunction(fn): _src = getsource(fn) if _src: # assert _src is not None, "please mypy" sources.append(dedent(_src)) else: # Default str/repr only prints function name, # pretty.pretty prints module name too. sources.append( '%s%s = %s\n' % (oname_prefix, attrname, pretty(fn)) ) if sources: return '\n'.join(sources) else: return None else: # Get source for non-property objects. obj = _get_wrapped(obj) try: src = inspect.getsource(obj) except TypeError: # The object itself provided no meaningful source, try looking for # its class definition instead. try: src = inspect.getsource(obj.__class__) except (OSError, TypeError): return None except OSError: return None return src def is_simple_callable(obj): """True if obj is a function ()""" return (inspect.isfunction(obj) or inspect.ismethod(obj) or \ isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type)) def _get_wrapped(obj): """Get the original object if wrapped in one or more @decorators Some objects automatically construct similar objects on any unrecognised attribute access (e.g. unittest.mock.call). 
To protect against infinite loops, this will arbitrarily cut off after 100 levels of obj.__wrapped__ attribute access. --TK, Jan 2016 """ orig_obj = obj i = 0 while safe_hasattr(obj, '__wrapped__'): obj = obj.__wrapped__ i += 1 if i > 100: # __wrapped__ is probably a lie, so return the thing we started with return orig_obj return obj def find_file(obj) -> Optional[str]: """Find the absolute path to the file where an object was defined. This is essentially a robust wrapper around `inspect.getabsfile`. Returns None if no file can be found. Parameters ---------- obj : any Python object Returns ------- fname : str The absolute path to the file where the object was defined. """ obj = _get_wrapped(obj) fname: Optional[str] = None try: fname = inspect.getabsfile(obj) except TypeError: # For an instance, the file that matters is where its class was # declared. try: fname = inspect.getabsfile(obj.__class__) except (OSError, TypeError): # Can happen for builtins pass except OSError: pass return fname def find_source_lines(obj): """Find the line number in a file where an object was defined. This is essentially a robust wrapper around `inspect.getsourcelines`. Returns None if no file can be found. Parameters ---------- obj : any Python object Returns ------- lineno : int The line number where the object definition starts. """ obj = _get_wrapped(obj) try: lineno = inspect.getsourcelines(obj)[1] except TypeError: # For instances, try the class object like getsource() does try: lineno = inspect.getsourcelines(obj.__class__)[1] except (OSError, TypeError): return None except OSError: return None return lineno _sentinel = object()
InspectorHookData
python
explosion__spaCy
spacy/lang/ga/__init__.py
{ "start": 237, "end": 350 }
class ____(BaseDefaults): tokenizer_exceptions = TOKENIZER_EXCEPTIONS stop_words = STOP_WORDS
IrishDefaults
python
pytorch__pytorch
test/package/package_d/imports_directly.py
{ "start": 75, "end": 218 }
class ____(torch.nn.Module): key = important_string def forward(self, inp): return torch.sum(inp)
ImportsDirectlyFromSubSubPackage
python
huggingface__transformers
src/transformers/models/blip_2/modeling_blip_2.py
{ "start": 6848, "end": 11087 }
class ____(nn.Module): def __init__(self, config: Blip2VisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(1, 1, self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim)) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 num_positions = self.position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding class_pos_embed = self.position_embedding[:, :1] patch_pos_embed = self.position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", 
align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: position_embedding = self.interpolate_pos_encoding(embeddings, height, width) else: position_embedding = self.position_embedding embeddings = embeddings + position_embedding[:, : embeddings.size(1), :].to(target_dtype) return embeddings # Adapted from transformers.models.siglip.modeling_siglip.eager_attention_forward -> BLIP doesn't cast attn weights to fp32 def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights
Blip2VisionEmbeddings
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/bigtable.py
{ "start": 1458, "end": 1660 }
class ____(BaseGoogleLink): """Helper class for constructing Bigtable Cluster link.""" name = "Bigtable Cluster" key = "cluster_key" format_str = BIGTABLE_CLUSTER_LINK
BigtableClusterLink
python
jazzband__django-oauth-toolkit
tests/models.py
{ "start": 1567, "end": 1731 }
class ____(AbstractIDToken): """Exists to be improperly configured for multiple databases.""" # The other token types will be in 'alpha' database.
LocalIDToken
python
getsentry__sentry
src/sentry/notifications/types.py
{ "start": 6022, "end": 7435 }
class ____: implicit = -1 # not for use as a persisted field value committed = -2 # not for use as a persisted field value processing_issue = -3 # not for use as a persisted field value unknown = 0 comment = 1 assigned = 2 bookmark = 3 status_change = 4 deploy_setting = 5 mentioned = 6 team_mentioned = 7 descriptions = { implicit: "have opted to receive updates for all issues within " "projects that you are a member of", committed: "were involved in a commit that is part of this release", processing_issue: "are subscribed to alerts for this project", comment: "have commented on this issue", assigned: "have been assigned to this issue", bookmark: "have bookmarked this issue", status_change: "have changed the resolution status of this issue", deploy_setting: "opted to receive all deploy notifications for this organization", mentioned: "have been mentioned in this issue", team_mentioned: "are a member of a team mentioned in this issue", } SUBSCRIPTION_REASON_MAP = { GroupSubscriptionReason.comment: "commented", GroupSubscriptionReason.assigned: "assigned", GroupSubscriptionReason.bookmark: "bookmarked", GroupSubscriptionReason.status_change: "changed_status", GroupSubscriptionReason.mentioned: "mentioned", }
GroupSubscriptionReason
python
django__django
tests/model_forms/models.py
{ "start": 711, "end": 855 }
class ____(models.Manager): def get_queryset(self): qs = super().get_queryset() return qs.filter(archived=False)
WriterManager
python
spack__spack
lib/spack/spack/modules/common.py
{ "start": 16885, "end": 18932 }
class ____: """Provides information on the layout of module files. Needs to be sub-classed for specific module types. """ #: This needs to be redefined extension: Optional[str] = None def __init__(self, configuration): self.conf = configuration @property def spec(self): """Spec under consideration""" return self.conf.spec def dirname(self): """Root folder for module files of this type.""" module_system = str(self.conf.module.__name__).split(".")[-1] return root_path(module_system, self.conf.name) @property def use_name(self): """Returns the 'use' name of the module i.e. the name you have to type to console to use it. This implementation fits the needs of most non-hierarchical layouts. """ projection = proj.get_projection(self.conf.projections, self.spec) if not projection: projection = self.conf.default_projections["all"] name = self.spec.format_path(projection) # Not everybody is working on linux... parts = name.split("/") name = os.path.join(*parts) # Add optional suffixes based on constraints path_elements = [name] path_elements.extend(map(self.spec.format, self.conf.suffixes)) return "-".join(path_elements) @property def filename(self): """Name of the module file for the current spec.""" # Just the name of the file filename = self.use_name if self.extension: filename = f"{self.use_name}.{self.extension}" # Architecture sub-folder arch_folder_conf = spack.config.get("modules:%s:arch_folder" % self.conf.name, True) if arch_folder_conf: # include an arch specific folder between root and filename arch_folder = str(self.spec.architecture) filename = os.path.join(arch_folder, filename) # Return the absolute path return os.path.join(self.dirname(), filename)
BaseFileLayout
python
ray-project__ray
rllib/env/tests/test_multi_agent_episode.py
{ "start": 4928, "end": 159285 }
class ____(unittest.TestCase): @classmethod def setUpClass(cls) -> None: ray.init() @classmethod def tearDownClass(cls) -> None: ray.shutdown() def test_init(self): # Create an empty episode. episode = MultiAgentEpisode() # Empty episode should have a start point and count of zero. self.assertTrue(episode.env_t_started == episode.env_t == 0) # Create an episode with a specific starting point, but no data. episode = MultiAgentEpisode(env_t_started=10) self.assertTrue(episode.env_t == episode.env_t_started == 10) # Generate a simple multi-agent episode and check all internals after # construction. observations = [{"a0": 0, "a1": 0}, {"a1": 1}, {"a1": 2}, {"a1": 3}] actions = [{"a0": 0, "a1": 0}, {"a1": 1}, {"a1": 2}] rewards = [{"a0": 0.1, "a1": 0.1}, {"a1": 0.2}, {"a1": 0.3}] episode = MultiAgentEpisode( observations=observations, actions=actions, rewards=rewards ) check(episode.agent_episodes["a0"].observations.data, [0]) check(episode.agent_episodes["a1"].observations.data, [0, 1, 2, 3]) check(episode.agent_episodes["a0"].actions.data, []) check(episode.agent_episodes["a1"].actions.data, [0, 1, 2]) check(episode.agent_episodes["a0"].rewards.data, []) check(episode.agent_episodes["a1"].rewards.data, [0.1, 0.2, 0.3]) check(episode._hanging_actions_end, {"a0": 0}) check(episode._hanging_rewards_end, {"a0": 0.1}) check(episode._hanging_extra_model_outputs_end, {"a0": {}}) check(episode.env_t_to_agent_t["a0"].data, [0, "S", "S", "S"]) check(episode.env_t_to_agent_t["a1"].data, [0, 1, 2, 3]) check(episode.env_t_to_agent_t["a0"].lookback, 3) check(episode.env_t_to_agent_t["a1"].lookback, 3) # One of the agents doesn't step after reset. 
observations = [{"a0": 0}, {"a1": 1}, {"a0": 2, "a1": 2}, {"a1": 3}, {"a1": 4}] actions = [{"a0": 0}, {"a1": 1}, {"a0": 2, "a1": 2}, {"a1": 3}] rewards = [{"a0": 0.1}, {"a1": 0.2}, {"a0": 0.3, "a1": 0.3}, {"a1": 0.4}] episode = MultiAgentEpisode( observations=observations, actions=actions, rewards=rewards ) check(episode.agent_episodes["a0"].observations.data, [0, 2]) check(episode.agent_episodes["a1"].observations.data, [1, 2, 3, 4]) check(episode.agent_episodes["a0"].actions.data, [0]) check(episode.agent_episodes["a1"].actions.data, [1, 2, 3]) check(episode.agent_episodes["a0"].rewards.data, [0.1]) check(episode.agent_episodes["a1"].rewards.data, [0.2, 0.3, 0.4]) check(episode._hanging_actions_end, {"a0": 2}) check(episode._hanging_rewards_end, {"a0": 0.3}) check(episode._hanging_extra_model_outputs_end, {"a0": {}}) check(episode.env_t_to_agent_t["a0"].data, [0, "S", 1, "S", "S"]) check(episode.env_t_to_agent_t["a1"].data, ["S", 0, 1, 2, 3]) check(episode.env_t_to_agent_t["a0"].lookback, 4) check(episode.env_t_to_agent_t["a1"].lookback, 4) # Sample 100 values and initialize the episode with observations. env = MultiAgentTestEnv() # Initialize containers. observations = [] rewards = [] actions = [] infos = [] terminateds = {} truncateds = {} extra_model_outputs = [] agent_0_steps = [] agent_0_num_steps = 0 # Initialize observation and info. obs, info = env.reset(seed=0) # If "agent_0" is part of the reset obs, it steps in the first ts. agent_0_steps.append( agent_0_num_steps if "agent_0" in obs else episode.SKIP_ENV_TS_TAG ) if "agent_0" in obs: agent_0_num_steps += 1 observations.append(obs) infos.append(info) # Run 100 samples. for i in range(100): agents_to_step_next = [ aid for aid in obs.keys() if aid in env._agents_alive ] action = {agent_id: i + 1 for agent_id in agents_to_step_next} obs, reward, terminated, truncated, info = env.step(action) # If "agent_0" is part of the reset obs, it steps in the first ts. 
agent_0_steps.append( agent_0_num_steps if "agent_0" in obs else episode.SKIP_ENV_TS_TAG ) if "agent_0" in obs: agent_0_num_steps += 1 observations.append(obs) actions.append(action) rewards.append(reward) infos.append(info) terminateds.update(terminated) truncateds.update(truncated) extra_model_outputs.append( {agent_id: {"extra_1": 10.5} for agent_id in agents_to_step_next} ) # Now create the episode from the recorded data. Pretend that the given data # is all part of the lookback buffer and the episode (chunk) started at the # end of that lookback buffer data. episode = MultiAgentEpisode( observations=observations, actions=actions, rewards=rewards, infos=infos, terminateds=terminateds, truncateds=truncateds, extra_model_outputs=extra_model_outputs, env_t_started=len(rewards), len_lookback_buffer="auto", # default ) # The starting point and count should now be at `len(observations) - 1`.+ self.assertTrue(episode.env_t == episode.env_t_started == len(rewards)) # Assert that agent 1 and agent 5 are both terminated. self.assertTrue(episode.agent_episodes["agent_1"].is_terminated) self.assertTrue(episode.agent_episodes["agent_5"].is_terminated) # Assert that the other agents are neither terminated nor truncated. for agent_id in env.agents: if agent_id != "agent_1" and agent_id != "agent_5": self.assertFalse(episode.agent_episodes[agent_id].is_done) # Assert that the agent_0 env_t_to_agent_t mapping is correct: check(episode.env_t_to_agent_t["agent_0"].data, agent_0_steps) # Test now initializing an episode and setting its starting timestep. episode = MultiAgentEpisode( observations=observations[-11:], actions=actions[-10:], rewards=rewards[-10:], infos=infos[-11:], terminateds=terminateds, truncateds=truncateds, extra_model_outputs=extra_model_outputs[-10:], env_t_started=100, len_lookback_buffer="auto", # default: all data goes into lookback buffers ) # Assert that the episode starts indeed at 100. 
check(episode.env_t, episode.env_t_started, 100) # B/c all data went into lookback buffers, all single-agent episodes and # the multi-agent episode itself should have len=0. check(len(episode), 0) for agent_id in episode.agent_ids: check(len(episode.agent_episodes[agent_id]), 0) check(len(episode.agent_episodes[agent_id].observations), 1) check(len(episode.agent_episodes[agent_id].actions), 0) check(len(episode.agent_episodes[agent_id].rewards), 0) check(episode.agent_episodes[agent_id].is_truncated, False) check(episode.agent_episodes[agent_id].is_numpy, False) check(episode.agent_episodes["agent_5"].is_terminated, True) check( episode.env_t_to_agent_t["agent_5"].data, ["S", 0, 1, "S", 2, 3, 4, 5, 6, 7, 8], ) # Now test, if agents that have never stepped are handled correctly. # agent 5 will be the agent that never stepped. ( observations, actions, rewards, terminateds, truncateds, infos, ) = self._mock_multi_agent_records() # Create the episode from the mock data. episode = MultiAgentEpisode( # agent_ids=["agent_1", "agent_2", "agent_3", "agent_4", "agent_5"], observations=observations, actions=actions, rewards=rewards, infos=infos, terminateds=terminateds, truncateds=truncateds, len_lookback_buffer=0, ) # Assert that the length of `SingleAgentEpisode`s are all correct. check(len(episode.agent_episodes["agent_1"]), 1) check(len(episode.agent_episodes["agent_2"]), 1) check(len(episode.agent_episodes["agent_3"]), 1) check(len(episode.agent_episodes["agent_4"]), 1) # check(len(episode.agent_episodes["agent_5"]), 0) # TODO (simon): Also test the other structs inside the MAE for agent 5 and # the other agents. def test_add_env_reset(self): # Generate an environment. env = MultiAgentTestEnv() # Generate an empty multi-agent episode. Note. we have to provide the # agent ids. episode = MultiAgentEpisode( observation_space=env.observation_space, action_space=env.action_space, ) # Generate initial observations and infos and add them to the episode. 
obs, infos = env.reset(seed=0) episode.add_env_reset( observations=obs, infos=infos, ) # Assert that timestep is at zero. self.assertTrue(episode.env_t == episode.env_t_started == 0) # Assert that the agents with initial observations have their single-agent # episodes in place. for agent_id in env.agents: # Ensure that all agents have a single env_ts=0 -> agent_ts=0 # entry in their env- to agent-timestep mappings. if agent_id in obs: self.assertGreater( len(episode.agent_episodes[agent_id].observations), 0 ) self.assertGreater(len(episode.agent_episodes[agent_id].infos), 0) check(episode.env_t_to_agent_t[agent_id].data, [0]) # Agents that have no reset obs, will not step in next ts -> They should NOT # have a single agent episod yet and their mappings should be empty. else: self.assertTrue(agent_id not in episode.agent_episodes) check(episode.env_t_to_agent_t[agent_id].data, []) # TODO (simon): Test the buffers and reward storage. def test_add_env_step(self): # Create an environment and add the initial observations and infos. env = MultiAgentTestEnv() episode = MultiAgentEpisode() agent_0_steps = [] agent_0_num_steps = 0 obs, infos = env.reset(seed=10) episode.add_env_reset( observations=obs, infos=infos, ) # If "agent_0" is part of the reset obs, it steps in the first ts. agent_0_steps.append( agent_0_num_steps if "agent_0" in obs else episode.SKIP_ENV_TS_TAG ) if "agent_0" in obs: agent_0_num_steps += 1 # Sample 100 timesteps and add them to the episode. for i in range(100): action = { agent_id: i + 1 for agent_id in obs if agent_id in env._agents_alive } obs, reward, terminated, truncated, info = env.step(action) # If "agent_0" is part of the reset obs, it steps in the first ts. 
agent_0_steps.append( agent_0_num_steps if "agent_0" in obs else episode.SKIP_ENV_TS_TAG ) if "agent_0" in obs: agent_0_num_steps += 1 episode.add_env_step( observations=obs, actions=action, rewards=reward, infos=info, terminateds=terminated, truncateds=truncated, extra_model_outputs={agent_id: {"extra": 10.5} for agent_id in action}, ) # Assert that the timestep is at 100. check(episode.env_t, 100) # Ensure that the episode is not done yet. self.assertFalse(episode.is_done) # Ensure that agent 1 and agent 5 are indeed done. self.assertTrue(episode.agent_episodes["agent_1"].is_done) self.assertTrue(episode.agent_episodes["agent_5"].is_done) # Also ensure that their buffers are all empty: for agent_id in ["agent_1", "agent_5"]: self.assertTrue(agent_id not in episode._hanging_actions_end) self.assertTrue(agent_id not in episode._hanging_rewards_end) self.assertTrue(agent_id not in episode._hanging_extra_model_outputs_end) # Check validity of agent_0's env_t_to_agent_t mapping. check(episode.env_t_to_agent_t["agent_0"].data, agent_0_steps) # Run another 100 timesteps. for i in range(100, 200): action = { agent_id: i + 1 for agent_id in obs if agent_id in env._agents_alive } obs, reward, terminated, truncated, info = env.step(action) episode.add_env_step( observations=obs, actions=action, rewards=reward, infos=info, terminateds=terminated, truncateds=truncated, extra_model_outputs={agent_id: {"extra": 10.5} for agent_id in action}, ) # Assert that the environment is done. self.assertTrue(truncated["__all__"]) # Assert that each agent is done. for agent_id in episode.agent_ids: self.assertTrue(episode.agent_episodes[agent_id].is_done) # Assert that agent 1 and agent 5 have no observations/actions/etc. # after the timesteps in which they terminated. 
self.assertGreaterEqual(50, episode.agent_episodes["agent_1"].observations[-1]) self.assertGreaterEqual(50, episode.agent_episodes["agent_1"].actions[-1]) self.assertGreaterEqual(100, episode.agent_episodes["agent_5"].observations[-1]) self.assertGreaterEqual(100, episode.agent_episodes["agent_5"].actions[-1]) # Now test, if agents that have never stepped are handled correctly. # agent 5 will be the agent that never stepped. ( observations, actions, rewards, terminated, truncated, infos, ) = self._mock_multi_agent_records() episode = MultiAgentEpisode( observations=observations, actions=actions, rewards=rewards, infos=infos, terminateds=terminated, truncateds=truncated, # len_lookback_buffer=0, # agent_t_started={}, ) # Now test that intermediate rewards will get recorded and actions buffered. action = {"agent_2": 3, "agent_4": 3} reward = {"agent_1": 1.0, "agent_2": 1.0, "agent_3": 1.0, "agent_5": 1.0} observation = {"agent_1": 3, "agent_2": 3} infos = {"agent_1": {}, "agent_2": {}} terminated = {k: False for k in observation.keys()} terminated.update({"__all__": False}) truncated = {k: False for k in observation.keys()} truncated.update({"__all__": False}) episode.add_env_step( observations=observation, actions=action, rewards=reward, infos=infos, terminateds=terminated, truncateds=truncated, ) # Assert that the action cache for agent 4 is used. # Note, agent 4 acts, but receives no observation. # Note also, all other caches are always used, due to their defaults. self.assertTrue(episode._hanging_actions_end["agent_4"] is not None) # Assert that the reward caches of agents 3 and 5 are there. # For agent_5 (b/c it has never done anything), we add to the begin cache. check(episode._hanging_rewards_end["agent_3"], 2.2) check(episode._hanging_rewards_begin["agent_5"], 1.0) def test_get_observations(self): # Generate simple records for a multi agent environment. 
( observations, actions, rewards, is_terminateds, is_truncateds, infos, ) = self._mock_multi_agent_records() # Create a multi-agent episode. episode = MultiAgentEpisode( observations=observations, actions=actions, rewards=rewards, infos=infos, terminateds=is_terminateds, truncateds=is_truncateds, len_lookback_buffer="auto", # default: use all data as lookback ) # Get last observations for the multi-agent episode. obs = episode.get_observations(indices=-1) check(obs, {"agent_2": 2, "agent_4": 2}) # Return last two observations for the entire env. # Also, we flip the indices here and require -1 before -2, this # should reflect in the returned results. obs = episode.get_observations(indices=[-1, -2]) # Note, agent 4 has two observations in the last two ones. # Note, `get_observations()` returns in the order of the `indices` arg. check(obs, {"agent_1": [1], "agent_2": [2], "agent_3": [1], "agent_4": [2, 1]}) # Return last two observations for the entire env using slice. obs = episode.get_observations(slice(-2, None)) check( obs, {"agent_1": [1], "agent_2": [2], "agent_3": [1], "agent_4": [1, 2]}, ) # Return last four observations for the entire env using slice # and `fill`. obs = episode.get_observations(slice(-5, None), fill=-10) check( obs, { # All first two ts should be 0s (fill before episode even # started). # 3rd items are the reset obs for agents "agent_1": [-10, -10, 0, 1, -10], # ag1 stepped the first 2 ts "agent_2": [-10, -10, 0, -10, 2], # ag2 stepped first and last ts "agent_3": [-10, -10, 0, 1, -10], # ag3 same as ag1 "agent_4": [-10, -10, -10, 1, 2], # ag4 steps in last 2 ts }, ) # Use `fill` to look into the future (ts=100 and 101). obs = episode.get_observations(slice(100, 102), fill=9.9) check( obs, { "agent_1": [9.9, 9.9], "agent_2": [9.9, 9.9], "agent_3": [9.9, 9.9], "agent_4": [9.9, 9.9], }, ) # Return two observations in lookback buffers for the entire env using # `neg_index_as_lookback=True` and an index list. 
# w/ fill obs = episode.get_observations( indices=[-2, -1], fill=-10, neg_index_as_lookback=True, ) check( obs, { "agent_1": [0, 1], "agent_2": [0, -10], "agent_3": [0, 1], "agent_4": [-10, 1], }, ) # Same, but w/o fill obs = episode.get_observations(indices=[-2, -1], neg_index_as_lookback=True) check( obs, {"agent_1": [0, 1], "agent_2": [0], "agent_3": [0, 1], "agent_4": [1]}, ) # Get last observations for each individual agent. obs = episode.get_observations(indices=-1, env_steps=False) check(obs, {"agent_1": 1, "agent_2": 2, "agent_3": 1, "agent_4": 2}) # Same, but with `agent_ids` filters. obs = episode.get_observations(-1, env_steps=False, agent_ids="agent_1") check(obs, {"agent_1": 1}) obs = episode.get_observations(-1, env_steps=False, agent_ids=["agent_2"]) check(obs, {"agent_2": 2}) obs = episode.get_observations(-1, env_steps=False, agent_ids=("agent_3",)) check(obs, {"agent_3": 1}) obs = episode.get_observations(-1, env_steps=False, agent_ids={"agent_4"}) check(obs, {"agent_4": 2}) obs = episode.get_observations( -1, env_steps=False, agent_ids=["agent_1", "agent_2"] ) check(obs, {"agent_1": 1, "agent_2": 2}) obs = episode.get_observations(-2, env_steps=True, agent_ids={"agent_4"}) check(obs, {"agent_4": 1}) obs = episode.get_observations([-1, -2], env_steps=True, agent_ids={"agent_4"}) check(obs, {"agent_4": [2, 1]}) # Return the last two observations for each individual agent. obs = episode.get_observations(indices=[-1, -2], env_steps=False) check( obs, { "agent_1": [1, 0], "agent_2": [2, 0], "agent_3": [1, 0], "agent_4": [2, 1], }, ) # Now, test the same when returning a list. obs = episode.get_observations(return_list=True) check(obs, [{"agent_2": 2, "agent_4": 2}]) # Expect error when calling with env_steps=False. with self.assertRaises(ValueError): episode.get_observations(env_steps=False, return_list=True) # List of indices. 
obs = episode.get_observations(indices=[-1, -2], return_list=True) check( obs, [ {"agent_2": 2, "agent_4": 2}, {"agent_1": 1, "agent_3": 1, "agent_4": 1}, ], ) # Slice of indices w/ fill. obs = episode.get_observations( slice(-1, 1), return_list=True, fill=-8, neg_index_as_lookback=True, ) check( obs, [ {"agent_1": 1, "agent_2": -8, "agent_3": 1, "agent_4": 1}, {"agent_1": -8, "agent_2": 2, "agent_3": -8, "agent_4": 2}, ], ) # B/c we have lookback="auto" in the ma episode, all data we sent into # the c"tor was pushed into the lookback buffers and only the last # observations are outside these buffers and will be returned here. obs = episode.get_observations(env_steps=False) check( obs, {"agent_1": [1], "agent_2": [2], "agent_3": [1], "agent_4": [2]}, ) # Test with initial observations only. episode = MultiAgentEpisode() episode.add_env_reset( observations=observations[0], infos=infos[0], ) # Get the last observation for agents and assert that they are correct. obs = episode.get_observations() for agent_id, agent_obs in observations[0].items(): check(obs[agent_id][0], agent_obs) # Now the same as list. obs = episode.get_observations(return_list=True) for agent_id, agent_obs in observations[0].items(): check(obs[0][agent_id], agent_obs) # Now by agent steps. obs = episode.get_observations(env_steps=False) for agent_id, agent_obs in observations[0].items(): check(obs[agent_id][0], agent_obs) def test_get_infos(self): # Generate simple records for a multi agent environment. ( observations, actions, rewards, is_terminateds, is_truncateds, infos, ) = self._mock_multi_agent_records() # Create a multi-agent episode. episode = MultiAgentEpisode( observations=observations, actions=actions, rewards=rewards, infos=infos, terminateds=is_terminateds, truncateds=is_truncateds, len_lookback_buffer="auto", # default: use all data as lookback ) # Get last infos for the multi-agent episode. 
inf = episode.get_infos(indices=-1) check(inf, infos[-1]) # Return last two infos for the entire env. # Also, we flip the indices here and require -1 before -2, this # should reflect in the returned results. inf = episode.get_infos(indices=[-1, -2]) # Note, agent 4 has two infos in the last two ones. # Note, `get_infos()` returns in the order of the `indices` arg. check( inf, { "agent_1": [{"a1_i1": 1.1}], "agent_2": [{"a2_i2": 2.2}], "agent_3": [{"a3_i1": 3.1}], "agent_4": [{"a4_i2": 4.2}, {"a4_i1": 4.1}], }, ) # Return last two infos for the entire env using slice. inf = episode.get_infos(slice(-2, None)) check( inf, { "agent_1": [{"a1_i1": 1.1}], "agent_2": [{"a2_i2": 2.2}], "agent_3": [{"a3_i1": 3.1}], "agent_4": [{"a4_i1": 4.1}, {"a4_i2": 4.2}], }, ) # Return last four infos for the entire env using slice # and `fill`. inf = episode.get_infos(slice(-5, None), fill={"4": "2"}) check( inf, { # All first two ts should be 0s (fill before episode even # started). # 3rd items are the reset obs for agents "agent_1": [ {"4": "2"}, {"4": "2"}, {"a1_i0": 1}, {"a1_i1": 1.1}, {"4": "2"}, ], # ag1 stepped the first 2 ts "agent_2": [ {"4": "2"}, {"4": "2"}, {"a2_i0": 2}, {"4": "2"}, {"a2_i2": 2.2}, ], # ag2 stepped first and last ts "agent_3": [ {"4": "2"}, {"4": "2"}, {"a3_i0": 3}, {"a3_i1": 3.1}, {"4": "2"}, ], # ag3 same as ag1 "agent_4": [ {"4": "2"}, {"4": "2"}, {"4": "2"}, {"a4_i1": 4.1}, {"a4_i2": 4.2}, ], # ag4 steps in last 2 ts }, ) # Use `fill` (but as a non-dict, just to check) to look into the future # (ts=100 and 101). inf = episode.get_infos(slice(100, 102), fill=9.9) check( inf, { "agent_1": [9.9, 9.9], "agent_2": [9.9, 9.9], "agent_3": [9.9, 9.9], "agent_4": [9.9, 9.9], }, ) # Return two infos in lookback buffers for the entire env using # `neg_index_as_lookback=True` and an index list. 
# w/ fill inf = episode.get_infos( indices=[-2, -1], fill=-10, neg_index_as_lookback=True, ) check( inf, { "agent_1": [{"a1_i0": 1}, {"a1_i1": 1.1}], "agent_2": [{"a2_i0": 2}, -10], "agent_3": [{"a3_i0": 3}, {"a3_i1": 3.1}], "agent_4": [-10, {"a4_i1": 4.1}], }, ) # Same, but w/o fill inf = episode.get_infos(indices=[-2, -1], neg_index_as_lookback=True) check( inf, { "agent_1": [{"a1_i0": 1}, {"a1_i1": 1.1}], "agent_2": [{"a2_i0": 2}], "agent_3": [{"a3_i0": 3}, {"a3_i1": 3.1}], "agent_4": [{"a4_i1": 4.1}], }, ) # Get last infos for each individual agent. inf = episode.get_infos(indices=-1, env_steps=False) check( inf, { "agent_1": {"a1_i1": 1.1}, "agent_2": {"a2_i2": 2.2}, "agent_3": {"a3_i1": 3.1}, "agent_4": {"a4_i2": 4.2}, }, ) # Same, but with `agent_ids` filters. inf = episode.get_infos(-1, env_steps=False, agent_ids="agent_1") check(inf, {"agent_1": {"a1_i1": 1.1}}) inf = episode.get_infos(-1, env_steps=False, agent_ids=["agent_2"]) check(inf, {"agent_2": {"a2_i2": 2.2}}) inf = episode.get_infos(-1, env_steps=False, agent_ids=("agent_3",)) check(inf, {"agent_3": {"a3_i1": 3.1}}) inf = episode.get_infos(-1, env_steps=False, agent_ids={"agent_4"}) check(inf, {"agent_4": {"a4_i2": 4.2}}) inf = episode.get_infos(-1, env_steps=False, agent_ids=["agent_1", "agent_2"]) check(inf, {"agent_1": {"a1_i1": 1.1}, "agent_2": {"a2_i2": 2.2}}) inf = episode.get_infos(-2, env_steps=True, agent_ids={"agent_4"}) check(inf, {"agent_4": {"a4_i1": 4.1}}) inf = episode.get_infos([-1, -2], env_steps=True, agent_ids={"agent_4"}) check(inf, {"agent_4": [{"a4_i2": 4.2}, {"a4_i1": 4.1}]}) # Return the last two infos for each individual agent. inf = episode.get_infos(indices=[-1, -2], env_steps=False) check( inf, { "agent_1": [{"a1_i1": 1.1}, {"a1_i0": 1}], "agent_2": [{"a2_i2": 2.2}, {"a2_i0": 2}], "agent_3": [{"a3_i1": 3.1}, {"a3_i0": 3}], "agent_4": [{"a4_i2": 4.2}, {"a4_i1": 4.1}], }, ) # Now, test the same when returning a list. 
inf = episode.get_infos(return_list=True) check(inf, [{"agent_2": {"a2_i2": 2.2}, "agent_4": {"a4_i2": 4.2}}]) # Expect error when calling with env_steps=False. with self.assertRaises(ValueError): episode.get_infos(env_steps=False, return_list=True) # List of indices. inf = episode.get_infos(indices=[-1, -2], return_list=True) check( inf, [ {"agent_2": {"a2_i2": 2.2}, "agent_4": {"a4_i2": 4.2}}, { "agent_1": {"a1_i1": 1.1}, "agent_3": {"a3_i1": 3.1}, "agent_4": {"a4_i1": 4.1}, }, ], ) # Slice of indices w/ fill. inf = episode.get_infos( slice(-1, 1), return_list=True, fill=-8, neg_index_as_lookback=True, ) check( inf, [ { "agent_1": {"a1_i1": 1.1}, "agent_2": -8, "agent_3": {"a3_i1": 3.1}, "agent_4": {"a4_i1": 4.1}, }, { "agent_1": -8, "agent_2": {"a2_i2": 2.2}, "agent_3": -8, "agent_4": {"a4_i2": 4.2}, }, ], ) # B/c we have lookback="auto" in the ma episode, all data we sent into # the c"tor was pushed into the lookback buffers and only the last # infos are outside these buffers and will be returned here. inf = episode.get_infos(env_steps=False) check( inf, { "agent_1": [{"a1_i1": 1.1}], "agent_2": [{"a2_i2": 2.2}], "agent_3": [{"a3_i1": 3.1}], "agent_4": [{"a4_i2": 4.2}], }, ) # Test with initial infos only. episode = MultiAgentEpisode() episode.add_env_reset( observations=observations[0], infos=infos[0], ) # Get the last infos for agents and assert that they are correct. inf = episode.get_infos() for agent_id, agent_inf in infos[0].items(): check(inf[agent_id][0], agent_inf) # Now the same as list. inf = episode.get_infos(return_list=True) for agent_id, agent_inf in infos[0].items(): check(inf[0][agent_id], agent_inf) # Now by agent steps. inf = episode.get_infos(env_steps=False) for agent_id, agent_inf in infos[0].items(): check(inf[agent_id][0], agent_inf) def test_get_actions(self): """Tests whether the `MultiAgentEpisode.get_actions()` API works as expected.""" # Generate a simple multi-agent episode. 
observations = [ {"a0": 0, "a1": 0}, {"a0": 1, "a1": 1}, {"a1": 2}, {"a1": 3}, {"a1": 4}, ] actions = [{"a0": 0, "a1": 0}, {"a0": 1, "a1": 1}, {"a1": 2}, {"a1": 3}] rewards = [{"a0": 1, "a1": 1}, {"a0": 2, "a1": 2}, {"a1": 3}, {"a1": 4}] episode = MultiAgentEpisode( observations=observations, actions=actions, rewards=rewards ) # Access single indices, env steps. for i in range(-1, -5, -1): act = episode.get_actions(i) check(act, actions[i]) # Access >=0 integer indices (expect index error as everything is in # lookback buffer). for i in range(0, 5): with self.assertRaises(IndexError): episode.get_actions(i) # Access <= -5 integer indices (expect index error as this goes beyond length of # lookback buffer). for i in range(-5, -10, -1): with self.assertRaises(IndexError): episode.get_actions(i) # Access list of indices, env steps. act = episode.get_actions([-1, -2]) check(act, {"a1": [3, 2]}) act = episode.get_actions([-2, -3]) check(act, {"a0": [1], "a1": [2, 1]}) act = episode.get_actions([-3, -4]) check(act, {"a0": [1, 0], "a1": [1, 0]}) # Access slices of indices, env steps. act = episode.get_actions(slice(-1, -3, -1)) check(act, {"a1": [3, 2]}) act = episode.get_actions(slice(-2, -4, -1)) check(act, {"a0": [1], "a1": [2, 1]}) act = episode.get_actions(slice(-3, -5, -1)) check(act, {"a0": [1, 0], "a1": [1, 0]}) act = episode.get_actions(slice(-3, -6, -1), fill="skip") check(act, {"a0": [1, 0, "skip"], "a1": [1, 0, "skip"]}) act = episode.get_actions(slice(1, -6, -1), fill="s") check( act, {"a0": ["s", "s", "s", "s", 1, 0, "s"], "a1": ["s", "s", 3, 2, 1, 0, "s"]}, ) act = episode.get_actions(slice(0, -5, -1), fill="s") check( act, {"a0": ["s", "s", "s", 1, 0], "a1": ["s", 3, 2, 1, 0]}, ) # Access single indices, agent steps. 
act = episode.get_actions(-1, env_steps=False) check(act, {"a0": 1, "a1": 3}) act = episode.get_actions(-2, env_steps=False) check(act, {"a0": 0, "a1": 2}) act = episode.get_actions(-3, env_steps=False, agent_ids="a1") check(act, {"a1": 1}) act = episode.get_actions(-3, env_steps=False, fill="skip") check(act, {"a0": "skip", "a1": 1}) act = episode.get_actions(-4, env_steps=False, agent_ids="a1") check(act, {"a1": 0}) act = episode.get_actions(-4, env_steps=False, fill="skip") check(act, {"a0": "skip", "a1": 0}) episode.add_env_step( observations={"a0": 5, "a1": 5}, actions={"a1": 4}, rewards={"a1": 4} ) check(episode.get_actions(0), {"a1": 4}) check(episode.get_actions(-1), {"a1": 4}) check(episode.get_actions(-2), {"a1": 3}) episode.add_env_step( observations={"a1": 6}, actions={"a0": 5, "a1": 5}, rewards={"a0": 5, "a1": 5}, ) check(episode.get_actions(0), {"a1": 4}) check(episode.get_actions(1), {"a0": 5, "a1": 5}) check(episode.get_actions(-1), {"a0": 5, "a1": 5}) # Generate a simple multi-agent episode, where a hanging action is at the end. observations = [ {"a0": 0, "a1": 0}, {"a0": 0, "a1": 1}, {"a0": 2}, ] actions = [{"a0": 0, "a1": 0}, {"a0": 1, "a1": 1}] rewards = [{"a0": 0.0, "a1": 0.0}, {"a0": 0.1, "a1": 0.1}] episode = MultiAgentEpisode( observations=observations, actions=actions, rewards=rewards, len_lookback_buffer=0, ) # Test, whether the hanging action of a1 at the end gets returned properly # for idx=-1. act = episode.get_actions(-1) check(act, {"a0": 1, "a1": 1}) act = episode.get_actions(-2) check(act, {"a0": 0, "a1": 0}) act = episode.get_actions(0) check(act, {"a0": 0, "a1": 0}) act = episode.get_actions(1) check(act, {"a0": 1, "a1": 1}) with self.assertRaises(IndexError): episode.get_actions(2) with self.assertRaises(IndexError): episode.get_actions(-3) # Generate a simple multi-agent episode, where one agent is done. 
# observations = [ # {"a0": 0, "a1": 0}, # {"a0": 1, "a1": 1}, # {"a0": 2}, # ] # actions = [{"a0": 0, "a1": 0}, {"a0": 1}] # rewards = [{"a0": 1, "a1": 1}, {"a0": 2}] # terminateds = {"a1": True} # episode = MultiAgentEpisode( # observations=observations, # actions=actions, # rewards=rewards, # terminateds=terminateds, # len_lookback_buffer=0, # ) episode = MultiAgentEpisode() episode.add_env_reset(observations={"a0": 0, "a1": 0}) episode.add_env_step( observations={"a0": 1, "a1": 1}, actions={"a0": 0, "a1": 0}, rewards={"a0": 0.0, "a1": 0.0}, terminateds={"a1": True}, ) episode.add_env_step( observations={"a0": 2}, actions={"a0": 1}, rewards={"a0": 1.0} ) act = episode.get_actions(-1) check(act, {"a0": 1}) # Generate simple records for a multi agent environment. ( observations, actions, rewards, is_terminateds, is_truncateds, infos, ) = self._mock_multi_agent_records() # Create a multi-agent episode. episode = MultiAgentEpisode( observations=observations, actions=actions, rewards=rewards, infos=infos, terminateds=is_terminateds, truncateds=is_truncateds, len_lookback_buffer="auto", # default: use all data as lookback ) # Get last actions for the multi-agent episode. act = episode.get_actions(indices=-1) check(act, {"agent_1": 1, "agent_3": 1, "agent_4": 1}) # Return last two actions for the entire env. # Also, we flip the indices here and require -1 before -2, this # should reflect in the returned results. act = episode.get_actions(indices=[-1, -2]) check( act, {"agent_1": [1, 0], "agent_2": [0], "agent_3": [1, 0], "agent_4": [1]}, ) # Return last two actions for the entire env using slice. act = episode.get_actions(slice(-2, None)) check( act, {"agent_1": [0, 1], "agent_2": [0], "agent_3": [0, 1], "agent_4": [1]}, ) # Return last four actions for the entire env using slice # and `fill`. act = episode.get_actions(slice(-5, None), fill=-10) check( act, { # All first three ts should be 0s (fill before episode even # started). 
# 4th items are the 1st actions (after reset obs) for agents "agent_1": [-10, -10, -10, 0, 1], # ag1 stepped the first 2 ts "agent_2": [-10, -10, -10, 0, -10], # ag2 stepped first and last ts "agent_3": [-10, -10, -10, 0, 1], # ag3 same as ag1 "agent_4": [-10, -10, -10, -10, 1], # ag4 steps in last 2 ts }, ) # Use `fill` to look into the future (ts=100 and 101). act = episode.get_actions(slice(100, 102), fill=9.9) check( act, { "agent_1": [9.9, 9.9], "agent_2": [9.9, 9.9], "agent_3": [9.9, 9.9], "agent_4": [9.9, 9.9], }, ) # Return two actions in lookback buffers for the entire env using # `neg_index_as_lookback=True` and an index list. # w/ fill act = episode.get_actions( indices=[-2, -1], fill=-10, neg_index_as_lookback=True, ) check( act, { "agent_1": [0, 1], "agent_2": [0, -10], "agent_3": [0, 1], "agent_4": [-10, 1], }, ) # Same, but w/o fill. act = episode.get_actions(indices=[-2, -1], neg_index_as_lookback=True) check( act, { "agent_1": [0, 1], "agent_2": [0], "agent_3": [0, 1], "agent_4": [1], }, ) # Get last actions for each individual agent. act = episode.get_actions(indices=-1, env_steps=False) check(act, {"agent_1": 1, "agent_2": 0, "agent_3": 1, "agent_4": 1}) # Same, but with `agent_ids` filters. act = episode.get_actions(-1, env_steps=False, agent_ids="agent_1") check(act, {"agent_1": 1}) act = episode.get_actions(-1, env_steps=False, agent_ids=["agent_2"]) check(act, {"agent_2": 0}) act = episode.get_actions(-1, env_steps=False, agent_ids=("agent_3",)) check(act, {"agent_3": 1}) act = episode.get_actions(-1, env_steps=False, agent_ids={"agent_4"}) check(act, {"agent_4": 1}) act = episode.get_actions(-1, env_steps=False, agent_ids=["agent_1", "agent_2"]) check(act, {"agent_1": 1, "agent_2": 0}) act = episode.get_actions(-2, env_steps=True, agent_ids={"agent_4"}) check(act, {}) act = episode.get_actions([-1, -2], env_steps=True, agent_ids={"agent_4"}) check(act, {"agent_4": [1]}) # Agent 4 has only acted 2x, so there is no (local) ts=-2 for it. 
with self.assertRaises(IndexError): episode.get_actions([-1, -2], env_steps=False, agent_ids={"agent_4"}) act = episode.get_actions([-2], env_steps=False, agent_ids="agent_4", fill=-10) check(act, {"agent_4": [-10]}) # Now, test the same when returning a list. # B/c we have lookback="auto" in the ma episode, all data we sent into # the c"tor was pushed into the lookback buffers and thus all # actions are in these buffers (and won't get returned here). act = episode.get_actions(return_list=True) self.assertTrue(act == []) # Expect error when calling with env_steps=False AND return_list=True. with self.assertRaises(ValueError): episode.get_actions(env_steps=False, return_list=True) # List of indices. act = episode.get_actions(indices=[-1, -2], return_list=True) check(act, [actions[-1], actions[-2]]) # Slice of indices w/ fill. # From the last ts in lookback buffer to first actual ts (empty as all data is # in lookback buffer, but we fill). act = episode.get_actions( slice(-1, 1), return_list=True, fill=-8, neg_index_as_lookback=True ) check( act, [ {"agent_1": 1, "agent_2": -8, "agent_3": 1, "agent_4": 1}, {"agent_1": -8, "agent_2": -8, "agent_3": -8, "agent_4": -8}, ], ) # B/c we have lookback="auto" in the ma episode, all data we sent into # the c"tor was pushed into the lookback buffers and thus all # actions are in these buffers. act = episode.get_actions(env_steps=False) self.assertTrue(act == {}) # Test with initial actions only. episode = MultiAgentEpisode() episode.add_env_reset(observations=observations[0], infos=infos[0]) # Get the last action for agents and assert that it's correct. act = episode.get_actions() check(act, {}) # Now the same as list. act = episode.get_actions(return_list=True) self.assertTrue(act == []) # Now agent steps. act = episode.get_actions(env_steps=False) self.assertTrue(act == {}) def test_get_rewards(self): # Generate a simple multi-agent episode. 
        observations = [
            {"a0": 0, "a1": 0},
            {"a0": 1, "a1": 1},
            {"a1": 2},
            {"a1": 3},
            {"a1": 4},
        ]
        actions = [{"a0": 0, "a1": 0}, {"a0": 1, "a1": 1}, {"a1": 2}, {"a1": 3}]
        rewards = [
            {"a0": 0.0, "a1": 0.0},
            {"a0": 1.0, "a1": 1.0},
            {"a1": 2.0},
            {"a1": 3.0},
        ]
        # Simple 2-agent episode: a0 drops out after env ts=1; a1 steps every ts.
        episode = MultiAgentEpisode(
            observations=observations, actions=actions, rewards=rewards
        )

        # Access single indices, env steps.
        for i in range(-1, -5, -1):
            rew = episode.get_rewards(i)
            check(rew, rewards[i])
        # Access list of indices, env steps.
        rew = episode.get_rewards([-1, -2])
        check(rew, {"a0": [], "a1": [3, 2]})
        rew = episode.get_rewards([-2, -3])
        check(rew, {"a0": [1], "a1": [2, 1]})
        rew = episode.get_rewards([-3, -4])
        check(rew, {"a0": [1, 0], "a1": [1, 0]})
        # Access slices of indices, env steps.
        rew = episode.get_rewards(slice(-1, -3, -1))
        check(rew, {"a0": [], "a1": [3, 2]})
        rew = episode.get_rewards(slice(-2, -4, -1))
        check(rew, {"a0": [1], "a1": [2, 1]})
        rew = episode.get_rewards(slice(-3, -5, -1))
        check(rew, {"a0": [1, 0], "a1": [1, 0]})
        rew = episode.get_rewards(slice(-3, -6, -1), fill=-10.0)
        check(rew, {"a0": [1, 0, -10.0], "a1": [1, 0, -10.0]})
        rew = episode.get_rewards(slice(1, -6, -1), fill=-1)
        check(
            rew,
            {"a0": [-1, -1, -1, -1, 1, 0, -1], "a1": [-1, -1, 3, 2, 1, 0, -1]},
        )
        rew = episode.get_rewards(slice(0, -5, -1), fill=-2)
        check(
            rew,
            {"a0": [-2, -2, -2, 1, 0], "a1": [-2, 3, 2, 1, 0]},
        )
        # Access single indices, agent steps.
        rew = episode.get_rewards(-1, env_steps=False)
        check(rew, {"a0": 1, "a1": 3})
        rew = episode.get_rewards(-2, env_steps=False)
        check(rew, {"a0": 0, "a1": 2})
        rew = episode.get_rewards(-3, env_steps=False, agent_ids="a1")
        check(rew, {"a1": 1})
        rew = episode.get_rewards(-3, env_steps=False, fill=-4)
        check(rew, {"a0": -4, "a1": 1})
        rew = episode.get_rewards(-4, env_steps=False, agent_ids="a1")
        check(rew, {"a1": 0})
        rew = episode.get_rewards(-4, env_steps=False, fill=-5)
        check(rew, {"a0": -5, "a1": 0})

        # Generate simple records for a multi agent environment.
        (
            observations,
            actions,
            rewards,
            is_terminateds,
            is_truncateds,
            infos,
        ) = self._mock_multi_agent_records()
        # Create a multi-agent episode.
        episode = MultiAgentEpisode(
            observations=observations,
            actions=actions,
            rewards=rewards,
            infos=infos,
            terminateds=is_terminateds,
            truncateds=is_truncateds,
            len_lookback_buffer="auto",  # default: use all data as lookback
        )

        # Get last rewards for the multi-agent episode.
        rew = episode.get_rewards(indices=-1)
        check(rew, {"agent_1": 1.1, "agent_3": 1.2, "agent_4": 1.3})

        # Return last two rewards for the entire env.
        # Also, we flip the indices here and require -1 before -2, this
        # should reflect in the returned results.
        rew = episode.get_rewards(indices=[-1, -2])
        check(
            rew,
            {
                "agent_1": [1.1, 0.5],
                "agent_2": [0.6],
                "agent_3": [1.2, 0.7],
                "agent_4": [1.3],
            },
        )

        # Return last two rewards for the entire env using slice.
        rew = episode.get_rewards(slice(-2, None))
        check(
            rew,
            {
                "agent_1": [0.5, 1.1],
                "agent_2": [0.6],
                "agent_3": [0.7, 1.2],
                "agent_4": [1.3],
            },
        )

        # Return last four rewards for the entire env using slice
        # and `fill`.
        rew = episode.get_rewards(slice(-5, None), fill=-10)
        check(
            rew,
            {
                # All first three ts should be 0s (fill before episode even
                # started).
                # 4th items are the 1st rewards (after reset obs) for agents
                "agent_1": [-10, -10, -10, 0.5, 1.1],  # ag1 stepped the first 2 ts
                "agent_2": [-10, -10, -10, 0.6, -10],  # ag2 stepped first ts
                "agent_3": [-10, -10, -10, 0.7, 1.2],  # ag3 same as ag1
                "agent_4": [-10, -10, -10, -10, 1.3],  # ag4 steps in last 2 ts
            },
        )

        # Use `fill` to look into the future (ts=100 and 101).
        rew = episode.get_rewards(slice(100, 102), fill=9.9)
        check(
            rew,
            {
                "agent_1": [9.9, 9.9],
                "agent_2": [9.9, 9.9],
                "agent_3": [9.9, 9.9],
                "agent_4": [9.9, 9.9],
            },
        )

        # Return two rewards in lookback buffers for the entire env using
        # `neg_index_as_lookback=True` and an index list.
# w/ fill rew = episode.get_rewards( indices=[-2, -1], fill=-10, neg_index_as_lookback=True, ) check( rew, { "agent_1": [0.5, 1.1], "agent_2": [0.6, -10], "agent_3": [0.7, 1.2], "agent_4": [-10, 1.3], }, ) # Same, but w/o fill. episode.get_rewards(indices=[-2, -1], neg_index_as_lookback=True) # Get last rewards for each individual agent. rew = episode.get_rewards(indices=-1, env_steps=False) check(rew, {"agent_1": 1.1, "agent_2": 0.6, "agent_3": 1.2, "agent_4": 1.3}) # Same, but with `agent_ids` filters. rew = episode.get_rewards(-1, env_steps=False, agent_ids="agent_1") check(rew, {"agent_1": 1.1}) rew = episode.get_rewards(-1, env_steps=False, agent_ids=["agent_2"]) check(rew, {"agent_2": 0.6}) rew = episode.get_rewards(-1, env_steps=False, agent_ids=("agent_3",)) check(rew, {"agent_3": 1.2}) rew = episode.get_rewards(-1, env_steps=False, agent_ids={"agent_4"}) check(rew, {"agent_4": 1.3}) rew = episode.get_rewards(-1, env_steps=False, agent_ids=["agent_1", "agent_2"]) check(rew, {"agent_1": 1.1, "agent_2": 0.6}) rew = episode.get_rewards(-2, env_steps=True, agent_ids={"agent_3"}) check(rew, {"agent_3": 0.7}) rew = episode.get_rewards(-2, env_steps=True, agent_ids={"agent_4"}) check(rew, {}) rew = episode.get_rewards([-1, -2], env_steps=True, agent_ids={"agent_3"}) check(rew, {"agent_3": [1.2, 0.7]}) rew = episode.get_rewards([-1, -2], env_steps=True, agent_ids={"agent_4"}) check(rew, {"agent_4": [1.3]}) # Agent 4 has only acted 2x, so there is no (local) ts=-2 for it. with self.assertRaises(IndexError): episode.get_rewards([-1, -2], env_steps=False, agent_ids={"agent_4"}) rew = episode.get_rewards([-2], env_steps=False, agent_ids="agent_4", fill=-10) check(rew, {"agent_4": [-10]}) # Now, test the same when returning a list. # B/c we have lookback="auto" in the ma episode, all data we sent into # the c"tor was pushed into the lookback buffers and thus all # rewards are in these buffers (and won't get returned here). 
        rew = episode.get_rewards(return_list=True)
        self.assertTrue(rew == [])
        # Expect error when calling with combination of env_steps=False, but
        # return_list=True.
        with self.assertRaises(ValueError):
            episode.get_rewards(env_steps=False, return_list=True)
        # List of indices.
        rew = episode.get_rewards(indices=[-1, -2], return_list=True)
        check(rew, [rewards[-1], rewards[-2]])
        # Slice of indices w/ fill.
        # From the last ts in lookback buffer to first actual ts (empty as all data is
        # in lookback buffer).
        rew = episode.get_rewards(
            slice(-1, 1),
            return_list=True,
            fill=-8,
            neg_index_as_lookback=True,
        )
        check(
            rew,
            [
                {"agent_1": 1.1, "agent_2": -8, "agent_3": 1.2, "agent_4": 1.3},
                {"agent_1": -8, "agent_2": -8, "agent_3": -8, "agent_4": -8},
            ],
        )

        # B/c we have lookback="auto" in the ma episode, all data we sent into
        # the c"tor was pushed into the lookback buffers and thus all
        # rewards are in these buffers.
        rew = episode.get_rewards(env_steps=False)
        self.assertTrue(rew == {})

        # Test with initial rewards only (fresh episode, reset but no steps).
        episode = MultiAgentEpisode()
        episode.add_env_reset(observations=observations[0], infos=infos[0])
        # Get the last action for agents and assert that it's correct.
        rew = episode.get_rewards()
        check(rew, {})
        # Now the same as list.
        rew = episode.get_rewards(return_list=True)
        self.assertTrue(rew == [])
        # Now agent steps.
        rew = episode.get_rewards(env_steps=False)
        self.assertTrue(rew == {})

    def test_other_getters(self):
        # TODO (simon): Revisit this test and the MultiAgentEpisode.episode_concat API.
        # NOTE(review): The early `return` below disables this entire test;
        # everything after it is unreachable. When re-enabling, also note that
        # the dead code below frequently uses `self.assertTrue(a, b)` — the
        # second argument of `assertTrue` is only a failure *message*, so those
        # lines do not actually compare `a` to `b`; they should become
        # `check(a, b)` (or `assertEqual`) calls.
        return

        (
            observations,
            actions,
            rewards,
            is_terminateds,
            is_truncateds,
            infos,
        ) = self._mock_multi_agent_records()
        # Define some extra model outputs.
        extra_model_outputs = [
            # Here agent_2 has to buffer.
            {"agent_1": {"extra": 0}, "agent_2": {"extra": 0}, "agent_3": {"extra": 0}},
            {"agent_1": {"extra": 1}, "agent_3": {"extra": 1}, "agent_4": {"extra": 1}},
        ]
        # Create a multi-agent episode.
episode = MultiAgentEpisode( observations=observations, actions=actions, rewards=rewards, infos=infos, terminateds=is_terminateds, truncateds=is_truncateds, # extra_model_outputs=extra_model_outputs, # len_lookback_buffer=0, ) # --- extra_model_outputs --- last_extra_model_outputs = episode.get_extra_model_outputs("extra") check( last_extra_model_outputs["agent_1"][0], extra_model_outputs[-1]["agent_1"]["extra"], ) check( last_extra_model_outputs["agent_3"][0], extra_model_outputs[-1]["agent_3"]["extra"], ) check( last_extra_model_outputs["agent_4"][0], extra_model_outputs[-1]["agent_4"]["extra"], ) # Request the last two outputs. last_extra_model_outputs = episode.get_extra_model_outputs( "extra", indices=[-1, -2] ) check( last_extra_model_outputs["agent_1"][0], extra_model_outputs[-1]["agent_1"]["extra"], ) check( last_extra_model_outputs["agent_3"][0], extra_model_outputs[-1]["agent_3"]["extra"], ) check( last_extra_model_outputs["agent_4"][0], extra_model_outputs[-1]["agent_4"]["extra"], ) check( last_extra_model_outputs["agent_1"][1], extra_model_outputs[-2]["agent_1"]["extra"], ) check( last_extra_model_outputs["agent_2"][0], extra_model_outputs[-2]["agent_2"]["extra"], ) check( last_extra_model_outputs["agent_3"][1], extra_model_outputs[-2]["agent_3"]["extra"], ) # Now request lists. # last_extra_model_outputs = episode.get_extra_model_outputs( # "extra", as_list=True # ) # check( # last_extra_model_outputs[0]["agent_1"], # extra_model_outputs[-1]["agent_1"]["extra"], # ) # check( # last_extra_model_outputs[0]["agent_3"], # extra_model_outputs[-1]["agent_3"]["extra"], # ) # check( # last_extra_model_outputs[0]["agent_4"], # extra_model_outputs[-1]["agent_4"]["extra"], # ) # Request the last two extra model outputs and return as a list. 
# last_extra_model_outputs = episode.get_extra_model_outputs( # "extra", [-1, -2], as_list=True # ) # check( # last_extra_model_outputs[0]["agent_1"], # extra_model_outputs[-1]["agent_1"]["extra"], # ) # check( # last_extra_model_outputs[0]["agent_3"], # extra_model_outputs[-1]["agent_3"]["extra"], # ) # check( # last_extra_model_outputs[0]["agent_4"], # extra_model_outputs[-1]["agent_4"]["extra"], # ) # check( # last_extra_model_outputs[1]["agent_1"], # extra_model_outputs[-2]["agent_1"]["extra"], # ) # check( # last_extra_model_outputs[1]["agent_2"], # extra_model_outputs[-2]["agent_2"]["extra"], # ) # check( # last_extra_model_outputs[1]["agent_3"], # extra_model_outputs[-2]["agent_3"]["extra"], # ) # Now request the last extra model outputs at the local timesteps, i.e. # for each agent its last two actions. last_extra_model_outputs = episode.get_extra_model_outputs( "extra", [-1, -2], env_steps=False ) check( last_extra_model_outputs["agent_1"][0], extra_model_outputs[-1]["agent_1"]["extra"], ) check( last_extra_model_outputs["agent_3"][0], extra_model_outputs[-1]["agent_3"]["extra"], ) check( last_extra_model_outputs["agent_4"][0], extra_model_outputs[-1]["agent_4"]["extra"], ) check( last_extra_model_outputs["agent_1"][1], extra_model_outputs[-2]["agent_1"]["extra"], ) check( last_extra_model_outputs["agent_2"][0], extra_model_outputs[-2]["agent_2"]["extra"], ) check( last_extra_model_outputs["agent_3"][1], extra_model_outputs[-2]["agent_3"]["extra"], ) # TODO (simon): Not tested with `env_steps=False`. # --- rewards --- # Start with the case of no partial or buffered rewards. last_rewards = episode.get_rewards(partial=False, consider_buffer=False) self.assertTrue( last_rewards["agent_4"][0], rewards[0]["agent_4"] + rewards[1]["agent_4"] ) self.assertTrue(last_rewards["agent_2"][0], rewards[1]["agent_2"]) # Now test the same case, but with the last two rewards. 
last_rewards = episode.get_rewards( [-1, -2], partial=False, consider_buffer=False ) self.assertTrue( last_rewards["agent_4"][0], rewards[0]["agent_4"] + rewards[1]["agent_4"] ) self.assertTrue(last_rewards["agent_2"][0], rewards[1]["agent_2"]) self.assertTrue(last_rewards["agent_1"][0], rewards[0]["agent_1"]) self.assertTrue(last_rewards["agent_3"][0], rewards[0]["agent_3"]) # Now request these rewards as list. last_rewards = episode.get_rewards( as_list=True, partial=False, consider_buffer=False ) self.assertTrue( last_rewards[0]["agent_4"], rewards[0]["agent_4"] + rewards[1]["agent_4"] ) self.assertTrue(last_rewards[0]["agent_2"], rewards[1]["agent_2"]) # Now test the same case, but with the last two rewards. last_rewards = episode.get_rewards( [-1, -2], as_list=True, partial=False, consider_buffer=False ) self.assertTrue( last_rewards[0]["agent_4"], rewards[0]["agent_4"] + rewards[1]["agent_4"] ) self.assertTrue(last_rewards[0]["agent_2"], rewards[1]["agent_2"]) self.assertTrue(last_rewards[1]["agent_1"], rewards[0]["agent_1"]) self.assertTrue(last_rewards[1]["agent_3"], rewards[0]["agent_3"]) # Create an environment. env = MultiAgentTestEnv() # Create an empty episode. episode_1 = MultiAgentEpisode(agent_ids=env.agent_ids) # Generate initial observation and info. obs, info = env.reset(seed=42) episode_1.add_env_reset( observations=obs, infos=info, ) # Now, generate 100 samples. for i in range(100): action = {agent_id: i for agent_id in obs} obs, reward, terminated, truncated, info = env.step(action) episode_1.add_env_step( observations=obs, actions=action, rewards=reward, infos=info, terminateds=terminated, truncateds=truncated, extra_model_outputs={agent_id: {"extra": 10} for agent_id in action}, ) # First, receive the last rewards without considering buffered values. 
last_rewards = episode_1.get_rewards(partial=False, consider_buffer=False) self.assertIn("agent_9", last_rewards) check(episode_1.global_t_to_local_t["agent_9"][-1], 100) check(episode_1.agent_episodes["agent_9"].rewards[-1], 1.0) check(last_rewards["agent_9"][0], 1.0) self.assertIn("agent_0", last_rewards) check(episode_1.global_t_to_local_t["agent_0"][-1], 100) check(episode_1.agent_episodes["agent_0"].rewards[-1], 1.0) check(last_rewards["agent_0"][0], 1.0) self.assertIn("agent_2", last_rewards) check(episode_1.global_t_to_local_t["agent_2"][-1], 100) check(episode_1.agent_episodes["agent_2"].rewards[-1], 1.0) check(last_rewards["agent_2"][0], 1.0) self.assertIn("agent_5", last_rewards) check(episode_1.global_t_to_local_t["agent_5"][-1], 100) check(episode_1.agent_episodes["agent_5"].rewards[-1], 1.0) check(last_rewards["agent_5"][0], 1.0) self.assertIn("agent_8", last_rewards) check(episode_1.global_t_to_local_t["agent_8"][-1], 100) check(episode_1.agent_episodes["agent_8"].rewards[-1], 1.0) check(last_rewards["agent_8"][0], 1.0) self.assertIn("agent_4", last_rewards) check(episode_1.global_t_to_local_t["agent_4"][-1], 100) check(episode_1.agent_episodes["agent_4"].rewards[-1], 1.0) check(last_rewards["agent_4"][0], 1.0) self.assertIn("agent_3", last_rewards) check(episode_1.global_t_to_local_t["agent_3"][-1], 100) # Agent 3 had a partial reward before the last recorded observation. check(episode_1.agent_episodes["agent_3"].rewards[-1], 2.0) check(last_rewards["agent_3"][0], 2.0) # Assert that all the other agents are not in the returned rewards. self.assertNotIn("agent_1", last_rewards) self.assertNotIn("agent_6", last_rewards) self.assertNotIn("agent_7", last_rewards) # Now return the same as list. 
last_rewards = episode_1.get_rewards( partial=False, consider_buffer=False, as_list=True ) self.assertIn("agent_9", last_rewards[0]) check(last_rewards[0]["agent_9"], 1.0) self.assertIn("agent_0", last_rewards[0]) check(last_rewards[0]["agent_0"], 1.0) self.assertIn("agent_2", last_rewards[0]) check(last_rewards[0]["agent_2"], 1.0) self.assertIn("agent_5", last_rewards[0]) check(last_rewards[0]["agent_5"], 1.0) self.assertIn("agent_8", last_rewards[0]) check(last_rewards[0]["agent_8"], 1.0) self.assertIn("agent_4", last_rewards[0]) check(last_rewards[0]["agent_4"], 1.0) self.assertIn("agent_3", last_rewards[0]) check(last_rewards[0]["agent_3"], 2.0) # Assert that all the other agents are not in the returned rewards. self.assertNotIn("agent_1", last_rewards) self.assertNotIn("agent_6", last_rewards) self.assertNotIn("agent_7", last_rewards) # Now request the last two indices. last_rewards = episode_1.get_rewards( [-1, -2], partial=False, consider_buffer=False ) self.assertIn("agent_9", last_rewards) self.assertListEqual(episode_1.global_t_to_local_t["agent_9"][-2:], [99, 100]) self.assertListEqual( episode_1.agent_episodes["agent_9"].rewards[-2:], last_rewards["agent_9"] ) self.assertIn("agent_5", last_rewards) self.assertListEqual(episode_1.global_t_to_local_t["agent_5"][-2:], [99, 100]) # Agent 5 has already died, so we need to convert back to list. self.assertListEqual( episode_1.agent_episodes["agent_5"].rewards[-2:], last_rewards["agent_5"], ) self.assertIn("agent_2", last_rewards) self.assertListEqual(episode_1.global_t_to_local_t["agent_2"][-2:], [99, 100]) self.assertListEqual( episode_1.agent_episodes["agent_2"].rewards[-1:-3:-1], last_rewards["agent_2"], ) # Agent 2 had no observation at `ts=98`, but partial rewards. self.assertGreater(99, episode_1.global_t_to_local_t["agent_2"][-3]) # Ensure that for agent 2 there had been three partial rewards in between the # observation at `ts=95` and the next at `ts=99`. 
self.assertListEqual( episode_1.partial_rewards_t["agent_2"][-4:-1], [96, 98, 99] ) self.assertIn("agent_3", last_rewards) # Agent 3 had no observation at `ts=99`. self.assertListEqual(episode_1.global_t_to_local_t["agent_3"][-2:], [98, 100]) check( episode_1.agent_episodes["agent_3"].rewards[-1], last_rewards["agent_3"][0] ) # Ensure that there was a partial reward at `ts=99`. self.assertListEqual(episode_1.partial_rewards_t["agent_3"][-2:], [99, 100]) self.assertIn("agent_4", last_rewards) self.assertListEqual(episode_1.global_t_to_local_t["agent_4"][-2:], [99, 100]) self.assertListEqual( episode_1.agent_episodes["agent_4"].rewards[-2:], last_rewards["agent_4"] ) self.assertIn("agent_8", last_rewards) # Ensure that the third-last observation is before `ts=98`. self.assertListEqual( episode_1.global_t_to_local_t["agent_8"][-3:], [97, 99, 100] ) # Ensure also that at `ts=97` there was a reward. self.assertListEqual(episode_1.partial_rewards_t["agent_8"][-3:-1], [98, 99]) self.assertListEqual([1.0, 2.0], last_rewards["agent_8"]) self.assertIn("agent_7", last_rewards) # Agent 7 has no observation at `ts=100`, but at `ts=98`. self.assertListEqual(episode_1.global_t_to_local_t["agent_7"][-2:], [98, 99]) check( episode_1.agent_episodes["agent_7"].rewards[-1], last_rewards["agent_7"][0] ) self.assertIn("agent_0", last_rewards) self.assertListEqual(episode_1.global_t_to_local_t["agent_0"][-2:], [99, 100]) self.assertListEqual( episode_1.agent_episodes["agent_0"].rewards[-2:], last_rewards["agent_0"] ) self.assertNotIn("agent_1", last_rewards) self.assertNotIn("agent_6", last_rewards) # Now request the last two indices as list. 
last_rewards = episode_1.get_rewards( [-1, -2], partial=False, consider_buffer=False, as_list=True ) self.assertIn("agent_9", last_rewards[0]) self.assertIn("agent_9", last_rewards[1]) check( episode_1.agent_episodes["agent_9"].rewards[-1], last_rewards[0]["agent_9"] ) check( episode_1.agent_episodes["agent_9"].rewards[-2], last_rewards[1]["agent_9"] ) self.assertIn("agent_5", last_rewards[0]) self.assertIn("agent_5", last_rewards[1]) check( episode_1.agent_episodes["agent_5"].rewards[-1], last_rewards[0]["agent_5"] ) check( episode_1.agent_episodes["agent_5"].rewards[-2], last_rewards[1]["agent_5"] ) self.assertIn("agent_2", last_rewards[0]) self.assertIn("agent_2", last_rewards[1]) check( episode_1.agent_episodes["agent_2"].rewards[-1], last_rewards[0]["agent_2"] ) check(3.0, last_rewards[1]["agent_2"]) # Agent 3 has only recorded rewards at `ts=100`. self.assertIn("agent_3", last_rewards[0]) check( episode_1.agent_episodes["agent_3"].rewards[-1], last_rewards[0]["agent_3"] ) self.assertIn("agent_4", last_rewards[0]) self.assertIn("agent_4", last_rewards[1]) check( episode_1.agent_episodes["agent_4"].rewards[-1], last_rewards[0]["agent_4"] ) check( episode_1.agent_episodes["agent_4"].rewards[-2], last_rewards[1]["agent_4"] ) self.assertIn("agent_8", last_rewards[0]) self.assertIn("agent_8", last_rewards[1]) check( episode_1.agent_episodes["agent_8"].rewards[-1], last_rewards[0]["agent_8"] ) check( episode_1.agent_episodes["agent_8"].rewards[-2], last_rewards[1]["agent_8"] ) # Agent 7 has no observation at `ts=100`. 
self.assertIn("agent_7", last_rewards[1]) check( episode_1.agent_episodes["agent_7"].rewards[-1], last_rewards[1]["agent_7"] ) self.assertIn("agent_0", last_rewards[0]) self.assertIn("agent_0", last_rewards[1]) check( episode_1.agent_episodes["agent_0"].rewards[-1], last_rewards[0]["agent_0"] ) check( episode_1.agent_episodes["agent_0"].rewards[-2], last_rewards[1]["agent_0"] ) self.assertNotIn("agent_1", last_rewards[0]) self.assertNotIn("agent_6", last_rewards[0]) self.assertNotIn("agent_1", last_rewards[1]) self.assertNotIn("agent_6", last_rewards[1]) # Second, get the last rewards with a single index, consider all partial # rewards after the last recorded observation of an agent, i.e. set # `consider_buffer` to `True`. last_rewards = episode_1.get_rewards(partial=False, consider_buffer=True) self.assertIn("agent_9", last_rewards) check( episode_1.agent_episodes["agent_9"].rewards[-1], last_rewards["agent_9"][0] ) self.assertIn("agent_0", last_rewards) check( episode_1.agent_episodes["agent_0"].rewards[-1], last_rewards["agent_0"][0] ) self.assertIn("agent_2", last_rewards) check( episode_1.agent_episodes["agent_2"].rewards[-1], last_rewards["agent_2"][0] ) self.assertIn("agent_5", last_rewards) check( episode_1.agent_episodes["agent_5"].rewards[-1], last_rewards["agent_5"][0] ) self.assertIn("agent_8", last_rewards) check( episode_1.agent_episodes["agent_8"].rewards[-1], last_rewards["agent_8"][0] ) self.assertIn("agent_4", last_rewards) check( episode_1.agent_episodes["agent_4"].rewards[-1], last_rewards["agent_4"][0] ) self.assertIn("agent_3", last_rewards) # Agent 3 had a partial reward before the last recorded observation. check( episode_1.agent_episodes["agent_3"].rewards[-1], last_rewards["agent_3"][0] ) # Agent 7 has a partial reward at `ts=100` after its last observation at # `ts=99`. 
self.assertIn("agent_7", last_rewards) check(episode_1.partial_rewards_t["agent_7"][-1], 100) check(episode_1.partial_rewards["agent_7"][-1], last_rewards["agent_7"][0]) # Assert that all the other agents are not in the returned rewards. self.assertNotIn("agent_1", last_rewards) self.assertNotIn("agent_6", last_rewards) # Now request the last rewards as a list while considering the buffer. last_rewards = episode_1.get_rewards( partial=False, consider_buffer=True, as_list=True ) self.assertIn("agent_9", last_rewards[0]) check( episode_1.agent_episodes["agent_9"].rewards[-1], last_rewards[0]["agent_9"] ) self.assertIn("agent_0", last_rewards[0]) check( episode_1.agent_episodes["agent_0"].rewards[-1], last_rewards[0]["agent_0"] ) self.assertIn("agent_2", last_rewards[0]) check( episode_1.agent_episodes["agent_2"].rewards[-1], last_rewards[0]["agent_2"] ) self.assertIn("agent_5", last_rewards[0]) check( episode_1.agent_episodes["agent_5"].rewards[-1], last_rewards[0]["agent_5"] ) self.assertIn("agent_8", last_rewards[0]) check( episode_1.agent_episodes["agent_8"].rewards[-1], last_rewards[0]["agent_8"] ) self.assertIn("agent_4", last_rewards[0]) check( episode_1.agent_episodes["agent_4"].rewards[-1], last_rewards[0]["agent_4"] ) self.assertIn("agent_3", last_rewards[0]) # Agent 3 had a partial reward before the last recorded observation. check( episode_1.agent_episodes["agent_3"].rewards[-1], last_rewards[0]["agent_3"] ) # Agent 7 has a partial reward at `ts=100` after its last observation at # `ts=99`. self.assertIn("agent_7", last_rewards[0]) check(episode_1.partial_rewards["agent_7"][-1], last_rewards[0]["agent_7"]) # Assert that all the other agents are not in the returned rewards. self.assertNotIn("agent_1", last_rewards[0]) self.assertNotIn("agent_6", last_rewards[0]) # Now request the last two indices and consider buffered partial rewards after # the last observation. 
last_rewards = episode_1.get_rewards( [-1, -2], partial=False, consider_buffer=True ) self.assertIn("agent_9", last_rewards) self.assertListEqual( episode_1.agent_episodes["agent_9"].rewards[-1:-3:-1], last_rewards["agent_9"], ) self.assertIn("agent_0", last_rewards) self.assertListEqual( episode_1.agent_episodes["agent_0"].rewards[-1:-3:-1], last_rewards["agent_0"], ) self.assertIn("agent_2", last_rewards) self.assertListEqual( episode_1.agent_episodes["agent_2"].rewards[-1:-3:-1], last_rewards["agent_2"], ) self.assertIn("agent_5", last_rewards) # Agent 5 already died, so we need to convert to list first. self.assertListEqual( episode_1.agent_episodes["agent_5"].rewards[-1:-3:-1], last_rewards["agent_5"], ) self.assertIn("agent_8", last_rewards) self.assertListEqual( episode_1.agent_episodes["agent_8"].rewards[-1:-3:-1], last_rewards["agent_8"], ) self.assertIn("agent_4", last_rewards) self.assertListEqual( episode_1.agent_episodes["agent_4"].rewards[-1:-3:-1], last_rewards["agent_4"], ) # Nothing changes for agent 3 as it has an observation at the last requested # timestep 100, but not at `ts=99`. self.assertIn("agent_3", last_rewards) check( episode_1.agent_episodes["agent_3"].rewards[-1], last_rewards["agent_3"][0] ) # The entries for agent 6 have changed now b/c it has partial rewards during the # requested timesteps 100 and 99. self.assertIn("agent_6", last_rewards) self.assertListEqual(episode_1.global_t_to_local_t["agent_6"][-2:], [95, 98]) self.assertListEqual(episode_1.partial_rewards_t["agent_6"][-2:], [99, 100]) self.assertListEqual( episode_1.partial_rewards["agent_6"][-2:], last_rewards["agent_6"] ) # Entries for agent 7 also change b/c this agent has a partial reward at # `ts=100` while it has no observation recorded at this timestep. 
self.assertIn("agent_7", last_rewards) self.assertListEqual(episode_1.global_t_to_local_t["agent_7"][-2:], [98, 99]) self.assertListEqual(episode_1.partial_rewards_t["agent_7"][-2:], [99, 100]) check(episode_1.partial_rewards["agent_7"][-1], last_rewards["agent_7"][0]) check( episode_1.agent_episodes["agent_7"].rewards[-1], last_rewards["agent_7"][1] ) # Assert that all the other agents are not in the returned rewards. self.assertNotIn("agent_1", last_rewards) # Now request the same indices with `consider_buffer=True` and return them as # a list. last_rewards = episode_1.get_rewards( [-1, -2], partial=False, consider_buffer=True, as_list=True ) self.assertIn("agent_9", last_rewards[0]) self.assertIn("agent_9", last_rewards[1]) check( episode_1.agent_episodes["agent_9"].rewards[-1], last_rewards[0]["agent_9"] ) check( episode_1.agent_episodes["agent_9"].rewards[-2], last_rewards[1]["agent_9"] ) self.assertIn("agent_0", last_rewards[0]) self.assertIn("agent_0", last_rewards[1]) check( episode_1.agent_episodes["agent_0"].rewards[-1], last_rewards[0]["agent_0"] ) check( episode_1.agent_episodes["agent_0"].rewards[-2], last_rewards[1]["agent_0"] ) self.assertIn("agent_2", last_rewards[0]) self.assertIn("agent_2", last_rewards[1]) check( episode_1.agent_episodes["agent_2"].rewards[-1], last_rewards[0]["agent_2"] ) check( episode_1.agent_episodes["agent_2"].rewards[-2], last_rewards[1]["agent_2"] ) self.assertIn("agent_5", last_rewards[0]) self.assertIn("agent_5", last_rewards[1]) check( episode_1.agent_episodes["agent_5"].rewards[-1], last_rewards[0]["agent_5"] ) check( episode_1.agent_episodes["agent_5"].rewards[-2], last_rewards[1]["agent_5"] ) self.assertIn("agent_8", last_rewards[0]) self.assertIn("agent_8", last_rewards[1]) check( episode_1.agent_episodes["agent_8"].rewards[-1], last_rewards[0]["agent_8"] ) check( episode_1.agent_episodes["agent_8"].rewards[-2], last_rewards[1]["agent_8"] ) self.assertIn("agent_4", last_rewards[0]) self.assertIn("agent_4", 
last_rewards[1]) check( episode_1.agent_episodes["agent_4"].rewards[-1], last_rewards[0]["agent_4"] ) check( episode_1.agent_episodes["agent_4"].rewards[-2], last_rewards[1]["agent_4"] ) # Nothing changes for agent 3 as it has an observation at the last requested # timestep 100. self.assertIn("agent_3", last_rewards[0]) self.assertNotIn("agent_3", last_rewards[1]) check( episode_1.agent_episodes["agent_3"].rewards[-1], last_rewards[0]["agent_3"] ) # The entries for agent 6 have changed now b/c it has partial rewards during the # requested timesteps 100 and 99. self.assertIn("agent_6", last_rewards[0]) self.assertIn("agent_6", last_rewards[1]) check(episode_1.partial_rewards["agent_6"][-1], last_rewards[0]["agent_6"]) check(episode_1.partial_rewards["agent_6"][-2], last_rewards[1]["agent_6"]) # Entries for agent 7 also change b/c this agent has a partial reward at # `ts=100` while it has no observation recorded at this timestep. self.assertIn("agent_7", last_rewards[0]) self.assertIn("agent_7", last_rewards[1]) check(episode_1.partial_rewards["agent_7"][-1], last_rewards[0]["agent_7"]) check( episode_1.agent_episodes["agent_7"].rewards[-1], last_rewards[1]["agent_7"] ) # Assert that all the other agents are not in the returned rewards. self.assertNotIn("agent_1", last_rewards[0]) self.assertNotIn("agent_1", last_rewards[1]) # Third, request only partial rewards, i.e. rewards do not get buffered and # added up. 
last_rewards = episode_1.get_rewards(partial=True, consider_buffer=False) self.assertIn("agent_9", last_rewards) check(episode_1.partial_rewards_t["agent_9"][-1], 100) check(episode_1.partial_rewards["agent_9"][-1], last_rewards["agent_9"][-1]) self.assertIn("agent_0", last_rewards) check(episode_1.partial_rewards_t["agent_0"][-1], 100) check(episode_1.partial_rewards["agent_0"][-1], last_rewards["agent_0"][-1]) self.assertIn("agent_2", last_rewards) check(episode_1.partial_rewards_t["agent_2"][-1], 100) check(episode_1.partial_rewards["agent_2"][-1], last_rewards["agent_2"][-1]) self.assertIn("agent_8", last_rewards) check(episode_1.partial_rewards_t["agent_8"][-1], 100) check(episode_1.partial_rewards["agent_8"][-1], last_rewards["agent_8"][-1]) self.assertIn("agent_4", last_rewards) check(episode_1.partial_rewards_t["agent_4"][-1], 100) check(episode_1.partial_rewards["agent_4"][-1], last_rewards["agent_4"][-1]) self.assertIn("agent_3", last_rewards) check(episode_1.partial_rewards_t["agent_3"][-1], 100) check(episode_1.partial_rewards["agent_3"][-1], last_rewards["agent_3"][-1]) self.assertIn("agent_6", last_rewards) check(episode_1.partial_rewards_t["agent_6"][-1], 100) check(episode_1.partial_rewards["agent_6"][-1], last_rewards["agent_6"][-1]) self.assertIn("agent_7", last_rewards) check(episode_1.partial_rewards_t["agent_7"][-1], 100) check(episode_1.partial_rewards["agent_7"][-1], last_rewards["agent_7"][-1]) # Assert that all the other agents are not in the returned rewards. self.assertNotIn("agent_1", last_rewards) # Now request all partial rewards at the last timestep and return them as # a list. 
last_rewards = episode_1.get_rewards( partial=True, consider_buffer=False, as_list=True ) self.assertIn("agent_9", last_rewards[0]) check(episode_1.partial_rewards["agent_9"][-1], last_rewards[0]["agent_9"]) self.assertIn("agent_0", last_rewards[0]) check(episode_1.partial_rewards["agent_0"][-1], last_rewards[0]["agent_0"]) self.assertIn("agent_2", last_rewards[0]) check(episode_1.partial_rewards["agent_2"][-1], last_rewards[0]["agent_2"]) self.assertIn("agent_8", last_rewards[0]) check(episode_1.partial_rewards["agent_8"][-1], last_rewards[0]["agent_8"]) self.assertIn("agent_4", last_rewards[0]) check(episode_1.partial_rewards["agent_4"][-1], last_rewards[0]["agent_4"]) self.assertIn("agent_3", last_rewards[0]) check(episode_1.partial_rewards["agent_3"][-1], last_rewards[0]["agent_3"]) self.assertIn("agent_6", last_rewards[0]) check(episode_1.partial_rewards["agent_6"][-1], last_rewards[0]["agent_6"]) self.assertIn("agent_7", last_rewards[0]) check(episode_1.partial_rewards["agent_7"][-1], last_rewards[0]["agent_7"]) # Assert that all the other agents are not in the returned rewards. self.assertNotIn("agent_1", last_rewards[0]) # Request the last two indices, but consider only partial rewards. 
last_rewards = episode_1.get_rewards( [-1, -2], partial=True, consider_buffer=False ) self.assertIn("agent_9", last_rewards) self.assertListEqual(episode_1.partial_rewards_t["agent_9"][-2:], [99, 100]) check(episode_1.partial_rewards["agent_9"][-1:-3:-1], last_rewards["agent_9"]) self.assertIn("agent_0", last_rewards) self.assertListEqual(episode_1.partial_rewards_t["agent_0"][-2:], [99, 100]) check(episode_1.partial_rewards["agent_0"][-1:-3:-1], last_rewards["agent_0"]) self.assertIn("agent_2", last_rewards) self.assertListEqual(episode_1.partial_rewards_t["agent_2"][-2:], [99, 100]) check(episode_1.partial_rewards["agent_2"][-1:-3:-1], last_rewards["agent_2"]) self.assertIn("agent_8", last_rewards) self.assertListEqual(episode_1.partial_rewards_t["agent_8"][-2:], [99, 100]) check(episode_1.partial_rewards["agent_8"][-1:-3:-1], last_rewards["agent_8"]) self.assertIn("agent_4", last_rewards) self.assertListEqual(episode_1.partial_rewards_t["agent_4"][-2:], [99, 100]) check(episode_1.partial_rewards["agent_4"][-1:-3:-1], last_rewards["agent_4"]) self.assertIn("agent_3", last_rewards) self.assertListEqual(episode_1.partial_rewards_t["agent_3"][-2:], [99, 100]) check(episode_1.partial_rewards["agent_3"][-1:-3:-1], last_rewards["agent_3"]) self.assertIn("agent_6", last_rewards) self.assertListEqual(episode_1.partial_rewards_t["agent_6"][-2:], [99, 100]) check(episode_1.partial_rewards["agent_6"][-1:-3:-1], last_rewards["agent_6"]) self.assertIn("agent_7", last_rewards) self.assertListEqual(episode_1.partial_rewards_t["agent_7"][-2:], [99, 100]) check(episode_1.partial_rewards["agent_7"][-1:-3:-1], last_rewards["agent_7"]) # Assert that all the other agents are not in the returned rewards. self.assertNotIn("agent_1", last_rewards) # At last, request the last two indices for only partial rewards and return # them as list. 
last_rewards = episode_1.get_rewards( [-1, -2], partial=True, consider_buffer=False, as_list=True ) self.assertIn("agent_9", last_rewards[0]) self.assertIn("agent_9", last_rewards[1]) check(episode_1.partial_rewards["agent_9"][-1], last_rewards[0]["agent_9"]) check(episode_1.partial_rewards["agent_9"][-2], last_rewards[1]["agent_9"]) self.assertIn("agent_0", last_rewards[0]) self.assertIn("agent_0", last_rewards[1]) check(episode_1.partial_rewards["agent_0"][-1], last_rewards[0]["agent_0"]) check(episode_1.partial_rewards["agent_0"][-2], last_rewards[1]["agent_0"]) self.assertIn("agent_2", last_rewards[0]) self.assertIn("agent_2", last_rewards[1]) check(episode_1.partial_rewards["agent_2"][-1], last_rewards[0]["agent_2"]) check(episode_1.partial_rewards["agent_2"][-2], last_rewards[1]["agent_2"]) self.assertIn("agent_8", last_rewards[0]) self.assertIn("agent_8", last_rewards[1]) check(episode_1.partial_rewards["agent_8"][-1], last_rewards[0]["agent_8"]) check(episode_1.partial_rewards["agent_8"][-2], last_rewards[1]["agent_8"]) self.assertIn("agent_4", last_rewards[0]) self.assertIn("agent_4", last_rewards[1]) check(episode_1.partial_rewards["agent_4"][-1], last_rewards[0]["agent_4"]) check(episode_1.partial_rewards["agent_4"][-2], last_rewards[1]["agent_4"]) self.assertIn("agent_3", last_rewards[0]) self.assertIn("agent_3", last_rewards[1]) check(episode_1.partial_rewards["agent_3"][-1], last_rewards[0]["agent_3"]) check(episode_1.partial_rewards["agent_3"][-2], last_rewards[1]["agent_3"]) self.assertIn("agent_6", last_rewards[0]) self.assertIn("agent_6", last_rewards[1]) check(episode_1.partial_rewards["agent_6"][-1], last_rewards[0]["agent_6"]) check(episode_1.partial_rewards["agent_6"][-2], last_rewards[1]["agent_6"]) self.assertIn("agent_7", last_rewards[0]) self.assertIn("agent_7", last_rewards[1]) check(episode_1.partial_rewards["agent_7"][-1], last_rewards[0]["agent_7"]) check(episode_1.partial_rewards["agent_7"][-2], last_rewards[1]["agent_7"]) # Assert 
that all the other agents are not in the returned rewards. self.assertNotIn("agent_1", last_rewards[0]) self.assertNotIn("agent_1", last_rewards[1]) # Now, test with `global_ts=False`, i.e. on local level. # Begin with `partial=False` and `consider_buffer=False` # --- is_terminated, is_truncated --- def test_cut(self): # Simple multi-agent episode, in which all agents always step. episode = self._create_simple_episode( [ {"a0": 0, "a1": 0}, {"a0": 1, "a1": 1}, {"a0": 2, "a1": 2}, ] ) successor = episode.cut() check(len(successor), 0) check(successor.env_t_started, 2) check(successor.env_t, 2) check(successor.env_t_to_agent_t, {"a0": [0], "a1": [0]}) a0 = successor.agent_episodes["a0"] a1 = successor.agent_episodes["a1"] check((len(a0), len(a1)), (0, 0)) check((a0.t_started, a1.t_started), (2, 2)) check((a0.t, a1.t), (2, 2)) check((a0.observations, a1.observations), ([2], [2])) check((a0.actions, a1.actions), ([], [])) check((a0.rewards, a1.rewards), ([], [])) check(successor._hanging_actions_end, {}) check(successor._hanging_rewards_end, {}) check(successor._hanging_extra_model_outputs_end, {}) # Multi-agent episode with lookback buffer, in which all agents always step. episode = self._create_simple_episode( [ {"a0": 0, "a1": 0}, {"a0": 1, "a1": 1}, {"a0": 2, "a1": 2}, {"a0": 3, "a1": 3}, ], len_lookback_buffer=2, ) # Cut with lookback=0 argument (default). 
        # Cut with the default lookback (len_lookback_buffer=0): successor
        # starts fresh at the current env timestep; only each agent's last
        # observation is carried over.
        successor = episode.cut()
        check(len(successor), 0)
        check(successor.env_t_started, 1)
        check(successor.env_t, 1)
        check(successor.env_t_to_agent_t, {"a0": [0], "a1": [0]})
        a0 = successor.agent_episodes["a0"]
        a1 = successor.agent_episodes["a1"]
        check((len(a0), len(a1)), (0, 0))
        check((a0.t_started, a1.t_started), (1, 1))
        check((a0.t, a1.t), (1, 1))
        # Only the very last obs (value 3) survives the cut for each agent.
        check((a0.observations, a1.observations), ([3], [3]))
        check((a0.actions, a1.actions), ([], []))
        check((a0.rewards, a1.rewards), ([], []))
        # No hanging (not-yet-completed) timestep data expected after the cut.
        check(successor._hanging_actions_end, {})
        check(successor._hanging_rewards_end, {})
        check(successor._hanging_extra_model_outputs_end, {})

        # Cut with lookback=2 argument: env_t_to_agent_t mappings keep a
        # 2-step lookback buffer, but the per-agent episodes are still empty.
        successor = episode.cut(len_lookback_buffer=2)
        check(len(successor), 0)
        check(successor.env_t_started, 1)
        check(successor.env_t, 1)
        check(successor.env_t_to_agent_t["a0"].data, [0, 1, 2])
        check(successor.env_t_to_agent_t["a1"].data, [0, 1, 2])
        check(successor.env_t_to_agent_t["a0"].lookback, 2)
        check(successor.env_t_to_agent_t["a1"].lookback, 2)
        a0 = successor.agent_episodes["a0"]
        a1 = successor.agent_episodes["a1"]
        check((len(a0), len(a1)), (0, 0))
        check((a0.t_started, a1.t_started), (1, 1))
        check((a0.t, a1.t), (1, 1))
        check((a0.observations, a1.observations), ([3], [3]))
        check((a0.actions, a1.actions), ([], []))
        check((a0.rewards, a1.rewards), ([], []))
        check(successor._hanging_actions_end, {})
        check(successor._hanging_rewards_end, {})
        check(successor._hanging_extra_model_outputs_end, {})

        # Multi-agent episode, in which one agent has a long sequence of not acting,
        # but does receive (intermittent/hanging) rewards during this time.
observations = [ {"a0": 0, "a1": 0}, # 0 {"a0": 1}, # 1 {"a0": 2}, # 2 {"a0": 3}, # 3 ] episode = MultiAgentEpisode( observations=observations, actions=observations[:-1], rewards=[ {"a0": 0.0, "a1": 0.0}, # 0 {"a0": 0.1, "a1": 0.1}, # 1 {"a0": 0.2, "a1": 0.2}, # 2 ], len_lookback_buffer=0, ) successor = episode.cut() check(len(successor), 0) check(successor.env_t_started, 3) check(successor.env_t, 3) a0 = successor.agent_episodes["a0"] self.assertTrue("a1" not in successor.agent_episodes) check(len(a0), 0) check(a0.t_started, 3) check(a0.t, 3) check(a0.observations, [3]) check(a0.actions, []) check(a0.rewards, []) check(successor._hanging_rewards_begin, {"a1": 0.3}) check(successor._hanging_actions_end, {}) check(successor._hanging_rewards_end, {"a1": 0.0}) check(successor._hanging_extra_model_outputs_end, {}) # Add a few timesteps to successor and test the resulting episode. successor.add_env_step( observations={"a0": 4}, actions={"a0": 3}, rewards={"a0": 0.3, "a1": 0.3}, ) check(len(successor), 1) check(successor.env_t_started, 3) check(successor.env_t, 4) # Just b/c we added an intermittend reward for a1 does not mean it should # already have a SAEps in `successor`. It still hasn't received its first obs # yet after the cut. self.assertTrue("a1" not in successor.agent_episodes) check(len(a0), 1) check(a0.t_started, 3) check(a0.t, 4) check(a0.observations, [3, 4]) check(a0.actions, [3]) check(a0.rewards, [0.3]) check(successor._hanging_rewards_begin, {"a1": 0.6}) check(successor._hanging_actions_end, {}) check(successor._hanging_rewards_end, {"a1": 0.0}) check(successor._hanging_extra_model_outputs_end, {}) # Now a1 actually does receive its next obs. 
successor.add_env_step( observations={"a0": 5, "a1": 5}, # <- this is a1's 1st obs in this chunk actions={"a0": 4}, rewards={"a0": 0.4, "a1": 0.4}, ) check(len(successor), 2) check(successor.env_t_started, 3) check(successor.env_t, 5) a1 = successor.agent_episodes["a1"] check((len(a0), len(a1)), (2, 0)) check((a0.t_started, a1.t_started), (3, 0)) check((a0.t, a1.t), (5, 0)) check((a0.observations, a1.observations), ([3, 4, 5], [5])) check((a0.actions, a1.actions), ([3, 4], [])) check((a0.rewards, a1.rewards), ([0.3, 0.4], [])) # Begin caches keep accumulating a1's rewards. check(successor._hanging_rewards_begin, {"a1": 1.0}) # But end caches are now empty (due to a1's observation/finished step). check(successor._hanging_actions_end, {}) check(successor._hanging_rewards_end, {"a1": 0.0}) check(successor._hanging_extra_model_outputs_end, {}) # Generate a simple multi-agent episode and check all internals after # construction. episode_1 = self._create_simple_episode( [ {"a0": 0, "a1": 0}, {"a1": 1}, {"a1": 2}, {"a1": 3}, ], len_lookback_buffer="auto", ) episode_2 = episode_1.cut() check(episode_1.id_, episode_2.id_) check(len(episode_1), 0) check(len(episode_2), 0) # Assert that all `SingleAgentEpisode`s have identical ids. for agent_id, agent_eps in episode_2.agent_episodes.items(): check(agent_eps.id_, episode_1.agent_episodes[agent_id].id_) # Assert that the timestep starts at the end of the last episode. check(episode_1.env_t_started, 0) check(episode_1.env_t, episode_2.env_t_started) check(episode_2.env_t_started, episode_2.env_t) # Make sure our mappings have been adjusted properly. We expect the mapping for # a0 to have this agent's last obs added to the mapping's lookback buffer, such # that we can add the buffered action to the new episode without problems. 
check(episode_2.env_t_to_agent_t["a0"].data, [0, "S", "S", "S"]) check(episode_2.env_t_to_agent_t["a0"].lookback, 3) check(episode_2.env_t_to_agent_t["a1"].data, [0, 1, 2, 3]) check(episode_2.env_t_to_agent_t["a1"].lookback, 3) # Check all other internals of the cut episode chunk. check(episode_2.agent_episodes["a0"].observations.data, [0]) check(episode_2.agent_episodes["a0"].observations.lookback, 0) check(episode_2.agent_episodes["a0"].actions.data, []) check(episode_2.agent_episodes["a0"].actions.lookback, 0) # Test getting data from the cut chunk via the getter APIs. check(episode_2.get_observations(-1), {"a1": 3}) check(episode_2.get_observations(-1, env_steps=False), {"a0": 0, "a1": 3}) check(episode_2.get_observations([-2, -1]), {"a1": [2, 3]}) check(episode_2.get_observations(slice(-3, None)), {"a1": [1, 2, 3]}) check( episode_2.get_observations(slice(-4, None)), {"a0": [0], "a1": [0, 1, 2, 3]} ) # Episode was just cut -> There can't be any actions in it yet (only in the # lookback buffer). check(episode_2.get_actions(), {}) check(episode_2.get_actions(-1), {"a1": 2}) check(episode_2.get_actions(-2), {"a1": 1}) check(episode_2.get_actions([-3]), {"a0": [0], "a1": [0]}) with self.assertRaises(IndexError): episode_2.get_actions([-4]) # Don't expect index error if slice is given. check(episode_2.get_actions(slice(-4, -3)), {}) episode_2.add_env_step( actions={"a1": 4}, rewards={"a1": 0.4}, observations={"a0": 1, "a1": 4}, ) # Check everything again, but this time with the additional timestep taken. check(len(episode_2), 1) check(episode_2.env_t_to_agent_t["a0"].data, [0, "S", "S", "S", 1]) check(episode_2.env_t_to_agent_t["a0"].lookback, 3) check(episode_2.env_t_to_agent_t["a1"].data, [0, 1, 2, 3, 4]) check(episode_2.env_t_to_agent_t["a1"].lookback, 3) check(episode_2.agent_episodes["a0"].observations.data, [0, 1]) check(episode_2.agent_episodes["a0"].observations.lookback, 0) # Action was "logged" -> Buffer should now be completely empty. 
check(episode_2.agent_episodes["a0"].actions.data, [0]) check(episode_2._hanging_actions_end, {}) check(episode_2.agent_episodes["a0"].actions.lookback, 0) check(episode_2.get_observations(-1), {"a0": 1, "a1": 4}) check(episode_2.get_observations(-1, env_steps=False), {"a0": 1, "a1": 4}) check(episode_2.get_observations([-2, -1]), {"a0": [1], "a1": [3, 4]}) check(episode_2.get_observations(slice(-3, None)), {"a0": [1], "a1": [2, 3, 4]}) check( episode_2.get_observations(slice(-4, None)), {"a0": [1], "a1": [1, 2, 3, 4]} ) # Episode was just cut -> There can't be any actions in it yet (only in the # lookback buffer). check(episode_2.get_actions(), {"a1": [4]}) check(episode_2.get_actions(-1), {"a1": 4}) check(episode_2.get_actions(-2), {"a1": 2}) check(episode_2.get_actions([-3]), {"a1": [1]}) check(episode_2.get_actions([-4]), {"a0": [0], "a1": [0]}) with self.assertRaises(IndexError): episode_2.get_actions([-5]) # Don't expect index error if slice is given. check(episode_2.get_actions(slice(-5, -4)), {}) # Create an environment. episode_1, _ = self._mock_multi_agent_records_from_env(size=100) # Assert that the episode has 100 timesteps. check(episode_1.env_t, 100) # Create a successor. episode_2 = episode_1.cut() # Assert that it has the same id. check(episode_1.id_, episode_2.id_) check(len(episode_1), 100) check(len(episode_2), 0) # Assert that all `SingleAgentEpisode`s have identical ids. for agent_id, agent_eps in episode_2.agent_episodes.items(): check(agent_eps.id_, episode_1.agent_episodes[agent_id].id_) # Assert that the timestep starts at the end of the last episode. check(episode_1.env_t_started, 0) check(episode_2.env_t, episode_2.env_t_started) check(episode_1.env_t, episode_2.env_t_started) # Another complex case. 
episode = self._create_simple_episode( [ {"a0": 0}, {"a2": 0}, {"a2": 1}, {"a2": 2}, {"a0": 1}, {"a2": 3}, {"a2": 4}, # <- BUT: actual cut here, b/c of hanging action of a2 {"a2": 5}, # <- would expect cut here (b/c of lookback==1) {"a0": 2}, {"a1": 0}, ], len_lookback_buffer=0, ) successor = episode.cut(len_lookback_buffer=1) check(len(successor), 0) check(successor.env_t, 9) check(successor.env_t_started, 9) self.assertTrue(all(len(e) == 0 for e in successor.agent_episodes.values())) self.assertTrue(all(len(e) == 1 for e in successor.env_t_to_agent_t.values())) self.assertTrue( all(e.lookback == 2 for e in successor.env_t_to_agent_t.values()) ) check(successor.env_t_to_agent_t["a0"].data, ["S", 0, "S"]) check(successor.env_t_to_agent_t["a1"].data, ["S", "S", 0]) check(successor.env_t_to_agent_t["a2"].data, [0, "S", "S"]) check(successor.get_observations(0), {"a1": 0}) with self.assertRaises(IndexError): successor.get_observations(1) check(successor.get_observations(-2), {"a0": 2}) check(successor.get_observations(-3), {"a2": 5}) with self.assertRaises(IndexError): successor.get_observations(-4) # TODO (sven): Revisit this test and the MultiAgentEpisode.episode_concat API. return # Assert that the last observation and info of `episode_1` are the first # observation and info of `episode_2`. for agent_id, agent_obs in episode_1.get_observations( -1, env_steps=False ).items(): # If agents are done only ensure that the `SingleAgentEpisode` does not # exist in episode_2. if episode_1.agent_episodes[agent_id].is_done: self.assertTrue(agent_id not in episode_2.agent_episodes) else: check( agent_obs, episode_2.get_observations( -1, neg_index_as_lookback=True, env_steps=False, agent_ids=agent_id, ), ) agent_infos = episode_1.get_infos(-1, env_steps=False) check( agent_infos, episode_2.get_infos(0, agent_ids=agent_id), ) # Now test the buffers. for agent_id, agent_buffer in episode_1.agent_buffers.items(): # Make sure the action buffers are either both full or both empty. 
check( agent_buffer["actions"].full(), episode_2.agent_buffers[agent_id]["actions"].full(), ) # If the action buffers are full they should share the same value. if agent_buffer["actions"].full(): check( agent_buffer["actions"].queue[0], episode_2.agent_buffers[agent_id]["actions"].queue[0], ) # If the agent is not done, the buffers should be equal in value. if not episode_1.agent_episodes[agent_id].is_done: # The other buffers have default values, if the agent is not done. # Note, reward buffers could be full of partial rewards. check( agent_buffer["rewards"].queue[0], episode_2.agent_buffers[agent_id]["rewards"].queue[0], ) # Here we want to know, if they are both different from `None`. check( agent_buffer["extra_model_outputs"].queue[0], episode_2.agent_buffers[agent_id]["extra_model_outputs"].queue[0], ) # If an agent is done the buffers should be empty for both, predecessor # and successor. else: self.assertTrue(agent_buffer["actions"].empty()) self.assertTrue(agent_buffer["rewards"].empty()) self.assertTrue(agent_buffer["extra_model_outputs"].empty()) self.assertTrue(agent_buffer["actions"].empty()) self.assertTrue(agent_buffer["rewards"].empty()) self.assertTrue(agent_buffer["extra_model_outputs"].empty()) # Ensure that the timestep mappings match. for agent_id, agent_global_ts in episode_2.global_t_to_local_t.items(): # If an agent is not done, we write over the timestep from its last # observation. if not episode_2.agent_episodes[agent_id].is_done: check(agent_global_ts[0], episode_1.global_t_to_local_t[agent_id][-1]) # In the other case this mapping should be empty. else: check(len(agent_global_ts), 0) # Assert that the global action timestep mappings match. for agent_id, agent_global_ts in episode_2.global_actions_t.items(): # If an agent is not done, we write over the timestep from its last # action. 
if not episode_2.agent_episodes[agent_id].is_done: # If a timestep mapping for actions was copied over the last timestep # of the üredecessor and the first of the successor must match. if agent_global_ts: check(agent_global_ts[0], episode_1.global_actions_t[agent_id][-1]) # If no action timestep mapping was copied over the last action must # have been at or before the last observation in the predecessor. else: self.assertGreaterEqual( episode_1.global_t_to_local_t[agent_id][-1], episode_1.global_actions_t[agent_id][-1], ) # In the other case this mapping should be empty. else: check(len(agent_global_ts), 0) # Assert that the partial reward mappings and histories match. for agent_id, agent_global_ts in episode_2.partial_rewards_t.items(): # Ensure that timestep mapping and history have the same length. check(len(agent_global_ts), len(episode_2.partial_rewards[agent_id])) # If an agent is not done, we write over the timestep from its last # partial rewards. if not episode_2.agent_episodes[agent_id].is_done: # If there are partial rewards after the last observation ensure # they are correct. if ( episode_1.global_t_to_local_t[agent_id][-1] < episode_1.partial_rewards_t[agent_id][-1] ): indices_after_last_obs = episode_1.partial_rewards_t[ agent_id ].find_indices_right(episode_1.global_t_to_local_t[agent_id][-1]) episode_1_partial_rewards = list( map( episode_1.partial_rewards[agent_id].__getitem__, indices_after_last_obs, ) ) check( sum(episode_2.partial_rewards[agent_id]), sum(episode_1_partial_rewards), ) # Also ensure that the timestep mappings are correct. episode_1_partial_rewards_t = list( map( episode_1.partial_rewards_t[agent_id].__getitem__, indices_after_last_obs, ) ) self.assertListEqual( episode_2.partial_rewards_t[agent_id], episode_1_partial_rewards_t, ) # In the other case this mapping should be empty. else: check(len(agent_global_ts), 0) # In the other case this mapping should be empty. 
else: check(len(agent_global_ts), 0) # Now test, if the specific values in the buffers are correct. ( observations, actions, rewards, terminateds, truncateds, infos, ) = self._mock_multi_agent_records() # Create the episode. episode_1 = MultiAgentEpisode( agent_ids=["agent_1", "agent_2", "agent_3", "agent_4", "agent_5"], observations=observations, actions=actions, rewards=rewards, infos=infos, terminateds=terminateds, truncateds=truncateds, ) # Assert that agents 1 and 3's buffers are indeed full. for agent_id in ["agent_1", "agent_3"]: check( actions[1][agent_id], episode_1.agent_buffers[agent_id]["actions"].queue[0], ) # # Put the action back into the buffer. # episode_1.agent_buffers[agent_id]["actions"].put_nowait( # actions[1][agent_id] # ) # Now step once. action = {"agent_2": 3, "agent_4": 3} # This time agent 4 should have the buffer full, while agent 1 has emptied # its buffer. observation = {"agent_1": 3, "agent_2": 3} # Agents 1 and 2 add the reward to its timestep, but agent 3 and agent 5 # add this to the buffer and to the global reward history. reward = {"agent_1": 2.0, "agent_2": 2.0, "agent_3": 2.0, "agent_5": 2.0} info = {"agent_1": {}, "agent_2": {}} terminateds = {k: False for k in observation.keys()} terminateds.update({"__all__": False}) truncateds = {k: False for k in observation.keys()} truncateds.update({"__all__": False}) episode_1.add_env_step( observations=observation, actions=action, rewards=reward, infos=info, terminateds=terminateds, truncateds=truncateds, ) # Check that the partial reward history is correct. 
check(len(episode_1.partial_rewards_t["agent_5"]), 1) check(len(episode_1.partial_rewards["agent_5"]), 1) check(len(episode_1.partial_rewards_t["agent_3"]), 2) check(len(episode_1.partial_rewards["agent_3"]), 2) check(len(episode_1.partial_rewards_t["agent_2"]), 2) check(len(episode_1.partial_rewards_t["agent_2"]), 2) self.assertListEqual(episode_1.partial_rewards["agent_3"], [0.5, 2.0]) self.assertListEqual(episode_1.partial_rewards_t["agent_3"], [1, 3]) self.assertListEqual(episode_1.partial_rewards["agent_2"], [1.0, 2.0]) self.assertListEqual(episode_1.partial_rewards_t["agent_2"], [2, 3]) check(len(episode_1.partial_rewards["agent_4"]), 2) self.assertListEqual(episode_1.partial_rewards["agent_4"], [0.5, 1.0]) self.assertListEqual(episode_1.partial_rewards_t["agent_4"], [1, 2]) # Now check that the reward buffers are full. for agent_id in ["agent_3", "agent_5"]: check(episode_1.agent_buffers[agent_id]["rewards"].queue[0], 2.0) # Check that the reward history is correctly recorded. check(episode_1.partial_rewards_t[agent_id][-1], episode_1.t) check(episode_1.partial_rewards[agent_id][-1], 2.0) # Now create the successor. episode_2 = episode_1.cut() for agent_id, agent_eps in episode_2.agent_episodes.items(): if len(agent_eps.observations) > 0: # The successor's first observations should be the predecessor's last. check( agent_eps.observations[0], episode_1.agent_episodes[agent_id].observations[-1], ) # The successor's first entry in the timestep mapping should be the # predecessor's last. check( episode_2.global_t_to_local_t[agent_id][ -1 ], # + episode_2.t_started, episode_1.global_t_to_local_t[agent_id][-1], ) # Now test that the partial rewards fit. for agent_id in ["agent_3", "agent_5"]: check(len(episode_2.partial_rewards_t[agent_id]), 1) check(episode_2.partial_rewards_t[agent_id][-1], 3) check(episode_2.agent_buffers[agent_id]["rewards"].queue[0], 2.0) # Assert that agent 3's and 4's action buffers are full. 
self.assertTrue(episode_2.agent_buffers["agent_4"]["actions"].full()) self.assertTrue(episode_2.agent_buffers["agent_3"]["actions"].full()) # Also assert that agent 1's action b uffer was emptied with the last # observations. self.assertTrue(episode_2.agent_buffers["agent_1"]["actions"].empty()) def test_slice(self): # Generate a simple multi-agent episode. episode = self._create_simple_episode( [ {"a0": 0, "a1": 0}, {"a1": 1}, {"a1": 2}, {"a0": 3, "a1": 3}, {"a0": 4}, {"a0": 5, "a1": 5}, {"a0": 6, "a1": 6}, {"a1": 7}, {"a1": 8}, {"a0": 9}, ] ) check(len(episode), 9) # Slice the episode in different ways and check results. # Empty slice. slice_ = episode[100:100] check(len(slice_), 0) check(slice_.env_t_started, 9) check(slice_.env_t, 9) # All-include slices. for s in [ slice(None, None, None), slice(-100, None, None), slice(None, 1000, None), slice(-1000, 1000, None), ]: slice_ = episode[s] check(len(slice_), len(episode)) check(slice_.env_t_started, 0) check(slice_.env_t, 9) a0 = slice_.agent_episodes["a0"] a1 = slice_.agent_episodes["a1"] check((len(a0), len(a1)), (5, 7)) check((a0.t_started, a1.t_started), (0, 0)) check((a0.t, a1.t), (5, 7)) check( (a0.observations, a1.observations), ([0, 3, 4, 5, 6, 9], [0, 1, 2, 3, 5, 6, 7, 8]), ) check((a0.actions, a1.actions), ([0, 3, 4, 5, 6], [0, 1, 2, 3, 5, 6, 7])) check( (a0.rewards, a1.rewards), ([0.0, 0.3, 0.4, 0.5, 0.6], [0.0, 0.1, 0.2, 0.3, 0.5, 0.6, 0.7]), ) check((a0.is_done, a1.is_done), (False, False)) # From pos start. 
        # From positive start index: episode[2:] keeps env steps 2..9.
        slice_ = episode[2:]
        check(len(slice_), 7)
        check(slice_.env_t_started, 2)
        check(slice_.env_t, 9)
        a0 = slice_.agent_episodes["a0"]
        a1 = slice_.agent_episodes["a1"]
        check((len(a0), len(a1)), (4, 5))
        check((a0.t_started, a1.t_started), (1, 2))
        check((a0.t, a1.t), (5, 7))
        check(
            (a0.observations, a1.observations),
            ([3, 4, 5, 6, 9], [2, 3, 5, 6, 7, 8]),
        )
        check((a0.actions, a1.actions), ([3, 4, 5, 6], [2, 3, 5, 6, 7]))
        check(
            (a0.rewards, a1.rewards),
            ([0.3, 0.4, 0.5, 0.6], [0.2, 0.3, 0.5, 0.6, 0.7]),
        )
        check((a0.is_done, a1.is_done), (False, False))

        # If a slice ends in a "gap" for an agent, expect actions and rewards to be
        # cached in the agent's buffer (a0 acted at env step 0 but has no obs at
        # env step 1, so its action/reward become "hanging" at the slice end).
        slice_ = episode[:1]
        check(len(slice_), 1)
        check(slice_.env_t_started, 0)
        check(slice_.env_t, 1)
        a0 = slice_.agent_episodes["a0"]
        a1 = slice_.agent_episodes["a1"]
        check((len(a0), len(a1)), (0, 1))
        check((a0.t_started, a1.t_started), (0, 0))
        check((a0.t, a1.t), (0, 1))
        check((a0.observations, a1.observations), ([0], [0, 1]))
        check((a0.actions, a1.actions), ([], [0]))
        check((a0.rewards, a1.rewards), ([], [0.0]))
        check((a0.is_done, a1.is_done), (False, False))
        check(slice_._hanging_actions_end["a0"], 0)
        check(slice_._hanging_rewards_end["a0"], 0.0)

        # To positive stop index.
        slice_ = episode[:3]
        check(len(slice_), 3)
        check(slice_.env_t_started, 0)
        check(slice_.env_t, 3)
        a0 = slice_.agent_episodes["a0"]
        a1 = slice_.agent_episodes["a1"]
        check((len(a0), len(a1)), (1, 3))
        check((a0.t_started, a1.t_started), (0, 0))
        check((a0.t, a1.t), (1, 3))
        check((a0.observations, a1.observations), ([0, 3], [0, 1, 2, 3]))
        check((a0.actions, a1.actions), ([0], [0, 1, 2]))
        check((a0.rewards, a1.rewards), ([0.0], [0.0, 0.1, 0.2]))
        check((a0.is_done, a1.is_done), (False, False))

        # To negative stop index.
        slice_ = episode[:-1]
        check(len(slice_), 8)
        a0 = slice_.agent_episodes["a0"]
        a1 = slice_.agent_episodes["a1"]
        check((len(a0), len(a1)), (4, 7))
        check((a0.t_started, a1.t_started), (0, 0))
        check((a0.t, a1.t), (4, 7))
        check(
            (a0.observations, a1.observations),
            ([0, 3, 4, 5, 6], [0, 1, 2, 3, 5, 6, 7, 8]),
        )
        check((a0.actions, a1.actions), ([0, 3, 4, 5], [0, 1, 2, 3, 5, 6, 7]))
        check(
            (a0.rewards, a1.rewards),
            ([0.0, 0.3, 0.4, 0.5], [0.0, 0.1, 0.2, 0.3, 0.5, 0.6, 0.7]),
        )
        check((a0.is_done, a1.is_done), (False, False))
        # Expect the hanging action to be found in the buffer.
        check(slice_._hanging_actions_end["a0"], 6)

        slice_ = episode[:-4]
        check(len(slice_), 5)
        check(slice_.env_t_started, 0)
        check(slice_.env_t, 5)
        a0 = slice_.agent_episodes["a0"]
        a1 = slice_.agent_episodes["a1"]
        check((len(a0), len(a1)), (3, 4))
        check((a0.t_started, a1.t_started), (0, 0))
        check((a0.t, a1.t), (3, 4))
        check((a0.observations, a1.observations), ([0, 3, 4, 5], [0, 1, 2, 3, 5]))
        check((a0.actions, a1.actions), ([0, 3, 4], [0, 1, 2, 3]))
        check(
            (a0.rewards, a1.rewards),
            ([0.0, 0.3, 0.4], [0.0, 0.1, 0.2, 0.3]),
        )
        check((a0.is_done, a1.is_done), (False, False))

        # From negative start index.
        slice_ = episode[-2:]
        check(len(slice_), 2)
        check(slice_.env_t_started, 7)
        check(slice_.env_t, 9)
        a0 = slice_.agent_episodes["a0"]
        a1 = slice_.agent_episodes["a1"]
        check((len(a0), len(a1)), (0, 1))
        check((a0.t_started, a1.t_started), (5, 6))
        check((a0.t, a1.t), (5, 7))
        check((a0.observations, a1.observations), ([9], [7, 8]))
        check((a0.actions, a1.actions), ([], [7]))
        check((a0.rewards, a1.rewards), ([], [0.7]))
        check((a0.is_done, a1.is_done), (False, False))

        # From negative start index.
slice_ = episode[-3:] check(len(slice_), 3) check(slice_.env_t_started, 6) check(slice_.env_t, 9) a0 = slice_.agent_episodes["a0"] a1 = slice_.agent_episodes["a1"] check((len(a0), len(a1)), (1, 2)) check((a0.t_started, a1.t_started), (4, 5)) check((a0.t, a1.t), (5, 7)) check((a0.observations, a1.observations), ([6, 9], [6, 7, 8])) check((a0.actions, a1.actions), ([6], [6, 7])) check((a0.rewards, a1.rewards), ([0.6], [0.6, 0.7])) check((a0.is_done, a1.is_done), (False, False)) # From neg start. slice_ = episode[-5:] check(len(slice_), 5) check(slice_.env_t_started, 4) check(slice_.env_t, 9) a0 = slice_.agent_episodes["a0"] a1 = slice_.agent_episodes["a1"] check((len(a0), len(a1)), (3, 3)) check((a0.t_started, a1.t_started), (2, 4)) check((a0.t, a1.t), (5, 7)) check((a0.observations, a1.observations), ([4, 5, 6, 9], [5, 6, 7, 8])) check((a0.actions, a1.actions), ([4, 5, 6], [5, 6, 7])) check((a0.rewards, a1.rewards), ([0.4, 0.5, 0.6], [0.5, 0.6, 0.7])) check((a0.is_done, a1.is_done), (False, False)) # From neg start to neg stop. slice_ = episode[-4:-2] check(len(slice_), 2) check(slice_.env_t_started, 5) check(slice_.env_t, 7) a0 = slice_.agent_episodes["a0"] a1 = slice_.agent_episodes["a1"] check((len(a0), len(a1)), (1, 2)) check((a0.t_started, a1.t_started), (3, 4)) check((a0.t, a1.t), (4, 6)) check((a0.observations, a1.observations), ([5, 6], [5, 6, 7])) check((a0.actions, a1.actions), ([5], [5, 6])) check((a0.rewards, a1.rewards), ([0.5], [0.5, 0.6])) check((a0.is_done, a1.is_done), (False, False)) # Test what happens if one single-agent episode terminates earlier than the # other. observations = [ {"a0": 0, "a1": 0}, {"a0": 1, "a1": 1}, {"a1": 2}, {"a1": 3}, ] actions = [ {"a0": 0, "a1": 0}, {"a1": 1}, {"a1": 2}, ] rewards = [{aid: a / 10 for aid, a in a.items()} for a in actions] # TODO (sven): Do NOT use self._create_simple_episode here b/c this util does # not handle terminateds (should not create actions after final observations). 
episode = MultiAgentEpisode( observations=observations, actions=actions, rewards=rewards, terminateds={"a0": True}, len_lookback_buffer=0, ) # --- slice_ = episode[:1] a0 = slice_.agent_episodes["a0"] a1 = slice_.agent_episodes["a1"] check(len(slice_), 1) check(slice_.env_t_started, 0) check(slice_.env_t, 1) check((len(a0), len(a1)), (1, 1)) check((a0.t_started, a1.t_started), (0, 0)) check((a0.t, a1.t), (1, 1)) check((a0.observations, a1.observations), ([0, 1], [0, 1])) check((a0.actions, a1.actions), ([0], [0])) check((a0.rewards, a1.rewards), ([0.0], [0.0])) check((a0.is_done, a1.is_done), (True, False)) # --- slice_ = episode[:2] a0 = slice_.agent_episodes["a0"] a1 = slice_.agent_episodes["a1"] check(len(slice_), 2) check(slice_.env_t_started, 0) check(slice_.env_t, 2) check((len(a0), len(a1)), (1, 2)) check((a0.t_started, a1.t_started), (0, 0)) check((a0.t, a1.t), (1, 2)) check((a0.observations, a1.observations), ([0, 1], [0, 1, 2])) check((a0.actions, a1.actions), ([0], [0, 1])) check((a0.rewards, a1.rewards), ([0.0], [0.0, 0.1])) check((a0.is_done, a1.is_done), (True, False)) # --- slice_ = episode[2:] self.assertTrue("a0" not in slice_.agent_episodes) a1 = slice_.agent_episodes["a1"] check(len(slice_), 1) check(slice_.env_t_started, 2) check(slice_.env_t, 3) check(len(a1), 1) check(a1.t_started, 2) check(a1.t, 3) check(a1.observations, [2, 3]) check(a1.actions, [2]) check(a1.rewards, [0.2]) check(a1.is_done, False) # Test what happens if we have lookback buffers. 
observations = [ {"a0": 0, "a1": 0}, # lookback -2 {"a0": 1, "a1": 1}, # lookback -1 {"a1": 2}, # 0 {"a1": 3}, # 1 {"a1": 4}, # 2 {"a0": 5, "a1": 5}, # 3 {"a0": 6}, # 4 {"a0": 7, "a1": 7}, # 5 {"a0": 8}, # 6 {"a1": 9}, # 7 ] episode = self._create_simple_episode(observations, len_lookback_buffer=2) # --- slice_ = episode[1:3] check(len(slice_), 2) check(slice_.env_t_started, 1) check(slice_.env_t, 3) a0 = slice_.agent_episodes["a0"] a1 = slice_.agent_episodes["a1"] check((len(a0), len(a1)), (0, 2)) check((a0.t_started, a1.t_started), (2, 3)) check((a0.t, a1.t), (2, 5)) check((a0.observations, a1.observations), ([5], [3, 4, 5])) check((a0.actions, a1.actions), ([], [3, 4])) check((a0.rewards, a1.rewards), ([], [0.3, 0.4])) check((a0.is_done, a1.is_done), (False, False)) # --- slice_ = episode[None:4] check(len(slice_), 4) check(slice_.env_t_started, 0) check(slice_.env_t, 4) a0 = slice_.agent_episodes["a0"] a1 = slice_.agent_episodes["a1"] check((len(a0), len(a1)), (1, 3)) check((a0.t_started, a1.t_started), (2, 2)) check((a0.t, a1.t), (3, 5)) check((a0.observations, a1.observations), ([5, 6], [2, 3, 4, 5])) check((a0.actions, a1.actions), ([5], [2, 3, 4])) check((a0.rewards, a1.rewards), ([0.5], [0.2, 0.3, 0.4])) check((a0.is_done, a1.is_done), (False, False)) # --- slice_ = episode[-3:-1] check(len(slice_), 2) check(slice_.env_t_started, 4) check(slice_.env_t, 6) a0 = slice_.agent_episodes["a0"] a1 = slice_.agent_episodes["a1"] check((len(a0), len(a1)), (2, 0)) check((a0.t_started, a1.t_started), (3, 6)) check((a0.t, a1.t), (5, 6)) check((a0.observations, a1.observations), ([6, 7, 8], [7])) check((a0.actions, a1.actions), ([6, 7], [])) check((a0.rewards, a1.rewards), ([0.6, 0.7], [])) check((a0.is_done, a1.is_done), (False, False)) # --- slice_ = episode[-1:None] check(len(slice_), 1) check(slice_.env_t_started, 6) check(slice_.env_t, 7) a0 = slice_.agent_episodes["a0"] a1 = slice_.agent_episodes["a1"] check((len(a0), len(a1)), (0, 0)) check((a0.t_started, 
a1.t_started), (5, 7)) check((a0.t, a1.t), (5, 7)) check((a0.observations, a1.observations), ([8], [9])) check((a0.actions, a1.actions), ([], [])) check((a0.rewards, a1.rewards), ([], [])) check((a0.is_done, a1.is_done), (False, False)) def test_concat_episode(self): # Generate a simple multi-agent episode. base_episode = self._create_simple_episode( [ {"a0": 0, "a1": 0}, {"a0": 1, "a1": 1}, # <- split here, then concat {"a0": 2, "a1": 2}, ] ) check(len(base_episode), 2) # Split it into two slices. episode_1, episode_2 = base_episode[:1], base_episode[1:] check(len(episode_1), 1) check(len(episode_2), 1) # Re-concat these slices. episode_1.concat_episode(episode_2) check(len(episode_1), 2) check(episode_1.env_t_started, 0) check(episode_1.env_t, 2) a0 = episode_1.agent_episodes["a0"] a1 = episode_1.agent_episodes["a1"] check((len(a0), len(a1)), (2, 2)) check((a0.t_started, a1.t_started), (0, 0)) check((a0.t, a1.t), (2, 2)) check((a0.observations, a1.observations), ([0, 1, 2], [0, 1, 2])) check((a0.actions, a1.actions), ([0, 1], [0, 1])) check((a0.rewards, a1.rewards), ([0.0, 0.1], [0.0, 0.1])) check((a0.is_done, a1.is_done), (False, False)) # Generate a more complex multi-agent episode. base_episode = self._create_simple_episode( [ {"a0": 0, "a1": 0}, {"a0": 1, "a1": 1}, {"a1": 2}, {"a1": 3}, {"a1": 4}, # <- split here, then concat {"a0": 5, "a1": 5}, {"a0": 6}, # <- split here, then concat {"a0": 7, "a1": 7}, # <- split here, then concat {"a0": 8}, # <- split here, then concat {"a1": 9}, ] ) check(len(base_episode), 9) # Split it into two slices. for split_ in [(4, (4, 5)), (6, (6, 3)), (7, (7, 2)), (8, (8, 1))]: episode_1, episode_2 = base_episode[: split_[0]], base_episode[split_[0] :] check(len(episode_1), split_[1][0]) check(len(episode_2), split_[1][1]) # Re-concat these slices. 
episode_1.concat_episode(episode_2) check(len(episode_1), 9) check(episode_1.env_t_started, 0) check(episode_1.env_t, 9) a0 = episode_1.agent_episodes["a0"] a1 = episode_1.agent_episodes["a1"] check((len(a0), len(a1)), (5, 7)) check((a0.t_started, a1.t_started), (0, 0)) check((a0.t, a1.t), (5, 7)) check( (a0.observations, a1.observations), ([0, 1, 5, 6, 7, 8], [0, 1, 2, 3, 4, 5, 7, 9]), ) check((a0.actions, a1.actions), ([0, 1, 5, 6, 7], [0, 1, 2, 3, 4, 5, 7])) check( (a0.rewards, a1.rewards), ([0, 0.1, 0.5, 0.6, 0.7], [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.7]), ) check((a0.is_done, a1.is_done), (False, False)) # Test hanging rewards. observations = [ {"a0": 0, "a1": 0}, # 0 {"a0": 1}, # 1 {"a0": 2}, # 2 <- split here, then concat {"a0": 3}, # 3 {"a0": 4}, # 4 ] actions = observations[:-1] # a1 continues receiving rewards (along with a0's actions). rewards = [ {"a0": 0.0, "a1": 0.0}, # 0 {"a0": 0.1, "a1": 1.0}, # 1 {"a0": 0.2, "a1": 2.0}, # 2 {"a0": 0.3, "a1": 3.0}, # 3 ] base_episode = MultiAgentEpisode( observations=observations, actions=actions, rewards=rewards, len_lookback_buffer=0, ) check(len(base_episode), 4) check(base_episode._hanging_rewards_end, {"a1": 6.0}) episode_1, episode_2 = base_episode[:2], base_episode[2:] check(len(episode_1), 2) check(len(episode_2), 2) # Re-concat these slices. episode_1.concat_episode(episode_2) check(len(episode_1), 4) check(episode_1.env_t_started, 0) check(episode_1.env_t, 4) a0 = episode_1.agent_episodes["a0"] a1 = episode_1.agent_episodes["a1"] check((len(a0), len(a1)), (4, 0)) check((a0.t_started, a1.t_started), (0, 0)) check((a0.t, a1.t), (4, 0)) check( (a0.observations, a1.observations), ([0, 1, 2, 3, 4], [0]), ) check((a0.actions, a1.actions), ([0, 1, 2, 3], [])) check( (a0.rewards, a1.rewards), ([0, 0.1, 0.2, 0.3], []), ) check(episode_1._hanging_rewards_end, {"a1": 6.0}) check((a0.is_done, a1.is_done), (False, False)) def test_get_return(self): # Generate an empty episode and ensure that the return is zero. 
episode = MultiAgentEpisode() # Now sample 100 timesteps. episode, env = self._mock_multi_agent_records_from_env() ret = episode.get_return() # Ensure that the return is now at least zero. self.assertGreaterEqual(ret, 0.0) # Assert that the return is indeed the sum of all agent returns. agent_returns = sum( agent_eps.get_return() for agent_eps in episode.agent_episodes.values() ) self.assertTrue(ret, agent_returns) # Assert that adding the buffered rewards to the agent returns # gives the expected result when considering the buffer in # `get_return()`. buffered_rewards = sum(episode._hanging_rewards_end.values()) self.assertTrue( episode.get_return(include_hanging_rewards=True), agent_returns + buffered_rewards, ) def test_len(self): # Generate an empty episode and ensure that `len()` raises an error. episode = MultiAgentEpisode() # Generate a new episode with some initialization data. obs = [ {"a0": 0, "a1": 0}, {"a1": 1}, {"a0": 2}, {"a0": 3, "a1": 3}, ] episode = MultiAgentEpisode( observations=obs, actions=obs[:-1], rewards=obs[:-1], len_lookback_buffer=0 ) check(len(episode), 3) obs.append({"a1": 4}) episode = MultiAgentEpisode( observations=obs, actions=obs[:-1], rewards=obs[:-1], len_lookback_buffer=0 ) check(len(episode), 4) obs.append({"a0": 5, "a1": 5}) episode = MultiAgentEpisode( observations=obs, actions=obs[:-1], rewards=obs[:-1], len_lookback_buffer=0 ) check(len(episode), 5) obs.append({"a0": 6}) episode = MultiAgentEpisode( observations=obs, actions=obs[:-1], rewards=obs[:-1], len_lookback_buffer=0 ) check(len(episode), 6) # Create an episode and environment and sample 100 timesteps. episode, env = self._mock_multi_agent_records_from_env() # Assert that the length is indeed 100. check(len(episode), 100) # Now, build a successor. successor = episode.cut() # Sample another 100 timesteps. successor, env = self._mock_multi_agent_records_from_env( episode=successor, env=env, init=False ) # Ensure that the length of the successor is 100. 
self.assertTrue(len(successor), 100) # Now concatenate the two episodes. # episode.concat_episode(successor) # Assert that the length is now 100. # self.assertTrue(len(episode), 200) def test_get_state_and_from_state(self): # Generate an empty episode and ensure that the state is empty. # Generate a simple multi-agent episode. episode = self._create_simple_episode( [ {"a0": 0, "a1": 0}, {"a1": 1}, {"a1": 2}, {"a0": 3, "a1": 3}, {"a0": 4}, {"a0": 5, "a1": 5}, {"a0": 6, "a1": 6}, {"a1": 7}, {"a1": 8}, {"a0": 9}, ] ) # Get the state of the episode. state = episode.get_state() # Ensure that the state is not empty. self.assertTrue(state) episode_2 = MultiAgentEpisode.from_state(state) # Assert that the two episodes are identical. self.assertEqual(episode_2.id_, episode.id_) self.assertEqual( episode_2.agent_to_module_mapping_fn, episode.agent_to_module_mapping_fn ) self.assertEqual( type(episode_2.observation_space), type(episode.observation_space) ) self.assertEqual(type(episode_2.action_space), type(episode.action_space)) check(episode_2.env_t_started, episode.env_t_started) check(episode_2.env_t, episode.env_t) check(episode_2.agent_t_started, episode.agent_t_started) self.assertEqual(episode_2.env_t_to_agent_t, episode.env_t_to_agent_t) for agent_id, env_t_to_agent_t in episode_2.env_t_to_agent_t.items(): check(env_t_to_agent_t.data, episode.env_t_to_agent_t[agent_id].data) check( env_t_to_agent_t.lookback, episode.env_t_to_agent_t[agent_id].lookback ) check(episode_2._hanging_actions_end, episode._hanging_actions_end) check( episode_2._hanging_extra_model_outputs_end, episode._hanging_extra_model_outputs_end, ) check(episode_2._hanging_rewards_end, episode._hanging_rewards_end) check(episode_2._hanging_rewards_begin, episode._hanging_rewards_begin) check(episode_2.is_terminated, episode.is_terminated) check(episode_2.is_truncated, episode.is_truncated) self.assertSetEqual( set(episode_2.agent_episodes.keys()), set(episode.agent_episodes.keys()) ) for agent_id, 
agent_eps in episode_2.agent_episodes.items(): self.assertEqual(agent_eps.id_, episode.agent_episodes[agent_id].id_) check(episode_2._start_time, episode._start_time) check(episode_2._last_step_time, episode._last_step_time) def test_get_sample_batch(self): # TODO (simon): Revisit this test and the MultiAgentEpisode.episode_concat API. return # Generate an environment and episode and sample 100 timesteps. episode, env = self._mock_multi_agent_records_from_env() # Now convert to sample batch. batch = episode.get_sample_batch() # Assert that the timestep in the `MultiAgentBatch` is identical # to the episode timestep. check(len(batch), len(episode)) # Assert that all agents are present in the multi-agent batch. # Note, all agents have collected samples. for agent_id in episode.agent_ids: self.assertTrue(agent_id in batch.policy_batches) # Assert that the recorded history length is correct. for agent_id, agent_eps in episode.agent_episodes.items(): check(len(agent_eps), len(batch[agent_id])) # Assert that terminated agents are terminated in the sample batch. for agent_id in ["agent_1", "agent_5"]: self.assertTrue(batch[agent_id]["terminateds"][-1]) # Now test that when creating a successor its sample batch will # contain the correct values. successor = episode.cut() # Run 100 more timesteps for the successor. successor, env = self._mock_multi_agent_records_from_env( episode=successor, env=env, init=False ) # Convert this episode to a `MultiAgentBatch`. batch = successor.get_sample_batch() # Assert that the number of timesteps match between episode and batch. # Note, the successor starts at `ts=100`. check(len(batch), len(successor)) # Assert that all agents that were not done, yet, are present in the batch. for agent_id in env._agents_alive: self.assertTrue(agent_id in batch.policy_batches) # Ensure that the timesteps for each agent matches the it's batch length. 
for agent_id, agent_eps in successor.agent_episodes.items(): # Note, we take over agent_ids if not agent_eps.is_done: check(len(agent_eps), len(batch[agent_id])) # Assert that now all agents are truncated b/c the environment truncated # them. for agent_id in batch.policy_batches: self.assertTrue(batch[agent_id]["truncateds"][-1]) # Test now that when we concatenate the same logic holds. episode.concat_episode(successor) # Convert the concatenated episode to a sample batch now. batch = episode.get_sample_batch() # Assert that the length of episode and batch match. check(len(batch), len(episode)) # Assert that all agents are present in the multi-agent batch. # Note, in the concatenated episode - in contrast to the successor # - we have all agents stepped. for agent_id in episode.agent_ids: self.assertTrue(agent_id in batch.policy_batches) # Assert that the recorded history length is correct. for agent_id, agent_eps in episode.agent_episodes.items(): check(len(agent_eps), len(batch[agent_id])) # Assert that terminated agents are terminated in the sample batch. for agent_id in ["agent_1", "agent_5"]: self.assertTrue(batch[agent_id]["terminateds"][-1]) # Assert that all the other agents are truncated by the environment. for agent_id in env._agents_alive: self.assertTrue(batch[agent_id]["truncateds"][-1]) # Finally, test that an empty episode, gives an empty batch. episode = MultiAgentEpisode(agent_ids=env.agents) # Convert now to sample batch. batch = episode.get_sample_batch() # Ensure that this batch is empty. 
check(len(batch), 0) def _create_simple_episode(self, obs, len_lookback_buffer=0): return MultiAgentEpisode( observations=obs, actions=obs[:-1], rewards=[{aid: o / 10 for aid, o in o_dict.items()} for o_dict in obs[:-1]], len_lookback_buffer=len_lookback_buffer, ) def _mock_multi_agent_records_from_env( self, size: int = 100, episode: MultiAgentEpisode = None, env: gym.Env = None, init: bool = True, truncate: bool = True, seed: Optional[int] = 42, ) -> Tuple[MultiAgentEpisode, gym.Env]: # If the environment does not yet exist, create one. env = env or MultiAgentTestEnv(truncate=truncate) # If no episode is given, construct one. # We give it the `agent_ids` to make it create all objects. episode = MultiAgentEpisode() if episode is None else episode # We initialize the episode, if requested. if init: obs, info = env.reset(seed=seed) episode.add_env_reset(observations=obs, infos=info) # In the other case we need at least the last observations for the next # actions. else: obs = dict(episode.get_observations(-1)) # Sample `size` many records. done_agents = {aid for aid, t in episode.get_terminateds().items() if t} for i in range(env.t, env.t + size): action = { agent_id: i + 1 for agent_id in obs if agent_id not in done_agents } obs, reward, terminated, truncated, info = env.step(action) done_agents |= {a for a, v in terminated.items() if v is True} done_agents |= {a for a, v in truncated.items() if v is True} episode.add_env_step( observations=obs, actions=action, rewards=reward, infos=info, terminateds=terminated, truncateds=truncated, extra_model_outputs={agent_id: {"extra": 10} for agent_id in action}, ) # Return both, episode and environment. return episode, env @staticmethod def _mock_multi_agent_records(): # Create some simple observations, actions, rewards, infos and # extra model outputs. observations = [ {"agent_1": 0, "agent_2": 0, "agent_3": 0}, # Here agent 2 is stepping, but does not receive a next # observation. 
{"agent_1": 1, "agent_3": 1, "agent_4": 1}, # Here agents 1 and 3 have stepped, but received no next # observation. their actions should go into the buffers. {"agent_2": 2, "agent_4": 2}, ] actions = [ # Here agent_2 has to buffer. {"agent_1": 0, "agent_2": 0, "agent_3": 0}, {"agent_1": 1, "agent_3": 1, "agent_4": 1}, ] rewards = [ # Here agent 4 has to buffer the reward as does not have # actions nor observation. {"agent_1": 0.5, "agent_2": 0.6, "agent_3": 0.7}, # Agent 4 should now release the buffer with reward 1.0 # and add the next reward to it, as it stepped and received # a next observation. {"agent_1": 1.1, "agent_3": 1.2, "agent_4": 1.3}, ] infos = [ {"agent_1": {"a1_i0": 1}, "agent_2": {"a2_i0": 2}, "agent_3": {"a3_i0": 3}}, { "agent_1": {"a1_i1": 1.1}, "agent_3": {"a3_i1": 3.1}, "agent_4": {"a4_i1": 4.1}, }, {"agent_2": {"a2_i2": 2.2}, "agent_4": {"a4_i2": 4.2}}, ] # Let no agent be terminated or truncated. terminateds = { "__all__": False, "agent_1": False, "agent_3": False, "agent_4": False, } truncateds = { "__all__": False, "agent_1": False, "agent_3": False, "agent_4": False, } return observations, actions, rewards, terminateds, truncateds, infos def test_setters(self): """Tests whether the MultiAgentEpisode's setter methods work as expected. Also tests numpy'ized episodes. 
This test covers all setter methods: - set_observations - set_actions - set_rewards - set_extra_model_outputs Each setter is tested with various indexing scenarios including: - Single index - List of indices - Slice objects - Negative indices (both regular and lookback buffer interpretation) Uses two agents: a0 and a1 """ import copy SOME_KEY = "some_key" # Create a simple multi-agent episode with two agents without lookback buffer first for basic tests episode = MultiAgentEpisode( observations=[ {"a0": 100, "a1": 200}, # Initial observations {"a0": 101, "a1": 201}, {"a0": 102, "a1": 202}, {"a0": 103, "a1": 203}, {"a0": 104, "a1": 204}, {"a0": 105, "a1": 205}, {"a0": 106, "a1": 206}, ], actions=[ {"a0": 1, "a1": 11}, {"a0": 2, "a1": 12}, {"a0": 3, "a1": 13}, {"a0": 4, "a1": 14}, {"a0": 5, "a1": 15}, {"a0": 6, "a1": 16}, ], rewards=[ {"a0": 0.1, "a1": 1.1}, {"a0": 0.2, "a1": 1.2}, {"a0": 0.3, "a1": 1.3}, {"a0": 0.4, "a1": 1.4}, {"a0": 0.5, "a1": 1.5}, {"a0": 0.6, "a1": 1.6}, ], extra_model_outputs=[ {"a0": {SOME_KEY: 0.01}, "a1": {SOME_KEY: 1.01}}, {"a0": {SOME_KEY: 0.02}, "a1": {SOME_KEY: 1.02}}, {"a0": {SOME_KEY: 0.03}, "a1": {SOME_KEY: 1.03}}, {"a0": {SOME_KEY: 0.04}, "a1": {SOME_KEY: 1.04}}, {"a0": {SOME_KEY: 0.05}, "a1": {SOME_KEY: 1.05}}, {"a0": {SOME_KEY: 0.06}, "a1": {SOME_KEY: 1.06}}, ], len_lookback_buffer=0, ) test_patterns = [ # (description, new_data, indices) ("zero index", {"a0": 7353.0, "a1": 8353.0}, 0), ("single index", {"a0": 7353.0, "a1": 8353.0}, 2), ("negative index", {"a0": 7353.0, "a1": 8353.0}, -1), ("short list of indices", {"a0": [7353.0], "a1": [8353.0]}, [1]), ( "long list of indices", {"a0": [73.0, 53.0, 35.0, 53.0], "a1": [83.0, 63.0, 45.0, 63.0]}, [1, 2, 3, 4], ), ("short slice", {"a0": [7353.0], "a1": [8353.0]}, slice(2, 3)), ( "long slice", {"a0": [7.0, 3.0, 5.0, 3.0], "a1": [17.0, 13.0, 15.0, 13.0]}, slice(2, 6), ), ] # Test setters with all patterns numpy_episode = copy.deepcopy(episode).to_numpy() for e in [episode, 
numpy_episode]: print(f"Testing MultiAgent numpy'ized={e.is_numpy}...") for desc, new_data, indices in test_patterns: print(f"Testing MultiAgent {desc}...") expected_data = new_data test_new_data = new_data # Convert lists to numpy arrays for numpy episodes if e.is_numpy and isinstance(list(new_data.values())[0], list): test_new_data = { agent_id: np.array(agent_data) for agent_id, agent_data in new_data.items() } # Test set_observations e.set_observations(new_data=test_new_data, at_indices=indices) result = e.get_observations(indices) for agent_id in ["a0", "a1"]: check(result[agent_id], expected_data[agent_id]) # Test set_actions e.set_actions(new_data=test_new_data, at_indices=indices) result = e.get_actions(indices) for agent_id in ["a0", "a1"]: check(result[agent_id], expected_data[agent_id]) # Test set_rewards e.set_rewards(new_data=test_new_data, at_indices=indices) result = e.get_rewards(indices) for agent_id in ["a0", "a1"]: check(result[agent_id], expected_data[agent_id]) # Test set_extra_model_outputs # Note: We test this by directly checking the underlying agent episodes # since get_extra_model_outputs can be complex with indices e.set_extra_model_outputs( key=SOME_KEY, new_data=test_new_data, at_indices=indices ) # Verify that the setter worked by checking the individual agent episodes if desc in ["single index", "zero index"]: for agent_id in ["a0", "a1"]: actual_idx = e.agent_episodes[agent_id].t_started + indices if actual_idx < len( e.agent_episodes[agent_id].get_extra_model_outputs(SOME_KEY) ): check( e.agent_episodes[agent_id].get_extra_model_outputs( SOME_KEY )[actual_idx], expected_data[agent_id], ) elif desc == "negative index": for agent_id in ["a0", "a1"]: agent_ep = e.agent_episodes[agent_id] actual_idx = ( len(agent_ep.get_extra_model_outputs(SOME_KEY)) + indices ) if actual_idx >= 0: check( agent_ep.get_extra_model_outputs(SOME_KEY)[actual_idx], expected_data[agent_id], ) elif desc in ["long list of indices", "short list of indices"]: for 
agent_id in ["a0", "a1"]: agent_ep = e.agent_episodes[agent_id] for i, expected_val in enumerate(expected_data[agent_id]): actual_idx = agent_ep.t_started + indices[i] if actual_idx < len( agent_ep.get_extra_model_outputs(SOME_KEY) ): check( agent_ep.get_extra_model_outputs(SOME_KEY)[ actual_idx ], expected_val, ) elif desc in ["long slice", "short slice"]: for agent_id in ["a0", "a1"]: agent_ep = e.agent_episodes[agent_id] slice_indices = list(range(indices.start, indices.stop)) for i, expected_val in enumerate(expected_data[agent_id]): actual_idx = agent_ep.t_started + slice_indices[i] if actual_idx < len( agent_ep.get_extra_model_outputs(SOME_KEY) ): check( agent_ep.get_extra_model_outputs(SOME_KEY)[ actual_idx ], expected_val, ) if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(["-v", __file__]))
TestMultiAgentEpisode
python
pyparsing__pyparsing
tests/test_simple_unit.py
{ "start": 7036, "end": 10381 }
class ____(PyparsingExpressionTestCase): tests = [ PyparsingTest( desc="Match several words", expr=(pp.Word("x") | pp.Word("y"))[...], text="xxyxxyyxxyxyxxxy", expected_list=["xx", "y", "xx", "yy", "xx", "y", "x", "y", "xxx", "y"], ), PyparsingTest( desc="Match several words, skipping whitespace", expr=(pp.Word("x") | pp.Word("y"))[...], text="x x y xxy yxx y xyx xxy", # fmt: off expected_list=["x", "x", "y", "xx", "y", "y", "xx", "y", "x", "y", "x", "xx", "y"], # fmt: on ), PyparsingTest( desc="Match several words, skipping whitespace (old style)", expr=pp.OneOrMore(pp.Word("x") | pp.Word("y")), text="x x y xxy yxx y xyx xxy", # fmt: off expected_list=["x", "x", "y", "xx", "y", "y", "xx", "y", "x", "y", "x", "xx", "y"], # fmt: on ), PyparsingTest( desc="Match words and numbers - show use of results names to collect types of tokens", expr=(pp.Word(pp.alphas)("alpha*") | pp.pyparsing_common.integer("int*"))[ ... ], text="sdlfj23084ksdfs08234kjsdlfkjd0934", expected_list=["sdlfj", 23084, "ksdfs", 8234, "kjsdlfkjd", 934], expected_dict={ "alpha": ["sdlfj", "ksdfs", "kjsdlfkjd"], "int": [23084, 8234, 934], }, ), PyparsingTest( desc="Using DelimitedList (comma is the default delimiter)", expr=pp.DelimitedList(pp.Word(pp.alphas)), text="xxyx,xy,y,xxyx,yxx, xy", expected_list=["xxyx", "xy", "y", "xxyx", "yxx", "xy"], ), PyparsingTest( desc="Using DelimitedList (comma is the default delimiter) with trailing delimiter", expr=pp.DelimitedList(pp.Word(pp.alphas), allow_trailing_delim=True), text="xxyx,xy,y,xxyx,yxx, xy,", expected_list=["xxyx", "xy", "y", "xxyx", "yxx", "xy"], ), PyparsingTest( desc="Using DelimitedList (comma is the default delimiter) with minimum size", expr=pp.DelimitedList(pp.Word(pp.alphas), min=3), text="xxyx,xy", expected_fail_locn=7, ), PyparsingTest( desc="Using DelimitedList (comma is the default delimiter) with maximum size", expr=pp.DelimitedList(pp.Word(pp.alphas), max=3), text="xxyx,xy,y,xxyx,yxx, xy,", expected_list=["xxyx", "xy", "y"], ), 
PyparsingTest( desc="Using DelimitedList, with ':' delimiter", expr=pp.DelimitedList( pp.Word(pp.hexnums, exact=2), delim=":", combine=True ), text="0A:4B:73:21:FE:76", expected_list=["0A:4B:73:21:FE:76"], ), PyparsingTest( desc="Using DelimitedList, with ':' delimiter", expr=pp.DelimitedList( pp.Word(pp.hexnums, exact=2), delim=":", combine=True, allow_trailing_delim=True, ), text="0A:4B:73:21:FE:76:", expected_list=["0A:4B:73:21:FE:76:"], ), ]
TestRepetition
python
falconry__falcon
falcon/routing/compiled.py
{ "start": 45130, "end": 45443 }
class ____(_CxChild): def __init__(self, dict_value_name: str) -> None: self._dict_value_name = dict_value_name def src(self, indentation: int) -> str: return '{0}params.update({1})'.format( _TAB_STR * indentation, self._dict_value_name, )
_CxSetParamsFromDict
python
explosion__spaCy
spacy/training/pretrain.py
{ "start": 8051, "end": 9710 }
class ____: def __init__(self, frequency=1000000): self.loss = 0.0 self.prev_loss = 0.0 self.nr_word = 0 self.words_per_epoch = Counter() self.frequency = frequency self.last_time = time.time() self.last_update = 0 self.epoch_loss = 0.0 def update(self, epoch, loss, docs): self.loss += loss self.epoch_loss += loss words_in_batch = sum(len(doc) for doc in docs) self.words_per_epoch[epoch] += words_in_batch self.nr_word += words_in_batch words_since_update = self.nr_word - self.last_update if words_since_update >= self.frequency: wps = words_since_update / (time.time() - self.last_time) self.last_update = self.nr_word self.last_time = time.time() loss_per_word = self.loss - self.prev_loss status = ( epoch, self.nr_word, _smart_round(self.loss, width=10), _smart_round(loss_per_word, width=6), int(wps), ) self.prev_loss = float(self.loss) return status else: return None def _smart_round( figure: Union[float, int], width: int = 10, max_decimal: int = 4 ) -> str: """Round large numbers as integers, smaller numbers as decimals.""" n_digits = len(str(int(figure))) n_decimal = width - (n_digits + 1) if n_decimal <= 1: return str(int(figure)) else: n_decimal = min(n_decimal, max_decimal) format_str = "%." + str(n_decimal) + "f" return format_str % figure
ProgressTracker
python
walkccc__LeetCode
solutions/3336. Find the Number of Subsequences With Equal GCD/3336-2.py
{ "start": 0, "end": 1006 }
class ____: def subsequencePairCount(self, nums: list[int]) -> int: MOD = 1_000_000_007 maxNum = max(nums) # dp[i][x][y] := number of disjoint pairs `seq1` and `seq2` of # nums[0..i - 1], where GCD(seq1) == x and GCD(seq2) == y dp = [[[0] * (maxNum + 1) for _ in range(maxNum + 1)] for _ in range(len(nums) + 1)] dp[0][0][0] = 1 for i, num in enumerate(nums): for x in range(maxNum + 1): for y in range(maxNum + 1): # 1. Skip nums[i]. dp[i + 1][x][y] += dp[i][x][y] dp[i + 1][x][y] %= MOD # 2. Pick nums[i] in the first subsequence. newX = math.gcd(x, num) dp[i + 1][newX][y] += dp[i][x][y] dp[i + 1][newX][y] %= MOD # 3. Pick nums[i] in the second subsequence. newY = math.gcd(y, num) dp[i + 1][x][newY] += dp[i][x][y] dp[i + 1][x][newY] %= MOD return sum(dp[-1][g][g] for g in range(1, maxNum + 1)) % MOD
Solution
python
huggingface__transformers
src/transformers/models/marian/modeling_marian.py
{ "start": 26264, "end": 37432 }
class ____(MarianPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MarianDecoderLayer`] Args: config: MarianConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: MarianConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.decoder_vocab_size, config.d_model, self.padding_idx) self.embed_positions = MarianSinusoidalPositionalEmbedding( config.max_position_embeddings, config.d_model, self.padding_idx ) self.layers = nn.ModuleList([MarianDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. 
[What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*): Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache in the correct position and to infer the complete sequence length. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # retrieve input_ids and inputs_embeds if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") elif input_ids is not None: input = input_ids input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] input = inputs_embeds[:, :, -1] if inputs_embeds is None: inputs_embeds = self.embed_tokens(input) # Important to apply outside of the above `if`, in case user passes `embeds` inputs_embeds = inputs_embeds * self.embed_scale # initialize `past_key_values` if use_cache and past_key_values is None: past_key_values = ( EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config)) if encoder_hidden_states is not None or self.config.is_encoder_decoder else DynamicCache(config=self.config) ) batch_size, seq_length = inputs_embeds.size()[:-1] past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device ) if attention_mask is None and not is_torchdynamo_compiling(): # required mask seq length can be calculated via length of past cache mask_seq_length = past_key_values_length + seq_length attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) self_attn_cache = ( past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values ) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=self_attn_cache, ) encoder_attention_mask = create_bidirectional_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, ) # embed positions position_ids = 
self.embed_positions( (batch_size, seq_length), past_key_values_length, position_ids=cache_position ) hidden_states = inputs_embeds + position_ids hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue layer_outputs = decoder_layer( hidden_states, causal_mask, encoder_hidden_states, # as a positional argument for gradient checkpointing encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @auto_docstring
MarianDecoder
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/ext/associationproxy.py
{ "start": 11584, "end": 11738 }
class ____(Protocol): def __call__( self, proxy: _AssociationCollection[Any], collection: Iterable[Any] ) -> None: ...
_ProxyBulkSetProtocol
python
ansible__ansible
lib/ansible/playbook/conditional.py
{ "start": 869, "end": 1317 }
class ____: """ This is a mix-in class, to be used with Base to allow the object to be run conditionally when a condition is met or skipped. """ when = FieldAttribute(isa='list', default=list, extend=True, prepend=True) def __init__(self, *args, **kwargs): super().__init__() def _validate_when(self, attr, name, value): if not isinstance(value, list): setattr(self, name, [value])
Conditional
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 867535, "end": 868313 }
class ____(sgqlc.types.relay.Connection): """The connection type for PullRequestChangedFile.""" __schema__ = github_schema __field_names__ = ("edges", "nodes", "page_info", "total_count") edges = sgqlc.types.Field(sgqlc.types.list_of("PullRequestChangedFileEdge"), graphql_name="edges") """A list of edges.""" nodes = sgqlc.types.Field(sgqlc.types.list_of(PullRequestChangedFile), graphql_name="nodes") """A list of nodes.""" page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo") """Information to aid in pagination.""" total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount") """Identifies the total count of items in the connection."""
PullRequestChangedFileConnection
python
pyparsing__pyparsing
examples/TAP.py
{ "start": 2540, "end": 7553 }
class ____: def __init__(self, results): self.passedTests = [] self.failedTests = [] self.skippedTests = [] self.todoTests = [] self.bonusTests = [] self.bail = False if results.plan: expected = list(range(1, int(results.plan.ubound) + 1)) else: expected = list(range(1, len(results.tests) + 1)) for i, res in enumerate(results.tests): # test for bail out if res.BAIL: # ~ print "Test suite aborted: " + res.reason # ~ self.failedTests += expected[i:] self.bail = True self.skippedTests += [TAPTest.bailedTest(ii) for ii in expected[i:]] self.bailReason = res.reason break # ~ print res.dump() testnum = i + 1 if res.testNumber != "": if testnum != int(res.testNumber): print("ERROR! test %(testNumber)s out of sequence" % res) testnum = int(res.testNumber) res["testNumber"] = testnum test = TAPTest(res) if test.passed: self.passedTests.append(test) else: self.failedTests.append(test) if test.skipped: self.skippedTests.append(test) if test.todo: self.todoTests.append(test) if test.todo and test.passed: self.bonusTests.append(test) self.passedSuite = not self.bail and ( set(self.failedTests) - set(self.todoTests) == set() ) def summary(self, showPassed=False, showAll=False): testListStr = lambda tl: "[" + ",".join(str(t.num) for t in tl) + "]" summaryText = [] if showPassed or showAll: summaryText.append(f"PASSED: {testListStr(self.passedTests)}") if self.failedTests or showAll: summaryText.append(f"FAILED: {testListStr(self.failedTests)}") if self.skippedTests or showAll: summaryText.append(f"SKIPPED: {testListStr(self.skippedTests)}") if self.todoTests or showAll: summaryText.append(f"TODO: {testListStr(self.todoTests)}") if self.bonusTests or showAll: summaryText.append(f"BONUS: {testListStr(self.bonusTests)}") if self.passedSuite: summaryText.append("PASSED") else: summaryText.append("FAILED") return "\n".join(summaryText) # create TAPSummary objects from tapOutput parsed results, by setting # class as parse action tapOutputParser.set_parse_action(TAPSummary) def main(): 
import contextlib with contextlib.suppress(Exception): tapOutputParser.create_diagram("TAP_diagram.html", vertical=3) test1 = """\ 1..4 ok 1 - Input file opened not ok 2 - First line of the input valid ok 3 - Read the rest of the file not ok 4 - Summarized correctly # TODO Not written yet """ test2 = """\ ok 1 not ok 2 some description # TODO with a directive ok 3 a description only, no directive ok 4 # TODO directive only ok a description only, no directive ok # Skipped only a directive, no description ok """ test3 = """\ ok - created Board ok ok not ok ok ok ok ok # +------+------+------+------+ # | |16G | |05C | # | |G N C | |C C G | # | | G | | C +| # +------+------+------+------+ # |10C |01G | |03C | # |R N G |G A G | |C C C | # | R | G | | C +| # +------+------+------+------+ # | |01G |17C |00C | # | |G A G |G N R |R N R | # | | G | R | G | # +------+------+------+------+ ok - board has 7 tiles + starter tile 1..9 """ test4 = """\ 1..4 ok 1 - Creating test program ok 2 - Test program runs, no error not ok 3 - infinite loop # TODO halting problem unsolved not ok 4 - infinite loop 2 # TODO halting problem unsolved """ test5 = """\ 1..20 ok - database handle not ok - failed database login Bail out! Couldn't connect to database. """ test6 = """\ ok 1 - retrieving servers from the database # need to ping 6 servers ok 2 - pinged diamond ok 3 - pinged ruby not ok 4 - pinged sapphire ok 5 - pinged onyx not ok 6 - pinged quartz ok 7 - pinged gold 1..7 """ for test in (test1, test2, test3, test4, test5, test6): print(test) tapResult = tapOutputParser.parse_string(test)[0] print(tapResult.summary(showAll=True)) print() if __name__ == "__main__": main()
TAPSummary
python
PyCQA__pylint
tests/checkers/unittest_typecheck.py
{ "start": 5797, "end": 7465 }
class ____: """Tests for the _string_distance helper in pylint.checkers.typecheck.""" def test_string_distance_identical_strings(self) -> None: seq1 = "hi" seq2 = "hi" assert typecheck._string_distance(seq1, seq2, len(seq1), len(seq2)) == 0 seq1, seq2 = seq2, seq1 assert typecheck._string_distance(seq1, seq2, len(seq1), len(seq2)) == 0 def test_string_distance_empty_string(self) -> None: seq1 = "" seq2 = "hi" assert typecheck._string_distance(seq1, seq2, len(seq1), len(seq2)) == 2 seq1, seq2 = seq2, seq1 assert typecheck._string_distance(seq1, seq2, len(seq1), len(seq2)) == 2 def test_string_distance_edit_distance_one_character(self) -> None: seq1 = "hi" seq2 = "he" assert typecheck._string_distance(seq1, seq2, len(seq1), len(seq2)) == 1 seq1, seq2 = seq2, seq1 assert typecheck._string_distance(seq1, seq2, len(seq1), len(seq2)) == 1 def test_string_distance_edit_distance_multiple_similar_characters(self) -> None: seq1 = "hello" seq2 = "yelps" assert typecheck._string_distance(seq1, seq2, len(seq1), len(seq2)) == 3 seq1, seq2 = seq2, seq1 assert typecheck._string_distance(seq1, seq2, len(seq1), len(seq2)) == 3 def test_string_distance_edit_distance_all_dissimilar_characters(self) -> None: seq1 = "yellow" seq2 = "orange" assert typecheck._string_distance(seq1, seq2, len(seq1), len(seq2)) == 6 seq1, seq2 = seq2, seq1 assert typecheck._string_distance(seq1, seq2, len(seq1), len(seq2)) == 6
TestTypeCheckerStringDistance
python
pallets__werkzeug
src/werkzeug/middleware/lint.py
{ "start": 817, "end": 890 }
class ____(Warning): """Warning class for WSGI warnings."""
WSGIWarning
python
getsentry__sentry
tests/sentry/options/test_store.py
{ "start": 519, "end": 6428 }
class ____(TestCase): @cached_property def store(self): c = LocMemCache("test", settings.CACHES["default"]) c.clear() return OptionsStore(cache=c) @cached_property def manager(self): return OptionsManager(store=self.store) @cached_property def key(self): return self.make_key() @pytest.fixture(autouse=True) def flush_local_cache(self): self.store.flush_local_cache() def make_key(self, ttl=10, grace=10, key_name: str | None = None): if key_name is None: key_name = uuid1().hex return self.manager.make_key(key_name, "", object, 0, ttl, grace, None) def test_simple(self) -> None: store, key = self.store, self.key assert store.get(key) is None assert store.set(key, "bar", UpdateChannel.CLI) assert store.get(key) == "bar" assert store.get_last_update_channel(key) == UpdateChannel.CLI assert store.delete(key) def test_not_in_store(self) -> None: assert self.store.get_last_update_channel(self.key) is None def test_simple_without_cache(self) -> None: store = OptionsStore(cache=None) key = self.make_key(key_name="foo") with pytest.raises(AssertionError) as e: store.get(key) assert ( str(e.value) == "Option 'foo' requested before cache initialization, which could result in excessive store queries" ) with pytest.raises(AssertionError) as e: store.set(key, "bar", UpdateChannel.CLI) assert str(e.value) == "cache must be configured before mutating options" with pytest.raises(AssertionError) as e: store.delete(key) assert str(e.value) == "cache must be configured before mutating options" @override_settings(SENTRY_OPTIONS_COMPLAIN_ON_ERRORS=False) def test_db_and_cache_unavailable(self) -> None: store, key = self.store, self.key with patch.object(Option.objects, "get_queryset", side_effect=RuntimeError()): # we can't update options if the db is unavailable with pytest.raises(RuntimeError): store.set(key, "bar", UpdateChannel.CLI) # Assert nothing was written to the local_cache assert not store._local_cache store.set(key, "bar", UpdateChannel.CLI) with patch.object(Option.objects, 
"get_queryset", side_effect=RuntimeError()): assert store.get(key) == "bar" with patch.object(store.cache, "get", side_effect=RuntimeError()): assert store.get(key) == "bar" store.flush_local_cache() assert store.get(key) is None @override_settings(SENTRY_OPTIONS_COMPLAIN_ON_ERRORS=False) @patch("sentry.options.store.time") def test_key_with_grace(self, mocked_time: MagicMock) -> None: store, key = self.store, self.make_key(10, 10) mocked_time.return_value = 0 store.set(key, "bar", UpdateChannel.CLI) with patch.object(Option.objects, "get_queryset", side_effect=RuntimeError()): with patch.object(store.cache, "get", side_effect=RuntimeError()): # Serves the value beyond TTL mocked_time.return_value = 15 assert store.get(key) == "bar" mocked_time.return_value = 21 assert store.get(key) is None # It should have also been evicted assert not store._local_cache @override_settings(SENTRY_OPTIONS_COMPLAIN_ON_ERRORS=False) @patch("sentry.options.store.time") def test_key_ttl(self, mocked_time: MagicMock) -> None: store, key = self.store, self.make_key(10, 0) mocked_time.return_value = 0 store.set(key, "bar", UpdateChannel.CLI) with patch.object(Option.objects, "get_queryset", side_effect=RuntimeError()): with patch.object(store.cache, "get", side_effect=RuntimeError()): assert store.get(key) == "bar" Option.objects.filter(key=key.name).update(value="lol") store.cache.delete(key.cache_key) # Still within TTL, so don't check database assert store.get(key) == "bar" mocked_time.return_value = 15 with patch.object(Option.objects, "get_queryset", side_effect=RuntimeError()): with patch.object(store.cache, "get", side_effect=RuntimeError()): assert store.get(key) is None assert store.get(key) == "lol" @patch("sentry.options.store.time") def test_clean_local_cache(self, mocked_time: MagicMock) -> None: store = self.store mocked_time.return_value = 0 key1 = self.make_key(10, 0) # should expire after 10 key2 = self.make_key(10, 5) # should expire after 15 key3 = self.make_key(10, 10) 
# should expire after 20 key4 = self.make_key(10, 15) # should expire after 25 store.set(key1, "x", UpdateChannel.CLI) store.set(key2, "x", UpdateChannel.CLI) store.set(key3, "x", UpdateChannel.CLI) store.set(key4, "x", UpdateChannel.CLI) assert len(store._local_cache) == 4 mocked_time.return_value = 0 store.clean_local_cache() assert len(store._local_cache) == 4 mocked_time.return_value = 11 store.clean_local_cache() assert len(store._local_cache) == 3 assert key1.cache_key not in store._local_cache mocked_time.return_value = 21 store.clean_local_cache() assert len(store._local_cache) == 1 assert key1.cache_key not in store._local_cache assert key2.cache_key not in store._local_cache assert key3.cache_key not in store._local_cache mocked_time.return_value = 26 store.clean_local_cache() assert not store._local_cache
OptionsStoreTest
python
sqlalchemy__sqlalchemy
test/orm/test_unitofwork.py
{ "start": 2630, "end": 4696 }
class ____(fixtures.MappedTest): __requires__ = ("unicode_connections",) @classmethod def define_tables(cls, metadata): uni_type = sa.Unicode(50).with_variant( sa.Unicode(50, collation="utf8_unicode_ci"), "mysql" ) Table( "uni_t1", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("txt", uni_type, unique=True), ) Table( "uni_t2", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("txt", uni_type, ForeignKey("uni_t1")), ) @classmethod def setup_classes(cls): class Test(cls.Basic): pass class Test2(cls.Basic): pass def test_basic(self): Test, uni_t1 = self.classes.Test, self.tables.uni_t1 self.mapper_registry.map_imperatively(Test, uni_t1) txt = "\u0160\u0110\u0106\u010c\u017d" t1 = Test(id=1, txt=txt) self.assert_(t1.txt == txt) session = fixture_session() session.add(t1) session.commit() self.assert_(t1.txt == txt) def test_relationship(self): Test, uni_t2, uni_t1, Test2 = ( self.classes.Test, self.tables.uni_t2, self.tables.uni_t1, self.classes.Test2, ) self.mapper_registry.map_imperatively( Test, uni_t1, properties={"t2s": relationship(Test2)} ) self.mapper_registry.map_imperatively(Test2, uni_t2) txt = "\u0160\u0110\u0106\u010c\u017d" t1 = Test(txt=txt) t1.t2s.append(Test2()) t1.t2s.append(Test2()) session = fixture_session(expire_on_commit=False) session.add(t1) session.commit() session.close() session = fixture_session() t1 = session.query(Test).filter_by(id=t1.id).one() assert len(t1.t2s) == 2
UnicodeTest
python
wandb__wandb
landfill/functional_tests/artifacts/public-link-model.py
{ "start": 126, "end": 1475 }
class ____(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout(0.25) self.dropout2 = nn.Dropout(0.5) self.fc1 = nn.Linear(9216, 128) self.fc2 = nn.Linear(128, 10) def forward(self, x): x = self.conv1(x) x = F.relu(x) x = self.conv2(x) x = F.relu(x) x = F.max_pool2d(x, 2) x = self.dropout1(x) x = torch.flatten(x, 1) x = self.fc1(x) x = F.relu(x) x = self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1) return output def main(): # create an artifact # call .wait() to get a Public Artifact bound to it # and then do link on that artifact run = wandb.init() with open("my-dataset.txt", "w") as fp: fp.write("this-is-data") try: artifact = run.use_artifact("my-art-name:latest", "my-art-type") except CommError: artifact = wandb.Artifact("my-art-name", "my-art-type") artifact.add_file("my-dataset.txt") artifact = run.log_artifact(artifact) artifact.wait() artifact.link("project/test_portfolio_public_link_test", aliases="best") run.finish() if __name__ == "__main__": main()
Net
python
fluentpython__example-code-2e
10-dp-1class-func/strategy_param.py
{ "start": 1247, "end": 1487 }
class ____: def __init__(self, product: str, quantity: int, price: float): self.product = product self.quantity = quantity self.price = price def total(self): return self.price * self.quantity
LineItem
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_bar20.py
{ "start": 315, "end": 1421 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_bar20.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "bar"}) chart.axis_ids = [45925120, 45927040] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) worksheet.write("A7", "Pear") chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.add_series({"values": "=Sheet1!$B$1:$B$5", "name": "Apple"}) chart.add_series({"values": "=Sheet1!$C$1:$C$5", "name": "=Sheet1!$A$7"}) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
streamlit__streamlit
lib/streamlit/elements/widgets/file_uploader.py
{ "start": 5035, "end": 21218 }
class ____: # Multiple overloads are defined on `file_uploader()` below to represent # the different return types of `file_uploader()`. # These return types differ according to the value of the `accept_multiple_files` argument. # There must be 2x2=4 overloads to cover all the possible arguments, # as these overloads must be mutually exclusive for mypy. # There are 3 associated variables, each with 2+ options. # 1. The `accept_multiple_files` argument is set as `True` or `"directory"`, # or it is set as `False` or omitted, in which case the default value `False`. # 2. The `type` argument may or may not be provided as a keyword-only argument. # 3. Directory uploads always return a list of UploadedFile objects. # 1. type is given as not a keyword-only argument # 2. accept_multiple_files = True or "directory" @overload def file_uploader( self, label: str, type: str | Sequence[str] | None, accept_multiple_files: Literal[True, "directory"], key: Key | None = None, help: str | None = None, on_change: WidgetCallback | None = None, args: WidgetArgs | None = None, kwargs: WidgetKwargs | None = None, *, disabled: bool = False, label_visibility: LabelVisibility = "visible", width: WidthWithoutContent = "stretch", ) -> list[UploadedFile]: ... # 1. type is given as not a keyword-only argument # 2. accept_multiple_files = False or omitted @overload def file_uploader( self, label: str, type: str | Sequence[str] | None, accept_multiple_files: Literal[False] = False, key: Key | None = None, help: str | None = None, on_change: WidgetCallback | None = None, args: WidgetArgs | None = None, kwargs: WidgetKwargs | None = None, *, disabled: bool = False, label_visibility: LabelVisibility = "visible", width: WidthWithoutContent = "stretch", ) -> UploadedFile | None: ... # The following 2 overloads represent the cases where # the `type` argument is a keyword-only argument. # See https://github.com/python/mypy/issues/4020#issuecomment-737600893 # for the related discussions and examples. 
# 1. type is skipped or a keyword argument # 2. accept_multiple_files = True or "directory" @overload def file_uploader( self, label: str, *, accept_multiple_files: Literal[True, "directory"], type: str | Sequence[str] | None = None, key: Key | None = None, help: str | None = None, on_change: WidgetCallback | None = None, args: WidgetArgs | None = None, kwargs: WidgetKwargs | None = None, disabled: bool = False, label_visibility: LabelVisibility = "visible", width: WidthWithoutContent = "stretch", ) -> list[UploadedFile]: ... # 1. type is skipped or a keyword argument # 2. accept_multiple_files = False or omitted @overload def file_uploader( self, label: str, *, accept_multiple_files: Literal[False] = False, type: str | Sequence[str] | None = None, key: Key | None = None, help: str | None = None, on_change: WidgetCallback | None = None, args: WidgetArgs | None = None, kwargs: WidgetKwargs | None = None, disabled: bool = False, label_visibility: LabelVisibility = "visible", width: WidthWithoutContent = "stretch", ) -> UploadedFile | None: ... @gather_metrics("file_uploader") def file_uploader( self, label: str, type: str | Sequence[str] | None = None, accept_multiple_files: AcceptMultipleFiles = False, key: Key | None = None, help: str | None = None, on_change: WidgetCallback | None = None, args: WidgetArgs | None = None, kwargs: WidgetKwargs | None = None, *, # keyword-only arguments: disabled: bool = False, label_visibility: LabelVisibility = "visible", width: WidthWithoutContent = "stretch", ) -> UploadedFile | list[UploadedFile] | None: r"""Display a file uploader widget. By default, uploaded files are limited to 200 MB each. You can configure this using the ``server.maxUploadSize`` config option. For more information on how to set config options, see |config.toml|_. .. |config.toml| replace:: ``config.toml`` .. 
_config.toml: https://docs.streamlit.io/develop/api-reference/configuration/config.toml Parameters ---------- label : str A short label explaining to the user what this file uploader is for. The label can optionally contain GitHub-flavored Markdown of the following types: Bold, Italics, Strikethroughs, Inline Code, Links, and Images. Images display like icons, with a max height equal to the font height. Unsupported Markdown elements are unwrapped so only their children (text contents) render. Display unsupported elements as literal characters by backslash-escaping them. E.g., ``"1\. Not an ordered list"``. See the ``body`` parameter of |st.markdown|_ for additional, supported Markdown directives. For accessibility reasons, you should never set an empty label, but you can hide it with ``label_visibility`` if needed. In the future, we may disallow empty labels by raising an exception. .. |st.markdown| replace:: ``st.markdown`` .. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown type : str, list of str, or None The allowed file extension(s) for uploaded files. This can be one of the following types: - ``None`` (default): All file extensions are allowed. - A string: A single file extension is allowed. For example, to only accept CSV files, use ``"csv"``. - A sequence of strings: Multiple file extensions are allowed. For example, to only accept JPG/JPEG and PNG files, use ``["jpg", "jpeg", "png"]``. .. note:: This is a best-effort check, but doesn't provide a security guarantee against users uploading files of other types or type extensions. The correct handling of uploaded files is part of the app developer's responsibility. accept_multiple_files : bool or "directory" Whether to accept more than one file in a submission. This can be one of the following values: - ``False`` (default): The user can only submit one file at a time. - ``True``: The user can upload multiple files at the same time. 
- ``"directory"``: The user can select a directory to upload all files in the directory and its subdirectories. If ``type`` is set, only files matching those type(s) will be uploaded. When this is ``True`` or ``"directory"``, the return value will be a list and a user can additively select files if they click the browse button on the widget multiple times. key : str or int An optional string or integer to use as the unique key for the widget. If this is omitted, a key will be generated for the widget based on its content. No two widgets may have the same key. help : str or None A tooltip that gets displayed next to the widget label. Streamlit only displays the tooltip when ``label_visibility="visible"``. If this is ``None`` (default), no tooltip is displayed. The tooltip can optionally contain GitHub-flavored Markdown, including the Markdown directives described in the ``body`` parameter of ``st.markdown``. on_change : callable An optional callback invoked when this file_uploader's value changes. args : list or tuple An optional list or tuple of args to pass to the callback. kwargs : dict An optional dict of kwargs to pass to the callback. disabled : bool An optional boolean that disables the file uploader if set to ``True``. The default is ``False``. label_visibility : "visible", "hidden", or "collapsed" The visibility of the label. The default is ``"visible"``. If this is ``"hidden"``, Streamlit displays an empty spacer instead of the label, which can help keep the widget aligned with other widgets. If this is ``"collapsed"``, Streamlit displays no label or spacer. width : "stretch" or int The width of the file uploader widget. This can be one of the following: - ``"stretch"`` (default): The width of the widget matches the width of the parent container. - An integer specifying the width in pixels: The widget has a fixed width. 
If the specified width is greater than the width of the parent container, the width of the widget matches the width of the parent container. Returns ------- None, UploadedFile, or list of UploadedFile - If accept_multiple_files is ``False``, returns either ``None`` or an ``UploadedFile`` object. - If accept_multiple_files is ``True`` or ``"directory"``, returns a list with the uploaded files as ``UploadedFile`` objects. If no files were uploaded, returns an empty list. The ``UploadedFile`` class is a subclass of ``BytesIO``, and therefore is "file-like". This means you can pass an instance of it anywhere a file is expected. Examples -------- **Example 1: Accept a single file at a time** >>> import streamlit as st >>> import pandas as pd >>> from io import StringIO >>> >>> uploaded_file = st.file_uploader("Choose a file") >>> if uploaded_file is not None: ... # To read file as bytes: ... bytes_data = uploaded_file.getvalue() ... st.write(bytes_data) >>> ... # To convert to a string based IO: ... stringio = StringIO(uploaded_file.getvalue().decode("utf-8")) ... st.write(stringio) >>> ... # To read file as string: ... string_data = stringio.read() ... st.write(string_data) >>> ... # Can be used wherever a "file-like" object is accepted: ... dataframe = pd.read_csv(uploaded_file) ... st.write(dataframe) **Example 2: Accept multiple files at a time** >>> import pandas as pd >>> import streamlit as st >>> >>> uploaded_files = st.file_uploader( ... "Upload data", accept_multiple_files=True, type="csv" ... ) >>> for uploaded_file in uploaded_files: ... df = pd.read_csv(uploaded_file) ... st.write(df) .. output:: https://doc-file-uploader.streamlit.app/ height: 375px **Example 3: Accept an entire directory** >>> import streamlit as st >>> >>> uploaded_files = st.file_uploader( ... "Upload images", accept_multiple_files="directory", type=["jpg", "png"] ... ) >>> for uploaded_file in uploaded_files: ... st.image(uploaded_file) .. 
output:: https://doc-file-uploader-directory.streamlit.app/ height: 375px """ ctx = get_script_run_ctx() return self._file_uploader( label=label, type=type, accept_multiple_files=accept_multiple_files, key=key, help=help, on_change=on_change, args=args, kwargs=kwargs, disabled=disabled, label_visibility=label_visibility, width=width, ctx=ctx, ) def _file_uploader( self, label: str, type: str | Sequence[str] | None = None, accept_multiple_files: AcceptMultipleFiles = False, key: Key | None = None, help: str | None = None, on_change: WidgetCallback | None = None, args: WidgetArgs | None = None, kwargs: WidgetKwargs | None = None, *, # keyword-only arguments: label_visibility: LabelVisibility = "visible", disabled: bool = False, ctx: ScriptRunContext | None = None, width: WidthWithoutContent = "stretch", ) -> UploadedFile | list[UploadedFile] | None: key = to_key(key) check_widget_policies( self.dg, key, on_change, default_value=None, writes_allowed=False, ) maybe_raise_label_warnings(label, label_visibility) element_id = compute_and_register_element_id( "file_uploader", user_key=key, # Treat the provided key as the main identity; only include # changes to the type and accept_multiple_files parameters in # the identity computation as those can invalidate the current value. 
key_as_main_identity={"type", "accept_multiple_files"}, dg=self.dg, label=label, type=type, accept_multiple_files=accept_multiple_files, help=help, width=width, ) normalized_type = normalize_upload_file_type(type) if type else None file_uploader_proto = FileUploaderProto() file_uploader_proto.id = element_id file_uploader_proto.label = label file_uploader_proto.type[:] = ( normalized_type if normalized_type is not None else [] ) file_uploader_proto.max_upload_size_mb = config.get_option( "server.maxUploadSize" ) # Handle directory uploads - they should enable multiple files and set the directory flag is_directory_upload = accept_multiple_files == "directory" file_uploader_proto.multiple_files = ( accept_multiple_files is True or is_directory_upload ) file_uploader_proto.accept_directory = is_directory_upload file_uploader_proto.form_id = current_form_id(self.dg) file_uploader_proto.disabled = disabled file_uploader_proto.label_visibility.value = get_label_visibility_proto_value( label_visibility ) if help is not None: file_uploader_proto.help = dedent(help) serde = FileUploaderSerde(accept_multiple_files, allowed_types=normalized_type) # FileUploader's widget value is a list of file IDs # representing the current set of files that this uploader should # know about. widget_state = register_widget( file_uploader_proto.id, on_change_handler=on_change, args=args, kwargs=kwargs, deserializer=serde.deserialize, serializer=serde.serialize, ctx=ctx, value_type="file_uploader_state_value", ) validate_width(width) layout_config = LayoutConfig(width=width) self.dg._enqueue( "file_uploader", file_uploader_proto, layout_config=layout_config ) if isinstance(widget_state.value, DeletedFile): return None if isinstance(widget_state.value, list): return [f for f in widget_state.value if not isinstance(f, DeletedFile)] return widget_state.value @property def dg(self) -> DeltaGenerator: """Get our DeltaGenerator.""" return cast("DeltaGenerator", self)
FileUploaderMixin
python
wandb__wandb
wandb/sdk/data_types/html.py
{ "start": 4900, "end": 5019 }
class ____(_dtypes.Type): name = "html-file" types = [Html] _dtypes.TypeRegistry.add(_HtmlFileType)
_HtmlFileType
python
pytest-dev__pytest
src/_pytest/warning_types.py
{ "start": 2272, "end": 2595 }
class ____(PytestWarning): """An unraisable exception was reported. Unraisable exceptions are exceptions raised in :meth:`__del__ <object.__del__>` implementations and similar situations when the exception cannot be raised as normal. """ __module__ = "pytest" @final
PytestUnraisableExceptionWarning
python
keras-team__keras
guides/custom_train_step_in_tensorflow.py
{ "start": 12117, "end": 16172 }
class ____(keras.Model): def __init__(self, discriminator, generator, latent_dim): super().__init__() self.discriminator = discriminator self.generator = generator self.latent_dim = latent_dim self.d_loss_tracker = keras.metrics.Mean(name="d_loss") self.g_loss_tracker = keras.metrics.Mean(name="g_loss") self.seed_generator = keras.random.SeedGenerator(1337) @property def metrics(self): return [self.d_loss_tracker, self.g_loss_tracker] def compile(self, d_optimizer, g_optimizer, loss_fn): super().compile() self.d_optimizer = d_optimizer self.g_optimizer = g_optimizer self.loss_fn = loss_fn def train_step(self, real_images): if isinstance(real_images, tuple): real_images = real_images[0] # Sample random points in the latent space batch_size = tf.shape(real_images)[0] random_latent_vectors = keras.random.normal( shape=(batch_size, self.latent_dim), seed=self.seed_generator ) # Decode them to fake images generated_images = self.generator(random_latent_vectors) # Combine them with real images combined_images = tf.concat([generated_images, real_images], axis=0) # Assemble labels discriminating real from fake images labels = tf.concat( [tf.ones((batch_size, 1)), tf.zeros((batch_size, 1))], axis=0 ) # Add random noise to the labels - important trick! labels += 0.05 * keras.random.uniform( tf.shape(labels), seed=self.seed_generator ) # Train the discriminator with tf.GradientTape() as tape: predictions = self.discriminator(combined_images) d_loss = self.loss_fn(labels, predictions) grads = tape.gradient(d_loss, self.discriminator.trainable_weights) self.d_optimizer.apply(grads, self.discriminator.trainable_weights) # Sample random points in the latent space random_latent_vectors = keras.random.normal( shape=(batch_size, self.latent_dim), seed=self.seed_generator ) # Assemble labels that say "all real images" misleading_labels = tf.zeros((batch_size, 1)) # Train the generator (note that we should *not* update the weights # of the discriminator)! 
with tf.GradientTape() as tape: predictions = self.discriminator( self.generator(random_latent_vectors) ) g_loss = self.loss_fn(misleading_labels, predictions) grads = tape.gradient(g_loss, self.generator.trainable_weights) self.g_optimizer.apply(grads, self.generator.trainable_weights) # Update metrics and return their value. self.d_loss_tracker.update_state(d_loss) self.g_loss_tracker.update_state(g_loss) return { "d_loss": self.d_loss_tracker.result(), "g_loss": self.g_loss_tracker.result(), } """ Let's test-drive it: """ # Prepare the dataset. We use both the training & test MNIST digits. batch_size = 64 (x_train, _), (x_test, _) = keras.datasets.mnist.load_data() all_digits = np.concatenate([x_train, x_test]) all_digits = all_digits.astype("float32") / 255.0 all_digits = np.reshape(all_digits, (-1, 28, 28, 1)) dataset = tf.data.Dataset.from_tensor_slices(all_digits) dataset = dataset.shuffle(buffer_size=1024).batch(batch_size) gan = GAN( discriminator=discriminator, generator=generator, latent_dim=latent_dim ) gan.compile( d_optimizer=keras.optimizers.Adam(learning_rate=0.0003), g_optimizer=keras.optimizers.Adam(learning_rate=0.0003), loss_fn=keras.losses.BinaryCrossentropy(from_logits=True), ) # To limit the execution time, we only train on 100 batches. You can train on # the entire dataset. You will need about 20 epochs to get nice results. gan.fit(dataset.take(100), epochs=1) """ The ideas behind deep learning are simple, so why should their implementation be painful? """
GAN
python
ray-project__ray
rllib/examples/envs/classes/cartpole_with_dict_observation_space.py
{ "start": 100, "end": 2923 }
class ____(CartPoleEnv): """CartPole gym environment that has a dict observation space. However, otherwise, the information content in each observation remains the same. https://github.com/Farama-Foundation/Gymnasium/blob/main/gymnasium/envs/classic_control/cartpole.py # noqa The new observation space looks as follows (a little quirky, but this is for testing purposes only): gym.spaces.Dict({ "x-pos": [x-pos], "angular-pos": gym.spaces.Dict({"test": [angular-pos]}), "velocs": gym.spaces.Tuple([x-veloc, angular-veloc]), }) """ def __init__(self, config=None): super().__init__() # Fix our observation-space as described above. low = self.observation_space.low high = self.observation_space.high # Test as many quirks and oddities as possible: Dict, Dict inside a Dict, # Tuple inside a Dict, and both (1,)-shapes as well as ()-shapes for Boxes. # Also add a random discrete variable here. self.observation_space = gym.spaces.Dict( { "x-pos": gym.spaces.Box(low[0], high[0], (1,), dtype=np.float32), "angular-pos": gym.spaces.Dict( { "value": gym.spaces.Box(low[2], high[2], (), dtype=np.float32), # Add some random non-essential information. 
"some_random_stuff": gym.spaces.Discrete(3), } ), "velocs": gym.spaces.Tuple( [ # x-veloc gym.spaces.Box(low[1], high[1], (1,), dtype=np.float32), # angular-veloc gym.spaces.Box(low[3], high[3], (), dtype=np.float32), ] ), } ) def step(self, action): next_obs, reward, done, truncated, info = super().step(action) return self._compile_current_obs(next_obs), reward, done, truncated, info def reset(self, *, seed=None, options=None): init_obs, init_info = super().reset(seed=seed, options=options) return self._compile_current_obs(init_obs), init_info def _compile_current_obs(self, original_cartpole_obs): # original_cartpole_obs is [x-pos, x-veloc, angle, angle-veloc] return { "x-pos": np.array([original_cartpole_obs[0]], np.float32), "angular-pos": { "value": np.array(original_cartpole_obs[2]), "some_random_stuff": np.random.randint(3), }, "velocs": ( np.array([original_cartpole_obs[1]], np.float32), np.array(original_cartpole_obs[3], np.float32), ), }
CartPoleWithDictObservationSpace
python
numba__numba
numba/core/typing/builtins.py
{ "start": 15360, "end": 15539 }
class ____(AbstractTemplate): def generic(self, args, kws): [lhs, rhs] = args return signature(types.boolean, lhs, rhs) @infer_global(operator.is_)
CmpOpIdentity
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 577615, "end": 578235 }
class ____(sgqlc.types.relay.Connection): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("edges", "nodes", "page_info", "total_count") edges = sgqlc.types.Field( sgqlc.types.list_of("ReleaseAssetEdge"), graphql_name="edges" ) nodes = sgqlc.types.Field(sgqlc.types.list_of("ReleaseAsset"), graphql_name="nodes") page_info = sgqlc.types.Field( sgqlc.types.non_null(PageInfo), graphql_name="pageInfo" ) total_count = sgqlc.types.Field( sgqlc.types.non_null(Int), graphql_name="totalCount" )
ReleaseAssetConnection
python
numpy__numpy
numpy/lib/tests/test_mixins.py
{ "start": 230, "end": 2895 }
class ____(np.lib.mixins.NDArrayOperatorsMixin): def __init__(self, value): self.value = np.asarray(value) # One might also consider adding the built-in list type to this # list, to support operations like np.add(array_like, list) _HANDLED_TYPES = (np.ndarray, numbers.Number) def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): out = kwargs.get('out', ()) for x in inputs + out: # Only support operations with instances of _HANDLED_TYPES. # Use ArrayLike instead of type(self) for isinstance to # allow subclasses that don't override __array_ufunc__ to # handle ArrayLike objects. if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): return NotImplemented # Defer to the implementation of the ufunc on unwrapped values. inputs = tuple(x.value if isinstance(x, ArrayLike) else x for x in inputs) if out: kwargs['out'] = tuple( x.value if isinstance(x, ArrayLike) else x for x in out) result = getattr(ufunc, method)(*inputs, **kwargs) if type(result) is tuple: # multiple return values return tuple(type(self)(x) for x in result) elif method == 'at': # no return value return None else: # one return value return type(self)(result) def __repr__(self): return f'{type(self).__name__}({self.value!r})' def wrap_array_like(result): if type(result) is tuple: return tuple(ArrayLike(r) for r in result) else: return ArrayLike(result) def _assert_equal_type_and_value(result, expected, err_msg=None): assert_equal(type(result), type(expected), err_msg=err_msg) if isinstance(result, tuple): assert_equal(len(result), len(expected), err_msg=err_msg) for result_item, expected_item in zip(result, expected): _assert_equal_type_and_value(result_item, expected_item, err_msg) else: assert_equal(result.value, expected.value, err_msg=err_msg) assert_equal(getattr(result.value, 'dtype', None), getattr(expected.value, 'dtype', None), err_msg=err_msg) _ALL_BINARY_OPERATORS = [ operator.lt, operator.le, operator.eq, operator.ne, operator.gt, operator.ge, operator.add, operator.sub, operator.mul, 
operator.truediv, operator.floordiv, operator.mod, divmod, pow, operator.lshift, operator.rshift, operator.and_, operator.xor, operator.or_, ]
ArrayLike
python
django__django
tests/template_tests/filter_tests/test_capfirst.py
{ "start": 166, "end": 879 }
class ____(SimpleTestCase): @setup( { "capfirst01": ( "{% autoescape off %}{{ a|capfirst }} {{ b|capfirst }}" "{% endautoescape %}" ) } ) def test_capfirst01(self): output = self.engine.render_to_string( "capfirst01", {"a": "fred>", "b": mark_safe("fred&gt;")} ) self.assertEqual(output, "Fred> Fred&gt;") @setup({"capfirst02": "{{ a|capfirst }} {{ b|capfirst }}"}) def test_capfirst02(self): output = self.engine.render_to_string( "capfirst02", {"a": "fred>", "b": mark_safe("fred&gt;")} ) self.assertEqual(output, "Fred&gt; Fred&gt;")
CapfirstTests
python
getsentry__sentry
src/sentry/projects/project_rules/updater.py
{ "start": 445, "end": 2662 }
class ____: rule: Rule project: Project name: str | None = None owner: Actor | None = None environment: int | None = None action_match: str | None = None filter_match: str | None = None actions: Sequence[dict[str, str]] | None = None conditions: Sequence[dict[str, str]] | None = None frequency: int | None = None request: Request | None = None def run(self) -> Rule: with transaction.atomic(router.db_for_write(Rule)): self._update_name() self._update_owner() self._update_environment() self._update_project() self._update_actions() self._update_action_match() self._update_filter_match() self._update_conditions() self._update_frequency() self.rule.save() # uncaught errors will rollback the transaction workflow = update_migrated_issue_alert(self.rule) if workflow: logger.info( "workflow_engine.issue_alert.updated", extra={"rule_id": self.rule.id, "workflow_id": workflow.id}, ) return self.rule def _update_name(self) -> None: if self.name: self.rule.label = self.name def _update_owner(self) -> None: self.rule.owner = self.owner def _update_environment(self) -> None: self.rule.environment_id = self.environment def _update_project(self) -> None: if self.project: self.rule.project = self.project def _update_actions(self) -> None: if self.actions: self.rule.data["actions"] = self.actions def _update_action_match(self) -> None: if self.action_match: self.rule.data["action_match"] = self.action_match def _update_filter_match(self) -> None: if self.filter_match: self.rule.data["filter_match"] = self.filter_match def _update_conditions(self) -> None: self.rule.data["conditions"] = self.conditions or [] def _update_frequency(self) -> None: if self.frequency: self.rule.data["frequency"] = self.frequency
ProjectRuleUpdater
python
pyqtgraph__pyqtgraph
pyqtgraph/widgets/ProgressDialog.py
{ "start": 105, "end": 8791 }
class ____(QtWidgets.QProgressDialog): """ Extends QProgressDialog: * Adds context management so the dialog may be used in `with` statements * Allows nesting multiple progress dialogs Example:: with ProgressDialog("Processing..", minVal, maxVal) as dlg: # do stuff dlg.setValue(i) ## could also use dlg += 1 if dlg.wasCanceled(): raise Exception("Processing canceled by user") """ allDialogs = [] def __init__(self, labelText, minimum=0, maximum=100, cancelText='Cancel', parent=None, wait=250, busyCursor=False, disable=False, nested=False): """ ============== ================================================================ **Arguments:** labelText (required) cancelText Text to display on cancel button, or None to disable it. minimum maximum parent wait Length of time (im ms) to wait before displaying dialog busyCursor If True, show busy cursor until dialog finishes disable If True, the progress dialog will not be displayed and calls to wasCanceled() will always return False. If ProgressDialog is entered from a non-gui thread, it will always be disabled. nested (bool) If True, then this progress bar will be displayed inside any pre-existing progress dialogs that also allow nesting. 
============== ================================================================ """ # attributes used for nesting dialogs self.nestedLayout = None self._nestableWidgets = None self._nestingReady = False self._topDialog = None self._subBars = [] self.nested = nested # for rate-limiting Qt event processing during progress bar update self._lastProcessEvents = None isGuiThread = QtCore.QThread.currentThread() == QtCore.QCoreApplication.instance().thread() self.disabled = disable or (not isGuiThread) if self.disabled: return noCancel = False if cancelText is None: cancelText = '' noCancel = True self.busyCursor = busyCursor QtWidgets.QProgressDialog.__init__(self, labelText, cancelText, minimum, maximum, parent) # If this will be a nested dialog, then we ignore the wait time if nested is True and len(ProgressDialog.allDialogs) > 0: self.setMinimumDuration(2**30) else: self.setMinimumDuration(wait) self.setWindowModality(QtCore.Qt.WindowModality.WindowModal) self.setValue(self.minimum()) if noCancel: self.setCancelButton(None) def __enter__(self): if self.disabled: return self if self.busyCursor: QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.CursorShape.WaitCursor)) if self.nested and len(ProgressDialog.allDialogs) > 0: topDialog = ProgressDialog.allDialogs[0] topDialog._addSubDialog(self) self._topDialog = topDialog topDialog.canceled.connect(self.cancel) ProgressDialog.allDialogs.append(self) return self def __exit__(self, exType, exValue, exTrace): if self.disabled: return if self.busyCursor: QtWidgets.QApplication.restoreOverrideCursor() if self._topDialog is not None: self._topDialog._removeSubDialog(self) ProgressDialog.allDialogs.pop(-1) self.setValue(self.maximum()) def __iadd__(self, val): """Use inplace-addition operator for easy incrementing.""" if self.disabled: return self self.setValue(self.value()+val) return self def _addSubDialog(self, dlg): # insert widgets from another dialog into this one. 
# set a new layout and arrange children into it (if needed). self._prepareNesting() bar, btn = dlg._extractWidgets() # where should we insert this widget? Find the first slot with a # "removed" widget (that was left as a placeholder) inserted = False for i,bar2 in enumerate(self._subBars): if bar2.hidden: self._subBars.pop(i) bar2.hide() bar2.setParent(None) self._subBars.insert(i, bar) inserted = True break if not inserted: self._subBars.append(bar) # reset the layout while self.nestedLayout.count() > 0: self.nestedLayout.takeAt(0) for b in self._subBars: self.nestedLayout.addWidget(b) def _removeSubDialog(self, dlg): # don't remove the widget just yet; instead we hide it and leave it in # as a placeholder. bar, btn = dlg._extractWidgets() bar.hide() def _prepareNesting(self): # extract all child widgets and place into a new layout that we can add to if self._nestingReady is False: # top layout contains progress bars + cancel button at the bottom self._topLayout = QtWidgets.QGridLayout() self.setLayout(self._topLayout) self._topLayout.setContentsMargins(0, 0, 0, 0) # A vbox to contain all progress bars self.nestedVBox = QtWidgets.QWidget() self._topLayout.addWidget(self.nestedVBox, 0, 0, 1, 2) self.nestedLayout = QtWidgets.QVBoxLayout() self.nestedVBox.setLayout(self.nestedLayout) # re-insert all widgets bar, btn = self._extractWidgets() self.nestedLayout.addWidget(bar) self._subBars.append(bar) self._topLayout.addWidget(btn, 1, 1, 1, 1) self._topLayout.setColumnStretch(0, 100) self._topLayout.setColumnStretch(1, 1) self._topLayout.setRowStretch(0, 100) self._topLayout.setRowStretch(1, 1) self._nestingReady = True def _extractWidgets(self): # return: # 1. a single widget containing the label and progress bar # 2. 
the cancel button if self._nestableWidgets is None: label = [ch for ch in self.children() if isinstance(ch, QtWidgets.QLabel)][0] bar = [ch for ch in self.children() if isinstance(ch, QtWidgets.QProgressBar)][0] btn = [ch for ch in self.children() if isinstance(ch, QtWidgets.QPushButton)][0] sw = ProgressWidget(label, bar) self._nestableWidgets = (sw, btn) return self._nestableWidgets def resizeEvent(self, ev): if self._nestingReady: # don't let progress dialog manage widgets anymore. return return super().resizeEvent(ev) ## wrap all other functions to make sure they aren't being called from non-gui threads def setValue(self, val): if self.disabled: return QtWidgets.QProgressDialog.setValue(self, val) # Qt docs say this should happen automatically, but that doesn't seem # to be the case. if self.windowModality() == QtCore.Qt.WindowModality.WindowModal: now = perf_counter() if self._lastProcessEvents is None or (now - self._lastProcessEvents) > 0.2: QtWidgets.QApplication.processEvents() self._lastProcessEvents = now def setLabelText(self, val): if self.disabled: return QtWidgets.QProgressDialog.setLabelText(self, val) def setMaximum(self, val): if self.disabled: return QtWidgets.QProgressDialog.setMaximum(self, val) def setMinimum(self, val): if self.disabled: return QtWidgets.QProgressDialog.setMinimum(self, val) def wasCanceled(self): if self.disabled: return False return QtWidgets.QProgressDialog.wasCanceled(self) def maximum(self): if self.disabled: return 0 return QtWidgets.QProgressDialog.maximum(self) def minimum(self): if self.disabled: return 0 return QtWidgets.QProgressDialog.minimum(self)
ProgressDialog
python
kubernetes-client__python
kubernetes/client/models/v1_local_volume_source.py
{ "start": 383, "end": 4972 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'fs_type': 'str', 'path': 'str' } attribute_map = { 'fs_type': 'fsType', 'path': 'path' } def __init__(self, fs_type=None, path=None, local_vars_configuration=None): # noqa: E501 """V1LocalVolumeSource - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._fs_type = None self._path = None self.discriminator = None if fs_type is not None: self.fs_type = fs_type self.path = path @property def fs_type(self): """Gets the fs_type of this V1LocalVolumeSource. # noqa: E501 fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified. # noqa: E501 :return: The fs_type of this V1LocalVolumeSource. # noqa: E501 :rtype: str """ return self._fs_type @fs_type.setter def fs_type(self, fs_type): """Sets the fs_type of this V1LocalVolumeSource. fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified. # noqa: E501 :param fs_type: The fs_type of this V1LocalVolumeSource. # noqa: E501 :type: str """ self._fs_type = fs_type @property def path(self): """Gets the path of this V1LocalVolumeSource. # noqa: E501 path of the full path to the volume on the node. 
It can be either a directory or block device (disk, partition, ...). # noqa: E501 :return: The path of this V1LocalVolumeSource. # noqa: E501 :rtype: str """ return self._path @path.setter def path(self, path): """Sets the path of this V1LocalVolumeSource. path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...). # noqa: E501 :param path: The path of this V1LocalVolumeSource. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and path is None: # noqa: E501 raise ValueError("Invalid value for `path`, must not be `None`") # noqa: E501 self._path = path def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1LocalVolumeSource): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1LocalVolumeSource): return True return self.to_dict() != other.to_dict()
V1LocalVolumeSource
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_impls.py
{ "start": 33648, "end": 45048 }
class ____( AutoMaterializeRule, NamedTuple( "_SkipOnNotAllParentsUpdatedSinceCronRule", [("cron_schedule", str), ("timezone", str)], ), ): @property def decision_type(self) -> AutoMaterializeDecisionType: return AutoMaterializeDecisionType.SKIP @property def description(self) -> str: return f"waiting until all upstream assets have updated since the last cron schedule tick of '{self.cron_schedule}' (timezone: {self.timezone})" def passed_time_window(self, context: "AutomationContext") -> TimeWindow: """Returns the window of time that has passed between the previous two cron ticks. All parent assets must contain all data from this time window in order for this asset to be materialized. """ previous_ticks = reverse_cron_string_iterator( end_timestamp=context.legacy_context.evaluation_time.timestamp(), cron_string=self.cron_schedule, execution_timezone=self.timezone, ) end_time = next(previous_ticks) start_time = next(previous_ticks) return TimeWindow(start=start_time, end=end_time) def get_parent_subset_updated_since_cron( self, context: "AutomationContext", parent_asset_key: AssetKey, passed_time_window: TimeWindow, ) -> ValidAssetSubset: """Returns the AssetSubset of a given parent asset that has been updated since the end of the previous cron tick. If a value for this parent asset was computed on the previous evaluation, and that evaluation happened within the same cron tick as the current evaluation, then this value will be calculated incrementally from the previous value to avoid expensive queries. """ parent_partitions_def = context.asset_graph.get(parent_asset_key).partitions_def if ( # first tick of evaluating this condition context.legacy_context.node_cursor is None or context.legacy_context.previous_evaluation_timestamp is None # This additional check is neccessary as it is possible for this cursor to be None # even if the previous state is not None in the case that this asset and none of its # parents have been detected as materialized at any point. 
While in theory this should # not cause issues, in past versions we were not properly capturing materializations # of external assets, causing this detection to be faulty. This ensures that we can # properly re-calculate this value even in the case that we've incorrectly considered # a parent as unmaterialized in the past. or context.legacy_context.previous_max_storage_id is None # new cron tick has happened since the previous tick or passed_time_window.end.timestamp() > context.legacy_context.previous_evaluation_timestamp ): return ValidAssetSubset.coerce_from_subset( context.legacy_context.instance_queryer.get_asset_subset_updated_after_time( asset_key=parent_asset_key, after_time=passed_time_window.end ), parent_partitions_def, ) else: # previous state still valid previous_parent_subsets = ( context.legacy_context.node_cursor.get_structured_cursor(list) or [] ) previous_parent_subset = next( (s for s in previous_parent_subsets if s.key == parent_asset_key), ValidAssetSubset.empty( parent_asset_key, context.asset_graph.get(parent_asset_key).partitions_def ), ) # the set of asset partitions that have been updated since the previous evaluation new_parent_subset = ValidAssetSubset.coerce_from_subset( context.legacy_context.instance_queryer.get_asset_subset_updated_after_cursor( asset_key=parent_asset_key, after_cursor=context.legacy_context.previous_max_storage_id, require_data_version_update=False, ), parent_partitions_def, ) return new_parent_subset | previous_parent_subset def get_parent_subsets_updated_since_cron_by_key( self, context: "AutomationContext", passed_time_window: TimeWindow ) -> Mapping[AssetKey, ValidAssetSubset]: """Returns a mapping of parent asset keys to the AssetSubset of each parent that has been updated since the end of the previous cron tick. Does not compute this value for time-window partitioned parents, as their partitions encode the time windows they have processed. 
""" updated_subsets_by_key = {} for parent_asset_key in context.legacy_context.asset_graph.get( context.legacy_context.asset_key ).parent_keys: # no need to incrementally calculate updated time-window partitions definitions, as # their partitions encode the time windows they have processed. if isinstance( context.legacy_context.asset_graph.get(parent_asset_key).partitions_def, TimeWindowPartitionsDefinition, ): continue updated_subsets_by_key[parent_asset_key] = self.get_parent_subset_updated_since_cron( context, parent_asset_key, passed_time_window ) return updated_subsets_by_key def parent_updated_since_cron( self, context: "AutomationContext", passed_time_window: TimeWindow, parent_asset_key: AssetKey, child_asset_partition: AssetKeyPartitionKey, updated_parent_subset: ValidAssetSubset, ) -> bool: """Returns if, for a given child asset partition, the given parent asset been updated with information from the required time window. """ parent_partitions_def = context.legacy_context.asset_graph.get( parent_asset_key ).partitions_def if isinstance(parent_partitions_def, TimeWindowPartitionsDefinition): # for time window partitions definitions, we simply assert that all time partitions that # were newly created between the previous cron ticks have been materialized required_parent_partitions = parent_partitions_def.get_partition_keys_in_time_window( time_window=passed_time_window ) # for time window partitions definitions, we simply assert that all time partitions that return all( AssetKeyPartitionKey(parent_asset_key, partition_key) in context.legacy_context.instance_queryer.get_materialized_asset_subset( asset_key=parent_asset_key ) for partition_key in required_parent_partitions ) # for all other partitions definitions, we assert that all parent partition keys have # been materialized since the previous cron tick else: if parent_partitions_def is None: non_updated_parent_asset_partitions = updated_parent_subset.inverse( parent_partitions_def ).asset_partitions 
else: parent_subset = ( context.legacy_context.asset_graph.get_parent_partition_keys_for_child( child_asset_partition.partition_key, parent_asset_key, child_asset_partition.asset_key, ).partitions_subset ) non_updated_parent_asset_partitions = ( ValidAssetSubset(key=parent_asset_key, value=parent_subset) - updated_parent_subset ).asset_partitions return not any( not context.legacy_context.will_update_asset_partition(p) for p in non_updated_parent_asset_partitions ) def evaluate_for_asset(self, context: "AutomationContext") -> "AutomationResult": from dagster._core.definitions.declarative_automation.automation_condition import ( AutomationResult, ) passed_time_window = self.passed_time_window(context) has_new_passed_time_window = passed_time_window.end.timestamp() > ( context.legacy_context.previous_evaluation_timestamp or 0 ) updated_subsets_by_key = self.get_parent_subsets_updated_since_cron_by_key( context, passed_time_window ) # only need to evaluate net-new candidates and candidates whose parents have updated, unless # this is the first tick after a new cron schedule tick subset_to_evaluate = ( ( context.legacy_context.candidates_not_evaluated_on_previous_tick_subset | context.legacy_context.candidate_parent_has_or_will_update_subset ) if not has_new_passed_time_window else context.legacy_context.candidate_subset ) # the set of candidates for whom all parents have been updated since the previous cron tick all_parents_updated_subset = ValidAssetSubset.from_asset_partitions_set( context.legacy_context.asset_key, context.legacy_context.partitions_def, { candidate for candidate in subset_to_evaluate.asset_partitions if all( self.parent_updated_since_cron( context, passed_time_window, parent_asset_key, candidate, updated_subsets_by_key.get( parent_asset_key, ValidAssetSubset.empty( parent_asset_key, context.asset_graph.get(parent_asset_key).partitions_def, ), ), ) for parent_asset_key in context.legacy_context.asset_graph.get( candidate.asset_key ).parent_keys ) }, 
) # if your parents were all updated since the previous cron tick on the previous evaluation, # that will still be true unless a new cron tick has happened since the previous evaluation if not has_new_passed_time_window: all_parents_updated_subset = ( ValidAssetSubset.coerce_from_subset( context.legacy_context.previous_candidate_subset, context.legacy_context.partitions_def, ) - context.legacy_context.previous_true_subset ) | all_parents_updated_subset return AutomationResult( context, true_subset=context.asset_graph_view.legacy_get_asset_subset_from_valid_subset( context.legacy_context.candidate_subset - all_parents_updated_subset ), structured_cursor=list(updated_subsets_by_key.values()), ) @whitelist_for_serdes
SkipOnNotAllParentsUpdatedSinceCronRule
python
tensorflow__tensorflow
tensorflow/python/ops/numpy_ops/np_logic_test.py
{ "start": 1056, "end": 3628 }
class ____(test.TestCase): def setUp(self): super(LogicTest, self).setUp() self.array_transforms = [ lambda x: x, # Identity, ops.convert_to_tensor, np.array, lambda x: np.array(x, dtype=np.int32), lambda x: np.array(x, dtype=np.int64), lambda x: np.array(x, dtype=np.float32), lambda x: np.array(x, dtype=np.float64), np_array_ops.array, lambda x: np_array_ops.array(x, dtype=dtypes.int32), lambda x: np_array_ops.array(x, dtype=dtypes.int64), lambda x: np_array_ops.array(x, dtype=dtypes.float32), lambda x: np_array_ops.array(x, dtype=dtypes.float64), ] def testEqual(self): def run_test(x1, x2=None): if x2 is None: x2 = x1 for fn1 in self.array_transforms: for fn2 in self.array_transforms: arg1 = fn1(x1) arg2 = fn2(x2) self.match( np_math_ops.equal(arg1, arg2), np.equal( make_numpy_compatible(arg1), make_numpy_compatible(arg2))) run_test(1) run_test(1, 2) run_test([1, 2]) run_test([1, 2, 3], [2]) run_test([[1, 2], [3, 4]], [1, 2]) run_test([[1, 2], [1, 4]], [1, 2]) run_test([1, 2], [[1, 2], [1, 4]]) run_test([[1, 2], [3, 4]], [[1, 2], [3, 4]]) run_test([[1, 2], [3, 4]], [[1, 3], [3, 4]]) def match_shape(self, actual, expected, msg=None): if msg: msg = 'Shape match failed for: {}. Expected: {} Actual: {}'.format( msg, expected.shape, actual.shape) self.assertEqual(actual.shape, expected.shape, msg=msg) def match_dtype(self, actual, expected, msg=None): if msg: msg = 'Dtype match failed for: {}. 
Expected: {} Actual: {}.'.format( msg, expected.dtype, actual.dtype) self.assertEqual(actual.dtype, expected.dtype, msg=msg) def match(self, actual, expected, msg=None): msg_ = 'Expected: {} Actual: {}'.format(expected, actual) if msg: msg = '{} {}'.format(msg_, msg) else: msg = msg_ self.assertIsInstance(actual, np_arrays.ndarray) self.match_dtype(actual, expected, msg) self.match_shape(actual, expected, msg) if not actual.shape.rank: self.assertEqual(actual.tolist(), expected.tolist()) else: self.assertSequenceEqual(actual.tolist(), expected.tolist()) def make_numpy_compatible(s): return s if not isinstance(s, np_arrays.ndarray) else s.numpy() if __name__ == '__main__': ops.enable_eager_execution() np_math_ops.enable_numpy_methods_on_tensor() test.main()
LogicTest
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/roots/assets.py
{ "start": 983, "end": 1133 }
class ____(graphene.Union): class Meta: types = (GrapheneAsset, GrapheneAssetNotFoundError) name = "AssetOrError"
GrapheneAssetOrError
python
pypa__hatch
src/hatch/errors/__init__.py
{ "start": 40, "end": 101 }
class ____(HatchError): pass
PythonDistributionUnknownError
python
EpistasisLab__tpot
tpot/search_spaces/nodes/genetic_feature_selection.py
{ "start": 7085, "end": 9438 }
class ____(SearchSpace): def __init__(self, n_features, start_p=0.2, mutation_rate = 0.1, crossover_rate = 0.1, mutation_rate_rate = 0, # These are still experimental but seem to help. Theory is that it takes slower steps as it gets closer to the optimal solution. crossover_rate_rate = 0,# Otherwise is mutation_rate is too small, it takes forever, and if its too large, it never converges. ): """ A node that generates a GeneticFeatureSelectorIndividual. Uses genetic algorithm to select novel subsets of features. Parameters ---------- n_features : int Number of features in the dataset. start_p : float Probability of selecting a given feature for the initial subset of features. mutation_rate : float Probability of adding/removing a feature from the subset of features. crossover_rate : float Probability of swapping a feature between two subsets of features. mutation_rate_rate : float Probability of changing the mutation rate. (experimental) crossover_rate_rate : float Probability of changing the crossover rate. (experimental) """ self.n_features = n_features self.start_p = start_p self.mutation_rate = mutation_rate self.crossover_rate = crossover_rate self.mutation_rate_rate = mutation_rate_rate self.crossover_rate_rate = crossover_rate_rate def generate(self, rng=None) -> SklearnIndividual: return GeneticFeatureSelectorIndividual( mask=self.n_features, start_p=self.start_p, mutation_rate=self.mutation_rate, crossover_rate=self.crossover_rate, mutation_rate_rate=self.mutation_rate_rate, crossover_rate_rate=self.crossover_rate_rate, rng=rng )
GeneticFeatureSelectorNode
python
huggingface__transformers
src/transformers/models/metaclip_2/modular_metaclip_2.py
{ "start": 20619, "end": 27423 }
class ____(CLIPModel): """ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Args: config ([`MetaClip2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, MetaClip2Model >>> model = MetaClip2Model.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu") >>> processor = AutoProcessor.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... 
) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" def __init__(self, config: MetaClip2Config): super().__init__(config) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size text_model = MetaClip2TextModel._from_config(text_config) self.text_model = text_model.text_model vision_model = MetaClip2VisionModel._from_config(vision_config) self.vision_model = vision_model.vision_model self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, interpolate_pos_encoding: bool = False, **kwargs: Unpack[TransformersKwargs], ): r""" return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, MetaClip2Model >>> model = MetaClip2Model.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu") >>> processor = AutoProcessor.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... 
text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... ) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" return super().forward( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, return_loss=return_loss, interpolate_pos_encoding=interpolate_pos_encoding, **kwargs, ) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, ): r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`MetaClip2TextModel`]. Examples: ```python >>> from transformers import AutoTokenizer, MetaClip2Model >>> model = MetaClip2Model.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" return super().get_text_features( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, ) def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, interpolate_pos_encoding: bool = False, ): r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`MetaClip2VisionModel`]. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, MetaClip2Model >>> model = MetaClip2Model.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu") >>> processor = AutoProcessor.from_pretrained("facebook/metaclip-2-worldwide-huge-quickgelu") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) ```""" return super().get_image_features( pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, )
MetaClip2Model
python
apache__airflow
task-sdk/tests/task_sdk/bases/test_sensor.py
{ "start": 2034, "end": 2316 }
class ____(BaseSensorOperator): def __init__(self, return_value=False, **kwargs): super().__init__(**kwargs) self.return_value = return_value def execute_complete(self, context, event=None): raise AirflowException("Should be skipped")
DummyAsyncSensor
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/overloadImpl2.py
{ "start": 504, "end": 554 }
class ____(Protocol[T_co]): _target_: str
ClassA
python
getsentry__sentry
tests/sentry/ratelimits/test_cardinality.py
{ "start": 288, "end": 8550 }
class ____: """ Wrapper interface around the rate limiter, with specialized, stateful and primitive interface for more readable tests. """ def __init__(self, limiter: RedisCardinalityLimiter): self.limiter = limiter self.quota = Quota(window_seconds=3600, granularity_seconds=60, limit=10) self.timestamp = 3600 def add_value(self, value: int) -> int | None: values = self.add_values([value]) if values: (value,) = values return value else: return None def add_values(self, values: Sequence[int]) -> Collection[int]: request = RequestedQuota(prefix="hello", unit_hashes=values, quota=self.quota) new_timestamp, grants = self.limiter.check_within_quotas( [request], timestamp=self.timestamp ) self.limiter.use_quotas(grants, new_timestamp) (grant,) = grants return grant.granted_unit_hashes def test_basic(limiter: RedisCardinalityLimiter) -> None: helper = LimiterHelper(limiter) for _ in range(20): assert helper.add_value(1) == 1 for _ in range(20): assert helper.add_value(2) == 2 assert [helper.add_value(10 + i) for i in range(100)] == list(range(10, 18)) + [None] * 92 helper.timestamp += 3600 # an hour has passed, we should be able to admit 10 new keys # # note: we only virtually advanced the timestamp. The # `cardinality:timeseries` keys for 1, 2 still exist in this test setup # (and we would admit them on top of 10..20), but they won't in a # real-world scenario assert [helper.add_value(10 + i) for i in range(100)] == list(range(10, 20)) + [None] * 90 def test_multiple_prefixes(limiter: RedisCardinalityLimiter) -> None: """ Test multiple prefixes/organizations and just make sure we're not leaking state between prefixes. * `a` only consumes 5 of the quota first and runs out of quota in the second `check_within_quotas` call * `b` immediately exceeds the quota. 
* `c` fits comfortably into the quota at first (fills out the limit exactly) """ quota = Quota(window_seconds=3600, granularity_seconds=60, limit=10) requests = [ RequestedQuota(prefix="a", unit_hashes={1, 2, 3, 4, 5}, quota=quota), RequestedQuota(prefix="b", unit_hashes={1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, quota=quota), RequestedQuota( prefix="c", unit_hashes={11, 12, 13, 14, 15, 16, 17, 18, 19, 20}, quota=quota ), ] new_timestamp, grants = limiter.check_within_quotas(requests) assert grants == [ GrantedQuota(request=requests[0], granted_unit_hashes=[1, 2, 3, 4, 5], reached_quota=None), GrantedQuota( request=requests[1], granted_unit_hashes=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], reached_quota=quota, ), GrantedQuota( request=requests[2], granted_unit_hashes=[11, 12, 13, 14, 15, 16, 17, 18, 19, 20], reached_quota=None, ), ] limiter.use_quotas(grants, new_timestamp) requests = [ RequestedQuota(prefix="a", unit_hashes={6, 7, 8, 9, 10, 11}, quota=quota), RequestedQuota(prefix="b", unit_hashes={1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, quota=quota), RequestedQuota( prefix="c", unit_hashes={11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21}, quota=quota ), ] new_timestamp, grants = limiter.check_within_quotas(requests) assert grants == [ GrantedQuota( request=requests[0], granted_unit_hashes=[6, 7, 8, 9, 10], reached_quota=quota ), GrantedQuota( request=requests[1], granted_unit_hashes=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], reached_quota=quota, ), GrantedQuota( request=requests[2], granted_unit_hashes=[11, 12, 13, 14, 15, 16, 17, 18, 19, 20], reached_quota=quota, ), ] limiter.use_quotas(grants, new_timestamp) def test_sliding(limiter: RedisCardinalityLimiter) -> None: """ Our rate limiter has a sliding window of [now - 1 hour ; now], with a granularity of 1 hour. What that means is that, as time moves on, old hashes should be forgotten _one by one_, and the quota budget they occupy should become _gradually_ available to newer, never-seen-before items. 
""" helper = LimiterHelper(limiter) admissions = [] # We start with a limit of 10 new hashes per hour. We add a new hash and # advance time by 6 minutes, _100 times_ for i in range(100): admissions.append(helper.add_value(i)) helper.timestamp += 360 # We assert that _all 100 items_ are admitted/accepted. This is because we # have advanced time between each item. We have "slept" for 6 minutes a 100 # times, so we actually added 100 hashes over a span of 10 hours. That # should totally fit into our limit. assert admissions == list(range(100)) admissions = [] expected = [] # 100 hashes over 10 hours is "basically" 10 hashes over 1 hour. Since we # added items over a span of 10 hours, the limiter should've forgotten # about 90% of items already, meaning that in a real-world scenario, we # should accept 90 new hashes. # # But since we only advanced time virtually (and not in Redis for TTL # purposes), we actually only accept 10 items... a flaw in this test. # # Anyway, in the previous loop we added an item every 6 minutes. Now we're # adding an item 10 times per 6 minutes. So we should see every 10th item # being admitted. for i in range(100, 200): admissions.append(helper.add_value(i)) expected.append(i if i % 10 == 0 else None) helper.timestamp += 36 assert admissions == expected def test_sampling(limiter: RedisCardinalityLimiter) -> None: """ demonstrate behavior when "shard sampling" is active. If one out of 10 shards for an organization are stored, it is still possible to limit the exactly correct amount of hashes, for certain hash values. 
""" limiter.impl.num_physical_shards = 1 limiter.impl.num_shards = 10 helper = LimiterHelper(limiter) # when adding "hashes" 0..10 in ascending order, the first hash will fill up the physical shard admissions = [helper.add_value(i) for i in reversed(range(10))] assert admissions == list(reversed(range(10))) # we have stored only one shard out of 10, meaning the set count reported # from redis is 1, but the total counts are extrapolated correctly. like # without sampling, assert that the limit of 10 hashes is correctly applied # and we no longer accept additional hashes beyond 10. admissions = [helper.add_value(i) for i in range(100, 110)] assert admissions == [None] * 10 def test_sampling_going_bad(limiter: RedisCardinalityLimiter) -> None: """ test an edgecase of set sampling in the cardinality limiter. it is not exactly desired behavior but a known sampling artifact """ limiter.impl.num_physical_shards = 1 limiter.impl.num_shards = 10 helper = LimiterHelper(limiter) # when adding "hashes" 0..10 in ascending order, the first hash will fill # up the physical shard, and a total count of 10 is extrapolated from that admissions = [helper.add_value(i) for i in range(10)] assert admissions == [0] + [None] * 9 def test_regression_mixed_order(limiter: RedisCardinalityLimiter) -> None: """ Regression test to assert we still accept hashes after dropping some within the same request, regardless of set order. """ helper = LimiterHelper(limiter) # this hash certainly fits into the default limit of 10 hashes assert helper.add_value(5) == 5 # here, only 10 should be limited, as it is the 11th item being fed to the indexer. # 5 was admitted in an earlier call, and 0..9 are admitted right before it. # there used to be a bug where anything after 10 (i.e. 5) was dropped as # well (due to a wrong `break` somewhere in a loop) assert helper.add_values([0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 5]) == [0, 1, 2, 3, 4, 6, 7, 8, 9, 5]
LimiterHelper
python
huggingface__transformers
tests/models/gpt_oss/test_modeling_gpt_oss.py
{ "start": 1437, "end": 6699 }
class ____(CausalLMModelTest, unittest.TestCase): _is_stateful = True model_split_percents = [0.5, 0.6] model_tester_class = GptOssModelTester @unittest.skip("GptOss's forcefully disables sdpa due to Sink") def test_sdpa_can_dispatch_non_composite_models(self): pass @unittest.skip("GptOss's eager attn/sdpa attn outputs are expected to be different") def test_eager_matches_sdpa_generate(self): pass @unittest.skip("GptOss eager/FA2 attention outputs are expected to be different") def test_flash_attn_2_equivalence(self): pass @unittest.skip("Most probably because of the MOE, the moe and router does not ignore padding tokens") def test_eager_padding_matches_padding_free_with_position_ids(self): pass @unittest.skip("GptOss does not support flex officially") def test_flex_attention_with_grads(self): pass @unittest.skipIf(torch_device == "cpu", "GptOss does not support flex officially") def test_generate_compile_model_forward_fullgraph(self): return super().test_generate_compile_model_forward_fullgraph() RESULTS_PATH = Path(__file__).parent.parent.parent / "fixtures/gpt_oss/integration_tests.json" # ------------------------ # Worker function for distributed torchrun # ------------------------ def distributed_worker(quantized, model_size, kernels, attn_impl, mode): """This is the function that will be executed by torchrun workers.""" import os from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.testing_utils import torch_device def generate_config_key(quantized, model, kernels, attn_impl, mode): """Generate a key for the restructured integration test results.""" return f"quantized={str(quantized).lower()}|model={model}|kernels={str(kernels).lower()}|attn_impl={attn_impl}|mode={mode}" input_text = [ "Roses are red, violets", "How are you? 
Tell me the name of the president of", ] # Convert args quantized = quantized.lower() == "true" kernels = kernels.lower() == "true" # Distributed model loading model_id = f"openai/gpt-oss-{model_size}" model = AutoModelForCausalLM.from_pretrained( model_id, dtype="auto", tp_plan="auto", # distributed inference use_kernels=kernels, ).to(torch_device) model.set_attn_implementation(attn_impl) tokenizer = AutoTokenizer.from_pretrained(model_id, padding_side="left") # Inference inputs = tokenizer(input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_texts = tokenizer.batch_decode(output, skip_special_tokens=False) # Only rank 0 writes results and validates against expected outputs if int(os.environ.get("RANK", "0")) == 0: # Generate key to look up expected outputs key = generate_config_key(quantized, model_size, kernels, attn_impl, mode) # Load expected outputs from restructured JSON if os.path.exists(RESULTS_PATH): with open(RESULTS_PATH, "r") as f: expected_results = json.load(f) # Check if we have expected results for this configuration if key in expected_results: expected_outputs = expected_results[key] # Compare actual outputs with expected outputs assert len(output_texts) == len(expected_outputs), f"Output length mismatch for {key}" for i, (actual, expected) in enumerate(zip(output_texts, expected_outputs)): actual_stripped = actual.strip() expected_stripped = expected.strip() # Make lengths match by taking minimum length to be resilient to generation differences min_length = min(len(actual_stripped), len(expected_stripped)) actual_truncated = actual_stripped[:min_length] expected_truncated = expected_stripped[:min_length] if actual_truncated != expected_truncated: diff = "\n".join( difflib.unified_diff( expected_truncated.splitlines(keepends=True), actual_truncated.splitlines(keepends=True), fromfile=f"expected[{i}]", tofile=f"actual[{i}]", lineterm="", ) ) raise 
AssertionError( f"Output mismatch at index {i} for {key}:\n" f"Expected: '{expected_stripped}'\n" f"Actual: '{actual_stripped}'\n" f"Diff (truncated to min length {min_length}):\n{diff}" ) print(f"✓ Outputs match expected results for {key}") else: print(f"Warning: No expected results found for configuration: {key}") else: print(f"Warning: Results file {RESULTS_PATH} not found") @slow @require_torch_accelerator
GptOssModelTest
python
pytorch__pytorch
torch/distributed/tensor/experimental/_context_parallel/_attention.py
{ "start": 1557, "end": 1712 }
class ____(Enum): MONKEY_PATCH = auto() MODULE_WRAPPER = auto() _dispatch_mode: _DispatchMode = _DispatchMode.MONKEY_PATCH @dataclass
_DispatchMode
python
sympy__sympy
sympy/integrals/transforms.py
{ "start": 49441, "end": 51750 }
class ____(HankelTypeTransform): """ Class representing unevaluated inverse Hankel transforms. For usage of this class, see the :class:`IntegralTransform` docstring. For how to compute inverse Hankel transforms, see the :func:`inverse_hankel_transform` docstring. """ _name = 'Inverse Hankel' def inverse_hankel_transform(F, k, r, nu, **hints): r""" Compute the inverse Hankel transform of `F` defined as .. math:: f(r) = \int_{0}^\infty F_\nu(k) J_\nu(k r) k \mathrm{d} k. Explanation =========== If the transform cannot be computed in closed form, this function returns an unevaluated :class:`InverseHankelTransform` object. For a description of possible hints, refer to the docstring of :func:`sympy.integrals.transforms.IntegralTransform.doit`. Note that for this transform, by default ``noconds=True``. Examples ======== >>> from sympy import hankel_transform, inverse_hankel_transform >>> from sympy import exp >>> from sympy.abc import r, k, m, nu, a >>> ht = hankel_transform(1/r**m, r, k, nu) >>> ht 2*k**(m - 2)*gamma(-m/2 + nu/2 + 1)/(2**m*gamma(m/2 + nu/2)) >>> inverse_hankel_transform(ht, k, r, nu) r**(-m) >>> ht = hankel_transform(exp(-a*r), r, k, 0) >>> ht a/(k**3*(a**2/k**2 + 1)**(3/2)) >>> inverse_hankel_transform(ht, k, r, 0) exp(-a*r) See Also ======== fourier_transform, inverse_fourier_transform sine_transform, inverse_sine_transform cosine_transform, inverse_cosine_transform hankel_transform mellin_transform, laplace_transform """ return InverseHankelTransform(F, k, r, nu).doit(**hints) ########################################################################## # Laplace Transform ########################################################################## # Stub classes and functions that used to be here import sympy.integrals.laplace as _laplace LaplaceTransform = _laplace.LaplaceTransform laplace_transform = _laplace.laplace_transform laplace_correspondence = _laplace.laplace_correspondence laplace_initial_conds = _laplace.laplace_initial_conds 
InverseLaplaceTransform = _laplace.InverseLaplaceTransform inverse_laplace_transform = _laplace.inverse_laplace_transform
InverseHankelTransform
python
mkdocstrings__mkdocstrings
src/mkdocstrings/_internal/extension.py
{ "start": 15003, "end": 16585 }
class ____: def __init__(self, config_file_path: str | None = None) -> None: self.config_file_path = config_file_path def makeExtension( # noqa: N802 *, default_handler: str | None = None, inventory_project: str | None = None, inventory_version: str | None = None, handlers: dict[str, dict] | None = None, custom_templates: str | None = None, markdown_extensions: list[str | dict] | None = None, locale: str | None = None, config_file_path: str | None = None, ) -> MkdocstringsExtension: """Create the extension instance. We only support this function being used by Zensical. Consider this function private API. """ mdx, mdx_config = _split_configs(markdown_extensions or []) tool_config = _ToolConfig(config_file_path=config_file_path) handlers_instance = Handlers( theme="material", default=default_handler or _default_config["default_handler"], inventory_project=inventory_project or "Project", inventory_version=inventory_version or "0.0.0", handlers_config=handlers or _default_config["handlers"], custom_templates=custom_templates or _default_config["custom_templates"], mdx=mdx, mdx_config=mdx_config, locale=locale or _default_config["locale"], tool_config=tool_config, ) handlers_instance._download_inventories() autorefs = AutorefsPlugin() autorefs.config = AutorefsConfig() autorefs.scan_toc = False return MkdocstringsExtension(handlers=handlers_instance, autorefs=autorefs)
_ToolConfig
python
kamyu104__LeetCode-Solutions
Python/grid-game.py
{ "start": 48, "end": 422 }
class ____(object): def gridGame(self, grid): """ :type grid: List[List[int]] :rtype: int """ result = float("inf") left, right = 0, sum(grid[0]) for a, b in itertools.izip(grid[0], grid[1]): right -= a result = min(result, max(left, right)) left += b return result
Solution
python
vyperlang__vyper
vyper/venom/check_venom.py
{ "start": 547, "end": 845 }
class ____(VenomError): message: str = "variable is used before definition" def __init__(self, var, inst): self.var = var self.inst = inst def __str__(self): bb = self.inst.parent return f"var {self.var} not defined:\n {self.inst}\n\n{bb}"
VarNotDefined
python
google__pytype
pytype/pytd/visitors.py
{ "start": 71162, "end": 71687 }
class ____(_RemoveTypeParametersFromGenericAny): """Replace all references to modules in a list with AnythingType.""" def __init__(self, module_list: list[str]): super().__init__() self._any_modules = module_list def VisitNamedType(self, n): if any(n.name.startswith(module) for module in self._any_modules): return pytd.AnythingType() return n def VisitLateType(self, n): return self.VisitNamedType(n) def VisitClassType(self, n): return self.VisitNamedType(n)
ReplaceModulesWithAny
python
dagster-io__dagster
python_modules/dagster/dagster/_config/pythonic_config/typing_utils.py
{ "start": 1226, "end": 2604 }
class ____: _TResValue = TypeVar("_TResValue") class _Temp(Generic[_TResValue]): pass _ResourceDep: type = _Temp _Resource: type = _Temp _PartialResource: type = _Temp @staticmethod def get_resource_rep_type() -> type: return LateBoundTypesForResourceTypeChecking._ResourceDep @staticmethod def get_resource_type() -> type: return LateBoundTypesForResourceTypeChecking._Resource @staticmethod def get_partial_resource_type(base: type) -> type: # LateBoundTypesForResourceTypeChecking._PartialResource[base] would be the more # correct thing to return, but to enable that deeper pydantic integration # needs to be done on the PartialResource class # https://github.com/dagster-io/dagster/issues/18017 return LateBoundTypesForResourceTypeChecking._PartialResource @staticmethod def set_actual_types_for_type_checking( resource_dep_type: type, resource_type: type, partial_resource_type: type ) -> None: LateBoundTypesForResourceTypeChecking._ResourceDep = resource_dep_type LateBoundTypesForResourceTypeChecking._Resource = resource_type LateBoundTypesForResourceTypeChecking._PartialResource = partial_resource_type @dataclass_transform(kw_only_default=True, field_specifiers=(Field,))
LateBoundTypesForResourceTypeChecking
python
pypa__hatch
tests/index/test_core.py
{ "start": 312, "end": 1447 }
class ____: @pytest.mark.parametrize( ("repo_url", "expected_url"), [ pytest.param("https://upload.pypi.org/legacy/", "https://pypi.org/simple/", id="PyPI main"), pytest.param("https://test.pypi.org/legacy/", "https://test.pypi.org/simple/", id="PyPI test"), pytest.param("https://foo.internal/a/b/", "https://foo.internal/a/b/%2Bsimple/", id="default"), ], ) def test_simple(self, repo_url, expected_url): index = PackageIndex(repo_url) assert str(index.urls.simple) == expected_url @pytest.mark.parametrize( ("repo_url", "expected_url"), [ pytest.param("https://upload.pypi.org/legacy/", "https://pypi.org/project/", id="PyPI main"), pytest.param("https://test.pypi.org/legacy/", "https://test.pypi.org/project/", id="PyPI test"), pytest.param("https://foo.internal/a/b/", "https://foo.internal/a/b/", id="default"), ], ) def test_project(self, repo_url, expected_url): index = PackageIndex(repo_url) assert str(index.urls.project) == expected_url
TestURLs
python
numpy__numpy
numpy/polynomial/tests/test_legendre.py
{ "start": 3447, "end": 6100 }
class ____: # coefficients of 1 + 2*x + 3*x**2 c1d = np.array([2., 2., 2.]) c2d = np.einsum('i,j->ij', c1d, c1d) c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) # some random values in [-1, 1) x = np.random.random((3, 5)) * 2 - 1 y = polyval(x, [1., 2., 3.]) def test_legval(self): # check empty input assert_equal(leg.legval([], [1]).size, 0) # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(10): msg = f"At i={i}" tgt = y[i] res = leg.legval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) # check that shape is preserved for i in range(3): dims = [2] * i x = np.zeros(dims) assert_equal(leg.legval(x, [1]).shape, dims) assert_equal(leg.legval(x, [1, 0]).shape, dims) assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) def test_legval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y # test exceptions assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) # test values tgt = y1 * y2 res = leg.legval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) # test shape z = np.ones((2, 3)) res = leg.legval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) def test_legval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y # test exceptions assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) # test values tgt = y1 * y2 * y3 res = leg.legval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) # test shape z = np.ones((2, 3)) res = leg.legval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) def test_leggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y # test values tgt = np.einsum('i,j->ij', y1, y2) res = leg.leggrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) # test shape z = np.ones((2, 3)) res = leg.leggrid2d(z, z, self.c2d) assert_(res.shape == (2, 3) * 2) def test_leggrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = leg.leggrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) # test shape z = np.ones((2, 3)) res = 
leg.leggrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3)
TestEvaluation
python
mlflow__mlflow
mlflow/telemetry/events.py
{ "start": 1058, "end": 1417 }
class ____(Event): name: str = "start_trace" @classmethod def parse(cls, arguments: dict[str, Any]) -> dict[str, Any] | None: # Capture the set of currently imported packages at trace start time to # understand the flavor of the trace. return {"imports": [pkg for pkg in GENAI_MODULES if pkg in sys.modules]}
StartTraceEvent
python
huggingface__transformers
src/transformers/data/processors/glue.py
{ "start": 8444, "end": 10150 }
class ____(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) def get_example_from_tensor_dict(self, tensor_dict): """See base class.""" return InputExample( tensor_dict["idx"].numpy(), tensor_dict["sentence"].numpy().decode("utf-8"), None, str(tensor_dict["label"].numpy()), ) def get_train_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_test_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training, dev and test sets.""" test_mode = set_type == "test" if test_mode: lines = lines[1:] text_index = 1 if test_mode else 3 examples = [] for i, line in enumerate(lines): guid = f"{set_type}-{i}" text_a = line[text_index] label = None if test_mode else line[1] examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples
ColaProcessor
python
wandb__wandb
wandb/apis/public/projects.py
{ "start": 1308, "end": 4596 }
class ____(Paginator["Project"]): """An lazy iterator of `Project` objects. An iterable interface to access projects created and saved by the entity. Args: client (`wandb.apis.internal.Api`): The API client instance to use. entity (str): The entity name (username or team) to fetch projects for. per_page (int): Number of projects to fetch per request (default is 50). Example: ```python from wandb.apis.public.api import Api # Find projects that belong to this entity projects = Api().projects(entity="entity") # Iterate over files for project in projects: print(f"Project: {project.name}") print(f"- URL: {project.url}") print(f"- Created at: {project.created_at}") print(f"- Is benchmark: {project.is_benchmark}") ``` """ QUERY = gql(f"""#graphql query Projects($entity: String, $cursor: String, $perPage: Int = 50) {{ models(entityName: $entity, after: $cursor, first: $perPage) {{ edges {{ node {{ ...ProjectFragment }} cursor }} pageInfo {{ endCursor hasNextPage }} }} }} {PROJECT_FRAGMENT} """) def __init__( self, client: RetryingClient, entity: str, per_page: int = 50, ) -> Projects: """An iterable collection of `Project` objects. Args: client: The API client used to query W&B. entity: The entity which owns the projects. per_page: The number of projects to fetch per request to the API. """ self.client = client self.entity = entity variables = { "entity": self.entity, } super().__init__(client, variables, per_page) @property def length(self) -> None: """Returns the total number of projects. Note: This property is not available for projects. <!-- lazydoc-ignore: internal --> """ # For backwards compatibility, even though this isn't a SizedPaginator return None @property def more(self): """Returns `True` if there are more projects to fetch. Returns `False` if there are no more projects to fetch. 
<!-- lazydoc-ignore: internal --> """ if self.last_response: return self.last_response["models"]["pageInfo"]["hasNextPage"] else: return True @property def cursor(self): """Returns the cursor position for pagination of project results. <!-- lazydoc-ignore: internal --> """ if self.last_response: return self.last_response["models"]["edges"][-1]["cursor"] else: return None def convert_objects(self): """Converts GraphQL edges to File objects. <!-- lazydoc-ignore: internal --> """ return [ Project(self.client, self.entity, p["node"]["name"], p["node"]) for p in self.last_response["models"]["edges"] ] def __repr__(self): return f"<Projects {self.entity}>"
Projects
python
getsentry__sentry
src/sentry/core/endpoints/team_release_count.py
{ "start": 673, "end": 2870 }
class ____(TeamEndpoint): publish_status = { "GET": ApiPublishStatus.PRIVATE, } def get(self, request: Request, team) -> Response: """ Returns a dict of team projects, and a time-series list of release counts for each. """ if not features.has("organizations:team-insights", team.organization, actor=request.user): return Response({"detail": "You do not have the insights feature enabled"}, status=400) project_list = Project.objects.get_for_team_ids(team_ids=[team.id]) start, end = get_date_range_from_params(request.GET) end = floor_to_utc_day(end) + timedelta(days=1) start = floor_to_utc_day(start) + timedelta(days=1) per_project_daily_release_counts = ( Release.objects.filter( projects__in=project_list, date_added__gte=start, date_added__lte=end, ) .annotate(bucket=TruncDay("date_added")) .order_by("bucket") .values("projects", "bucket") .annotate(count=Count("id")) ) agg_project_counts = {} project_avgs: dict[int, float] = defaultdict(int) this_week_totals: dict[int, int] = defaultdict(int) this_week_start = now() - timedelta(days=7) for row in per_project_daily_release_counts: project_avgs[row["projects"]] += row["count"] agg_project_counts[str(row["bucket"].date())] = row["count"] if row["bucket"] >= this_week_start: this_week_totals[row["projects"]] += row["count"] for project_id in project_avgs: project_avgs[project_id] = (project_avgs[project_id] / (end - start).days) * 7 current_day = start.date() end_date = end.date() while current_day < end_date: agg_project_counts.setdefault(str(current_day), 0) current_day += timedelta(days=1) return Response( { "release_counts": agg_project_counts, "project_avgs": project_avgs, "last_week_totals": this_week_totals, } )
TeamReleaseCountEndpoint
python
django-extensions__django-extensions
tests/test_runscript.py
{ "start": 5434, "end": 6771 }
class ____(RunScriptTests): def test_prints_error_message_for_script_without_run(self): cmd = self.get_command() with self.assertRaises(CommandError): call_command(cmd, "script_no_run_function") self.assertIn( "No (valid) module for script 'script_no_run_function' found", sys.stdout.getvalue(), ) self.assertIn( "Try running with a higher verbosity level like: -v2 or -v3", sys.stdout.getvalue(), ) self.assertEqual(cmd.last_exit_code, 1) def test_prints_additional_info_for_script__run_extra_verbosity(self): cmd = self.get_command() with self.assertRaises(CommandError): call_command(cmd, "script_no_run_function", verbosity=2) self.assertIn( "No (valid) module for script 'script_no_run_function' found", sys.stdout.getvalue(), ) self.assertIn("Found script", sys.stdout.getvalue()) self.assertEqual(cmd.last_exit_code, 1) def test_prints_nothing_for_script_without_run(self): cmd = self.get_command() call_command(cmd, "script_no_run_function", silent=True) self.assertEqual("", sys.stdout.getvalue()) project_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
RunFunctionTests
python
wandb__wandb
wandb/sdk/artifacts/_generated/artifact_used_by.py
{ "start": 336, "end": 445 }
class ____(GQLResult): used_by: ArtifactUsedByArtifactUsedBy = Field(alias="usedBy")
ArtifactUsedByArtifact
python
keras-team__keras
keras/src/layers/reshaping/permute_test.py
{ "start": 190, "end": 2712 }
class ____(testing.TestCase): @parameterized.named_parameters( [ {"testcase_name": "dense", "sparse": False}, {"testcase_name": "sparse", "sparse": True}, ] ) @pytest.mark.requires_trainable_backend def test_permute(self, sparse): if sparse and not backend.SUPPORTS_SPARSE_TENSORS: pytest.skip("Backend does not support sparse tensors.") inputs = np.random.random((10, 3, 5, 5)).astype("float32") # Make the ndarray relatively sparse inputs = np.multiply(inputs, inputs >= 0.8) expected_output = ops.convert_to_tensor( np.transpose(inputs, axes=(0, 3, 1, 2)) ) if sparse: if backend.backend() == "tensorflow": import tensorflow as tf inputs = tf.sparse.from_dense(inputs) expected_output = tf.sparse.from_dense(expected_output) elif backend.backend() == "jax": import jax.experimental.sparse as jax_sparse inputs = jax_sparse.BCOO.fromdense(inputs) expected_output = jax_sparse.BCOO.fromdense(expected_output) else: self.fail( f"Backend {backend.backend()} does not support sparse" ) self.run_layer_test( layers.Permute, init_kwargs={"dims": (3, 1, 2)}, input_data=inputs, input_sparse=sparse, expected_output=expected_output, expected_output_sparse=sparse, run_training_check=not sparse, ) def test_permute_with_dynamic_batch_size(self): input_layer = layers.Input(batch_shape=(None, 3, 5)) permuted = layers.Permute((2, 1))(input_layer) self.assertEqual(permuted.shape, (None, 5, 3)) def test_permute_errors_on_invalid_starting_dims_index(self): with self.assertRaisesRegex( ValueError, r"Invalid permutation .*dims.*" ): self.run_layer_test( layers.Permute, init_kwargs={"dims": (0, 1, 2)}, input_shape=(3, 2, 4), ) def test_permute_errors_on_invalid_set_of_dims_indices(self): with self.assertRaisesRegex( ValueError, r"Invalid permutation .*dims.*" ): self.run_layer_test( layers.Permute, init_kwargs={"dims": (1, 4, 2)}, input_shape=(3, 2, 4), )
PermuteTest
python
django__django
tests/auth_tests/test_management.py
{ "start": 11147, "end": 47933 }
class ____(TestCase): def test_no_email_argument(self): new_io = StringIO() with self.assertRaisesMessage( CommandError, "You must use --email with --noinput." ): call_command( "createsuperuser", interactive=False, username="joe", stdout=new_io ) def test_basic_usage(self): "Check the operation of the createsuperuser management command" # We can use the management command to create a superuser new_io = StringIO() call_command( "createsuperuser", interactive=False, username="joe", email="joe@somewhere.org", stdout=new_io, ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, "Superuser created successfully.") u = User.objects.get(username="joe") self.assertEqual(u.email, "joe@somewhere.org") # created password should be unusable self.assertFalse(u.has_usable_password()) def test_validate_username(self): msg = ( "Enter a valid username. This value may contain only letters, numbers, " "and @/./+/-/_ characters." ) with self.assertRaisesMessage(CommandError, msg): call_command( "createsuperuser", interactive=False, username="🤠", email="joe@somewhere.org", ) def test_non_ascii_verbose_name(self): @mock_inputs( { "password": "nopasswd", "Uživatel (leave blank to use '%s'): " % get_default_username(): "foo", # username (cz) "email": "nolocale@somewhere.org", } ) def test(self): username_field = User._meta.get_field("username") old_verbose_name = username_field.verbose_name username_field.verbose_name = _("u\u017eivatel") new_io = StringIO() try: call_command( "createsuperuser", interactive=True, stdout=new_io, stdin=MockTTY(), ) finally: username_field.verbose_name = old_verbose_name command_output = new_io.getvalue().strip() self.assertEqual(command_output, "Superuser created successfully.") test(self) def test_verbosity_zero(self): # We can suppress output on the management command new_io = StringIO() call_command( "createsuperuser", interactive=False, username="joe2", email="joe2@somewhere.org", verbosity=0, stdout=new_io, ) command_output = 
new_io.getvalue().strip() self.assertEqual(command_output, "") u = User.objects.get(username="joe2") self.assertEqual(u.email, "joe2@somewhere.org") self.assertFalse(u.has_usable_password()) def test_email_in_username(self): call_command( "createsuperuser", interactive=False, username="joe+admin@somewhere.org", email="joe@somewhere.org", verbosity=0, ) u = User._default_manager.get(username="joe+admin@somewhere.org") self.assertEqual(u.email, "joe@somewhere.org") self.assertFalse(u.has_usable_password()) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUser") def test_swappable_user(self): "A superuser can be created when a custom user model is in use" # We can use the management command to create a superuser # We skip validation because the temporary substitution of the # swappable User model messes with validation. new_io = StringIO() call_command( "createsuperuser", interactive=False, email="joe@somewhere.org", date_of_birth="1976-04-01", first_name="Joe", stdout=new_io, ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, "Superuser created successfully.") u = CustomUser._default_manager.get(email="joe@somewhere.org") self.assertEqual(u.date_of_birth, date(1976, 4, 1)) # created password should be unusable self.assertFalse(u.has_usable_password()) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUser") def test_swappable_user_missing_required_field(self): """ A Custom superuser won't be created when a required field isn't provided """ # We can use the management command to create a superuser # We skip validation because the temporary substitution of the # swappable User model messes with validation. new_io = StringIO() with self.assertRaisesMessage( CommandError, "You must use --email with --noinput." 
): call_command( "createsuperuser", interactive=False, stdout=new_io, stderr=new_io, ) self.assertEqual(CustomUser._default_manager.count(), 0) @override_settings( AUTH_USER_MODEL="auth_tests.CustomUserNonUniqueUsername", AUTHENTICATION_BACKENDS=["my.custom.backend"], ) def test_swappable_user_username_non_unique(self): @mock_inputs( { "username": "joe", "password": "nopasswd", } ) def createsuperuser(): new_io = StringIO() call_command( "createsuperuser", interactive=True, email="joe@somewhere.org", stdout=new_io, stdin=MockTTY(), ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, "Superuser created successfully.") for i in range(2): createsuperuser() users = CustomUserNonUniqueUsername.objects.filter(username="joe") self.assertEqual(users.count(), 2) def test_skip_if_not_in_TTY(self): """ If the command is not called from a TTY, it should be skipped and a message should be displayed (#7423). """ class FakeStdin: """A fake stdin object that has isatty() return False.""" def isatty(self): return False out = StringIO() call_command( "createsuperuser", stdin=FakeStdin(), stdout=out, interactive=True, ) self.assertEqual(User._default_manager.count(), 0) self.assertIn("Superuser creation skipped", out.getvalue()) def test_passing_stdin(self): """ You can pass a stdin object as an option and it should be available on self.stdin. If no such option is passed, it defaults to sys.stdin. 
""" sentinel = object() command = createsuperuser.Command() call_command( command, stdin=sentinel, interactive=False, verbosity=0, username="janet", email="janet@example.com", ) self.assertIs(command.stdin, sentinel) command = createsuperuser.Command() call_command( command, interactive=False, verbosity=0, username="joe", email="joe@example.com", ) self.assertIs(command.stdin, sys.stdin) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithFK") def test_fields_with_fk(self): new_io = StringIO() group = Group.objects.create(name="mygroup") email = Email.objects.create(email="mymail@gmail.com") call_command( "createsuperuser", interactive=False, username=email.pk, email=email.email, group=group.pk, stdout=new_io, ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, "Superuser created successfully.") u = CustomUserWithFK._default_manager.get(email=email) self.assertEqual(u.username, email) self.assertEqual(u.group, group) non_existent_email = "mymail2@gmail.com" msg = "email instance with email %r is not a valid choice." 
% non_existent_email with self.assertRaisesMessage(CommandError, msg): call_command( "createsuperuser", interactive=False, username=email.pk, email=non_existent_email, stdout=new_io, ) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithFK") def test_fields_with_fk_interactive(self): new_io = StringIO() group = Group.objects.create(name="mygroup") email = Email.objects.create(email="mymail@gmail.com") @mock_inputs( { "password": "nopasswd", "Username (Email.id): ": email.pk, "Email (Email.email): ": email.email, "Group (Group.id): ": group.pk, } ) def test(self): call_command( "createsuperuser", interactive=True, stdout=new_io, stdin=MockTTY(), ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, "Superuser created successfully.") u = CustomUserWithFK._default_manager.get(email=email) self.assertEqual(u.username, email) self.assertEqual(u.group, group) test(self) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithFK") def test_fields_with_fk_via_option_interactive(self): new_io = StringIO() group = Group.objects.create(name="mygroup") email = Email.objects.create(email="mymail@gmail.com") @mock_inputs({"password": "nopasswd"}) def test(self): call_command( "createsuperuser", interactive=True, username=email.pk, email=email.email, group=group.pk, stdout=new_io, stdin=MockTTY(), ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, "Superuser created successfully.") u = CustomUserWithFK._default_manager.get(email=email) self.assertEqual(u.username, email) self.assertEqual(u.group, group) test(self) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithFK") def test_validate_fk(self): email = Email.objects.create(email="mymail@gmail.com") Group.objects.all().delete() nonexistent_group_id = 1 msg = f"group instance with id {nonexistent_group_id} is not a valid choice." 
with self.assertRaisesMessage(CommandError, msg): call_command( "createsuperuser", interactive=False, username=email.pk, email=email.email, group=nonexistent_group_id, verbosity=0, ) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithFK") def test_validate_fk_environment_variable(self): email = Email.objects.create(email="mymail@gmail.com") Group.objects.all().delete() nonexistent_group_id = 1 msg = f"group instance with id {nonexistent_group_id} is not a valid choice." with mock.patch.dict( os.environ, {"DJANGO_SUPERUSER_GROUP": str(nonexistent_group_id)}, ): with self.assertRaisesMessage(CommandError, msg): call_command( "createsuperuser", interactive=False, username=email.pk, email=email.email, verbosity=0, ) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithFK") def test_validate_fk_via_option_interactive(self): email = Email.objects.create(email="mymail@gmail.com") Group.objects.all().delete() nonexistent_group_id = 1 msg = f"group instance with id {nonexistent_group_id} is not a valid choice." 
@mock_inputs( { "password": "nopasswd", "Username (Email.id): ": email.pk, "Email (Email.email): ": email.email, } ) def test(self): with self.assertRaisesMessage(CommandError, msg): call_command( "createsuperuser", group=nonexistent_group_id, stdin=MockTTY(), verbosity=0, ) test(self) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithM2m") def test_fields_with_m2m(self): new_io = StringIO() org_id_1 = Organization.objects.create(name="Organization 1").pk org_id_2 = Organization.objects.create(name="Organization 2").pk call_command( "createsuperuser", interactive=False, username="joe", orgs=[org_id_1, org_id_2], stdout=new_io, ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, "Superuser created successfully.") user = CustomUserWithM2M._default_manager.get(username="joe") self.assertEqual(user.orgs.count(), 2) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithM2M") def test_fields_with_m2m_interactive(self): new_io = StringIO() org_id_1 = Organization.objects.create(name="Organization 1").pk org_id_2 = Organization.objects.create(name="Organization 2").pk @mock_inputs( { "password": "nopasswd", "Username: ": "joe", "Orgs (Organization.id): ": "%s, %s" % (org_id_1, org_id_2), } ) def test(self): call_command( "createsuperuser", interactive=True, stdout=new_io, stdin=MockTTY(), ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, "Superuser created successfully.") user = CustomUserWithM2M._default_manager.get(username="joe") self.assertEqual(user.orgs.count(), 2) test(self) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithM2M") def test_fields_with_m2m_interactive_blank(self): new_io = StringIO() org_id = Organization.objects.create(name="Organization").pk entered_orgs = [str(org_id), " "] def return_orgs(): return entered_orgs.pop() @mock_inputs( { "password": "nopasswd", "Username: ": "joe", "Orgs (Organization.id): ": return_orgs, } ) def test(self): call_command( 
"createsuperuser", interactive=True, stdout=new_io, stderr=new_io, stdin=MockTTY(), ) self.assertEqual( new_io.getvalue().strip(), "Error: This field cannot be blank.\n" "Superuser created successfully.", ) test(self) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithM2MThrough") def test_fields_with_m2m_and_through(self): msg = ( "Required field 'orgs' specifies a many-to-many relation through " "model, which is not supported." ) with self.assertRaisesMessage(CommandError, msg): call_command("createsuperuser") def test_default_username(self): """createsuperuser uses a default username when one isn't provided.""" # Get the default username before creating a user. default_username = get_default_username() new_io = StringIO() entered_passwords = ["password", "password"] def return_passwords(): return entered_passwords.pop(0) @mock_inputs({"password": return_passwords, "username": "", "email": ""}) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "Superuser created successfully." ) self.assertTrue(User.objects.filter(username=default_username).exists()) test(self) def test_password_validation(self): """ Creation should fail if the password fails validation. """ new_io = StringIO() entered_passwords = ["1234567890", "1234567890", "password", "password"] def bad_then_good_password(): return entered_passwords.pop(0) @mock_inputs( { "password": bad_then_good_password, "username": "joe1234567890", "email": "", "bypass": "n", } ) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "This password is entirely numeric.\n" "Superuser created successfully.", ) test(self) @override_settings( AUTH_PASSWORD_VALIDATORS=[ { "NAME": ( "django.contrib.auth.password_validation." 
"UserAttributeSimilarityValidator" ) }, ] ) def test_validate_password_against_username(self): new_io = StringIO() username = "supremelycomplex" entered_passwords = [ username, username, "superduperunguessablepassword", "superduperunguessablepassword", ] def bad_then_good_password(): return entered_passwords.pop(0) @mock_inputs( { "password": bad_then_good_password, "username": username, "email": "", "bypass": "n", } ) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "The password is too similar to the username.\n" "Superuser created successfully.", ) test(self) @override_settings( AUTH_USER_MODEL="auth_tests.CustomUser", AUTH_PASSWORD_VALIDATORS=[ { "NAME": ( "django.contrib.auth.password_validation." "UserAttributeSimilarityValidator" ) }, ], ) def test_validate_password_against_required_fields(self): new_io = StringIO() first_name = "josephine" entered_passwords = [ first_name, first_name, "superduperunguessablepassword", "superduperunguessablepassword", ] def bad_then_good_password(): return entered_passwords.pop(0) @mock_inputs( { "password": bad_then_good_password, "username": "whatever", "first_name": first_name, "date_of_birth": "1970-01-01", "email": "joey@example.com", "bypass": "n", } ) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "The password is too similar to the first name.\n" "Superuser created successfully.", ) test(self) @override_settings( AUTH_USER_MODEL="auth_tests.CustomUser", AUTH_PASSWORD_VALIDATORS=[ { "NAME": ( "django.contrib.auth.password_validation." 
"UserAttributeSimilarityValidator" ) }, ], ) def test_validate_password_against_required_fields_via_option(self): new_io = StringIO() first_name = "josephine" entered_passwords = [ first_name, first_name, "superduperunguessablepassword", "superduperunguessablepassword", ] def bad_then_good_password(): return entered_passwords.pop(0) @mock_inputs( { "password": bad_then_good_password, "bypass": "n", } ) def test(self): call_command( "createsuperuser", interactive=True, first_name=first_name, date_of_birth="1970-01-01", email="joey@example.com", stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "The password is too similar to the first name.\n" "Superuser created successfully.", ) test(self) def test_blank_username(self): """Creation fails if --username is blank.""" new_io = StringIO() with self.assertRaisesMessage(CommandError, "Username cannot be blank."): call_command( "createsuperuser", username="", stdin=MockTTY(), stdout=new_io, stderr=new_io, ) def test_blank_username_non_interactive(self): new_io = StringIO() with self.assertRaisesMessage(CommandError, "Username cannot be blank."): call_command( "createsuperuser", username="", interactive=False, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) def test_blank_email_allowed_non_interactive(self): new_io = StringIO() call_command( "createsuperuser", email="", username="joe", interactive=False, stdout=new_io, stderr=new_io, ) self.assertEqual(new_io.getvalue().strip(), "Superuser created successfully.") u = User.objects.get(username="joe") self.assertEqual(u.email, "") @mock.patch.dict(os.environ, {"DJANGO_SUPERUSER_EMAIL": ""}) def test_blank_email_allowed_non_interactive_environment_variable(self): new_io = StringIO() call_command( "createsuperuser", username="joe", interactive=False, stdout=new_io, stderr=new_io, ) self.assertEqual(new_io.getvalue().strip(), "Superuser created successfully.") u = User.objects.get(username="joe") self.assertEqual(u.email, "") def 
test_password_validation_bypass(self): """ Password validation can be bypassed by entering 'y' at the prompt. """ new_io = StringIO() @mock_inputs( { "password": "1234567890", "username": "joe1234567890", "email": "", "bypass": "y", } ) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "This password is entirely numeric.\n" "Superuser created successfully.", ) test(self) def test_invalid_username(self): """Creation fails if the username fails validation.""" user_field = User._meta.get_field(User.USERNAME_FIELD) new_io = StringIO() entered_passwords = ["password", "password"] # Enter an invalid (too long) username first and then a valid one. invalid_username = ("x" * user_field.max_length) + "y" entered_usernames = [invalid_username, "janet"] def return_passwords(): return entered_passwords.pop(0) def return_usernames(): return entered_usernames.pop(0) @mock_inputs( {"password": return_passwords, "username": return_usernames, "email": ""} ) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "Error: Ensure this value has at most %s characters (it has %s).\n" "Superuser created successfully." % (user_field.max_length, len(invalid_username)), ) test(self) @mock_inputs({"username": "KeyboardInterrupt"}) def test_keyboard_interrupt(self): new_io = StringIO() with self.assertRaises(SystemExit): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual(new_io.getvalue(), "\nOperation cancelled.\n") def test_existing_username(self): """Creation fails if the username already exists.""" user = User.objects.create(username="janet") new_io = StringIO() entered_passwords = ["password", "password"] # Enter the existing username first and then a new one. 
entered_usernames = [user.username, "joe"] def return_passwords(): return entered_passwords.pop(0) def return_usernames(): return entered_usernames.pop(0) @mock_inputs( {"password": return_passwords, "username": return_usernames, "email": ""} ) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "Error: That username is already taken.\n" "Superuser created successfully.", ) test(self) @override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithUniqueConstraint") def test_existing_username_meta_unique_constraint(self): """ Creation fails if the username already exists and a custom user model has UniqueConstraint. """ user = CustomUserWithUniqueConstraint.objects.create(username="janet") new_io = StringIO() entered_passwords = ["password", "password"] # Enter the existing username first and then a new one. entered_usernames = [user.username, "joe"] def return_passwords(): return entered_passwords.pop(0) def return_usernames(): return entered_usernames.pop(0) @mock_inputs({"password": return_passwords, "username": return_usernames}) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "Error: That username is already taken.\n" "Superuser created successfully.", ) test(self) def test_existing_username_non_interactive(self): """Creation fails if the username already exists.""" User.objects.create(username="janet") new_io = StringIO() with self.assertRaisesMessage( CommandError, "Error: That username is already taken." 
): call_command( "createsuperuser", username="janet", email="", interactive=False, stdout=new_io, ) def test_existing_username_provided_via_option_and_interactive(self): """call_command() gets username='janet' and interactive=True.""" new_io = StringIO() entered_passwords = ["password", "password"] User.objects.create(username="janet") def return_passwords(): return entered_passwords.pop(0) @mock_inputs( { "password": return_passwords, "username": "janet1", "email": "test@test.com", } ) def test(self): call_command( "createsuperuser", username="janet", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) msg = ( "Error: That username is already taken.\n" "Superuser created successfully." ) self.assertEqual(new_io.getvalue().strip(), msg) test(self) def test_validation_mismatched_passwords(self): """ Creation should fail if the user enters mismatched passwords. """ new_io = StringIO() # The first two passwords do not match, but the second two do match and # are valid. entered_passwords = ["password", "not password", "password2", "password2"] def mismatched_passwords_then_matched(): return entered_passwords.pop(0) @mock_inputs( { "password": mismatched_passwords_then_matched, "username": "joe1234567890", "email": "", } ) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "Error: Your passwords didn't match.\n" "Superuser created successfully.", ) test(self) def test_validation_blank_password_entered(self): """ Creation should fail if the user enters blank passwords. """ new_io = StringIO() # The first two passwords are empty strings, but the second two are # valid. 
entered_passwords = ["", "", "password2", "password2"] def blank_passwords_then_valid(): return entered_passwords.pop(0) @mock_inputs( { "password": blank_passwords_then_valid, "username": "joe1234567890", "email": "", } ) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "Error: Blank passwords aren't allowed.\n" "Superuser created successfully.", ) test(self) @override_settings(AUTH_USER_MODEL="auth_tests.NoPasswordUser") def test_usermodel_without_password(self): new_io = StringIO() call_command( "createsuperuser", interactive=False, stdin=MockTTY(), stdout=new_io, stderr=new_io, username="username", ) self.assertEqual(new_io.getvalue().strip(), "Superuser created successfully.") @override_settings(AUTH_USER_MODEL="auth_tests.NoPasswordUser") def test_usermodel_without_password_interactive(self): new_io = StringIO() @mock_inputs({"username": "username"}) def test(self): call_command( "createsuperuser", interactive=True, stdin=MockTTY(), stdout=new_io, stderr=new_io, ) self.assertEqual( new_io.getvalue().strip(), "Superuser created successfully." ) test(self) @mock.patch.dict( os.environ, { "DJANGO_SUPERUSER_PASSWORD": "test_password", "DJANGO_SUPERUSER_USERNAME": "test_superuser", "DJANGO_SUPERUSER_EMAIL": "joe@somewhere.org", "DJANGO_SUPERUSER_FIRST_NAME": "ignored_first_name", }, ) def test_environment_variable_non_interactive(self): call_command("createsuperuser", interactive=False, verbosity=0) user = User.objects.get(username="test_superuser") self.assertEqual(user.email, "joe@somewhere.org") self.assertTrue(user.check_password("test_password")) # Environment variables are ignored for non-required fields. 
self.assertEqual(user.first_name, "") @override_settings(AUTH_USER_MODEL="auth_tests.CustomUserWithM2m") def test_environment_variable_m2m_non_interactive(self): new_io = StringIO() org_id_1 = Organization.objects.create(name="Organization 1").pk org_id_2 = Organization.objects.create(name="Organization 2").pk with mock.patch.dict( os.environ, { "DJANGO_SUPERUSER_ORGS": f"{org_id_1},{org_id_2}", }, ): call_command( "createsuperuser", interactive=False, username="joe", stdout=new_io, ) command_output = new_io.getvalue().strip() self.assertEqual(command_output, "Superuser created successfully.") user = CustomUserWithM2M._default_manager.get(username="joe") self.assertEqual(user.orgs.count(), 2) @mock.patch.dict( os.environ, { "DJANGO_SUPERUSER_USERNAME": "test_superuser", "DJANGO_SUPERUSER_EMAIL": "joe@somewhere.org", }, ) def test_ignore_environment_variable_non_interactive(self): # Environment variables are ignored in non-interactive mode, if # provided by a command line arguments. call_command( "createsuperuser", interactive=False, username="cmd_superuser", email="cmd@somewhere.org", verbosity=0, ) user = User.objects.get(username="cmd_superuser") self.assertEqual(user.email, "cmd@somewhere.org") self.assertFalse(user.has_usable_password()) @mock.patch.dict( os.environ, { "DJANGO_SUPERUSER_PASSWORD": "test_password", "DJANGO_SUPERUSER_USERNAME": "test_superuser", "DJANGO_SUPERUSER_EMAIL": "joe@somewhere.org", }, ) def test_ignore_environment_variable_interactive(self): # Environment variables are ignored in interactive mode. @mock_inputs({"password": "cmd_password"}) def test(self): call_command( "createsuperuser", interactive=True, username="cmd_superuser", email="cmd@somewhere.org", stdin=MockTTY(), verbosity=0, ) user = User.objects.get(username="cmd_superuser") self.assertEqual(user.email, "cmd@somewhere.org") self.assertTrue(user.check_password("cmd_password")) test(self)
CreatesuperuserManagementCommandTestCase
python
getsentry__sentry
src/sentry/data_export/base.py
{ "start": 215, "end": 393 }
class ____(Exception): def __init__(self, message: str, recoverable: bool = False) -> None: super().__init__(message) self.recoverable = recoverable
ExportError
python
kamyu104__LeetCode-Solutions
Python/find-elements-in-a-contaminated-binary-tree.py
{ "start": 191, "end": 732 }
class ____(object): def __init__(self, root): """ :type root: TreeNode """ def dfs(node, v, lookup): if not node: return node.val = v lookup.add(v) dfs(node.left, 2*v+1, lookup) dfs(node.right, 2*v+2, lookup) self.__lookup = set() dfs(root, 0, self.__lookup) def find(self, target): """ :type target: int :rtype: bool """ return target in self.__lookup
FindElements
python
pytorch__pytorch
torch/_inductor/config.py
{ "start": 53889, "end": 66124 }
class ____: """ Config specific to codegen/triton.py """ # Use cudagraphs on output code cudagraphs = os.environ.get("TORCHINDUCTOR_CUDAGRAPHS") == "1" # Use cudagraph trees for memory pooling if `cudagraphs` is True cudagraph_trees = True # Should we skip cudagraphing graphs with dynamic shape inputs # If False, we will re-record a graph for each unique set of shape inputs cudagraph_skip_dynamic_graphs = False # Specify dynamic shapes to capture cudagraphs and skip cudagraph for other shapes. # Default to None, which means we capture cudagraphs for all shapes. cudagraph_capture_sizes: Optional[tuple[Union[int, tuple[int, ...]]]] = None # assertions not on the fast path, steady state slow_path_cudagraph_asserts = True # TODO - need to debug why this prevents cleanup cudagraph_trees_history_recording = False # Enable cudagraph support for mutated inputs from prior cudagraph pool cudagraph_support_input_mutation = not is_fbcode() # Maximal number of allowed cudagraph re-record for a function and # a cudagraph node due to static input tensor address changes or # cudagraph managed tensor data pointer changed. # i.e., allow num_recording <= cudagraph_unexpected_rerecord_limit # note: we are conservative here and choose a large limit. cudagraph_unexpected_rerecord_limit = 128 # Warn loudly when the number of cudagraphs due to dynamic shape # exceeds this limit cudagraph_dynamic_shape_warn_limit: Optional[int] = 8 # synchronize after cudagraph invocation force_cudagraph_sync = False # always run cudagraphs in the eager warmup stage # instead of recording and executing cudagraphs force_cudagraphs_warmup = False # If False (default), torch.compile skips cudagraph for a graph if it # contains cudagraph-unsafe ops. If True, we require that all cuda ops # be captured into cudagraph. If this is not possible, this will raise # an error. 
cudagraph_or_error: bool = Config( env_name_force="TORCHINDUCTOR_CUDAGRAPH_OR_ERROR", default=False, ) # reorder nodes to minimize the number of graph partitions while # not incurring large memory overhead reorder_for_reducing_graph_partitions: bool = True # assertions on the fast path fast_path_cudagraph_asserts = False # skip warmup for cudagraph trees skip_cudagraph_warmup = False # Synchronize before and after every compiled graph. debug_sync_graph = False # Synchronize after every kernel launch, to help pinpoint bugs debug_sync_kernel = False # Always load full blocks (rather than broadcasting inside the block) dense_indexing = False # TODO - enable by default coalesce_tiling_analysis: bool = ( os.environ.get( "TORCHINDUCTOR_COALESCE_TILING_ANALYSIS", "1" if not is_fbcode() else "0" ) == "1" ) # limit tiling dimensions # - max_tiles=1 disables tiling # - max_tiles=2 # - max_tiles=3 is experimental and may have bugs # higher values are unsupported # We use a max of 3 if coalesce_tiling_analysis is True, and 2 otherwise. # Note - coalesce_tiling_analysis does not yet apply to dynamic shapes. max_tiles: Optional[int] = None # Prefer higher dimensional tilings. This simplifies indexing expressions, making # it easier to identify block pointers. prefer_nd_tiling: bool = False # use triton.autotune for pointwise ops with complex layouts # this should only be disabled for debugging/testing autotune_pointwise = True # max autotune gemm with cublasLt autotune_cublasLt = True # Tune the generated Triton kernels at compile time instead of first time they run # Setting to None means uninitialized autotune_at_compile_time: Optional[bool] = None # We use random tensors for autotune by default. Setting this as true will let us # use inputs from sample inputs to autotune user defined triton kernels. # Side effect for this option is increased memory footprint during first pass compilation. 
autotune_with_sample_inputs: bool = False # Allows tiling reductions into multiple dimensions. # For best results, this should be used with prefer_nd_tiling. tile_reductions: bool = False # Codegen matmul natively with tl.dot without using a template. # This option makes Inductor generate matrix multiplication from scratch, # instead of calling predefined Triton templates (mm, bmm, mm_plus_mm). # Compile time may be longer because native matmul benchmarks more Triton configs # than regular pointwise or reduction kernels. # Native matmul often aggressively fuses operations around the matrix multiply, # which can make it faster or slower depending on your program. # # This option takes priority over other GEMM implementations. If Inductor determines # that a matmul can be generated, it will always generate it with native_matmul. # That means optimized kernels such as decompose_k or persistent_tma_matmul will # not be called when this option is enabled. # # Note: Native matmul does not currently support block pointers or TMA matmul. # If both native_matmul and (use_block_ptr or enable_persistent_tma_matmul) are enabled, # an error will be thrown. native_matmul: bool = False # should we stop a fusion to allow better tiling? tiling_prevents_pointwise_fusion = True tiling_prevents_reduction_fusion = True # should we give different names to kernels # Note: This is orthogonal to descriptive_names - this is deciding whether # our triton kernel names should all be `triton_` (to maximize caching) or # whether they should be unique. unique_kernel_names = ( os.environ.get("TORCHINDUCTOR_UNIQUE_KERNEL_NAMES", "1") == "1" ) # similar to the option above, but this is specific to user defined kernels, # while unique_kernel_name is for kernels generated by inductor. # We have this option because sometimes we reuse user's kernel code with different # configs which would result in the same name. # Note: This MODIFIES the user's kernel function name within inductor phase. 
unique_user_kernel_names = ( os.environ.get("TORCHINDUCTOR_UNIQUE_USER_KERNEL_NAMES", "0") == "1" ) # should we put op names in kernel names # "torch": Maps to the fx op in the Dynamo graph (module name, method name, etc.) # "original_aten": Maps to the highest-level aten op (i.e. pre-decompositions) # "inductor_node": Maps to the node name in the FX graph passed to Inductor descriptive_names: Literal["torch", "original_aten", "inductor_node"] = ( "original_aten" ) # use alternate codegen for smaller reductions persistent_reductions = ( os.environ.get("TORCHINDUCTOR_PERSISTENT_REDUCTIONS", "1") == "1" ) # For small output size reductions uses cross thread-block synchronization to gain more parallelism cooperative_reductions = ( os.environ.get("TORCHINDUCTOR_COOPERATIVE_REDUCTIONS", "0") == "1" ) # used for debugging cooperative reduction codegen, always generate cooperative_reductions force_cooperative_reductions = False # 0: disable # 1/True: enable, use tuning to pick between different subkernels # 2: enable, force using persistent reduction (for debugging) # 3: enable, force using non-persistent reduction (for debugging) multi_kernel: Literal[0, 1, 2, 3] = int( os.environ.get("TORCHINDUCTOR_MULTI_KERNEL", "0") ) # type: ignore[assignment] # hint to Triton when arguments are divisible by 16 divisible_by_16 = os.environ.get("TORCHINDUCTOR_DIVISIBLE_BY_16", "1") == "1" # Minimum R0_BLOCK to be used for a TritonSplitScanKernel # NOTE: This also indirectly controls the size of workspace buffer required min_split_scan_rblock = 256 # Store the generated cubin files for cpp wrapper code to load store_cubin = False # the max number of spills we allow for the configs we benchmark. # Setting this to 0 means we skip a config if it spills even a single # register. # Setting it to a larger value allows a config spilling a small amount # of registers being benchmarked. # # NOTE: triton will always report >0 register spills for kernels using sin/cos. 
# (check this issue https://github.com/triton-lang/triton/issues/1756 ) # So far we see a fixed 8 spilled registers for kernels using sin/cos. # Raise the threshold to 16 to be safe. # We should revisit this once we understand more of the source of register spills. spill_threshold: int = 32 if torch.version.hip else 16 # Generate code containing the newer tl.make_block_ptr() API for loads/store use_block_ptr = False # (Experimental) # Generate code using the tl.make_tensor_descriptor() API for loads/store # [Note: TMA API Restrictions] Currently the TMA API requires the following: # - For Nvidia GPUs, the compute capability should be >= 9.0 # - The innermost stride of a descriptor should be 1 # - The size of the block shape in the innermost dimension should load / store # at least 16 bytes. # - Tensors are 16 byte aligned. Enabling this option therefore requires # assume_aligned_inputs to also be enabled # TMA descriptors are only going to be generated if the above conditions # can be satisfied, along with any existing requirements for index expressions use_tensor_descriptor = False # Inject a bug into our relu implementation; useful for testing our repro # extraction and minification functionality. # Valid values: "compile_error", "runtime_error", "accuracy" inject_relu_bug_TESTING_ONLY: Optional[str] = None # Whether to upcast float16 / bfloat16 to float32 in triton codegen (Experimental) codegen_upcast_to_fp32 = True # Whether persistent matmul kernels should be enabled this flag only has effect when on h100 # with a version of triton new enough to support TMA enable_persistent_tma_matmul = ( os.environ.get("ENABLE_PERSISTENT_TMA_MATMUL", "0") == "1" ) # Should TMA store be enable from templates. TODO: Remove once we # can autotune over the result. enable_template_tma_store = os.environ.get("ENABLE_TEMPLATE_TMA_STORE", "0") == "1" # Use epilogue subtiling. We allow disabling it due to limited B200 testing. 
enable_epilogue_subtiling = os.environ.get("ENABLE_EPILOGUE_SUBTILING", "1") == "1" # Skip L1 cache for buffers that are used only once. Disabled by default skip_l1_cache = os.environ.get("TORCHINDUCTOR_SKIP_L1", "0") == "1" # During autotuning, if one of the kernels/configs fails for some reason, # Inductor will usually skip it (and assign its latency to inf). # For testing it's helpful to be able to assert that none of the configs fail. # Note: it may also need to be used with config.compile_threads = 1 disallow_failing_autotune_kernels_TESTING_ONLY = False # specify number of splits to autotune on for decompose_k. 0 disables decompose_k num_decompose_k_splits = int( os.environ.get("TORCHINDUCTOR_NUM_DECOMPOSE_K_SPLITS", "10") ) # specify minimum ratio of K to M AND N in order to autotune on decompose_k. 0 enables # it as an autotuning choice for all matmuls decompose_k_threshold = int( os.environ.get("TORCHINDUCTOR_DECOMPOSE_K_THRESHOLD", "32") ) # Programmatic Dependent Launch improves launch latency on Nvidia Hopper+ devices # If set to true, will generate PDL code on devices that support it. # If set to false, will never generate PDL code. enable_pdl = False mix_order_reduction = ( os.environ.get("TORCHINDUCTOR_MIX_ORDER_REDUCTION", "0" if is_fbcode() else "1") == "1" ) mix_order_reduction_initial_xblock = 1 mix_order_reduction_split_size: Optional[int] = None mix_order_reduction_autotune_split_size = ( os.environ.get("TORCHINDUCTOR_MIX_ORDER_REDUCTION_AUTOTUNE_SPLIT_SIZE", "0") == "1" )
triton
python
optuna__optuna
optuna/exceptions.py
{ "start": 1614, "end": 1765 }
class ____(OptunaError): """Exception for CLI. CLI raises this exception when it receives invalid configuration. """ pass
CLIUsageError
python
pytorch__pytorch
test/inductor/test_ordered_set.py
{ "start": 61635, "end": 64677 }
class ____(TestCase): def test_8420_set_merge(self): # This used to segfault global be_bad, set2, dict2 be_bad = False set1 = {bad_eq()} set2 = {bad_eq() for i in range(75)} be_bad = True self.assertRaises(ZeroDivisionError, set1.update, set2) be_bad = False set1 = {bad_dict_clear()} dict2 = {bad_dict_clear(): None} be_bad = True set1.symmetric_difference_update(dict2) def test_iter_and_mutate(self): # Issue #24581 s = OrderedSet(range(100)) s.clear() s.update(range(100)) si = iter(s) s.clear() a = list(range(100)) s.update(range(100)) list(si) def test_merge_and_mutate(self): class X: def __hash__(self): return hash(0) def __eq__(self, o): other.clear() return False other = OrderedSet() other = {X() for i in range(10)} s = {0} s.update(other) # Application tests (based on David Eppstein's graph recipes ==================================== def powerset(U): """Generates all subsets of a OrderedSet or sequence U.""" U = iter(U) try: x = frozenset([next(U)]) for S in powerset(U): yield S yield S | x except StopIteration: yield frozenset() def cube(n): """Graph of n-dimensional hypercube.""" singletons = [frozenset([x]) for x in range(n)] return dict( # noqa: C404 [(x, frozenset([x ^ s for s in singletons])) for x in powerset(range(n))] ) def linegraph(G): """Graph, the vertices of which are edges of G, with two vertices being adjacent iff the corresponding edges share a vertex.""" L = {} for x in G: for y in G[x]: nx = [frozenset([x, z]) for z in G[x] if z != y] ny = [frozenset([y, z]) for z in G[y] if z != x] L[frozenset([x, y])] = frozenset(nx + ny) return L def faces(G): "Return a OrderedSet of faces in G. 
Where a face is a OrderedSet of vertices on that face" # currently limited to triangles,squares, and pentagons f = OrderedSet() for v1, edges in G.items(): for v2 in edges: for v3 in G[v2]: if v1 == v3: continue if v1 in G[v3]: f.add(frozenset([v1, v2, v3])) else: for v4 in G[v3]: if v4 == v2: continue if v1 in G[v4]: f.add(frozenset([v1, v2, v3, v4])) else: for v5 in G[v4]: if v5 == v3 or v5 == v2: # noqa: SIM109 continue if v1 in G[v5]: f.add(frozenset([v1, v2, v3, v4, v5])) return f
TestWeirdBugs
python
dagster-io__dagster
python_modules/libraries/dagster-sigma/dagster_sigma/translator.py
{ "start": 3630, "end": 4137 }
class ____: """A record representing a Sigma dataset and the Sigma organization data.""" dataset: "SigmaDataset" organization_data: "SigmaOrganizationData" @property def properties(self) -> dict[str, Any]: return self.dataset.properties @property def columns(self) -> AbstractSet[str]: return self.dataset.columns @property def inputs(self) -> AbstractSet[str]: return self.dataset.inputs @whitelist_for_serdes @record
SigmaDatasetTranslatorData
python
pytorch__pytorch
torchgen/dest/lazy_ir.py
{ "start": 5871, "end": 12516 }
class ____(ABC): backend_index: BackendIndex backend_name: str node_base: str use_lazy_shape: bool @method_with_native_function def __call__(self, f: NativeFunctionsGroup | NativeFunction) -> list[str]: func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func metadata = self.backend_index.get_kernel( f.functional if isinstance(f, NativeFunctionsGroup) else f ) schema = LazyIrSchema( func, symint=metadata is not None and metadata.supports_symint() ) return self.gen(schema) # there is no lowering functionality generated unless this IR base class is subclassed and # implemented as a backend-specific node def lowering_function(self, schema: LazyIrSchema) -> str: return "" def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str: return "" def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str: return f"""bool CanBeReused({node_ctor_args}) const {{ return false; }}""" def node_base_ctor_call(self, schema: LazyIrSchema) -> str: value_args = schema.filtered_args(values=True, scalars=False) # backends can customize the way the node base class constructor is called, # as long as all of its arguments can be generated from information available from the schema base_ctor_value_args_list = [] for arg in value_args: if isinstance(arg.lazy_type, (BaseCType, VectorCType)): base_ctor_value_args_list.append(f"{arg.name}") elif isinstance(arg.lazy_type, OptionalCType): base_ctor_value_args_list.append(f"{arg.name}.value_or(kNullValue)") else: raise AssertionError( f"Unsupported type ({arg.lazy_type}) - add support if necessary" ) base_ctor_value_args = ", ".join(base_ctor_value_args_list) scalar_args = schema.filtered_args(values=False, scalars=True) # Shape construction. 
# Conditionally build shape depending on specified shape property if schema.properties.ShapePrecompute: shape_ctor_arg = "std::move(shapes)," elif schema.properties.ShapeCompute: shape_args = [a.name for a in value_args] shape_args.extend(a.name for a in scalar_args) shape_ctor_arg = f"compute_shape_{schema.name}({', '.join(shape_args)})," elif schema.properties.ShapeCache: shape_args = [f"operand({i})" for i in range(len(value_args))] shape_args.extend(a.name for a in scalar_args) shape_ctor_arg = f"[&](){{ return compute_shape_{schema.name}({', '.join(shape_args)})[0]; }}," else: shape_ctor_arg = "" scalar_hashes = ", ".join(f"{a.name}" for a in scalar_args) return f"""{self.node_base}( {schema.node_name}::ClassOpKind(), OpList{{{base_ctor_value_args}}}, {shape_ctor_arg} /* num_outputs */ {len(schema.returns)}, torch::lazy::MHash({scalar_hashes}))""" def gen(self, schema: LazyIrSchema) -> list[str]: opkind = schema.opkind or aten_symbol(schema) # for now, we just want one IR class decl and soon after also the method defs # and we use the functional version not out/inplace. all_args = schema.filtered_args() scalar_args = schema.filtered_args(values=False, scalars=True) ctor_args = [f"const {i.lazy_type.cpp_type()}& {i.name}" for i in all_args] reuse_ctor_args = ", ".join(ctor_args) if self.use_lazy_shape and schema.properties.ShapePrecompute: ctor_args.append("std::vector<torch::lazy::Shape>&& shapes") node_ctor_args = ", ".join(ctor_args) scalar_initializers = ",\n ".join( [ # This code is just special casing the mapping from string_view -> strings f"{a.name}({a.name}.has_value() ? 
::std::make_optional(std::string(*{a.name})) : ::std::nullopt)" if a.lazy_type.cpp_type() == "::std::optional<c10::string_view>" else f"{a.name}({a.name})" for a in scalar_args ] ) if len(scalar_initializers): scalar_initializers = f",\n {scalar_initializers}" scalar_decls = "\n ".join( [ f"std::string {a.name};" if a.lazy_type.cpp_type() == "c10::string_view" else f"::std::optional<std::string> {a.name};" if a.lazy_type.cpp_type() == "::std::optional<c10::string_view>" else f"{a.lazy_type.cpp_type()} {a.name};" for a in scalar_args ] ) optional_values = [ arg.name for arg in schema.filtered_args(values=True, scalars=False) if isinstance(arg.lazy_type, OptionalCType) ] has_optional_decls = "\n ".join( [f"bool has_{value}: 1;" for value in optional_values] ) has_optional_defs = "\n ".join( [f"has_{value} = !!{value};" for value in optional_values] ) members_to_string = [] for arg in scalar_args: if isinstance(arg.lazy_type, OptionalCType): value = f"{arg.name}.value()" if arg.is_generator: value = '"torch.Generator()"' members_to_string.append( f"""if ({arg.name}.has_value()) {{ ss << ", {arg.name}=" << {value}; }} else {{ ss << ", {arg.name}=null"; }}""" ) else: members_to_string.append(f'ss << ", {arg.name}=" << {arg.name};') members_to_string_str = "\n ".join(members_to_string) return [ f"""\ class {schema.node_name} : public {self.node_base} {{ public: static torch::lazy::OpKind ClassOpKind() {{ return torch::lazy::OpKind({opkind}); }} {schema.node_name}({node_ctor_args}) : {self.node_base_ctor_call(schema)}{scalar_initializers} {{ {has_optional_defs} }} std::string ToString() const override {{ std::stringstream ss; ss << {self.node_base}::ToString(); {members_to_string_str} return ss.str(); }} {self.create_function(schema, reuse_ctor_args)} {self.can_be_reused_function(schema, reuse_ctor_args)} {self.lowering_function(schema)} {scalar_decls} {has_optional_decls} }}; """, ] @dataclass(frozen=True)
GenLazyIR
python
mlflow__mlflow
mlflow/models/resources.py
{ "start": 537, "end": 1623 }
class ____(ABC): """ Base class for defining the resources needed to serve a model. Args: type (ResourceType): The resource type. target_uri (str): The target URI where these resources are hosted. """ @property @abstractmethod def type(self) -> ResourceType: """ The resource type (must be defined by subclasses). """ @property @abstractmethod def target_uri(self) -> str: """ The target URI where the resource is hosted (must be defined by subclasses). """ @abstractmethod def to_dict(self): """ Convert the resource to a dictionary. Subclasses must implement this method. """ @classmethod @abstractmethod def from_dict(cls, data: dict[str, str]): """ Convert the dictionary to a Resource. Subclasses must implement this method. """ def __eq__(self, other: Any): if not isinstance(other, Resource): return False return self.to_dict() == other.to_dict()
Resource
python
ray-project__ray
release/microbenchmark/experimental/compiled_graph_gpu_microbenchmark.py
{ "start": 1498, "end": 2021 }
class ____: def __init__(self): self.device = torch_utils.get_devices()[0] def send(self, shape, dtype, _): t = torch.ones(shape, dtype=dtype, device=self.device) * 1 return t def recv(self, tensor): # This benchmark tests the overhead of sending a tensor between # actors. To minimize the overhead of shared memory transfer, # we return only a byte string. assert tensor.device == self.device return b"x" @ray.remote(num_gpus=1)
TorchTensorWorker
python
allegroai__clearml
clearml/automation/hpbandster/bandster.py
{ "start": 4638, "end": 18321 }
class ____(SearchStrategy, RandomSeed): def __init__( self, base_task_id: str, hyper_parameters: Sequence[Parameter], objective_metric: Objective, execution_queue: str, num_concurrent_workers: int, min_iteration_per_job: Optional[int], max_iteration_per_job: Optional[int], total_max_jobs: Optional[int], pool_period_min: float = 2.0, time_limit_per_job: Optional[float] = None, compute_time_limit: Optional[float] = None, local_port: int = 9090, **bohb_kwargs: Any, ) -> None: """ Initialize a BOHB search strategy optimizer BOHB performs robust and efficient hyperparameter optimization at scale by combining the speed of Hyperband searches with the guidance and guarantees of convergence of Bayesian Optimization. Instead of sampling new configurations at random, BOHB uses kernel density estimators to select promising candidates. .. code-block:: For reference: @InProceedings{falkner-icml-18, title = {{BOHB}: Robust and Efficient Hyperparameter Optimization at Scale}, author = {Falkner, Stefan and Klein, Aaron and Hutter, Frank}, booktitle = {Proceedings of the 35th International Conference on Machine Learning}, pages = {1436--1445}, year = {2018}, } :param str base_task_id: Task ID (str) :param list hyper_parameters: list of Parameter objects to optimize over :param Objective objective_metric: Objective metric to maximize / minimize :param str execution_queue: execution queue to use for launching Tasks (experiments). :param int num_concurrent_workers: Limit number of concurrent running Tasks (machines) :param int min_iteration_per_job: minimum number of iterations for a job to run. 'iterations' are the reported iterations for the specified objective, not the maximum reported iteration of the Task. :param int max_iteration_per_job: number of iteration per job 'iterations' are the reported iterations for the specified objective, not the maximum reported iteration of the Task. :param int total_max_jobs: total maximum job for the optimization process. 
Must be provided in order to calculate the total budget for the optimization process. The total budget is measured by "iterations" (see above) and will be set to `max_iteration_per_job * total_max_jobs` This means more than total_max_jobs could be created, as long as the cumulative iterations (summed over all created jobs) will not exceed `max_iteration_per_job * total_max_jobs` :param float pool_period_min: time in minutes between two consecutive pools :param float time_limit_per_job: Optional, maximum execution time per single job in minutes, when time limit is exceeded job is aborted :param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded, all jobs aborted. (Optional) :param int local_port: default port 9090 tcp, this is a must for the BOHB workers to communicate, even locally. :param bohb_kwargs: arguments passed directly to the BOHB object """ if not max_iteration_per_job or not min_iteration_per_job or not total_max_jobs: raise ValueError( "OptimizerBOHB is missing a defined budget.\n" "The following arguments must be defined: " "max_iteration_per_job, min_iteration_per_job, total_max_jobs.\n" "Maximum optimization budget is: max_iteration_per_job * total_max_jobs\n" ) super(OptimizerBOHB, self).__init__( base_task_id=base_task_id, hyper_parameters=hyper_parameters, objective_metric=objective_metric, execution_queue=execution_queue, num_concurrent_workers=num_concurrent_workers, pool_period_min=pool_period_min, time_limit_per_job=time_limit_per_job, compute_time_limit=compute_time_limit, max_iteration_per_job=max_iteration_per_job, min_iteration_per_job=min_iteration_per_job, total_max_jobs=total_max_jobs, ) self._max_iteration_per_job = max_iteration_per_job self._min_iteration_per_job = min_iteration_per_job verified_bohb_kwargs = [ "eta", "min_budget", "max_budget", "min_points_in_model", "top_n_percent", "num_samples", "random_fraction", "bandwidth_factor", "min_bandwidth", ] self._bohb_kwargs = dict((k, v) for k, 
v in bohb_kwargs.items() if k in verified_bohb_kwargs) self._param_iterator = None self._namespace = None self._bohb = None self._res = None self._nameserver_port = local_port def set_optimization_args( self, eta: float = 3, min_budget: Optional[float] = None, max_budget: Optional[float] = None, min_points_in_model: Optional[int] = None, top_n_percent: Optional[int] = 15, num_samples: Optional[int] = None, random_fraction: Optional[float] = 1 / 3.0, bandwidth_factor: Optional[float] = 3, min_bandwidth: Optional[float] = 1e-3, ) -> None: """ Defaults copied from BOHB constructor, see details in BOHB.__init__ BOHB performs robust and efficient hyperparameter optimization at scale by combining the speed of Hyperband searches with the guidance and guarantees of convergence of Bayesian Optimization. Instead of sampling new configurations at random, BOHB uses kernel density estimators to select promising candidates. .. code-block:: For reference: @InProceedings{falkner-icml-18, title = {{BOHB}: Robust and Efficient Hyperparameter Optimization at Scale}, author = {Falkner, Stefan and Klein, Aaron and Hutter, Frank}, booktitle = {Proceedings of the 35th International Conference on Machine Learning}, pages = {1436--1445}, year = {2018}, } :param eta: float (3) In each iteration, a complete run of sequential halving is executed. In it, after evaluating each configuration on the same subset size, only a fraction of 1/eta of them 'advances' to the next round. Must be greater or equal to 2. :param min_budget: float (0.01) The smallest budget to consider. Needs to be positive! :param max_budget: float (1) The largest budget to consider. Needs to be larger than min_budget! The budgets will be geometrically distributed :math:`a^2 + b^2 = c^2 /sim /eta^k` for :math:`k/in [0, 1, ... , num/_subsets - 1]`. :param min_points_in_model: int (None) number of observations to start building a KDE. Default 'None' means dim+1, the bare minimum. 
:param top_n_percent: int (15) percentage ( between 1 and 99, default 15) of the observations that are considered good. :param num_samples: int (64) number of samples to optimize EI (default 64) :param random_fraction: float (1/3.) fraction of purely random configurations that are sampled from the prior without the model. :param bandwidth_factor: float (3.) to encourage diversity, the points proposed to optimize EI, are sampled from a 'widened' KDE where the bandwidth is multiplied by this factor (default: 3) :param min_bandwidth: float (1e-3) to keep diversity, even when all (good) samples have the same value for one of the parameters, a minimum bandwidth (Default: 1e-3) is used instead of zero. """ if min_budget: self._bohb_kwargs["min_budget"] = min_budget if max_budget: self._bohb_kwargs["max_budget"] = max_budget if num_samples: self._bohb_kwargs["num_samples"] = num_samples self._bohb_kwargs["eta"] = eta self._bohb_kwargs["min_points_in_model"] = min_points_in_model self._bohb_kwargs["top_n_percent"] = top_n_percent self._bohb_kwargs["random_fraction"] = random_fraction self._bohb_kwargs["bandwidth_factor"] = bandwidth_factor self._bohb_kwargs["min_bandwidth"] = min_bandwidth def start(self) -> None: """ Start the Optimizer controller function loop() If the calling process is stopped, the controller will stop as well. .. important:: This function returns only after optimization is completed or :meth:`stop` was called. 
""" # Step 1: Start a NameServer fake_run_id = "OptimizerBOHB_{}".format(time()) # default port is 9090, we must have one, this is how BOHB workers communicate (even locally) self._namespace = hpns.NameServer(run_id=fake_run_id, host="127.0.0.1", port=self._nameserver_port) self._namespace.start() # we have to scale the budget to the iterations per job, otherwise numbers might be too high budget_iteration_scale = self._max_iteration_per_job # Step 2: Start the workers workers = [] for i in range(self._num_concurrent_workers): w = _TrainsBandsterWorker( optimizer=self, sleep_interval=int(self.pool_period_minutes * 60), budget_iteration_scale=budget_iteration_scale, base_task_id=self._base_task_id, objective=self._objective_metric.objectives[0], queue_name=self._execution_queue, nameserver="127.0.0.1", nameserver_port=self._nameserver_port, run_id=fake_run_id, id=i, ) w.run(background=True) workers.append(w) # Step 3: Run an optimizer self._bohb = BOHB( configspace=self._convert_hyper_parameters_to_cs(), run_id=fake_run_id, # num_samples=self.total_max_jobs, # will be set by self._bohb_kwargs min_budget=float(self._min_iteration_per_job) / float(self._max_iteration_per_job), **self._bohb_kwargs, ) # scale the budget according to the successive halving iterations if self.budget.jobs.limit: self.budget.jobs.limit *= len(self._bohb.budgets) if self.budget.iterations.limit: self.budget.iterations.limit *= len(self._bohb.budgets) # start optimization self._res = self._bohb.run(n_iterations=self.total_max_jobs, min_n_workers=self._num_concurrent_workers) # Step 4: if we get here, Shutdown self.stop() def stop(self) -> None: """ Stop the current running optimization loop, Called from a different thread than the :meth:`start`. """ # After the optimizer run, we must shutdown the master and the nameserver. 
self._bohb.shutdown(shutdown_workers=True) # no need to specifically shutdown the name server, hopefully pyro will do that # self._namespace.shutdown() if not self._res: return # Step 5: Analysis id2config = self._res.get_id2config_mapping() incumbent = self._res.get_incumbent_id() all_runs = self._res.get_all_runs() # Step 6: Print Analysis print("Best found configuration:", id2config[incumbent]["config"]) print("A total of {} unique configurations where sampled.".format(len(id2config.keys()))) print("A total of {} runs where executed.".format(len(self._res.get_all_runs()))) print( "Total budget corresponds to {:.1f} full function evaluations.".format( sum([r.budget for r in all_runs]) / self._bohb_kwargs.get("max_budget", 1.0) ) ) print( "The run took {:.1f} seconds to complete.".format( all_runs[-1].time_stamps["finished"] - all_runs[0].time_stamps["started"] ) ) def _convert_hyper_parameters_to_cs(self) -> CS.ConfigurationSpace: cs = CS.ConfigurationSpace(seed=self._seed) for p in self._hyper_parameters: if isinstance(p, UniformParameterRange): hp = CSH.UniformFloatHyperparameter( p.name, lower=p.min_value, upper=p.max_value, log=False, q=p.step_size, ) elif isinstance(p, UniformIntegerParameterRange): hp = CSH.UniformIntegerHyperparameter( p.name, lower=p.min_value, upper=p.max_value, log=False, q=p.step_size, ) elif isinstance(p, DiscreteParameterRange): hp = CSH.CategoricalHyperparameter(p.name, choices=p.values) else: raise ValueError("HyperParameter type {} not supported yet with OptimizerBOHB".format(type(p))) cs.add_hyperparameter(hp) return cs
OptimizerBOHB
python
doocs__leetcode
solution/3100-3199/3165.Maximum Sum of Subsequence With Non-adjacent Elements/Solution.py
{ "start": 263, "end": 1721 }
class ____: __slots__ = "tr" def __init__(self, n: int): self.tr: List[Node | None] = [None] * (n << 2) self.build(1, 1, n) def build(self, u: int, l: int, r: int): self.tr[u] = Node(l, r) if l == r: return mid = (l + r) >> 1 self.build(u << 1, l, mid) self.build(u << 1 | 1, mid + 1, r) def query(self, u: int, l: int, r: int) -> int: if self.tr[u].l >= l and self.tr[u].r <= r: return self.tr[u].s11 mid = (self.tr[u].l + self.tr[u].r) >> 1 ans = 0 if r <= mid: ans = self.query(u << 1, l, r) if l > mid: ans = max(ans, self.query(u << 1 | 1, l, r)) return ans def pushup(self, u: int): left, right = self.tr[u << 1], self.tr[u << 1 | 1] self.tr[u].s00 = max(left.s00 + right.s10, left.s01 + right.s00) self.tr[u].s01 = max(left.s00 + right.s11, left.s01 + right.s01) self.tr[u].s10 = max(left.s10 + right.s10, left.s11 + right.s00) self.tr[u].s11 = max(left.s10 + right.s11, left.s11 + right.s01) def modify(self, u: int, x: int, v: int): if self.tr[u].l == self.tr[u].r: self.tr[u].s11 = max(0, v) return mid = (self.tr[u].l + self.tr[u].r) >> 1 if x <= mid: self.modify(u << 1, x, v) else: self.modify(u << 1 | 1, x, v) self.pushup(u)
SegmentTree
python
PrefectHQ__prefect
src/prefect/exceptions.py
{ "start": 11878, "end": 11984 }
class ____(PrefectException): """ Raised when a configuration is invalid. """
ConfigurationError
python
pypa__pipenv
pipenv/patched/pip/_vendor/rich/syntax.py
{ "start": 7705, "end": 35896 }
class ____(JupyterMixin): """Construct a Syntax object to render syntax highlighted code. Args: code (str): Code to highlight. lexer (Lexer | str): Lexer to use (see https://pygments.org/docs/lexers/) theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai". dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False. line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False. start_line (int, optional): Starting number for line numbers. Defaults to 1. line_range (Tuple[int | None, int | None], optional): If given should be a tuple of the start and end line to render. A value of None in the tuple indicates the range is open in that direction. highlight_lines (Set[int]): A set of line numbers to highlight. code_width: Width of code to render (not including line numbers), or ``None`` to use all available width. tab_size (int, optional): Size of tabs. Defaults to 4. word_wrap (bool, optional): Enable word wrapping. background_color (str, optional): Optional background color, or None to use theme color. Defaults to None. indent_guides (bool, optional): Show indent guides. Defaults to False. padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding). 
""" _pygments_style_class: Type[PygmentsStyle] _theme: SyntaxTheme @classmethod def get_theme(cls, name: Union[str, SyntaxTheme]) -> SyntaxTheme: """Get a syntax theme instance.""" if isinstance(name, SyntaxTheme): return name theme: SyntaxTheme if name in RICH_SYNTAX_THEMES: theme = ANSISyntaxTheme(RICH_SYNTAX_THEMES[name]) else: theme = PygmentsSyntaxTheme(name) return theme def __init__( self, code: str, lexer: Union[Lexer, str], *, theme: Union[str, SyntaxTheme] = DEFAULT_THEME, dedent: bool = False, line_numbers: bool = False, start_line: int = 1, line_range: Optional[Tuple[Optional[int], Optional[int]]] = None, highlight_lines: Optional[Set[int]] = None, code_width: Optional[int] = None, tab_size: int = 4, word_wrap: bool = False, background_color: Optional[str] = None, indent_guides: bool = False, padding: PaddingDimensions = 0, ) -> None: self.code = code self._lexer = lexer self.dedent = dedent self.line_numbers = line_numbers self.start_line = start_line self.line_range = line_range self.highlight_lines = highlight_lines or set() self.code_width = code_width self.tab_size = tab_size self.word_wrap = word_wrap self.background_color = background_color self.background_style = ( Style(bgcolor=background_color) if background_color else Style() ) self.indent_guides = indent_guides self.padding = padding self._theme = self.get_theme(theme) self._stylized_ranges: List[_SyntaxHighlightRange] = [] @classmethod def from_path( cls, path: str, encoding: str = "utf-8", lexer: Optional[Union[Lexer, str]] = None, theme: Union[str, SyntaxTheme] = DEFAULT_THEME, dedent: bool = False, line_numbers: bool = False, line_range: Optional[Tuple[int, int]] = None, start_line: int = 1, highlight_lines: Optional[Set[int]] = None, code_width: Optional[int] = None, tab_size: int = 4, word_wrap: bool = False, background_color: Optional[str] = None, indent_guides: bool = False, padding: PaddingDimensions = 0, ) -> "Syntax": """Construct a Syntax object from a file. 
Args: path (str): Path to file to highlight. encoding (str): Encoding of file. lexer (str | Lexer, optional): Lexer to use. If None, lexer will be auto-detected from path/file content. theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "emacs". dedent (bool, optional): Enable stripping of initial whitespace. Defaults to True. line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False. start_line (int, optional): Starting number for line numbers. Defaults to 1. line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render. highlight_lines (Set[int]): A set of line numbers to highlight. code_width: Width of code to render (not including line numbers), or ``None`` to use all available width. tab_size (int, optional): Size of tabs. Defaults to 4. word_wrap (bool, optional): Enable word wrapping of code. background_color (str, optional): Optional background color, or None to use theme color. Defaults to None. indent_guides (bool, optional): Show indent guides. Defaults to False. padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding). Returns: [Syntax]: A Syntax object that may be printed to the console """ code = Path(path).read_text(encoding=encoding) if not lexer: lexer = cls.guess_lexer(path, code=code) return cls( code, lexer, theme=theme, dedent=dedent, line_numbers=line_numbers, line_range=line_range, start_line=start_line, highlight_lines=highlight_lines, code_width=code_width, tab_size=tab_size, word_wrap=word_wrap, background_color=background_color, indent_guides=indent_guides, padding=padding, ) @classmethod def guess_lexer(cls, path: str, code: Optional[str] = None) -> str: """Guess the alias of the Pygments lexer to use based on a path and an optional string of code. 
If code is supplied, it will use a combination of the code and the filename to determine the best lexer to use. For example, if the file is ``index.html`` and the file contains Django templating syntax, then "html+django" will be returned. If the file is ``index.html``, and no templating language is used, the "html" lexer will be used. If no string of code is supplied, the lexer will be chosen based on the file extension.. Args: path (AnyStr): The path to the file containing the code you wish to know the lexer for. code (str, optional): Optional string of code that will be used as a fallback if no lexer is found for the supplied path. Returns: str: The name of the Pygments lexer that best matches the supplied path/code. """ lexer: Optional[Lexer] = None lexer_name = "default" if code: try: lexer = guess_lexer_for_filename(path, code) except ClassNotFound: pass if not lexer: try: _, ext = os.path.splitext(path) if ext: extension = ext.lstrip(".").lower() lexer = get_lexer_by_name(extension) except ClassNotFound: pass if lexer: if lexer.aliases: lexer_name = lexer.aliases[0] else: lexer_name = lexer.name return lexer_name def _get_base_style(self) -> Style: """Get the base style.""" default_style = self._theme.get_background_style() + self.background_style return default_style def _get_token_color(self, token_type: TokenType) -> Optional[Color]: """Get a color (if any) for the given token. Args: token_type (TokenType): A token type tuple from Pygments. Returns: Optional[Color]: Color from theme, or None for no color. """ style = self._theme.get_style_for_token(token_type) return style.color @property def lexer(self) -> Optional[Lexer]: """The lexer for this syntax, or None if no lexer was found. Tries to find the lexer by name if a string was passed to the constructor. 
""" if isinstance(self._lexer, Lexer): return self._lexer try: return get_lexer_by_name( self._lexer, stripnl=False, ensurenl=True, tabsize=self.tab_size, ) except ClassNotFound: return None @property def default_lexer(self) -> Lexer: """A Pygments Lexer to use if one is not specified or invalid.""" return get_lexer_by_name( "text", stripnl=False, ensurenl=True, tabsize=self.tab_size, ) def highlight( self, code: str, line_range: Optional[Tuple[Optional[int], Optional[int]]] = None, ) -> Text: """Highlight code and return a Text instance. Args: code (str): Code to highlight. line_range(Tuple[int, int], optional): Optional line range to highlight. Returns: Text: A text instance containing highlighted syntax. """ base_style = self._get_base_style() justify: JustifyMethod = ( "default" if base_style.transparent_background else "left" ) text = Text( justify=justify, style=base_style, tab_size=self.tab_size, no_wrap=not self.word_wrap, ) _get_theme_style = self._theme.get_style_for_token lexer = self.lexer or self.default_lexer if lexer is None: text.append(code) else: if line_range: # More complicated path to only stylize a portion of the code # This speeds up further operations as there are less spans to process line_start, line_end = line_range def line_tokenize() -> Iterable[Tuple[Any, str]]: """Split tokens to one per line.""" assert lexer # required to make MyPy happy - we know lexer is not None at this point for token_type, token in lexer.get_tokens(code): while token: line_token, new_line, token = token.partition("\n") yield token_type, line_token + new_line def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]: """Convert tokens to spans.""" tokens = iter(line_tokenize()) line_no = 0 _line_start = line_start - 1 if line_start else 0 # Skip over tokens until line start while line_no < _line_start: try: _token_type, token = next(tokens) except StopIteration: break yield (token, None) if token.endswith("\n"): line_no += 1 # Generate spans until line end 
for token_type, token in tokens: yield (token, _get_theme_style(token_type)) if token.endswith("\n"): line_no += 1 if line_end and line_no >= line_end: break text.append_tokens(tokens_to_spans()) else: text.append_tokens( (token, _get_theme_style(token_type)) for token_type, token in lexer.get_tokens(code) ) if self.background_color is not None: text.stylize(f"on {self.background_color}") if self._stylized_ranges: self._apply_stylized_ranges(text) return text def stylize_range( self, style: StyleType, start: SyntaxPosition, end: SyntaxPosition, style_before: bool = False, ) -> None: """ Adds a custom style on a part of the code, that will be applied to the syntax display when it's rendered. Line numbers are 1-based, while column indexes are 0-based. Args: style (StyleType): The style to apply. start (Tuple[int, int]): The start of the range, in the form `[line number, column index]`. end (Tuple[int, int]): The end of the range, in the form `[line number, column index]`. style_before (bool): Apply the style before any existing styles. 
""" self._stylized_ranges.append( _SyntaxHighlightRange(style, start, end, style_before) ) def _get_line_numbers_color(self, blend: float = 0.3) -> Color: background_style = self._theme.get_background_style() + self.background_style background_color = background_style.bgcolor if background_color is None or background_color.is_system_defined: return Color.default() foreground_color = self._get_token_color(Token.Text) if foreground_color is None or foreground_color.is_system_defined: return foreground_color or Color.default() new_color = blend_rgb( background_color.get_truecolor(), foreground_color.get_truecolor(), cross_fade=blend, ) return Color.from_triplet(new_color) @property def _numbers_column_width(self) -> int: """Get the number of characters used to render the numbers column.""" column_width = 0 if self.line_numbers: column_width = ( len(str(self.start_line + self.code.count("\n"))) + NUMBERS_COLUMN_DEFAULT_PADDING ) return column_width def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]: """Get background, number, and highlight styles for line numbers.""" background_style = self._get_base_style() if background_style.transparent_background: return Style.null(), Style(dim=True), Style.null() if console.color_system in ("256", "truecolor"): number_style = Style.chain( background_style, self._theme.get_style_for_token(Token.Text), Style(color=self._get_line_numbers_color()), self.background_style, ) highlight_number_style = Style.chain( background_style, self._theme.get_style_for_token(Token.Text), Style(bold=True, color=self._get_line_numbers_color(0.9)), self.background_style, ) else: number_style = background_style + Style(dim=True) highlight_number_style = background_style + Style(dim=False) return background_style, number_style, highlight_number_style def __rich_measure__( self, console: "Console", options: "ConsoleOptions" ) -> "Measurement": _, right, _, left = Padding.unpack(self.padding) padding = left + right if 
self.code_width is not None: width = self.code_width + self._numbers_column_width + padding + 1 return Measurement(self._numbers_column_width, width) lines = self.code.splitlines() width = ( self._numbers_column_width + padding + (max(cell_len(line) for line in lines) if lines else 0) ) if self.line_numbers: width += 1 return Measurement(self._numbers_column_width, width) def __rich_console__( self, console: Console, options: ConsoleOptions ) -> RenderResult: segments = Segments(self._get_syntax(console, options)) if self.padding: yield Padding(segments, style=self._get_base_style(), pad=self.padding) else: yield segments def _get_syntax( self, console: Console, options: ConsoleOptions, ) -> Iterable[Segment]: """ Get the Segments for the Syntax object, excluding any vertical/horizontal padding """ transparent_background = self._get_base_style().transparent_background code_width = ( ( (options.max_width - self._numbers_column_width - 1) if self.line_numbers else options.max_width ) if self.code_width is None else self.code_width ) ends_on_nl, processed_code = self._process_code(self.code) text = self.highlight(processed_code, self.line_range) if not self.line_numbers and not self.word_wrap and not self.line_range: if not ends_on_nl: text.remove_suffix("\n") # Simple case of just rendering text style = ( self._get_base_style() + self._theme.get_style_for_token(Comment) + Style(dim=True) + self.background_style ) if self.indent_guides and not options.ascii_only: text = text.with_indent_guides(self.tab_size, style=style) text.overflow = "crop" if style.transparent_background: yield from console.render( text, options=options.update(width=code_width) ) else: syntax_lines = console.render_lines( text, options.update(width=code_width, height=None, justify="left"), style=self.background_style, pad=True, new_lines=True, ) for syntax_line in syntax_lines: yield from syntax_line return start_line, end_line = self.line_range or (None, None) line_offset = 0 if start_line: 
line_offset = max(0, start_line - 1) lines: Union[List[Text], Lines] = text.split("\n", allow_blank=ends_on_nl) if self.line_range: if line_offset > len(lines): return lines = lines[line_offset:end_line] if self.indent_guides and not options.ascii_only: style = ( self._get_base_style() + self._theme.get_style_for_token(Comment) + Style(dim=True) + self.background_style ) lines = ( Text("\n") .join(lines) .with_indent_guides(self.tab_size, style=style + Style(italic=False)) .split("\n", allow_blank=True) ) numbers_column_width = self._numbers_column_width render_options = options.update(width=code_width) highlight_line = self.highlight_lines.__contains__ _Segment = Segment new_line = _Segment("\n") line_pointer = "> " if options.legacy_windows else "❱ " ( background_style, number_style, highlight_number_style, ) = self._get_number_styles(console) for line_no, line in enumerate(lines, self.start_line + line_offset): if self.word_wrap: wrapped_lines = console.render_lines( line, render_options.update(height=None, justify="left"), style=background_style, pad=not transparent_background, ) else: segments = list(line.render(console, end="")) if options.no_wrap: wrapped_lines = [segments] else: wrapped_lines = [ _Segment.adjust_line_length( segments, render_options.max_width, style=background_style, pad=not transparent_background, ) ] if self.line_numbers: wrapped_line_left_pad = _Segment( " " * numbers_column_width + " ", background_style ) for first, wrapped_line in loop_first(wrapped_lines): if first: line_column = str(line_no).rjust(numbers_column_width - 2) + " " if highlight_line(line_no): yield _Segment(line_pointer, Style(color="red")) yield _Segment(line_column, highlight_number_style) else: yield _Segment(" ", highlight_number_style) yield _Segment(line_column, number_style) else: yield wrapped_line_left_pad yield from wrapped_line yield new_line else: for wrapped_line in wrapped_lines: yield from wrapped_line yield new_line def _apply_stylized_ranges(self, text: 
Text) -> None: """ Apply stylized ranges to a text instance, using the given code to determine the right portion to apply the style to. Args: text (Text): Text instance to apply the style to. """ code = text.plain newlines_offsets = [ # Let's add outer boundaries at each side of the list: 0, # N.B. using "\n" here is much faster than using metacharacters such as "^" or "\Z": *[ match.start() + 1 for match in re.finditer("\n", code, flags=re.MULTILINE) ], len(code) + 1, ] for stylized_range in self._stylized_ranges: start = _get_code_index_for_syntax_position( newlines_offsets, stylized_range.start ) end = _get_code_index_for_syntax_position( newlines_offsets, stylized_range.end ) if start is not None and end is not None: if stylized_range.style_before: text.stylize_before(stylized_range.style, start, end) else: text.stylize(stylized_range.style, start, end) def _process_code(self, code: str) -> Tuple[bool, str]: """ Applies various processing to a raw code string (normalises it so it always ends with a line return, dedents it if necessary, etc.) Args: code (str): The raw code string to process Returns: Tuple[bool, str]: the boolean indicates whether the raw code ends with a line return, while the string is the processed code. """ ends_on_nl = code.endswith("\n") processed_code = code if ends_on_nl else code + "\n" processed_code = ( textwrap.dedent(processed_code) if self.dedent else processed_code ) processed_code = processed_code.expandtabs(self.tab_size) return ends_on_nl, processed_code def _get_code_index_for_syntax_position( newlines_offsets: Sequence[int], position: SyntaxPosition ) -> Optional[int]: """ Returns the index of the code string for the given positions. Args: newlines_offsets (Sequence[int]): The offset of each newline character found in the code snippet. position (SyntaxPosition): The position to search for. 
Returns: Optional[int]: The index of the code string for this position, or `None` if the given position's line number is out of range (if it's the column that is out of range we silently clamp its value so that it reaches the end of the line) """ lines_count = len(newlines_offsets) line_number, column_index = position if line_number > lines_count or len(newlines_offsets) < (line_number + 1): return None # `line_number` is out of range line_index = line_number - 1 line_length = newlines_offsets[line_index + 1] - newlines_offsets[line_index] - 1 # If `column_index` is out of range: let's silently clamp it: column_index = min(line_length, column_index) return newlines_offsets[line_index] + column_index if __name__ == "__main__": # pragma: no cover import argparse import sys parser = argparse.ArgumentParser( description="Render syntax to the console with Rich" ) parser.add_argument( "path", metavar="PATH", help="path to file, or - for stdin", ) parser.add_argument( "-c", "--force-color", dest="force_color", action="store_true", default=None, help="force color for non-terminals", ) parser.add_argument( "-i", "--indent-guides", dest="indent_guides", action="store_true", default=False, help="display indent guides", ) parser.add_argument( "-l", "--line-numbers", dest="line_numbers", action="store_true", help="render line numbers", ) parser.add_argument( "-w", "--width", type=int, dest="width", default=None, help="width of output (default will auto-detect)", ) parser.add_argument( "-r", "--wrap", dest="word_wrap", action="store_true", default=False, help="word wrap long lines", ) parser.add_argument( "-s", "--soft-wrap", action="store_true", dest="soft_wrap", default=False, help="enable soft wrapping mode", ) parser.add_argument( "-t", "--theme", dest="theme", default="monokai", help="pygments theme" ) parser.add_argument( "-b", "--background-color", dest="background_color", default=None, help="Override background color", ) parser.add_argument( "-x", "--lexer", 
default=None, dest="lexer_name", help="Lexer name", ) parser.add_argument( "-p", "--padding", type=int, default=0, dest="padding", help="Padding" ) parser.add_argument( "--highlight-line", type=int, default=None, dest="highlight_line", help="The line number (not index!) to highlight", ) args = parser.parse_args() from pipenv.patched.pip._vendor.rich.console import Console console = Console(force_terminal=args.force_color, width=args.width) if args.path == "-": code = sys.stdin.read() syntax = Syntax( code=code, lexer=args.lexer_name, line_numbers=args.line_numbers, word_wrap=args.word_wrap, theme=args.theme, background_color=args.background_color, indent_guides=args.indent_guides, padding=args.padding, highlight_lines={args.highlight_line}, ) else: syntax = Syntax.from_path( args.path, lexer=args.lexer_name, line_numbers=args.line_numbers, word_wrap=args.word_wrap, theme=args.theme, background_color=args.background_color, indent_guides=args.indent_guides, padding=args.padding, highlight_lines={args.highlight_line}, ) console.print(syntax, soft_wrap=args.soft_wrap)
Syntax
python
donnemartin__interactive-coding-challenges
stacks_queues/set_of_stacks/test_set_of_stacks.py
{ "start": 18, "end": 853 }
class ____(unittest.TestCase): def test_set_of_stacks(self): print('Test: Push on an empty stack') stacks = SetOfStacks(indiv_stack_capacity=2) stacks.push(3) print('Test: Push on a non-empty stack') stacks.push(5) print('Test: Push on a capacity stack to create a new one') stacks.push('a') print('Test: Pop on a stack to destroy it') self.assertEqual(stacks.pop(), 'a') print('Test: Pop general case') self.assertEqual(stacks.pop(), 5) self.assertEqual(stacks.pop(), 3) print('Test: Pop on no elements') self.assertEqual(stacks.pop(), None) print('Success: test_set_of_stacks') def main(): test = TestSetOfStacks() test.test_set_of_stacks() if __name__ == '__main__': main()
TestSetOfStacks
python
jina-ai__jina
tests/unit/jaml/test_type_parse.py
{ "start": 281, "end": 347 }
class ____(BaseExecutor): pass @dataclasses.dataclass
MyExecutor
python
Netflix__metaflow
test/core/metaflow_test/metadata_check.py
{ "start": 225, "end": 8077 }
class ____(MetaflowCheck): def __init__(self, flow): from metaflow.client import Flow, get_namespace self.flow = flow self.run = Flow(flow.name)[self.run_id] expected_steps = set(step.name for step in flow) actual_steps = set(step.id for step in self.run) assert actual_steps.issubset( expected_steps ), f"Executed steps {actual_steps} not a subset of flow steps {expected_steps}" self._test_namespace() def _test_namespace(self): from metaflow.client import Flow, get_namespace, namespace, default_namespace from metaflow.exception import MetaflowNamespaceMismatch # test 1) METAFLOW_USER should be the default assert_equals("user:%s" % os.environ.get("METAFLOW_USER"), get_namespace()) # test 2) Run should be in the listing assert_equals(True, self.run_id in [run.id for run in Flow(self.flow.name)]) # test 3) changing namespace should change namespace namespace("user:nobody") assert_equals(get_namespace(), "user:nobody") # test 4) fetching results in the incorrect namespace should fail assert_exception( lambda: Flow(self.flow.name)[self.run_id], MetaflowNamespaceMismatch ) # test 5) global namespace should work namespace(None) assert_equals(get_namespace(), None) Flow(self.flow.name)[self.run_id] default_namespace() def get_run(self): return self.run def assert_artifact(self, step, name, value, fields=None): for task, artifacts in self.artifact_dict(step, name).items(): if name in artifacts: artifact = artifacts[name] if fields: for field, v in fields.items(): if is_stringish(artifact): data = json.loads(artifact) else: data = artifact if not isinstance(data, dict): raise AssertArtifactFailed( "Task '%s' expected %s to be a dictionary (got %s)" % (task, name, type(data)) ) if data.get(field, None) != v: raise AssertArtifactFailed( "Task '%s' expected %s[%s]=%r but got %s[%s]=%s" % ( task, name, field, truncate(v), name, field, truncate(data.get(field, None)), ) ) elif artifact != value: raise AssertArtifactFailed( "Task '%s' expected %s=%r but got %s=%s" % (task, name, 
truncate(value), name, truncate(artifact)) ) else: raise AssertArtifactFailed( "Task '%s' expected %s=%s but " "the key was not found" % (task, name, truncate(value)) ) return True def artifact_dict(self, step, name): return {task.id: {name: task[name].data} for task in self.run[step]} def artifact_dict_if_exists(self, step, name): return { task.id: {name: task[name].data} for task in self.run[step] if name in task } def assert_log(self, step, logtype, value, exact_match=True): log_value = self.get_log(step, logtype) if log_value == value: return True elif not exact_match and value in log_value: return True else: raise AssertLogFailed( "Step '%s' expected task.%s='%s' but got task.%s='%s'" % (step, logtype, repr(value), logtype, repr(log_value)) ) def list_cards(self, step, task, card_type=None): from metaflow.plugins.cards.exception import CardNotPresentException try: card_iter = self.get_card(step, task, card_type) except CardNotPresentException: card_iter = None if card_iter is None: return pathspec = self.run[step][task].pathspec list_data = dict(pathspec=pathspec, cards=[]) if len(card_iter) > 0: list_data["cards"] = [ dict( hash=card.hash, id=card.id, type=card.type, filename=card.path.split("/")[-1], ) for card in card_iter ] return list_data def assert_card( self, step, task, card_type, value, card_hash=None, card_id=None, exact_match=True, ): from metaflow.plugins.cards.exception import CardNotPresentException try: card_iter = self.get_card(step, task, card_type, card_id=card_id) except CardNotPresentException: card_iter = None card_data = None # Since there are many cards possible for a taskspec, we check for hash to assert a single card. # If the id argument is present then there will be a single cards anyway. 
if card_iter is not None: if len(card_iter) > 0: if card_hash is None: card_data = card_iter[0].get() else: card_filter = [c for c in card_iter if card_hash in c.hash] card_data = None if len(card_filter) == 0 else card_filter[0].get() if (exact_match and card_data != value) or ( not exact_match and value not in card_data ): raise AssertCardFailed( "Task '%s/%s' expected %s card with content '%s' but got '%s'" % (self.run_id, step, card_type, repr(value), repr(card_data)) ) return True def get_card_data(self, step, task, card_type, card_id=None): """ returns : (card_present, card_data) """ from metaflow.plugins.cards.exception import CardNotPresentException try: card_iter = self.get_card(step, task, card_type, card_id=card_id) except CardNotPresentException: return False, None if card_id is None: # Return the first piece of card_data we can find. return True, card_iter[0].get_data() for card in card_iter: if card.id == card_id: return True, card.get_data() return False, None def get_log(self, step, logtype): return "".join(getattr(task, logtype) for task in self.run[step]) def get_card(self, step, task, card_type, card_id=None): from metaflow.cards import get_cards iterator = get_cards(self.run[step][task], type=card_type, id=card_id) return iterator def get_user_tags(self): return self.run.user_tags def get_system_tags(self): return self.run.system_tags def add_tag(self, tag): return self.run.add_tag(tag) def add_tags(self, tags): return self.run.add_tags(tags) def remove_tag(self, tag): return self.run.remove_tag(tag) def remove_tags(self, tags): return self.run.remove_tags(tags) def replace_tag(self, tag_to_remove, tag_to_add): return self.run.replace_tag(tag_to_remove, tag_to_add) def replace_tags(self, tags_to_remove, tags_to_add): return self.run.replace_tags(tags_to_remove, tags_to_add)
MetadataCheck
python
graphql-python__graphene
graphene/types/tests/test_definition.py
{ "start": 1214, "end": 1252 }
class ____(Enum): foo = "foo"
MyEnum
python
pytorch__pytorch
torch/_dynamo/exc.py
{ "start": 9731, "end": 9917 }
class ____(ObservedException): # An AttributeError exception to be raised from inside Dynamo tracing. This can happen on user defined object __getattr__ pass
ObservedAttributeError
python
simplejson__simplejson
simplejson/tests/test_tuple.py
{ "start": 83, "end": 1831 }
class ____(unittest.TestCase): def test_tuple_array_dumps(self): t = (1, 2, 3) expect = json.dumps(list(t)) # Default is True self.assertEqual(expect, json.dumps(t)) self.assertEqual(expect, json.dumps(t, tuple_as_array=True)) self.assertRaises(TypeError, json.dumps, t, tuple_as_array=False) # Ensure that the "default" does not get called self.assertEqual(expect, json.dumps(t, default=repr)) self.assertEqual(expect, json.dumps(t, tuple_as_array=True, default=repr)) # Ensure that the "default" gets called self.assertEqual( json.dumps(repr(t)), json.dumps(t, tuple_as_array=False, default=repr)) def test_tuple_array_dump(self): t = (1, 2, 3) expect = json.dumps(list(t)) # Default is True sio = StringIO() json.dump(t, sio) self.assertEqual(expect, sio.getvalue()) sio = StringIO() json.dump(t, sio, tuple_as_array=True) self.assertEqual(expect, sio.getvalue()) self.assertRaises(TypeError, json.dump, t, StringIO(), tuple_as_array=False) # Ensure that the "default" does not get called sio = StringIO() json.dump(t, sio, default=repr) self.assertEqual(expect, sio.getvalue()) sio = StringIO() json.dump(t, sio, tuple_as_array=True, default=repr) self.assertEqual(expect, sio.getvalue()) # Ensure that the "default" gets called sio = StringIO() json.dump(t, sio, tuple_as_array=False, default=repr) self.assertEqual( json.dumps(repr(t)), sio.getvalue())
TestTuples
python
rq__rq
rq/worker_pool.py
{ "start": 770, "end": 850 }
class ____(NamedTuple): name: str pid: int process: Process
WorkerData
python
langchain-ai__langchain
libs/langchain/langchain_classic/indexes/_sql_record_manager.py
{ "start": 1363, "end": 2479 }
class ____(Base): # type: ignore[valid-type,misc] """Table used to keep track of when a key was last updated.""" # ATTENTION: # Prior to modifying this table, please determine whether # we should create migrations for this table to make sure # users do not experience data loss. __tablename__ = "upsertion_record" uuid = Column( String, index=True, default=lambda: str(uuid.uuid4()), primary_key=True, nullable=False, ) key = Column(String, index=True) # Using a non-normalized representation to handle `namespace` attribute. # If the need arises, this attribute can be pulled into a separate Collection # table at some time later. namespace = Column(String, index=True, nullable=False) group_id = Column(String, index=True, nullable=True) # The timestamp associated with the last record upsertion. updated_at = Column(Float, index=True) __table_args__ = ( UniqueConstraint("key", "namespace", name="uix_key_namespace"), Index("ix_key_namespace", "key", "namespace"), )
UpsertionRecord
python
ray-project__ray
release/train_tests/benchmark/recsys/recsys_factory.py
{ "start": 2151, "end": 8779 }
class ____(BenchmarkFactory): def __init__(self, benchmark_config: BenchmarkConfig): super().__init__(benchmark_config) self.torchrec_config = TorchRecConfig() def get_dataloader_factory(self) -> BaseDataLoaderFactory: data_factory_cls = { DataloaderType.MOCK: RecsysMockDataLoaderFactory, DataloaderType.RAY_DATA: RecsysRayDataLoaderFactory, }[self.benchmark_config.dataloader_type] return data_factory_cls(self.benchmark_config) def get_model(self) -> torch.nn.Module: # NOTE: These imports error on a CPU-only driver node. # Delay the import to happen on the GPU train workers instead. from torchrec import EmbeddingBagCollection from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES from torchrec.distributed.model_parallel import ( DistributedModelParallel, get_default_sharders, ) from torchrec.distributed.planner import EmbeddingShardingPlanner, Topology from torchrec.distributed.planner.storage_reservations import ( HeuristicalStorageReservation, ) from torchrec.models.dlrm import DLRM, DLRM_DCN, DLRM_Projection, DLRMTrain from torchrec.modules.embedding_configs import EmbeddingBagConfig from torchrec.optim.apply_optimizer_in_backward import ( apply_optimizer_in_backward, ) args = self.torchrec_config device = ray.train.torch.get_device() local_world_size = ray.train.get_context().get_local_world_size() global_world_size = ray.train.get_context().get_world_size() eb_configs = [ EmbeddingBagConfig( name=f"t_{feature_name}", embedding_dim=args.embedding_dim, num_embeddings=args.num_embeddings_per_feature[feature_idx], feature_names=[feature_name], ) for feature_idx, feature_name in enumerate(DEFAULT_CAT_NAMES) ] sharded_module_kwargs = {} if args.over_arch_layer_sizes is not None: sharded_module_kwargs["over_arch_layer_sizes"] = args.over_arch_layer_sizes if args.interaction_type == "original": dlrm_model = DLRM( embedding_bag_collection=EmbeddingBagCollection( tables=eb_configs, device=torch.device("meta") ), 
dense_in_features=len(DEFAULT_INT_NAMES), dense_arch_layer_sizes=args.dense_arch_layer_sizes, over_arch_layer_sizes=args.over_arch_layer_sizes, dense_device=device, ) elif args.interaction_type == "dcn": dlrm_model = DLRM_DCN( embedding_bag_collection=EmbeddingBagCollection( tables=eb_configs, device=torch.device("meta") ), dense_in_features=len(DEFAULT_INT_NAMES), dense_arch_layer_sizes=args.dense_arch_layer_sizes, over_arch_layer_sizes=args.over_arch_layer_sizes, dcn_num_layers=args.dcn_num_layers, dcn_low_rank_dim=args.dcn_low_rank_dim, dense_device=device, ) elif args.interaction_type == "projection": raise NotImplementedError dlrm_model = DLRM_Projection( embedding_bag_collection=EmbeddingBagCollection( tables=eb_configs, device=torch.device("meta") ), dense_in_features=len(DEFAULT_INT_NAMES), dense_arch_layer_sizes=args.dense_arch_layer_sizes, over_arch_layer_sizes=args.over_arch_layer_sizes, interaction_branch1_layer_sizes=args.interaction_branch1_layer_sizes, interaction_branch2_layer_sizes=args.interaction_branch2_layer_sizes, dense_device=device, ) else: raise ValueError( "Unknown interaction option set. Should be original, dcn, or projection." ) train_model = DLRMTrain(dlrm_model) embedding_optimizer = torch.optim.Adagrad # This will apply the Adagrad optimizer in the backward pass for the embeddings (sparse_arch). This means that # the optimizer update will be applied in the backward pass, in this case through a fused op. # TorchRec will use the FBGEMM implementation of EXACT_ADAGRAD. For GPU devices, a fused CUDA kernel is invoked. For CPU, FBGEMM_GPU invokes CPU kernels # https://github.com/pytorch/FBGEMM/blob/2cb8b0dff3e67f9a009c4299defbd6b99cc12b8f/fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops.py#L676-L678 # Note that lr_decay, weight_decay and initial_accumulator_value for Adagrad optimizer in FBGEMM v0.3.2 # cannot be specified below. This equivalently means that all these parameters are hardcoded to zero. 
optimizer_kwargs = {"lr": 15.0, "eps": 1e-8} apply_optimizer_in_backward( embedding_optimizer, train_model.model.sparse_arch.parameters(), optimizer_kwargs, ) planner = EmbeddingShardingPlanner( topology=Topology( local_world_size=local_world_size, world_size=global_world_size, compute_device=device.type, ), batch_size=self.benchmark_config.dataloader_config.train_batch_size, # If experience OOM, increase the percentage. see # https://pytorch.org/torchrec/torchrec.distributed.planner.html#torchrec.distributed.planner.storage_reservations.HeuristicalStorageReservation storage_reservation=HeuristicalStorageReservation(percentage=0.05), ) plan = planner.collective_plan( train_model, get_default_sharders(), torch_dist.GroupMember.WORLD ) model = DistributedModelParallel( module=train_model, device=device, plan=plan, ) if ray.train.get_context().get_world_rank() == 0: for collectionkey, plans in model._plan.plan.items(): logger.info(collectionkey) for table_name, plan in plans.items(): logger.info(table_name) logger.info(plan) return model def get_loss_fn(self) -> torch.nn.Module: raise NotImplementedError( "torchrec model should return the loss directly in forward. " "See the `DLRMTrain` wrapper class." )
RecsysFactory
python
numpy__numpy
numpy/_core/tests/test_indexing.py
{ "start": 48051, "end": 49672 }
class ____: """ These test that ``TypeError`` is raised when you try to use non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]`` and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``. """ def test_valid_indexing(self): # These should raise no errors. a = np.array([[[5]]]) a[np.array([0])] a[[0, 0]] a[:, [0, 0]] a[:, 0, :] a[:, :, :] def test_valid_slicing(self): # These should raise no errors. a = np.array([[[5]]]) a[::] a[0:] a[:2] a[0:2] a[::2] a[1::2] a[:2:2] a[1:2:2] def test_non_integer_argument_errors(self): a = np.array([[5]]) assert_raises(TypeError, np.reshape, a, (1., 1., -1)) assert_raises(TypeError, np.reshape, a, (np.array(1.), -1)) assert_raises(TypeError, np.take, a, [0], 1.) assert_raises(TypeError, np.take, a, [0], np.float64(1.)) def test_non_integer_sequence_multiplication(self): # NumPy scalar sequence multiply should not work with non-integers def mult(a, b): return a * b assert_raises(TypeError, mult, [1], np.float64(3)) # following should be OK mult([1], np.int_(3)) def test_reduce_axis_float_index(self): d = np.zeros((3, 3, 3)) assert_raises(TypeError, np.min, d, 0.5) assert_raises(TypeError, np.min, d, (0.5, 1)) assert_raises(TypeError, np.min, d, (1, 2.2)) assert_raises(TypeError, np.min, d, (.2, 1.2))
TestFloatNonIntegerArgument
python
PyCQA__pylint
doc/data/messages/m/multiple-class-sub-patterns/bad.py
{ "start": 0, "end": 359 }
class ____: __match_args__ = ("title", "year") def __init__(self, title, year): self.title = title self.year = year def func(item: Book): match item: case Book("abc", title="abc"): # [multiple-class-sub-patterns] ... case Book(year=2000, year=2001): # [multiple-class-sub-patterns] ...
Book
python
yaml__pyyaml
tests/legacy_tests/canonical.py
{ "start": 11071, "end": 12371 }
class ____(CanonicalScanner, CanonicalParser, yaml.composer.Composer, yaml.constructor.Constructor, yaml.resolver.Resolver): def __init__(self, stream): if hasattr(stream, 'read'): stream = stream.read() CanonicalScanner.__init__(self, stream) CanonicalParser.__init__(self) yaml.composer.Composer.__init__(self) yaml.constructor.Constructor.__init__(self) yaml.resolver.Resolver.__init__(self) yaml.CanonicalLoader = CanonicalLoader def canonical_scan(stream): return yaml.scan(stream, Loader=CanonicalLoader) yaml.canonical_scan = canonical_scan def canonical_parse(stream): return yaml.parse(stream, Loader=CanonicalLoader) yaml.canonical_parse = canonical_parse def canonical_compose(stream): return yaml.compose(stream, Loader=CanonicalLoader) yaml.canonical_compose = canonical_compose def canonical_compose_all(stream): return yaml.compose_all(stream, Loader=CanonicalLoader) yaml.canonical_compose_all = canonical_compose_all def canonical_load(stream): return yaml.load(stream, Loader=CanonicalLoader) yaml.canonical_load = canonical_load def canonical_load_all(stream): return yaml.load_all(stream, Loader=CanonicalLoader) yaml.canonical_load_all = canonical_load_all
CanonicalLoader
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/orm/interfaces.py
{ "start": 4517, "end": 4658 }
class ____(roles.FromClauseRole): __slots__ = () _role_name = "ORM mapped entity, aliased entity, or FROM expression"
ORMFromClauseRole
python
donnemartin__system-design-primer
solutions/object_oriented_design/online_chat/online_chat.py
{ "start": 504, "end": 1441 }
class ____(object): def __init__(self, user_id, name, pass_hash): self.user_id = user_id self.name = name self.pass_hash = pass_hash self.friends_by_id = {} # key: friend id, value: User self.friend_ids_to_private_chats = {} # key: friend id, value: private chats self.group_chats_by_id = {} # key: chat id, value: GroupChat self.received_friend_requests_by_friend_id = {} # key: friend id, value: AddRequest self.sent_friend_requests_by_friend_id = {} # key: friend id, value: AddRequest def message_user(self, friend_id, message): pass def message_group(self, group_id, message): pass def send_friend_request(self, friend_id): pass def receive_friend_request(self, friend_id): pass def approve_friend_request(self, friend_id): pass def reject_friend_request(self, friend_id): pass
User
python
apache__thrift
lib/py/src/transport/TTwisted.py
{ "start": 8691, "end": 9098 }
class ____(ServerFactory): protocol = ThriftServerProtocol def __init__(self, processor, iprot_factory, oprot_factory=None): self.processor = processor self.iprot_factory = iprot_factory if oprot_factory is None: self.oprot_factory = iprot_factory else: self.oprot_factory = oprot_factory @implementer(IThriftClientFactory)
ThriftServerFactory
python
getsentry__sentry
src/sentry/api/serializers/models/group_stream.py
{ "start": 9506, "end": 9670 }
class ____(TypedDict): count: str userCount: int firstSeen: datetime | None lastSeen: datetime | None stats: NotRequired[dict[str, Any]]
_Filtered