language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
wandb__wandb
wandb/sdk/artifacts/_generated/fragments.py
{ "start": 6836, "end": 7055 }
class ____(GQLResult): total_count: int = Field(alias="totalCount") page_info: PageInfoFragment = Field(alias="pageInfo") edges: List[RunInputArtifactConnectionFragmentEdges]
RunInputArtifactConnectionFragment
python
pikepdf__pikepdf
src/pikepdf/objects.py
{ "start": 5883, "end": 6359 }
class ____(Object, metaclass=_ObjectMeta): """Construct a PDF String object.""" object_type = ObjectType.string def __new__(cls, s: str | bytes) -> String: """Construct a PDF String. Args: s: The string to use. String will be encoded for PDF, bytes will be constructed without encoding. """ if isinstance(s, bytes): return _core._new_string(s) return _core._new_string_utf8(s)
String
python
ray-project__ray
python/ray/air/integrations/wandb.py
{ "start": 13605, "end": 17129 }
class ____: """ Wandb assumes that each trial's information should be logged from a separate process. We use Ray actors as forking multiprocessing processes is not supported by Ray and spawn processes run into pickling problems. We use a queue for the driver to communicate with the logging process. The queue accepts the following items: - If it's a dict, it is assumed to be a result and will be logged using ``wandb.log()`` - If it's a checkpoint object, it will be saved using ``wandb.log_artifact()``. """ def __init__( self, logdir: str, queue: Queue, exclude: List[str], to_config: List[str], *args, **kwargs, ): import wandb self._wandb = wandb os.chdir(logdir) self.queue = queue self._exclude = set(exclude) self._to_config = set(to_config) self.args = args self.kwargs = kwargs self._trial_name = self.kwargs.get("name", "unknown") self._logdir = logdir def run(self): # Since we're running in a separate process already, use threads. os.environ["WANDB_START_METHOD"] = "thread" run = self._wandb.init(*self.args, **self.kwargs) run.config.trial_log_path = self._logdir _run_wandb_process_run_info_hook(run) while True: item_type, item_content = self.queue.get() if item_type == _QueueItem.END: break if item_type == _QueueItem.CHECKPOINT: self._handle_checkpoint(item_content) continue assert item_type == _QueueItem.RESULT log, config_update = self._handle_result(item_content) try: self._wandb.config.update(config_update, allow_val_change=True) self._wandb.log(log, step=log.get(TRAINING_ITERATION)) except urllib.error.HTTPError as e: # Ignore HTTPError. Missing a few data points is not a # big issue, as long as things eventually recover. logger.warning("Failed to log result to w&b: {}".format(str(e))) except FileNotFoundError as e: logger.error( "FileNotFoundError: Did not log result to Weights & Biases. " "Possible cause: relative file path used instead of absolute path. 
" "Error: %s", e, ) self._wandb.finish() def _handle_checkpoint(self, checkpoint_path: str): artifact = self._wandb.Artifact( name=f"checkpoint_{self._trial_name}", type="model" ) artifact.add_dir(checkpoint_path) self._wandb.log_artifact(artifact) def _handle_result(self, result: Dict) -> Tuple[Dict, Dict]: config_update = result.get("config", {}).copy() log = {} flat_result = flatten_dict(result, delimiter="/") for k, v in flat_result.items(): if any(k.startswith(item + "/") or k == item for item in self._exclude): continue elif any(k.startswith(item + "/") or k == item for item in self._to_config): config_update[k] = v elif not _is_allowed_type(v): continue else: log[k] = v config_update.pop("callbacks", None) # Remove callbacks return log, config_update @PublicAPI(stability="alpha")
_WandbLoggingActor
python
keras-team__keras
keras/src/optimizers/rmsprop_test.py
{ "start": 157, "end": 2761 }
class ____(testing.TestCase): def test_config(self): optimizer = RMSprop( learning_rate=0.5, rho=0.8, momentum=0.05, epsilon=1e-6, centered=True, ) self.run_class_serialization_test(optimizer) def test_single_step(self): optimizer = RMSprop(learning_rate=0.5) grads = ops.array([1.0, 6.0, 7.0, 2.0]) vars = backend.Variable([1.0, 2.0, 3.0, 4.0]) optimizer.apply_gradients(zip([grads], [vars])) self.assertAllClose( vars, [-0.5811, 0.4189, 1.4189, 2.4189], rtol=1e-4, atol=1e-4 ) def test_weight_decay(self): grads, var1, var2, var3 = ( ops.zeros(()), backend.Variable(2.0), backend.Variable(2.0, name="exclude"), backend.Variable(2.0), ) optimizer_1 = RMSprop(learning_rate=1.0, weight_decay=0.004) optimizer_1.apply_gradients(zip([grads], [var1])) optimizer_2 = RMSprop(learning_rate=1.0, weight_decay=0.004) optimizer_2.exclude_from_weight_decay(var_names=["exclude"]) optimizer_2.apply_gradients(zip([grads, grads], [var1, var2])) optimizer_3 = RMSprop(learning_rate=1.0, weight_decay=0.004) optimizer_3.exclude_from_weight_decay(var_list=[var3]) optimizer_3.apply_gradients(zip([grads, grads], [var1, var3])) self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6) self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6) self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6) def test_correctness_with_golden(self): optimizer = RMSprop(centered=True) x = backend.Variable(np.ones([10])) grads = ops.arange(0.1, 1.1, 0.1) first_grads = ops.full((10,), 0.01) golden = np.tile( [[0.9967], [0.9933], [0.9908], [0.9885], [0.9864]], (1, 10) ) optimizer.apply_gradients(zip([first_grads], [x])) for i in range(5): self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4) optimizer.apply_gradients(zip([grads], [x])) def test_clip_norm(self): optimizer = RMSprop(clipnorm=1) grad = [np.array([100.0, 100.0])] clipped_grad = optimizer._clip_gradients(grad) self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2]) def test_clip_value(self): optimizer = RMSprop(clipvalue=1) grad = [np.array([100.0, 
100.0])] clipped_grad = optimizer._clip_gradients(grad) self.assertAllClose(clipped_grad[0], [1.0, 1.0])
RMSpropTest
python
django__django
tests/utils_tests/test_autoreload.py
{ "start": 23067, "end": 24457 }
class ____(SimpleTestCase): RELOADER_CLS = None def setUp(self): _tempdir = tempfile.TemporaryDirectory() self.tempdir = Path(_tempdir.name).resolve(strict=True).absolute() self.existing_file = self.ensure_file(self.tempdir / "test.py") self.nonexistent_file = (self.tempdir / "does_not_exist.py").absolute() self.reloader = self.RELOADER_CLS() self.addCleanup(self.reloader.stop) self.addCleanup(_tempdir.cleanup) def ensure_file(self, path): path.parent.mkdir(exist_ok=True, parents=True) path.touch() # On Linux and Windows updating the mtime of a file using touch() will # set a timestamp value that is in the past, as the time value for the # last kernel tick is used rather than getting the correct absolute # time. # To make testing simpler set the mtime to be the observed time when # this function is called. self.set_mtime(path, time.time()) return path.absolute() def set_mtime(self, fp, value): os.utime(str(fp), (value, value)) def increment_mtime(self, fp, by=1): current_time = time.time() self.set_mtime(fp, current_time + by) @contextlib.contextmanager def tick_twice(self): ticker = self.reloader.tick() next(ticker) yield next(ticker)
ReloaderTests
python
psf__black
src/black/brackets.py
{ "start": 1080, "end": 1216 }
class ____(Exception): """Raised when an opening bracket is unable to be matched to a closing bracket.""" @dataclass
BracketMatchError
python
walkccc__LeetCode
solutions/678. Valid Parenthesis String/678.py
{ "start": 0, "end": 366 }
class ____: def checkValidString(self, s: str) -> bool: low = 0 high = 0 for c in s: if c == '(': low += 1 high += 1 elif c == ')': if low > 0: low -= 1 high -= 1 else: if low > 0: low -= 1 high += 1 if high < 0: return False return low == 0
Solution
python
getsentry__sentry
tests/sentry/tasks/test_post_process.py
{ "start": 100785, "end": 102900 }
class ____(BasePostProgressGroupMixin): @patch("sentry.tasks.post_process.safe_execute") def test_process_similarity(self, mock_safe_execute: MagicMock) -> None: from sentry import similarity event = self.create_event(data={}, project_id=self.project.id) self.call_post_process_group( is_new=True, is_regression=False, is_new_group_environment=False, event=event, ) mock_safe_execute.assert_called_with(similarity.record, mock.ANY, mock.ANY) def assert_not_called_with(self, mock_function: Mock): """ Helper function to check that safe_execute isn't called with similarity.record It can/will be called with other parameters """ from sentry import similarity try: mock_function.assert_called_with(similarity.record, mock.ANY, mock.ANY) except AssertionError: return raise AssertionError("Expected safe_execute to not be called with similarity.record") @patch("sentry.tasks.post_process.safe_execute") def test_skip_process_similarity(self, mock_safe_execute: MagicMock) -> None: self.project.update_option("sentry:similarity_backfill_completed", int(time.time())) event = self.create_event(data={}, project_id=self.project.id) self.call_post_process_group( is_new=True, is_regression=False, is_new_group_environment=False, event=event, ) self.assert_not_called_with(mock_safe_execute) @patch("sentry.tasks.post_process.safe_execute") @override_options({"sentry.similarity.indexing.enabled": False}) def test_skip_process_similarity_global(self, mock_safe_execute: MagicMock) -> None: event = self.create_event(data={}, project_id=self.project.id) self.call_post_process_group( is_new=True, is_regression=False, is_new_group_environment=False, event=event, ) self.assert_not_called_with(mock_safe_execute)
ProcessSimilarityTestMixin
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_stateful.py
{ "start": 4115, "end": 4545 }
class ____(RuleBasedStateMachine): ratchet = 0 @rule(d=data()) def action(self, d): FlakyRatchettingMachine.ratchet += 1 n = FlakyRatchettingMachine.ratchet d.draw(lists(integers(), min_size=n, max_size=n)) raise AssertionError @Settings( stateful_step_count=10, max_examples=30, suppress_health_check=[HealthCheck.filter_too_much], ) # speed this up
FlakyRatchettingMachine
python
ansible__ansible
lib/ansible/_internal/_json/__init__.py
{ "start": 856, "end": 959 }
class ____(t.Protocol): """Utility protocol for mixin type safety.""" _current: t.Any
HasCurrent
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/asset_health/asset_check_health.py
{ "start": 644, "end": 9042 }
class ____(LoadableBy[AssetKey]): """Maintains a list of asset checks for the asset in each terminal state. If a check is in progress, it will not move to a new list until the execution is complete. """ passing_checks: set[AssetCheckKey] failing_checks: set[AssetCheckKey] warning_checks: set[AssetCheckKey] all_checks: set[AssetCheckKey] latest_passing_check_timestamp: Optional[float] = None latest_failing_check_timestamp: Optional[float] = None latest_warning_check_timestamp: Optional[float] = None @classmethod def default(cls) -> "AssetCheckHealthState": return cls( passing_checks=set(), failing_checks=set(), warning_checks=set(), all_checks=set(), latest_passing_check_timestamp=None, latest_failing_check_timestamp=None, latest_warning_check_timestamp=None, ) @property def health_status(self) -> AssetHealthStatus: """Returns the health status of the asset based on the checks.""" if len(self.all_checks) == 0: return AssetHealthStatus.NOT_APPLICABLE if len(self.failing_checks) > 0: return AssetHealthStatus.DEGRADED if len(self.warning_checks) > 0: return AssetHealthStatus.WARNING num_unexecuted = ( len(self.all_checks) - len(self.passing_checks) - len(self.failing_checks) - len(self.warning_checks) ) if num_unexecuted > 0: return AssetHealthStatus.UNKNOWN # all checks are passing return AssetHealthStatus.HEALTHY @classmethod async def compute_for_asset_checks( cls, check_keys: set[AssetCheckKey], loading_context: LoadingContext ) -> "AssetCheckHealthState": """Using the latest terminal state for each check as stored in the DB, returns a set of asset checks in each terminal state. If a check is in progress, it remains in the terminal state it was in prior to the in progress execution. 
""" from dagster._core.storage.asset_check_execution_record import ( AssetCheckExecutionResolvedStatus, ) from dagster._core.storage.event_log.base import AssetCheckSummaryRecord if len(check_keys) == 0: # no checks defined return AssetCheckHealthState.default() passing_checks = set() warning_checks = set() failing_checks = set() latest_passing_check_timestamp = None latest_failing_check_timestamp = None latest_warning_check_timestamp = None check_records = await AssetCheckSummaryRecord.gen_many( loading_context, check_keys, ) for check_record in check_records: if check_record is None or check_record.last_check_execution_record is None: # check has never been executed continue check_key = check_record.asset_check_key # if the last_check_execution_record is completed, it will be the same as last_completed_check_execution_record, # but we check the last_check_execution_record status first since there is an edge case # where the record will have status PLANNED, but the resolve_status will be EXECUTION_FAILED # because the run for the check failed. last_check_execution_status = ( await check_record.last_check_execution_record.resolve_status(loading_context) ) last_check_evaluation = check_record.last_check_execution_record.evaluation if last_check_execution_status in [ AssetCheckExecutionResolvedStatus.IN_PROGRESS, AssetCheckExecutionResolvedStatus.SKIPPED, ]: # the last check is still in progress or is skipped, so we want to check the status of # the latest completed check instead if check_record.last_completed_check_execution_record is None: # the check hasn't been executed prior to this in progress check continue # should never need to fetch a run since the non-resolved status is success or failed. 
# this method converts directly to the resolved status last_check_execution_status = ( await check_record.last_completed_check_execution_record.resolve_status( loading_context ) ) last_check_evaluation = ( check_record.last_completed_check_execution_record.evaluation ) if last_check_execution_status == AssetCheckExecutionResolvedStatus.FAILED: # failed checks should always have an evaluation, but default to ERROR if not if ( last_check_evaluation and last_check_evaluation.severity == AssetCheckSeverity.WARN ): warning_checks.add(check_key) if check_record.last_completed_check_execution_record is not None and ( latest_warning_check_timestamp is None or latest_warning_check_timestamp < check_record.last_completed_check_execution_record.create_timestamp ): latest_warning_check_timestamp = ( check_record.last_completed_check_execution_record.create_timestamp ) else: failing_checks.add(check_key) if check_record.last_completed_check_execution_record is not None and ( latest_failing_check_timestamp is None or latest_failing_check_timestamp < check_record.last_completed_check_execution_record.create_timestamp ): latest_failing_check_timestamp = ( check_record.last_completed_check_execution_record.create_timestamp ) elif last_check_execution_status == AssetCheckExecutionResolvedStatus.EXECUTION_FAILED: # EXECUTION_FAILED checks don't have an evaluation and we want to report them as failures failing_checks.add(check_key) if check_record.last_completed_check_execution_record is not None and ( latest_failing_check_timestamp is None or latest_failing_check_timestamp < check_record.last_completed_check_execution_record.create_timestamp ): latest_failing_check_timestamp = ( check_record.last_completed_check_execution_record.create_timestamp ) else: # asset check passed passing_checks.add(check_key) if check_record.last_completed_check_execution_record is not None and ( latest_passing_check_timestamp is None or latest_passing_check_timestamp < 
check_record.last_completed_check_execution_record.create_timestamp ): latest_passing_check_timestamp = ( check_record.last_completed_check_execution_record.create_timestamp ) return AssetCheckHealthState( passing_checks=passing_checks, failing_checks=failing_checks, warning_checks=warning_checks, all_checks=check_keys, latest_passing_check_timestamp=latest_passing_check_timestamp, latest_failing_check_timestamp=latest_failing_check_timestamp, latest_warning_check_timestamp=latest_warning_check_timestamp, ) @classmethod def _blocking_batch_load( cls, keys: Iterable[AssetKey], context: LoadingContext ) -> Iterable[Optional["AssetCheckHealthState"]]: asset_check_health_states = context.instance.get_asset_check_health_state_for_assets( list(keys) ) return [asset_check_health_states.get(key) for key in keys] @whitelist_for_serdes @record.record
AssetCheckHealthState
python
gevent__gevent
src/greentest/3.10/test_socket.py
{ "start": 208794, "end": 209581 }
class ____(SocketUDPLITETest): def testUDPLITETimeout(self): def raise_timeout(*args, **kwargs): self.serv.settimeout(1.0) self.serv.recv(1024) self.assertRaises(TimeoutError, raise_timeout, "Error generating a timeout exception (UDPLITE)") def testTimeoutZero(self): ok = False try: self.serv.settimeout(0.0) foo = self.serv.recv(1024) except TimeoutError: self.fail("caught timeout instead of error (UDPLITE)") except OSError: ok = True except: self.fail("caught unexpected exception (UDPLITE)") if not ok: self.fail("recv() returned success when we did not expect it")
UDPLITETimeoutTest
python
getsentry__sentry
tests/sentry/workflow_engine/processors/test_delayed_workflow.py
{ "start": 10439, "end": 11926 }
class ____(TestDelayedWorkflowBase): def test_fetch_project(self) -> None: assert fetch_project(self.project.id) == self.project assert fetch_project(1) is None def test_fetch_workflows_envs(self) -> None: self._push_base_events() event_data = EventRedisData.from_redis_data( self.batch_client.for_project(self.project.id).get_hash_data(batch_key=None), continue_on_error=False, ) workflows_to_envs = fetch_workflows_envs(list(event_data.workflow_ids)) assert workflows_to_envs == { self.workflow1.id: self.environment.id, self.workflow2.id: None, } def test_parse_none_timestamps(self) -> None: self._push_base_events() event_data = EventRedisData.from_redis_data( self.batch_client.for_project(self.project.id).get_hash_data(batch_key=None), continue_on_error=False, ) for instance in event_data.events.values(): assert instance.timestamp is None @freeze_time() def test_parse_timestamps(self) -> None: self._push_base_events(timestamp=timezone.now()) event_data = EventRedisData.from_redis_data( self.batch_client.for_project(self.project.id).get_hash_data(batch_key=None), continue_on_error=False, ) for instance in event_data.events.values(): assert instance.timestamp == timezone.now()
TestDelayedWorkflowHelpers
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_pretty.py
{ "start": 7747, "end": 8030 }
class ____: __module__ = 1 @property def __class__(self): raise ValueError("I am horrible") def __repr__(self): raise BadException def test_really_bad_repr(): with pytest.raises(BadException): pretty.pretty(ReallyBadRepr())
ReallyBadRepr
python
tensorflow__tensorflow
tensorflow/python/ops/boosted_trees_ops.py
{ "start": 7909, "end": 9741 }
class ____(saver.BaseSaverBuilder.SaveableObject): """SaveableObject implementation for TreeEnsemble.""" def __init__(self, resource_handle, create_op, name): """Creates a _TreeEnsembleSavable object. Args: resource_handle: handle to the decision tree ensemble variable. create_op: the op to initialize the variable. name: the name to save the tree ensemble variable under. """ stamp_token, serialized = ( gen_boosted_trees_ops.boosted_trees_serialize_ensemble(resource_handle)) # slice_spec is useful for saving a slice from a variable. # It's not meaningful the tree ensemble variable. So we just pass an empty # value. slice_spec = '' specs = [ saver.BaseSaverBuilder.SaveSpec(stamp_token, slice_spec, name + '_stamp'), saver.BaseSaverBuilder.SaveSpec(serialized, slice_spec, name + '_serialized'), ] super(_TreeEnsembleSavable, self).__init__(resource_handle, specs, name) self.resource_handle = resource_handle self._create_op = create_op def restore(self, restored_tensors, unused_restored_shapes): """Restores the associated tree ensemble from 'restored_tensors'. Args: restored_tensors: the tensors that were loaded from a checkpoint. unused_restored_shapes: the shapes this object should conform to after restore. Not meaningful for trees. Returns: The operation that restores the state of the tree ensemble variable. """ with ops.control_dependencies([self._create_op]): return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble( self.resource_handle, stamp_token=restored_tensors[0], tree_ensemble_serialized=restored_tensors[1])
_TreeEnsembleSavable
python
run-llama__llama_index
llama-index-core/llama_index/core/base/response/schema.py
{ "start": 412, "end": 1285 }
class ____: """ Response object. Returned if streaming=False. Attributes: response: The response text. """ response: Optional[str] source_nodes: List[NodeWithScore] = field(default_factory=list) metadata: Optional[Dict[str, Any]] = None def __str__(self) -> str: """Convert to string representation.""" return self.response or "None" def get_formatted_sources(self, length: int = 100) -> str: """Get formatted sources text.""" texts = [] for source_node in self.source_nodes: fmt_text_chunk = truncate_text(source_node.node.get_content(), length) doc_id = source_node.node.node_id or "None" source_text = f"> Source (Doc id: {doc_id}): {fmt_text_chunk}" texts.append(source_text) return "\n\n".join(texts) @dataclass
Response
python
keras-team__keras
keras/src/metrics/iou_metrics.py
{ "start": 261, "end": 5759 }
class ____(Metric): """Computes the confusion matrix for Intersection-Over-Union metrics. Formula: ```python iou = true_positives / (true_positives + false_positives + false_negatives) ``` Intersection-Over-Union is a common evaluation metric for semantic image segmentation. From IoUs of individual classes, the MeanIoU can be computed as the mean of the individual IoUs. To compute IoUs, the predictions are accumulated in a confusion matrix, weighted by `sample_weight` and the metric is then calculated from it. If `sample_weight` is `None`, weights default to 1. Use `sample_weight` of 0 to mask values. Args: num_classes: The possible number of labels the prediction task can have. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. ignore_class: Optional integer. The ID of a class to be ignored during metric computation. This is useful, for example, in segmentation problems featuring a "void" class (commonly -1 or 255) in segmentation maps. By default (`ignore_class=None`), all classes are considered. sparse_y_true: Whether labels are encoded using integers or dense floating point vectors. If `False`, the `argmax` function is used to determine each sample's most likely associated label. sparse_y_pred: Whether predictions are encoded using integers or dense floating point vectors. If `False`, the `argmax` function is used to determine each sample's most likely associated label. axis: (Optional) -1 is the dimension containing the logits. Defaults to `-1`. """ def __init__( self, num_classes, name=None, dtype=None, ignore_class=None, sparse_y_true=True, sparse_y_pred=True, axis=-1, ): # defaulting to int to avoid issues with confusion matrix super().__init__(name=name, dtype=dtype or "int") # Metric should be maximized during optimization. 
self._direction = "up" self.num_classes = num_classes self.ignore_class = ignore_class self.sparse_y_true = sparse_y_true self.sparse_y_pred = sparse_y_pred self.axis = axis self.total_cm = self.add_variable( name="total_confusion_matrix", shape=(num_classes, num_classes), initializer=initializers.Zeros(), dtype=self.dtype, ) def update_state(self, y_true, y_pred, sample_weight=None): """Accumulates the confusion matrix statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Can be a `Tensor` whose rank is either 0, or the same as `y_true`, and must be broadcastable to `y_true`. Defaults to `1`. Returns: Update op. """ if not self.sparse_y_true: y_true = ops.argmax(y_true, axis=self.axis) if not self.sparse_y_pred: y_pred = ops.argmax(y_pred, axis=self.axis) y_true = ops.convert_to_tensor(y_true, dtype=self.dtype) y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype) # Flatten the input if its rank > 1. if len(y_pred.shape) > 1: y_pred = ops.reshape(y_pred, [-1]) if len(y_true.shape) > 1: y_true = ops.reshape(y_true, [-1]) if sample_weight is None: sample_weight = 1 else: if ( hasattr(sample_weight, "dtype") and "float" in str(sample_weight.dtype) and "int" in str(self.dtype) ): warnings.warn( "You are passing weight as `float`, but dtype is `int`. " "This may result in an incorrect weight due to type casting" " Consider using integer weights." 
) sample_weight = ops.convert_to_tensor(sample_weight, dtype=self.dtype) if len(sample_weight.shape) > 1: sample_weight = ops.reshape(sample_weight, [-1]) sample_weight = ops.broadcast_to(sample_weight, ops.shape(y_true)) if self.ignore_class is not None: ignore_class = ops.convert_to_tensor( self.ignore_class, y_true.dtype ) valid_mask = ops.not_equal(y_true, ignore_class) y_true = y_true * ops.cast(valid_mask, y_true.dtype) y_pred = y_pred * ops.cast(valid_mask, y_pred.dtype) if sample_weight is not None: sample_weight = sample_weight * ops.cast( valid_mask, sample_weight.dtype ) y_pred = ops.cast(y_pred, dtype=self.dtype) y_true = ops.cast(y_true, dtype=self.dtype) sample_weight = ops.cast(sample_weight, dtype=self.dtype) current_cm = confusion_matrix( y_true, y_pred, self.num_classes, weights=sample_weight, dtype=self.dtype, ) return self.total_cm.assign(self.total_cm + current_cm) def reset_state(self): self.total_cm.assign( ops.zeros(self.total_cm.shape, dtype=self.total_cm.dtype) ) @keras_export("keras.metrics.IoU")
_IoUBase
python
jazzband__django-simple-history
simple_history/tests/tests/test_commands.py
{ "start": 6689, "end": 16666 }
class ____(TestCase): command_name = "clean_duplicate_history" command_error = (management.CommandError, SystemExit) def test_no_args(self): out = StringIO() management.call_command(self.command_name, stdout=out, stderr=StringIO()) self.assertIn(clean_duplicate_history.Command.COMMAND_HINT, out.getvalue()) def test_bad_args(self): test_data = ( (clean_duplicate_history.Command.MODEL_NOT_HISTORICAL, ("tests.place",)), (clean_duplicate_history.Command.MODEL_NOT_FOUND, ("invalid.model",)), (clean_duplicate_history.Command.MODEL_NOT_FOUND, ("bad_key",)), ) for msg, args in test_data: out = StringIO() self.assertRaises( self.command_error, management.call_command, self.command_name, *args, stdout=StringIO(), stderr=out, ) self.assertIn(msg, out.getvalue()) def test_no_historical(self): out = StringIO() with replace_registry({"test_place": Place}): management.call_command(self.command_name, auto=True, stdout=out) self.assertIn( clean_duplicate_history.Command.NO_REGISTERED_MODELS, out.getvalue() ) def test_auto_dry_run(self): p = Poll.objects.create( question="Will this be deleted?", pub_date=datetime.now() ) p.save() # not related to dry_run test, just for increasing coverage :) # create instance with single-entry history older than "minutes" # so it is skipped p = Poll.objects.create( question="Will this be deleted?", pub_date=datetime.now() ) h = p.history.first() h.history_date -= timedelta(hours=1) h.save() self.assertEqual(Poll.history.all().count(), 3) out = StringIO() management.call_command( self.command_name, auto=True, minutes=50, dry=True, stdout=out, stderr=StringIO(), ) self.assertEqual( out.getvalue(), "Removed 1 historical records for " "<class 'simple_history.tests.models.Poll'>\n", ) self.assertEqual(Poll.history.all().count(), 3) def test_auto_cleanup(self): p = Poll.objects.create( question="Will this be deleted?", pub_date=datetime.now() ) self.assertEqual(Poll.history.all().count(), 1) p.save() self.assertEqual(Poll.history.all().count(), 2) 
p.question = "Maybe this one won't...?" p.save() self.assertEqual(Poll.history.all().count(), 3) out = StringIO() management.call_command( self.command_name, auto=True, stdout=out, stderr=StringIO() ) self.assertEqual( out.getvalue(), "Removed 1 historical records for " "<class 'simple_history.tests.models.Poll'>\n", ) self.assertEqual(Poll.history.all().count(), 2) def _prepare_cleanup_manager(self): one = PollWithCustomManager._default_manager.create( question="This is hidden in default manager", pub_date=datetime.now(), hidden=True, ) one.save() two = PollWithCustomManager._default_manager.create( question="This is visible in default manager", pub_date=datetime.now() ) two.save() self.assertEqual(PollWithCustomManager.history.count(), 4) def test_auto_cleanup_defaultmanager(self): self._prepare_cleanup_manager() out = StringIO() management.call_command( self.command_name, auto=True, stdout=out, stderr=StringIO() ) self.assertEqual( out.getvalue(), "Removed 1 historical records for " "<class 'simple_history.tests.models.PollWithCustomManager'>\n", ) self.assertEqual(PollWithCustomManager.history.count(), 3) def test_auto_cleanup_basemanage(self): self._prepare_cleanup_manager() out = StringIO() management.call_command( self.command_name, auto=True, base_manager=True, stdout=out, stderr=StringIO(), ) self.assertEqual( out.getvalue(), "Removed 1 historical records for " "<class 'simple_history.tests.models.PollWithCustomManager'>\n" "Removed 1 historical records for " "<class 'simple_history.tests.models.PollWithCustomManager'>\n", ) self.assertEqual(PollWithCustomManager.history.count(), 2) def test_auto_cleanup_verbose(self): p = Poll.objects.create( question="Will this be deleted?", pub_date=datetime.now() ) self.assertEqual(Poll.history.all().count(), 1) p.save() p.question = "Maybe this one won't...?" 
p.save() self.assertEqual(Poll.history.all().count(), 3) out = StringIO() management.call_command( self.command_name, "tests.poll", auto=True, verbosity=2, stdout=out, stderr=StringIO(), ) self.assertEqual( out.getvalue(), "<class 'simple_history.tests.models.Poll'> has 3 historical entries\n" "Removed 1 historical records for " "<class 'simple_history.tests.models.Poll'>\n", ) self.assertEqual(Poll.history.all().count(), 2) def test_auto_cleanup_dated(self): the_time_is_now = datetime.now() p = Poll.objects.create( question="Will this be deleted?", pub_date=the_time_is_now ) self.assertEqual(Poll.history.all().count(), 1) p.save() p.save() self.assertEqual(Poll.history.all().count(), 3) p.question = "Or this one...?" p.save() p.save() self.assertEqual(Poll.history.all().count(), 5) for h in Poll.history.all()[2:]: h.history_date -= timedelta(hours=1) h.save() management.call_command( self.command_name, auto=True, minutes=50, stdout=StringIO(), stderr=StringIO(), ) self.assertEqual(Poll.history.all().count(), 4) def test_auto_cleanup_dated_extra_one(self): the_time_is_now = datetime.now() p = Poll.objects.create( question="Will this be deleted?", pub_date=the_time_is_now ) self.assertEqual(Poll.history.all().count(), 1) p.save() p.save() self.assertEqual(Poll.history.all().count(), 3) p.question = "Or this one...?" 
p.save() p.save() p.save() p.save() self.assertEqual(Poll.history.all().count(), 7) for h in Poll.history.all()[2:]: h.history_date -= timedelta(hours=1) h.save() management.call_command( self.command_name, auto=True, minutes=50, stdout=StringIO(), stderr=StringIO(), ) # even though only the last 2 entries match the date range # the "extra_one" (the record before the oldest match) # is identical to the oldest match, so oldest match is deleted self.assertEqual(Poll.history.all().count(), 5) def test_auto_cleanup_custom_history_field(self): m = CustomManagerNameModel.objects.create(name="John") self.assertEqual(CustomManagerNameModel.log.all().count(), 1) m.save() self.assertEqual(CustomManagerNameModel.log.all().count(), 2) m.name = "Ivan" m.save() self.assertEqual(CustomManagerNameModel.log.all().count(), 3) out = StringIO() management.call_command( self.command_name, auto=True, stdout=out, stderr=StringIO() ) self.assertEqual( out.getvalue(), "Removed 1 historical records for " "<class 'simple_history.tests.models.CustomManagerNameModel'>\n", ) self.assertEqual(CustomManagerNameModel.log.all().count(), 2) def test_auto_cleanup_with_excluded_fields(self): p = Poll.objects.create( question="Will this be deleted?", pub_date=datetime.now() ) self.assertEqual(Poll.history.all().count(), 1) p.pub_date = p.pub_date + timedelta(days=1) p.save() self.assertEqual(Poll.history.all().count(), 2) out = StringIO() management.call_command( self.command_name, auto=True, excluded_fields=("pub_date",), stdout=out, stderr=StringIO(), ) self.assertEqual( out.getvalue(), "Removed 1 historical records for " "<class 'simple_history.tests.models.Poll'>\n", ) self.assertEqual(Poll.history.all().count(), 1) def test_auto_cleanup_for_model_with_excluded_fields(self): p = PollWithExcludeFields.objects.create( question="Will this be deleted?", pub_date=datetime.now() ) self.assertEqual(PollWithExcludeFields.history.all().count(), 1) p.pub_date = p.pub_date + timedelta(days=1) p.save() 
self.assertEqual(PollWithExcludeFields.history.all().count(), 2) out = StringIO() management.call_command( self.command_name, auto=True, stdout=out, stderr=StringIO() ) self.assertEqual( out.getvalue(), "Removed 1 historical records for " "<class 'simple_history.tests.models.PollWithExcludeFields'>\n", ) self.assertEqual(PollWithExcludeFields.history.all().count(), 1)
TestCleanDuplicateHistory
python
cython__cython
Cython/Plex/Regexps.py
{ "start": 7411, "end": 8728 }
class ____(RE): """Seq(re1, re2, re3...) is an RE which matches |re1| followed by |re2| followed by |re3|...""" def __init__(self, *re_list): nullable = 1 for i, re in enumerate(re_list): self.check_re(i, re) nullable = nullable and re.nullable self.re_list = re_list self.nullable = nullable i = len(re_list) match_nl = 0 while i: i -= 1 re = re_list[i] if re.match_nl: match_nl = 1 break if not re.nullable: break self.match_nl = match_nl def build_machine(self, m, initial_state, final_state, match_bol, nocase): re_list = self.re_list if len(re_list) == 0: initial_state.link_to(final_state) else: s1 = initial_state n = len(re_list) for i, re in enumerate(re_list): if i < n - 1: s2 = m.new_state() else: s2 = final_state re.build_machine(m, s1, s2, match_bol, nocase) s1 = s2 match_bol = re.match_nl or (match_bol and re.nullable) def calc_str(self): return "Seq(%s)" % ','.join(map(str, self.re_list))
Seq
python
getsentry__sentry
src/sentry/snuba/metrics/naming_layer/public.py
{ "start": 6053, "end": 6286 }
class ____(Enum): """Identifier for a transaction-related tag.""" TRANSACTION_STATUS = "transaction.status" TRANSACTION_SATISFACTION = "satisfaction" TRANSACTION_HTTP_STATUS_CODE = "http.status_code"
TransactionTagsKey
python
allegroai__clearml
clearml/backend_api/services/v2_23/datasets.py
{ "start": 185642, "end": 186716 }
class ____(Response): """ Response of datasets.get_sources endpoint. :param sources: Mapping of source URL to first frame_id of the source :type sources: dict """ _service = "datasets" _action = "get_sources" _version = "2.23" _schema = { "definitions": {}, "properties": { "sources": { "additionalProperties": True, "description": "Mapping of source URL to first frame_id of the source", "type": ["object", "null"], } }, "type": "object", } def __init__(self, sources=None, **kwargs): super(GetSourcesResponse, self).__init__(**kwargs) self.sources = sources @schema_property("sources") def sources(self): return self._property_sources @sources.setter def sources(self, value): if value is None: self._property_sources = None return self.assert_isinstance(value, "sources", (dict,)) self._property_sources = value
GetSourcesResponse
python
spack__spack
lib/spack/spack/fetch_strategy.py
{ "start": 47397, "end": 50708 }
class ____(VCSFetchStrategy): """ Fetch strategy that gets source code from a Mercurial repository. Use like this in a package:: version("name", hg="https://jay.grs.rwth-aachen.de/hg/lwm2") Optionally, you can provide a branch, or revision to check out, e.g.:: version("torus", hg="https://jay.grs.rwth-aachen.de/hg/lwm2", branch="torus") You can use the optional ``revision`` attribute to check out a branch, tag, or particular revision in hg. To prevent non-reproducible builds, using a moving target like a branch is discouraged. * ``revision``: Particular revision, branch, or tag. Repositories are cloned into the standard stage source path directory. """ url_attr = "hg" optional_attrs = ["revision"] def __init__(self, **kwargs): # Discards the keywords in kwargs that may conflict with the next call # to __init__ forwarded_args = copy.copy(kwargs) forwarded_args.pop("name", None) super().__init__(**forwarded_args) self._hg = None @property def hg(self): """ Returns: Executable: the hg executable """ if not self._hg: self._hg = which("hg", required=True) # When building PythonPackages, Spack automatically sets # PYTHONPATH. This can interfere with hg, which is a Python # script. Unset PYTHONPATH while running hg. 
self._hg.add_default_env("PYTHONPATH", "") return self._hg @property def cachable(self): return self.cache_enabled and bool(self.revision) def source_id(self): return self.revision def mirror_id(self): if self.revision: repo_path = urllib.parse.urlparse(self.url).path result = os.path.sep.join(["hg", repo_path, self.revision]) return result @_needs_stage def fetch(self): if self.stage.expanded: tty.debug("Already fetched {0}".format(self.stage.source_path)) return args = [] if self.revision: args.append("at revision %s" % self.revision) tty.debug("Cloning mercurial repository: {0} {1}".format(self.url, args)) args = ["clone"] if not spack.config.get("config:verify_ssl"): args.append("--insecure") if self.revision: args.extend(["-r", self.revision]) args.extend([self.url]) with temp_cwd(): self.hg(*args) repo_name = get_single_file(".") self.stage.srcdir = repo_name shutil.move(repo_name, self.stage.source_path) def archive(self, destination): super().archive(destination, exclude=".hg") @_needs_stage def reset(self): with working_dir(self.stage.path): source_path = self.stage.source_path scrubbed = "scrubbed-source-tmp" args = ["clone"] if self.revision: args += ["-r", self.revision] args += [source_path, scrubbed] self.hg(*args) shutil.rmtree(source_path, ignore_errors=True) shutil.move(scrubbed, source_path) def __str__(self): return f"[hg] {self.url}" @fetcher
HgFetchStrategy
python
coleifer__peewee
tests/sqlite.py
{ "start": 3397, "end": 3515 }
class ____(FTS5Model): message = SearchField() class Meta: options = {'tokenize': 'porter'}
FTS5Document
python
tensorflow__tensorflow
tensorflow/python/distribute/coordinator/cluster_coordinator.py
{ "start": 12382, "end": 22784 }
class ____(object): """Manage a queue of closures, inflight count and errors from execution. This class is thread-safe. """ def __init__(self): # `self._inflight_closure_count` only tracks the number of inflight closures # that are "in generation". Once an error occurs, error generation is # incremented and all subsequent arriving closures (from inflight) are # considered "out of generation". self.inflight_closure_count = 0 self._queue_lock = threading.Lock() # Condition indicating that all pending closures (either queued or inflight) # have been processed, failed, or cancelled. self._stop_waiting_condition = threading.Condition(self._queue_lock) # Condition indicating that an item becomes available in queue (not empty). self._closures_queued_condition = threading.Condition(self._queue_lock) self._should_process_closures = True # Condition indicating that a queue slot becomes available (not full). # Note that even with "infinite" queue size, there is still a "practical" # size limit for the queue depending on host memory capacity, and thus the # queue will eventually become full with a lot of enqueued closures. self._queue_free_slot_condition = threading.Condition(self._queue_lock) # Condition indicating there is no inflight closures. self._no_inflight_closure_condition = threading.Condition(self._queue_lock) # Use to cancel in-flight closures. self._cancellation_mgr = cancellation.CancellationManager() if _CLOSURE_QUEUE_MAX_SIZE <= 0: logging.warning( "In a `ClusterCoordinator`, creating an infinite closure queue can " "consume a significant amount of memory and even lead to OOM.") self._queue = queue.Queue(maxsize=_CLOSURE_QUEUE_MAX_SIZE) metric_utils.monitor_int("queued_closures", self._queue.qsize()) self._tagged_queue = collections.defaultdict(queue.Queue) self._error = None # The following is a lock to make sure when `wait` is called and before it # returns no `put` can be executed during this period. 
It is because `wait` # won't know what to do with newly put closures. This lock adds an cutoff # for `wait` so that closures put into the queue while waiting would not be # taken responsible by this `wait`. # # We cannot reuse the `self._queue_lock` since when `wait` waits for a # condition, the `self._queue_lock` will be released. # # We don't use a reader/writer's lock on purpose to reduce the complexity # of the code. self._put_wait_lock = threading.Lock() self._watchdog = watchdog.WatchDog(on_triggered=self._on_watchdog_timeout) def _on_watchdog_timeout(self): logging.info("inflight_closure_count is %d", self._inflight_closure_count) logging.info("current error is %s:%r", self._error, self._error) @property def inflight_closure_count(self): return self._inflight_closure_count @inflight_closure_count.setter def inflight_closure_count(self, value): self._inflight_closure_count = value metric_utils.monitor_int("inflight_closures", self._inflight_closure_count) def stop(self): with self._queue_lock: self._should_process_closures = False self._cancellation_mgr.start_cancel() self._closures_queued_condition.notify_all() self._watchdog.stop() def _cancel_all_closures(self): """Clears the queue and sets remaining closures cancelled error. This method expects self._queue_lock to be held prior to entry. """ self._cancellation_mgr.start_cancel() logging.info("Canceling all closures: waiting for inflight closures to " "finish") while self._inflight_closure_count > 0: self._no_inflight_closure_condition.wait() logging.info("Canceling all closures: canceling remaining closures on the " "queue") while True: try: closure = self._queue.get(block=False) metric_utils.monitor_int("queued_closures", self._queue.qsize()) self._queue_free_slot_condition.notify() closure.mark_cancelled() except queue.Empty: break # The cancellation manager cannot be reused once cancelled. After all # closures (queued or inflight) are cleaned up, recreate the cancellation # manager with clean state. 
# Note on thread-safety: this is triggered when one of theses # ClusterCoordinator APIs are called: `schedule`, `wait`, and `done`. At the # same time, no new closures can be constructed (which reads the # _cancellation_mgr to get cancellable functions). self._cancellation_mgr = cancellation.CancellationManager() def _raise_if_error(self): """Raises the error if one exists. If an error exists, cancel the closures in queue, raises it, and clear the error. This method expects self._queue_lock to be held prior to entry. """ if self._error: logging.error("Start cancelling closures due to error %r: %s", self._error, self._error) self._cancel_all_closures() try: raise self._error # pylint: disable=raising-bad-type finally: self._error = None def put(self, closure, tag=None): """Put a closure into the queue for later execution. If `mark_failed` was called before `put`, the error from the first invocation of `mark_failed` will be raised. Args: closure: The `Closure` to put into the queue. tag: if not None, put into a queue with the given tag. """ closure.tag = tag if tag is not None: with self._queue_lock: self._tagged_queue[tag].put(closure, block=False) self._closures_queued_condition.notify_all() else: with self._put_wait_lock, self._queue_lock: self._queue_free_slot_condition.wait_for(lambda: not self._queue.full()) self._queue.put(closure, block=False) metric_utils.monitor_int("queued_closures", self._queue.qsize()) self._raise_if_error() self._closures_queued_condition.notify() def get(self, timeout=None, tag=None): """Return a closure from the queue to be executed. It will try to fetch an item from the queue with the given tag. If this queue is empty, it will then check the global queue. Args: timeout: timeout when waiting for a closure to be put. tag: optional tag to specify which queue to query first before querying the global queue. Returns: a closure or None after timeout. 
""" with self._queue_lock: while (self._should_process_closures and self._queue.empty() and (tag is None or self._tagged_queue[tag].empty())): if not self._closures_queued_condition.wait(timeout=timeout): return None if not self._should_process_closures: return None if tag is not None and not self._tagged_queue[tag].empty(): closure = self._tagged_queue[tag].get(block=False) return closure closure = self._queue.get(block=False) metric_utils.monitor_int("queued_closures", self._queue.qsize()) assert closure.tag is None assert tag is None or self._tagged_queue[tag].empty() self._queue_free_slot_condition.notify() self.inflight_closure_count += 1 return closure def mark_finished(self): """Let the queue know that a closure has been successfully executed.""" with self._queue_lock: if self._inflight_closure_count < 1: raise AssertionError("There is no inflight closures to mark_finished.") self.inflight_closure_count -= 1 if self._inflight_closure_count == 0: self._no_inflight_closure_condition.notify_all() if self._queue.empty() and self._inflight_closure_count == 0: self._stop_waiting_condition.notify_all() self._watchdog.report_closure_done() def put_back(self, closure): """Put the closure back into the queue as it was not properly executed.""" assert closure.tag is None with self._queue_lock: if self._inflight_closure_count < 1: raise AssertionError("There is no inflight closures to put_back.") if self._error: closure.mark_cancelled() else: self._queue_free_slot_condition.wait_for(lambda: not self._queue.full()) self._queue.put(closure, block=False) metric_utils.monitor_int("queued_closures", self._queue.qsize()) self._closures_queued_condition.notify() self.inflight_closure_count -= 1 if self._inflight_closure_count == 0: self._no_inflight_closure_condition.notify_all() def wait(self, timeout=None): """Wait for all closures to be finished before returning. 
If `mark_failed` was called before or during `wait`, the error from the first invocation of `mark_failed` will be raised. Args: timeout: A float specifying a timeout for the wait in seconds. Returns: True unless the given timeout expired, in which case it returns False. """ with self._put_wait_lock, self._queue_lock: logging.info("Waiting for all global closures to be finished.") while (not self._error and (not self._queue.empty() or self._inflight_closure_count > 0)): if not self._stop_waiting_condition.wait(timeout=timeout): return False self._raise_if_error() return True def mark_failed(self, e): """Sets error and unblocks any wait() call.""" with self._queue_lock: # TODO(yuefengz): maybe record all failure and give users more # information? if self._inflight_closure_count < 1: raise AssertionError("There is no inflight closures to mark_failed.") if self._error is None: self._error = e self.inflight_closure_count -= 1 if self._inflight_closure_count == 0: self._no_inflight_closure_condition.notify_all() self._stop_waiting_condition.notify_all() def done(self): """Returns true if the queue is empty and there is no inflight closure. If `mark_failed` was called before `done`, the error from the first invocation of `mark_failed` will be raised. """ with self._queue_lock: self._raise_if_error() return self._queue.empty() and self._inflight_closure_count == 0 def clear_tag_unlocked(self, tag): self._tagged_queue[tag] = queue.Queue()
_CoordinatedClosureQueue
python
getsentry__sentry
tests/sentry/api/endpoints/test_builtin_symbol_sources.py
{ "start": 617, "end": 1222 }
class ____(APITestCase): endpoint = "sentry-api-0-organization-builtin-symbol-sources" def setUp(self) -> None: super().setUp() self.organization = self.create_organization(owner=self.user) self.login_as(user=self.user) def test_with_slug(self) -> None: resp = self.get_response(self.organization.slug) assert resp.status_code == 200 body = resp.data assert len(body) assert "sentry_key" in body[0] assert "id" in body[0] assert "name" in body[0] assert "hidden" in body[0]
BuiltinSymbolSourcesWithSlugTest
python
kamyu104__LeetCode-Solutions
Python/minimum-sum-after-divisible-sum-deletions.py
{ "start": 50, "end": 393 }
class ____(object): def minArraySum(self, nums, k): """ :type nums: List[int] :type k: int :rtype: int """ dp = [float("inf")]*k dp[0] = result = 0 for x in nums: result += x dp[result%k] = result = min(result, dp[result%k]) return result
Solution
python
python__mypy
mypyc/test/test_exceptions.py
{ "start": 877, "end": 2133 }
class ____(MypycDataSuite): files = files base_path = test_temp_dir def run_case(self, testcase: DataDrivenTestCase) -> None: """Perform a runtime checking transformation test case.""" with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase): expected_output = remove_comment_lines(testcase.output) try: ir = build_ir_for_single_file(testcase.input) except CompileError as e: actual = e.messages else: actual = [] for fn in ir: if fn.name == TOP_LEVEL_NAME and not testcase.name.endswith("_toplevel"): continue insert_uninit_checks(fn) insert_exception_handling(fn) insert_ref_count_opcodes(fn) actual.extend(format_func(fn)) if testcase.name.endswith("_freq"): common = frequently_executed_blocks(fn.blocks[0]) actual.append("hot blocks: %s" % sorted(b.label for b in common)) assert_test_output(testcase, actual, "Invalid source code output", expected_output)
TestExceptionTransform
python
ray-project__ray
python/ray/data/_internal/execution/node_trackers/actor_location.py
{ "start": 191, "end": 1384 }
class ____: def __init__(self): self._actor_locations = {} self._actor_locations_lock = threading.Lock() def update_actor_location(self, logical_actor_id: str, node_id: str): with self._actor_locations_lock: self._actor_locations[logical_actor_id] = node_id def get_actor_locations(self, logical_actor_ids: List[str]): return { logical_actor_id: self._actor_locations.get(logical_actor_id, None) for logical_actor_id in logical_actor_ids } def get_or_create_actor_location_tracker(): # Pin the actor location tracker to the local node so it fate-shares with the driver. # NOTE: for Ray Client, the ray.get_runtime_context().get_node_id() should # point to the head node. scheduling_strategy = NodeAffinitySchedulingStrategy( ray.get_runtime_context().get_node_id(), soft=False, ) return ActorLocationTracker.options( name="ActorLocationTracker", namespace="ActorLocationTracker", get_if_exists=True, lifetime="detached", scheduling_strategy=scheduling_strategy, max_concurrency=8, ).remote()
ActorLocationTracker
python
ipython__ipython
IPython/terminal/interactiveshell.py
{ "start": 6154, "end": 40095 }
class ____(InteractiveShell): mime_renderers = Dict().tag(config=True) min_elide = Integer( 30, help="minimum characters for filling with ellipsis in file completions" ).tag(config=True) space_for_menu = Integer( 6, help="Number of line at the bottom of the screen " "to reserve for the tab completion menu, " "search history, ...etc, the height of " "these menus will at most this value. " "Increase it is you prefer long and skinny " "menus, decrease for short and wide.", ).tag(config=True) pt_app: UnionType[PromptSession, None] = None auto_suggest: UnionType[ AutoSuggestFromHistory, NavigableAutoSuggestFromHistory, None, ] = None debugger_history = None debugger_history_file = Unicode( "~/.pdbhistory", help="File in which to store and read history" ).tag(config=True) simple_prompt = Bool(_use_simple_prompt, help="""Use `raw_input` for the REPL, without completion and prompt colors. Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR. Known usage are: IPython's own testing machinery, and emacs' inferior-python subprocess (assuming you have set `python-shell-interpreter` to "ipython") available through the built-in `M-x run-python` and third party packages such as elpy. This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment variable is set, or the current terminal is not a tty. Thus the Default value reported in --help-all, or config will often be incorrectly reported. """, ).tag(config=True) @property def debugger_cls(self): return Pdb if self.simple_prompt else TerminalPdb confirm_exit = Bool(True, help=""" Set to confirm when you try to exit IPython with an EOF (Control-D in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a direct exit without any confirmation.""", ).tag(config=True) editing_mode = Unicode('emacs', help="Shortcut style to use at the prompt. 
'vi' or 'emacs'.", ).tag(config=True) emacs_bindings_in_vi_insert_mode = Bool( True, help="Add shortcuts from 'emacs' insert mode to 'vi' insert mode.", ).tag(config=True) modal_cursor = Bool( True, help=""" Cursor shape changes depending on vi mode: beam in vi insert mode, block in nav mode, underscore in replace mode.""", ).tag(config=True) ttimeoutlen = Float( 0.01, help="""The time in milliseconds that is waited for a key code to complete.""", ).tag(config=True) timeoutlen = Float( 0.5, help="""The time in milliseconds that is waited for a mapped key sequence to complete.""", ).tag(config=True) autoformatter = Unicode( None, help="Autoformatter to reformat Terminal code. Can be `'black'`, `'yapf'` or `None`", allow_none=True ).tag(config=True) auto_match = Bool( False, help=""" Automatically add/delete closing bracket or quote when opening bracket or quote is entered/deleted. Brackets: (), [], {} Quotes: '', \"\" """, ).tag(config=True) mouse_support = Bool(False, help="Enable mouse support in the prompt\n(Note: prevents selecting text with the mouse)" ).tag(config=True) # We don't load the list of styles for the help string, because loading # Pygments plugins takes time and can cause unexpected errors. highlighting_style = Union( [Unicode("legacy"), Type(klass=Style)], help="""Deprecated, and has not effect, use IPython themes The name or class of a Pygments style to use for syntax highlighting. 
To see available styles, run `pygmentize -L styles`.""", ).tag(config=True) @validate('editing_mode') def _validate_editing_mode(self, proposal): if proposal['value'].lower() == 'vim': proposal['value']= 'vi' elif proposal['value'].lower() == 'default': proposal['value']= 'emacs' if hasattr(EditingMode, proposal['value'].upper()): return proposal['value'].lower() return self.editing_mode @observe('editing_mode') def _editing_mode(self, change): if self.pt_app: self.pt_app.editing_mode = getattr(EditingMode, change.new.upper()) def _set_formatter(self, formatter): if formatter is None: self.reformat_handler = lambda x:x elif formatter == 'black': self.reformat_handler = black_reformat_handler elif formatter == "yapf": self.reformat_handler = yapf_reformat_handler else: raise ValueError @observe("autoformatter") def _autoformatter_changed(self, change): formatter = change.new self._set_formatter(formatter) @observe('highlighting_style') @observe('colors') def _highlighting_style_changed(self, change): assert change.new == change.new.lower() if change.new != "legacy": warn( "highlighting_style is deprecated since 9.0 and have no effect, use themeing." ) return def refresh_style(self): self._style = self._make_style_from_name_or_cls("legacy") # TODO: deprecate this highlighting_style_overrides = Dict( help="Override highlighting format for specific tokens" ).tag(config=True) true_color = Bool(False, help="""Use 24bit colors instead of 256 colors in prompt highlighting. If your terminal supports true color, the following command should print ``TRUECOLOR`` in orange:: printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\" """, ).tag(config=True) editor = Unicode(get_default_editor(), help="Set the editor used by IPython (default to $EDITOR/vi/notepad)." 
).tag(config=True) prompts_class = Type(Prompts, help='Class used to generate Prompt token for prompt_toolkit').tag(config=True) prompts = Instance(Prompts) @default('prompts') def _prompts_default(self): return self.prompts_class(self) # @observe('prompts') # def _(self, change): # self._update_layout() @default('displayhook_class') def _displayhook_class_default(self): return RichPromptDisplayHook term_title = Bool(True, help="Automatically set the terminal title" ).tag(config=True) term_title_format = Unicode("IPython: {cwd}", help="Customize the terminal title format. This is a python format string. " + "Available substitutions are: {cwd}." ).tag(config=True) display_completions = Enum(('column', 'multicolumn','readlinelike'), help= ( "Options for displaying tab completions, 'column', 'multicolumn', and " "'readlinelike'. These options are for `prompt_toolkit`, see " "`prompt_toolkit` documentation for more information." ), default_value='multicolumn').tag(config=True) highlight_matching_brackets = Bool(True, help="Highlight matching brackets.", ).tag(config=True) extra_open_editor_shortcuts = Bool(False, help="Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. " "This is in addition to the F2 binding, which is always enabled." ).tag(config=True) handle_return = Any(None, help="Provide an alternative handler to be called when the user presses " "Return. This is an advanced option intended for debugging, which " "may be changed or removed in later releases." ).tag(config=True) enable_history_search = Bool(True, help="Allows to enable/disable the prompt toolkit history search" ).tag(config=True) autosuggestions_provider = Unicode( "NavigableAutoSuggestFromHistory", help="Specifies from which source automatic suggestions are provided. " "Can be set to ``'NavigableAutoSuggestFromHistory'`` (:kbd:`up` and " ":kbd:`down` swap suggestions), ``'AutoSuggestFromHistory'``, " " or ``None`` to disable automatic suggestions. 
" "Default is `'NavigableAutoSuggestFromHistory`'.", allow_none=True, ).tag(config=True) _autosuggestions_provider: Any llm_constructor_kwargs = Dict( {}, help=""" Extra arguments to pass to `llm_provider_class` constructor. This is used to – for example – set the `model_id`""", ).tag(config=True) llm_prefix_from_history = DottedObjectName( "input_history", help="""\ Fully Qualifed name of a function that takes an IPython history manager and return a prefix to pass the llm provider in addition to the current buffer text. You can use: - no_prefix - input_history As default value. `input_history` (default), will use all the input history of current IPython session """, ).tag(config=True) _llm_prefix_from_history: Any @observe("llm_prefix_from_history") def _llm_prefix_from_history_changed(self, change): name = change.new self._llm_prefix_from_history = name self._set_autosuggestions() llm_provider_class = DottedObjectName( None, allow_none=True, help="""\ Provisional: This is a provisional API in IPython 8.32, before stabilisation in 9.0, it may change without warnings. 
class to use for the `NavigableAutoSuggestFromHistory` to request completions from a LLM, this should inherit from `jupyter_ai_magics:BaseProvider` and implement `stream_inline_completions` """, ).tag(config=True) _llm_provider_class: Any = None @observe("llm_provider_class") def _llm_provider_class_changed(self, change): provider_class = change.new self._llm_provider_class = provider_class self._set_autosuggestions() def _set_autosuggestions(self, provider=None): if provider is None: provider = self.autosuggestions_provider # disconnect old handler if self.auto_suggest and isinstance( self.auto_suggest, NavigableAutoSuggestFromHistory ): self.auto_suggest.disconnect() if provider is None: self.auto_suggest = None elif provider == "AutoSuggestFromHistory": self.auto_suggest = AutoSuggestFromHistory() elif provider == "NavigableAutoSuggestFromHistory": # LLM stuff are all Provisional in 8.32 if self._llm_provider_class: def init_llm_provider(): llm_provider_constructor = import_item(self._llm_provider_class) return llm_provider_constructor(**self.llm_constructor_kwargs) else: init_llm_provider = None self.auto_suggest = NavigableAutoSuggestFromHistory() # Provisinal in 8.32 self.auto_suggest._init_llm_provider = init_llm_provider name = self.llm_prefix_from_history if name == "no_prefix": def no_prefix(history_manager): return "" fun = no_prefix elif name == "input_history": def input_history(history_manager): return "\n".join([s[2] for s in history_manager.get_range()]) + "\n" fun = input_history else: fun = import_item(name) self.auto_suggest._llm_prefixer = fun else: raise ValueError("No valid provider.") if self.pt_app: self.pt_app.auto_suggest = self.auto_suggest @observe("autosuggestions_provider") def _autosuggestions_provider_changed(self, change): provider = change.new self._set_autosuggestions(provider) shortcuts = List( trait=Dict( key_trait=Enum( [ "command", "match_keys", "match_filter", "new_keys", "new_filter", "create", ] ), per_key_traits={ 
"command": Unicode(), "match_keys": List(Unicode()), "match_filter": Unicode(), "new_keys": List(Unicode()), "new_filter": Unicode(), "create": Bool(False), }, ), help=""" Add, disable or modifying shortcuts. Each entry on the list should be a dictionary with ``command`` key identifying the target function executed by the shortcut and at least one of the following: - ``match_keys``: list of keys used to match an existing shortcut, - ``match_filter``: shortcut filter used to match an existing shortcut, - ``new_keys``: list of keys to set, - ``new_filter``: a new shortcut filter to set The filters have to be composed of pre-defined verbs and joined by one of the following conjunctions: ``&`` (and), ``|`` (or), ``~`` (not). The pre-defined verbs are: {filters} To disable a shortcut set ``new_keys`` to an empty list. To add a shortcut add key ``create`` with value ``True``. When modifying/disabling shortcuts, ``match_keys``/``match_filter`` can be omitted if the provided specification uniquely identifies a shortcut to be modified/disabled. When modifying a shortcut ``new_filter`` or ``new_keys`` can be omitted which will result in reuse of the existing filter/keys. Only shortcuts defined in IPython (and not default prompt-toolkit shortcuts) can be modified or disabled. The full list of shortcuts, command identifiers and filters is available under :ref:`terminal-shortcuts-list`. Here is an example: .. 
code:: c.TerminalInteractiveShell.shortcuts = [ {{ "new_keys": ["c-q"], "command": "prompt_toolkit:named_commands.capitalize_word", "create": True, }}, {{ "new_keys": ["c-j"], "command": "prompt_toolkit:named_commands.beginning_of_line", "create": True, }}, ] """.format( filters="\n ".join([f" - ``{k}``" for k in KEYBINDING_FILTERS]) ), ).tag(config=True) @observe("shortcuts") def _shortcuts_changed(self, change): if self.pt_app: self.pt_app.key_bindings = self._merge_shortcuts(user_shortcuts=change.new) def _merge_shortcuts(self, user_shortcuts): # rebuild the bindings list from scratch key_bindings = create_ipython_shortcuts(self) # for now we only allow adding shortcuts for a specific set of # commands; this is a security precution. allowed_commands = { create_identifier(binding.command): binding.command for binding in KEY_BINDINGS } allowed_commands.update( { create_identifier(command): command for command in UNASSIGNED_ALLOWED_COMMANDS } ) shortcuts_to_skip = [] shortcuts_to_add = [] for shortcut in user_shortcuts: command_id = shortcut["command"] if command_id not in allowed_commands: allowed_commands = "\n - ".join(allowed_commands) raise ValueError( f"{command_id} is not a known shortcut command." 
f" Allowed commands are: \n - {allowed_commands}" ) old_keys = shortcut.get("match_keys", None) old_filter = ( filter_from_string(shortcut["match_filter"]) if "match_filter" in shortcut else None ) matching = [ binding for binding in KEY_BINDINGS if ( (old_filter is None or binding.filter == old_filter) and (old_keys is None or [k for k in binding.keys] == old_keys) and create_identifier(binding.command) == command_id ) ] new_keys = shortcut.get("new_keys", None) new_filter = shortcut.get("new_filter", None) command = allowed_commands[command_id] creating_new = shortcut.get("create", False) modifying_existing = not creating_new and ( new_keys is not None or new_filter ) if creating_new and new_keys == []: raise ValueError("Cannot add a shortcut without keys") if modifying_existing: specification = { key: shortcut[key] for key in ["command", "filter"] if key in shortcut } if len(matching) == 0: raise ValueError( f"No shortcuts matching {specification} found in {KEY_BINDINGS}" ) elif len(matching) > 1: raise ValueError( f"Multiple shortcuts matching {specification} found," f" please add keys/filter to select one of: {matching}" ) matched = matching[0] old_filter = matched.filter old_keys = list(matched.keys) shortcuts_to_skip.append( RuntimeBinding( command, keys=old_keys, filter=old_filter, ) ) if new_keys != []: shortcuts_to_add.append( RuntimeBinding( command, keys=new_keys or old_keys, filter=( filter_from_string(new_filter) if new_filter is not None else ( old_filter if old_filter is not None else filter_from_string("always") ) ), ) ) # rebuild the bindings list from scratch key_bindings = create_ipython_shortcuts(self, skip=shortcuts_to_skip) for binding in shortcuts_to_add: add_binding(key_bindings, binding) return key_bindings prompt_includes_vi_mode = Bool(True, help="Display the current vi mode (when using vi editing mode)." 
).tag(config=True) prompt_line_number_format = Unicode( "", help="The format for line numbering, will be passed `line` (int, 1 based)" " the current line number and `rel_line` the relative line number." " for example to display both you can use the following template string :" " c.TerminalInteractiveShell.prompt_line_number_format='{line: 4d}/{rel_line:+03d} | '" " This will display the current line number, with leading space and a width of at least 4" " character, as well as the relative line number 0 padded and always with a + or - sign." " Note that when using Emacs mode the prompt of the first line may not update.", ).tag(config=True) @observe('term_title') def init_term_title(self, change=None): # Enable or disable the terminal title. if self.term_title and _is_tty: toggle_set_term_title(True) set_term_title(self.term_title_format.format(cwd=abbrev_cwd())) else: toggle_set_term_title(False) def restore_term_title(self): if self.term_title and _is_tty: restore_term_title() def init_display_formatter(self): super(TerminalInteractiveShell, self).init_display_formatter() # terminal only supports plain text if not explicitly configured config = self.display_formatter._trait_values["config"] if not ( "DisplayFormatter" in config and "active_types" in config["DisplayFormatter"] ): self.display_formatter.active_types = ["text/plain"] def init_prompt_toolkit_cli(self): if self.simple_prompt: # Fall back to plain non-interactive output for tests. # This is very limited. 
def prompt(): prompt_text = "".join(x[1] for x in self.prompts.in_prompt_tokens()) lines = [input(prompt_text)] prompt_continuation = "".join( x[1] for x in self.prompts.continuation_prompt_tokens() ) while self.check_complete("\n".join(lines))[0] == "incomplete": lines.append(input(prompt_continuation)) return "\n".join(lines) self.prompt_for_code = prompt return # Set up keyboard shortcuts key_bindings = self._merge_shortcuts(user_shortcuts=self.shortcuts) # Pre-populate history from IPython's history database history = PtkHistoryAdapter(self) self.refresh_style() ptk_s = DynamicStyle(lambda: self._style) editing_mode = getattr(EditingMode, self.editing_mode.upper()) self._use_asyncio_inputhook = False self.pt_app = PromptSession( auto_suggest=self.auto_suggest, editing_mode=editing_mode, key_bindings=key_bindings, history=history, completer=IPythonPTCompleter(shell=self), enable_history_search=self.enable_history_search, style=ptk_s, include_default_pygments_style=False, mouse_support=self.mouse_support, enable_open_in_editor=self.extra_open_editor_shortcuts, color_depth=self.color_depth, tempfile_suffix=".py", **self._extra_prompt_options(), ) if isinstance(self.auto_suggest, NavigableAutoSuggestFromHistory): self.auto_suggest.connect(self.pt_app) def _make_style_from_name_or_cls(self, name_or_cls): """ Small wrapper that make an IPython compatible style from a style name We need that to add style for prompt ... etc. 
""" assert name_or_cls == "legacy" legacy = self.colors.lower() theme = theme_table.get(legacy, None) assert theme is not None, legacy if legacy == "nocolor": style_overrides = {} style_cls = _NoStyle else: style_overrides = {**theme.extra_style, **self.highlighting_style_overrides} if theme.base is not None: style_cls = get_style_by_name(theme.base) else: style_cls = _NoStyle style = merge_styles( [ style_from_pygments_cls(style_cls), style_from_pygments_dict(style_overrides), ] ) return style @property def pt_complete_style(self): return { 'multicolumn': CompleteStyle.MULTI_COLUMN, 'column': CompleteStyle.COLUMN, 'readlinelike': CompleteStyle.READLINE_LIKE, }[self.display_completions] @property def color_depth(self): return (ColorDepth.TRUE_COLOR if self.true_color else None) def _ptk_prompt_cont(self, width: int, line_number: int, wrap_count: int): return PygmentsTokens( _backward_compat_continuation_prompt_tokens( self.prompts.continuation_prompt_tokens, width, lineno=line_number, wrap_count=wrap_count, ) ) def _extra_prompt_options(self): """ Return the current layout option for the current Terminal InteractiveShell """ def get_message(): return PygmentsTokens(self.prompts.in_prompt_tokens()) if self.editing_mode == "emacs" and self.prompt_line_number_format == "": # with emacs mode the prompt is (usually) static, so we call only # the function once. With VI mode it can toggle between [ins] and # [nor] so we can't precompute. # here I'm going to favor the default keybinding which almost # everybody uses to decrease CPU usage. # if we have issues with users with custom Prompts we can see how to # work around this. 
get_message = get_message() options = { "complete_in_thread": False, "lexer": IPythonPTLexer(), "reserve_space_for_menu": self.space_for_menu, "message": get_message, "prompt_continuation": self._ptk_prompt_cont, "multiline": True, "complete_style": self.pt_complete_style, "input_processors": [ # Highlight matching brackets, but only when this setting is # enabled, and only when the DEFAULT_BUFFER has the focus. ConditionalProcessor( processor=HighlightMatchingBracketProcessor(chars="[](){}"), filter=HasFocus(DEFAULT_BUFFER) & ~IsDone() & Condition(lambda: self.highlight_matching_brackets), ), # Show auto-suggestion in lines other than the last line. ConditionalProcessor( processor=AppendAutoSuggestionInAnyLine(), filter=HasFocus(DEFAULT_BUFFER) & ~IsDone() & Condition( lambda: isinstance( self.auto_suggest, NavigableAutoSuggestFromHistory, ) ), ), ], } return options def prompt_for_code(self): if self.rl_next_input: default = self.rl_next_input self.rl_next_input = None else: default = '' # In order to make sure that asyncio code written in the # interactive shell doesn't interfere with the prompt, we run the # prompt in a different event loop. # If we don't do this, people could spawn coroutine with a # while/true inside which will freeze the prompt. with patch_stdout(raw=True): if self._use_asyncio_inputhook: # When we integrate the asyncio event loop, run the UI in the # same event loop as the rest of the code. don't use an actual # input hook. (Asyncio is not made for nesting event loops.) 
asyncio_loop = get_asyncio_loop() text = asyncio_loop.run_until_complete( self.pt_app.prompt_async( default=default, **self._extra_prompt_options() ) ) else: text = self.pt_app.prompt( default=default, inputhook=self._inputhook, **self._extra_prompt_options(), ) return text def init_io(self): if sys.platform not in {'win32', 'cli'}: return import colorama colorama.init() def init_magics(self): super(TerminalInteractiveShell, self).init_magics() self.register_magics(TerminalMagics) def init_alias(self): # The parent class defines aliases that can be safely used with any # frontend. super(TerminalInteractiveShell, self).init_alias() # Now define aliases that only make sense on the terminal, because they # need direct access to the console in a way that we can't emulate in # GUI or web frontend if os.name == 'posix': for cmd in ('clear', 'more', 'less', 'man'): self.alias_manager.soft_define_alias(cmd, cmd) def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._set_autosuggestions(self.autosuggestions_provider) self.init_prompt_toolkit_cli() self.init_term_title() self.keep_running = True self._set_formatter(self.autoformatter) def ask_exit(self): self.keep_running = False rl_next_input = None def interact(self): self.keep_running = True while self.keep_running: print(self.separate_in, end='') try: code = self.prompt_for_code() except EOFError: if (not self.confirm_exit) \ or self.ask_yes_no('Do you really want to exit ([y]/n)?','y','n'): self.ask_exit() else: if code: self.run_cell(code, store_history=True) def mainloop(self): # An extra layer of protection in case someone mashing Ctrl-C breaks # out of our internal code. while True: try: self.interact() break except KeyboardInterrupt as e: print("\n%s escaped interact()\n" % type(e).__name__) finally: # An interrupt during the eventloop will mess up the # internal state of the prompt_toolkit library. 
# Stopping the eventloop fixes this, see # https://github.com/ipython/ipython/pull/9867 if hasattr(self, '_eventloop'): self._eventloop.stop() self.restore_term_title() # try to call some at-exit operation optimistically as some things can't # be done during interpreter shutdown. this is technically inaccurate as # this make mainlool not re-callable, but that should be a rare if not # in existent use case. self._atexit_once() _inputhook = None def inputhook(self, context): warn( "inputkook seem unused, and marked for deprecation/Removal as of IPython 9.0. " "Please open an issue if you are using it.", category=DeprecationWarning, stacklevel=2, ) if self._inputhook is not None: self._inputhook(context) active_eventloop: Optional[str] = None def enable_gui(self, gui: Optional[str] = None) -> None: if gui: from ..core.pylabtools import _convert_gui_from_matplotlib gui = _convert_gui_from_matplotlib(gui) if self.simple_prompt is True and gui is not None: print( f'Cannot install event loop hook for "{gui}" when running with `--simple-prompt`.' ) print( "NOTE: Tk is supported natively; use Tk apps and Tk backends with `--simple-prompt`." ) return if self._inputhook is None and gui is None: print("No event loop hook running.") return if self._inputhook is not None and gui is not None: newev, newinhook = get_inputhook_name_and_func(gui) if self._inputhook == newinhook: # same inputhook, do nothing self.log.info( f"Shell is already running the {self.active_eventloop} eventloop. Doing nothing" ) return self.log.warning( f"Shell is already running a different gui event loop for {self.active_eventloop}. " "Call with no arguments to disable the current loop." ) return if self._inputhook is not None and gui is None: self.active_eventloop = self._inputhook = None if gui and (gui not in {None, "webagg"}): # This hook runs with each cycle of the `prompt_toolkit`'s event loop. 
self.active_eventloop, self._inputhook = get_inputhook_name_and_func(gui) else: self.active_eventloop = self._inputhook = None self._use_asyncio_inputhook = gui == "asyncio" # Run !system commands directly, not through pipes, so terminal programs # work correctly. system = InteractiveShell.system_raw def auto_rewrite_input(self, cmd): """Overridden from the parent class to use fancy rewriting prompt""" if not self.show_rewritten_input: return tokens = self.prompts.rewrite_prompt_tokens() if self.pt_app: print_formatted_text(PygmentsTokens(tokens), end='', style=self.pt_app.app.style) print(cmd) else: prompt = ''.join(s for t, s in tokens) print(prompt, cmd, sep='') _prompts_before = None def switch_doctest_mode(self, mode): """Switch prompts to classic for %doctest_mode""" if mode: self._prompts_before = self.prompts self.prompts = ClassicPrompts(self) elif self._prompts_before: self.prompts = self._prompts_before self._prompts_before = None # self._update_layout() InteractiveShellABC.register(TerminalInteractiveShell) if __name__ == '__main__': TerminalInteractiveShell.instance().interact()
TerminalInteractiveShell
python
jazzband__tablib
src/tablib/formats/_latex.py
{ "start": 117, "end": 4148 }
class ____:
    """LaTeX export format.

    Renders a dataset as a LaTeX ``table`` environment built on the
    ``booktabs`` package (``\\toprule`` / ``\\cmidrule`` / ``\\bottomrule``).
    """

    title = 'latex'
    extensions = ('tex',)

    TABLE_TEMPLATE = """\
%% Note: add \\usepackage{booktabs} to your preamble
%%
\\begin{table}[!htbp]
  \\centering
  %(CAPTION)s
  \\begin{tabular}{%(COLSPEC)s}
    \\toprule
%(HEADER)s
%(MIDRULE)s
%(BODY)s
    \\bottomrule
  \\end{tabular}
\\end{table}
"""

    # Replacement text for every character TeX treats specially.
    TEX_RESERVED_SYMBOLS_MAP = {
        '\\': '\\textbackslash{}',
        '{': '\\{',
        '}': '\\}',
        '$': '\\$',
        '&': '\\&',
        '#': '\\#',
        '^': '\\textasciicircum{}',
        '_': '\\_',
        '~': '\\textasciitilde{}',
        '%': '\\%',
    }

    # One alternation group matching any of the reserved symbols above.
    TEX_RESERVED_SYMBOLS_RE = re.compile(
        '(' + '|'.join(re.escape(symbol) for symbol in TEX_RESERVED_SYMBOLS_MAP) + ')')

    @classmethod
    def export_set(cls, dataset):
        """Returns LaTeX representation of dataset

        :param dataset: dataset to serialize
        :type dataset: tablib.core.Dataset
        """
        fields = {
            'CAPTION': f'\\caption{{{dataset.title}}}' if dataset.title else '%',
            'COLSPEC': cls._colspec(dataset.width),
            'HEADER': cls._serialize_row(dataset.headers) if dataset.headers else '',
            'MIDRULE': cls._midrule(dataset.width),
            'BODY': '\n'.join(cls._serialize_row(row) for row in dataset),
        }
        return cls.TABLE_TEMPLATE % fields

    @classmethod
    def _colspec(cls, dataset_width):
        """Generates the column specification for the LaTeX `tabular`
        environment based on the dataset width.

        The first column is justified to the left, all further columns are
        aligned to the right.

        .. note:: This is only a heuristic and most probably has to be
            fine-tuned post export. Column alignment should depend on the
            data type, e.g., textual content should usually be aligned to
            the left while numeric content almost always should be aligned
            to the right.

        :param dataset_width: width of the dataset
        """
        # 'l' for the first column, 'r' for each remaining one.
        return 'l' + (dataset_width - 1) * 'r'

    @classmethod
    def _midrule(cls, dataset_width):
        """Generates the table `midrule`, which may be composed of several
        `cmidrules`.

        :param dataset_width: width of the dataset to serialize
        """
        if not dataset_width or dataset_width == 1:
            return '\\midrule'
        return ' '.join(
            cls._cmidrule(index, dataset_width)
            for index in range(1, dataset_width + 1)
        )

    @classmethod
    def _cmidrule(cls, colindex, dataset_width):
        """Generates the `cmidrule` for a single column with appropriate
        trimming based on the column position.

        :param colindex: Column index
        :param dataset_width: width of the dataset
        """
        if colindex == 1:
            # Rule of first column is trimmed on the right.
            trim = 'r'
        elif colindex == dataset_width:
            # Rule of last column is trimmed on the left.
            trim = 'l'
        else:
            # Inner columns are trimmed on the left and right.
            trim = 'lr'
        return '\\cmidrule(%s){%d-%d}' % (trim, colindex, colindex)

    @classmethod
    def _serialize_row(cls, row):
        """Returns string representation of a single row.

        :param row: single dataset row
        """
        # Falsy cells (None, '', 0) are rendered as empty LaTeX cells.
        cells = [
            cls._escape_tex_reserved_symbols(str(cell)) if cell else ''
            for cell in row
        ]
        return 6 * ' ' + ' & '.join(cells) + ' \\\\'

    @classmethod
    def _escape_tex_reserved_symbols(cls, string):
        """Escapes all TeX reserved symbols ('_', '~', etc.) in a string.

        :param string: String to escape
        """
        return cls.TEX_RESERVED_SYMBOLS_RE.sub(
            lambda match: cls.TEX_RESERVED_SYMBOLS_MAP[match.group()], string)
LATEXFormat
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_P.py
{ "start": 170, "end": 1375 }
class ____(Benchmark): r""" Parsopoulos objective function. This class defines the Parsopoulos [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Parsopoulos}}(x) = \cos(x_1)^2 + \sin(x_2)^2 with :math:`x_i \in [-5, 5]` for :math:`i = 1, 2`. *Global optimum*: This function has infinite number of global minima in R2, at points :math:`\left(k\frac{\pi}{2}, \lambda \pi \right)`, where :math:`k = \pm1, \pm3, ...` and :math:`\lambda = 0, \pm1, \pm2, ...` In the given domain problem, function has 12 global minima all equal to zero. .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N)) self.global_optimum = [[pi / 2.0, pi]] self.fglob = 0 def fun(self, x, *args): self.nfev += 1 return cos(x[0]) ** 2.0 + sin(x[1]) ** 2.0
Parsopoulos
python
PrefectHQ__prefect
tests/cli/test_api_command.py
{ "start": 13534, "end": 15355 }
class ____: """Test output formatting and verbosity.""" def test_json_output(self, respx_mock: MockRouter) -> None: """Test JSON is output correctly.""" respx_mock.get("http://localhost:4200/api/flows/123").mock( return_value=httpx.Response(200, json={"id": "123", "name": "test"}) ) with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}): result = invoke_and_assert( ["api", "GET", "/flows/123"], expected_code=0, ) assert "123" in result.output assert "test" in result.output def test_verbose_shows_request_info(self, respx_mock: MockRouter) -> None: """Test --verbose shows request details.""" respx_mock.get("http://localhost:4200/api/flows").mock( return_value=httpx.Response( 200, json={}, headers={"content-type": "application/json"} ) ) with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}): result = invoke_and_assert( ["api", "GET", "/flows", "--verbose"], expected_code=0, ) assert "GET" in result.output assert "200" in result.output def test_verbose_on_error(self, respx_mock: MockRouter) -> None: """Test --verbose shows details on errors.""" respx_mock.get("http://localhost:4200/api/flows/bad").mock( return_value=httpx.Response(404, json={"detail": "Not found"}) ) with temporary_settings({PREFECT_API_URL: "http://localhost:4200/api"}): result = invoke_and_assert( ["api", "GET", "/flows/bad", "--verbose"], expected_code=4, ) assert "404" in result.output
TestOutputFormatting
python
huggingface__transformers
src/transformers/models/glm4_moe/modeling_glm4_moe.py
{ "start": 21391, "end": 22403 }
class ____(PreTrainedModel): config: Glm4MoeConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["Glm4MoeDecoderLayer"] _skip_keys_device_placement = ["past_key_values"] _supports_flash_attn = True _supports_sdpa = True _supports_flex_attn = True _can_compile_fullgraph = False _supports_attention_backend = True _can_record_outputs = { "hidden_states": Glm4MoeDecoderLayer, "attentions": Glm4MoeAttention, } @torch.no_grad() def _init_weights(self, module): super()._init_weights(module) if isinstance(module, Glm4MoeTopkRouter): init.normal_(module.weight, mean=0.0, std=self.config.initializer_range) elif isinstance(module, Glm4MoeNaiveMoe): init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range) init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range) @auto_docstring
Glm4MoePreTrainedModel
python
bokeh__bokeh
src/bokeh/models/nodes.py
{ "start": 1638, "end": 3366 }
class ____: """ Provider of box nodes for box-like models. """ def __init__(self, target: Model | ImplicitTargetType) -> None: self.target = target def _node(self, symbol: str) -> Node: return Node(target=self.target, symbol=symbol) @property def left(self) -> Node: return self._node("left") @property def right(self) -> Node: return self._node("right") @property def top(self) -> Node: return self._node("top") @property def bottom(self) -> Node: return self._node("bottom") @property def top_left(self) -> Node: return self._node("top_left") @property def top_center(self) -> Node: return self._node("top_center") @property def top_right(self) -> Node: return self._node("top_right") @property def center_left(self) -> Node: return self._node("center_left") @property def center(self) -> Node: return self._node("center") @property def center_right(self) -> Node: return self._node("center_right") @property def bottom_left(self) -> Node: return self._node("bottom_left") @property def bottom_center(self) -> Node: return self._node("bottom_center") @property def bottom_right(self) -> Node: return self._node("bottom_right") @property def width(self) -> Node: return self._node("width") @property def height(self) -> Node: return self._node("height") #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- @abstract
BoxNodes
python
encode__django-rest-framework
tests/test_pagination.py
{ "start": 12437, "end": 22912 }
class ____: """ Unit tests for `pagination.LimitOffsetPagination`. """ def setup_method(self): class ExamplePagination(pagination.LimitOffsetPagination): default_limit = 10 max_limit = 15 self.pagination = ExamplePagination() self.queryset = range(1, 101) def paginate_queryset(self, request): return list(self.pagination.paginate_queryset(self.queryset, request)) def get_paginated_content(self, queryset): response = self.pagination.get_paginated_response(queryset) return response.data def get_html_context(self): return self.pagination.get_html_context() def test_no_offset(self): request = Request(factory.get('/', {'limit': 5})) queryset = self.paginate_queryset(request) content = self.get_paginated_content(queryset) context = self.get_html_context() assert queryset == [1, 2, 3, 4, 5] assert content == { 'results': [1, 2, 3, 4, 5], 'previous': None, 'next': 'http://testserver/?limit=5&offset=5', 'count': 100 } assert context == { 'previous_url': None, 'next_url': 'http://testserver/?limit=5&offset=5', 'page_links': [ PageLink('http://testserver/?limit=5', 1, True, False), PageLink('http://testserver/?limit=5&offset=5', 2, False, False), PageLink('http://testserver/?limit=5&offset=10', 3, False, False), PAGE_BREAK, PageLink('http://testserver/?limit=5&offset=95', 20, False, False), ] } assert self.pagination.display_page_controls assert isinstance(self.pagination.to_html(), str) def test_pagination_not_applied_if_limit_or_default_limit_not_set(self): class MockPagination(pagination.LimitOffsetPagination): default_limit = None request = Request(factory.get('/')) queryset = MockPagination().paginate_queryset(self.queryset, request) assert queryset is None def test_single_offset(self): """ When the offset is not a multiple of the limit we get some edge cases: * The first page should still be offset zero. * We may end up displaying an extra page in the pagination control. 
""" request = Request(factory.get('/', {'limit': 5, 'offset': 1})) queryset = self.paginate_queryset(request) content = self.get_paginated_content(queryset) context = self.get_html_context() assert queryset == [2, 3, 4, 5, 6] assert content == { 'results': [2, 3, 4, 5, 6], 'previous': 'http://testserver/?limit=5', 'next': 'http://testserver/?limit=5&offset=6', 'count': 100 } assert context == { 'previous_url': 'http://testserver/?limit=5', 'next_url': 'http://testserver/?limit=5&offset=6', 'page_links': [ PageLink('http://testserver/?limit=5', 1, False, False), PageLink('http://testserver/?limit=5&offset=1', 2, True, False), PageLink('http://testserver/?limit=5&offset=6', 3, False, False), PAGE_BREAK, PageLink('http://testserver/?limit=5&offset=96', 21, False, False), ] } def test_first_offset(self): request = Request(factory.get('/', {'limit': 5, 'offset': 5})) queryset = self.paginate_queryset(request) content = self.get_paginated_content(queryset) context = self.get_html_context() assert queryset == [6, 7, 8, 9, 10] assert content == { 'results': [6, 7, 8, 9, 10], 'previous': 'http://testserver/?limit=5', 'next': 'http://testserver/?limit=5&offset=10', 'count': 100 } assert context == { 'previous_url': 'http://testserver/?limit=5', 'next_url': 'http://testserver/?limit=5&offset=10', 'page_links': [ PageLink('http://testserver/?limit=5', 1, False, False), PageLink('http://testserver/?limit=5&offset=5', 2, True, False), PageLink('http://testserver/?limit=5&offset=10', 3, False, False), PAGE_BREAK, PageLink('http://testserver/?limit=5&offset=95', 20, False, False), ] } def test_middle_offset(self): request = Request(factory.get('/', {'limit': 5, 'offset': 10})) queryset = self.paginate_queryset(request) content = self.get_paginated_content(queryset) context = self.get_html_context() assert queryset == [11, 12, 13, 14, 15] assert content == { 'results': [11, 12, 13, 14, 15], 'previous': 'http://testserver/?limit=5&offset=5', 'next': 
'http://testserver/?limit=5&offset=15', 'count': 100 } assert context == { 'previous_url': 'http://testserver/?limit=5&offset=5', 'next_url': 'http://testserver/?limit=5&offset=15', 'page_links': [ PageLink('http://testserver/?limit=5', 1, False, False), PageLink('http://testserver/?limit=5&offset=5', 2, False, False), PageLink('http://testserver/?limit=5&offset=10', 3, True, False), PageLink('http://testserver/?limit=5&offset=15', 4, False, False), PAGE_BREAK, PageLink('http://testserver/?limit=5&offset=95', 20, False, False), ] } def test_ending_offset(self): request = Request(factory.get('/', {'limit': 5, 'offset': 95})) queryset = self.paginate_queryset(request) content = self.get_paginated_content(queryset) context = self.get_html_context() assert queryset == [96, 97, 98, 99, 100] assert content == { 'results': [96, 97, 98, 99, 100], 'previous': 'http://testserver/?limit=5&offset=90', 'next': None, 'count': 100 } assert context == { 'previous_url': 'http://testserver/?limit=5&offset=90', 'next_url': None, 'page_links': [ PageLink('http://testserver/?limit=5', 1, False, False), PAGE_BREAK, PageLink('http://testserver/?limit=5&offset=85', 18, False, False), PageLink('http://testserver/?limit=5&offset=90', 19, False, False), PageLink('http://testserver/?limit=5&offset=95', 20, True, False), ] } def test_erroneous_offset(self): request = Request(factory.get('/', {'limit': 5, 'offset': 1000})) queryset = self.paginate_queryset(request) self.get_paginated_content(queryset) self.get_html_context() def test_invalid_offset(self): """ An invalid offset query param should be treated as 0. """ request = Request(factory.get('/', {'limit': 5, 'offset': 'invalid'})) queryset = self.paginate_queryset(request) assert queryset == [1, 2, 3, 4, 5] def test_invalid_limit(self): """ An invalid limit query param should be ignored in favor of the default. 
""" request = Request(factory.get('/', {'limit': 'invalid', 'offset': 0})) queryset = self.paginate_queryset(request) content = self.get_paginated_content(queryset) next_limit = self.pagination.default_limit next_offset = self.pagination.default_limit next_url = f'http://testserver/?limit={next_limit}&offset={next_offset}' assert queryset == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] assert content.get('next') == next_url def test_zero_limit(self): """ An zero limit query param should be ignored in favor of the default. """ request = Request(factory.get('/', {'limit': 0, 'offset': 0})) queryset = self.paginate_queryset(request) content = self.get_paginated_content(queryset) next_limit = self.pagination.default_limit next_offset = self.pagination.default_limit next_url = f'http://testserver/?limit={next_limit}&offset={next_offset}' assert queryset == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] assert content.get('next') == next_url def test_max_limit(self): """ The limit defaults to the max_limit when there is a max_limit and the requested limit is greater than the max_limit """ offset = 50 request = Request(factory.get('/', {'limit': '11235', 'offset': offset})) queryset = self.paginate_queryset(request) content = self.get_paginated_content(queryset) max_limit = self.pagination.max_limit next_offset = offset + max_limit prev_offset = offset - max_limit base_url = f'http://testserver/?limit={max_limit}' next_url = base_url + f'&offset={next_offset}' prev_url = base_url + f'&offset={prev_offset}' assert queryset == list(range(51, 66)) assert content.get('next') == next_url assert content.get('previous') == prev_url def test_get_paginated_response_schema(self): unpaginated_schema = { 'type': 'object', 'item': { 'properties': { 'test-property': { 'type': 'integer', }, }, }, } assert self.pagination.get_paginated_response_schema(unpaginated_schema) == { 'type': 'object', 'required': ['count', 'results'], 'properties': { 'count': { 'type': 'integer', 'example': 123, }, 'next': { 'type': 
'string', 'nullable': True, 'format': 'uri', 'example': 'http://api.example.org/accounts/?offset=400&limit=100', }, 'previous': { 'type': 'string', 'nullable': True, 'format': 'uri', 'example': 'http://api.example.org/accounts/?offset=200&limit=100', }, 'results': unpaginated_schema, }, }
TestLimitOffset
python
ray-project__ray
release/llm_tests/benchmark/load_test.py
{ "start": 7562, "end": 9824 }
class ____(BaseProvider): def get_url(self): if self.parsed_options.chat: return "/v1/chat/completions" else: return "/v1/completions" def format_payload(self, prompt, max_tokens, images): data = { "model": self.model, "max_tokens": max_tokens, "stream": self.parsed_options.stream, "temperature": self.parsed_options.temperature, "n": self.parsed_options.n, } if self.parsed_options.chat: if images is None: data["messages"] = [{"role": "user", "content": prompt}] else: image_urls = [] for image in images: image_urls.append( {"type": "image_url", "image_url": {"url": image}} ) data["messages"] = [ { "role": "user", "content": [ {"type": "text", "text": prompt}, *image_urls, ], } ] else: data["prompt"] = prompt if images is not None: data["images"] = images if self.parsed_options.logprobs is not None: data["logprobs"] = self.parsed_options.logprobs return data def parse_output_json(self, data, prompt): usage = data.get("usage", None) assert len(data["choices"]) == 1, f"Too many choices {len(data['choices'])}" choice = data["choices"][0] if self.parsed_options.chat: if self.parsed_options.stream: text = choice["delta"].get("content", "") else: text = choice["message"]["content"] else: text = choice["text"] logprobs = (choice.get("logprobs", {}) or {}).get("content", []) return ChunkMetadata( text=text, logprob_tokens=len(logprobs["tokens"]) if logprobs else None, usage_tokens=usage["completion_tokens"] if usage else None, prompt_usage_tokens=(usage.get("prompt_tokens", None) if usage else None), )
OpenAIProvider
python
ansible__ansible
lib/ansible/module_utils/_internal/_messages.py
{ "start": 3605, "end": 4307 }
class ____(WarningSummary): """Deprecation summary with details (possibly derived from an exception __cause__ chain) and an optional traceback.""" deprecator: _t.Optional[PluginInfo] = None """ The identifier for the content which is being deprecated. """ date: _t.Optional[str] = None """ The date after which a new release of `deprecator` will remove the feature described by `msg`. Ignored if `deprecator` is not provided. """ version: _t.Optional[str] = None """ The version of `deprecator` which will remove the feature described by `msg`. Ignored if `deprecator` is not provided. Ignored if `date` is provided. """
DeprecationSummary
python
dagster-io__dagster
python_modules/libraries/dagster-snowflake/dagster_snowflake_tests/test_snowflake_io_manager.py
{ "start": 754, "end": 7553 }
class ____(SnowflakeIOManager): @classmethod def _is_dagster_maintained(cls) -> bool: return True def type_handlers(self): # pyright: ignore[reportIncompatibleMethodOverride] return [PassTypeHandler()] def test_get_select_statement(): assert ( SnowflakeDbClient.get_select_statement( TableSlice(database="database_abc", schema="schema1", table="table1") ) == "SELECT * FROM database_abc.schema1.table1" ) def test_get_select_statement_columns(): assert ( SnowflakeDbClient.get_select_statement( TableSlice( database="database_abc", schema="schema1", table="table1", columns=["apple", "banana"], ) ) == "SELECT apple, banana FROM database_abc.schema1.table1" ) def test_get_select_statement_time_partitioned(): assert ( SnowflakeDbClient.get_select_statement( TableSlice( database="database_abc", schema="schema1", table="table1", partition_dimensions=[ TablePartitionDimension( partitions=TimeWindow(datetime(2020, 1, 2), datetime(2020, 2, 3)), partition_expr="my_timestamp_col", ) ], columns=["apple", "banana"], ) ) == "SELECT apple, banana FROM database_abc.schema1.table1 WHERE\nmy_timestamp_col >=" " '2020-01-02 00:00:00' AND my_timestamp_col < '2020-02-03 00:00:00'" ) def test_get_select_statement_static_partitioned(): assert ( SnowflakeDbClient.get_select_statement( TableSlice( database="database_abc", schema="schema1", table="table1", partition_dimensions=[ TablePartitionDimension(partition_expr="my_fruit_col", partitions=["apple"]) ], columns=["apple", "banana"], ) ) == "SELECT apple, banana FROM database_abc.schema1.table1 WHERE\nmy_fruit_col in ('apple')" ) def test_get_select_statement_multiple_static_partitions(): assert ( SnowflakeDbClient.get_select_statement( TableSlice( database="database_abc", schema="schema1", table="table1", partition_dimensions=[ TablePartitionDimension( partition_expr="fruit_col", partitions=["apple", "banana"] ) ], columns=["fruit_col", "other_col"], ) ) == "SELECT fruit_col, other_col FROM database_abc.schema1.table1 WHERE\nfruit_col in" " 
('apple', 'banana')" ) def test_get_select_statement_multi_partitioned(): assert ( SnowflakeDbClient.get_select_statement( TableSlice( database="database_abc", schema="schema1", table="table1", partition_dimensions=[ TablePartitionDimension(partition_expr="my_fruit_col", partitions=["apple"]), TablePartitionDimension( partitions=TimeWindow(datetime(2020, 1, 2), datetime(2020, 2, 3)), partition_expr="my_timestamp_col", ), ], ) ) == "SELECT * FROM database_abc.schema1.table1 WHERE\nmy_fruit_col in ('apple')" " AND\nmy_timestamp_col >= '2020-01-02 00:00:00' AND my_timestamp_col < '2020-02-03" " 00:00:00'" ) def test_get_cleanup_statement(): assert ( _get_cleanup_statement( TableSlice(database="database_abc", schema="schema1", table="table1") ) == "DELETE FROM database_abc.schema1.table1" ) def test_get_cleanup_statement_time_partitioned(): assert ( _get_cleanup_statement( TableSlice( database="database_abc", schema="schema1", table="table1", partition_dimensions=[ TablePartitionDimension( partitions=TimeWindow(datetime(2020, 1, 2), datetime(2020, 2, 3)), partition_expr="my_timestamp_col", ) ], ) ) == "DELETE FROM database_abc.schema1.table1 WHERE\nmy_timestamp_col >= '2020-01-02" " 00:00:00' AND my_timestamp_col < '2020-02-03 00:00:00'" ) def test_get_cleanup_statement_static_partitioned(): assert ( _get_cleanup_statement( TableSlice( database="database_abc", schema="schema1", table="table1", partition_dimensions=[ TablePartitionDimension(partition_expr="my_fruit_col", partitions=["apple"]) ], ) ) == "DELETE FROM database_abc.schema1.table1 WHERE\nmy_fruit_col in ('apple')" ) def test_get_cleanup_statement_multi_partitioned(): assert ( _get_cleanup_statement( TableSlice( database="database_abc", schema="schema1", table="table1", partition_dimensions=[ TablePartitionDimension(partition_expr="my_fruit_col", partitions=["apple"]), TablePartitionDimension( partitions=TimeWindow(datetime(2020, 1, 2), datetime(2020, 2, 3)), partition_expr="my_timestamp_col", ), ], ) ) == 
"DELETE FROM database_abc.schema1.table1 WHERE\nmy_fruit_col in ('apple')" " AND\nmy_timestamp_col >= '2020-01-02 00:00:00' AND my_timestamp_col < '2020-02-03" " 00:00:00'" ) def test_io_manager_snowflake_additional_snowflake_connection_args(): """Tests that args passed to additional_snowflake_connection_args are correctly forwarded to snowflake.connector.connect. """ with mock.patch("snowflake.connector.connect") as snowflake_conn_mock: io_mgr = TestSnowflakeIOManager( account="account", user="user", password="password", database="TESTDB", schema="TESTSCHEMA", additional_snowflake_connection_args={"foo": "bar"}, ) @asset def return_one(): return 1 materialize([return_one], resources={"io_manager": io_mgr}) assert snowflake_conn_mock.call_count == 1 assert snowflake_conn_mock.call_args[1]["foo"] == "bar"
TestSnowflakeIOManager
python
huggingface__transformers
examples/modular-transformers/modeling_dummy_bert.py
{ "start": 15170, "end": 15743 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states
DummyBertIntermediate
python
bokeh__bokeh
tests/unit/bokeh/core/test_has_props.py
{ "start": 2632, "end": 10968 }
class ____(Child): aliased_int1 = Alias("int1") aliased_int2 = Alias("int2") def test_HasProps_getattr() -> None: p = Parent() assert getattr(p, "int1") == 10 assert p.int1 == 10 assert getattr(p, "foo_prop") == 110 assert p.foo_prop == 110 assert isinstance(getattr(p, "foo_func"), MethodType) assert isinstance(p.foo_func, MethodType) assert getattr(p, "foo_func")() == 111 assert p.foo_func() == 111 assert getattr(p, "_foo_prop") == 1100 assert p._foo_prop == 1100 assert isinstance(getattr(p, "_foo_func"), MethodType) assert isinstance(p._foo_func, MethodType) assert getattr(p, "_foo_func")() == 1110 assert p._foo_func() == 1110 with pytest.raises(AttributeError): getattr(p, "foo_prop2") with pytest.raises(AttributeError): p.foo_prop2 with pytest.raises(AttributeError): getattr(p, "foo_func2") with pytest.raises(AttributeError): p.foo_func2 with pytest.raises(AttributeError): getattr(p, "_foo_prop2") with pytest.raises(AttributeError): p._foo_prop2 with pytest.raises(AttributeError): getattr(p, "_foo_func2") with pytest.raises(AttributeError): p._foo_func2 def test_HasProps_default_init() -> None: p = Parent() assert p.int1 == 10 assert p.ds1 == field("x") assert p.lst1 == [] c = Child() assert c.int1 == 10 assert c.ds1 == field("x") assert c.lst1 == [] assert c.int2 is None assert c.str2 == "foo" assert c.ds2 == field("y") assert c.lst2 == [1,2,3] def test_HasProps_kw_init() -> None: p = Parent(int1=30, ds1=field("foo")) assert p.int1 == 30 assert p.ds1 == field("foo") assert p.lst1 == [] c = Child(str2="bar", lst2=[2,3,4], ds2=10) assert c.int1 == 10 assert c.ds1 == field("x") assert c.lst1 == [] assert c.int2 is None assert c.str2 == "bar" assert c.ds2 == 10 assert c.lst2 == [2,3,4] def test_HasProps_override() -> None: ov = OverrideChild() assert ov.int1 == 20 assert ov.ds1 == field("x") assert ov.lst1 == [] def test_HasProps_intrinsic() -> None: obj0 = Parent(int1=Intrinsic, ds1=Intrinsic, lst1=Intrinsic) assert obj0.int1 == 10 assert obj0.ds1 == field("x") 
assert obj0.lst1 == [] obj1 = Parent(int1=30, ds1=field("y"), lst1=["x", "y", "z"]) assert obj1.int1 == 30 assert obj1.ds1 == field("y") assert obj1.lst1 == ["x", "y", "z"] obj1.int1 = Intrinsic obj1.ds1 = Intrinsic obj1.lst1 = Intrinsic assert obj1.int1 == 10 assert obj1.ds1 == field("x") assert obj1.lst1 == [] def test_HasProps_alias() -> None: obj0 = AliasedChild() assert obj0.int1 == 10 assert obj0.int2 is None assert obj0.aliased_int1 == 10 assert obj0.aliased_int2 is None obj0.int1 = 20 assert obj0.int1 == 20 assert obj0.int2 is None assert obj0.aliased_int1 == 20 assert obj0.aliased_int2 is None obj0.int2 = 1 assert obj0.int1 == 20 assert obj0.int2 == 1 assert obj0.aliased_int1 == 20 assert obj0.aliased_int2 == 1 obj0.aliased_int1 = 30 assert obj0.int1 == 30 assert obj0.int2 == 1 assert obj0.aliased_int1 == 30 assert obj0.aliased_int2 == 1 obj0.aliased_int2 = 2 assert obj0.int1 == 30 assert obj0.int2 == 2 assert obj0.aliased_int1 == 30 assert obj0.aliased_int2 == 2 obj1 = AliasedChild(int1=20) assert obj1.int1 == 20 assert obj1.int2 is None assert obj1.aliased_int1 == 20 assert obj1.aliased_int2 is None obj2 = AliasedChild(int2=1) assert obj2.int1 == 10 assert obj2.int2 == 1 assert obj2.aliased_int1 == 10 assert obj2.aliased_int2 == 1 obj3 = AliasedChild(int1=20, int2=1) assert obj3.int1 == 20 assert obj3.int2 == 1 assert obj3.aliased_int1 == 20 assert obj3.aliased_int2 == 1 obj4 = AliasedChild(aliased_int1=20) assert obj4.int1 == 20 assert obj4.int2 is None assert obj4.aliased_int1 == 20 assert obj4.aliased_int2 is None obj5 = AliasedChild(aliased_int2=1) assert obj5.int1 == 10 assert obj5.int2 == 1 assert obj5.aliased_int1 == 10 assert obj5.aliased_int2 == 1 obj6 = AliasedChild(aliased_int1=20, aliased_int2=1) assert obj6.int1 == 20 assert obj6.int2 == 1 assert obj6.aliased_int1 == 20 assert obj6.aliased_int2 == 1 def test_HasProps_equals() -> None: p1 = Parent() p2 = Parent() assert p1.equals(p2) p1.int1 = 25 assert not p1.equals(p2) p2.int1 = 25 assert 
p1.equals(p2) def test_HasProps_update() -> None: c = Child() c.update(**dict(lst2=[1,2], str2="baz", int1=25, ds1=value(123))) assert c.int1 == 25 assert c.ds1 == value(123) assert c.lst1 == [] assert c.int2 is None assert c.str2 == "baz" assert c.ds2 == field("y") assert c.lst2 == [1,2] def test_HasProps_set_from_json() -> None: c = Child() c.set_from_json('lst2', [1,2]) assert c.int1 == 10 assert c.ds1 == field("x") assert c.lst1 == [] assert c.int2 is None assert c.str2 == "foo" assert c.ds2 == field("y") assert c.lst2 == [1,2] c.set_from_json('ds1', "foo") assert c.int1 == 10 assert c.ds1 == "foo" assert c.lst1 == [] assert c.int2 is None assert c.str2 == "foo" assert c.ds2 == field("y") assert c.lst2 == [1,2] c.set_from_json('int2', 100) assert c.int1 == 10 assert c.ds1 == "foo" assert c.lst1 == [] assert c.int2 == 100 assert c.str2 == "foo" assert c.ds2 == field("y") assert c.lst2 == [1,2] def test_HasProps_set() -> None: c = Child() c.update(**dict(lst2=[1,2], str2="baz", int1=25, ds1=field("foo"))) assert c.int1 == 25 assert c.ds1 == field("foo") assert c.lst1 == [] assert c.int2 is None assert c.str2 == "baz" assert c.ds2 == field("y") assert c.lst2 == [1,2] c.str2_proxy = "some" assert c.str2 == "somesome" assert c.str2_proxy == "somesome" def test_HasProps_set_error() -> None: c = Child() with pytest.raises(AttributeError) as e: c.int3 = 10 assert str(e.value).endswith("unexpected attribute 'int3' to Child, similar attributes are int2 or int1") with pytest.raises(AttributeError) as e: c.junkjunk = 10 assert str(e.value).endswith("unexpected attribute 'junkjunk' to Child, possible attributes are ds1, ds2, int1, int2, lst1, lst2 or str2") def test_HasProps_lookup() -> None: p = Parent() d = p.lookup('int1') assert isinstance(d, PropertyDescriptor) assert d.name == 'int1' d = p.lookup('ds1') assert isinstance(d, DataSpecPropertyDescriptor) assert d.name == 'ds1' d = p.lookup('lst1') assert isinstance(d, PropertyDescriptor) assert d.name == 'lst1' def 
test_HasProps_apply_theme() -> None: c = Child() theme = dict(int2=10, lst1=["foo", "bar"]) c.apply_theme(theme) assert c.themed_values() is theme c.apply_theme(theme) assert c.themed_values() is theme assert c.int2 == 10 assert c.lst1 == ["foo", "bar"] assert c.int1 == 10 assert c.ds1 == field("x") assert c.str2 == "foo" assert c.ds2 == field("y") assert c.lst2 == [1,2,3] c.int2 = 25 assert c.int2 == 25 assert c.lst1 == ["foo", "bar"] assert c.int1 == 10 assert c.ds1 == field("x") assert c.str2 == "foo" assert c.ds2 == field("y") assert c.lst2 == [1,2,3] c.ds2 = "foo" assert c.int2 == 25 assert c.lst1 == ["foo", "bar"] assert c.int1 == 10 assert c.ds1 == field("x") assert c.str2 == "foo" assert c.ds2 == "foo" assert c.lst2 == [1,2,3] def test_HasProps_unapply_theme() -> None: c = Child() theme = dict(int2=10, lst1=["foo", "bar"]) c.apply_theme(theme) assert c.int2 == 10 assert c.lst1 == ["foo", "bar"] assert c.int1 == 10 assert c.ds1 == field("x") assert c.str2 == "foo" assert c.ds2 == field("y") assert c.lst2 == [1,2,3] c.unapply_theme() assert c.int2 is None assert c.lst1 == [] assert c.int1 == 10 assert c.ds1 == field("x") assert c.str2 == "foo" assert c.ds2 == field("y") assert c.lst2 == [1,2,3] assert c.themed_values() is None
AliasedChild
python
huggingface__transformers
src/transformers/models/funnel/modeling_funnel.py
{ "start": 52499, "end": 54792 }
class ____(FunnelPreTrainedModel): def __init__(self, config: FunnelConfig) -> None: super().__init__(config) self.num_labels = config.num_labels self.funnel = FunnelModel(config) self.dropout = nn.Dropout(config.hidden_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.funnel( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = outputs[0] last_hidden_state = self.dropout(last_hidden_state) logits = self.classifier(last_hidden_state) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @auto_docstring
FunnelForTokenClassification
python
getsentry__sentry
src/sentry/integrations/aws_lambda/integration.py
{ "start": 14594, "end": 18326 }
class ____: def dispatch(self, request: HttpRequest, pipeline: IntegrationPipeline) -> HttpResponseBase: if "finish_pipeline" in request.GET: return pipeline.finish_pipeline() assert pipeline.organization is not None organization = pipeline.organization account_number = pipeline.fetch_state("account_number") region = pipeline.fetch_state("region") project_id = pipeline.fetch_state("project_id") aws_external_id = pipeline.fetch_state("aws_external_id") enabled_lambdas = pipeline.fetch_state("enabled_lambdas") assert enabled_lambdas is not None sentry_project_dsn = get_dsn_for_project(organization.id, project_id) lambda_client = gen_aws_client(account_number, region, aws_external_id) lambda_functions = get_supported_functions(lambda_client) lambda_functions.sort(key=lambda x: x["FunctionName"].lower()) def is_lambda_enabled(function): name = function["FunctionName"] # check to see if the user wants to enable this function return enabled_lambdas.get(name) lambda_functions = filter(is_lambda_enabled, lambda_functions) def _enable_lambda(function): try: enable_single_lambda(lambda_client, function, sentry_project_dsn) return (True, function, None) except Exception as e: return (False, function, e) failures = [] success_count = 0 with ThreadPoolExecutor( max_workers=options.get("aws-lambda.thread-count") ) as _lambda_setup_thread_pool: # use threading here to parallelize requests # no timeout on the thread since the underlying request will time out # if it takes too long for success, function, e in _lambda_setup_thread_pool.map( _enable_lambda, lambda_functions ): name = function["FunctionName"] if success: success_count += 1 else: # need to make sure we catch any error to continue to the next function err_message: str | _StrPromise = str(e) is_custom_err, err_message = get_sentry_err_message(err_message) if not is_custom_err: capture_exception(e) err_message = _("Unknown Error") failures.append({"name": function["FunctionName"], "error": err_message}) logger.info( 
"update_function_configuration.error", extra={ "organization_id": organization.id, "lambda_name": name, "account_number": account_number, "region": region, "error": str(e), }, ) analytics.record( IntegrationServerlessSetup( user_id=request.user.id, organization_id=organization.id, integration="aws_lambda", success_count=success_count, failure_count=len(failures), ) ) # if we have failures, show them to the user # otherwise, finish if failures: return render_react_view( request, "awsLambdaFailureDetails", {"lambdaFunctionFailures": failures, "successCount": success_count}, ) else: return pipeline.finish_pipeline()
AwsLambdaSetupLayerPipelineView
python
spack__spack
lib/spack/spack/database.py
{ "start": 5252, "end": 8286 }
class ____: """A record represents one installation in the DB. The record keeps track of the spec for the installation, its install path, AND whether or not it is installed. We need the installed flag in case a user either: 1. blew away a directory, or 2. used spack uninstall -f to get rid of it If, in either case, the package was removed but others still depend on it, we still need to track its spec, so we don't actually remove from the database until a spec has no installed dependents left. Args: spec: spec tracked by the install record path: path where the spec has been installed installed: whether or not the spec is currently installed ref_count (int): number of specs that depend on this one explicit (bool or None): whether or not this spec was explicitly installed, or pulled-in as a dependency of something else installation_time (datetime.datetime or None): time of the installation """ def __init__( self, spec: "spack.spec.Spec", path: Optional[str], installed: bool, ref_count: int = 0, explicit: bool = False, installation_time: Optional[float] = None, deprecated_for: Optional[str] = None, in_buildcache: bool = False, origin: Optional[str] = None, ) -> None: self.spec = spec self.path = str(path) if path else None self.installed = bool(installed) self.ref_count = ref_count self.explicit = explicit self.installation_time = installation_time or _now() self.deprecated_for = deprecated_for self.in_buildcache = in_buildcache self.origin = origin def install_type_matches(self, installed: InstallRecordStatus) -> bool: if self.installed: return InstallRecordStatus.INSTALLED in installed elif self.deprecated_for: return InstallRecordStatus.DEPRECATED in installed return InstallRecordStatus.MISSING in installed def to_dict(self, include_fields=DEFAULT_INSTALL_RECORD_FIELDS): rec_dict = {} for field_name in include_fields: if field_name == "spec": rec_dict.update({"spec": self.spec.node_dict_with_hashes()}) elif field_name == "deprecated_for" and self.deprecated_for: 
rec_dict.update({"deprecated_for": self.deprecated_for}) else: rec_dict.update({field_name: getattr(self, field_name)}) if self.origin: rec_dict["origin"] = self.origin return rec_dict @classmethod def from_dict(cls, spec, dictionary): d = dict(dictionary.items()) d.pop("spec", None) # Old databases may have "None" for path for externals if "path" not in d or d["path"] == "None": d["path"] = None if "installed" not in d: d["installed"] = False return InstallRecord(spec, **d)
InstallRecord
python
pytorch__pytorch
torch/distributions/constraints.py
{ "start": 17328, "end": 17651 }
class ____(Constraint): """ Constrain to square matrices. """ event_dim = 2 def check(self, value): return torch.full( size=value.shape[:-2], fill_value=(value.shape[-2] == value.shape[-1]), dtype=torch.bool, device=value.device, )
_Square
python
django__django
django/contrib/postgres/validators.py
{ "start": 2330, "end": 2568 }
class ____(MaxValueValidator): def compare(self, a, b): return a.upper is None or a.upper > b message = _( "Ensure that the upper bound of the range is not greater than %(limit_value)s." )
RangeMaxValueValidator
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 129410, "end": 129589 }
class ____(str, Enum): """ Mutable RAM sparse index """ def __str__(self) -> str: return str(self.value) MUTABLERAM = "MutableRam"
SparseIndexTypeOneOf
python
pydantic__pydantic
pydantic/json_schema.py
{ "start": 116494, "end": 123938 }
class ____: """Add examples to a JSON schema. If the JSON Schema already contains examples, the provided examples will be appended. If `mode` is set this will only apply to that schema generation mode, allowing you to add different examples for validation and serialization. """ @overload @deprecated('Using a dict for `examples` is deprecated since v2.9 and will be removed in v3.0. Use a list instead.') def __init__( self, examples: dict[str, Any], mode: Literal['validation', 'serialization'] | None = None ) -> None: ... @overload def __init__(self, examples: list[Any], mode: Literal['validation', 'serialization'] | None = None) -> None: ... def __init__( self, examples: dict[str, Any] | list[Any], mode: Literal['validation', 'serialization'] | None = None ) -> None: if isinstance(examples, dict): warnings.warn( 'Using a dict for `examples` is deprecated, use a list instead.', PydanticDeprecatedSince29, stacklevel=2, ) self.examples = examples self.mode = mode def __get_pydantic_json_schema__( self, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler ) -> JsonSchemaValue: mode = self.mode or handler.mode json_schema = handler(core_schema) if mode != handler.mode: return json_schema examples = json_schema.get('examples') if examples is None: json_schema['examples'] = to_jsonable_python(self.examples) if isinstance(examples, dict): if isinstance(self.examples, list): warnings.warn( 'Updating existing JSON Schema examples of type dict with examples of type list. ' 'Only the existing examples values will be retained. 
Note that dict support for ' 'examples is deprecated and will be removed in v3.0.', UserWarning, ) json_schema['examples'] = to_jsonable_python( [ex for value in examples.values() for ex in value] + self.examples ) else: json_schema['examples'] = to_jsonable_python({**examples, **self.examples}) if isinstance(examples, list): if isinstance(self.examples, list): json_schema['examples'] = to_jsonable_python(examples + self.examples) elif isinstance(self.examples, dict): warnings.warn( 'Updating existing JSON Schema examples of type list with examples of type dict. ' 'Only the examples values will be retained. Note that dict support for ' 'examples is deprecated and will be removed in v3.0.', UserWarning, ) json_schema['examples'] = to_jsonable_python( examples + [ex for value in self.examples.values() for ex in value] ) return json_schema def __hash__(self) -> int: return hash(type(self.mode)) def _get_all_json_refs(item: Any) -> set[JsonRef]: """Get all the definitions references from a JSON schema.""" refs: set[JsonRef] = set() stack = [item] while stack: current = stack.pop() if isinstance(current, dict): for key, value in current.items(): if key == 'examples' and isinstance(value, list): # Skip examples that may contain arbitrary values and references # (e.g. `{"examples": [{"$ref": "..."}]}`). Note: checking for value # of type list is necessary to avoid skipping valid portions of the schema, # for instance when "examples" is used as a property key. A more robust solution # could be found, but would require more advanced JSON Schema parsing logic. continue if key == '$ref' and isinstance(value, str): refs.add(JsonRef(value)) elif isinstance(value, dict): stack.append(value) elif isinstance(value, list): stack.extend(value) elif isinstance(current, list): stack.extend(current) return refs AnyType = TypeVar('AnyType') if TYPE_CHECKING: SkipJsonSchema = Annotated[AnyType, ...] 
else: @dataclasses.dataclass(**_internal_dataclass.slots_true) class SkipJsonSchema: """!!! abstract "Usage Documentation" [`SkipJsonSchema` Annotation](../concepts/json_schema.md#skipjsonschema-annotation) Add this as an annotation on a field to skip generating a JSON schema for that field. Example: ```python from pprint import pprint from typing import Union from pydantic import BaseModel from pydantic.json_schema import SkipJsonSchema class Model(BaseModel): a: Union[int, None] = None # (1)! b: Union[int, SkipJsonSchema[None]] = None # (2)! c: SkipJsonSchema[Union[int, None]] = None # (3)! pprint(Model.model_json_schema()) ''' { 'properties': { 'a': { 'anyOf': [ {'type': 'integer'}, {'type': 'null'} ], 'default': None, 'title': 'A' }, 'b': { 'default': None, 'title': 'B', 'type': 'integer' } }, 'title': 'Model', 'type': 'object' } ''' ``` 1. The integer and null types are both included in the schema for `a`. 2. The integer type is the only type included in the schema for `b`. 3. The entirety of the `c` field is omitted from the schema. """ def __class_getitem__(cls, item: AnyType) -> AnyType: return Annotated[item, cls()] def __get_pydantic_json_schema__( self, core_schema: CoreSchema, handler: GetJsonSchemaHandler ) -> JsonSchemaValue: raise PydanticOmit def __hash__(self) -> int: return hash(type(self)) def _get_typed_dict_config(cls: type[Any] | None) -> ConfigDict: if cls is not None: try: return _decorators.get_attribute_from_bases(cls, '__pydantic_config__') except AttributeError: pass return {} def _get_ser_schema_for_default_value(schema: CoreSchema) -> core_schema.PlainSerializerFunctionSerSchema | None: """Get a `'function-plain'` serialization schema that can be used to serialize a default value. This takes into account having the serialization schema nested under validation schema(s). 
""" if ( (ser_schema := schema.get('serialization')) and ser_schema['type'] == 'function-plain' and not ser_schema.get('info_arg') ): return ser_schema if _core_utils.is_function_with_inner_schema(schema): return _get_ser_schema_for_default_value(schema['schema'])
Examples
python
spack__spack
lib/spack/spack/vendor/pyrsistent/_pclass.py
{ "start": 434, "end": 1301 }
class ____(type): def __new__(mcs, name, bases, dct): set_fields(dct, bases, name='_pclass_fields') store_invariants(dct, bases, '_pclass_invariants', '__invariant__') dct['__slots__'] = ('_pclass_frozen',) + tuple(key for key in dct['_pclass_fields']) # There must only be one __weakref__ entry in the inheritance hierarchy, # lets put it on the top level class. if _is_pclass(bases): dct['__slots__'] += ('__weakref__',) return super(PClassMeta, mcs).__new__(mcs, name, bases, dct) _MISSING_VALUE = object() def _check_and_set_attr(cls, field, name, value, result, invariant_errors): check_type(cls, field, name, value) is_ok, error_code = field.invariant(value) if not is_ok: invariant_errors.append(error_code) else: setattr(result, name, value)
PClassMeta
python
keras-team__keras
keras/src/metrics/f_score_metrics.py
{ "start": 225, "end": 9225 }
class ____(Metric): """Computes F-Beta score. Formula: ```python b2 = beta ** 2 f_beta_score = (1 + b2) * (precision * recall) / (precision * b2 + recall) ``` This is the weighted harmonic mean of precision and recall. Its output range is `[0, 1]`. It works for both multi-class and multi-label classification. Args: average: Type of averaging to be performed across per-class results in the multi-class case. Acceptable values are `None`, `"micro"`, `"macro"` and `"weighted"`. Defaults to `None`. If `None`, no averaging is performed and `result()` will return the score for each class. If `"micro"`, compute metrics globally by counting the total true positives, false negatives and false positives. If `"macro"`, compute metrics for each label, and return their unweighted mean. This does not take label imbalance into account. If `"weighted"`, compute metrics for each label, and return their average weighted by support (the number of true instances for each label). This alters `"macro"` to account for label imbalance. It can result in an score that is not between precision and recall. beta: Determines the weight of given to recall in the harmonic mean between precision and recall (see pseudocode equation above). Defaults to `1`. threshold: Elements of `y_pred` greater than `threshold` are converted to be 1, and the rest 0. If `threshold` is `None`, the argmax of `y_pred` is converted to 1, and the rest to 0. name: Optional. String name of the metric instance. dtype: Optional. Data type of the metric result. Returns: F-Beta Score: float. Example: >>> metric = keras.metrics.FBetaScore(beta=2.0, threshold=0.5) >>> y_true = np.array([[1, 1, 1], ... [1, 0, 0], ... [1, 1, 0]], np.int32) >>> y_pred = np.array([[0.2, 0.6, 0.7], ... [0.2, 0.6, 0.6], ... 
[0.6, 0.8, 0.0]], np.float32) >>> metric.update_state(y_true, y_pred) >>> result = metric.result() >>> result [0.3846154 , 0.90909094, 0.8333334 ] """ def __init__( self, average=None, beta=1.0, threshold=None, name="fbeta_score", dtype=None, ): super().__init__(name=name, dtype=dtype) # Metric should be maximized during optimization. self._direction = "up" if average not in (None, "micro", "macro", "weighted"): raise ValueError( "Invalid `average` argument value. Expected one of: " "{None, 'micro', 'macro', 'weighted'}. " f"Received: average={average}" ) if not isinstance(beta, float): raise ValueError( "Invalid `beta` argument value. " "It should be a Python float. " f"Received: beta={beta} of type '{type(beta)}'" ) if beta <= 0.0: raise ValueError( "Invalid `beta` argument value. " "It should be > 0. " f"Received: beta={beta}" ) if threshold is not None: if not isinstance(threshold, float): raise ValueError( "Invalid `threshold` argument value. " "It should be a Python float. " f"Received: threshold={threshold} " f"of type '{type(threshold)}'" ) if threshold > 1.0 or threshold <= 0.0: raise ValueError( "Invalid `threshold` argument value. " "It should verify 0 < threshold <= 1. " f"Received: threshold={threshold}" ) self.average = average self.beta = beta self.threshold = threshold self.axis = None self._built = False if self.average != "micro": self.axis = 0 def _build(self, y_true_shape, y_pred_shape): if len(y_pred_shape) != 2 or len(y_true_shape) != 2: raise ValueError( "FBetaScore expects 2D inputs with shape " "(batch_size, output_dim). Received input " f"shapes: y_pred.shape={y_pred_shape} and " f"y_true.shape={y_true_shape}." ) if y_pred_shape[-1] is None or y_true_shape[-1] is None: raise ValueError( "FBetaScore expects 2D inputs with shape " "(batch_size, output_dim), with output_dim fully " "defined (not None). Received input " f"shapes: y_pred.shape={y_pred_shape} and " f"y_true.shape={y_true_shape}." 
) num_classes = y_pred_shape[-1] if self.average != "micro": init_shape = (num_classes,) else: init_shape = () def _add_zeros_variable(name): return self.add_variable( name=name, shape=init_shape, initializer=initializers.Zeros(), dtype=self.dtype, ) self.true_positives = _add_zeros_variable("true_positives") self.false_positives = _add_zeros_variable("false_positives") self.false_negatives = _add_zeros_variable("false_negatives") self.intermediate_weights = _add_zeros_variable("intermediate_weights") self._built = True def update_state(self, y_true, y_pred, sample_weight=None): y_true = ops.convert_to_tensor(y_true, dtype=self.dtype) y_pred = ops.convert_to_tensor(y_pred, dtype=self.dtype) if not self._built: self._build(y_true.shape, y_pred.shape) if self.threshold is None: threshold = ops.max(y_pred, axis=-1, keepdims=True) # make sure [0, 0, 0] doesn't become [1, 1, 1] # Use abs(x) > eps, instead of x != 0 to check for zero y_pred = ops.logical_and( y_pred >= threshold, ops.abs(y_pred) > 1e-9 ) else: y_pred = y_pred > self.threshold y_pred = ops.cast(y_pred, dtype=self.dtype) y_true = ops.cast(y_true, dtype=self.dtype) if sample_weight is not None: sample_weight = ops.convert_to_tensor( sample_weight, dtype=self.dtype ) def _weighted_sum(val, sample_weight): if sample_weight is not None: val = ops.multiply(val, ops.expand_dims(sample_weight, 1)) return ops.sum(val, axis=self.axis) self.true_positives.assign( self.true_positives + _weighted_sum(y_pred * y_true, sample_weight) ) self.false_positives.assign( self.false_positives + _weighted_sum(y_pred * (1 - y_true), sample_weight) ) self.false_negatives.assign( self.false_negatives + _weighted_sum((1 - y_pred) * y_true, sample_weight) ) self.intermediate_weights.assign( self.intermediate_weights + _weighted_sum(y_true, sample_weight) ) def result(self): precision = ops.divide( self.true_positives, self.true_positives + self.false_positives + backend.epsilon(), ) recall = ops.divide( self.true_positives, 
self.true_positives + self.false_negatives + backend.epsilon(), ) precision = ops.convert_to_tensor(precision, dtype=self.dtype) recall = ops.convert_to_tensor(recall, dtype=self.dtype) mul_value = precision * recall add_value = ((self.beta**2) * precision) + recall mean = ops.divide(mul_value, add_value + backend.epsilon()) f1_score = mean * (1 + (self.beta**2)) if self.average == "weighted": weights = ops.divide( self.intermediate_weights, ops.sum(self.intermediate_weights) + backend.epsilon(), ) f1_score = ops.sum(f1_score * weights) elif self.average is not None: # [micro, macro] f1_score = ops.mean(f1_score) return f1_score def get_config(self): """Returns the serializable config of the metric.""" config = { "name": self.name, "dtype": self.dtype, "average": self.average, "beta": self.beta, "threshold": self.threshold, } base_config = super().get_config() return {**base_config, **config} def reset_state(self): for v in self.variables: v.assign(ops.zeros(v.shape, dtype=v.dtype)) @keras_export("keras.metrics.F1Score")
FBetaScore
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/elements.py
{ "start": 120717, "end": 121659 }
class ____(ColumnElement[_T]): """Wrap a column expression as it appears in a 'reference' context. This expression is any that includes an _order_by_label_element, which is a Label, or a DESC / ASC construct wrapping a Label. The production of _label_reference() should occur when an expression is added to this context; this includes the ORDER BY or GROUP BY of a SELECT statement, as well as a few other places, such as the ORDER BY within an OVER clause. """ __visit_name__ = "label_reference" _traverse_internals: _TraverseInternalsType = [ ("element", InternalTraversal.dp_clauseelement) ] element: ColumnElement[_T] def __init__(self, element: ColumnElement[_T]): self.element = element self._propagate_attrs = element._propagate_attrs @util.ro_non_memoized_property def _from_objects(self) -> List[FromClause]: return []
_label_reference
python
mkdocs__mkdocs
mkdocs/structure/pages.py
{ "start": 21104, "end": 21575 }
class ____(markdown.htmlparser.htmlparser.HTMLParser): # type: ignore[name-defined] def __init__(self) -> None: super().__init__() self.present_anchor_ids: set[str] = set() def handle_starttag(self, tag: str, attrs: Sequence[tuple[str, str]]) -> None: for k, v in attrs: if k == 'id' or (k == 'name' and tag == 'a'): self.present_anchor_ids.add(v) return super().handle_starttag(tag, attrs)
_HTMLHandler
python
wandb__wandb
wandb/sdk/data_types/table.py
{ "start": 40541, "end": 40982 }
class ____: """Helper class for PartitionTable to track its parts.""" def __init__(self, entry, source_artifact): self.entry = entry self.source_artifact = source_artifact self._part = None def get_part(self): if self._part is None: self._part = self.source_artifact.get(self.entry.path) return self._part def free(self): self._part = None
_PartitionTablePartEntry
python
pytorch__pytorch
test/torch_np/numpy_tests/core/test_dtype.py
{ "start": 6915, "end": 10766 }
class ____(TestCase): """Test cases related to more complex DType promotions. Further promotion tests are defined in `test_numeric.py` """ @parametrize( "other, expected, expected_weak", [ (2**16 - 1, np.complex64, None), (2**32 - 1, np.complex128, np.complex64), subtest((np.float16(2), np.complex64, None), name="float16_complex64_None"), subtest((np.float32(2), np.complex64, None), name="float32_complex64_None"), # repeat for complex scalars: subtest( (np.complex64(2), np.complex64, None), name="complex64_complex64_None" ), ], ) def test_complex_other_value_based( self, weak_promotion, other, expected, expected_weak ): if weak_promotion and expected_weak is not None: expected = expected_weak # This would change if we modify the value based promotion min_complex = np.dtype(np.complex64) res = np.result_type(other, min_complex) assert res == expected # Check the same for a simple ufunc call that uses the same logic: res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype assert res == expected @parametrize( "other, expected", [ (np.bool_, np.complex128), (np.int64, np.complex128), (np.float16, np.complex64), (np.float32, np.complex64), (np.float64, np.complex128), (np.complex64, np.complex64), (np.complex128, np.complex128), ], ) def test_complex_scalar_value_based(self, other, expected): # This would change if we modify the value based promotion complex_scalar = 1j res = np.result_type(other, complex_scalar) assert res == expected # Check the same for a simple ufunc call that uses the same logic: res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype assert res == expected @parametrize("val", [2, 2**32, 2**63, 2**64, 2 * 100]) def test_python_integer_promotion(self, val): # If we only path scalars (mainly python ones!), the result must take # into account that the integer may be considered int32, int64, uint64, # or object depending on the input value. So test those paths! 
expected_dtype = np.result_type(np.array(val).dtype, np.array(0).dtype) assert np.result_type(val, 0) == expected_dtype # For completeness sake, also check with a NumPy scalar as second arg: assert np.result_type(val, np.int8(0)) == expected_dtype @parametrize( "dtypes, expected", [ # These promotions are not associative/commutative: ([np.int16, np.float16], np.float32), ([np.int8, np.float16], np.float32), ([np.uint8, np.int16, np.float16], np.float32), # The following promotions are not ambiguous, but cover code # paths of abstract promotion (no particular logic being tested) ([1, 1, np.float64], np.float64), ([1, 1.0, np.complex128], np.complex128), ([1, 1j, np.float64], np.complex128), ([1.0, 1.0, np.int64], np.float64), ([1.0, 1j, np.float64], np.complex128), ([1j, 1j, np.float64], np.complex128), ([1, True, np.bool_], np.int_), ], ) def test_permutations_do_not_influence_result(self, dtypes, expected): # Tests that most permutations do not influence the result. In the # above some uint and int combinations promote to a larger integer # type, which would then promote to a larger than necessary float. for perm in permutations(dtypes): assert np.result_type(*perm) == expected
TestPromotion
python
huggingface__transformers
src/transformers/models/glm4/modeling_glm4.py
{ "start": 2801, "end": 8805 }
class ____(GradientCheckpointingLayer): def __init__(self, config: Glm4Config, layer_idx: int): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Glm4Attention(config=config, layer_idx=layer_idx) self.mlp = Glm4MLP(config) self.input_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_self_attn_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_mlp_layernorm = Glm4RMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) hidden_states, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.post_self_attn_layernorm(hidden_states) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = self.post_mlp_layernorm(hidden_states) hidden_states = residual + hidden_states return hidden_states def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). 
The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs: Unpack[TransformersKwargs], ): key_states = repeat_kv(key, module.num_key_value_groups) value_states = repeat_kv(value, module.num_key_value_groups) attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., 0::2] x2 = x[..., 1::2] return torch.stack((-x2, x1), dim=-1).flatten(-2) def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. 
unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) # Interleave them instead of usual shape cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1) sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1) # Keep half or full tensor for later concatenation rotary_dim = cos.shape[-1] q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:] k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:] # Apply rotary embeddings on the first half or full tensor q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin) k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin) # Concatenate back to full shape q_embed = torch.cat([q_embed, q_pass], dim=-1) k_embed = torch.cat([k_embed, k_pass], dim=-1) return q_embed, k_embed
Glm4DecoderLayer
python
PrefectHQ__prefect
tests/server/models/test_flow_runs.py
{ "start": 46518, "end": 49030 }
class ____: async def test_read_task_run_dependencies(self, flow_run, session): task_run_1 = await models.task_runs.create_task_run( session=session, task_run=schemas.core.TaskRun( flow_run_id=flow_run.id, task_key="key-1", dynamic_key="0" ), ) task_run_2 = await models.task_runs.create_task_run( session=session, task_run=schemas.core.TaskRun( flow_run_id=flow_run.id, task_key="key-2", dynamic_key="0", task_inputs=dict(x={TaskRunResult(id=task_run_1.id)}), ), ) task_run_3 = await models.task_runs.create_task_run( session=session, task_run=schemas.core.TaskRun( flow_run_id=flow_run.id, task_key="key-3", dynamic_key="0", task_inputs=dict(x={TaskRunResult(id=task_run_2.id)}), ), ) dependencies = await models.flow_runs.read_task_run_dependencies( session=session, flow_run_id=flow_run.id ) # We do this because read_task_run_dependencies doesn't guarantee any ordering d1 = next(filter(lambda d: d.id == task_run_1.id, dependencies)) d2 = next(filter(lambda d: d.id == task_run_2.id, dependencies)) d3 = next(filter(lambda d: d.id == task_run_3.id, dependencies)) assert len(dependencies) == 3 fields = [ "id", "name", "state", "expected_start_time", "start_time", "end_time", "total_run_time", "estimated_run_time", ] for field in fields: assert getattr(d1, field) == getattr(task_run_1, field) assert getattr(d2, field) == getattr(task_run_2, field) assert getattr(d3, field) == getattr(task_run_3, field) assert len(d1.upstream_dependencies) == 0 assert len(d2.upstream_dependencies) == len(d3.upstream_dependencies) == 1 assert d2.upstream_dependencies[0].id == d1.id assert d3.upstream_dependencies[0].id == d2.id async def test_read_task_run_dependencies_throws_error_if_does_not_exist( self, session ): with pytest.raises(ObjectNotFoundError): await models.flow_runs.read_task_run_dependencies( session=session, flow_run_id=uuid4() )
TestReadFlowRunTaskRunDependencies
python
pypa__hatch
src/hatch/template/files_default.py
{ "start": 541, "end": 2264 }
class ____(File): TEMPLATE = """\ # {project_name} [![PyPI - Version](https://img.shields.io/pypi/v/{project_name_normalized}.svg)](https://pypi.org/project/{project_name_normalized}) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/{project_name_normalized}.svg)](https://pypi.org/project/{project_name_normalized}) {extra_badges} ----- ## Table of Contents - [Installation](#installation) {extra_toc} ## Installation ```console pip install {project_name_normalized} ```{license_info} """ def __init__( self, template_config: dict, plugin_config: dict, # noqa: ARG002 ): extra_badges = "" extra_toc = "" license_info = "" if template_config["license_data"]: extra_toc += "- [License](#license)\n" license_info += ( f"\n\n## License\n\n`{template_config['project_name_normalized']}` is distributed under the terms of " ) license_data = template_config["license_data"] if len(license_data) == 1: license_id = next(iter(license_data)) license_info += f"the [{license_id}](https://spdx.org/licenses/{license_id}.html) license." else: license_info += "any of the following licenses:\n" for license_id in sorted(license_data): license_info += f"\n- [{license_id}](https://spdx.org/licenses/{license_id}.html)" super().__init__( Path(template_config["readme_file_path"]), self.TEMPLATE.format( extra_badges=extra_badges, extra_toc=extra_toc, license_info=license_info, **template_config ), )
Readme
python
ZoranPandovski__al-go-rithms
data_structures/B+tree/btree.py
{ "start": 708, "end": 4255 }
class ____(Node): def __init__(self): self.pt = 4 self.n = 3 self.root = Node(True) def get_leaf(self,val,node): if node.leaf == False: for ind, key in enumerate(node.keys): if ind == 0 and val<= key: return self.get_leaf(val,node.children[ind]) elif ind == len(node.keys)-1 and val <= key: continue elif ind == len(node.keys)-1 and val > key: return self.get_leaf(val,node.children[-1]) if val > key and val <= node.keys[ind+1]: return self.get_leaf(val,node.children[ind+1]) else : return node def keys_in_range(self, mn, mx, node): ans = 0 for _ , key in enumerate(node.keys): if key >= mn and key <= mx: ans+=1 if len(node.keys) == 0: return ans,None elif node.keys[-1] > mx: return ans, None else : return ans, node.next def range(self, mn, mx): st_node = self.get_leaf(mn, self.root) ans = 0 while st_node is not None: cnt, st_node = self.keys_in_range(mn,mx,st_node) ans += cnt return ans def insert(self, value, node): if node.leaf == True: idx = bisect.bisect(node.keys,value) node.keys.insert(idx,value) node.children.insert(idx,value) if len(node.keys) <= self.n : return None, None else: return node.kaatde() for ind, key in enumerate(node.keys): if value < key and ind == 0: mid_val, nw = self.insert(value,node.children[ind]) break elif ind == (len(node.keys) -1) and value < key: continue elif ind == (len(node.keys) -1) and value >= key: mid_val, nw = self.insert(value,node.children[-1]) break elif value >= key and value < node.keys[ind+1]: mid_val, nw = self.insert(value,node.children[ind+1]) break if mid_val: idx = bisect.bisect(node.keys, mid_val) node.keys.insert(idx, mid_val) node.children.insert(idx+1, nw) if len(node.keys)<= self.n : return None, None else: return node.kaatde() else: return None, None def insert_op(self, value): mid, nw_node = self.insert(value,self.root) if mid: new_root = Node(True) new_root.children = [self.root, nw_node] new_root.keys = [mid] new_root.leaf = False self.root = new_root # Globals output = [] # parser command def doit(fname): 
with open(fname,'r') as f: for line in f: cmd = line.strip().lower().split() if cmd[0]== "insert": btree.insert_op(int(cmd[1])) if cmd[0]== "count": out = btree.range(int(cmd[1]),int(cmd[1])) output.append(str(out)) if cmd[0]== "find": out = btree.range(int(cmd[1]),int(cmd[1])) output.append("YES" if out>0 else "NO") if cmd[0]== "range": out = btree.range(int(cmd[1]),int(cmd[2])) output.append(str(out)) for i in output: print(i) # beginning of code if len(sys.argv) != 2: sys.exit("syntax error//") fname = sys.argv[1] btree = Btree() doit(fname)
Btree
python
scipy__scipy
scipy/sparse/linalg/_eigen/arpack/arpack.py
{ "start": 41655, "end": 69603 }
class ____(LinearOperator):
    """
    IterOpInv:
       helper class to repeatedly solve [A-sigma*M]*x = b
       using an iterative method
    """

    def __init__(self, A, M, sigma, ifunc=gmres_loose, tol=0):
        self.A = A
        self.M = M
        self.sigma = sigma

        # Shifted operator OP = A - sigma*M (M defaults to identity).
        def mult_func(x):
            return A.matvec(x) - sigma * M.matvec(x)

        def mult_func_M_None(x):
            return A.matvec(x) - sigma * x

        # Probe with a zero vector once to discover the result dtype.
        x = np.zeros(A.shape[1])
        if M is None:
            dtype = mult_func_M_None(x).dtype
            self.OP = LinearOperator(self.A.shape,
                                     mult_func_M_None,
                                     dtype=dtype)
        else:
            dtype = mult_func(x).dtype
            self.OP = LinearOperator(self.A.shape,
                                     mult_func,
                                     dtype=dtype)
        self.shape = A.shape

        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function.  We should match this
            tol = 2 * np.finfo(self.OP.dtype).eps
        self.ifunc = ifunc
        self.tol = tol

    def _matvec(self, x):
        # One matvec == one iterative solve of [A - sigma*M] b = x.
        b, info = self.ifunc(self.OP, x, tol=self.tol)
        if info != 0:
            raise ValueError(
                f"Error in inverting [A-sigma*M]: function {self.ifunc.__name__} "
                f"did not converge (info = {info})."
            )
        return b

    @property
    def dtype(self):
        return self.OP.dtype


def _fast_spmatrix_to_csc(A, hermitian=False):
    """Convert sparse matrix to CSC (by transposing, if possible)"""
    if (A.format == "csr" and hermitian
            and not np.issubdtype(A.dtype, np.complexfloating)):
        # Real + hermitian => symmetric, so the CSR transpose IS the CSC form.
        return A.T
    elif is_pydata_spmatrix(A):
        # No need to convert
        return A
    else:
        return A.tocsc()


def get_inv_matvec(M, hermitian=False, tol=0):
    """Return a callable applying M^-1, picking dense LU, sparse LU, or an
    iterative solver depending on the type of M."""
    if isdense(M):
        return LuInv(M).matvec
    elif issparse(M) or is_pydata_spmatrix(M):
        M = _fast_spmatrix_to_csc(M, hermitian=hermitian)
        return SpLuInv(M).matvec
    else:
        return IterInv(M, tol=tol).matvec


def get_OPinv_matvec(A, M, sigma, hermitian=False, tol=0):
    """Return a callable applying [A - sigma*M]^-1 (shift-invert operator)."""
    if sigma == 0:
        return get_inv_matvec(A, hermitian=hermitian, tol=tol)

    if M is None:
        # M is the identity matrix
        if isdense(A):
            if (np.issubdtype(A.dtype, np.complexfloating)
                    or np.imag(sigma) == 0):
                A = np.copy(A)
            else:
                # Real A with complex sigma: promote so the shift fits.
                A = A + 0j
            # Subtract sigma from the diagonal in place.
            A.flat[::A.shape[1] + 1] -= sigma
            return LuInv(A).matvec
        elif issparse(A) or is_pydata_spmatrix(A):
            A = A - sigma * eye(A.shape[0])
            A = _fast_spmatrix_to_csc(A, hermitian=hermitian)
            return SpLuInv(A).matvec
        else:
            return IterOpInv(aslinearoperator(A), M, sigma, tol=tol).matvec
    else:
        # NOTE(review): the second clause tests is_pydata_spmatrix(A) where
        # the symmetry of the expression suggests it should be M — confirm
        # against upstream scipy before relying on the LinearOperator-M path.
        if ((not isdense(A) and not issparse(A) and not is_pydata_spmatrix(A)) or
                (not isdense(M) and not issparse(M) and not is_pydata_spmatrix(A))):
            return IterOpInv(aslinearoperator(A), aslinearoperator(M),
                             sigma, tol=tol).matvec
        elif isdense(A) or isdense(M):
            return LuInv(A - sigma * M).matvec
        else:
            OP = A - sigma * M
            OP = _fast_spmatrix_to_csc(OP, hermitian=hermitian)
            return SpLuInv(OP).matvec


def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None,
         ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
         Minv=None, OPinv=None, OPpart=None, rng=None):
    """
    Find k eigenvalues and eigenvectors of the square matrix A.

    Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue
    problem for w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i]

    Parameters
    ----------
    A : ndarray, sparse matrix or LinearOperator
        An array, sparse matrix, or LinearOperator representing
        the operation ``A @ x``, where A is a real or complex square matrix.
    k : int, optional
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N-1. It is not possible to compute all
        eigenvectors of a matrix.
    M : ndarray, sparse matrix or LinearOperator, optional
        An array, sparse matrix, or LinearOperator representing
        the operation M@x for the generalized eigenvalue problem

            A @ x = w * M @ x.

        M must represent a real symmetric matrix if A is real, and must
        represent a complex Hermitian matrix if A is complex. For best
        results, the data type of M should be the same as that of A.
        Additionally:

            If `sigma` is None, M is positive definite

            If sigma is specified, M is positive semi-definite

        If sigma is None, eigs requires an operator to compute the solution
        of the linear equation ``M @ x = b``.  This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator.  Alternatively,
        the user can supply the matrix or operator Minv, which gives
        ``x = Minv @ b = M^-1 @ b``.
    sigma : real or complex, optional
        Find eigenvalues near sigma using shift-invert mode.  This requires
        an operator to compute the solution of the linear system
        ``[A - sigma * M] @ x = b``, where M is the identity matrix if
        unspecified. This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator OPinv,
        which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
        For a real matrix A, shift-invert can either be done in imaginary
        mode or real mode, specified by the parameter OPpart ('r' or 'i').
        Note that when sigma is specified, the keyword 'which' (below)
        refers to the shifted eigenvalues ``w'[i]`` where:

            If A is real and OPpart == 'r' (default),

              ``w'[i] = 1/2 * [1/(w[i]-sigma) + 1/(w[i]-conj(sigma))]``.

            If A is real and OPpart == 'i',

              ``w'[i] = 1/2i * [1/(w[i]-sigma) - 1/(w[i]-conj(sigma))]``.

            If A is complex, ``w'[i] = 1/(w[i]-sigma)``.

    v0 : ndarray, optional
        Starting vector for iteration.
        Default: random
    ncv : int, optional
        The number of Lanczos vectors generated
        `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
        Default: ``min(n, max(2*k + 1, 20))``
    which : str, ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'], optional
        Which `k` eigenvectors and eigenvalues to find:

            'LM' : largest magnitude

            'SM' : smallest magnitude

            'LR' : largest real part

            'SR' : smallest real part

            'LI' : largest imaginary part

            'SI' : smallest imaginary part

        When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
        (see discussion in 'sigma', above).  ARPACK is generally better
        at finding large values than small values.  If small eigenvalues are
        desired, consider using shift-invert mode for better performance.
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed
        Default: ``n*10``
    tol : float, optional
        Relative accuracy for eigenvalues (stopping criterion)
        The default value of 0 implies machine precision.
    return_eigenvectors : bool, optional
        Return eigenvectors (True) in addition to eigenvalues
    Minv : ndarray, sparse matrix or LinearOperator, optional
        See notes in M, above.
    OPinv : ndarray, sparse matrix or LinearOperator, optional
        See notes in sigma, above.
    OPpart : {'r' or 'i'}, optional
        See notes in sigma, above
    rng : `numpy.random.Generator`, optional
        Pseudorandom number generator state. When `rng` is None, a new
        `numpy.random.Generator` is created using entropy from the
        operating system. Types other than `numpy.random.Generator` are
        passed to `numpy.random.default_rng` to instantiate a ``Generator``.

    Returns
    -------
    w : ndarray
        Array of k eigenvalues.
    v : ndarray
        An array of `k` eigenvectors.
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.
        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigsh : eigenvalues and eigenvectors for symmetric matrix A
    svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
    ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
    find the eigenvalues and eigenvectors [2]_.

    References
    ----------
    .. [1] ARPACK Software, https://github.com/opencollab/arpack-ng
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.

    Examples
    --------
    Find 6 eigenvectors of the identity matrix:

    >>> import numpy as np
    >>> from scipy.sparse.linalg import eigs
    >>> id = np.eye(13)
    >>> vals, vecs = eigs(id, k=6)
    >>> vals
    array([ 1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j,  1.+0.j])
    >>> vecs.shape
    (13, 6)
    """
    A = convert_pydata_sparse_to_scipy(A)
    M = convert_pydata_sparse_to_scipy(M)
    if A.shape[0] != A.shape[1]:
        raise ValueError(f'expected square matrix (shape={A.shape})')
    if M is not None:
        if M.shape != A.shape:
            raise ValueError(f'wrong M dimensions {M.shape}, should be {A.shape}')
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence',
                          stacklevel=2)

    n = A.shape[0]

    if k <= 0:
        raise ValueError(f"k={k} must be greater than 0.")

    if k >= n - 1:
        # ARPACK cannot compute (almost) all eigenpairs; fall back to dense.
        warnings.warn("k >= N - 1 for N * N square matrix. "
                      "Attempting to use scipy.linalg.eig instead.",
                      RuntimeWarning, stacklevel=2)

        if issparse(A):
            raise TypeError("Cannot use scipy.linalg.eig for sparse A with "
                            "k >= N - 1. Use scipy.linalg.eig(A.toarray()) or"
                            " reduce k.")
        if isinstance(A, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
                            "A with k >= N - 1.")
        if isinstance(M, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
                            "M with k >= N - 1.")

        return eig(A, b=M, right=return_eigenvectors)

    if sigma is None:
        matvec = aslinearoperator(A).matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified "
                             "with sigma = None.")
        if OPpart is not None:
            raise ValueError("OPpart should not be specified with "
                             "sigma = None or complex A")

        if M is None:
            # standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be "
                                 "specified with M = None.")
        else:
            # general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
            else:
                Minv = aslinearoperator(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = aslinearoperator(M).matvec
    else:
        # sigma is not None: shift-invert mode
        if np.issubdtype(A.dtype, np.complexfloating):
            if OPpart is not None:
                raise ValueError("OPpart should not be specified "
                                 "with sigma=None or complex A")
            mode = 3
        elif OPpart is None or OPpart.lower() == 'r':
            mode = 3
        elif OPpart.lower() == 'i':
            if np.imag(sigma) == 0:
                raise ValueError("OPpart cannot be 'i' if sigma is real")
            mode = 4
        else:
            raise ValueError("OPpart must be one of ('r','i')")

        matvec = aslinearoperator(A).matvec
        # NOTE(review): adjacent literals concatenate to "...sigma isspecified."
        # (missing space) — the eigsh counterpart below has the space; confirm.
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is"
                             "specified.")
        if OPinv is None:
            Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                           hermitian=False, tol=tol)
        else:
            OPinv = aslinearoperator(OPinv)
            Minv_matvec = OPinv.matvec
        if M is None:
            M_matvec = None
        else:
            M_matvec = aslinearoperator(M).matvec

    rng = np.random.default_rng(rng)

    params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                      M_matvec, Minv_matvec, sigma,
                                      ncv, v0, maxiter, which, tol, rng)

    while not params.converged:
        params.iterate()

    return params.extract(return_eigenvectors)


def eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None,
          ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
          Minv=None, OPinv=None, mode='normal', rng=None):
    """
    Find k eigenvalues and eigenvectors of the real symmetric square matrix
    or complex Hermitian matrix A.

    Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem for
    w[i] eigenvalues with corresponding eigenvectors x[i].

    If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
    generalized eigenvalue problem for w[i] eigenvalues
    with corresponding eigenvectors x[i].

    Note that there is no specialized routine for the case when A is a
    complex Hermitian matrix. In this case, ``eigsh()`` will call ``eigs()``
    and return the real parts of the eigenvalues thus obtained.

    Parameters
    ----------
    A : ndarray, sparse matrix or LinearOperator
        A square operator representing the operation ``A @ x``, where ``A``
        is real symmetric or complex Hermitian. For buckling mode (see below)
        ``A`` must additionally be positive-definite.
    k : int, optional
        The number of eigenvalues and eigenvectors desired.
        `k` must be smaller than N. It is not possible to compute all
        eigenvectors of a matrix.

    Returns
    -------
    w : array
        Array of k eigenvalues.
    v : array
        An array representing the `k` eigenvectors.  The column ``v[:, i]``
        is the eigenvector corresponding to the eigenvalue ``w[i]``.

    Other Parameters
    ----------------
    M : An N x N matrix, array, sparse matrix, or linear operator representing
        the operation ``M @ x`` for the generalized eigenvalue problem

            A @ x = w * M @ x.

        M must represent a real symmetric matrix if A is real, and must
        represent a complex Hermitian matrix if A is complex. For best
        results, the data type of M should be the same as that of A.
        Additionally:

            If sigma is None, M is symmetric positive definite.

            If sigma is specified, M is symmetric positive semi-definite.

            In buckling mode, M is symmetric indefinite.

        If sigma is None, eigsh requires an operator to compute the solution
        of the linear equation ``M @ x = b``. This is done internally via a
        (sparse) LU decomposition for an explicit matrix M, or via an
        iterative solver for a general linear operator.  Alternatively,
        the user can supply the matrix or operator Minv, which gives
        ``x = Minv @ b = M^-1 @ b``.
    sigma : real
        Find eigenvalues near sigma using shift-invert mode.  This requires
        an operator to compute the solution of the linear system
        ``[A - sigma * M] x = b``, where M is the identity matrix if
        unspecified.  This is computed internally via a (sparse) LU
        decomposition for explicit matrices A & M, or via an iterative
        solver if either A or M is a general linear operator.
        Alternatively, the user can supply the matrix or operator `OPinv`,
        which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
        Regardless of the selected mode (normal, cayley, or buckling),
        `OPinv` should always be supplied as ``OPinv = [A - sigma * M]^-1``.
        Note that when sigma is specified, the keyword 'which' refers to
        the shifted eigenvalues ``w'[i]`` where:

            if ``mode == 'normal'``, ``w'[i] = 1 / (w[i] - sigma)``.

            if ``mode == 'cayley'``, ``w'[i] = (w[i] + sigma) / (w[i] - sigma)``.

            if ``mode == 'buckling'``, ``w'[i] = w[i] / (w[i] - sigma)``.

        (see further discussion in 'mode' below)
    v0 : ndarray, optional
        Starting vector for iteration.
        Default: random
    ncv : int, optional
        The number of Lanczos vectors generated ncv must be greater than k and
        smaller than n; it is recommended that ``ncv > 2*k``.
        Default: ``min(n, max(2*k + 1, 20))``
    which : str ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
        If A is a complex Hermitian matrix, 'BE' is invalid.
        Which `k` eigenvectors and eigenvalues to find:

            'LM' : Largest (in magnitude) eigenvalues.

            'SM' : Smallest (in magnitude) eigenvalues.

            'LA' : Largest (algebraic) eigenvalues.

            'SA' : Smallest (algebraic) eigenvalues.

            'BE' : Half (k/2) from each end of the spectrum.

        When k is odd, return one more (k/2+1) from the high end.
        When sigma != None, 'which' refers to the shifted eigenvalues
        ``w'[i]`` (see discussion in 'sigma', above).  ARPACK is
        generally better at finding large values than small values.  If
        small eigenvalues are desired, consider using shift-invert mode
        for better performance.
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed.
        Default: ``n*10``
    tol : float
        Relative accuracy for eigenvalues (stopping criterion).
        The default value of 0 implies machine precision.
    Minv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in M, above.
    OPinv : N x N matrix, array, sparse matrix, or LinearOperator
        See notes in sigma, above.
    return_eigenvectors : bool
        Return eigenvectors (True) in addition to eigenvalues.
        This value determines the order in which eigenvalues are sorted.
        The sort order is also dependent on the `which` variable.

            For which = 'LM' or 'SA':
                If `return_eigenvectors` is True, eigenvalues are sorted by
                algebraic value.

                If `return_eigenvectors` is False, eigenvalues are sorted by
                absolute value.

            For which = 'BE' or 'LA':
                eigenvalues are always sorted by algebraic value.

            For which = 'SM':
                If `return_eigenvectors` is True, eigenvalues are sorted by
                algebraic value.

                If `return_eigenvectors` is False, eigenvalues are sorted by
                decreasing absolute value.

    mode : string ['normal' | 'buckling' | 'cayley']
        Specify strategy to use for shift-invert mode.  This argument applies
        only for real-valued A and sigma != None.  For shift-invert mode,
        ARPACK internally solves the eigenvalue problem
        ``OP @ x'[i] = w'[i] * B @ x'[i]``
        and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
        into the desired eigenvectors and eigenvalues of the problem
        ``A @ x[i] = w[i] * M @ x[i]``.
        The modes are as follows:

            'normal' :
                OP = [A - sigma * M]^-1 @ M,
                B = M,
                w'[i] = 1 / (w[i] - sigma)

            'buckling' :
                OP = [A - sigma * M]^-1 @ A,
                B = A,
                w'[i] = w[i] / (w[i] - sigma)

            'cayley' :
                OP = [A - sigma * M]^-1 @ [A + sigma * M],
                B = M,
                w'[i] = (w[i] + sigma) / (w[i] - sigma)

        The choice of mode will affect which eigenvalues are selected by
        the keyword 'which', and can also impact the stability of
        convergence (see [2] for a discussion).
    rng : `numpy.random.Generator`, optional
        Pseudorandom number generator state. When `rng` is None, a new
        `numpy.random.Generator` is created using entropy from the
        operating system. Types other than `numpy.random.Generator` are
        passed to `numpy.random.default_rng` to instantiate a ``Generator``.

    Raises
    ------
    ArpackNoConvergence
        When the requested convergence is not obtained.

        The currently converged eigenvalues and eigenvectors can be found
        as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
        object.

    See Also
    --------
    eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
    svds : singular value decomposition for a matrix A

    Notes
    -----
    This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
    functions which use the Implicitly Restarted Lanczos Method to
    find the eigenvalues and eigenvectors [2]_.

    References
    ----------
    .. [1] ARPACK Software, https://github.com/opencollab/arpack-ng
    .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang,  ARPACK USERS GUIDE:
       Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
       Arnoldi Methods. SIAM, Philadelphia, PA, 1998.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import eigsh
    >>> identity = np.eye(13)
    >>> eigenvalues, eigenvectors = eigsh(identity, k=6)
    >>> eigenvalues
    array([1., 1., 1., 1., 1., 1.])
    >>> eigenvectors.shape
    (13, 6)
    """
    # complex Hermitian matrices should be solved with eigs
    if np.issubdtype(A.dtype, np.complexfloating):
        if mode != 'normal':
            raise ValueError(f"mode={mode} cannot be used with complex matrix A")
        if which == 'BE':
            raise ValueError("which='BE' cannot be used with complex matrix A")
        elif which == 'LA':
            which = 'LR'
        elif which == 'SA':
            which = 'SR'
        ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
                   ncv=ncv, maxiter=maxiter, tol=tol,
                   return_eigenvectors=return_eigenvectors, Minv=Minv,
                   OPinv=OPinv)

        if return_eigenvectors:
            return ret[0].real, ret[1]
        else:
            return ret.real

    if A.shape[0] != A.shape[1]:
        raise ValueError(f'expected square matrix (shape={A.shape})')
    if M is not None:
        if M.shape != A.shape:
            raise ValueError(f'wrong M dimensions {M.shape}, should be {A.shape}')
        if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
            warnings.warn('M does not have the same type precision as A. '
                          'This may adversely affect ARPACK convergence',
                          stacklevel=2)

    n = A.shape[0]

    if k <= 0:
        raise ValueError("k must be greater than 0.")

    if k >= n:
        # ARPACK cannot compute all eigenpairs; fall back to dense eigh.
        warnings.warn("k >= N for N * N square matrix. "
                      "Attempting to use scipy.linalg.eigh instead.",
                      RuntimeWarning, stacklevel=2)

        if issparse(A):
            raise TypeError("Cannot use scipy.linalg.eigh for sparse A with "
                            "k >= N. Use scipy.linalg.eigh(A.toarray()) or"
                            " reduce k.")
        if isinstance(A, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
                            "A with k >= N.")
        if isinstance(M, LinearOperator):
            raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
                            "M with k >= N.")

        return eigh(A, b=M, eigvals_only=not return_eigenvectors)

    if sigma is None:
        A = aslinearoperator(A)
        matvec = A.matvec

        if OPinv is not None:
            raise ValueError("OPinv should not be specified with sigma = None.")

        if M is None:
            # standard eigenvalue problem
            mode = 1
            M_matvec = None
            Minv_matvec = None
            if Minv is not None:
                raise ValueError("Minv should not be specified with M = None.")
        else:
            # general eigenvalue problem
            mode = 2
            if Minv is None:
                Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
            else:
                Minv = aslinearoperator(Minv)
                Minv_matvec = Minv.matvec
            M_matvec = aslinearoperator(M).matvec
    else:
        # sigma is not None: shift-invert mode
        if Minv is not None:
            raise ValueError("Minv should not be specified when sigma is "
                             "specified.")

        # normal mode
        if mode == 'normal':
            mode = 3
            matvec = None
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               hermitian=True, tol=tol)
            else:
                OPinv = aslinearoperator(OPinv)
                Minv_matvec = OPinv.matvec
            if M is None:
                M_matvec = None
            else:
                M = aslinearoperator(M)
                M_matvec = M.matvec

        # buckling mode
        elif mode == 'buckling':
            mode = 4
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               hermitian=True, tol=tol)
            else:
                Minv_matvec = aslinearoperator(OPinv).matvec

            matvec = aslinearoperator(A).matvec
            M_matvec = None

        # cayley-transform mode
        elif mode == 'cayley':
            mode = 5
            matvec = aslinearoperator(A).matvec
            if OPinv is None:
                Minv_matvec = get_OPinv_matvec(A, M, sigma,
                                               hermitian=True, tol=tol)
            else:
                Minv_matvec = aslinearoperator(OPinv).matvec
            if M is None:
                M_matvec = None
            else:
                M_matvec = aslinearoperator(M).matvec

        # unrecognized mode
        else:
            raise ValueError(f"unrecognized mode '{mode}'")

    rng = np.random.default_rng(rng)

    params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
                                    M_matvec, Minv_matvec, sigma,
                                    ncv, v0, maxiter, which, tol, rng)

    while not params.converged:
        params.iterate()

    return params.extract(return_eigenvectors)
IterOpInv
python
huggingface__transformers
src/transformers/models/florence2/modeling_florence2.py
{ "start": 12712, "end": 16114 }
class ____(nn.Module):
    """Windowed (local) multi-head self-attention for the Florence-2 vision
    backbone: attention is computed independently inside non-overlapping
    ``window_size`` x ``window_size`` spatial windows (DaViT-style).

    Input to ``forward`` is ``(batch, height, width, embed_dim)``; the
    output is flattened back to ``(batch, height * width, embed_dim)``.
    """

    def __init__(self, config: Florence2VisionConfig, stage_idx: int):
        super().__init__()
        self.config = config
        # Embedding width and head count are per backbone stage.
        self.dim = config.embed_dim[stage_idx]
        self.window_size = config.window_size
        self.num_heads = config.num_heads[stage_idx]
        head_dim = self.dim // self.num_heads
        # Standard 1/sqrt(head_dim) attention scaling.
        self.scale = head_dim**-0.5
        # Single projection producing Q, K and V concatenated along the last dim.
        self.qkv = nn.Linear(self.dim, self.dim * 3, bias=config.qkv_bias)
        self.proj = nn.Linear(self.dim, self.dim)
        self.is_causal = False

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Apply window attention to a ``(batch, H, W, C)`` feature map and
        return a ``(batch, H*W, C)`` tensor."""
        batch_size, height, width, embed_dim = hidden_states.shape

        # Pad the input if necessary so H and W are multiples of window_size.
        pad_left = pad_top = 0
        pad_right = (self.window_size - width % self.window_size) % self.window_size
        pad_bottom = (self.window_size - height % self.window_size) % self.window_size
        hidden_states = F.pad(hidden_states, (0, 0, pad_left, pad_right, pad_top, pad_bottom))
        _, padded_height, padded_width, _ = hidden_states.shape

        # Partition input into non-overlapping windows (for local spatial attention in DaViT)
        hidden_states = hidden_states.view(
            batch_size,
            padded_height // self.window_size,
            self.window_size,
            padded_width // self.window_size,
            self.window_size,
            embed_dim,
        )
        # -> (batch * num_windows, window_size * window_size, embed_dim)
        windowed_hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous()
        windowed_hidden_states = windowed_hidden_states.view(-1, self.window_size * self.window_size, embed_dim)

        # Generate Q, K, V for each window
        num_windows_per_batch, num_tokens_per_window, embed_dim = windowed_hidden_states.shape
        qkv = self.qkv(windowed_hidden_states).reshape(
            num_windows_per_batch, num_tokens_per_window, 3, self.num_heads, embed_dim // self.num_heads
        )
        # (3, windows, heads, tokens, head_dim) -> separate Q/K/V tensors.
        qkv = qkv.permute(2, 0, 3, 1, 4)
        query, key, value = qkv.unbind(0)

        # Dispatch to the configured attention backend (eager/SDPA/flash...).
        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        windowed_hidden_states, _ = attention_interface(
            self,
            query,
            key,
            value,
            attention_mask=None,
            scaling=self.scale,
        )

        windowed_hidden_states = windowed_hidden_states.view(num_windows_per_batch, num_tokens_per_window, embed_dim)
        windowed_hidden_states = self.proj(windowed_hidden_states)

        # Merge windows back to original spatial layout
        windowed_hidden_states = windowed_hidden_states.view(-1, self.window_size, self.window_size, embed_dim)
        hidden_states = windowed_hidden_states.view(
            -1,
            padded_height // self.window_size,
            padded_width // self.window_size,
            self.window_size,
            self.window_size,
            embed_dim,
        )
        hidden_states = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous()
        hidden_states = hidden_states.view(-1, padded_height, padded_width, embed_dim)
        # Drop the padding added above, then flatten the spatial dimensions.
        hidden_states = hidden_states[:, :height, :width, :].contiguous()
        hidden_states = hidden_states.view(batch_size, height * width, embed_dim)
        return hidden_states
Florence2VisionWindowAttention
python
huggingface__transformers
src/transformers/models/instructblip/modeling_instructblip.py
{ "start": 16569, "end": 20905 }
class ____(nn.Module):
    """Multi-head attention for the InstructBLIP Q-Former.

    Works as self-attention by default; with ``is_cross_attention=True``
    the key/value projections read from ``encoder_hidden_states`` (whose
    width is ``config.encoder_hidden_size``) while queries always come
    from ``hidden_states``. Optionally records attention maps and their
    gradients for inspection when ``self.save_attention`` is set.
    """

    def __init__(self, config, is_cross_attention=False):
        super().__init__()
        self.config = config
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention heads (%d)"
                % (config.hidden_size, config.num_attention_heads)
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        if is_cross_attention:
            # Cross-attention: K/V are projected from the encoder's width.
            self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
            self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
        else:
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.save_attention = False

    def save_attn_gradients(self, attn_gradients):
        # Backward-hook target: stores d(loss)/d(attention_probs).
        self.attn_gradients = attn_gradients

    def get_attn_gradients(self):
        return self.attn_gradients

    def save_attention_map(self, attention_map):
        self.attention_map = attention_map

    def get_attention_map(self):
        return self.attention_map

    def transpose_for_scores(self, x):
        """Reshape (batch, seq, all_head_size) -> (batch, heads, seq, head_size)."""
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        **kwargs: Unpack[TransformersKwargs],
    ):
        """Return ``(context_layer, attention_probs)``.

        ``context_layer`` has shape (batch, seq, all_head_size);
        ``attention_probs`` are the post-softmax (pre-dropout) weights.
        """
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        mixed_query_layer = self.query(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Remember the dtype so the softmax output can be cast back to it.
        attention_scores_dtype = attention_scores.dtype

        if attention_mask is not None:
            # Apply the attention mask (precomputed additively for all layers
            # in the model's forward() function).
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores).to(attention_scores_dtype)

        if is_cross_attention and self.save_attention:
            self.save_attention_map(attention_probs)
            attention_probs.register_hook(self.save_attn_gradients)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs_dropped = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs_dropped, value_layer)

        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        return context_layer, attention_probs


# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->InstructBlipQFormer
InstructBlipQFormerMultiHeadAttention
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/compute.py
{ "start": 68250, "end": 73337 }
class ____(ComputeEngineBaseOperator): """ Permanently and irrevocably deletes an Instance Group Managers. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:ComputeEngineDeleteInstanceGroupManagerOperator` :param resource_id: Name of the Instance Group Managers. :param project_id: Google Cloud project ID where the Compute Engine Instance Group Managers exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param request_id: Unique request_id that you might add to achieve full idempotence (for example when client call times out repeating the request with the same request id will not create a new Instance Group Managers again) It should be in UUID format as defined in RFC 4122 :param gcp_conn_id: The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'. :param api_version: API version used (for example v1 - or beta). Defaults to v1. :param impersonation_chain: Service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). :param retry: A retry object used to retry requests. If `None` is specified, requests will not be retried. :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if `retry` is specified, the timeout applies to each individual attempt. :param metadata: Additional metadata that is provided to the method. 
""" # [START gce_igm_delete_fields] template_fields: Sequence[str] = ( "project_id", "resource_id", "zone", "request_id", "gcp_conn_id", "api_version", "impersonation_chain", ) # [END gce_igm_delete_fields] def __init__( self, *, resource_id: str, zone: str, project_id: str = PROVIDE_PROJECT_ID, request_id: str | None = None, gcp_conn_id: str = "google_cloud_default", api_version="v1", retry: Retry | None = None, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), impersonation_chain: str | Sequence[str] | None = None, validate_body: bool = True, **kwargs, ) -> None: self.zone = zone self.request_id = request_id self.resource_id = resource_id self._field_validator = None # Optional[GcpBodyFieldValidator] self.retry = retry self.timeout = timeout self.metadata = metadata if validate_body: self._field_validator = GcpBodyFieldValidator( GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version ) self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE) super().__init__( project_id=project_id, zone=zone, resource_id=resource_id, gcp_conn_id=gcp_conn_id, api_version=api_version, impersonation_chain=impersonation_chain, **kwargs, ) def _validate_inputs(self) -> None: super()._validate_inputs() if not self.resource_id: raise AirflowException("The required parameter 'resource_id' is missing. 
") def execute(self, context: Context): hook = ComputeEngineHook( gcp_conn_id=self.gcp_conn_id, api_version=self.api_version, impersonation_chain=self.impersonation_chain, ) try: # Checking if specified Instance Group Managers exists and if it does, delete it hook.get_instance_group_manager( resource_id=self.resource_id, project_id=self.project_id, zone=self.zone, ) self.log.info("Successfully found Group Manager %s", self.resource_id) hook.delete_instance_group_manager( resource_id=self.resource_id, project_id=self.project_id, request_id=self.request_id, zone=self.zone, ) self.log.info("Successfully deleted Instance Group Managers") except exceptions.NotFound as e: # Expecting 404 Error in case if Instance Group Managers doesn't exist. if e.code == 404: self.log.error("Instance Group Managers %s doesn't exist", self.resource_id) raise e
ComputeEngineDeleteInstanceGroupManagerOperator
python
pytest-dev__pytest
src/_pytest/main.py
{ "start": 15879, "end": 16101 }
class ____(dict[Path, str]): __slots__ = ("path",) path: Path def __missing__(self, path: Path) -> str: r = bestrelpath(self.path, path) self[path] = r return r @final
_bestrelpath_cache
python
pydantic__pydantic
tests/mypy/modules/metaclass_args.py
{ "start": 186, "end": 325 }
class ____(BaseModel, validate_by_name=True): i: int = Field(alias='j') MetaclassArgumentsNoDefault(i=None)
MetaclassArgumentsNoDefault
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/testing/fixtures/sql.py
{ "start": 802, "end": 6218 }
class ____(TestBase): # 'once', None run_setup_bind = "once" # 'once', 'each', None run_define_tables = "once" # 'once', 'each', None run_create_tables = "once" # 'once', 'each', None run_inserts = "each" # 'each', None run_deletes = "each" # 'once', None run_dispose_bind = None bind = None _tables_metadata = None tables = None other = None sequences = None @config.fixture(autouse=True, scope="class") def _setup_tables_test_class(self): cls = self.__class__ cls._init_class() cls._setup_once_tables() cls._setup_once_inserts() yield cls._teardown_once_metadata_bind() @config.fixture(autouse=True, scope="function") def _setup_tables_test_instance(self): self._setup_each_tables() self._setup_each_inserts() yield self._teardown_each_tables() @property def tables_test_metadata(self): return self._tables_metadata @classmethod def _init_class(cls): if cls.run_define_tables == "each": if cls.run_create_tables == "once": cls.run_create_tables = "each" assert cls.run_inserts in ("each", None) cls.other = adict() cls.tables = adict() cls.sequences = adict() cls.bind = cls.setup_bind() cls._tables_metadata = sa.MetaData() @classmethod def _setup_once_inserts(cls): if cls.run_inserts == "once": cls._load_fixtures() with cls.bind.begin() as conn: cls.insert_data(conn) @classmethod def _setup_once_tables(cls): if cls.run_define_tables == "once": cls.define_tables(cls._tables_metadata) if cls.run_create_tables == "once": cls._tables_metadata.create_all(cls.bind) cls.tables.update(cls._tables_metadata.tables) cls.sequences.update(cls._tables_metadata._sequences) def _setup_each_tables(self): if self.run_define_tables == "each": self.define_tables(self._tables_metadata) if self.run_create_tables == "each": self._tables_metadata.create_all(self.bind) self.tables.update(self._tables_metadata.tables) self.sequences.update(self._tables_metadata._sequences) elif self.run_create_tables == "each": self._tables_metadata.create_all(self.bind) def _setup_each_inserts(self): if self.run_inserts 
== "each": self._load_fixtures() with self.bind.begin() as conn: self.insert_data(conn) def _teardown_each_tables(self): if self.run_define_tables == "each": self.tables.clear() if self.run_create_tables == "each": drop_all_tables_from_metadata(self._tables_metadata, self.bind) self._tables_metadata.clear() elif self.run_create_tables == "each": drop_all_tables_from_metadata(self._tables_metadata, self.bind) # no need to run deletes if tables are recreated on setup if ( self.run_define_tables != "each" and self.run_create_tables == "once" and self.run_deletes == "each" ): with self.bind.begin() as conn: provision.delete_from_all_tables( conn, config, self._tables_metadata ) @classmethod def _teardown_once_metadata_bind(cls): if cls.run_create_tables: drop_all_tables_from_metadata(cls._tables_metadata, cls.bind) if cls.run_dispose_bind == "once": cls.dispose_bind(cls.bind) cls._tables_metadata.bind = None if cls.run_setup_bind is not None: cls.bind = None @classmethod def setup_bind(cls): return config.db @classmethod def dispose_bind(cls, bind): if hasattr(bind, "dispose"): bind.dispose() elif hasattr(bind, "close"): bind.close() @classmethod def define_tables(cls, metadata): pass @classmethod def fixtures(cls): return {} @classmethod def insert_data(cls, connection): pass def sql_count_(self, count, fn): self.assert_sql_count(self.bind, fn, count) def sql_eq_(self, callable_, statements): self.assert_sql(self.bind, callable_, statements) @classmethod def _load_fixtures(cls): """Insert rows as represented by the fixtures() method.""" headers, rows = {}, {} for table, data in cls.fixtures().items(): if len(data) < 2: continue if isinstance(table, str): table = cls.tables[table] headers[table] = data[0] rows[table] = data[1:] for table, fks in sort_tables_and_constraints( cls._tables_metadata.tables.values() ): if table is None: continue if table not in headers: continue with cls.bind.begin() as conn: conn.execute( table.insert(), [ dict(zip(headers[table], 
column_values)) for column_values in rows[table] ], )
TablesTest
python
getsentry__sentry
src/sentry/rules/conditions/event_frequency.py
{ "start": 31769, "end": 32678 }
class ____(EventFrequencyForm): intervals = PERCENT_INTERVALS_TO_DISPLAY interval = forms.ChoiceField( choices=[ (key, label) for key, (label, duration) in sorted( PERCENT_INTERVALS_TO_DISPLAY.items(), key=lambda key____label__duration: key____label__duration[1][1], ) ] ) value = forms.FloatField(widget=forms.TextInput(), min_value=0) def clean(self) -> dict[str, Any] | None: cleaned_data = super().clean() if ( cleaned_data and cleaned_data["comparisonType"] == ComparisonType.COUNT and cleaned_data.get("value", 0) > 100 ): self.add_error( "value", forms.ValidationError("Ensure this value is less than or equal to 100") ) return None return cleaned_data
EventFrequencyPercentForm
python
django__django
tests/admin_views/test_history_view.py
{ "start": 377, "end": 1894 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.superuser = User.objects.create_superuser( username="super", password="secret", email="super@example.com", ) def setUp(self): self.client.force_login(self.superuser) def test_changed_message_uses_form_labels(self): """ Admin's model history change messages use form labels instead of field names. """ state = State.objects.create(name="My State Name") city = City.objects.create(name="My City Name", state=state) change_dict = { "name": "My State Name 2", "nolabel_form_field": True, "city_set-0-name": "My City name 2", "city_set-0-id": city.pk, "city_set-TOTAL_FORMS": "3", "city_set-INITIAL_FORMS": "1", "city_set-MAX_NUM_FORMS": "0", } state_change_url = reverse("admin:admin_views_state_change", args=(state.pk,)) self.client.post(state_change_url, change_dict) logentry = LogEntry.objects.filter(content_type__model__iexact="state").latest( "id" ) self.assertEqual( logentry.get_change_message(), "Changed State name (from form’s Meta.labels), " "nolabel_form_field and not_a_form_field. " "Changed City verbose_name for city “%s”." % city, ) @override_settings(ROOT_URLCONF="admin_views.urls")
AdminHistoryViewTests
python
huggingface__transformers
tests/models/gemma2/test_modeling_gemma2.py
{ "start": 1907, "end": 20908 }
class ____(unittest.TestCase): input_text = ["Hello I am doing", "Hi today"] def setUp(self): cleanup(torch_device, gc_collect=True) def tearDown(self): cleanup(torch_device, gc_collect=True) @require_torch_large_accelerator @require_read_token def test_model_9b_bf16(self): model_id = "google/gemma-2-9b" EXPECTED_TEXTS = [ "<bos>Hello I am doing a project on the 1918 flu pandemic and I am trying to find out how many", "<pad><pad><bos>Hi today I'm going to be talking about the history of the United States. The United States of America", ] model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, attn_implementation="eager").to( torch_device ) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=False) self.assertEqual(output_text, EXPECTED_TEXTS) @require_torch_large_accelerator @require_read_token def test_model_9b_fp16(self): model_id = "google/gemma-2-9b" EXPECTED_TEXTS = [ "<bos>Hello I am doing a project on the 1918 flu pandemic and I am trying to find out how many", "<pad><pad><bos>Hi today I'm going to be talking about the history of the United States. 
The United States of America", ] model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16, attn_implementation="eager").to( torch_device ) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=False) self.assertEqual(output_text, EXPECTED_TEXTS) @require_read_token @require_torch_large_accelerator def test_model_9b_pipeline_bf16(self): # See https://github.com/huggingface/transformers/pull/31747 -- pipeline was broken for Gemma2 before this PR model_id = "google/gemma-2-9b" # EXPECTED_TEXTS should match the same non-pipeline test, minus the special tokens EXPECTED_TEXTS = [ "Hello I am doing a project on the 1918 flu pandemic and I am trying to find out how many", "Hi today I'm going to be talking about the history of the United States. The United States of America", ] model = AutoModelForCausalLM.from_pretrained( model_id, dtype=torch.bfloat16, attn_implementation="flex_attention" ).to(torch_device) tokenizer = AutoTokenizer.from_pretrained(model_id) pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) output = pipe(self.input_text, max_new_tokens=20, do_sample=False, padding=True) self.assertEqual(output[0][0]["generated_text"], EXPECTED_TEXTS[0]) self.assertEqual(output[1][0]["generated_text"], EXPECTED_TEXTS[1]) # TODO: run_test_using_subprocess was added because of an issue in torch 2.9, which is already fixed in nightly # We can remove this once we upgrade to torch 2.10 @run_test_using_subprocess @require_read_token def test_model_2b_pipeline_bf16_flex_attention(self): # See https://github.com/huggingface/transformers/pull/31747 -- pipeline was broken for Gemma2 before this PR model_id = "google/gemma-2-2b" # EXPECTED_TEXTS should match the same non-pipeline test, minus the special tokens EXPECTED_BATCH_TEXTS 
= Expectations( { ("xpu", 3): [ "Hello I am doing a project on the 1960s and I am trying to find out what the average", "Hi today I'm going to be talking about the 10 most powerful characters in the Naruto series.", ], ("cuda", 8): [ "Hello I am doing a project on the 1960s and I am trying to find out what the average", "Hi today I'm going to be talking about the 10 most powerful characters in the Naruto series.", ], } ) EXPECTED_BATCH_TEXT = EXPECTED_BATCH_TEXTS.get_expectation() model = AutoModelForCausalLM.from_pretrained( model_id, dtype=torch.bfloat16, attn_implementation="flex_attention" ).to(torch_device) tokenizer = AutoTokenizer.from_pretrained(model_id) pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) output = pipe(self.input_text, max_new_tokens=20, do_sample=False, padding=True) self.assertEqual(output[0][0]["generated_text"], EXPECTED_BATCH_TEXT[0]) self.assertEqual(output[1][0]["generated_text"], EXPECTED_BATCH_TEXT[1]) @require_read_token @require_flash_attn @require_torch_large_gpu @mark.flash_attn_test @slow def test_model_9b_flash_attn(self): # See https://github.com/huggingface/transformers/issues/31953 --- flash attn was generating garbage for gemma2, especially in long context model_id = "google/gemma-2-9b" EXPECTED_TEXTS = [ '<bos>Hello I am doing a project on the 1918 flu pandemic and I am trying to find out how many people died in the United States. I have found a few sites that say 500,000 but I am not sure if that is correct. I have also found a site that says 675,000 but I am not sure if that is correct either. I am trying to find out how many people died in the United States. I have found a few', "<pad><pad><bos>Hi today I'm going to be talking about the history of the United States. The United States of America is a country in North America. It is the third largest country in the world by total area and the third most populous country with over 320 million people. 
The United States is a federal republic composed of 50 states and a federal district. The 48 contiguous states and the district of Columbia are in central North America between Canada and Mexico. The state of Alaska is in the", ] # fmt: skip model = AutoModelForCausalLM.from_pretrained( model_id, attn_implementation="flash_attention_2", dtype="float16" ).to(torch_device) tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=100, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=False) self.assertEqual(output_text, EXPECTED_TEXTS) @pytest.mark.torch_export_test @slow @require_read_token def test_export_static_cache(self): if version.parse(torch.__version__) < version.parse("2.5.0"): self.skipTest(reason="This test requires torch >= 2.5 to run.") from transformers.integrations.executorch import ( TorchExportableModuleWithStaticCache, ) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-2b", pad_token="</s>", padding_side="right") EXPECTED_TEXT_COMPLETIONS = Expectations( { ("xpu", 3): [ "Hello I am doing a project for my school and I need to know how to make a program that will take a number" ], ("cuda", 7): [ "Hello I am doing a project for my school and I need to know how to make a program that will take a number" ], ("cuda", 8): [ "Hello I am doing a project for my class and I am having trouble with the code. 
I am trying to make a" ], ("rocm", (9, 5)): [ "Hello I am doing a project for my school and I need to know how to make a program that will take a number" ], } ) EXPECTED_TEXT_COMPLETION = EXPECTED_TEXT_COMPLETIONS.get_expectation() max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[ "input_ids" ].shape[-1] # Load model device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM dtype = torch.bfloat16 cache_implementation = "static" attn_implementation = "sdpa" batch_size = 1 model = AutoModelForCausalLM.from_pretrained( "google/gemma-2-2b", device_map=device, dtype=dtype, attn_implementation=attn_implementation, generation_config=GenerationConfig( use_cache=True, cache_implementation=cache_implementation, max_length=max_generation_length, cache_config={ "batch_size": batch_size, "max_cache_len": max_generation_length, }, ), ) prompts = ["Hello I am doing"] prompt_tokens = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) prompt_token_ids = prompt_tokens["input_ids"] max_new_tokens = max_generation_length - prompt_token_ids.shape[-1] # Static Cache + export from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM exportable_module = TorchExportableModuleForDecoderOnlyLM(model) exported_program = exportable_module.export( input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device), cache_position=torch.tensor([0], dtype=torch.long, device=model.device), ) ep_generated_ids = TorchExportableModuleWithStaticCache.generate( exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens ) ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text) @slow @require_read_token @require_large_cpu_ram @pytest.mark.torch_export_test def test_export_hybrid_cache(self): from transformers.integrations.executorch import 
TorchExportableModuleForDecoderOnlyLM from transformers.pytorch_utils import is_torch_greater_or_equal if not is_torch_greater_or_equal("2.6.0"): self.skipTest(reason="This test requires torch >= 2.6 to run.") model_id = "google/gemma-2-2b" model = AutoModelForCausalLM.from_pretrained(model_id) self.assertEqual(model.config.cache_implementation, "hybrid") # Export + hybrid cache model.eval() exportable_module = TorchExportableModuleForDecoderOnlyLM(model, batch_size=1, max_cache_len=1024) exported_program = exportable_module.export( input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device), cache_position=torch.tensor([0], dtype=torch.long, device=model.device), ) # Test generation with the exported model prompt = "What is the capital of France?" max_new_tokens_to_generate = 20 # Generate text with the exported model tokenizer = AutoTokenizer.from_pretrained(model_id) export_generated_text = TorchExportableModuleForDecoderOnlyLM.generate( exported_program, tokenizer, prompt, max_new_tokens=max_new_tokens_to_generate ) input_text = tokenizer(prompt, return_tensors="pt") with torch.no_grad(): eager_outputs = model.generate( **input_text, max_new_tokens=max_new_tokens_to_generate, do_sample=False, # Use greedy decoding to match the exported model ) eager_generated_text = tokenizer.decode(eager_outputs[0], skip_special_tokens=True) self.assertEqual(export_generated_text, eager_generated_text) @require_torch_large_accelerator @require_read_token def test_model_9b_bf16_flex_attention(self): model_id = "google/gemma-2-9b" EXPECTED_TEXTS = [ "<bos>Hello I am doing a project on the 1918 flu pandemic and I am trying to find out how many", "<pad><pad><bos>Hi today I'm going to be talking about the history of the United States. 
The United States of America", ] model = AutoModelForCausalLM.from_pretrained( model_id, dtype=torch.bfloat16, attn_implementation="flex_attention" ).to(torch_device) assert model.config._attn_implementation == "flex_attention" tokenizer = AutoTokenizer.from_pretrained(model_id) inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device) output = model.generate(**inputs, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output, skip_special_tokens=False) self.assertEqual(output_text, EXPECTED_TEXTS) @parameterized.expand([("flash_attention_2",), ("sdpa",), ("flex_attention",), ("eager",)]) @require_read_token def test_generation_beyond_sliding_window(self, attn_implementation: str): """Test that we can correctly generate beyond the sliding window. This is non trivial as we need to correctly slice the attention mask in all cases (because we use a hybrid cache). Outputs for every attention functions should be coherent and identical. """ # Impossible to test it with this model (even with < 100 tokens), probably due to the compilation of a large model. if attn_implementation == "flex_attention": self.skipTest( reason="`flex_attention` gives `torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfMemoryError: out of resource: triton_tem_fused_0 Required: 147456 Hardware limit:101376 Reducing block sizes or `num_stages` may help.`" ) if attn_implementation == "flash_attention_2" and not is_flash_attn_2_available(): self.skipTest("FlashAttention2 is required for this test.") if torch_device == "xpu" and attn_implementation == "flash_attention_2": self.skipTest(reason="Intel XPU doesn't support flash_attention_2 as of now.") model_id = "google/gemma-2-2b" EXPECTED_COMPLETIONS = [ " the people, the food, the culture, the history, the music, the art, the architecture", ", green, yellow, orange, purple, pink, brown, black, white, gray, silver", ] input_text = [ "This is a nice place. 
" * 800 + "I really enjoy the scenery,", # This is larger than 4096 tokens "A list of colors: red, blue", # This will almost all be padding tokens ] tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left") inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device) model = AutoModelForCausalLM.from_pretrained( model_id, attn_implementation=attn_implementation, dtype=torch.float16 ).to(torch_device) # Make sure prefill is larger than sliding window input_size = inputs.input_ids.shape[-1] self.assertTrue(input_size > model.config.sliding_window) # It should by Hybrid by default from hub config, but let's make sure! out = model.generate(**inputs, max_new_tokens=20, cache_implementation="hybrid")[:, input_size:] output_text = tokenizer.batch_decode(out) self.assertEqual(output_text, EXPECTED_COMPLETIONS) @parameterized.expand([("flash_attention_2",), ("sdpa",), ("flex_attention",), ("eager",)]) @require_read_token def test_generation_beyond_sliding_window_dynamic(self, attn_implementation: str): """ Same as above, but explicitly setting the cache to Dynamic, as it's otherwise static by default for the model on the hub """ # Impossible to test it with this model (even with < 100 tokens), probably due to the compilation of a large model. if attn_implementation == "flex_attention": self.skipTest( reason="`flex_attention` gives `torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. 
OutOfMemoryError: out of resource: triton_tem_fused_0 Required: 147456 Hardware limit:101376 Reducing block sizes or `num_stages` may help.`" ) if attn_implementation == "flash_attention_2" and not is_flash_attn_2_available(): self.skipTest("FlashAttention2 is required for this test.") if torch_device == "xpu" and attn_implementation == "flash_attention_2": self.skipTest(reason="Intel XPU doesn't support flash_attention_2 as of now.") model_id = "google/gemma-2-2b" EXPECTED_COMPLETIONS = [ " the people, the food, the culture, the history, the music, the art, the architecture", ", green, yellow, orange, purple, pink, brown, black, white, gray, silver", ] input_text = [ "This is a nice place. " * 800 + "I really enjoy the scenery,", # This is larger than 4096 tokens "A list of colors: red, blue", # This will almost all be padding tokens ] tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left") inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device) model = AutoModelForCausalLM.from_pretrained( model_id, attn_implementation=attn_implementation, dtype=torch.float16 ).to(torch_device) # Make sure prefill is larger than sliding window input_size = inputs.input_ids.shape[-1] self.assertTrue(input_size > model.config.sliding_window) out = model.generate(**inputs, max_new_tokens=20, cache_implementation="dynamic", return_dict_in_generate=True) output_text = tokenizer.batch_decode(out.sequences[:, input_size:]) self.assertEqual(output_text, EXPECTED_COMPLETIONS) # Let's check that the dynamic cache has hybrid layers! 
dynamic_cache = out.past_key_values self.assertTrue(isinstance(dynamic_cache, DynamicCache)) for layer, layer_type in zip(dynamic_cache.layers, model.config.layer_types): if layer_type == "sliding_attention": self.assertTrue(isinstance(layer, DynamicSlidingWindowLayer)) self.assertEqual(layer.keys.shape[-2], model.config.sliding_window - 1) else: self.assertTrue(isinstance(layer, DynamicLayer)) # max_new_tokens - 1 because last token generated is not cached self.assertEqual(layer.keys.shape[-2], input_size + 20 - 1)
Gemma2IntegrationTest
python
scikit-learn__scikit-learn
sklearn/utils/_param_validation.py
{ "start": 8915, "end": 9507 }
class ____(Real): """A type that represents reals that are not instances of int. Behaves like float, but also works with values extracted from numpy arrays. isintance(1, RealNotInt) -> False isinstance(1.0, RealNotInt) -> True """ RealNotInt.register(float) def _type_name(t): """Convert type into human readable string.""" module = t.__module__ qualname = t.__qualname__ if module == "builtins": return qualname elif t == Real: return "float" elif t == Integral: return "int" return f"{module}.{qualname}"
RealNotInt
python
huggingface__transformers
.circleci/create_circleci_config.py
{ "start": 2322, "end": 3362 }
class ____: job_name = "empty" def to_dict(self): steps = [{"run": 'ls -la'}] if self.job_name == "collection_job": steps.extend( [ "checkout", {"run": "pip install requests || true"}, {"run": """while [[ $(curl --location --request GET "https://circleci.com/api/v2/workflow/$CIRCLE_WORKFLOW_ID/job" --header "Circle-Token: $CCI_TOKEN"| jq -r '.items[]|select(.name != "collection_job")|.status' | grep -c "running") -gt 0 ]]; do sleep 5; done || true"""}, {"run": 'python utils/process_circleci_workflow_test_reports.py --workflow_id $CIRCLE_WORKFLOW_ID || true'}, {"store_artifacts": {"path": "outputs"}}, {"run": 'echo "All required jobs have now completed"'}, ] ) return { "docker": copy.deepcopy(DEFAULT_DOCKER_IMAGE), "resource_class": "small", "steps": steps, } @dataclass
EmptyJob
python
TheAlgorithms__Python
data_structures/queues/queue_by_two_stacks.py
{ "start": 84, "end": 2614 }
class ____[T]: def __init__(self, iterable: Iterable[T] | None = None) -> None: """ >>> QueueByTwoStacks() Queue(()) >>> QueueByTwoStacks([10, 20, 30]) Queue((10, 20, 30)) >>> QueueByTwoStacks((i**2 for i in range(1, 4))) Queue((1, 4, 9)) """ self._stack1: list[T] = list(iterable or []) self._stack2: list[T] = [] def __len__(self) -> int: """ >>> len(QueueByTwoStacks()) 0 >>> from string import ascii_lowercase >>> len(QueueByTwoStacks(ascii_lowercase)) 26 >>> queue = QueueByTwoStacks() >>> for i in range(1, 11): ... queue.put(i) ... >>> len(queue) 10 >>> for i in range(2): ... queue.get() 1 2 >>> len(queue) 8 """ return len(self._stack1) + len(self._stack2) def __repr__(self) -> str: """ >>> queue = QueueByTwoStacks() >>> queue Queue(()) >>> str(queue) 'Queue(())' >>> queue.put(10) >>> queue Queue((10,)) >>> queue.put(20) >>> queue.put(30) >>> queue Queue((10, 20, 30)) """ return f"Queue({tuple(self._stack2[::-1] + self._stack1)})" def put(self, item: T) -> None: """ Put `item` into the Queue >>> queue = QueueByTwoStacks() >>> queue.put(10) >>> queue.put(20) >>> len(queue) 2 >>> queue Queue((10, 20)) """ self._stack1.append(item) def get(self) -> T: """ Get `item` from the Queue >>> queue = QueueByTwoStacks((10, 20, 30)) >>> queue.get() 10 >>> queue.put(40) >>> queue.get() 20 >>> queue.get() 30 >>> len(queue) 1 >>> queue.get() 40 >>> queue.get() Traceback (most recent call last): ... IndexError: Queue is empty """ # To reduce number of attribute look-ups in `while` loop. stack1_pop = self._stack1.pop stack2_append = self._stack2.append if not self._stack2: while self._stack1: stack2_append(stack1_pop()) if not self._stack2: raise IndexError("Queue is empty") return self._stack2.pop() if __name__ == "__main__": from doctest import testmod testmod()
QueueByTwoStacks
python
python-openxml__python-docx
src/docx/oxml/text/font.py
{ "start": 10702, "end": 10882 }
class CT_VerticalAlignRun(BaseOxmlElement):
    """`<w:vertAlign>` element, specifying subscript or superscript."""

    # `w:val` is a required attribute; permitted values come from the
    # ST_VerticalAlignRun simple type.
    val: str = RequiredAttribute("w:val", ST_VerticalAlignRun)
CT_VerticalAlignRun
python
PyCQA__pylint
tests/functional/m/method_hidden.py
{ "start": 2271, "end": 2387 }
class ____(Parent): @functools().cached_property def _protected(self): pass
ChildHidingAncestorAttribute
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py
{ "start": 30908, "end": 31819 }
class ____(graphene.Mutation): """Log telemetry about the Dagster instance.""" Output = graphene.NonNull(GrapheneLogTelemetryMutationResult) class Arguments: action = graphene.Argument(graphene.NonNull(graphene.String)) clientTime = graphene.Argument(graphene.NonNull(graphene.String)) clientId = graphene.Argument(graphene.NonNull(graphene.String)) metadata = graphene.Argument(graphene.NonNull(graphene.String)) class Meta: name = "LogTelemetryMutation" @capture_error def mutate( self, graphene_info: ResolveInfo, action: str, clientTime: str, clientId: str, metadata: str ): action = log_ui_telemetry_event( graphene_info, action=action, client_time=clientTime, client_id=clientId, metadata=metadata, ) return action
GrapheneLogTelemetryMutation
python
kamyu104__LeetCode-Solutions
Python/find-indices-with-index-and-value-difference-i.py
{ "start": 42, "end": 1153 }
class ____(object):
    def findIndices(self, nums, indexDifference, valueDifference):
        """
        :type nums: List[int]
        :type indexDifference: int
        :type valueDifference: int
        :rtype: List[int]
        """
        # For each right index j = i + indexDifference, keep the indices of
        # the running max/min over the prefix nums[0..i]. A valid pair exists
        # exactly when the gap between one of those extremes and nums[j]
        # reaches valueDifference; no abs() is needed because whichever side
        # is negative is covered by the opposite extreme's check.
        best_max = best_min = 0
        for i in xrange(len(nums) - indexDifference):
            j = i + indexDifference
            if nums[i] > nums[best_max]:
                best_max = i
            elif nums[i] < nums[best_min]:
                best_min = i
            if nums[best_max] - nums[j] >= valueDifference:
                return [best_max, j]
            if nums[j] - nums[best_min] >= valueDifference:
                return [best_min, j]
        return [-1, -1]
Solution
python
prabhupant__python-ds
data_structures/binary_trees/left_right_to_down_right.py
{ "start": 88, "end": 436 }
class ____:
    """Binary-tree node: a value plus `left`/`right` child links."""

    def __init__(self, val):
        self.val = val
        self.left = None
        # Bug fix: this was `self.rigt = None` (typo), so nodes never had a
        # `right` attribute and convert() raised AttributeError on any leaf.
        self.right = None


def convert(root):
    """Convert a left/right binary tree into its down/right
    (first-child / next-sibling) form, in place.

    After conversion, each node's `left` points to its first child, that
    child's `right` chain links the former siblings, and the node's own
    `right` becomes None.
    """
    if root is None:
        return
    # Post-order: both subtrees are already converted before rewiring here.
    convert(root.left)
    convert(root.right)
    if root.left is None:  # `is None`, not `== None`
        root.left = root.right
    else:
        root.left.right = root.right
    root.right = None
Node
python
huggingface__transformers
tests/models/visual_bert/test_modeling_visual_bert.py
{ "start": 23578, "end": 29197 }
class ____(unittest.TestCase): @slow def test_inference_vqa_coco_pre(self): model = VisualBertForPreTraining.from_pretrained("uclanlp/visualbert-vqa-coco-pre") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) vocab_size = 30522 expected_shape = torch.Size((1, 16, vocab_size)) self.assertEqual(output.prediction_logits.shape, expected_shape) expected_slice = torch.tensor( [[[-5.1858, -5.1903, -4.9142], [-6.2214, -5.9238, -5.8381], [-6.3027, -5.9939, -5.9297]]] ) torch.testing.assert_close(output.prediction_logits[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) expected_shape_2 = torch.Size((1, 2)) self.assertEqual(output.seq_relationship_logits.shape, expected_shape_2) expected_slice_2 = torch.tensor([[0.7393, 0.1754]]) torch.testing.assert_close(output.seq_relationship_logits, expected_slice_2, rtol=1e-4, atol=1e-4) @slow def test_inference_vqa(self): model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = torch.ones(size=(1, 10, 2048), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): 
output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) # vocab_size = 30522 expected_shape = torch.Size((1, 3129)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor( [[-8.9898, 3.0803, -1.8016, 2.4542, -8.3420, -2.0224, -3.3124, -4.4139, -3.1491, -3.8997]] ) torch.testing.assert_close(output.logits[:, :10], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_nlvr(self): model = VisualBertForVisualReasoning.from_pretrained("uclanlp/visualbert-nlvr2") input_ids = torch.tensor([1, 2, 3, 4, 5, 6], dtype=torch.long).reshape(1, -1) token_type_ids = torch.tensor([0, 0, 0, 1, 1, 1], dtype=torch.long).reshape(1, -1) visual_embeds = torch.ones(size=(1, 10, 1024), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 10), dtype=torch.long) attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) # vocab_size = 30522 expected_shape = torch.Size((1, 2)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor([[-1.1436, 0.8900]]) torch.testing.assert_close(output.logits, expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_vcr(self): model = VisualBertForMultipleChoice.from_pretrained("uclanlp/visualbert-vcr") input_ids = torch.tensor([[[1, 2, 3, 4, 5, 6] for i in range(4)]], dtype=torch.long) attention_mask = torch.ones_like(input_ids) token_type_ids = torch.ones_like(input_ids) visual_embeds = torch.ones(size=(1, 4, 10, 512), dtype=torch.float32) * 0.5 visual_token_type_ids = torch.ones(size=(1, 4, 10), dtype=torch.long) 
visual_attention_mask = torch.ones_like(visual_token_type_ids) with torch.no_grad(): output = model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, ) # vocab_size = 30522 expected_shape = torch.Size((1, 4)) self.assertEqual(output.logits.shape, expected_shape) expected_slice = torch.tensor([[-7.7697, -7.7697, -7.7697, -7.7697]]) torch.testing.assert_close(output.logits, expected_slice, rtol=1e-4, atol=1e-4)
VisualBertModelIntegrationTest
python
ray-project__ray
release/llm_tests/serve/benchmark/firehose_utils.py
{ "start": 377, "end": 630 }
class ____(str, Enum):
    # Record names for benchmark/test results. Subclassing `str` makes each
    # member compare and serialize as its plain string value, which is useful
    # when the name is written to an external sink.
    STARTUP_TEST = "service-startup-test"
    STARTUP_TEST_GCP = "service-startup-test-gcp"
    STARTUP_TEST_AWS = "service-startup-test-aws"
    RAYLLM_PERF_TEST = "rayllm-perf-test"
    VLLM_PERF_TEST = "vllm-perf-test"
RecordName
python
cython__cython
Cython/Compiler/Nodes.py
{ "start": 111651, "end": 131875 }
class ____(FuncDefNode): # C function definition. # # modifiers ['inline'] # visibility 'private' or 'public' or 'extern' # base_type CBaseTypeNode # declarator CDeclaratorNode # cfunc_declarator the CFuncDeclarator of this function # (this is also available through declarator or a # base thereof) # body StatListNode # api boolean # decorators [DecoratorNode] list of decorators # # with_gil boolean Acquire GIL around body # type CFuncType # py_func wrapper for calling from Python # overridable whether or not this is a cpdef function # inline_in_pxd whether this is an inline function in a pxd file # template_declaration String or None Used for c++ class methods # is_const_method whether this is a const method # is_static_method whether this is a static method # is_c_class_method whether this is a cclass method child_attrs = ["base_type", "declarator", "body", "decorators", "py_func_stat"] outer_attrs = ["decorators", "py_func_stat"] inline_in_pxd = False decorators = None directive_locals = None directive_returns = None override = None template_declaration = None is_const_method = False py_func_stat = None _code_object = None def unqualified_name(self): return self.entry.name def declared_name(self): return self.declarator.declared_name() @property def code_object(self): # share the CodeObject with the cpdef wrapper (if available) return self.py_func.code_object if self.py_func else self._code_object @code_object.setter def code_object(self, code_object): self._code_object = code_object def analyse_declarations(self, env): self.c_compile_guard = env.directives['c_compile_guard'] self.is_c_class_method = env.is_c_class_scope if self.directive_locals is None: self.directive_locals = {} self.directive_locals.update(env.directives.get('locals', {})) if self.directive_returns is not None: base_type = self.directive_returns.analyse_as_type(env) # Annotated return types with wrong type produce warnings instead of errors. 
if base_type is None: if self.directive_returns.is_annotation: base_type = self.base_type.analyse(env) else: error(self.directive_returns.pos, "Not a type") base_type = PyrexTypes.error_type else: base_type = self.base_type.analyse(env) self.is_static_method = 'staticmethod' in env.directives and not env.lookup_here('staticmethod') # The 2 here is because we need both function and argument names. if isinstance(self.declarator, CFuncDeclaratorNode): name_declarator, typ = self.declarator.analyse( base_type, env, nonempty=2 * (self.body is not None), directive_locals=self.directive_locals, visibility=self.visibility) else: name_declarator, typ = self.declarator.analyse( base_type, env, nonempty=2 * (self.body is not None), visibility=self.visibility) if not typ.is_cfunction: error(self.pos, "Suite attached to non-function declaration") # Remember the actual type according to the function header # written here, because the type in the symbol table entry # may be different if we're overriding a C method inherited # from the base type of an extension type. self.type = typ typ.is_overridable = self.overridable declarator = self.declarator while not hasattr(declarator, 'args'): declarator = declarator.base self.cfunc_declarator = declarator self.args = declarator.args opt_arg_count = self.cfunc_declarator.optional_arg_count if (self.visibility == 'public' or self.api) and opt_arg_count: error(self.cfunc_declarator.pos, "Function with optional arguments may not be declared public or api") if typ.exception_check == '+' and self.visibility != 'extern': if typ.exception_value is not None and typ.exception_value.is_name: # it really is impossible to reason about what the user wants to happens # if they've specified a C++ exception translation function. Therefore, # raise an error. 
error(self.pos, "Only extern functions can throw C++ exceptions.") else: warning(self.pos, "Only extern functions can throw C++ exceptions.", 2) for formal_arg, type_arg in zip(self.args, typ.args): self.align_argument_type(env, type_arg) formal_arg.type = type_arg.type formal_arg.name = type_arg.name formal_arg.cname = type_arg.cname self._validate_type_visibility(type_arg.type, type_arg.pos, env) if type_arg.type.is_fused: self.has_fused_arguments = True if type_arg.type.is_buffer and 'inline' in self.modifiers: warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1) if type_arg.type.is_buffer or type_arg.type.is_pythran_expr: if self.type.nogil: error(formal_arg.pos, "Buffer may not be acquired without the GIL. Consider using memoryview slices instead.") elif 'inline' in self.modifiers: warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1) self._validate_type_visibility(typ.return_type, self.pos, env) name = name_declarator.name cname = name_declarator.cname typ.is_const_method = self.is_const_method typ.is_static_method = self.is_static_method self.entry = env.declare_cfunction( name, typ, self.pos, cname=cname, visibility=self.visibility, api=self.api, defining=self.body is not None, modifiers=self.modifiers, overridable=self.overridable, in_pxd=self.inline_in_pxd) self.return_type = typ.return_type if self.return_type.is_array and self.visibility != 'extern': error(self.pos, "Function cannot return an array") if self.return_type.is_cpp_class: self.return_type.check_nullary_constructor(self.pos, "used as a return value") if self.overridable and not env.is_module_scope and not self.is_static_method: if len(self.args) < 1 or not self.args[0].type.is_pyobject: # An error will be produced in the cdef function self.overridable = False self.declare_cpdef_wrapper(env) self.create_local_scope(env) def declare_cpdef_wrapper(self, env): if not self.overridable: return if self.is_static_method: # TODO(robertwb): Finish this up, perhaps via more 
function refactoring. error(self.pos, "static cpdef methods not yet supported") py_func_body = self.call_self_node(is_module_scope=env.is_module_scope) py_func_body = CompilerDirectivesNode.for_directives( py_func_body, env, profile=False, linetrace=False) if self.is_static_method: from .ExprNodes import NameNode decorators = [DecoratorNode(self.pos, decorator=NameNode(self.pos, name=EncodedString('staticmethod')))] decorators[0].decorator.analyse_types(env) else: decorators = [] name = self.entry.name self.py_func = DefNode(pos=self.pos, name=self.entry.name, args=self.args, star_arg=None, starstar_arg=None, doc=self.doc, body=StatListNode(self.pos, stats=[py_func_body]), decorators=decorators, is_wrapper=1) self.py_func.is_module_scope = env.is_module_scope self.py_func.analyse_declarations(env) self.py_func.entry.is_overridable = True self.py_func_stat = StatListNode(self.pos, stats=[self.py_func]) self.py_func.type = PyrexTypes.py_object_type self.entry.as_variable = self.py_func.entry self.entry.used = self.entry.as_variable.used = True # Reset scope entry the above cfunction env.entries[name] = self.entry if (not self.entry.is_final_cmethod and (not env.is_module_scope or Options.lookup_module_cpdef)): if self.override: # This is a hack: we shouldn't create the wrapper twice, but we do for fused functions. assert self.entry.is_fused_specialized # should not happen for non-fused cpdef functions self.override.py_func = self.py_func else: self.override = OverrideCheckNode(self.pos, py_func=self.py_func) self.body = StatListNode(self.pos, stats=[self.override, self.body]) def _validate_type_visibility(self, type, pos, env): """ Ensure that types used in cdef functions are public or api, or defined in a C header. 
""" public_or_api = (self.visibility == 'public' or self.api) entry = getattr(type, 'entry', None) if public_or_api and entry and env.is_module_scope: if not (entry.visibility in ('public', 'extern') or entry.api or entry.in_cinclude): error(pos, "Function declared public or api may not have private types") def call_self_node(self, omit_optional_args=0, is_module_scope=0): from . import ExprNodes args = self.type.args if omit_optional_args: args = args[:len(args) - self.type.optional_arg_count] arg_names = [arg.name for arg in args] if is_module_scope: cfunc = ExprNodes.NameNode(self.pos, name=self.entry.name) call_arg_names = arg_names elif self.type.is_static_method: class_entry = self.entry.scope.parent_type.entry class_node = ExprNodes.NameNode(self.pos, name=class_entry.name) class_node.entry = class_entry cfunc = ExprNodes.AttributeNode(self.pos, obj=class_node, attribute=self.entry.name) else: type_entry = self.type.args[0].type.entry type_arg = ExprNodes.NameNode(self.pos, name=type_entry.name) type_arg.entry = type_entry cfunc = ExprNodes.AttributeNode(self.pos, obj=type_arg, attribute=self.entry.name) skip_dispatch = not (is_module_scope and Options.lookup_module_cpdef) c_call = ExprNodes.SimpleCallNode( self.pos, function=cfunc, args=[ExprNodes.NameNode(self.pos, name=n) for n in arg_names], wrapper_call=skip_dispatch) return ReturnStatNode(pos=self.pos, return_type=PyrexTypes.py_object_type, value=c_call) def declare_arguments(self, env): for arg in self.type.args: if not arg.name: error(arg.pos, "Missing argument name") self.declare_argument(env, arg) def need_gil_acquisition(self, lenv): return self.type.with_gil def nogil_check(self, env): type = self.type with_gil = type.with_gil if type.nogil and not with_gil: if type.return_type.is_pyobject: error(self.pos, "Function with Python return type cannot be declared nogil") for entry in self.local_scope.var_entries: if entry.type.is_pyobject and not entry.in_with_gil_block: error(self.pos, "Function 
declared nogil has Python locals or temporaries") def analyse_expressions(self, env): self.local_scope.directives = env.directives if self.py_func_stat is not None: # this will also analyse the default values and the function name assignment self.py_func_stat = self.py_func_stat.analyse_expressions(env) elif self.py_func is not None: # this will also analyse the default values self.py_func = self.py_func.analyse_expressions(env) else: self.analyse_default_values(env) self.analyse_annotations(env) self.acquire_gil = self.need_gil_acquisition(self.local_scope) return self def needs_assignment_synthesis(self, env, code=None): return False def generate_function_header(self, code, with_pymethdef, with_opt_args=1, with_dispatch=1, cname=None): scope = self.local_scope arg_decls = [] type = self.type for arg in type.args[:len(type.args)-type.optional_arg_count]: arg_decl = arg.declaration_code() entry = scope.lookup(arg.name) if not entry.cf_used: arg_decl = 'CYTHON_UNUSED %s' % arg_decl arg_decls.append(arg_decl) if with_dispatch and self.overridable: dispatch_arg = PyrexTypes.c_int_type.declaration_code( Naming.skip_dispatch_cname) if self.override: arg_decls.append(dispatch_arg) else: arg_decls.append('CYTHON_UNUSED %s' % dispatch_arg) if type.optional_arg_count and with_opt_args: arg_decls.append(type.op_arg_struct.declaration_code(Naming.optional_args_cname)) if type.has_varargs: arg_decls.append("...") if not arg_decls: arg_decls = ["void"] if cname is None: cname = self.entry.func_cname entity = type.function_header_code(cname, ', '.join(arg_decls)) if self.entry.visibility == 'private' and '::' not in cname: storage_class = "static " else: storage_class = "" dll_linkage = None modifiers = code.build_function_modifiers(self.entry.func_modifiers) header = self.return_type.declaration_code(entity, dll_linkage=dll_linkage) #print (storage_class, modifiers, header) needs_proto = self.is_c_class_method or self.entry.is_cproperty if self.template_declaration: if 
needs_proto: code.globalstate.parts['module_declarations'].putln(self.template_declaration) code.putln(self.template_declaration) if needs_proto: preprocessor_guard = self.get_preprocessor_guard() if preprocessor_guard: code.globalstate.parts['module_declarations'].putln(preprocessor_guard) code.globalstate.parts['module_declarations'].putln( "%s%s%s; /* proto*/" % (storage_class, modifiers, header)) if preprocessor_guard: code.globalstate.parts['module_declarations'].putln("#endif") code.putln("%s%s%s {" % (storage_class, modifiers, header)) code.globalstate.use_entry_utility_code(self.entry) def generate_argument_declarations(self, env, code): scope = self.local_scope for arg in self.args: if arg.default: entry = scope.lookup(arg.name) if self.override or entry.cf_used: result = arg.calculate_default_value_code(code) code.putln('%s = %s;' % ( arg.type.declaration_code(arg.cname), result)) def generate_keyword_list(self, code): pass def generate_argument_parsing_code(self, env, code): i = 0 used = 0 scope = self.local_scope if self.type.optional_arg_count: code.putln('if (%s) {' % Naming.optional_args_cname) for arg in self.args: if arg.default: entry = scope.lookup(arg.name) if self.override or entry.cf_used: code.putln('if (%s->%sn > %s) {' % (Naming.optional_args_cname, Naming.pyrex_prefix, i)) declarator = arg.declarator while not hasattr(declarator, 'name'): declarator = declarator.base code.putln('%s = %s->%s;' % (arg.cname, Naming.optional_args_cname, self.type.opt_arg_cname(declarator.name))) used += 1 i += 1 for _ in range(used): code.putln('}') code.putln('}') # Move arguments into closure if required def put_into_closure(entry): if entry.in_closure and not arg.default: code.putln('%s = %s;' % (entry.cname, entry.original_cname)) if entry.type.is_memoryviewslice: entry.type.generate_incref_memoryviewslice(code, entry.cname, True) else: code.put_var_incref(entry) code.put_var_giveref(entry) for arg in self.args: 
put_into_closure(scope.lookup_here(arg.name)) def generate_argument_conversion_code(self, code): pass def generate_argument_type_tests(self, code): # Generate type tests for args whose type in a parent # class is a supertype of the declared type. for arg in self.type.args: if arg.needs_type_test: self.generate_arg_type_test(arg, code) elif arg.type.is_pyobject and not arg.accept_none: self.generate_arg_none_check(arg, code) def generate_execution_code(self, code): if code.globalstate.directives['linetrace']: code.mark_pos(self.pos) code.putln("") # generate line tracing code super().generate_execution_code(code) if self.py_func_stat: self.py_func_stat.generate_execution_code(code) def error_value(self): if self.return_type.is_pyobject: return "0" else: return self.entry.type.exception_value def caller_will_check_exceptions(self): return self.entry.type.exception_check def generate_wrapper_functions(self, code): # If the C signature of a function has changed, we need to generate # wrappers to put in the slots here. 
k = 0 entry = self.entry func_type = entry.type while entry.prev_entry is not None: k += 1 entry = entry.prev_entry entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k) code.putln() self.generate_function_header( code, 0, with_dispatch=entry.type.is_overridable, with_opt_args=entry.type.optional_arg_count, cname=entry.func_cname) if not self.return_type.is_void: code.put('return ') args = self.type.args arglist = [arg.cname for arg in args[:len(args)-self.type.optional_arg_count]] if entry.type.is_overridable: arglist.append(Naming.skip_dispatch_cname) elif func_type.is_overridable: arglist.append('0') if entry.type.optional_arg_count: arglist.append(Naming.optional_args_cname) elif func_type.optional_arg_count: arglist.append('NULL') code.putln('%s(%s);' % (self.entry.func_cname, ', '.join(arglist))) code.putln('}') def get_preprocessor_guard(self): super_guard = super().get_preprocessor_guard() if self.c_compile_guard: assert not super_guard # Don't currently know how to combine return f"#if {self.c_compile_guard}" return super_guard
CFuncDefNode
python
airbytehq__airbyte
airbyte-integrations/bases/base-normalization/normalization/transform_catalog/table_name_registry.py
{ "start": 1533, "end": 2799 }
class ____(Dict[str, List[NormalizedNameMetadata]]):
    """
    An intermediate registry used by TableNameRegistry to detect conflicts in table names per schema.

    Keys are normalized "<schema>.<table>" identifiers (see get_table_key);
    each value lists every stream that normalized onto that identifier, so a
    key with more than one entry is a naming collision.
    """

    def __init__(self, name_transformer: DestinationNameTransformer):
        # Modern zero-argument super() replaces the legacy two-argument form
        # (the file already uses Python-3-only f-strings).
        super().__init__()
        self.name_transformer = name_transformer

    def add(
        self, intermediate_schema: str, schema: str, json_path: List[str], stream_name: str, table_name: str
    ) -> "NormalizedTablesRegistry":
        """Record one (schema, table) occurrence; returns self for chaining."""
        key = self.get_table_key(schema, table_name)
        # setdefault replaces the manual "if key not in self: self[key] = []".
        self.setdefault(key, []).append(
            NormalizedNameMetadata(intermediate_schema, schema, json_path, stream_name, table_name)
        )
        return self

    def get_table_key(self, schema: str, table_name: str) -> str:
        """Build the normalized "<schema>.<table>" key used to index this registry."""
        return ".".join(
            (
                self.name_transformer.normalize_schema_name(schema, False, False),
                self.name_transformer.normalize_table_name(table_name, False, False),
            )
        )

    def get_value(self, schema: str, table_name: str) -> List[NormalizedNameMetadata]:
        """Return all entries registered under (schema, table); raises KeyError if absent."""
        return self[self.get_table_key(schema, table_name)]

    def has_collisions(self, key: str) -> bool:
        """True when more than one stream normalized to the same key."""
        return len(self[key]) > 1
NormalizedTablesRegistry
python
kubernetes-client__python
kubernetes/client/models/v1_validating_admission_policy_binding_spec.py
{ "start": 383, "end": 11243 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'match_resources': 'V1MatchResources', 'param_ref': 'V1ParamRef', 'policy_name': 'str', 'validation_actions': 'list[str]' } attribute_map = { 'match_resources': 'matchResources', 'param_ref': 'paramRef', 'policy_name': 'policyName', 'validation_actions': 'validationActions' } def __init__(self, match_resources=None, param_ref=None, policy_name=None, validation_actions=None, local_vars_configuration=None): # noqa: E501 """V1ValidatingAdmissionPolicyBindingSpec - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._match_resources = None self._param_ref = None self._policy_name = None self._validation_actions = None self.discriminator = None if match_resources is not None: self.match_resources = match_resources if param_ref is not None: self.param_ref = param_ref if policy_name is not None: self.policy_name = policy_name if validation_actions is not None: self.validation_actions = validation_actions @property def match_resources(self): """Gets the match_resources of this V1ValidatingAdmissionPolicyBindingSpec. # noqa: E501 :return: The match_resources of this V1ValidatingAdmissionPolicyBindingSpec. # noqa: E501 :rtype: V1MatchResources """ return self._match_resources @match_resources.setter def match_resources(self, match_resources): """Sets the match_resources of this V1ValidatingAdmissionPolicyBindingSpec. :param match_resources: The match_resources of this V1ValidatingAdmissionPolicyBindingSpec. 
# noqa: E501 :type: V1MatchResources """ self._match_resources = match_resources @property def param_ref(self): """Gets the param_ref of this V1ValidatingAdmissionPolicyBindingSpec. # noqa: E501 :return: The param_ref of this V1ValidatingAdmissionPolicyBindingSpec. # noqa: E501 :rtype: V1ParamRef """ return self._param_ref @param_ref.setter def param_ref(self, param_ref): """Sets the param_ref of this V1ValidatingAdmissionPolicyBindingSpec. :param param_ref: The param_ref of this V1ValidatingAdmissionPolicyBindingSpec. # noqa: E501 :type: V1ParamRef """ self._param_ref = param_ref @property def policy_name(self): """Gets the policy_name of this V1ValidatingAdmissionPolicyBindingSpec. # noqa: E501 PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required. # noqa: E501 :return: The policy_name of this V1ValidatingAdmissionPolicyBindingSpec. # noqa: E501 :rtype: str """ return self._policy_name @policy_name.setter def policy_name(self, policy_name): """Sets the policy_name of this V1ValidatingAdmissionPolicyBindingSpec. PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required. # noqa: E501 :param policy_name: The policy_name of this V1ValidatingAdmissionPolicyBindingSpec. # noqa: E501 :type: str """ self._policy_name = policy_name @property def validation_actions(self): """Gets the validation_actions of this V1ValidatingAdmissionPolicyBindingSpec. # noqa: E501 validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions. 
Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. validationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action. The supported actions values are: \"Deny\" specifies that a validation failure results in a denied request. \"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses. \"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\"` Clients should expect to handle additional values by ignoring any values not recognized. \"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers. Required. 
# noqa: E501 :return: The validation_actions of this V1ValidatingAdmissionPolicyBindingSpec. # noqa: E501 :rtype: list[str] """ return self._validation_actions @validation_actions.setter def validation_actions(self, validation_actions): """Sets the validation_actions of this V1ValidatingAdmissionPolicyBindingSpec. validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions. Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy. validationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action. The supported actions values are: \"Deny\" specifies that a validation failure results in a denied request. \"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses. \"Audit\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\"` Clients should expect to handle additional values by ignoring any values not recognized. \"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers. Required. # noqa: E501 :param validation_actions: The validation_actions of this V1ValidatingAdmissionPolicyBindingSpec. 
# noqa: E501 :type: list[str] """ self._validation_actions = validation_actions def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1ValidatingAdmissionPolicyBindingSpec): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1ValidatingAdmissionPolicyBindingSpec): return True return self.to_dict() != other.to_dict()
V1ValidatingAdmissionPolicyBindingSpec
python
django__django
django/test/runner.py
{ "start": 1060, "end": 1952 }
class ____(logging.Formatter): def format(self, record): if (alias := getattr(record, "alias", None)) in connections: format_sql = connections[alias].ops.format_debug_sql sql = None formatted_sql = None if args := record.args: if isinstance(args, tuple) and len(args) > 1 and (sql := args[1]): record.args = (args[0], formatted_sql := format_sql(sql), *args[2:]) elif isinstance(record.args, dict) and (sql := record.args.get("sql")): record.args["sql"] = formatted_sql = format_sql(sql) if extra_sql := getattr(record, "sql", None): if extra_sql == sql: record.sql = formatted_sql else: record.sql = format_sql(extra_sql) return super().format(record)
QueryFormatter
python
django__django
tests/decorators/test_vary.py
{ "start": 1360, "end": 2409 }
class ____(SimpleTestCase): def test_wrapped_sync_function_is_not_coroutine_function(self): def sync_view(request): return HttpResponse() wrapped_view = vary_on_cookie(sync_view) self.assertIs(iscoroutinefunction(wrapped_view), False) def test_wrapped_async_function_is_coroutine_function(self): async def async_view(request): return HttpResponse() wrapped_view = vary_on_cookie(async_view) self.assertIs(iscoroutinefunction(wrapped_view), True) def test_vary_on_cookie_decorator(self): @vary_on_cookie def sync_view(request): return HttpResponse() response = sync_view(HttpRequest()) self.assertEqual(response.get("Vary"), "Cookie") async def test_vary_on_cookie_decorator_async_view(self): @vary_on_cookie async def async_view(request): return HttpResponse() response = await async_view(HttpRequest()) self.assertEqual(response.get("Vary"), "Cookie")
VaryOnCookieTests
python
walkccc__LeetCode
solutions/3008. Find Beautiful Indices in the Given Array II/3008.py
{ "start": 0, "end": 1699 }
class ____: # Same as 3006. Find Beautiful Indices in the Given Array I def beautifulIndices(self, s: str, a: str, b: str, k: int) -> list[int]: ans = [] indicesA = self._kmp(s, a) indicesB = self._kmp(s, b) indicesBIndex = 0 # indicesB' index for i in indicesA: # The constraint is: |j - i| <= k. So, -k <= j - i <= k. So, move # `indicesBIndex` s.t. j - i >= -k, where j := indicesB[indicesBIndex]. while indicesBIndex < len(indicesB) and indicesB[indicesBIndex] - i < -k: indicesBIndex += 1 if indicesBIndex < len(indicesB) and indicesB[indicesBIndex] - i <= k: ans.append(i) return ans def _kmp(self, s: str, pattern: str) -> list[int]: """Returns the starting indices of all occurrences of the pattern in `s`.""" def getLPS(pattern: str) -> list[int]: """ Returns the lps array, where lps[i] is the length of the longest prefix of pattern[0..i] which is also a suffix of this substring. """ lps = [0] * len(pattern) j = 0 for i in range(1, len(pattern)): while j > 0 and pattern[j] != pattern[i]: j = lps[j - 1] if pattern[i] == pattern[j]: lps[i] = j + 1 j += 1 return lps lps = getLPS(pattern) res = [] i = 0 # s' index j = 0 # pattern's index while i < len(s): if s[i] == pattern[j]: i += 1 j += 1 if j == len(pattern): res.append(i - j) j = lps[j - 1] elif j != 0: # Mismatch after j matches. # Don't match lps[0..lps[j - 1]] since they will match anyway. j = lps[j - 1] else: i += 1 return res
Solution
python
PyCQA__pylint
tests/functional/u/unexpected_special_method_signature.py
{ "start": 2008, "end": 2810 }
class ____: def __new__(cls, test, multiple, args): pass # pylint: disable-next=too-many-positional-arguments def __init__(self, this, can, have, multiple, args, as_well): pass def __call__(self, also, trv, for_this): pass def __round__(self, n): pass def __index__(self, n=42): """Expects 0 args, but we are taking in account arguments with defaults.""" def __deepcopy__(self, memo): pass def __format__(self, format_specification=''): pass def __copy__(self, this=None, is_not=None, necessary=None): pass @staticmethod def __enter__(): pass @staticmethod def __getitem__(index): pass @classmethod def __init_subclass__(cls, blabla): pass
Valid
python
facebook__pyre-check
tools/generate_taint_models/__init__.py
{ "start": 2459, "end": 2918 }
class ____(enum.IntEnum): SUCCESS = 0 # Unexpected internal error INTERNAL_ERROR = 1 # Error that originated from the user's code, not the model generator. # For instance, when importing modules or initializing things. USER_ERROR = 2 # Pyre start errors PYRE_INTERNAL_ERROR = 3 BUCK_INTERNAL_ERROR = 4 BUCK_USER_ERROR = 5 CONFIGURATION_ERROR = 6 WATCHMAN_ERROR = 7 PYRE_QUERY_ERROR = 8 @dataclass
ExitCode
python
django__django
tests/test_client_regress/tests.py
{ "start": 36965, "end": 42187 }
class ____(TestDataMixin, TestCase): def test_session(self): "The session isn't lost if a user logs in" # The session doesn't exist to start. response = self.client.get("/check_session/") self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b"NO") # This request sets a session variable. response = self.client.get("/set_session/") self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b"set_session") # The session has been modified response = self.client.get("/check_session/") self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b"YES") # Log in login = self.client.login(username="testclient", password="password") self.assertTrue(login, "Could not log in") # Session should still contain the modified value response = self.client.get("/check_session/") self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b"YES") def test_session_initiated(self): session = self.client.session session["session_var"] = "foo" session.save() response = self.client.get("/check_session/") self.assertEqual(response.content, b"foo") def test_logout(self): """Logout should work whether the user is logged in or not (#9978).""" self.client.logout() login = self.client.login(username="testclient", password="password") self.assertTrue(login, "Could not log in") self.client.logout() self.client.logout() def test_logout_with_user(self): """Logout should send user_logged_out signal if user was logged in.""" def listener(*args, **kwargs): listener.executed = True self.assertEqual(kwargs["sender"], User) listener.executed = False user_logged_out.connect(listener) self.client.login(username="testclient", password="password") self.client.logout() user_logged_out.disconnect(listener) self.assertTrue(listener.executed) @override_settings(AUTH_USER_MODEL="test_client_regress.CustomUser") def test_logout_with_custom_user(self): """ Logout should send user_logged_out signal if custom user was logged in. 
""" def listener(*args, **kwargs): self.assertEqual(kwargs["sender"], CustomUser) listener.executed = True listener.executed = False u = CustomUser.custom_objects.create(email="test@test.com") u.set_password("password") u.save() user_logged_out.connect(listener) self.client.login(username="test@test.com", password="password") self.client.logout() user_logged_out.disconnect(listener) self.assertTrue(listener.executed) @override_settings( AUTHENTICATION_BACKENDS=( "django.contrib.auth.backends.ModelBackend", "test_client_regress.auth_backends.CustomUserBackend", ) ) def test_logout_with_custom_auth_backend(self): "Request a logout after logging in with custom authentication backend" def listener(*args, **kwargs): self.assertEqual(kwargs["sender"], CustomUser) listener.executed = True listener.executed = False u = CustomUser.custom_objects.create(email="test@test.com") u.set_password("password") u.save() user_logged_out.connect(listener) self.client.login(username="test@test.com", password="password") self.client.logout() user_logged_out.disconnect(listener) self.assertTrue(listener.executed) def test_logout_without_user(self): """Logout should send signal even if user not authenticated.""" def listener(user, *args, **kwargs): listener.user = user listener.executed = True listener.executed = False user_logged_out.connect(listener) self.client.login(username="incorrect", password="password") self.client.logout() user_logged_out.disconnect(listener) self.assertTrue(listener.executed) self.assertIsNone(listener.user) def test_login_with_user(self): """Login should send user_logged_in signal on successful login.""" def listener(*args, **kwargs): listener.executed = True listener.executed = False user_logged_in.connect(listener) self.client.login(username="testclient", password="password") user_logged_out.disconnect(listener) self.assertTrue(listener.executed) def test_login_without_signal(self): """Login shouldn't send signal if user wasn't logged in""" def 
listener(*args, **kwargs): listener.executed = True listener.executed = False user_logged_in.connect(listener) self.client.login(username="incorrect", password="password") user_logged_in.disconnect(listener) self.assertFalse(listener.executed) @override_settings(ROOT_URLCONF="test_client_regress.urls")
SessionTests
python
wandb__wandb
wandb/plot/custom_chart.py
{ "start": 120, "end": 2131 }
class ____: spec_name: str fields: dict[str, Any] string_fields: dict[str, Any] key: str = "" panel_type: str = "Vega2" split_table: bool = False @property def table_key(self) -> str: if not self.key: raise wandb.Error("Key for the custom chart spec is not set.") if self.split_table: return f"Custom Chart Tables/{self.key}_table" return f"{self.key}_table" @property def config_value(self) -> dict[str, Any]: return { "panel_type": self.panel_type, "panel_config": { "panelDefId": self.spec_name, "fieldSettings": self.fields, "stringSettings": self.string_fields, "transform": {"name": "tableWithLeafColNames"}, "userQuery": { "queryFields": [ { "name": "runSets", "args": [{"name": "runSets", "value": "${runSets}"}], "fields": [ {"name": "id", "fields": []}, {"name": "name", "fields": []}, {"name": "_defaultColorIndex", "fields": []}, { "name": "summaryTable", "args": [ { "name": "tableKey", "value": self.table_key, } ], "fields": [], }, ], } ], }, }, } @property def config_key(self) -> tuple[str, str, str]: return ("_wandb", "visualize", self.key) @dataclass
CustomChartSpec
python
django__django
tests/admin_filters/tests.py
{ "start": 6840, "end": 7185 }
class ____(ModelAdmin): list_filter = ( "year", "is_best_seller", "date_registered", "no", ("author", RelatedOnlyFieldListFilter), ("contributors", RelatedOnlyFieldListFilter), ("employee__department", RelatedOnlyFieldListFilter), ) ordering = ("-id",)
BookAdminRelatedOnlyFilter
python
django__django
tests/db_functions/text/test_strindex.py
{ "start": 190, "end": 2672 }
class ____(TestCase): def test_annotate_charfield(self): Author.objects.create(name="George. R. R. Martin") Author.objects.create(name="J. R. R. Tolkien") Author.objects.create(name="Terry Pratchett") authors = Author.objects.annotate(fullstop=StrIndex("name", Value("R."))) self.assertQuerySetEqual( authors.order_by("name"), [9, 4, 0], lambda a: a.fullstop ) def test_annotate_textfield(self): Article.objects.create( title="How to Django", text="This is about How to Django.", written=timezone.now(), ) Article.objects.create( title="How to Tango", text="Won't find anything here.", written=timezone.now(), ) articles = Article.objects.annotate(title_pos=StrIndex("text", "title")) self.assertQuerySetEqual( articles.order_by("title"), [15, 0], lambda a: a.title_pos ) def test_order_by(self): Author.objects.create(name="Terry Pratchett") Author.objects.create(name="J. R. R. Tolkien") Author.objects.create(name="George. R. R. Martin") self.assertQuerySetEqual( Author.objects.order_by(StrIndex("name", Value("R.")).asc()), [ "Terry Pratchett", "J. R. R. Tolkien", "George. R. R. Martin", ], lambda a: a.name, ) self.assertQuerySetEqual( Author.objects.order_by(StrIndex("name", Value("R.")).desc()), [ "George. R. R. Martin", "J. R. R. Tolkien", "Terry Pratchett", ], lambda a: a.name, ) def test_unicode_values(self): Author.objects.create(name="ツリー") Author.objects.create(name="皇帝") Author.objects.create(name="皇帝 ツリー") authors = Author.objects.annotate(sb=StrIndex("name", Value("リ"))) self.assertQuerySetEqual(authors.order_by("name"), [2, 0, 5], lambda a: a.sb) def test_filtering(self): Author.objects.create(name="George. R. R. Martin") Author.objects.create(name="Terry Pratchett") self.assertQuerySetEqual( Author.objects.annotate(middle_name=StrIndex("name", Value("R."))).filter( middle_name__gt=0 ), ["George. R. R. Martin"], lambda a: a.name, )
StrIndexTests
python
pandas-dev__pandas
pandas/tests/reshape/concat/test_append_common.py
{ "start": 1433, "end": 25913 }
class ____: """ Test common dtype coercion rules between concat and append. """ def test_dtypes(self, item, index_or_series, using_infer_string): # to confirm test case covers intended dtypes typ, vals = item obj = index_or_series(vals) if typ == "object" and using_infer_string: typ = "string" if isinstance(obj, Index): assert obj.dtype == typ elif isinstance(obj, Series): if typ.startswith("period"): assert obj.dtype == "Period[M]" else: assert obj.dtype == typ def test_concatlike_same_dtypes(self, item): # GH 13660 typ1, vals1 = item vals2 = vals1 vals3 = vals1 if typ1 == "category": exp_data = Categorical(list(vals1) + list(vals2)) exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3)) else: exp_data = vals1 + vals2 exp_data3 = vals1 + vals2 + vals3 # ----- Index ----- # # index.append res = Index(vals1).append(Index(vals2)) exp = Index(exp_data) tm.assert_index_equal(res, exp) # 3 elements res = Index(vals1).append([Index(vals2), Index(vals3)]) exp = Index(exp_data3) tm.assert_index_equal(res, exp) # index.append name mismatch i1 = Index(vals1, name="x") i2 = Index(vals2, name="y") res = i1.append(i2) exp = Index(exp_data) tm.assert_index_equal(res, exp) # index.append name match i1 = Index(vals1, name="x") i2 = Index(vals2, name="x") res = i1.append(i2) exp = Index(exp_data, name="x") tm.assert_index_equal(res, exp) # cannot append non-index with pytest.raises(TypeError, match="all inputs must be Index"): Index(vals1).append(vals2) with pytest.raises(TypeError, match="all inputs must be Index"): Index(vals1).append([Index(vals2), vals3]) # ----- Series ----- # # series.append res = Series(vals1)._append_internal(Series(vals2), ignore_index=True) exp = Series(exp_data) tm.assert_series_equal(res, exp, check_index_type=True) # concat res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True) tm.assert_series_equal(res, exp, check_index_type=True) # 3 elements exp = Series(exp_data3) res = pd.concat( [Series(vals1), Series(vals2), 
Series(vals3)], ignore_index=True, ) tm.assert_series_equal(res, exp) # name mismatch s1 = Series(vals1, name="x") s2 = Series(vals2, name="y") res = s1._append_internal(s2, ignore_index=True) exp = Series(exp_data) tm.assert_series_equal(res, exp, check_index_type=True) res = pd.concat([s1, s2], ignore_index=True) tm.assert_series_equal(res, exp, check_index_type=True) # name match s1 = Series(vals1, name="x") s2 = Series(vals2, name="x") res = s1._append_internal(s2, ignore_index=True) exp = Series(exp_data, name="x") tm.assert_series_equal(res, exp, check_index_type=True) res = pd.concat([s1, s2], ignore_index=True) tm.assert_series_equal(res, exp, check_index_type=True) # cannot append non-index msg = ( r"cannot concatenate object of type '.+'; " "only Series and DataFrame objs are valid" ) with pytest.raises(TypeError, match=msg): pd.concat([Series(vals1), vals2]) with pytest.raises(TypeError, match=msg): pd.concat([Series(vals1), Series(vals2), vals3]) def test_concatlike_dtypes_coercion(self, item, item2, request): # GH 13660 typ1, vals1 = item typ2, vals2 = item2 vals3 = vals2 # basically infer exp_index_dtype = None exp_series_dtype = None if typ1 == typ2: pytest.skip("same dtype is tested in test_concatlike_same_dtypes") elif typ1 == "category" or typ2 == "category": pytest.skip("categorical type tested elsewhere") # specify expected dtype if typ1 == "bool" and typ2 in ("int64", "float64"): # series coerces to numeric based on numpy rule # index doesn't because bool is object dtype exp_series_dtype = typ2 mark = pytest.mark.xfail(reason="GH#39187 casting to object") request.applymarker(mark) elif typ2 == "bool" and typ1 in ("int64", "float64"): exp_series_dtype = typ1 mark = pytest.mark.xfail(reason="GH#39187 casting to object") request.applymarker(mark) elif typ1 in {"datetime64[ns, US/Eastern]", "timedelta64[ns]"} or typ2 in { "datetime64[ns, US/Eastern]", "timedelta64[ns]", }: exp_index_dtype = object exp_series_dtype = object exp_data = vals1 + vals2 
exp_data3 = vals1 + vals2 + vals3 # ----- Index ----- # # index.append # GH#39817 res = Index(vals1).append(Index(vals2)) exp = Index(exp_data, dtype=exp_index_dtype) tm.assert_index_equal(res, exp) # 3 elements res = Index(vals1).append([Index(vals2), Index(vals3)]) exp = Index(exp_data3, dtype=exp_index_dtype) tm.assert_index_equal(res, exp) # ----- Series ----- # # series._append # GH#39817 res = Series(vals1)._append_internal(Series(vals2), ignore_index=True) exp = Series(exp_data, dtype=exp_series_dtype) tm.assert_series_equal(res, exp, check_index_type=True) # concat # GH#39817 res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True) tm.assert_series_equal(res, exp, check_index_type=True) # 3 elements # GH#39817 exp = Series(exp_data3, dtype=exp_series_dtype) res = pd.concat( [Series(vals1), Series(vals2), Series(vals3)], ignore_index=True, ) tm.assert_series_equal(res, exp) def test_concatlike_common_coerce_to_pandas_object(self): # GH 13626 # result must be Timestamp/Timedelta, not datetime.datetime/timedelta dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"]) tdi = pd.TimedeltaIndex(["1 days", "2 days"]) exp = Index( [ pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02"), pd.Timedelta("1 days"), pd.Timedelta("2 days"), ] ) res = dti.append(tdi) tm.assert_index_equal(res, exp) assert isinstance(res[0], pd.Timestamp) assert isinstance(res[-1], pd.Timedelta) dts = Series(dti) tds = Series(tdi) res = dts._append_internal(tds) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) assert isinstance(res.iloc[0], pd.Timestamp) assert isinstance(res.iloc[-1], pd.Timedelta) res = pd.concat([dts, tds]) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) assert isinstance(res.iloc[0], pd.Timestamp) assert isinstance(res.iloc[-1], pd.Timedelta) def test_concatlike_datetimetz(self, tz_aware_fixture): tz = tz_aware_fixture # GH 7795 dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz) dti2 = pd.DatetimeIndex(["2012-01-01", 
"2012-01-02"], tz=tz) exp = pd.DatetimeIndex( ["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz ) res = dti1.append(dti2) tm.assert_index_equal(res, exp) dts1 = Series(dti1) dts2 = Series(dti2) res = dts1._append_internal(dts2) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) res = pd.concat([dts1, dts2]) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) @pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"]) def test_concatlike_datetimetz_short(self, tz): # GH#7795 ix1 = pd.date_range( start="2014-07-15", end="2014-07-17", freq="D", tz=tz, unit="ns" ) ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz) df1 = DataFrame(0, index=ix1, columns=["A", "B"]) df2 = DataFrame(0, index=ix2, columns=["A", "B"]) exp_idx = pd.DatetimeIndex( ["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"], tz=tz, ).as_unit("ns") exp = DataFrame(0, index=exp_idx, columns=["A", "B"]) tm.assert_frame_equal(pd.concat([df1, df2]), exp) def test_concatlike_datetimetz_to_object(self, tz_aware_fixture): tz = tz_aware_fixture # GH 13660 # different tz coerces to object dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz) dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"]) exp = Index( [ pd.Timestamp("2011-01-01", tz=tz), pd.Timestamp("2011-01-02", tz=tz), pd.Timestamp("2012-01-01"), pd.Timestamp("2012-01-02"), ], dtype=object, ) res = dti1.append(dti2) tm.assert_index_equal(res, exp) dts1 = Series(dti1) dts2 = Series(dti2) res = dts1._append_internal(dts2) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) res = pd.concat([dts1, dts2]) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) # different tz tz_diff = zoneinfo.ZoneInfo("US/Hawaii") dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz_diff) exp = Index( [ pd.Timestamp("2011-01-01", tz=tz), pd.Timestamp("2011-01-02", tz=tz), pd.Timestamp("2012-01-01", tz=tz_diff), pd.Timestamp("2012-01-02", tz=tz_diff), ], dtype=object, 
) res = dti1.append(dti3) tm.assert_index_equal(res, exp) dts1 = Series(dti1) dts3 = Series(dti3) res = dts1._append_internal(dts3) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) res = pd.concat([dts1, dts3]) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) def test_concatlike_common_period(self): # GH 13660 pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M") pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M") exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M") res = pi1.append(pi2) tm.assert_index_equal(res, exp) ps1 = Series(pi1) ps2 = Series(pi2) res = ps1._append_internal(ps2) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) res = pd.concat([ps1, ps2]) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) def test_concatlike_common_period_diff_freq_to_object(self): # GH 13221 pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M") pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D") exp = Index( [ pd.Period("2011-01", freq="M"), pd.Period("2011-02", freq="M"), pd.Period("2012-01-01", freq="D"), pd.Period("2012-02-01", freq="D"), ], dtype=object, ) res = pi1.append(pi2) tm.assert_index_equal(res, exp) ps1 = Series(pi1) ps2 = Series(pi2) res = ps1._append_internal(ps2) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) res = pd.concat([ps1, ps2]) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) def test_concatlike_common_period_mixed_dt_to_object(self): # GH 13221 # different datetimelike pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M") tdi = pd.TimedeltaIndex(["1 days", "2 days"]) exp = Index( [ pd.Period("2011-01", freq="M"), pd.Period("2011-02", freq="M"), pd.Timedelta("1 days"), pd.Timedelta("2 days"), ], dtype=object, ) res = pi1.append(tdi) tm.assert_index_equal(res, exp) ps1 = Series(pi1) tds = Series(tdi) res = ps1._append_internal(tds) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) res = pd.concat([ps1, tds]) 
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) # inverse exp = Index( [ pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Period("2011-01", freq="M"), pd.Period("2011-02", freq="M"), ], dtype=object, ) res = tdi.append(pi1) tm.assert_index_equal(res, exp) ps1 = Series(pi1) tds = Series(tdi) res = tds._append_internal(ps1) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) res = pd.concat([tds, ps1]) tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) def test_concat_categorical(self): # GH 13524 # same categories -> category s1 = Series([1, 2, np.nan], dtype="category") s2 = Series([2, 1, 2], dtype="category") exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category") tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(s1._append_internal(s2, ignore_index=True), exp) # partially different categories => not-category s1 = Series([3, 2], dtype="category") s2 = Series([2, 1], dtype="category") exp = Series([3, 2, 2, 1]) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(s1._append_internal(s2, ignore_index=True), exp) # completely different categories (same dtype) => not-category s1 = Series([10, 11, np.nan], dtype="category") s2 = Series([np.nan, 1, 3, 2], dtype="category") exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(s1._append_internal(s2, ignore_index=True), exp) def test_union_categorical_same_categories_different_order(self): # https://github.com/pandas-dev/pandas/issues/19096 a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"])) b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"])) result = pd.concat([a, b], ignore_index=True) expected = Series( Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"]) ) tm.assert_series_equal(result, expected) def test_concat_categorical_coercion(self): # GH 13524 # 
category + not-category => not-category s1 = Series([1, 2, np.nan], dtype="category") s2 = Series([2, 1, 2]) exp = Series([1, 2, np.nan, 2, 1, 2], dtype=np.float64) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(s1._append_internal(s2, ignore_index=True), exp) # result shouldn't be affected by 1st elem dtype exp = Series([2, 1, 2, 1, 2, np.nan], dtype=np.float64) tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) tm.assert_series_equal(s2._append_internal(s1, ignore_index=True), exp) # all values are not in category => not-category s1 = Series([3, 2], dtype="category") s2 = Series([2, 1]) exp = Series([3, 2, 2, 1]) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(s1._append_internal(s2, ignore_index=True), exp) exp = Series([2, 1, 3, 2]) tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) tm.assert_series_equal(s2._append_internal(s1, ignore_index=True), exp) # completely different categories => not-category s1 = Series([10, 11, np.nan], dtype="category") s2 = Series([1, 3, 2]) exp = Series([10, 11, np.nan, 1, 3, 2], dtype=np.float64) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(s1._append_internal(s2, ignore_index=True), exp) exp = Series([1, 3, 2, 10, 11, np.nan], dtype=np.float64) tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) tm.assert_series_equal(s2._append_internal(s1, ignore_index=True), exp) # different dtype => not-category s1 = Series([10, 11, np.nan], dtype="category") s2 = Series(["a", "b", "c"]) exp = Series([10, 11, np.nan, "a", "b", "c"]) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(s1._append_internal(s2, ignore_index=True), exp) exp = Series(["a", "b", "c", 10, 11, np.nan]) tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) tm.assert_series_equal(s2._append_internal(s1, ignore_index=True), exp) # if 
normal series only contains NaN-likes => not-category s1 = Series([10, 11], dtype="category") s2 = Series([np.nan, np.nan, np.nan]) exp = Series([10, 11, np.nan, np.nan, np.nan]) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(s1._append_internal(s2, ignore_index=True), exp) exp = Series([np.nan, np.nan, np.nan, 10, 11]) tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) tm.assert_series_equal(s2._append_internal(s1, ignore_index=True), exp) def test_concat_categorical_3elem_coercion(self): # GH 13524 # mixed dtypes => not-category s1 = Series([1, 2, np.nan], dtype="category") s2 = Series([2, 1, 2], dtype="category") s3 = Series([1, 2, 1, 2, np.nan]) exp = Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float") tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp) exp = Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float") tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp) # values are all in either category => not-category s1 = Series([4, 5, 6], dtype="category") s2 = Series([1, 2, 3], dtype="category") s3 = Series([1, 3, 4]) exp = Series([4, 5, 6, 1, 2, 3, 1, 3, 4]) tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp) exp = Series([1, 3, 4, 4, 5, 6, 1, 2, 3]) tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp) # values are all in either category => not-category s1 = Series([4, 5, 6], dtype="category") s2 = Series([1, 2, 3], dtype="category") s3 = Series([10, 11, 12]) exp = Series([4, 5, 6, 1, 2, 3, 10, 11, 12]) tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp) exp = Series([10, 11, 12, 4, 5, 6, 1, 2, 3]) tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp) def test_concat_categorical_multi_coercion(self): # GH 13524 s1 = Series([1, 3], dtype="category") s2 = Series([3, 4], dtype="category") s3 = Series([2, 3]) s4 = Series([2, 2], dtype="category") s5 = Series([1, 
np.nan]) s6 = Series([1, 3, 2], dtype="category") # mixed dtype, values are all in categories => not-category exp = Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 3, 2]) res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True) tm.assert_series_equal(res, exp) exp = Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3]) res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True) tm.assert_series_equal(res, exp) def test_concat_categorical_ordered(self): # GH 13524 s1 = Series(Categorical([1, 2, np.nan], ordered=True)) s2 = Series(Categorical([2, 1, 2], ordered=True)) exp = Series(Categorical([1, 2, np.nan, 2, 1, 2], ordered=True)) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) exp = Series(Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True)) tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp) def test_concat_categorical_coercion_nan(self): # GH 13524 # some edge cases # category + not-category => not category s1 = Series(np.array([np.nan, np.nan], dtype=np.float64), dtype="category") s2 = Series([np.nan, 1]) exp = Series([np.nan, np.nan, np.nan, 1]) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) s1 = Series([1, np.nan], dtype="category") s2 = Series([np.nan, np.nan]) exp = Series([1, np.nan, np.nan, np.nan], dtype="float") tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) # mixed dtype, all nan-likes => not-category s1 = Series([np.nan, np.nan], dtype="category") s2 = Series([np.nan, np.nan]) exp = Series([np.nan, np.nan, np.nan, np.nan]) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) # all category nan-likes => category s1 = Series([np.nan, np.nan], dtype="category") s2 = Series([np.nan, np.nan], dtype="category") exp = Series([np.nan, np.nan, np.nan, np.nan], dtype="category") tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) def test_concat_categorical_empty(self): 
# GH 13524 s1 = Series([], dtype="category") s2 = Series([1, 2], dtype="category") exp = s2.astype(object) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(s1._append_internal(s2, ignore_index=True), exp) tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) tm.assert_series_equal(s2._append_internal(s1, ignore_index=True), exp) s1 = Series([], dtype="category") s2 = Series([], dtype="category") tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2) tm.assert_series_equal(s1._append_internal(s2, ignore_index=True), s2) s1 = Series([], dtype="category") s2 = Series([], dtype="object") # different dtype => not-category tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2) tm.assert_series_equal(s1._append_internal(s2, ignore_index=True), s2) tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2) tm.assert_series_equal(s2._append_internal(s1, ignore_index=True), s2) s1 = Series([], dtype="category") s2 = Series([np.nan, np.nan]) exp = Series([np.nan, np.nan], dtype=object) tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) def test_categorical_concat_append(self): cat = Categorical(["a", "b"], categories=["a", "b"]) vals = [1, 2] df = DataFrame({"cats": cat, "vals": vals}) cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"]) vals2 = [1, 2, 1, 2] exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1])) tm.assert_frame_equal(pd.concat([df, df]), exp) # GH 13524 can concat different categories cat3 = Categorical(["a", "b"], categories=["a", "b", "c"]) vals3 = [1, 2] df_different_categories = DataFrame({"cats": cat3, "vals": vals3}) res = pd.concat([df, df_different_categories], ignore_index=True) exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]}) tm.assert_frame_equal(res, exp)
TestConcatAppendCommon
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/callbackProtocol1.py
{ "start": 1565, "end": 1720 }
class ____(Protocol): def __call__(self) -> None: pass var3: TestClass3 = func1 var3 = func2 var3 = func3 var3 = func4 var3 = func5
TestClass3
python
huggingface__transformers
src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py
{ "start": 148441, "end": 151384 }
class ____(torch.nn.Module): def __init__( self, channels, kernel_size=3, dilation=(1, 3, 5), ): super().__init__() self.convs1 = nn.ModuleList( [ nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[0], padding=self._get_padding(kernel_size, dilation[0]), ), nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[1], padding=self._get_padding(kernel_size, dilation[1]), ), nn.Conv1d( channels, channels, kernel_size, 1, dilation=dilation[2], padding=self._get_padding(kernel_size, dilation[2]), ), ] ) self.convs2 = nn.ModuleList( [ nn.Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=self._get_padding(kernel_size, 1), ), nn.Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=self._get_padding(kernel_size, 1), ), nn.Conv1d( channels, channels, kernel_size, 1, dilation=1, padding=self._get_padding(kernel_size, 1), ), ] ) self.num_layers = len(self.convs1) + len(self.convs2) # total number of conv layers self.activations = nn.ModuleList( [TorchActivation1d(activation=SnakeBeta(channels)) for _ in range(self.num_layers)] ) def _get_padding(self, kernel_size, dilation=1): return int((kernel_size * dilation - dilation) / 2) def forward(self, hidden_states): acts1, acts2 = self.activations[::2], self.activations[1::2] for conv1, conv2, act1, act2 in zip(self.convs1, self.convs2, acts1, acts2): residual = hidden_states hidden_states = act1(hidden_states) hidden_states = conv1(hidden_states) hidden_states = act2(hidden_states) hidden_states = conv2(hidden_states) hidden_states = residual + hidden_states return hidden_states @auto_docstring( custom_intro=""" The full Qwen2.5Omni Token2WavBigVGAN model. Which take mel spectrogram as input and predict waveform. """ )
AMPBlock
python
readthedocs__readthedocs.org
readthedocs/projects/migrations/0023_migrate-alias-slug.py
{ "start": 131, "end": 990 }
class ____(migrations.Migration): safe = Safe.after_deploy() def migrate_data(apps, schema_editor): # Keep things that slugify wouldn't normally accept, # so that we don't break a bunch of folks URL's. # They will have to change them on update. invalid_chars_re = re.compile("[^-._a-zA-Z0-9]") ProjectRelationship = apps.get_model("projects", "ProjectRelationship") for p in ProjectRelationship.objects.all(): if p.alias and invalid_chars_re.match(p.alias): new_alias = invalid_chars_re.sub("", p.alias) p.alias = new_alias p.save() def reverse(apps, schema_editor): pass dependencies = [ ("projects", "0022_add-alias-slug"), ] operations = [ migrations.RunPython(migrate_data, reverse), ]
Migration
python
ray-project__ray
python/ray/_private/runtime_env/plugin.py
{ "start": 3509, "end": 9423 }
class ____: """This manager is used to load plugins in runtime env agent.""" def __init__(self): self.plugins: Dict[str, PluginSetupContext] = {} plugin_config_str = os.environ.get(RAY_RUNTIME_ENV_PLUGINS_ENV_VAR) if plugin_config_str: plugin_configs = json.loads(plugin_config_str) self.load_plugins(plugin_configs) def validate_plugin_class(self, plugin_class: Type[RuntimeEnvPlugin]) -> None: if not issubclass(plugin_class, RuntimeEnvPlugin): raise RuntimeError( f"Invalid runtime env plugin class {plugin_class}. " "The plugin class must inherit " "ray._private.runtime_env.plugin.RuntimeEnvPlugin." ) if not plugin_class.name: raise RuntimeError(f"No valid name in runtime env plugin {plugin_class}.") if plugin_class.name in self.plugins: raise RuntimeError( f"The name of runtime env plugin {plugin_class} conflicts " f"with {self.plugins[plugin_class.name]}.", ) def validate_priority(self, priority: Any) -> None: if ( not isinstance(priority, int) or priority < RAY_RUNTIME_ENV_PLUGIN_MIN_PRIORITY or priority > RAY_RUNTIME_ENV_PLUGIN_MAX_PRIORITY ): raise RuntimeError( f"Invalid runtime env priority {priority}, " "it should be an integer between " f"{RAY_RUNTIME_ENV_PLUGIN_MIN_PRIORITY} " f"and {RAY_RUNTIME_ENV_PLUGIN_MAX_PRIORITY}." ) def load_plugins(self, plugin_configs: List[Dict]) -> None: """Load runtime env plugins and create URI caches for them.""" for plugin_config in plugin_configs: if ( not isinstance(plugin_config, dict) or RAY_RUNTIME_ENV_CLASS_FIELD_NAME not in plugin_config ): raise RuntimeError( f"Invalid runtime env plugin config {plugin_config}, " "it should be a object which contains the " f"{RAY_RUNTIME_ENV_CLASS_FIELD_NAME} field." ) plugin_class = import_attr(plugin_config[RAY_RUNTIME_ENV_CLASS_FIELD_NAME]) self.validate_plugin_class(plugin_class) # The priority should be an integer between 0 and 100. # The default priority is 10. A smaller number indicates a # higher priority and the plugin will be set up first. 
if RAY_RUNTIME_ENV_PRIORITY_FIELD_NAME in plugin_config: priority = plugin_config[RAY_RUNTIME_ENV_PRIORITY_FIELD_NAME] else: priority = plugin_class.priority self.validate_priority(priority) class_instance = plugin_class() self.plugins[plugin_class.name] = PluginSetupContext( plugin_class.name, class_instance, priority, self.create_uri_cache_for_plugin(class_instance), ) def add_plugin(self, plugin: RuntimeEnvPlugin) -> None: """Add a plugin to the manager and create a URI cache for it. Args: plugin: The class instance of the plugin. """ plugin_class = type(plugin) self.validate_plugin_class(plugin_class) self.validate_priority(plugin_class.priority) self.plugins[plugin_class.name] = PluginSetupContext( plugin_class.name, plugin, plugin_class.priority, self.create_uri_cache_for_plugin(plugin), ) def create_uri_cache_for_plugin(self, plugin: RuntimeEnvPlugin) -> URICache: """Create a URI cache for a plugin. Args: plugin_name: The name of the plugin. Returns: The created URI cache for the plugin. """ # Set the max size for the cache. Defaults to 10 GB. cache_size_env_var = f"RAY_RUNTIME_ENV_{plugin.name}_CACHE_SIZE_GB".upper() cache_size_bytes = int( (1024**3) * float(os.environ.get(cache_size_env_var, 10)) ) return URICache(plugin.delete_uri, cache_size_bytes) def sorted_plugin_setup_contexts(self) -> List[PluginSetupContext]: """Get the sorted plugin setup contexts, sorted by increasing priority. Returns: The sorted plugin setup contexts. 
""" return sorted(self.plugins.values(), key=lambda x: x.priority) async def create_for_plugin_if_needed( runtime_env: "RuntimeEnv", # noqa: F821 plugin: RuntimeEnvPlugin, uri_cache: URICache, context: RuntimeEnvContext, logger: logging.Logger = default_logger, ): """Set up the environment using the plugin if not already set up and cached.""" if plugin.name not in runtime_env or runtime_env[plugin.name] is None: return plugin.validate(runtime_env) uris = plugin.get_uris(runtime_env) if not uris: logger.debug( f"No URIs for runtime env plugin {plugin.name}; " "create always without checking the cache." ) await plugin.create(None, runtime_env, context, logger=logger) for uri in uris: if uri not in uri_cache: logger.debug(f"Cache miss for URI {uri}.") size_bytes = await plugin.create(uri, runtime_env, context, logger=logger) uri_cache.add(uri, size_bytes, logger=logger) else: logger.info( f"Runtime env {plugin.name} {uri} is already installed " "and will be reused. Search " "all runtime_env_setup-*.log to find the corresponding setup log." ) uri_cache.mark_used(uri, logger=logger) plugin.modify_context(uris, runtime_env, context, logger)
RuntimeEnvPluginManager
python
PrefectHQ__prefect
src/prefect/exceptions.py
{ "start": 11341, "end": 11430 }
class ____(PrefectException): """Raised when a flow pause times out"""
FlowPauseTimeout
python
encode__django-rest-framework
tests/test_relations_slug.py
{ "start": 169, "end": 455 }
class ____(serializers.ModelSerializer): sources = serializers.SlugRelatedField( slug_field='name', queryset=ForeignKeySource.objects.all(), many=True ) class Meta: model = ForeignKeyTarget fields = '__all__'
ForeignKeyTargetSerializer
python
huggingface__transformers
src/transformers/models/granitemoe/modular_granitemoe.py
{ "start": 6371, "end": 9375 }
class ____(MixtralModel): def __init__(self, config: GraniteMoeConfig): super().__init__(config) self.layers = nn.ModuleList( [GraniteMoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = GraniteMoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.embedding_multiplier = config.embedding_multiplier @check_model_inputs() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> MoeModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( # ONLY DIFF WITH MIXTRAL: NO SLIDING config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) inputs_embeds = inputs_embeds * self.embedding_multiplier hidden_states = inputs_embeds # create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, 
position_embeddings=position_embeddings, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, **kwargs, ) hidden_states = self.norm(hidden_states) return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE last_hidden_state=hidden_states, past_key_values=past_key_values, )
GraniteMoeModel
python
django__django
tests/file_storage/models.py
{ "start": 894, "end": 990 }
class ____(LazyObject): def _setup(self): self._wrapped = temp_storage
LazyTempStorage
python
numba__numba
numba/tests/gdb/test_break_on_symbol.py
{ "start": 307, "end": 976 }
class ____(TestCase): def test(self): foo(120) sz = types.intp.bitwidth driver = GdbMIDriver(__file__) driver.set_breakpoint(symbol="__main__::foo") driver.run() # will hit cpython symbol match driver.check_hit_breakpoint(number=1) driver.cont() # will hit njit symbol match driver.check_hit_breakpoint(number=1, line=10) # Ensure line number driver.stack_list_arguments(2) expect = ('[frame={level="0",args=[{name="x",type="int%s",' 'value="120"}]}]' % sz) driver.assert_output(expect) driver.quit() if __name__ == '__main__': unittest.main()
Test
python
pytorch__pytorch
torch/onnx/errors.py
{ "start": 349, "end": 484 }
class ____(RuntimeError): """Errors raised by the ONNX exporter. This is the base class for all exporter errors."""
OnnxExporterError