language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/groupvit/modeling_groupvit.py | {
"start": 47402,
"end": 49208
} | class ____(GroupViTPreTrainedModel):
config: GroupViTVisionConfig
main_input_name = "pixel_values"
input_modalities = ("image",)
def __init__(self, config: GroupViTVisionConfig):
super().__init__(config)
self.vision_model = GroupViTVisionTransformer(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> GroupViTPatchEmbeddings:
return self.vision_model.embeddings.patch_embeddings
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, GroupViTVisionModel
>>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> model = GroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled CLS states
```"""
return self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
@auto_docstring
| GroupViTVisionModel |
python | kubernetes-client__python | kubernetes/base/leaderelection/leaderelection.py | {
"start": 1330,
"end": 8377
} | class ____:
def __init__(self, election_config):
if election_config is None:
sys.exit("argument config not passed")
# Latest record observed in the created lock object
self.observed_record = None
# The configuration set for this candidate
self.election_config = election_config
# Latest update time of the lock
self.observed_time_milliseconds = 0
# Point of entry to Leader election
def run(self):
# Try to create/ acquire a lock
if self.acquire():
logging.info("{} successfully acquired lease".format(self.election_config.lock.identity))
# Start leading and call OnStartedLeading()
threading.daemon = True
threading.Thread(target=self.election_config.onstarted_leading).start()
self.renew_loop()
# Failed to update lease, run OnStoppedLeading callback
self.election_config.onstopped_leading()
def acquire(self):
# Follower
logging.info("{} is a follower".format(self.election_config.lock.identity))
retry_period = self.election_config.retry_period
while True:
succeeded = self.try_acquire_or_renew()
if succeeded:
return True
time.sleep(retry_period)
def renew_loop(self):
# Leader
logging.info("Leader has entered renew loop and will try to update lease continuously")
retry_period = self.election_config.retry_period
renew_deadline = self.election_config.renew_deadline * 1000
while True:
timeout = int(time.time() * 1000) + renew_deadline
succeeded = False
while int(time.time() * 1000) < timeout:
succeeded = self.try_acquire_or_renew()
if succeeded:
break
time.sleep(retry_period)
if succeeded:
time.sleep(retry_period)
continue
# failed to renew, return
return
def try_acquire_or_renew(self):
now_timestamp = time.time()
now = datetime.datetime.fromtimestamp(now_timestamp)
# Check if lock is created
lock_status, old_election_record = self.election_config.lock.get(self.election_config.lock.name,
self.election_config.lock.namespace)
# create a default Election record for this candidate
leader_election_record = LeaderElectionRecord(self.election_config.lock.identity,
str(self.election_config.lease_duration), str(now), str(now))
# A lock is not created with that name, try to create one
if not lock_status:
# To be removed when support for python2 will be removed
if sys.version_info > (3, 0):
if json.loads(old_election_record.body)['code'] != HTTPStatus.NOT_FOUND:
logging.info("Error retrieving resource lock {} as {}".format(self.election_config.lock.name,
old_election_record.reason))
return False
else:
if json.loads(old_election_record.body)['code'] != httplib.NOT_FOUND:
logging.info("Error retrieving resource lock {} as {}".format(self.election_config.lock.name,
old_election_record.reason))
return False
logging.info("{} is trying to create a lock".format(leader_election_record.holder_identity))
create_status = self.election_config.lock.create(name=self.election_config.lock.name,
namespace=self.election_config.lock.namespace,
election_record=leader_election_record)
if create_status is False:
logging.info("{} Failed to create lock".format(leader_election_record.holder_identity))
return False
self.observed_record = leader_election_record
self.observed_time_milliseconds = int(time.time() * 1000)
return True
# A lock exists with that name
# Validate old_election_record
if old_election_record is None:
# try to update lock with proper annotation and election record
return self.update_lock(leader_election_record)
if (old_election_record.holder_identity is None or old_election_record.lease_duration is None
or old_election_record.acquire_time is None or old_election_record.renew_time is None):
# try to update lock with proper annotation and election record
return self.update_lock(leader_election_record)
# Report transitions
if self.observed_record and self.observed_record.holder_identity != old_election_record.holder_identity:
logging.info("Leader has switched to {}".format(old_election_record.holder_identity))
if self.observed_record is None or old_election_record.__dict__ != self.observed_record.__dict__:
self.observed_record = old_election_record
self.observed_time_milliseconds = int(time.time() * 1000)
# If This candidate is not the leader and lease duration is yet to finish
if (self.election_config.lock.identity != self.observed_record.holder_identity
and self.observed_time_milliseconds + self.election_config.lease_duration * 1000 > int(now_timestamp * 1000)):
logging.info("yet to finish lease_duration, lease held by {} and has not expired".format(old_election_record.holder_identity))
return False
# If this candidate is the Leader
if self.election_config.lock.identity == self.observed_record.holder_identity:
# Leader updates renewTime, but keeps acquire_time unchanged
leader_election_record.acquire_time = self.observed_record.acquire_time
return self.update_lock(leader_election_record)
def update_lock(self, leader_election_record):
# Update object with latest election record
update_status = self.election_config.lock.update(self.election_config.lock.name,
self.election_config.lock.namespace,
leader_election_record)
if update_status is False:
logging.info("{} failed to acquire lease".format(leader_election_record.holder_identity))
return False
self.observed_record = leader_election_record
self.observed_time_milliseconds = int(time.time() * 1000)
logging.info("leader {} has successfully acquired lease".format(leader_election_record.holder_identity))
return True
| LeaderElection |
python | run-llama__llama_index | llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py | {
"start": 1373,
"end": 1478
} | class ____(ToolCallEndEvent, Event):
type: EventType = EventType.TOOL_CALL_END
| ToolCallEndWorkflowEvent |
python | django-import-export__django-import-export | import_export/options.py | {
"start": 0,
"end": 6652
} | class ____:
"""
The inner Meta class allows for class-level configuration of how the
Resource should behave. The following options are available:
"""
model = None
"""
Django Model class or full application label string. It is used to introspect
available fields.
"""
fields = None
"""
Controls what introspected fields the Resource should include. A whitelist
of fields.
"""
exclude = None
"""
Controls what introspected fields the Resource should
NOT include. A blacklist of fields.
"""
instance_loader_class = None
"""
Controls which class instance will take
care of loading existing objects.
"""
import_id_fields = ["id"]
"""
Controls which object fields will be used to
identify existing instances.
"""
import_order = None
"""
Controls import order for columns.
"""
export_order = None
"""
Controls export order for columns.
"""
widgets = None
"""
This dictionary defines widget kwargs for fields.
"""
use_transactions = None
"""
Controls if import should use database transactions. Default value is
``None`` meaning ``settings.IMPORT_EXPORT_USE_TRANSACTIONS`` will be
evaluated.
"""
skip_unchanged = False
"""
Controls if the import should skip unchanged records.
If ``True``, then each existing instance is compared with the instance to be
imported, and if there are no changes detected, the row is recorded as skipped,
and no database update takes place.
The advantages of enabling this option are:
#. Avoids unnecessary database operations which can result in performance
improvements for large datasets.
#. Skipped records are recorded in each :class:`~import_export.results.RowResult`.
#. Skipped records are clearly visible in the
:ref:`import confirmation page<import-process>`.
For the default ``skip_unchanged`` logic to work, the
:attr:`~import_export.resources.ResourceOptions.skip_diff` must also be ``False``
(which is the default):
Default value is ``False``.
"""
report_skipped = True
"""
Controls if the result reports skipped rows. Default value is ``True``.
"""
clean_model_instances = False
"""
Controls whether
`full_clean <https://docs.djangoproject.com/en/stable/ref/models/instances/#django.db.models.Model.full_clean>`_
is called during the import
process to identify potential validation errors for each (non skipped) row.
The default value is ``False``.
""" # noqa: E501
chunk_size = None
"""
Controls the chunk_size argument of Queryset.iterator or,
if prefetch_related is used, the per_page attribute of Paginator.
"""
skip_diff = False
"""
Controls whether or not an instance should be diffed following import.
By default, an instance is copied prior to insert, update or delete.
After each row is processed, the instance's copy is diffed against the original,
and the value stored in each :class:`~import_export.results.RowResult`.
If diffing is not required, then disabling the diff operation by setting this value
to ``True`` improves performance, because the copy and comparison operations are
skipped for each row.
If enabled, then :meth:`~import_export.resources.Resource.skip_row` checks do not
execute, because 'skip' logic requires comparison between the stored and imported
versions of a row.
If enabled, then HTML row reports are also not generated, meaning that the
:attr:`~import_export.resources.ResourceOptions.skip_html_diff` value is ignored.
The default value is ``False``.
"""
skip_html_diff = False
"""
Controls whether or not a HTML report is generated after each row.
By default, the difference between a stored copy and an imported instance
is generated in HTML form and stored in each
:class:`~import_export.results.RowResult`.
The HTML report is used to present changes in the
:ref:`import confirmation page<import-process>` in the admin site, hence when this
value is ``True``, then changes will not be presented on the confirmation screen.
If the HTML report is not required, then setting this value to ``True`` improves
performance, because the HTML generation is skipped for each row.
This is a useful optimization when importing large datasets.
The default value is ``False``.
"""
use_bulk = False
"""
Controls whether import operations should be performed in bulk.
By default, an object's save() method is called for each row in a data set.
When bulk is enabled, objects are saved using bulk operations.
"""
batch_size = 1000
"""
The batch_size parameter controls how many objects are created in a single query.
The default is to create objects in batches of 1000.
See `bulk_create()
<https://docs.djangoproject.com/en/dev/ref/models/querysets/#bulk-create>`_.
This parameter is only used if ``use_bulk`` is ``True``.
"""
force_init_instance = False
"""
If ``True``, this parameter will prevent imports from checking the database for
existing instances.
Enabling this parameter is a performance enhancement if your import dataset is
guaranteed to contain new instances.
"""
using_db = None
"""
DB Connection name to use for db transactions. If not provided,
``router.db_for_write(model)`` will be evaluated and if it's missing,
``DEFAULT_DB_ALIAS`` constant ("default") is used.
"""
store_row_values = False
"""
If True, each row's raw data will be stored in each
:class:`~import_export.results.RowResult`.
Enabling this parameter will increase the memory usage during import
which should be considered when importing large datasets.
"""
store_instance = False
"""
If True, the row instance will be stored in each
:class:`~import_export.results.RowResult`.
Enabling this parameter will increase the memory usage during import
which should be considered when importing large datasets.
This value will always be set to ``True`` when importing via the Admin UI.
This is so that appropriate ``LogEntry`` instances can be created.
"""
use_natural_foreign_keys = False
"""
If ``True``, this value will be passed to all foreign
key widget fields whose models support natural foreign keys. That is,
the model has a natural_key function and the manager has a
``get_by_natural_key()`` function.
"""
| ResourceOptions |
python | pytorch__pytorch | tools/linter/adapters/no_merge_conflict_csv_linter.py | {
"start": 305,
"end": 2689
} | class ____(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
def check_file(filename: str) -> list[LintMessage]:
with open(filename, "rb") as f:
original = f.read().decode("utf-8")
replacement = ""
with open(filename) as f:
lines = f.readlines()
for line in lines:
if len(line.strip()) > 0:
replacement += line
replacement += "\n" * 3
replacement = replacement[:-3]
if replacement == original:
return []
return [
LintMessage(
path=filename,
line=None,
char=None,
code="MERGE_CONFLICTLESS_CSV",
severity=LintSeverity.WARNING,
name="format",
original=original,
replacement=replacement,
description="Run `lintrunner -a` to apply this patch.",
)
]
def main() -> None:
parser = argparse.ArgumentParser(
description="Format csv files to have 3 lines of space between each line to prevent merge conflicts.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(processName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
with concurrent.futures.ProcessPoolExecutor(
max_workers=os.cpu_count(),
) as executor:
futures = {executor.submit(check_file, x): x for x in args.filenames}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
| LintMessage |
python | Netflix__metaflow | metaflow/plugins/cards/card_datastore.py | {
"start": 1011,
"end": 12941
} | class ____(object):
@classmethod
def get_storage_root(cls, storage_type):
if storage_type == "s3":
return CARD_S3ROOT
elif storage_type == "azure":
return CARD_AZUREROOT
elif storage_type == "gs":
return CARD_GSROOT
elif storage_type == "local" or storage_type == "spin":
# Borrowing some of the logic from LocalStorage.get_storage_root
result = CARD_LOCALROOT
local_dir = (
DATASTORE_SPIN_LOCAL_DIR
if storage_type == "spin"
else DATASTORE_LOCAL_DIR
)
if result is None:
current_path = os.getcwd()
check_dir = os.path.join(current_path, local_dir)
check_dir = os.path.realpath(check_dir)
orig_path = check_dir
while not os.path.isdir(check_dir):
new_path = os.path.dirname(current_path)
if new_path == current_path:
# No longer making upward progress so we
# return the top level path
return os.path.join(orig_path, CARD_SUFFIX)
current_path = new_path
check_dir = os.path.join(current_path, local_dir)
return os.path.join(check_dir, CARD_SUFFIX)
else:
# Let's make it obvious we need to update this block for each new datastore backend...
raise NotImplementedError(
"Card datastore does not support backend %s" % (storage_type,)
)
def __init__(self, flow_datastore, pathspec=None):
self._backend = flow_datastore._storage_impl
self._flow_name = flow_datastore.flow_name
_, run_id, step_name, _ = pathspec.split("/")
self._run_id = run_id
self._step_name = step_name
self._pathspec = pathspec
self._temp_card_save_path = self._get_card_write_path(base_pth=TEMP_DIR_NAME)
@classmethod
def get_card_location(
cls, base_path, card_name, uuid, card_id=None, suffix=CardNameSuffix.CARD
):
chash = uuid
if card_id is None:
card_file_name = "%s-%s.%s" % (card_name, chash, suffix)
else:
card_file_name = "%s-%s-%s.%s" % (card_name, card_id, chash, suffix)
return os.path.join(base_path, card_file_name)
def _make_path(
self, base_pth, pathspec=None, with_steps=False, suffix=CardPathSuffix.CARD
):
sysroot = base_pth
if pathspec is not None:
# since most cards are at a task level there will always be 4 non-none values returned
flow_name, run_id, step_name, task_id = path_spec_resolver(pathspec)
# We have a condition that checks for `with_steps` because
# when cards were introduced there was an assumption made
# about task-ids being unique.
# This assumption is incorrect since pathspec needs to be
# unique but there is no such guarantees on task-ids.
# This is why we have a `with_steps` flag that allows
# constructing the path with and without steps so that
# older-cards (cards with a path without `steps/<stepname>` in them)
# can also be accessed by the card cli and the card client.
if with_steps:
pth_arr = [
sysroot,
flow_name,
"runs",
run_id,
"steps",
step_name,
"tasks",
task_id,
suffix,
]
else:
pth_arr = [
sysroot,
flow_name,
"runs",
run_id,
"tasks",
task_id,
suffix,
]
if sysroot == "" or sysroot is None:
pth_arr.pop(0)
return os.path.join(*pth_arr)
def _get_data_read_path(self, base_pth=""):
return self._make_path(
base_pth=base_pth,
pathspec=self._pathspec,
with_steps=True,
suffix=CardPathSuffix.DATA,
)
def _get_data_write_path(self, base_pth=""):
return self._make_path(
base_pth=base_pth,
pathspec=self._pathspec,
with_steps=True,
suffix=CardPathSuffix.DATA,
)
def _get_card_write_path(
self,
base_pth="",
):
return self._make_path(
base_pth,
pathspec=self._pathspec,
with_steps=True,
suffix=CardPathSuffix.CARD,
)
def _get_card_read_path(self, base_pth="", with_steps=False):
return self._make_path(
base_pth,
pathspec=self._pathspec,
with_steps=with_steps,
suffix=CardPathSuffix.CARD,
)
@staticmethod
def info_from_path(path, suffix=CardNameSuffix.CARD):
"""
Args:
path (str): The path to the card
Raises:
Exception: When the card_path is invalid
Returns:
CardInfo
"""
card_file_name = path.split("/")[-1]
file_split = card_file_name.split("-")
if len(file_split) not in [2, 3]:
raise Exception(
"Invalid file name %s. Card/Data file names should be of form TYPE-HASH.%s or TYPE-ID-HASH.%s"
% (card_file_name, suffix, suffix)
)
card_type, card_hash, card_id = None, None, None
if len(file_split) == 2:
card_type, card_hash = file_split
else:
card_type, card_id, card_hash = file_split
card_hash = card_hash.split("." + suffix)[0]
return CardInfo(card_type, card_hash, card_id, card_file_name)
def save_data(self, uuid, card_type, json_data, card_id=None):
card_file_name = card_type
loc = self.get_card_location(
self._get_data_write_path(),
card_file_name,
uuid,
card_id=card_id,
suffix=CardNameSuffix.DATA,
)
self._backend.save_bytes(
[(loc, BytesIO(json.dumps(json_data).encode("utf-8")))], overwrite=True
)
def save_card(self, uuid, card_type, card_html, card_id=None, overwrite=True):
card_file_name = card_type
card_path_with_steps = self.get_card_location(
self._get_card_write_path(),
card_file_name,
uuid,
card_id=card_id,
suffix=CardNameSuffix.CARD,
)
self._backend.save_bytes(
[(card_path_with_steps, BytesIO(bytes(card_html, "utf-8")))],
overwrite=overwrite,
)
return self.info_from_path(card_path_with_steps, suffix=CardNameSuffix.CARD)
def _list_card_paths(self, card_type=None, card_hash=None, card_id=None):
# Check for new cards first
card_paths = []
card_paths_with_steps = self._backend.list_content(
[self._get_card_read_path(with_steps=True)]
)
if len(card_paths_with_steps) == 0:
# The listing logic is reading the cards with steps and without steps
# because earlier versions of clients (ones that wrote cards before June 2022),
# would have written cards without steps. So as a fallback we will try to check for the
# cards without steps.
card_paths_without_steps = self._backend.list_content(
[self._get_card_read_path(with_steps=False)]
)
if len(card_paths_without_steps) == 0:
# If there are no files found on the Path then raise an error of
raise CardNotPresentException(
self._pathspec,
card_hash=card_hash,
card_type=card_type,
)
else:
card_paths = card_paths_without_steps
else:
card_paths = card_paths_with_steps
cards_found = []
for task_card_path in card_paths:
card_path = task_card_path.path
card_info = self.info_from_path(card_path, suffix=CardNameSuffix.CARD)
if card_type is not None and card_info.type != card_type:
continue
elif card_hash is not None:
if not card_info.hash.startswith(card_hash):
continue
elif card_id is not None and card_info.id != card_id:
continue
if task_card_path.is_file:
cards_found.append(card_path)
return cards_found
def _list_card_data(self, card_type=None, card_hash=None, card_id=None):
card_data_paths = self._backend.list_content([self._get_data_read_path()])
data_found = []
for data_path in card_data_paths:
_pth = data_path.path
card_info = self.info_from_path(_pth, suffix=CardNameSuffix.DATA)
if card_type is not None and card_info.type != card_type:
continue
elif card_hash is not None:
if not card_info.hash.startswith(card_hash):
continue
elif card_id is not None and card_info.id != card_id:
continue
if data_path.is_file:
data_found.append(_pth)
return data_found
def create_full_path(self, card_path):
return os.path.join(self._backend.datastore_root, card_path)
def get_card_names(self, card_paths):
return [
self.info_from_path(path, suffix=CardNameSuffix.CARD) for path in card_paths
]
def get_card_html(self, path):
with self._backend.load_bytes([path]) as get_results:
for _, path, _ in get_results:
if path is not None:
with open(path, "r") as f:
return f.read()
def get_card_data(self, path):
with self._backend.load_bytes([path]) as get_results:
for _, path, _ in get_results:
if path is not None:
with open(path, "r") as f:
return json.loads(f.read())
def cache_locally(self, path, save_path=None):
"""
Saves the data present in the `path` the `metaflow_card_cache` directory or to the `save_path`.
"""
# todo : replace this function with the FileCache
if save_path is None:
if not is_file_present(self._temp_card_save_path):
LocalStorage._makedirs(self._temp_card_save_path)
else:
save_dir = os.path.dirname(save_path)
if save_dir != "" and not is_file_present(save_dir):
LocalStorage._makedirs(os.path.dirname(save_path))
with self._backend.load_bytes([path]) as get_results:
for key, path, meta in get_results:
if path is not None:
main_path = path
if save_path is None:
file_name = key.split("/")[-1]
main_path = os.path.join(self._temp_card_save_path, file_name)
else:
main_path = save_path
shutil.copy(path, main_path)
return main_path
def extract_data_paths(self, card_type=None, card_hash=None, card_id=None):
return self._list_card_data(
# card_hash is the unique identifier to the card.
# Its no longer the actual hash!
card_type=card_type,
card_hash=card_hash,
card_id=card_id,
)
def extract_card_paths(self, card_type=None, card_hash=None, card_id=None):
return self._list_card_paths(
card_type=card_type, card_hash=card_hash, card_id=card_id
)
| CardDatastore |
python | sphinx-doc__sphinx | sphinx/util/logging.py | {
"start": 3071,
"end": 3440
} | class ____(SphinxLogRecord):
"""Warning log record class supporting location"""
@property
def prefix(self) -> str: # type: ignore[override]
if self.levelno >= logging.CRITICAL:
return 'CRITICAL: '
elif self.levelno >= logging.ERROR:
return 'ERROR: '
else:
return 'WARNING: '
| SphinxWarningLogRecord |
python | spack__spack | var/spack/test_repos/spack_repo/builder_test/packages/callbacks/package.py | {
"start": 574,
"end": 1395
} | class ____(GenericBuilder):
def install(self, pkg, spec, prefix):
os.environ["CALLBACKS_INSTALL_CALLED"] = "1"
os.environ["INSTALL_VALUE"] = "CALLBACKS"
mkdirp(prefix.bin)
@run_before("install")
def before_install_1(self):
os.environ["BEFORE_INSTALL_1_CALLED"] = "1"
os.environ["TEST_VALUE"] = "1"
@run_before("install")
def before_install_2(self):
os.environ["BEFORE_INSTALL_2_CALLED"] = "1"
os.environ["TEST_VALUE"] = "2"
@run_after("install")
def after_install_1(self):
os.environ["AFTER_INSTALL_1_CALLED"] = "1"
os.environ["TEST_VALUE"] = "3"
@run_after("install", when="@1.0")
def after_install_2(self):
os.environ["AFTER_INSTALL_2_CALLED"] = "1"
os.environ["TEST_VALUE"] = "4"
| GenericBuilder |
python | numba__numba | numba/core/types/functions.py | {
"start": 1787,
"end": 9797
} | class ____(object):
"""Collect and format function resolution failures.
"""
def __init__(self, context, function_type, args, kwargs, depth=0):
self._context = context
self._function_type = function_type
self._args = args
self._kwargs = kwargs
self._failures = defaultdict(list)
self._depth = depth
self._max_depth = 5
self._scale = 2
def __len__(self):
return len(self._failures)
def add_error(self, calltemplate, matched, error, literal):
"""
Args
----
calltemplate : CallTemplate
error : Exception or str
Error message
"""
isexc = isinstance(error, Exception)
errclazz = '%s: ' % type(error).__name__ if isexc else ''
key = "{}{}".format(errclazz, str(error))
self._failures[key].append(_FAILURE(calltemplate, matched, error,
literal))
def format(self):
"""Return a formatted error message from all the gathered errors.
"""
indent = ' ' * self._scale
argstr = argsnkwargs_to_str(self._args, self._kwargs)
ncandidates = sum([len(x) for x in self._failures.values()])
# sort out a display name for the function
tykey = self._function_type.typing_key
# most things have __name__
fname = getattr(tykey, '__name__', None)
is_external_fn_ptr = isinstance(self._function_type,
ExternalFunctionPointer)
if fname is None:
if is_external_fn_ptr:
fname = "ExternalFunctionPointer"
else:
fname = "<unknown function>"
msgbuf = [_header_template.format(the_function=self._function_type,
fname=fname,
signature=argstr,
ncandidates=ncandidates)]
nolitargs = tuple([unliteral(a) for a in self._args])
nolitkwargs = {k: unliteral(v) for k, v in self._kwargs.items()}
nolitargstr = argsnkwargs_to_str(nolitargs, nolitkwargs)
# depth could potentially get massive, so limit it.
ldepth = min(max(self._depth, 0), self._max_depth)
def template_info(tp):
src_info = tp.get_template_info()
unknown = "unknown"
source_name = src_info.get('name', unknown)
source_file = src_info.get('filename', unknown)
source_lines = src_info.get('lines', unknown)
source_kind = src_info.get('kind', 'Unknown template')
return source_name, source_file, source_lines, source_kind
for i, (k, err_list) in enumerate(self._failures.items()):
err = err_list[0]
nduplicates = len(err_list)
template, error = err.template, err.error
ifo = template_info(template)
source_name, source_file, source_lines, source_kind = ifo
largstr = argstr if err.literal else nolitargstr
if err.error == "No match.":
err_dict = defaultdict(set)
for errs in err_list:
err_dict[errs.template].add(errs.literal)
# if there's just one template, and it's erroring on
# literal/nonliteral be specific
if len(err_dict) == 1:
template = [_ for _ in err_dict.keys()][0]
source_name, source_file, source_lines, source_kind = \
template_info(template)
source_lines = source_lines[0]
else:
source_file = "<numerous>"
source_lines = "N/A"
msgbuf.append(_termcolor.errmsg(
_wrapper(_overload_template.format(nduplicates=nduplicates,
kind=source_kind.title(),
function=fname,
inof='of',
file=source_file,
line=source_lines,
args=largstr),
ldepth + 1)))
msgbuf.append(_termcolor.highlight(_wrapper(err.error,
ldepth + 2)))
else:
# There was at least one match in this failure class, but it
# failed for a specific reason try and report this.
msgbuf.append(_termcolor.errmsg(
_wrapper(_overload_template.format(nduplicates=nduplicates,
kind=source_kind.title(),
function=source_name,
inof='in',
file=source_file,
line=source_lines[0],
args=largstr),
ldepth + 1)))
if isinstance(error, BaseException):
reason = indent + self.format_error(error)
errstr = _err_reasons['specific_error'].format(reason)
else:
errstr = error
# if you are a developer, show the back traces
if config.DEVELOPER_MODE:
if isinstance(error, BaseException):
# if the error is an actual exception instance, trace it
bt = traceback.format_exception(type(error), error,
error.__traceback__)
else:
bt = [""]
bt_as_lines = _bt_as_lines(bt)
nd2indent = '\n{}'.format(2 * indent)
errstr += _termcolor.reset(nd2indent +
nd2indent.join(bt_as_lines))
msgbuf.append(_termcolor.highlight(_wrapper(errstr,
ldepth + 2)))
loc = self.get_loc(template, error)
if loc:
msgbuf.append('{}raised from {}'.format(indent, loc))
# the commented bit rewraps each block, may not be helpful?!
return _wrapper('\n'.join(msgbuf) + '\n') # , self._scale * ldepth)
def format_error(self, error):
"""Format error message or exception
"""
if isinstance(error, Exception):
return '{}: {}'.format(type(error).__name__, error)
else:
return '{}'.format(error)
def get_loc(self, classtemplate, error):
"""Get source location information from the error message.
"""
if isinstance(error, Exception) and hasattr(error, '__traceback__'):
# traceback is unavailable in py2
frame_list = traceback.extract_tb(error.__traceback__)
# Check if length of frame_list is 0
if len(frame_list) != 0:
frame = frame_list[-1]
return "{}:{}".format(frame[0], frame[1])
def raise_error(self):
for faillist in self._failures.values():
for fail in faillist:
if isinstance(fail.error, errors.ForceLiteralArg):
raise fail.error
raise errors.TypingError(self.format())
def _unlit_non_poison(ty):
"""Apply unliteral(ty) and raise a TypingError if type is Poison.
"""
out = unliteral(ty)
if isinstance(out, types.Poison):
m = f"Poison type used in arguments; got {out}"
raise errors.TypingError(m)
return out
| _ResolutionFailures |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_column06.py | {
"start": 315,
"end": 1398
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_column06.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [47363584, 49673344]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet1.write_column("A1", data[0])
worksheet1.write_column("B1", data[1])
worksheet1.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet2.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | catalyst-team__catalyst | catalyst/contrib/data/transforms.py | {
"start": 2035,
"end": 2798
} | class ____:
"""Composes several transforms together."""
def __init__(self, transforms):
"""
Args:
transforms: list of transforms to compose.
Example:
>>> Compose([ToTensor(), Normalize()])
"""
self.transforms = transforms
def __call__(self, x):
"""Applies several transforms to the data."""
for t in self.transforms:
x = t(x)
return x
def __repr__(self):
"""@TODO: Docs. Contribution is welcome."""
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
| Compose |
python | huggingface__transformers | tests/models/levit/test_modeling_levit.py | {
"start": 1606,
"end": 1893
} | class ____(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config, "hidden_sizes"))
self.parent.assertTrue(hasattr(config, "num_attention_heads"))
| LevitConfigTester |
python | cherrypy__cherrypy | cherrypy/test/test_session.py | {
"start": 15399,
"end": 18310
} | class ____(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_0_Session(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.MemcachedSession')
self.getPage('/testStr')
assert self.body == b'1'
self.getPage('/testGen', self.cookies)
assert self.body == b'2'
self.getPage('/testStr', self.cookies)
assert self.body == b'3'
self.getPage('/length', self.cookies)
self.assertErrorPage(500)
assert b'NotImplementedError' in self.body
self.getPage('/delkey?key=counter', self.cookies)
assert self.status_code == 200
# Wait for the session.timeout (1 second)
time.sleep(1.25)
self.getPage('/')
assert self.body == b'1'
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
assert self.body == b'True'
# Test session delete
self.getPage('/delete', self.cookies)
assert self.body == b'done'
def test_1_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage('/')
assert self.body == b'1'
cookies = self.cookies
data_dict = {}
def request(index):
for i in range(request_count):
self.getPage('/', cookies)
# Uncomment the following line to prove threads overlap.
# sys.stdout.write("%d " % index)
if not self.body.isdigit():
self.fail(self.body)
data_dict[index] = int(self.body)
# Start <request_count> concurrent requests from
# each of <client_thread_count> clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
assert hitcount == expected
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
assert self.body == b'MemcachedSession'
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
| MemcachedSessionTest |
python | pytorch__pytorch | torch/_numpy/linalg.py | {
"start": 294,
"end": 5648
} | class ____(Exception):
pass
def _atleast_float_1(a):
if not (a.dtype.is_floating_point or a.dtype.is_complex):
a = a.to(_dtypes_impl.default_dtypes().float_dtype)
return a
def _atleast_float_2(a, b):
dtyp = _dtypes_impl.result_type_impl(a, b)
if not (dtyp.is_floating_point or dtyp.is_complex):
dtyp = _dtypes_impl.default_dtypes().float_dtype
a = _util.cast_if_needed(a, dtyp)
b = _util.cast_if_needed(b, dtyp)
return a, b
def linalg_errors(func):
@functools.wraps(func)
def wrapped(*args, **kwds):
try:
return func(*args, **kwds)
except torch._C._LinAlgError as e:
raise LinAlgError(*e.args) # noqa: B904
return wrapped
# ### Matrix and vector products ###
@normalizer
@linalg_errors
def matrix_power(a: ArrayLike, n):
a = _atleast_float_1(a)
return torch.linalg.matrix_power(a, n)
@normalizer
@linalg_errors
def multi_dot(inputs: Sequence[ArrayLike], *, out=None):
return torch.linalg.multi_dot(inputs)
# ### Solving equations and inverting matrices ###
@normalizer
@linalg_errors
def solve(a: ArrayLike, b: ArrayLike):
a, b = _atleast_float_2(a, b)
return torch.linalg.solve(a, b)
@normalizer
@linalg_errors
def lstsq(a: ArrayLike, b: ArrayLike, rcond=None):
a, b = _atleast_float_2(a, b)
# NumPy is using gelsd: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/umath_linalg.cpp#L3991
# on CUDA, only `gels` is available though, so use it instead
driver = "gels" if a.is_cuda or b.is_cuda else "gelsd"
return torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
@normalizer
@linalg_errors
def inv(a: ArrayLike):
a = _atleast_float_1(a)
result = torch.linalg.inv(a)
return result
@normalizer
@linalg_errors
def pinv(a: ArrayLike, rcond=1e-15, hermitian=False):
a = _atleast_float_1(a)
return torch.linalg.pinv(a, rtol=rcond, hermitian=hermitian)
@normalizer
@linalg_errors
def tensorsolve(a: ArrayLike, b: ArrayLike, axes=None):
a, b = _atleast_float_2(a, b)
return torch.linalg.tensorsolve(a, b, dims=axes)
@normalizer
@linalg_errors
def tensorinv(a: ArrayLike, ind=2):
a = _atleast_float_1(a)
return torch.linalg.tensorinv(a, ind=ind)
# ### Norms and other numbers ###
@normalizer
@linalg_errors
def det(a: ArrayLike):
a = _atleast_float_1(a)
return torch.linalg.det(a)
@normalizer
@linalg_errors
def slogdet(a: ArrayLike):
a = _atleast_float_1(a)
return torch.linalg.slogdet(a)
@normalizer
@linalg_errors
def cond(x: ArrayLike, p=None):
x = _atleast_float_1(x)
# check if empty
# cf: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744
if x.numel() == 0 and math.prod(x.shape[-2:]) == 0:
raise LinAlgError("cond is not defined on empty arrays")
result = torch.linalg.cond(x, p=p)
# Convert nans to infs (numpy does it in a data-dependent way, depending on
# whether the input array has nans or not)
# XXX: NumPy does this: https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1744
return torch.where(torch.isnan(result), float("inf"), result)
@normalizer
@linalg_errors
def matrix_rank(a: ArrayLike, tol=None, hermitian=False):
a = _atleast_float_1(a)
if a.ndim < 2:
return int((a != 0).any())
if tol is None:
# follow https://github.com/numpy/numpy/blob/v1.24.0/numpy/linalg/linalg.py#L1885
atol = 0
rtol = max(a.shape[-2:]) * torch.finfo(a.dtype).eps
else:
atol, rtol = tol, 0
return torch.linalg.matrix_rank(a, atol=atol, rtol=rtol, hermitian=hermitian)
@normalizer
@linalg_errors
def norm(x: ArrayLike, ord=None, axis=None, keepdims: KeepDims = False):
x = _atleast_float_1(x)
return torch.linalg.norm(x, ord=ord, dim=axis)
# ### Decompositions ###
@normalizer
@linalg_errors
def cholesky(a: ArrayLike):
a = _atleast_float_1(a)
return torch.linalg.cholesky(a)
@normalizer
@linalg_errors
def qr(a: ArrayLike, mode="reduced"):
a = _atleast_float_1(a)
result = torch.linalg.qr(a, mode=mode)
if mode == "r":
# match NumPy
result = result.R
return result
@normalizer
@linalg_errors
def svd(a: ArrayLike, full_matrices=True, compute_uv=True, hermitian=False):
a = _atleast_float_1(a)
if not compute_uv:
return torch.linalg.svdvals(a)
# NB: ignore the hermitian= argument (no pytorch equivalent)
result = torch.linalg.svd(a, full_matrices=full_matrices)
return result
# ### Eigenvalues and eigenvectors ###
@normalizer
@linalg_errors
def eig(a: ArrayLike):
a = _atleast_float_1(a)
w, vt = torch.linalg.eig(a)
if not a.is_complex() and w.is_complex() and (w.imag == 0).all():
w = w.real
vt = vt.real
return w, vt
@normalizer
@linalg_errors
def eigh(a: ArrayLike, UPLO="L"):
a = _atleast_float_1(a)
return torch.linalg.eigh(a, UPLO=UPLO)
@normalizer
@linalg_errors
def eigvals(a: ArrayLike):
a = _atleast_float_1(a)
result = torch.linalg.eigvals(a)
if not a.is_complex() and result.is_complex() and (result.imag == 0).all():
result = result.real
return result
@normalizer
@linalg_errors
def eigvalsh(a: ArrayLike, UPLO="L"):
a = _atleast_float_1(a)
return torch.linalg.eigvalsh(a, UPLO=UPLO)
| LinAlgError |
python | python__mypy | mypyc/irbuild/nonlocalcontrol.py | {
"start": 5621,
"end": 6777
} | class ____(NonlocalControl):
"""Nonlocal control within try/finally."""
def __init__(self, target: BasicBlock) -> None:
self.target = target
self.ret_reg: None | Register | AssignmentTarget = None
def gen_break(self, builder: IRBuilder, line: int) -> None:
builder.error("break inside try/finally block is unimplemented", line)
def gen_continue(self, builder: IRBuilder, line: int) -> None:
builder.error("continue inside try/finally block is unimplemented", line)
def gen_return(self, builder: IRBuilder, value: Value, line: int) -> None:
if self.ret_reg is None:
if builder.fn_info.is_generator:
self.ret_reg = builder.make_spill_target(builder.ret_types[-1])
else:
self.ret_reg = Register(builder.ret_types[-1])
# assert needed because of apparent mypy bug... it loses track of the union
# and infers the type as object
assert isinstance(self.ret_reg, (Register, AssignmentTarget)), self.ret_reg
builder.assign(self.ret_reg, value, line)
builder.add(Goto(self.target))
| TryFinallyNonlocalControl |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/uninitializedVariable1.py | {
"start": 446,
"end": 533
} | class ____(TypedDict):
member1: str
member2: str
# Protocol classes are exempt.
| C |
python | django__django | tests/template_tests/syntax_tests/test_exceptions.py | {
"start": 196,
"end": 2726
} | class ____(SimpleTestCase):
@setup({"exception01": "{% extends 'nonexistent' %}"})
def test_exception01(self):
"""
Raise exception for invalid template name
"""
with self.assertRaises(TemplateDoesNotExist):
self.engine.render_to_string("exception01")
@setup({"exception02": "{% extends nonexistent %}"})
def test_exception02(self):
"""
Raise exception for invalid variable template name
"""
if self.engine.string_if_invalid:
with self.assertRaises(TemplateDoesNotExist):
self.engine.render_to_string("exception02")
else:
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string("exception02")
@setup(
{
"exception03": "{% extends 'inheritance01' %}"
"{% block first %}2{% endblock %}{% extends 'inheritance16' %}"
},
inheritance_templates,
)
def test_exception03(self):
"""
Raise exception for extra {% extends %} tags
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("exception03")
@setup(
{
"exception04": (
"{% extends 'inheritance17' %}{% block first %}{% echo 400 %}5678"
"{% endblock %}"
)
},
inheritance_templates,
)
def test_exception04(self):
"""
Raise exception for custom tags used in child with {% load %} tag in
parent, not in child
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("exception04")
@setup({"exception05": "{% block first %}{{ block.super }}{% endblock %}"})
def test_exception05(self):
"""
Raise exception for block.super used in base template
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string("exception05")
def test_unknown_origin_relative_path(self):
files = ["./nonexistent.html", "../nonexistent.html"]
for template_name in files:
with self.subTest(template_name=template_name):
msg = (
f"The relative path '{template_name}' cannot be evaluated due to "
"an unknown template origin."
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
Template(f"{{% extends '{template_name}' %}}")
| ExceptionsTests |
python | pytorch__pytorch | torch/distributed/tensor/experimental/_context_parallel/_attention.py | {
"start": 7357,
"end": 8027
} | class ____(_RingRotater):
"""Use all_to_all to send the kv to the next rank."""
def __init__(self, pg: dist.ProcessGroup, seq_dim: int) -> None:
self._pg = pg
self._seq_dim = seq_dim
self._buffer: torch.Tensor | None = None
def exchange_buffers(self, curr_buffer: torch.Tensor) -> None:
curr_buffer = curr_buffer.contiguous()
size = dist.get_world_size(self._pg)
dsts = list(range(1, size)) + [0]
self._buffer = ft_c.permute_tensor(curr_buffer, dsts, self._pg)
def next_buffer(self) -> torch.Tensor:
assert self._buffer is not None
return _maybe_wait(self._buffer)
| _AllToAllRotater |
python | streamlit__streamlit | lib/tests/streamlit/delta_generator_test.py | {
"start": 8723,
"end": 12969
} | class ____(DeltaGeneratorTestCase):
"""Test DeltaGenerator Class."""
def test_constructor(self):
"""Test default DeltaGenerator()."""
dg = DeltaGenerator()
assert not dg._cursor.is_locked
assert dg._cursor.index == 0
def test_constructor_with_id(self):
"""Test DeltaGenerator() with an id."""
cursor = LockedCursor(root_container=RootContainer.MAIN, index=1234)
dg = DeltaGenerator(root_container=RootContainer.MAIN, cursor=cursor)
assert dg._cursor.is_locked
assert dg._cursor.index == 1234
def test_can_deepcopy_delta_generators(self):
cursor = LockedCursor(root_container=RootContainer.MAIN, index=1234)
dg1 = DeltaGenerator(root_container=RootContainer.MAIN, cursor=cursor)
dg2 = deepcopy(dg1)
assert dg1._root_container == dg2._root_container
assert dg1._parent is None
assert dg2._parent is None
assert dg1._block_type == dg2._block_type
# Check that the internals of the Cursors look the same. Note the cursors
# themselves will be different objects so won't compare equal.
c1 = dg1._cursor
c2 = dg2._cursor
assert isinstance(c1, LockedCursor)
assert isinstance(c2, LockedCursor)
assert c1._root_container == c2._root_container
assert c1._index == c2._index
assert c1._parent_path == c2._parent_path
assert c1._props == c2._props
def test_enqueue_null(self):
# Test "Null" Delta generators
dg = DeltaGenerator(root_container=None)
new_dg = dg._enqueue("empty", EmptyProto())
assert dg == new_dg
@parameterized.expand([(RootContainer.MAIN,), (RootContainer.SIDEBAR,)])
def test_enqueue(self, container):
dg = DeltaGenerator(root_container=container)
assert dg._cursor.index == 0
assert container == dg._root_container
test_data = "some test data"
text_proto = TextProto()
text_proto.body = test_data
new_dg = dg._enqueue("text", text_proto)
assert dg != new_dg
assert dg._cursor.index == 1
assert container == new_dg._root_container
delta = self.get_delta_from_queue()
element = delta.new_element
assert delta.fragment_id == ""
assert element.text.body == test_data
def test_enqueue_same_id(self):
cursor = LockedCursor(root_container=RootContainer.MAIN, index=123)
dg = DeltaGenerator(root_container=RootContainer.MAIN, cursor=cursor)
assert dg._cursor.index == 123
test_data = "some test data"
text_proto = TextProto()
text_proto.body = test_data
new_dg = dg._enqueue("text", text_proto)
assert dg._cursor == new_dg._cursor
msg = self.get_message_from_queue()
# The last element in delta_path is the delta's index in its container.
assert make_delta_path(RootContainer.MAIN, (), 123) == msg.metadata.delta_path
assert msg.delta.new_element.text.body == test_data
def test_enqueue_adds_fragment_id_to_delta_if_set(self):
ctx = get_script_run_ctx()
ctx.current_fragment_id = "my_fragment_id"
dg = DeltaGenerator(root_container=RootContainer.MAIN)
dg._enqueue("text", TextProto())
delta = self.get_delta_from_queue()
assert delta.fragment_id == "my_fragment_id"
def test_enqueue_explodes_if_fragment_writes_to_sidebar(self):
ctx = get_script_run_ctx()
ctx.current_fragment_id = "my_fragment_id"
ctx.fragment_ids_this_run = ["my_fragment_id"]
exc = "is not supported"
with pytest.raises(StreamlitAPIException, match=exc):
get_dg_singleton_instance().sidebar_dg._enqueue("text", TextProto())
def test_enqueue_can_write_to_container_in_sidebar(self):
ctx = get_script_run_ctx()
ctx.current_fragment_id = "my_fragment_id"
ctx.fragment_ids_this_run = ["my_fragment_id"]
get_dg_singleton_instance().sidebar_dg.container().write("Hello world")
deltas = self.get_all_deltas_from_queue()
assert [d.fragment_id for d in deltas] == ["my_fragment_id", "my_fragment_id"]
| DeltaGeneratorClassTest |
python | pydantic__pydantic | pydantic/experimental/pipeline.py | {
"start": 1349,
"end": 1459
} | class ____:
left: _Pipeline[Any, Any]
right: _Pipeline[Any, Any]
@dataclass(**_slots_frozen)
| _PipelineOr |
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 57961,
"end": 58289
} | class ____:
"Test immediate stop"
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
from itertools import chain
def L(seqn):
"Test multiple tiers of iterators"
return chain(map(lambda x: x, R(Ig(G(seqn))))) # noqa: C417
| S |
python | has2k1__plotnine | plotnine/stats/stat.py | {
"start": 700,
"end": 11469
} | class ____(ABC, metaclass=Register):
"""Base class of all stats"""
DEFAULT_AES: dict[str, Any] = {}
"""Default aesthetics for the stat"""
REQUIRED_AES: set[str] = set()
"""Required aesthetics for the stat"""
NON_MISSING_AES: set[str] = set()
"""Required aesthetics for the stat"""
DEFAULT_PARAMS: dict[str, Any] = {}
"""Required parameters for the stat"""
CREATES: set[str] = set()
"""
Stats may modify existing columns or create extra
columns.
Any extra columns that may be created by the stat
should be specified in this set
see: stat_bin
Documentation for the aesthetics. It ie added under the
documentation for mapping parameter. Use {aesthetics_table}
placeholder to insert a table for all the aesthetics and
their default values.
"""
_aesthetics_doc = "{aesthetics_table}"
# Plot namespace, it gets its value when the plot is being
# built.
environment: Environment
def __init__(
self,
mapping: aes | None = None,
data: DataLike | None = None,
**kwargs: Any,
):
kwargs = data_mapping_as_kwargs((data, mapping), kwargs)
self._kwargs = kwargs # Will be used to create the geom
self.params = self.DEFAULT_PARAMS | {
k: v for k, v in kwargs.items() if k in self.DEFAULT_PARAMS
}
self.DEFAULT_AES = aes(**self.DEFAULT_AES)
self.aes_params = {
ae: kwargs[ae] for ae in self.aesthetics() & set(kwargs)
}
@staticmethod
def from_geom(geom: geom) -> stat:
"""
Return an instantiated stat object
stats should not override this method.
Parameters
----------
geom :
A geom object
Returns
-------
stat
A stat object
Raises
------
[](`~plotnine.exceptions.PlotnineError`) if unable to create a `stat`.
"""
name = geom.params["stat"]
kwargs = geom._kwargs
# More stable when reloading modules than
# using issubclass
if not isinstance(name, type) and hasattr(name, "compute_layer"):
return name
if isinstance(name, stat):
return name
elif isinstance(name, type) and issubclass(name, stat):
klass = name
elif isinstance(name, str):
if not name.startswith("stat_"):
name = f"stat_{name}"
klass = Registry[name]
else:
raise PlotnineError(f"Unknown stat of type {type(name)}")
valid_kwargs = (
klass.aesthetics() | klass.DEFAULT_PARAMS.keys()
) & kwargs.keys()
params = {k: kwargs[k] for k in valid_kwargs}
return klass(geom=geom, **params)
def __deepcopy__(self, memo: dict[Any, Any]) -> stat:
"""
Deep copy without copying the self.data dataframe
stats should not override this method.
"""
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
old = self.__dict__
new = result.__dict__
# don't make a _kwargs
shallow = {"_kwargs"}
for key, item in old.items():
if key in shallow:
new[key] = item
memo[id(new[key])] = new[key]
else:
new[key] = deepcopy(item, memo)
return result
@classmethod
def aesthetics(cls) -> set[str]:
"""
Return a set of all non-computed aesthetics for this stat.
stats should not override this method.
"""
aesthetics = cls.REQUIRED_AES.copy()
calculated = aes(**cls.DEFAULT_AES)._calculated
for ae in set(cls.DEFAULT_AES) - set(calculated):
aesthetics.add(ae)
return aesthetics
def use_defaults(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Combine data with defaults and set aesthetics from parameters
stats should not override this method.
Parameters
----------
data :
Data used for drawing the geom.
Returns
-------
out :
Data used for drawing the geom.
"""
missing = (
self.aesthetics() - set(self.aes_params.keys()) - set(data.columns)
)
for ae in missing - self.REQUIRED_AES:
if self.DEFAULT_AES[ae] is not None:
data[ae] = self.DEFAULT_AES[ae]
missing = self.aes_params.keys() - set(data.columns)
for ae in self.aes_params:
data[ae] = self.aes_params[ae]
return data
def setup_params(self, data: pd.DataFrame):
"""
Override this to verify and/or adjust parameters
Parameters
----------
data :
Data
Returns
-------
out :
Parameters used by the stats.
"""
def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Override to modify data before compute_layer is called
Parameters
----------
data :
Data
Returns
-------
out :
Data
"""
return data
def finish_layer(self, data: pd.DataFrame) -> pd.DataFrame:
"""
Modify data after the aesthetics have been mapped
This can be used by stats that require access to the mapped
values of the computed aesthetics, part 3 as shown below.
1. stat computes and creates variables
2. variables mapped to aesthetics
3. stat sees and modifies data according to the
aesthetic values
The default to is to do nothing.
Parameters
----------
data :
Data for the layer
params :
Parameters
Returns
-------
data :
Modified data
"""
return data
def compute_layer(
self, data: pd.DataFrame, layout: Layout
) -> pd.DataFrame:
"""
Calculate statistics for this layers
This is the top-most computation method for the
stat. It does not do any computations, but it
knows how to verify the data, partition it call the
next computation method and merge results.
stats should not override this method.
Parameters
----------
data :
Data points for all objects in a layer.
layout :
Panel layout information
"""
check_required_aesthetics(
self.REQUIRED_AES,
list(data.columns) + list(self.params.keys()),
self.__class__.__name__,
)
data = remove_missing(
data,
na_rm=self.params.get("na_rm", False),
vars=list(self.REQUIRED_AES | self.NON_MISSING_AES),
name=self.__class__.__name__,
finite=True,
)
def fn(pdata):
"""
Compute function helper
"""
# Given data belonging to a specific panel, grab
# the corresponding scales and call the method
# that does the real computation
if len(pdata) == 0:
return pdata
pscales = layout.get_scales(pdata["PANEL"].iloc[0])
return self.compute_panel(pdata, pscales)
return groupby_apply(data, "PANEL", fn)
def compute_panel(self, data: pd.DataFrame, scales: pos_scales):
"""
Calculate the statistics for all the groups
Return the results in a single dataframe.
This is a default function that can be overridden
by individual stats
Parameters
----------
data :
data for the computing
scales :
x (``scales.x``) and y (``scales.y``) scale objects.
The most likely reason to use scale information is
to find out the physical size of a scale. e.g.
```python
range_x = scales.x.dimension()
```
params :
The parameters for the stat. It includes default
values if user did not set a particular parameter.
"""
if not len(data):
return type(data)()
stats = []
for _, old in data.groupby("group"):
new = self.compute_group(old, scales)
new.reset_index(drop=True, inplace=True)
unique = uniquecols(old)
missing = unique.columns.difference(new.columns)
idx = [0] * len(new)
u = unique.loc[idx, missing].reset_index(drop=True)
# concat can have problems with empty dataframes that
# have an index
if u.empty and len(u):
u = type(data)()
group_result = pd.concat([new, u], axis=1)
stats.append(group_result)
stats = pd.concat(stats, axis=0, ignore_index=True)
# Note: If the data coming in has columns with non-unique
# values with-in group(s), this implementation loses the
# columns. Individual stats may want to do some preparation
# before then fall back on this implementation or override
# it completely.
return stats
def compute_group(
self, data: pd.DataFrame, scales: pos_scales
) -> pd.DataFrame:
"""
Calculate statistics for the group
All stats should implement this method
Parameters
----------
data :
Data for a group
scales :
x (``scales.x``) and y (``scales.y``) scale objects.
The most likely reason to use scale information is
to find out the physical size of a scale. e.g.
```python
range_x = scales.x.dimension()
```
params :
Parameters
"""
msg = "{} should implement this method."
raise NotImplementedError(msg.format(self.__class__.__name__))
def __radd__(self, other: ggplot) -> ggplot:
"""
Add layer representing stat object on the right
Parameters
----------
gg :
ggplot object
Returns
-------
out :
ggplot object with added layer
"""
other += self.to_layer() # Add layer
return other
def to_layer(self) -> layer:
"""
Make a layer that represents this stat
Returns
-------
out :
Layer
"""
# Create, geom from stat, then layer from geom
from ..geoms.geom import geom
return layer.from_geom(geom.from_stat(self))
| stat |
python | python-attrs__attrs | src/attr/exceptions.py | {
"start": 1073,
"end": 1270
} | class ____(RuntimeError):
"""
A default has been set when defining the field and is attempted to be reset
using the decorator.
.. versionadded:: 17.1.0
"""
| DefaultAlreadySetError |
python | numba__numba | numba/core/typing/enumdecl.py | {
"start": 498,
"end": 789
} | class ____(AttributeTemplate):
key = types.EnumClass
def generic_resolve(self, ty, attr):
"""
Resolve attributes of an enum class as enum members.
"""
if attr in ty.instance_class.__members__:
return ty.member_type
@infer
| EnumClassAttribute |
python | joke2k__faker | faker/providers/internet/ro_RO/__init__.py | {
"start": 46,
"end": 795
} | class ____(InternetProvider):
user_name_formats = (
"{{last_name_female}}.{{first_name_female}}",
"{{last_name_female}}.{{first_name_female}}",
"{{last_name_male}}.{{first_name_male}}",
"{{last_name_male}}.{{first_name_male}}",
"{{first_name_female}}.{{last_name_female}}",
"{{first_name_male}}.{{last_name_male}}",
"{{first_name}}##",
"?{{last_name}}",
"?{{last_name}}",
"?{{last_name}}",
)
email_formats = ("{{user_name}}@{{free_email_domain}}",)
free_email_domains = (
"email.ro",
"gmail.com",
"kappa.ro",
"acasa.ro",
"zzup.ro",
"141.ro",
"post.ro",
)
tlds = ("ro", "com", "ro")
| Provider |
python | spack__spack | lib/spack/spack/vendor/macholib/util.py | {
"start": 1223,
"end": 7133
} | class ____(object):
"""
A proxy for file-like objects that exposes a given view of a file
"""
def __init__(self, fileobj, start, size):
self._fileobj = fileobj
self._start = start
self._end = start + size
def __repr__(self):
return "<fileview [%d, %d] %r>" % (self._start, self._end, self._fileobj)
def tell(self):
return self._fileobj.tell() - self._start
def _checkwindow(self, seekto, op):
if not (self._start <= seekto <= self._end):
raise IOError(
"%s to offset %d is outside window [%d, %d]"
% (op, seekto, self._start, self._end)
)
def seek(self, offset, whence=0):
seekto = offset
if whence == 0:
seekto += self._start
elif whence == 1:
seekto += self._fileobj.tell()
elif whence == 2:
seekto += self._end
else:
raise IOError("Invalid whence argument to seek: %r" % (whence,))
self._checkwindow(seekto, "seek")
self._fileobj.seek(seekto)
def write(self, bytes):
here = self._fileobj.tell()
self._checkwindow(here, "write")
self._checkwindow(here + len(bytes), "write")
self._fileobj.write(bytes)
def read(self, size=sys.maxsize):
if size < 0:
raise ValueError(
"Invalid size %s while reading from %s", size, self._fileobj
)
here = self._fileobj.tell()
self._checkwindow(here, "read")
bytes = min(size, self._end - here)
return self._fileobj.read(bytes)
def mergecopy(src, dest):
"""
copy2, but only if the destination isn't up to date
"""
if os.path.exists(dest) and os.stat(dest).st_mtime >= os.stat(src).st_mtime:
return
copy2(src, dest)
def mergetree(src, dst, condition=None, copyfn=mergecopy, srcbase=None):
"""
Recursively merge a directory tree using mergecopy().
"""
src = fsencoding(src)
dst = fsencoding(dst)
if srcbase is None:
srcbase = src
names = map(fsencoding, os.listdir(src))
try:
os.makedirs(dst)
except OSError:
pass
errors = []
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
if condition is not None and not condition(srcname):
continue
try:
if os.path.islink(srcname):
realsrc = os.readlink(srcname)
os.symlink(realsrc, dstname)
elif os.path.isdir(srcname):
mergetree(
srcname,
dstname,
condition=condition,
copyfn=copyfn,
srcbase=srcbase,
)
else:
copyfn(srcname, dstname)
except (IOError, os.error) as why:
errors.append((srcname, dstname, why))
if errors:
raise IOError(errors)
def sdk_normalize(filename):
"""
Normalize a path to strip out the SDK portion, normally so that it
can be decided whether it is in a system path or not.
"""
if filename.startswith("/Developer/SDKs/"):
pathcomp = filename.split("/")
del pathcomp[1:4]
filename = "/".join(pathcomp)
return filename
NOT_SYSTEM_FILES = []
def in_system_path(filename):
"""
Return True if the file is in a system path
"""
fn = sdk_normalize(os.path.realpath(filename))
if fn.startswith("/usr/local/"):
return False
elif fn.startswith("/System/") or fn.startswith("/usr/"):
if fn in NOT_SYSTEM_FILES:
return False
return True
else:
return False
def has_filename_filter(module):
"""
Return False if the module does not have a filename attribute
"""
return getattr(module, "filename", None) is not None
def get_magic():
"""
Get a list of valid Mach-O header signatures, not including the fat header
"""
return MAGIC
def is_platform_file(path):
"""
Return True if the file is Mach-O
"""
if not os.path.exists(path) or os.path.islink(path):
return False
# If the header is fat, we need to read into the first arch
with open(path, "rb") as fileobj:
bytes = fileobj.read(MAGIC_LEN)
if bytes == FAT_MAGIC_BYTES:
# Read in the fat header
fileobj.seek(0)
header = mach_o.fat_header.from_fileobj(fileobj, _endian_=">")
if header.nfat_arch < 1:
return False
# Read in the first fat arch header
arch = mach_o.fat_arch.from_fileobj(fileobj, _endian_=">")
fileobj.seek(arch.offset)
# Read magic off the first header
bytes = fileobj.read(MAGIC_LEN)
for magic in MAGIC:
if bytes == magic:
return True
return False
def iter_platform_files(dst):
"""
Walk a directory and yield each full path that is a Mach-O file
"""
for root, _dirs, files in os.walk(dst):
for fn in files:
fn = os.path.join(root, fn)
if is_platform_file(fn):
yield fn
def strip_files(files, argv_max=(256 * 1024)):
"""
Strip a list of files
"""
tostrip = [(fn, flipwritable(fn)) for fn in files]
while tostrip:
cmd = list(STRIPCMD)
flips = []
pathlen = sum(len(s) + 1 for s in cmd)
while pathlen < argv_max:
if not tostrip:
break
added, flip = tostrip.pop()
pathlen += len(added) + 1
cmd.append(added)
flips.append((added, flip))
else:
cmd.pop()
tostrip.append(flips.pop())
os.spawnv(os.P_WAIT, cmd[0], cmd)
for args in flips:
flipwritable(*args)
| fileview |
python | kamyu104__LeetCode-Solutions | Python/intersection-of-two-arrays.py | {
"start": 812,
"end": 1826
} | class ____(object):
def intersection(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
if len(nums1) > len(nums2):
return self.intersection(nums2, nums1)
def binary_search(compare, nums, left, right, target):
while left < right:
mid = left + (right - left) / 2
if compare(nums[mid], target):
right = mid
else:
left = mid + 1
return left
nums1.sort(), nums2.sort()
res = []
left = 0
for i in nums1:
left = binary_search(lambda x, y: x >= y, nums2, left, len(nums2), i)
if left != len(nums2) and nums2[left] == i:
res += i,
left = binary_search(lambda x, y: x > y, nums2, left, len(nums2), i)
return res
# Time: O(max(m, n) * log(max(m, n)))
# Space: O(1)
# Two pointers solution.
| Solution2 |
python | ray-project__ray | release/nightly_tests/dask_on_ray/large_scale_test.py | {
"start": 10102,
"end": 16071
} | class ____:
@staticmethod
def save_xarray(xarray_dataset, filename, dirpath):
"""
Save Xarray in zarr format.
"""
filepath = os.path.join(dirpath, filename)
if os.path.exists(filepath):
return "already_exists"
try:
xarray_dataset.to_zarr(filepath)
except Exception as e:
return "failure, exception = {}".format(e)
return "success"
@staticmethod
def save_all_xarrays(
xarray_filename_pairs: List[Tuple],
ray_scheduler,
dirpath: str,
batch_size: int,
):
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
for batch_idx, batch in enumerate(chunks(xarray_filename_pairs, batch_size)):
delayed_tasks = list()
for xarray_filename_pair in batch:
delayed_tasks.append(
dask.delayed(SaveRoutines.save_xarray)(
xarray_dataset=xarray_filename_pair[0],
filename=xarray_filename_pair[1],
dirpath=dirpath,
)
)
logging.info(
"[Batch Index {}] Batch size {}: Sending work to Ray Cluster.".format(
batch_idx, batch_size
)
)
res = []
try:
res = dask.compute(delayed_tasks, scheduler=ray_scheduler)
except Exception:
logging.warning(
"[Batch Index {}] Exception while computing batch!".format(
batch_idx
)
)
finally:
logging.info("[Batch Index {}], Result = {}".format(batch_idx, res))
def lazy_create_xarray_filename_pairs(
test_spec: TestSpec,
) -> List[Tuple[xarray.Dataset, str]]:
n_fft = 4096
hop_length = int(SAMPLING_RATE / 100)
decimate_factor = 100
logging.info("Creating 1 month lazy Xarray with decimation and FFT")
xr1 = LoadRoutines.lazy_load_xarray_one_month(test_spec)
xr2 = TransformRoutines.decimate_xarray_after_load(
xr_input=xr1, decimate_factor=decimate_factor
)
xr3 = TransformRoutines.fix_last_chunk_error(xr2, n_overlap=n_fft - hop_length)
xr4 = TransformRoutines.fft_xarray(xr_input=xr3, n_fft=n_fft, hop_length=hop_length)
num_segments = int(MINUTES_IN_A_MONTH / NUM_MINS_PER_OUTPUT_FILE)
start_time = 0
xarray_filename_pairs: List[Tuple[xarray.Dataset, str]] = list()
timestamp = int(time.time())
for step in range(num_segments):
segment_start = start_time + (NUM_MINS_PER_OUTPUT_FILE * step) # in minutes
segment_start_index = int(
SECONDS_IN_A_MIN
* NUM_MINS_PER_OUTPUT_FILE
* step
* (SAMPLING_RATE / decimate_factor)
/ hop_length
)
segment_end = segment_start + NUM_MINS_PER_OUTPUT_FILE
segment_len_sec = (segment_end - segment_start) * SECONDS_IN_A_MIN
segment_end_index = int(
segment_start_index + segment_len_sec * SAMPLING_RATE / hop_length
)
xr_segment = deepcopy(
xr4.isel(time=slice(segment_start_index, segment_end_index))
)
xarray_filename_pairs.append(
(xr_segment, "xarray_step_{}_{}.zarr".format(step, timestamp))
)
return xarray_filename_pairs
def parse_script_args():
parser = argparse.ArgumentParser()
parser.add_argument("--num_workers", type=int)
parser.add_argument("--worker_obj_store_size_in_gb", type=int)
parser.add_argument("--error_rate", type=float, default=0)
parser.add_argument("--data_save_path", type=str)
parser.add_argument(
"--trigger_object_spill",
dest="trigger_object_spill",
action="store_true",
)
parser.set_defaults(trigger_object_spill=False)
return parser.parse_known_args()
def main():
args, unknown = parse_script_args()
logging.info("Received arguments: {}".format(args))
# Create test spec
test_spec = TestSpec(
num_workers=args.num_workers,
worker_obj_store_size_in_gb=args.worker_obj_store_size_in_gb,
error_rate=args.error_rate,
trigger_object_spill=args.trigger_object_spill,
)
logging.info("Created test spec: {}".format(test_spec))
# Create the data save path if it doesn't exist.
data_save_path = args.data_save_path
if not os.path.exists(data_save_path):
os.makedirs(data_save_path, mode=0o777, exist_ok=True)
# Lazily construct Xarrays
xarray_filename_pairs = lazy_create_xarray_filename_pairs(test_spec)
# Connect to the Ray cluster
ray.init(address="auto")
monitor_actor = monitor_memory_usage()
# Save all the Xarrays to disk; this will trigger
# Dask computations on Ray.
logging.info("Saving {} xarrays..".format(len(xarray_filename_pairs)))
SaveRoutines.save_all_xarrays(
xarray_filename_pairs=xarray_filename_pairs,
dirpath=data_save_path,
batch_size=test_spec.batch_size,
ray_scheduler=ray_dask_get,
)
ray.get(monitor_actor.stop_run.remote())
used_gb, usage = ray.get(monitor_actor.get_peak_memory_info.remote())
print(f"Peak memory usage: {round(used_gb, 2)}GB")
print(f"Peak memory usage per processes:\n {usage}")
try:
print(ray._private.internal_api.memory_summary(stats_only=True))
except Exception as e:
print(f"Warning: query memory summary failed: {e}")
with open(os.environ["TEST_OUTPUT_JSON"], "w") as f:
f.write(
json.dumps(
{
"_peak_memory": round(used_gb, 2),
"_peak_process_memory": usage,
}
)
)
if __name__ == "__main__":
main()
| SaveRoutines |
python | pytest-dev__pytest-asyncio | docs/how-to-guides/class_scoped_loop_example.py | {
"start": 73,
"end": 374
} | class ____:
loop: asyncio.AbstractEventLoop
async def test_remember_loop(self):
TestInOneEventLoopPerClass.loop = asyncio.get_running_loop()
async def test_assert_same_loop(self):
assert asyncio.get_running_loop() is TestInOneEventLoopPerClass.loop
| TestInOneEventLoopPerClass |
python | doocs__leetcode | solution/0600-0699/0654.Maximum Binary Tree/Solution2.py | {
"start": 758,
"end": 1648
} | class ____:
def __init__(self, nums):
self.nums = nums
n = len(nums)
self.tr = [Node() for _ in range(n << 2)]
self.build(1, 1, n)
def build(self, u, l, r):
self.tr[u].l, self.tr[u].r = l, r
if l == r:
self.tr[u].v = self.nums[l - 1]
return
mid = (l + r) >> 1
self.build(u << 1, l, mid)
self.build(u << 1 | 1, mid + 1, r)
self.pushup(u)
def query(self, u, l, r):
if self.tr[u].l >= l and self.tr[u].r <= r:
return self.tr[u].v
mid = (self.tr[u].l + self.tr[u].r) >> 1
v = 0
if l <= mid:
v = max(v, self.query(u << 1, l, r))
if r > mid:
v = max(v, self.query(u << 1 | 1, l, r))
return v
def pushup(self, u):
self.tr[u].v = max(self.tr[u << 1].v, self.tr[u << 1 | 1].v)
| SegmentTree |
python | doocs__leetcode | solution/2300-2399/2317.Maximum XOR After Operations/Solution.py | {
"start": 0,
"end": 99
} | class ____:
def maximumXOR(self, nums: List[int]) -> int:
return reduce(or_, nums)
| Solution |
python | keon__algorithms | algorithms/graph/dijkstra.py | {
"start": 58,
"end": 1625
} | class ____():
"""
A fully connected directed graph with edge weights
"""
def __init__(self, vertex_count):
self.vertex_count = vertex_count
self.graph = [[0 for _ in range(vertex_count)] for _ in range(vertex_count)]
def min_distance(self, dist, min_dist_set):
"""
Find the vertex that is closest to the visited set
"""
min_dist = float("inf")
for target in range(self.vertex_count):
if min_dist_set[target]:
continue
if dist[target] < min_dist:
min_dist = dist[target]
min_index = target
return min_index
def dijkstra(self, src):
"""
Given a node, returns the shortest distance to every other node
"""
dist = [float("inf")] * self.vertex_count
dist[src] = 0
min_dist_set = [False] * self.vertex_count
for _ in range(self.vertex_count):
#minimum distance vertex that is not processed
source = self.min_distance(dist, min_dist_set)
#put minimum distance vertex in shortest tree
min_dist_set[source] = True
#Update dist value of the adjacent vertices
for target in range(self.vertex_count):
if self.graph[source][target] <= 0 or min_dist_set[target]:
continue
if dist[target] > dist[source] + self.graph[source][target]:
dist[target] = dist[source] + self.graph[source][target]
return dist
| Dijkstra |
python | pytest-dev__pytest-django | tests/test_fixtures.py | {
"start": 11550,
"end": 15700
} | class ____:
"""Tests for the settings fixture, order matters"""
def test_modify_existing(self, settings) -> None:
assert settings.SECRET_KEY == "foobar"
assert real_settings.SECRET_KEY == "foobar"
settings.SECRET_KEY = "spam"
assert settings.SECRET_KEY == "spam"
assert real_settings.SECRET_KEY == "spam"
def test_modify_existing_again(self, settings) -> None:
assert settings.SECRET_KEY == "foobar"
assert real_settings.SECRET_KEY == "foobar"
def test_new(self, settings) -> None:
assert not hasattr(settings, "SPAM")
assert not hasattr(real_settings, "SPAM")
settings.SPAM = "ham"
assert settings.SPAM == "ham"
assert real_settings.SPAM == "ham"
def test_new_again(self, settings) -> None:
assert not hasattr(settings, "SPAM")
assert not hasattr(real_settings, "SPAM")
def test_deleted(self, settings) -> None:
assert hasattr(settings, "SECRET_KEY")
assert hasattr(real_settings, "SECRET_KEY")
del settings.SECRET_KEY
assert not hasattr(settings, "SECRET_KEY")
assert not hasattr(real_settings, "SECRET_KEY")
def test_deleted_again(self, settings) -> None:
assert hasattr(settings, "SECRET_KEY")
assert hasattr(real_settings, "SECRET_KEY")
def test_signals(self, settings) -> None:
result = []
def assert_signal(
signal, # noqa: ARG001
sender, # noqa: ARG001
setting,
value,
enter,
) -> None:
result.append((setting, value, enter))
from django.test.signals import setting_changed
setting_changed.connect(assert_signal)
result = []
settings.SECRET_KEY = "change 1"
settings.SECRET_KEY = "change 2"
assert result == [
("SECRET_KEY", "change 1", True),
("SECRET_KEY", "change 2", True),
]
result = []
settings.FOOBAR = "abc123"
assert sorted(result) == [("FOOBAR", "abc123", True)]
def test_modification_signal(self, django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module(
"""
import pytest
from django.conf import settings
from django.test.signals import setting_changed
@pytest.fixture(autouse=True, scope='session')
def settings_change_printer():
def receiver(sender, **kwargs):
fmt_dict = {'actual_value': getattr(settings, kwargs['setting'],
'<<does not exist>>')}
fmt_dict.update(kwargs)
print(
'Setting changed: '
'enter=%(enter)s,setting=%(setting)s,'
'value=%(value)s,actual_value=%(actual_value)s'
% fmt_dict
)
setting_changed.connect(receiver, weak=False)
def test_set(settings):
settings.SECRET_KEY = 'change 1'
settings.SECRET_KEY = 'change 2'
def test_set_non_existent(settings):
settings.FOOBAR = 'abc123'
"""
)
result = django_pytester.runpytest_subprocess("--tb=short", "-v", "-s")
# test_set
result.stdout.fnmatch_lines(
[
"*Setting changed: enter=True,setting=SECRET_KEY,value=change 1*",
"*Setting changed: enter=True,setting=SECRET_KEY,value=change 2*",
"*Setting changed: enter=False,setting=SECRET_KEY,value=change 1*",
"*Setting changed: enter=False,setting=SECRET_KEY,value=foobar*",
]
)
result.stdout.fnmatch_lines(
[
"*Setting changed: enter=True,setting=FOOBAR,value=abc123*",
(
"*Setting changed: enter=False,setting=FOOBAR,value=None,"
"actual_value=<<does not exist>>*"
),
]
)
| TestSettings |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/models.py | {
"start": 16813,
"end": 19354
} | class ____(Base):
__tablename__ = "datasets"
__table_args__ = (
PrimaryKeyConstraint("experiment_id", "name", "digest", name="dataset_pk"),
Index(f"index_{__tablename__}_dataset_uuid", "dataset_uuid"),
Index(
f"index_{__tablename__}_experiment_id_dataset_source_type",
"experiment_id",
"dataset_source_type",
),
)
dataset_uuid = Column(String(36), nullable=False)
"""
Dataset UUID: `String` (limit 36 characters). Defined as *Non-null* in schema.
Part of *Primary Key* for ``datasets`` table.
"""
experiment_id = Column(Integer, ForeignKey("experiments.experiment_id", ondelete="CASCADE"))
"""
Experiment ID to which this dataset belongs: *Foreign Key* into ``experiments`` table.
"""
name = Column(String(500), nullable=False)
"""
Param name: `String` (limit 500 characters). Defined as *Non-null* in schema.
Part of *Primary Key* for ``datasets`` table.
"""
digest = Column(String(36), nullable=False)
"""
Param digest: `String` (limit 500 characters). Defined as *Non-null* in schema.
Part of *Primary Key* for ``datasets`` table.
"""
dataset_source_type = Column(String(36), nullable=False)
"""
Param dataset_source_type: `String` (limit 36 characters). Defined as *Non-null* in schema.
"""
dataset_source = Column(UnicodeText, nullable=False)
"""
Param dataset_source: `UnicodeText`. Defined as *Non-null* in schema.
"""
dataset_schema = Column(UnicodeText, nullable=True)
"""
Param dataset_schema: `UnicodeText`.
"""
dataset_profile = Column(UnicodeText, nullable=True)
"""
Param dataset_profile: `UnicodeText`.
"""
def __repr__(self):
return "<SqlDataset ({}, {}, {}, {}, {}, {}, {}, {})>".format(
self.dataset_uuid,
self.experiment_id,
self.name,
self.digest,
self.dataset_source_type,
self.dataset_source,
self.dataset_schema,
self.dataset_profile,
)
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
Returns:
mlflow.entities.Dataset.
"""
return Dataset(
name=self.name,
digest=self.digest,
source_type=self.dataset_source_type,
source=self.dataset_source,
schema=self.dataset_schema,
profile=self.dataset_profile,
)
| SqlDataset |
python | sympy__sympy | sympy/combinatorics/schur_number.py | {
"start": 327,
"end": 4437
} | class ____(Function):
r"""
This function creates a SchurNumber object
which is evaluated for `k \le 5` otherwise only
the lower bound information can be retrieved.
Examples
========
>>> from sympy.combinatorics.schur_number import SchurNumber
Since S(3) = 13, hence the output is a number
>>> SchurNumber(3)
13
We do not know the Schur number for values greater than 5, hence
only the object is returned
>>> SchurNumber(6)
SchurNumber(6)
Now, the lower bound information can be retrieved using lower_bound()
method
>>> SchurNumber(6).lower_bound()
536
"""
@classmethod
def eval(cls, k):
if k.is_Number:
if k is S.Infinity:
return S.Infinity
if k.is_zero:
return S.Zero
if not k.is_integer or k.is_negative:
raise ValueError("k should be a positive integer")
first_known_schur_numbers = {1: 1, 2: 4, 3: 13, 4: 44, 5: 160}
if k <= 5:
return Integer(first_known_schur_numbers[k])
def lower_bound(self):
f_ = self.args[0]
# Improved lower bounds known for S(6) and S(7)
if f_ == 6:
return Integer(536)
if f_ == 7:
return Integer(1680)
# For other cases, use general expression
if f_.is_Integer:
return 3*self.func(f_ - 1).lower_bound() - 1
return (3**f_ - 1)/2
def _schur_subsets_number(n):
if n is S.Infinity:
raise ValueError("Input must be finite")
if n <= 0:
raise ValueError("n must be a non-zero positive integer.")
elif n <= 3:
min_k = 1
else:
min_k = math.ceil(math.log(2*n + 1, 3))
return Integer(min_k)
def schur_partition(n):
"""
This function returns the partition in the minimum number of sum-free subsets
according to the lower bound given by the Schur Number.
Parameters
==========
n: a number
n is the upper limit of the range [1, n] for which we need to find and
return the minimum number of free subsets according to the lower bound
of schur number
Returns
=======
List of lists
List of the minimum number of sum-free subsets
Notes
=====
It is possible for some n to make the partition into less
subsets since the only known Schur numbers are:
S(1) = 1, S(2) = 4, S(3) = 13, S(4) = 44.
e.g for n = 44 the lower bound from the function above is 5 subsets but it has been proven
that can be done with 4 subsets.
Examples
========
For n = 1, 2, 3 the answer is the set itself
>>> from sympy.combinatorics.schur_number import schur_partition
>>> schur_partition(2)
[[1, 2]]
For n > 3, the answer is the minimum number of sum-free subsets:
>>> schur_partition(5)
[[3, 2], [5], [1, 4]]
>>> schur_partition(8)
[[3, 2], [6, 5, 8], [1, 4, 7]]
"""
if isinstance(n, Basic) and not n.is_Number:
raise ValueError("Input value must be a number")
number_of_subsets = _schur_subsets_number(n)
if n == 1:
sum_free_subsets = [[1]]
elif n == 2:
sum_free_subsets = [[1, 2]]
elif n == 3:
sum_free_subsets = [[1, 2, 3]]
else:
sum_free_subsets = [[1, 4], [2, 3]]
while len(sum_free_subsets) < number_of_subsets:
sum_free_subsets = _generate_next_list(sum_free_subsets, n)
missed_elements = [3*k + 1 for k in range(len(sum_free_subsets), (n-1)//3 + 1)]
sum_free_subsets[-1] += missed_elements
return sum_free_subsets
def _generate_next_list(current_list, n):
new_list = []
for item in current_list:
temp_1 = [number*3 for number in item if number*3 <= n]
temp_2 = [number*3 - 1 for number in item if number*3 - 1 <= n]
new_item = temp_1 + temp_2
new_list.append(new_item)
last_list = [3*k + 1 for k in range(len(current_list)+1) if 3*k + 1 <= n]
new_list.append(last_list)
current_list = new_list
return current_list
| SchurNumber |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 45788,
"end": 46941
} | class ____(Stmt):
"""
An `initializes` declaration.
Attributes
----------
annotation : Name | Attribute | Subscript
An imported module which this module initializes
"""
__slots__ = ("annotation",)
_only_empty_fields = ("value",)
def validate(self):
module_ref = self.annotation
if isinstance(module_ref, Subscript):
dependencies = as_tuple(module_ref.slice)
module_ref = module_ref.value
for item in dependencies:
if not isinstance(item, NamedExpr):
raise StructureException(
"invalid dependency (hint: should be [dependency := dependency]", item
)
if not isinstance(item.target, (Name, Attribute)):
raise StructureException("invalid module", item.target)
if not isinstance(item.value, (Name, Attribute)):
raise StructureException("invalid module", item.target)
if not isinstance(module_ref, (Name, Attribute)):
raise StructureException("invalid module", module_ref)
| InitializesDecl |
python | kamyu104__LeetCode-Solutions | Python/shortest-path-with-alternating-colors.py | {
"start": 97,
"end": 987
} | class ____(object):
def shortestAlternatingPaths(self, n, red_edges, blue_edges):
"""
:type n: int
:type red_edges: List[List[int]]
:type blue_edges: List[List[int]]
:rtype: List[int]
"""
neighbors = [[set() for _ in xrange(2)] for _ in xrange(n)]
for i, j in red_edges:
neighbors[i][0].add(j)
for i, j in blue_edges:
neighbors[i][1].add(j)
INF = max(2*n-3, 0)+1
dist = [[INF, INF] for i in xrange(n)]
dist[0] = [0, 0]
q = collections.deque([(0, 0), (0, 1)])
while q:
i, c = q.popleft()
for j in neighbors[i][c]:
if dist[j][c] != INF:
continue
dist[j][c] = dist[i][1^c]+1
q.append((j, 1^c))
return [x if x != INF else -1 for x in map(min, dist)]
| Solution |
python | pandas-dev__pandas | asv_bench/benchmarks/multiindex_object.py | {
"start": 9864,
"end": 10819
} | class ____:
params = [
("string", "int", "datetime"),
]
param_names = ["dtype"]
def setup(self, dtype):
N = 10**5
level1 = range(1000)
level2 = date_range(start="1/1/2000", periods=N // 1000)
dates_midx = MultiIndex.from_product([level1, level2])
level2 = range(N // 1000)
int_midx = MultiIndex.from_product([level1, level2])
level2 = Index([f"i-{i}" for i in range(N // 1000)], dtype=object).values
str_midx = MultiIndex.from_product([level1, level2])
data = {
"datetime": dates_midx,
"int": int_midx,
"string": str_midx,
}
self.midx = data[dtype]
self.values_small = self.midx[:100]
self.values_large = self.midx[100:]
def time_isin_small(self, dtype):
self.midx.isin(self.values_small)
def time_isin_large(self, dtype):
self.midx.isin(self.values_large)
| Isin |
python | joke2k__faker | tests/providers/test_job.py | {
"start": 5102,
"end": 5351
} | class ____:
"""Test vi_VN job provider"""
def test_job(self, faker, num_samples):
for _ in range(num_samples):
job = faker.job()
assert isinstance(job, str)
assert job in ViVNJobProvider.jobs
| TestViVn |
python | getsentry__sentry-python | sentry_sdk/tracing.py | {
"start": 26944,
"end": 45108
} | class ____(Span):
"""The Transaction is the root element that holds all the spans
for Sentry performance instrumentation.
:param name: Identifier of the transaction.
Will show up in the Sentry UI.
:param parent_sampled: Whether the parent transaction was sampled.
If True this transaction will be kept, if False it will be discarded.
:param baggage: The W3C baggage header value.
(see https://www.w3.org/TR/baggage/)
:param source: A string describing the source of the transaction name.
This will be used to determine the transaction's type.
See https://develop.sentry.dev/sdk/event-payloads/transaction/#transaction-annotations
for more information. Default "custom".
:param kwargs: Additional arguments to be passed to the Span constructor.
See :py:class:`sentry_sdk.tracing.Span` for available arguments.
"""
__slots__ = (
"name",
"source",
"parent_sampled",
# used to create baggage value for head SDKs in dynamic sampling
"sample_rate",
"_measurements",
"_contexts",
"_profile",
"_continuous_profile",
"_baggage",
"_sample_rand",
)
def __init__( # type: ignore[misc]
self,
name="", # type: str
parent_sampled=None, # type: Optional[bool]
baggage=None, # type: Optional[Baggage]
source=TransactionSource.CUSTOM, # type: str
**kwargs, # type: Unpack[SpanKwargs]
):
# type: (...) -> None
super().__init__(**kwargs)
self.name = name
self.source = source
self.sample_rate = None # type: Optional[float]
self.parent_sampled = parent_sampled
self._measurements = {} # type: Dict[str, MeasurementValue]
self._contexts = {} # type: Dict[str, Any]
self._profile = None # type: Optional[Profile]
self._continuous_profile = None # type: Optional[ContinuousProfile]
self._baggage = baggage
baggage_sample_rand = (
None if self._baggage is None else self._baggage._sample_rand()
)
if baggage_sample_rand is not None:
self._sample_rand = baggage_sample_rand
else:
self._sample_rand = _generate_sample_rand(self.trace_id)
def __repr__(self):
# type: () -> str
return (
"<%s(name=%r, op=%r, trace_id=%r, span_id=%r, parent_span_id=%r, sampled=%r, source=%r, origin=%r)>"
% (
self.__class__.__name__,
self.name,
self.op,
self.trace_id,
self.span_id,
self.parent_span_id,
self.sampled,
self.source,
self.origin,
)
)
def _possibly_started(self):
# type: () -> bool
"""Returns whether the transaction might have been started.
If this returns False, we know that the transaction was not started
with sentry_sdk.start_transaction, and therefore the transaction will
be discarded.
"""
# We must explicitly check self.sampled is False since self.sampled can be None
return self._span_recorder is not None or self.sampled is False
def __enter__(self):
# type: () -> Transaction
if not self._possibly_started():
logger.debug(
"Transaction was entered without being started with sentry_sdk.start_transaction."
"The transaction will not be sent to Sentry. To fix, start the transaction by"
"passing it to sentry_sdk.start_transaction."
)
super().__enter__()
if self._profile is not None:
self._profile.__enter__()
return self
def __exit__(self, ty, value, tb):
# type: (Optional[Any], Optional[Any], Optional[Any]) -> None
if self._profile is not None:
self._profile.__exit__(ty, value, tb)
if self._continuous_profile is not None:
self._continuous_profile.stop()
super().__exit__(ty, value, tb)
@property
def containing_transaction(self):
# type: () -> Transaction
"""The root element of the span tree.
In the case of a transaction it is the transaction itself.
"""
# Transactions (as spans) belong to themselves (as transactions). This
# is a getter rather than a regular attribute to avoid having a circular
# reference.
return self
def _get_scope_from_finish_args(
self,
scope_arg, # type: Optional[Union[sentry_sdk.Scope, sentry_sdk.Hub]]
hub_arg, # type: Optional[Union[sentry_sdk.Scope, sentry_sdk.Hub]]
):
# type: (...) -> Optional[sentry_sdk.Scope]
"""
Logic to get the scope from the arguments passed to finish. This
function exists for backwards compatibility with the old finish.
TODO: Remove this function in the next major version.
"""
scope_or_hub = scope_arg
if hub_arg is not None:
warnings.warn(
"The `hub` parameter is deprecated. Please use the `scope` parameter, instead.",
DeprecationWarning,
stacklevel=3,
)
scope_or_hub = hub_arg
if isinstance(scope_or_hub, sentry_sdk.Hub):
warnings.warn(
"Passing a Hub to finish is deprecated. Please pass a Scope, instead.",
DeprecationWarning,
stacklevel=3,
)
return scope_or_hub.scope
return scope_or_hub
def _get_log_representation(self):
# type: () -> str
return "{op}transaction <{name}>".format(
op=("<" + self.op + "> " if self.op else ""), name=self.name
)
def finish(
self,
scope=None, # type: Optional[sentry_sdk.Scope]
end_timestamp=None, # type: Optional[Union[float, datetime]]
*,
hub=None, # type: Optional[sentry_sdk.Hub]
):
# type: (...) -> Optional[str]
"""Finishes the transaction and sends it to Sentry.
All finished spans in the transaction will also be sent to Sentry.
:param scope: The Scope to use for this transaction.
If not provided, the current Scope will be used.
:param end_timestamp: Optional timestamp that should
be used as timestamp instead of the current time.
:param hub: The hub to use for this transaction.
This argument is DEPRECATED. Please use the `scope`
parameter, instead.
:return: The event ID if the transaction was sent to Sentry,
otherwise None.
"""
if self.timestamp is not None:
# This transaction is already finished, ignore.
return None
# For backwards compatibility, we must handle the case where `scope`
# or `hub` could both either be a `Scope` or a `Hub`.
scope = self._get_scope_from_finish_args(scope, hub) # type: Optional[sentry_sdk.Scope]
scope = scope or self.scope or sentry_sdk.get_current_scope()
client = sentry_sdk.get_client()
if not client.is_active():
# We have no active client and therefore nowhere to send this transaction.
return None
if self._span_recorder is None:
# Explicit check against False needed because self.sampled might be None
if self.sampled is False:
logger.debug("Discarding transaction because sampled = False")
else:
logger.debug(
"Discarding transaction because it was not started with sentry_sdk.start_transaction"
)
# This is not entirely accurate because discards here are not
# exclusively based on sample rate but also traces sampler, but
# we handle this the same here.
if client.transport and has_tracing_enabled(client.options):
if client.monitor and client.monitor.downsample_factor > 0:
reason = "backpressure"
else:
reason = "sample_rate"
client.transport.record_lost_event(reason, data_category="transaction")
# Only one span (the transaction itself) is discarded, since we did not record any spans here.
client.transport.record_lost_event(reason, data_category="span")
return None
if not self.name:
logger.warning(
"Transaction has no name, falling back to `<unlabeled transaction>`."
)
self.name = "<unlabeled transaction>"
super().finish(scope, end_timestamp)
status_code = self._data.get(SPANDATA.HTTP_STATUS_CODE)
if (
status_code is not None
and status_code in client.options["trace_ignore_status_codes"]
):
logger.debug(
"[Tracing] Discarding {transaction_description} because the HTTP status code {status_code} is matched by trace_ignore_status_codes: {trace_ignore_status_codes}".format(
transaction_description=self._get_log_representation(),
status_code=self._data[SPANDATA.HTTP_STATUS_CODE],
trace_ignore_status_codes=client.options[
"trace_ignore_status_codes"
],
)
)
if client.transport:
client.transport.record_lost_event(
"event_processor", data_category="transaction"
)
num_spans = len(self._span_recorder.spans) + 1
client.transport.record_lost_event(
"event_processor", data_category="span", quantity=num_spans
)
self.sampled = False
if not self.sampled:
# At this point a `sampled = None` should have already been resolved
# to a concrete decision.
if self.sampled is None:
logger.warning("Discarding transaction without sampling decision.")
return None
finished_spans = [
span.to_json()
for span in self._span_recorder.spans
if span.timestamp is not None
]
len_diff = len(self._span_recorder.spans) - len(finished_spans)
dropped_spans = len_diff + self._span_recorder.dropped_spans
# we do this to break the circular reference of transaction -> span
# recorder -> span -> containing transaction (which is where we started)
# before either the spans or the transaction goes out of scope and has
# to be garbage collected
self._span_recorder = None
contexts = {}
contexts.update(self._contexts)
contexts.update({"trace": self.get_trace_context()})
profile_context = self.get_profile_context()
if profile_context is not None:
contexts.update({"profile": profile_context})
event = {
"type": "transaction",
"transaction": self.name,
"transaction_info": {"source": self.source},
"contexts": contexts,
"tags": self._tags,
"timestamp": self.timestamp,
"start_timestamp": self.start_timestamp,
"spans": finished_spans,
} # type: Event
if dropped_spans > 0:
event["_dropped_spans"] = dropped_spans
if self._profile is not None and self._profile.valid():
event["profile"] = self._profile
self._profile = None
event["measurements"] = self._measurements
return scope.capture_event(event)
def set_measurement(self, name, value, unit=""):
# type: (str, float, MeasurementUnit) -> None
"""
.. deprecated:: 2.28.0
This function is deprecated and will be removed in the next major release.
"""
warnings.warn(
"`set_measurement()` is deprecated and will be removed in the next major version. Please use `set_data()` instead.",
DeprecationWarning,
stacklevel=2,
)
self._measurements[name] = {"value": value, "unit": unit}
def set_context(self, key, value):
# type: (str, dict[str, Any]) -> None
"""Sets a context. Transactions can have multiple contexts
and they should follow the format described in the "Contexts Interface"
documentation.
:param key: The name of the context.
:param value: The information about the context.
"""
self._contexts[key] = value
def set_http_status(self, http_status):
# type: (int) -> None
"""Sets the status of the Transaction according to the given HTTP status.
:param http_status: The HTTP status code."""
super().set_http_status(http_status)
self.set_context("response", {"status_code": http_status})
def to_json(self):
# type: () -> Dict[str, Any]
"""Returns a JSON-compatible representation of the transaction."""
rv = super().to_json()
rv["name"] = self.name
rv["source"] = self.source
rv["sampled"] = self.sampled
return rv
def get_trace_context(self):
# type: () -> Any
trace_context = super().get_trace_context()
if self._data:
trace_context["data"] = self._data
return trace_context
def get_baggage(self):
# type: () -> Baggage
"""Returns the :py:class:`~sentry_sdk.tracing_utils.Baggage`
associated with the Transaction.
The first time a new baggage with Sentry items is made,
it will be frozen."""
if not self._baggage or self._baggage.mutable:
self._baggage = Baggage.populate_from_transaction(self)
return self._baggage
def _set_initial_sampling_decision(self, sampling_context):
# type: (SamplingContext) -> None
"""
Sets the transaction's sampling decision, according to the following
precedence rules:
1. If a sampling decision is passed to `start_transaction`
(`start_transaction(name: "my transaction", sampled: True)`), that
decision will be used, regardless of anything else
2. If `traces_sampler` is defined, its decision will be used. It can
choose to keep or ignore any parent sampling decision, or use the
sampling context data to make its own decision or to choose a sample
rate for the transaction.
3. If `traces_sampler` is not defined, but there's a parent sampling
decision, the parent sampling decision will be used.
4. If `traces_sampler` is not defined and there's no parent sampling
decision, `traces_sample_rate` will be used.
"""
client = sentry_sdk.get_client()
transaction_description = self._get_log_representation()
# nothing to do if tracing is disabled
if not has_tracing_enabled(client.options):
self.sampled = False
return
# if the user has forced a sampling decision by passing a `sampled`
# value when starting the transaction, go with that
if self.sampled is not None:
self.sample_rate = float(self.sampled)
return
# we would have bailed already if neither `traces_sampler` nor
# `traces_sample_rate` were defined, so one of these should work; prefer
# the hook if so
sample_rate = (
client.options["traces_sampler"](sampling_context)
if callable(client.options.get("traces_sampler"))
# default inheritance behavior
else (
sampling_context["parent_sampled"]
if sampling_context["parent_sampled"] is not None
else client.options["traces_sample_rate"]
)
)
# Since this is coming from the user (or from a function provided by the
# user), who knows what we might get. (The only valid values are
# booleans or numbers between 0 and 1.)
if not is_valid_sample_rate(sample_rate, source="Tracing"):
logger.warning(
"[Tracing] Discarding {transaction_description} because of invalid sample rate.".format(
transaction_description=transaction_description,
)
)
self.sampled = False
return
self.sample_rate = float(sample_rate)
if client.monitor:
self.sample_rate /= 2**client.monitor.downsample_factor
# if the function returned 0 (or false), or if `traces_sample_rate` is
# 0, it's a sign the transaction should be dropped
if not self.sample_rate:
logger.debug(
"[Tracing] Discarding {transaction_description} because {reason}".format(
transaction_description=transaction_description,
reason=(
"traces_sampler returned 0 or False"
if callable(client.options.get("traces_sampler"))
else "traces_sample_rate is set to 0"
),
)
)
self.sampled = False
return
# Now we roll the dice.
self.sampled = self._sample_rand < self.sample_rate
if self.sampled:
logger.debug(
"[Tracing] Starting {transaction_description}".format(
transaction_description=transaction_description,
)
)
else:
logger.debug(
"[Tracing] Discarding {transaction_description} because it's not included in the random sample (sampling rate = {sample_rate})".format(
transaction_description=transaction_description,
sample_rate=self.sample_rate,
)
)
| Transaction |
python | vyperlang__vyper | vyper/semantics/types/subscriptable.py | {
"start": 6830,
"end": 9667
} | class ____(_SequenceT):
"""
Dynamic array type
"""
typeclass = "dynamic_array"
_valid_literal = (vy_ast.List,)
is_valid_element_type = True
_id = "DynArray" # CMC 2024-03-03 maybe this would be better as repr(self)
def __init__(self, value_type: VyperType, length: int) -> None:
super().__init__(value_type, length)
from vyper.semantics.types.function import MemberFunctionT
self.add_member("append", MemberFunctionT(self, "append", [self.value_type], None, True))
self.add_member("pop", MemberFunctionT(self, "pop", [], self.value_type, True))
def __repr__(self):
return f"DynArray[{self.value_type}, {self.length}]"
@property
def subtype(self):
"""
Alias for backwards compatibility.
"""
return self.value_type
@property
def count(self):
"""
Alias for backwards compatibility.
"""
return self.length
@property
def abi_type(self) -> ABIType:
return ABI_DynamicArray(self.value_type.abi_type, self.length)
def to_abi_arg(self, name: str = "") -> Dict[str, Any]:
ret = self.value_type.to_abi_arg()
# modify the child name in place.
ret["type"] += "[]"
return _set_first_key(ret, "name", name)
# TODO rename me to memory_bytes_required
@property
def size_in_bytes(self):
# one length word + size of the array items
return 32 + self.value_type.size_in_bytes * self.length
def compare_type(self, other):
# TODO allow static array to be assigned to dyn array?
# if not isinstance(other, (DArrayT, SArrayT)):
if not isinstance(self, type(other)):
return False
if self.length < other.length:
return False
return self.value_type.compare_type(other.value_type)
@classmethod
def from_annotation(cls, node: vy_ast.Subscript) -> "DArrayT":
# common error message, different ast locations
err_msg = "DynArray must be defined with base type and max length, e.g. DynArray[bool, 5]"
if not isinstance(node, vy_ast.Subscript):
raise StructureException(err_msg, node)
if not isinstance(node.slice, vy_ast.Tuple) or len(node.slice.elements) != 2:
raise StructureException(err_msg, node.slice)
length_node = node.slice.elements[1].reduced()
if not isinstance(length_node, vy_ast.Int):
raise StructureException(err_msg, length_node)
length = length_node.value
value_node = node.slice.elements[0]
value_type = type_from_annotation(value_node)
if not value_type._as_darray:
raise StructureException(f"Arrays of {value_type} are not allowed", value_node)
return cls(value_type, length)
| DArrayT |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/assert_cardinality_test.py | {
"start": 1376,
"end": 6094
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
"""Tests for `tf.data.experimental.assert_cardinality()`."""
@combinations.generate(test_base.default_test_combinations())
def testCorrectCardinality(self):
dataset = dataset_ops.Dataset.range(10).filter(lambda x: True)
self.assertEqual(
self.evaluate(cardinality.cardinality(dataset)), cardinality.UNKNOWN)
self.assertDatasetProduces(dataset, expected_output=range(10))
dataset = dataset.apply(cardinality.assert_cardinality(10))
self.assertEqual(self.evaluate(cardinality.cardinality(dataset)), 10)
self.assertDatasetProduces(dataset, expected_output=range(10))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_elements=10,
asserted_cardinality=20,
expected_error="Input dataset was expected to contain 20 "
"elements but contained only 10 elements.") +
combinations.combine(
num_elements=1,
asserted_cardinality=20,
expected_error="Input dataset was expected to contain 20 "
"elements but contained only 1 element.") +
combinations.combine(
num_elements=10,
asserted_cardinality=cardinality.INFINITE,
expected_error="Input dataset was expected to contain an "
"infinite number of elements but contained only 10 elements.") +
combinations.combine(
num_elements=1,
asserted_cardinality=cardinality.INFINITE,
expected_error="Input dataset was expected to contain an "
"infinite number of elements but contained only 1 element.") +
combinations.combine(
num_elements=10,
asserted_cardinality=5,
expected_error="Input dataset was expected to contain 5 "
"elements but contained at least 6 elements.") +
combinations.combine(
num_elements=10,
asserted_cardinality=1,
expected_error="Input dataset was expected to contain 1 "
"element but contained at least 2 elements.")))
def testIncorrectCardinality(self, num_elements, asserted_cardinality,
expected_error):
dataset = dataset_ops.Dataset.range(num_elements)
dataset = dataset.apply(
cardinality.assert_cardinality(asserted_cardinality))
get_next = self.getNext(dataset)
with self.assertRaisesRegex(errors.FailedPreconditionError, expected_error):
while True:
self.evaluate(get_next())
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
num_elements=10,
asserted_cardinality=100,
expected_error=errors.FailedPreconditionError,
expected_error_message=(
"Input dataset was expected to contain 100 elements.")) +
combinations.combine(
num_elements=10,
asserted_cardinality=cardinality.INFINITE,
expected_error=errors.InvalidArgumentError,
expected_error_message=(
"`global_shuffle` requires the input dataset to have a "
"non-empty finite cardinality."))))
def testIncorrectCardinalityForGlobalShuffle(
self,
num_elements: int,
asserted_cardinality: int,
expected_error: Exception,
expected_error_message: str):
dataset = dataset_ops.Dataset.range(num_elements)
dataset = dataset.apply(
cardinality.assert_cardinality(asserted_cardinality))
with self.assertRaisesRegex(
expected_error, expected_error_message):
dataset = global_shuffle_op._global_shuffle(dataset)
self.getDatasetOutput(dataset, requires_initialization=True)
@combinations.generate(test_base.default_test_combinations())
def testRandomAccess(self):
num_elements = 10
dataset = dataset_ops.Dataset.range(num_elements)
dataset = dataset.apply(cardinality.assert_cardinality(num_elements))
self.verifyRandomAccess(dataset, expected=range(num_elements))
@combinations.generate(test_base.default_test_combinations())
def testRandomAccessOutOfRange(self):
dataset = dataset_ops.Dataset.range(5)
dataset = dataset.apply(cardinality.assert_cardinality(10))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=10))
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(random_access.at(dataset, index=5))
| AssertCardinalityTest |
python | lazyprogrammer__machine_learning_examples | rnn_class/srn_parity.py | {
"start": 512,
"end": 3451
} | class ____:
def __init__(self, M):
self.M = M # hidden layer size
def fit(self, X, Y, learning_rate=0.1, mu=0.99, reg=1.0, activation=T.tanh, epochs=100, show_fig=False):
D = X[0].shape[1] # X is of size N x T(n) x D
K = len(set(Y.flatten()))
N = len(Y)
M = self.M
self.f = activation
# initial weights
Wx = init_weight(D, M)
Wh = init_weight(M, M)
bh = np.zeros(M)
h0 = np.zeros(M)
Wo = init_weight(M, K)
bo = np.zeros(K)
# make them theano shared
self.Wx = theano.shared(Wx)
self.Wh = theano.shared(Wh)
self.bh = theano.shared(bh)
self.h0 = theano.shared(h0)
self.Wo = theano.shared(Wo)
self.bo = theano.shared(bo)
self.params = [self.Wx, self.Wh, self.bh, self.h0, self.Wo, self.bo]
thX = T.fmatrix('X')
thY = T.ivector('Y')
def recurrence(x_t, h_t1):
# returns h(t), y(t)
h_t = self.f(x_t.dot(self.Wx) + h_t1.dot(self.Wh) + self.bh)
y_t = T.nnet.softmax(h_t.dot(self.Wo) + self.bo)
return h_t, y_t
[h, y], _ = theano.scan(
fn=recurrence,
outputs_info=[self.h0, None],
sequences=thX,
n_steps=thX.shape[0],
)
py_x = y[:, 0, :]
prediction = T.argmax(py_x, axis=1)
cost = -T.mean(T.log(py_x[T.arange(thY.shape[0]), thY]))
grads = T.grad(cost, self.params)
dparams = [theano.shared(p.get_value()*0) for p in self.params]
updates = [
(p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads)
] + [
(dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads)
]
self.predict_op = theano.function(inputs=[thX], outputs=prediction)
self.train_op = theano.function(
inputs=[thX, thY],
outputs=[cost, prediction, y],
updates=updates
)
costs = []
for i in range(epochs):
X, Y = shuffle(X, Y)
n_correct = 0
cost = 0
for j in range(N):
c, p, rout = self.train_op(X[j], Y[j])
# print "p:", p
cost += c
if p[-1] == Y[j,-1]:
n_correct += 1
print("shape y:", rout.shape)
print("i:", i, "cost:", cost, "classification rate:", (float(n_correct)/N))
costs.append(cost)
if n_correct == N:
break
if show_fig:
plt.plot(costs)
plt.show()
def parity(B=12, learning_rate=1e-4, epochs=200):
X, Y = all_parity_pairs_with_sequence_labels(B)
rnn = SimpleRNN(20)
rnn.fit(X, Y, learning_rate=learning_rate, epochs=epochs, activation=T.nnet.relu, show_fig=False)
if __name__ == '__main__':
parity()
| SimpleRNN |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/instigation.py | {
"start": 4778,
"end": 6184
} | class ____:
# Mixin this class to implement DynamicPartitionsRequest
#
# Graphene has some strange properties that make it so that you cannot
# implement ABCs nor use properties in an overridable way. So the way
# the mixin works is that the target classes have to have a method
# get_dynamic_partitions_request()
partitionKeys = graphene.List(graphene.NonNull(graphene.String))
partitionsDefName = graphene.NonNull(graphene.String)
type = graphene.NonNull(GrapheneDynamicPartitionsRequestType)
class Meta:
name = "DynamicPartitionRequestMixin"
def get_dynamic_partitions_request(
self,
) -> Union[
AddDynamicPartitionsRequest,
DeleteDynamicPartitionsRequest,
]:
raise NotImplementedError()
def resolve_partitionKeys(self, _graphene_info: ResolveInfo):
return self.get_dynamic_partitions_request().partition_keys
def resolve_partitionsDefName(self, _graphene_info: ResolveInfo):
return self.get_dynamic_partitions_request().partitions_def_name
def resolve_type(self, _graphene_info: ResolveInfo):
return (
GrapheneDynamicPartitionsRequestType.ADD_PARTITIONS
if isinstance(self.get_dynamic_partitions_request(), AddDynamicPartitionsRequest)
else GrapheneDynamicPartitionsRequestType.DELETE_PARTITIONS
)
| DynamicPartitionsRequestMixin |
python | Pylons__pyramid | docs/tutorials/wiki2/src/tests/tests/conftest.py | {
"start": 1850,
"end": 5211
} | class ____(webtest.TestApp):
def get_cookie(self, name, default=None):
# webtest currently doesn't expose the unescaped cookie values
# so we're using webob to parse them for us
# see https://github.com/Pylons/webtest/issues/171
cookie = Cookie(' '.join(
'%s=%s' % (c.name, c.value)
for c in self.cookiejar
if c.name == name
))
return next(
(m.value.decode('latin-1') for m in cookie.values()),
default,
)
def get_csrf_token(self):
"""
Convenience method to get the current CSRF token.
This value must be passed to POST/PUT/DELETE requests in either the
"X-CSRF-Token" header or the "csrf_token" form value.
testapp.post(..., headers={'X-CSRF-Token': testapp.get_csrf_token()})
or
testapp.post(..., {'csrf_token': testapp.get_csrf_token()})
"""
return self.get_cookie('csrf_token')
def login(self, params, status=303, **kw):
""" Convenience method to login the client."""
body = dict(csrf_token=self.get_csrf_token())
body.update(params)
return self.post('/login', body, **kw)
@pytest.fixture
def testapp(app, tm, dbsession):
# override request.dbsession and request.tm with our own
# externally-controlled values that are shared across requests but aborted
# at the end
testapp = TestApp(app, extra_environ={
'HTTP_HOST': 'example.com',
'tm.active': True,
'tm.manager': tm,
'app.dbsession': dbsession,
})
# initialize a csrf token instead of running an initial request to get one
# from the actual app - this only works using the CookieCSRFStoragePolicy
testapp.set_cookie('csrf_token', 'dummy_csrf_token')
return testapp
@pytest.fixture
def app_request(app, tm, dbsession):
"""
A real request.
This request is almost identical to a real request but it has some
drawbacks in tests as it's harder to mock data and is heavier.
"""
with prepare(registry=app.registry) as env:
request = env['request']
request.host = 'example.com'
# without this, request.dbsession will be joined to the same transaction
# manager but it will be using a different sqlalchemy.orm.Session using
# a separate database transaction
request.dbsession = dbsession
request.tm = tm
yield request
@pytest.fixture
def dummy_request(tm, dbsession):
"""
A lightweight dummy request.
This request is ultra-lightweight and should be used only when the request
itself is not a large focus in the call-stack. It is much easier to mock
and control side-effects using this object, however:
- It does not have request extensions applied.
- Threadlocals are not properly pushed.
"""
request = DummyRequest()
request.host = 'example.com'
request.dbsession = dbsession
request.tm = tm
return request
@pytest.fixture
def dummy_config(dummy_request):
"""
A dummy :class:`pyramid.config.Configurator` object. This allows for
mock configuration, including configuration for ``dummy_request``, as well
as pushing the appropriate threadlocals.
"""
with testConfig(request=dummy_request) as config:
yield config
| TestApp |
python | simonw__datasette | datasette/views/database.py | {
"start": 9726,
"end": 15545
} | class ____(Context):
database: str = field(metadata={"help": "The name of the database being queried"})
database_color: str = field(metadata={"help": "The color of the database"})
query: dict = field(
metadata={"help": "The SQL query object containing the `sql` string"}
)
canned_query: str = field(
metadata={"help": "The name of the canned query if this is a canned query"}
)
private: bool = field(
metadata={"help": "Boolean indicating if this is a private database"}
)
# urls: dict = field(
# metadata={"help": "Object containing URL helpers like `database()`"}
# )
canned_query_write: bool = field(
metadata={
"help": "Boolean indicating if this is a canned query that allows writes"
}
)
metadata: dict = field(
metadata={"help": "Metadata about the database or the canned query"}
)
db_is_immutable: bool = field(
metadata={"help": "Boolean indicating if this database is immutable"}
)
error: str = field(metadata={"help": "Any query error message"})
hide_sql: bool = field(
metadata={"help": "Boolean indicating if the SQL should be hidden"}
)
show_hide_link: str = field(
metadata={"help": "The URL to toggle showing/hiding the SQL"}
)
show_hide_text: str = field(
metadata={"help": "The text for the show/hide SQL link"}
)
editable: bool = field(
metadata={"help": "Boolean indicating if the SQL can be edited"}
)
allow_execute_sql: bool = field(
metadata={"help": "Boolean indicating if custom SQL can be executed"}
)
tables: list = field(metadata={"help": "List of table objects in the database"})
named_parameter_values: dict = field(
metadata={"help": "Dictionary of parameter names/values"}
)
edit_sql_url: str = field(
metadata={"help": "URL to edit the SQL for a canned query"}
)
display_rows: list = field(metadata={"help": "List of result rows to display"})
columns: list = field(metadata={"help": "List of column names"})
renderers: dict = field(metadata={"help": "Dictionary of renderer name to URL"})
url_csv: str = field(metadata={"help": "URL for CSV export"})
show_hide_hidden: str = field(
metadata={"help": "Hidden input field for the _show_sql parameter"}
)
table_columns: dict = field(
metadata={"help": "Dictionary of table name to list of column names"}
)
alternate_url_json: str = field(
metadata={"help": "URL for alternate JSON version of this page"}
)
# TODO: refactor this to somewhere else, probably ds.render_template()
select_templates: list = field(
metadata={
"help": "List of templates that were considered for rendering this page"
}
)
top_query: callable = field(
metadata={"help": "Callable to render the top_query slot"}
)
top_canned_query: callable = field(
metadata={"help": "Callable to render the top_canned_query slot"}
)
query_actions: callable = field(
metadata={
"help": "Callable returning a list of links for the query action menu"
}
)
async def get_tables(datasette, request, db, allowed_dict):
"""
Get list of tables with metadata for the database view.
Args:
datasette: The Datasette instance
request: The current request
db: The database
allowed_dict: Dict mapping table name -> Resource object with .private attribute
"""
tables = []
table_counts = await db.table_counts(100)
hidden_table_names = set(await db.hidden_table_names())
all_foreign_keys = await db.get_all_foreign_keys()
for table in table_counts:
if table not in allowed_dict:
continue
table_columns = await db.table_columns(table)
tables.append(
{
"name": table,
"columns": table_columns,
"primary_keys": await db.primary_keys(table),
"count": table_counts[table],
"hidden": table in hidden_table_names,
"fts_table": await db.fts_table(table),
"foreign_keys": all_foreign_keys[table],
"private": allowed_dict[table].private,
}
)
tables.sort(key=lambda t: (t["hidden"], t["name"]))
return tables
async def database_download(request, datasette):
from datasette.resources import DatabaseResource
database = tilde_decode(request.url_vars["database"])
await datasette.ensure_permission(
action="view-database-download",
resource=DatabaseResource(database=database),
actor=request.actor,
)
try:
db = datasette.get_database(route=database)
except KeyError:
raise DatasetteError("Invalid database", status=404)
if db.is_memory:
raise DatasetteError("Cannot download in-memory databases", status=404)
if not datasette.setting("allow_download") or db.is_mutable:
raise Forbidden("Database download is forbidden")
if not db.path:
raise DatasetteError("Cannot download database", status=404)
filepath = db.path
headers = {}
if datasette.cors:
add_cors_headers(headers)
if db.hash:
etag = '"{}"'.format(db.hash)
headers["Etag"] = etag
# Has user seen this already?
if_none_match = request.headers.get("if-none-match")
if if_none_match and if_none_match == etag:
return Response("", status=304)
headers["Transfer-Encoding"] = "chunked"
return AsgiFileDownload(
filepath,
filename=os.path.basename(filepath),
content_type="application/octet-stream",
headers=headers,
)
| QueryContext |
python | streamlit__streamlit | lib/streamlit/elements/widgets/camera_input.py | {
"start": 3123,
"end": 10194
} | class ____:
@gather_metrics("camera_input")
def camera_input(
self,
label: str,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> UploadedFile | None:
r"""Display a widget that returns pictures from the user's webcam.
Parameters
----------
label : str
A short label explaining to the user what this widget is used for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
For accessibility reasons, you should never set an empty label, but
you can hide it with ``label_visibility`` if needed. In the future,
we may disallow empty labels by raising an exception.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
help : str or None
A tooltip that gets displayed next to the widget label. Streamlit
only displays the tooltip when ``label_visibility="visible"``. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_change : callable
An optional callback invoked when this camera_input's value
changes.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
disabled : bool
An optional boolean that disables the camera input if set to
``True``. Default is ``False``.
label_visibility : "visible", "hidden", or "collapsed"
The visibility of the label. The default is ``"visible"``. If this
is ``"hidden"``, Streamlit displays an empty spacer instead of the
label, which can help keep the widget aligned with other widgets.
If this is ``"collapsed"``, Streamlit displays no label or spacer.
width : "stretch" or int
The width of the camera input widget. This can be one of the
following:
- ``"stretch"`` (default): The width of the widget matches the
width of the parent container.
- An integer specifying the width in pixels: The widget has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the widget matches the width
of the parent container.
Returns
-------
None or UploadedFile
The UploadedFile class is a subclass of BytesIO, and therefore is
"file-like". This means you can pass an instance of it anywhere a
file is expected.
Examples
--------
>>> import streamlit as st
>>>
>>> enable = st.checkbox("Enable camera")
>>> picture = st.camera_input("Take a picture", disabled=not enable)
>>>
>>> if picture:
... st.image(picture)
.. output::
https://doc-camera-input.streamlit.app/
height: 600px
"""
ctx = get_script_run_ctx()
return self._camera_input(
label=label,
key=key,
help=help,
on_change=on_change,
args=args,
kwargs=kwargs,
disabled=disabled,
label_visibility=label_visibility,
width=width,
ctx=ctx,
)
def _camera_input(
self,
label: str,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
ctx: ScriptRunContext | None = None,
) -> UploadedFile | None:
key = to_key(key)
check_widget_policies(
self.dg,
key,
on_change,
default_value=None,
writes_allowed=False,
)
maybe_raise_label_warnings(label, label_visibility)
element_id = compute_and_register_element_id(
"camera_input",
user_key=key,
key_as_main_identity=True,
dg=self.dg,
label=label,
help=help,
width=width,
)
camera_input_proto = CameraInputProto()
camera_input_proto.id = element_id
camera_input_proto.label = label
camera_input_proto.form_id = current_form_id(self.dg)
camera_input_proto.disabled = disabled
camera_input_proto.label_visibility.value = get_label_visibility_proto_value(
label_visibility
)
if help is not None:
camera_input_proto.help = dedent(help)
validate_width(width)
layout_config = LayoutConfig(width=width)
serde = CameraInputSerde()
camera_input_state = register_widget(
camera_input_proto.id,
on_change_handler=on_change,
args=args,
kwargs=kwargs,
deserializer=serde.deserialize,
serializer=serde.serialize,
ctx=ctx,
value_type="file_uploader_state_value",
)
self.dg._enqueue(
"camera_input", camera_input_proto, layout_config=layout_config
)
if isinstance(camera_input_state.value, DeletedFile):
return None
return camera_input_state.value
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| CameraInputMixin |
python | huggingface__transformers | src/transformers/models/pix2struct/modeling_pix2struct.py | {
"start": 42254,
"end": 58633
} | class ____(Pix2StructPreTrainedModel):
config: Pix2StructTextConfig
input_modalities = ("text",)
_no_split_modules = ["Pix2StructTextBlock"]
_tied_weights_keys = {"lm_head.weight": "embed_tokens.weight"}
supports_gradient_checkpointing = True
def __init__(self, config):
super().__init__(config)
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size)
self.layer = nn.ModuleList(
[
Pix2StructTextBlock(config, has_relative_attention_bias=bool(i == 0), layer_idx=i)
for i in range(config.num_layers)
]
)
self.final_layer_norm = Pix2StructLayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
self.gradient_checkpointing = False
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.LongTensor] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> Union[tuple[torch.FloatTensor, ...], CausalLMOutputWithCrossAttentions]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Pix2StructText is a model with relative position
embeddings so you should be able to pad the inputs on both the right and the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for detail.
[What are input IDs?](../glossary#input-ids)
To know more on how to prepare `input_ids` for pretraining take a look a [Pix2StructText
Training](./t5#training).
Example:
```python
>>> from transformers import AutoProcessor, Pix2StructTextModel
>>> processor = AutoProcessor.from_pretrained("google/pix2struct-textcaps-base")
>>> model = Pix2StructTextModel.from_pretrained("google/pix2struct-textcaps-base")
>>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> loss = outputs.loss
```
"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.gradient_checkpointing and self.training and use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
if inputs_embeds is None:
assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
if use_cache and past_key_values is None:
if self.config.is_encoder_decoder:
past_key_values = EncoderDecoderCache(
DynamicCache(config=self.config), DynamicCache(config=self.config)
)
else:
past_key_values = DynamicCache(config=self.config)
past_key_values_length = 0
if cache_position is not None:
past_key_values_length = cache_position[0]
elif past_key_values is not None:
past_key_values_length = past_key_values.get_seq_length()
if cache_position is None:
cache_position = torch.arange(
past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
)
if attention_mask is None:
# required mask seq length can be calculated via length of past
mask_seq_length = (
past_key_values.get_seq_length() + seq_length if past_key_values is not None else seq_length
)
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
if self.config.is_decoder:
causal_mask = self._update_causal_mask(
attention_mask,
inputs_embeds,
cache_position,
past_key_values.self_attention_cache
if isinstance(past_key_values, EncoderDecoderCache)
else past_key_values,
output_attentions,
)
else:
causal_mask = attention_mask[:, None, None, :]
causal_mask = causal_mask.to(dtype=inputs_embeds.dtype)
causal_mask = (1.0 - causal_mask) * torch.finfo(inputs_embeds.dtype).min
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions) else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
causal_mask,
position_bias,
encoder_hidden_states,
encoder_extended_attention_mask,
encoder_decoder_position_bias, # as a positional argument for gradient checkpointing
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
# We share the position biases between the layers - the first layer store them
# layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
# (cross-attention position bias), (cross-attention weights)
position_bias = layer_outputs[1]
if encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[2],)
if encoder_hidden_states is not None:
all_cross_attentions = all_cross_attentions + (layer_outputs[4],)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
logits = self.lm_head(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
loss = None
if labels is not None:
# move labels to correct device
labels = labels.to(logits.device)
loss_fct = nn.CrossEntropyLoss(ignore_index=-100, reduction="mean")
loss = loss_fct(logits.contiguous().view(-1, logits.size(-1)), labels.contiguous().view(-1))
if not return_dict:
return tuple(
v
for v in [
loss,
logits,
past_key_values,
all_hidden_states,
all_attentions,
all_cross_attentions,
]
if v is not None
)
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.gptj.modeling_gptj.GPTJModel._update_causal_mask
def _update_causal_mask(
    self,
    attention_mask: Union[torch.Tensor, "BlockMask"],
    input_tensor: torch.Tensor,
    cache_position: torch.Tensor,
    past_key_values: Cache,
    output_attentions: bool = False,
):
    """
    Build (or pass through) the attention mask appropriate for the configured attention backend.

    Depending on `config._attn_implementation` this returns:
    - flash_attention_2: the raw 2D padding mask if any position is padded, else `None`;
    - flex_attention: a `BlockMask` (converting a plain tensor mask if one was given);
    - sdpa/eager: a 4D additive float mask, or `None` when SDPA can rely on its
      `is_causal` argument instead (not possible with a compilable/static cache or
      when attention weights must be returned).
    """
    if self.config._attn_implementation == "flash_attention_2":
        # Flash attention handles causality internally; it only needs the padding
        # mask, and only when there is actual padding (some positions are 0).
        if attention_mask is not None and (attention_mask == 0.0).any():
            return attention_mask
        return None
    if self.config._attn_implementation == "flex_attention":
        # Flex attention consumes a BlockMask; convert a plain tensor mask if given.
        if isinstance(attention_mask, torch.Tensor):
            attention_mask = make_flex_block_causal_mask(attention_mask)
        return attention_mask

    # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
    # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
    # to infer the attention mask.
    past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
    using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

    # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
    if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
        if AttentionMaskConverter._ignore_causal_mask_sdpa(
            attention_mask,
            inputs_embeds=input_tensor,
            past_key_values_length=past_seen_tokens,
            is_training=self.training,
        ):
            return None

    dtype = input_tensor.dtype
    sequence_length = input_tensor.shape[1]
    if using_compilable_cache:
        # A compilable (static) cache is pre-allocated: the mask must span the whole
        # cache, including the not-yet-filled (zero-padded) tail.
        target_length = past_key_values.get_max_cache_shape()
    else:
        target_length = (
            attention_mask.shape[-1]
            if isinstance(attention_mask, torch.Tensor)
            else past_seen_tokens + sequence_length + 1
        )

    # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
    causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask,
        sequence_length=sequence_length,
        target_length=target_length,
        dtype=dtype,
        cache_position=cache_position,
        batch_size=input_tensor.shape[0],
    )

    if (
        self.config._attn_implementation == "sdpa"
        and attention_mask is not None
        and attention_mask.device.type in ["cuda", "xpu", "npu"]
        and not output_attentions
    ):
        # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
        # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
        # Details: https://github.com/pytorch/pytorch/issues/110213
        min_dtype = torch.finfo(dtype).min
        causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

    return causal_mask
@staticmethod
# Copied from transformers.models.gptj.modeling_gptj.GPTJModel._prepare_4d_causal_attention_mask_with_cache_position
def _prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask: torch.Tensor,
    sequence_length: int,
    target_length: int,
    dtype: torch.dtype,
    cache_position: torch.Tensor,
    batch_size: int,
    **kwargs,
):
    """
    Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
    `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

    The returned mask is additive: allowed positions hold 0 and disallowed positions hold
    the minimum finite value of `dtype`.

    Args:
        attention_mask (`torch.Tensor`):
            A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
            `(batch_size, 1, query_length, key_value_length)`.
        sequence_length (`int`):
            The sequence length being processed.
        target_length (`int`):
            The target length: when generating with static cache, the mask should be as long as the static cache,
            to account for the 0 padding, the part of the cache that is not filled yet.
        dtype (`torch.dtype`):
            The dtype to use for the 4D attention mask.
        cache_position (`torch.Tensor`):
            Indices depicting the position of the input sequence tokens in the sequence.
        batch_size (`torch.Tensor`):
            Batch size.
    """
    if attention_mask is not None and attention_mask.dim() == 4:
        # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
        causal_mask = attention_mask
    else:
        min_dtype = torch.finfo(dtype).min
        # Start fully masked; positions will be selectively unmasked below.
        causal_mask = torch.full(
            (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
        )
        if sequence_length != 1:
            # Keep only the strict upper triangle masked (standard causal pattern).
            causal_mask = torch.triu(causal_mask, diagonal=1)
        # Zero out (unmask) every key position at or before each query's absolute
        # cache position, so cached prefix tokens remain visible.
        causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
        causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
        if attention_mask is not None:
            causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
            mask_length = attention_mask.shape[-1]
            # A position sums to 0 exactly when it is causal-visible (0) AND padded
            # (attention_mask == 0); those positions must be re-masked.
            padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                causal_mask.device
            )
            padding_mask = padding_mask == 0
            causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                padding_mask, min_dtype
            )

    return causal_mask
@auto_docstring(
custom_intro="""
A conditional generation model with a language modeling head. Can be used for sequence generation tasks.
"""
)
| Pix2StructTextModel |
python | qdrant__qdrant-client | qdrant_client/uploader/rest_uploader.py | {
"start": 2546,
"end": 4137
} | class ____(BaseUploader):
def __init__(
self,
uri: str,
collection_name: str,
max_retries: int,
wait: bool = False,
shard_key_selector: Optional[types.ShardKeySelector] = None,
update_filter: Optional[types.Filter] = None,
**kwargs: Any,
):
self.collection_name = collection_name
self.openapi_client: SyncApis = SyncApis(host=uri, **kwargs)
self.max_retries = max_retries
self._wait = wait
self._shard_key_selector = shard_key_selector
self._update_filter = (
GrpcToRest.convert_filter(model=update_filter)
if isinstance(update_filter, grpc.Filter)
else update_filter
)
@classmethod
def start(
cls,
collection_name: Optional[str] = None,
uri: str = "http://localhost:6333",
max_retries: int = 3,
**kwargs: Any,
) -> "RestBatchUploader":
if not collection_name:
raise RuntimeError("Collection name could not be empty")
return cls(uri=uri, collection_name=collection_name, max_retries=max_retries, **kwargs)
def process(self, items: Iterable[Any]) -> Iterable[bool]:
for batch in items:
yield upload_batch(
self.openapi_client,
self.collection_name,
batch,
shard_key_selector=self._shard_key_selector,
max_retries=self.max_retries,
update_filter=self._update_filter,
wait=self._wait,
)
| RestBatchUploader |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 14047,
"end": 14878
} | class ____:
xlAnd = 1 # from enum XlAutoFilterOperator
xlBottom10Items = 4 # from enum XlAutoFilterOperator
xlBottom10Percent = 6 # from enum XlAutoFilterOperator
xlFilterAutomaticFontColor = 13 # from enum XlAutoFilterOperator
xlFilterCellColor = 8 # from enum XlAutoFilterOperator
xlFilterDynamic = 11 # from enum XlAutoFilterOperator
xlFilterFontColor = 9 # from enum XlAutoFilterOperator
xlFilterIcon = 10 # from enum XlAutoFilterOperator
xlFilterNoFill = 12 # from enum XlAutoFilterOperator
xlFilterNoIcon = 14 # from enum XlAutoFilterOperator
xlFilterValues = 7 # from enum XlAutoFilterOperator
xlOr = 2 # from enum XlAutoFilterOperator
xlTop10Items = 3 # from enum XlAutoFilterOperator
xlTop10Percent = 5 # from enum XlAutoFilterOperator
| AutoFilterOperator |
python | numba__numba | numba/tests/test_obj_lifetime.py | {
"start": 408,
"end": 805
} | class ____(object):
def __init__(self, recorder, name):
self.recorder = recorder
self.name = name
recorder._add_dummy(self)
def __add__(self, other):
assert isinstance(other, _Dummy)
return _Dummy(self.recorder, "%s + %s" % (self.name, other.name))
def __iter__(self):
return _DummyIterator(self.recorder, "iter(%s)" % self.name)
| _Dummy |
python | doocs__leetcode | solution/3000-3099/3067.Count Pairs of Connectable Servers in a Weighted Tree Network/Solution.py | {
"start": 0,
"end": 715
} | class ____:
def countPairsOfConnectableServers(
self, edges: List[List[int]], signalSpeed: int
) -> List[int]:
def dfs(a: int, fa: int, ws: int) -> int:
cnt = 0 if ws % signalSpeed else 1
for b, w in g[a]:
if b != fa:
cnt += dfs(b, a, ws + w)
return cnt
n = len(edges) + 1
g = [[] for _ in range(n)]
for a, b, w in edges:
g[a].append((b, w))
g[b].append((a, w))
ans = [0] * n
for a in range(n):
s = 0
for b, w in g[a]:
t = dfs(b, a, w)
ans[a] += s * t
s += t
return ans
| Solution |
python | huggingface__transformers | tests/models/nemotron/test_modeling_nemotron.py | {
"start": 1928,
"end": 4737
} | class ____(unittest.TestCase):
@slow
@require_read_token
def test_nemotron_8b_generation_sdpa(self):
text = ["What is the largest planet in solar system?"]
EXPECTED_TEXT = [
"What is the largest planet in solar system?\nAnswer: Jupiter\n\nWhat is the answer",
]
model_id = "thhaus/nemotron3-8b"
model = NemotronForCausalLM.from_pretrained(
model_id, dtype=torch.float16, device_map="auto", attn_implementation="sdpa"
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(text, return_tensors="pt").to(torch_device)
output = model.generate(**inputs, do_sample=False, max_new_tokens=10)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT, output_text)
@slow
@require_read_token
def test_nemotron_8b_generation_eager(self):
text = ["What is the largest planet in solar system?"]
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): [
"What is the largest planet in solar system?\nAnswer: Jupiter\n\nWhat is the answer: What is the name of the 19",
],
("cuda", 7): [
"What is the largest planet in solar system?\nAnswer: Jupiter\n\nWhat is the answer",
],
}
)
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
model_id = "thhaus/nemotron3-8b"
model = NemotronForCausalLM.from_pretrained(
model_id, dtype=torch.float16, device_map="auto", attn_implementation="eager"
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(text, return_tensors="pt").to(torch_device)
output = model.generate(**inputs, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT, output_text)
@slow
@require_read_token
def test_nemotron_8b_generation_fa2(self):
text = ["What is the largest planet in solar system?"]
EXPECTED_TEXT = [
"What is the largest planet in solar system?\nAnswer: Jupiter\n\nWhat is the answer",
]
model_id = "thhaus/nemotron3-8b"
model = NemotronForCausalLM.from_pretrained(
model_id, dtype=torch.float16, device_map="auto", attn_implementation="flash_attention_2"
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(text, return_tensors="pt").to(torch_device)
output = model.generate(**inputs, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT, output_text)
| NemotronIntegrationTest |
python | kamyu104__LeetCode-Solutions | Python/subsets-ii.py | {
"start": 1247,
"end": 1784
} | class ____(object):
def subsetsWithDup(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
result = []
self.subsetsWithDupRecu(result, [], sorted(nums))
return result
def subsetsWithDupRecu(self, result, cur, nums):
if not nums:
if cur not in result:
result.append(cur)
else:
self.subsetsWithDupRecu(result, cur, nums[1:])
self.subsetsWithDupRecu(result, cur + [nums[0]], nums[1:])
| Solution3 |
python | matplotlib__matplotlib | galleries/examples/scales/custom_scale.py | {
"start": 1061,
"end": 6817
} | class ____(mscale.ScaleBase):
"""
Scales data in range -pi/2 to pi/2 (-90 to 90 degrees) using
the system used to scale latitudes in a Mercator__ projection.
The scale function:
ln(tan(y) + sec(y))
The inverse scale function:
atan(sinh(y))
Since the Mercator scale tends to infinity at +/- 90 degrees,
there is user-defined threshold, above and below which nothing
will be plotted. This defaults to +/- 85 degrees.
__ https://en.wikipedia.org/wiki/Mercator_projection
"""
# The scale class must have a member ``name`` that defines the string used
# to select the scale. For example, ``ax.set_yscale("mercator")`` would be
# used to select this scale.
name = 'mercator'
def __init__(self, axis, *, thresh=np.deg2rad(85), **kwargs):
"""
Any keyword arguments passed to ``set_xscale`` and ``set_yscale`` will
be passed along to the scale's constructor.
thresh: The degree above which to crop the data.
"""
super().__init__(axis)
if thresh >= np.pi / 2:
raise ValueError("thresh must be less than pi/2")
self.thresh = thresh
def get_transform(self):
"""
Override this method to return a new instance that does the
actual transformation of the data.
The MercatorLatitudeTransform class is defined below as a
nested class of this one.
"""
return self.MercatorLatitudeTransform(self.thresh)
def set_default_locators_and_formatters(self, axis):
"""
Override to set up the locators and formatters to use with the
scale. This is only required if the scale requires custom
locators and formatters. Writing custom locators and
formatters is rather outside the scope of this example, but
there are many helpful examples in :mod:`.ticker`.
In our case, the Mercator example uses a fixed locator from -90 to 90
degrees and a custom formatter to convert the radians to degrees and
put a degree symbol after the value.
"""
fmt = FuncFormatter(
lambda x, pos=None: f"{np.degrees(x):.0f}\N{DEGREE SIGN}")
axis.set(major_locator=FixedLocator(np.radians(range(-90, 90, 10))),
major_formatter=fmt, minor_formatter=fmt)
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Override to limit the bounds of the axis to the domain of the
transform. In the case of Mercator, the bounds should be
limited to the threshold that was passed in. Unlike the
autoscaling provided by the tick locators, this range limiting
will always be adhered to, whether the axis range is set
manually, determined automatically or changed through panning
and zooming.
"""
return max(vmin, -self.thresh), min(vmax, self.thresh)
class MercatorLatitudeTransform(mtransforms.Transform):
# There are two value members that must be defined.
# ``input_dims`` and ``output_dims`` specify number of input
# dimensions and output dimensions to the transformation.
# These are used by the transformation framework to do some
# error checking and prevent incompatible transformations from
# being connected together. When defining transforms for a
# scale, which are, by definition, separable and have only one
# dimension, these members should always be set to 1.
input_dims = output_dims = 1
def __init__(self, thresh):
mtransforms.Transform.__init__(self)
self.thresh = thresh
def transform_non_affine(self, a):
"""
This transform takes a numpy array and returns a transformed copy.
Since the range of the Mercator scale is limited by the
user-specified threshold, the input array must be masked to
contain only valid values. Matplotlib will handle masked arrays
and remove the out-of-range data from the plot. However, the
returned array *must* have the same shape as the input array, since
these values need to remain synchronized with values in the other
dimension.
"""
masked = ma.masked_where((a < -self.thresh) | (a > self.thresh), a)
if masked.mask.any():
return ma.log(np.abs(ma.tan(masked) + 1 / ma.cos(masked)))
else:
return np.log(np.abs(np.tan(a) + 1 / np.cos(a)))
def inverted(self):
"""
Override this method so Matplotlib knows how to get the
inverse transform for this transform.
"""
return MercatorLatitudeScale.InvertedMercatorLatitudeTransform(
self.thresh)
class InvertedMercatorLatitudeTransform(mtransforms.Transform):
input_dims = output_dims = 1
def __init__(self, thresh):
mtransforms.Transform.__init__(self)
self.thresh = thresh
def transform_non_affine(self, a):
return np.arctan(np.sinh(a))
def inverted(self):
return MercatorLatitudeScale.MercatorLatitudeTransform(self.thresh)
# Now that the Scale class has been defined, it must be registered so
# that Matplotlib can find it.
mscale.register_scale(MercatorLatitudeScale)
if __name__ == '__main__':
import matplotlib.pyplot as plt
t = np.arange(-180.0, 180.0, 0.1)
s = np.radians(t)/2.
plt.plot(t, s, '-', lw=2)
plt.yscale('mercator')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.title('Mercator projection')
plt.grid(True)
plt.show()
| MercatorLatitudeScale |
python | kamyu104__LeetCode-Solutions | Python/count-subtrees-with-max-distance-between-cities.py | {
"start": 54,
"end": 2242
} | class ____(object):
def countSubgraphsForEachDiameter(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[int]
"""
def dfs(n, adj, curr, parent, lookup, count, dp):
for child in adj[curr]:
if child == parent or lookup[child]:
continue
dfs(n, adj, child, curr, lookup, count, dp)
dp[curr][0][0] = 1
for child in adj[curr]:
if child == parent or lookup[child]:
continue
new_dp_curr = [row[:] for row in dp[curr]]
for curr_d in xrange(count[curr]):
for curr_max_d in xrange(curr_d, min(2*curr_d+1, count[curr])):
if not dp[curr][curr_d][curr_max_d]: # pruning
continue
for child_d in xrange(count[child]):
for child_max_d in xrange(child_d, min(2*child_d+1, count[child])):
new_dp_curr[max(curr_d, child_d+1)][max(curr_max_d, child_max_d, curr_d+child_d+1)] += \
dp[curr][curr_d][curr_max_d]*dp[child][child_d][child_max_d] # count subtrees with new child
count[curr] += count[child] # merge new child
dp[curr] = new_dp_curr
adj = collections.defaultdict(list)
for u, v in edges:
u -= 1
v -= 1
adj[u].append(v)
adj[v].append(u)
lookup, result = [0]*n, [0]*(n-1)
for i in xrange(n): # Time: sum(O(k^5) for k in [1, n]) = O(n^6)
dp = [[[0]*n for _ in xrange(n)] for _ in xrange(n)]
count = [1]*n
dfs(n, adj, i, -1, lookup, count, dp) # Time: O(k^5), k is the number of the remaining cities
lookup[i] = 1
for d in xrange(1, n): # for each depth from city i
for max_d in xrange(d, min(2*d+1, n)): # for each max distance
result[max_d-1] += dp[i][d][max_d]
return result
# Time: O(n * 2^n)
# Space: O(n)
import collections
import math
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_build.py | {
"start": 37213,
"end": 41163
} | class ____(GoogleCloudBaseOperator):
"""
Runs a BuildTrigger at a particular source revision.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudBuildRunBuildTriggerOperator`
:param trigger_id: The ID of the trigger.
:param source: Source to build against this trigger. If a dict is provided, it must be of the same form
as the protobuf message `google.cloud.devtools.cloudbuild_v1.types.RepoSource`
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:param wait: Optional, wait for operation to finish.
:param retry: Optional, a retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional, the amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional, additional metadata that is provided to the method.
:param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param location: The location of the project.
"""
template_fields: Sequence[str] = ("project_id", "trigger_id", "source", "gcp_conn_id", "location")
operator_extra_links = (CloudBuildLink(),)
def __init__(
self,
*,
trigger_id: str,
source: dict | RepoSource,
project_id: str = PROVIDE_PROJECT_ID,
wait: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
location: str = "global",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.trigger_id = trigger_id
self.source = source
self.project_id = project_id
self.wait = wait
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.location = location
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"region": self.location,
}
def execute(self, context: Context):
hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
result = hook.run_build_trigger(
trigger_id=self.trigger_id,
source=self.source,
project_id=self.project_id,
wait=self.wait,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
location=self.location,
)
context["task_instance"].xcom_push(key="id", value=result.id)
project_id = self.project_id or hook.project_id
if project_id:
CloudBuildLink.persist(
context=context,
project_id=project_id,
build_id=result.id,
)
return Build.to_dict(result)
| CloudBuildRunBuildTriggerOperator |
python | pypa__warehouse | tests/unit/admin/views/test_banners.py | {
"start": 7429,
"end": 8213
} | class ____:
def test_validate(self, banner_data):
form = views.BannerForm(MultiDict(banner_data))
assert form.validate(), str(form.errors)
data = form.data
defaults = {
"fa_icon": Banner.DEFAULT_FA_ICON,
"active": False,
"dismissable": False,
"link_label": Banner.DEFAULT_BTN_LABEL,
}
# Mash the `end` into a date object to match the form's coerced result.
banner_data["end"] = datetime.date.fromisoformat(banner_data["end"])
assert data == {**banner_data, **defaults}
def test_required_fields(self, banner_data):
form = views.BannerForm(MultiDict())
assert form.validate() is False
assert set(form.errors) == set(banner_data)
| TestBannerForm |
python | numba__numba | numba/core/fastmathpass.py | {
"start": 573,
"end": 1211
} | class ____(CallVisitor):
"""
A pass to change all float function calls to use fastmath.
"""
def __init__(self, flags):
self.flags = flags
def visit_Call(self, instr):
# Add to any call that has float/double return type
if instr.type in (ir.FloatType(), ir.DoubleType()):
for flag in self.flags:
instr.fastmath.add(flag)
def rewrite_module(mod, options):
"""
Rewrite the given LLVM module to use fastmath everywhere.
"""
flags = options.flags
FastFloatBinOpVisitor(flags).visit(mod)
FastFloatCallVisitor(flags).visit(mod)
| FastFloatCallVisitor |
python | apache__airflow | providers/google/tests/unit/google/cloud/transfers/test_sheets_to_gcs.py | {
"start": 1228,
"end": 4983
} | class ____:
@mock.patch("airflow.providers.google.cloud.transfers.sheets_to_gcs.csv.writer")
@mock.patch("airflow.providers.google.cloud.transfers.sheets_to_gcs.NamedTemporaryFile")
def test_upload_data(self, mock_tempfile, mock_writer):
filename = "file://97g23r"
file_handle = mock.MagicMock()
mock_tempfile.return_value.__enter__.return_value = file_handle
mock_tempfile.return_value.__enter__.return_value.name = filename
mock_sheet_hook = mock.MagicMock()
mock_sheet_hook.get_spreadsheet.return_value = {"properties": {"title": SHEET_TITLE}}
expected_dest_file = f"{PATH}/{SHEET_TITLE}_{RANGE}.csv"
mock_gcs_hook = mock.MagicMock()
op = GoogleSheetsToGCSOperator(
task_id="test_task",
spreadsheet_id=SPREADSHEET_ID,
destination_bucket=BUCKET,
sheet_filter=FILTER,
destination_path=PATH,
)
result = op._upload_data(
gcs_hook=mock_gcs_hook,
hook=mock_sheet_hook,
sheet_range=RANGE,
sheet_values=VALUES,
)
# Test writing to file
mock_sheet_hook.get_spreadsheet.assert_called_once_with(SPREADSHEET_ID)
mock_writer.assert_called_once_with(file_handle)
mock_writer.return_value.writerows.assert_called_once_with(VALUES)
file_handle.flush.assert_called_once_with()
# Test upload
mock_gcs_hook.upload.assert_called_once_with(
bucket_name=BUCKET, object_name=expected_dest_file, filename=filename
)
# Assert path to file is returned
assert result == expected_dest_file
@mock.patch("airflow.providers.google.cloud.transfers.sheets_to_gcs.GCSHook")
@mock.patch("airflow.providers.google.cloud.transfers.sheets_to_gcs.GSheetsHook")
@mock.patch(
"airflow.providers.google.cloud.transfers.sheets_to_gcs.GoogleSheetsToGCSOperator._upload_data"
)
def test_execute(self, mock_upload_data, mock_sheet_hook, mock_gcs_hook):
mock_ti = mock.MagicMock()
mock_context = {"ti": mock_ti}
data = ["data1", "data2"]
mock_sheet_hook.return_value.get_sheet_titles.return_value = RANGES
mock_sheet_hook.return_value.get_values.side_effect = data
mock_upload_data.side_effect = [PATH, PATH]
op = GoogleSheetsToGCSOperator(
task_id="test_task",
spreadsheet_id=SPREADSHEET_ID,
destination_bucket=BUCKET,
sheet_filter=FILTER,
destination_path=PATH,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(mock_context)
mock_sheet_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_gcs_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_sheet_hook.return_value.get_sheet_titles.assert_called_once_with(
spreadsheet_id=SPREADSHEET_ID, sheet_filter=FILTER
)
calls = [mock.call(spreadsheet_id=SPREADSHEET_ID, range_=r) for r in RANGES]
mock_sheet_hook.return_value.get_values.assert_has_calls(calls)
calls = [
mock.call(mock_gcs_hook.return_value, mock_sheet_hook.return_value, r, v)
for r, v in zip(RANGES, data)
]
mock_upload_data.assert_has_calls(calls)
actual_call_count = mock_upload_data.call_count
assert len(RANGES) == actual_call_count
mock_ti.xcom_push.assert_called_once_with(key="destination_objects", value=[PATH, PATH])
| TestGoogleSheetsToGCSOperator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 51911,
"end": 52341
} | class ____(sgqlc.types.Enum):
"""The possible organization invitation sources.
Enumeration Choices:
* `MEMBER`: The invitation was created from the web interface or
from API
* `SCIM`: The invitation was created from SCIM
* `UNKNOWN`: The invitation was sent before this feature was added
"""
__schema__ = github_schema
__choices__ = ("MEMBER", "SCIM", "UNKNOWN")
| OrganizationInvitationSource |
python | readthedocs__readthedocs.org | readthedocs/oauth/tests/test_models.py | {
"start": 186,
"end": 1395
} | class ____(TestCase):
def test_post_save_signal(self):
clone_url_a = "https://github.com/readthedocs/test-builds"
clone_url_b = "https://github.com/readthedocs/readthedocs.org"
remote_repo = get(
RemoteRepository,
clone_url=clone_url_a,
)
project = get(
Project,
repo=clone_url_b,
remote_repository=remote_repo,
)
# The project uses the clone url from the remote repository.
assert project.repo == clone_url_a
assert remote_repo.clone_url == clone_url_a
remote_repo.clone_url = clone_url_b
remote_repo.save()
project.refresh_from_db()
# The project clone URL is updated when the remote repository clone URL changes.
assert project.repo == clone_url_b
# The project URL is not updated when the feature is set to not sync with the remote repository.
feature = get(Feature, feature_id=Feature.DONT_SYNC_WITH_REMOTE_REPO)
project.feature_set.add(feature)
remote_repo.clone_url = clone_url_a
remote_repo.save()
project.refresh_from_db()
assert project.repo == clone_url_b
| TestsModels |
python | Textualize__textual | src/textual/scrollbar.py | {
"start": 13249,
"end": 13584
} | class ____(Widget):
"""Widget which fills the gap between horizontal and vertical scrollbars,
should they both be present."""
def render(self) -> RenderableType:
assert self.parent is not None
styles = self.parent.styles
color = styles.scrollbar_corner_color
return Blank(color)
| ScrollBarCorner |
python | huggingface__transformers | src/transformers/testing_utils.py | {
"start": 55710,
"end": 57358
} | class ____:
"""
Context manager to capture `logging` streams
Args:
logger: 'logging` logger object
Returns:
The captured output is available via `self.out`
Example:
```python
>>> from transformers import logging
>>> from transformers.testing_utils import CaptureLogger
>>> msg = "Testing 1, 2, 3"
>>> logging.set_verbosity_info()
>>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
>>> with CaptureLogger(logger) as cl:
... logger.info(msg)
>>> assert cl.out, msg + "\n"
```
"""
def __init__(self, logger):
self.logger = logger
self.io = StringIO()
self.sh = logging.StreamHandler(self.io)
self.out = ""
def __enter__(self):
self.logger.addHandler(self.sh)
return self
def __exit__(self, *exc):
self.logger.removeHandler(self.sh)
self.out = self.io.getvalue()
def __repr__(self):
return f"captured: {self.out}\n"
@contextlib.contextmanager
def LoggingLevel(level):
"""
This is a context manager to temporarily change transformers modules logging level to the desired value and have it
restored to the original setting at the end of the scope.
Example:
```python
with LoggingLevel(logging.INFO):
AutoModel.from_pretrained("openai-community/gpt2") # calls logger.info() several times
```
"""
orig_level = transformers_logging.get_verbosity()
try:
transformers_logging.set_verbosity(level)
yield
finally:
transformers_logging.set_verbosity(orig_level)
| CaptureLogger |
python | getsentry__sentry | src/sentry/api/serializers/models/group.py | {
"start": 30248,
"end": 33474
} | class ____(GroupSerializerBase):
def __init__(
self,
collapse=None,
expand=None,
environment_func: Callable[[], Environment | None] | None = None,
):
GroupSerializerBase.__init__(self, collapse=collapse, expand=expand)
self.environment_func = environment_func if environment_func is not None else lambda: None
def _seen_stats_error(self, item_list, user) -> Mapping[Group, SeenStats]:
return self.__seen_stats_impl(
item_list,
tagstore.backend.get_groups_user_counts,
tagstore.backend.get_group_list_tag_value,
)
def _seen_stats_generic(
self, generic_issue_list: Sequence[Group], user
) -> Mapping[Group, SeenStats]:
return self.__seen_stats_impl(
generic_issue_list,
tagstore.backend.get_generic_groups_user_counts,
tagstore.backend.get_generic_group_list_tag_value,
)
def __seen_stats_impl(
self,
issue_list: Sequence[Group],
user_counts_func: _GroupUserCountsFunc,
environment_seen_stats_func: _EnvironmentSeenStatsFunc,
) -> Mapping[Group, SeenStats]:
if not issue_list:
return {}
try:
environment = self.environment_func()
except Environment.DoesNotExist:
return {
item: {"times_seen": 0, "first_seen": None, "last_seen": None, "user_count": 0}
for item in issue_list
}
project_id = issue_list[0].project_id
item_ids = [g.id for g in issue_list]
tenant_ids = {"organization_id": issue_list[0].project.organization_id}
user_counts: Mapping[int, int] = user_counts_func(
[project_id],
item_ids,
environment_ids=[environment.id] if environment is not None else None,
tenant_ids=tenant_ids,
)
first_seen: MutableMapping[int, datetime] = {}
last_seen: MutableMapping[int, datetime] = {}
times_seen: MutableMapping[int, int] = {}
if environment is not None:
environment_seen_stats = environment_seen_stats_func(
[project_id],
item_ids,
[environment.id],
"environment",
environment.name,
tenant_ids=tenant_ids,
)
for item_id, value in environment_seen_stats.items():
first_seen[item_id] = value.first_seen
last_seen[item_id] = value.last_seen
times_seen[item_id] = value.times_seen
else:
# fallback to the model data since we can't query tagstore
for item in issue_list:
first_seen[item.id] = item.first_seen
last_seen[item.id] = item.last_seen
times_seen[item.id] = item.times_seen
return {
item: {
"times_seen": times_seen.get(item.id, 0),
"first_seen": first_seen.get(item.id),
"last_seen": last_seen.get(item.id),
"user_count": user_counts.get(item.id, 0),
}
for item in issue_list
}
| GroupSerializer |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_variables.py | {
"start": 3906,
"end": 5260
} | class ____(TestVariableEndpoint):
def test_delete_should_respond_204(self, test_client, session):
self.create_variables()
variables = session.query(Variable).all()
assert len(variables) == 5
response = test_client.delete(f"/variables/{TEST_VARIABLE_KEY}")
assert response.status_code == 204
response = test_client.delete(f"/variables/{TEST_VARIABLE_KEY4}")
assert response.status_code == 204
variables = session.query(Variable).all()
assert len(variables) == 3
check_last_log(session, dag_id=None, event="delete_variable", logical_date=None)
def test_delete_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.delete(f"/variables/{TEST_VARIABLE_KEY}")
assert response.status_code == 401
def test_delete_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.delete(f"/variables/{TEST_VARIABLE_KEY}")
assert response.status_code == 403
def test_delete_should_respond_404(self, test_client):
response = test_client.delete(f"/variables/{TEST_VARIABLE_KEY}")
assert response.status_code == 404
body = response.json()
assert f"The Variable with key: `{TEST_VARIABLE_KEY}` was not found" == body["detail"]
| TestDeleteVariable |
python | nedbat__coveragepy | tests/test_data.py | {
"start": 4484,
"end": 23505
} | class ____(CoverageTest):
"""Test cases for CoverageData."""
def test_empty_data_is_false(self) -> None:
covdata = DebugCoverageData()
assert not covdata
self.assert_doesnt_exist(".coverage")
def test_empty_data_is_false_when_read(self) -> None:
covdata = DebugCoverageData()
covdata.read()
assert not covdata
self.assert_doesnt_exist(".coverage")
def test_line_data_is_true(self) -> None:
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
assert covdata
def test_arc_data_is_true(self) -> None:
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
assert covdata
def test_empty_line_data_is_false(self) -> None:
covdata = DebugCoverageData()
covdata.add_lines({})
assert not covdata
def test_empty_arc_data_is_false(self) -> None:
covdata = DebugCoverageData()
covdata.add_arcs({})
assert not covdata
@pytest.mark.parametrize("lines", [LINES_1, dicts_from_sets(LINES_1)])
def test_adding_lines(self, lines: Mapping[str, Collection[TLineNo]]) -> None:
covdata = DebugCoverageData()
covdata.add_lines(lines)
assert_lines1_data(covdata)
@pytest.mark.parametrize("arcs", [ARCS_3, dicts_from_sets(ARCS_3)])
def test_adding_arcs(self, arcs: Mapping[str, Collection[TArc]]) -> None:
covdata = DebugCoverageData()
covdata.add_arcs(arcs)
assert_arcs3_data(covdata)
def test_ok_to_add_lines_twice(self) -> None:
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
covdata.add_lines(LINES_2)
assert_line_counts(covdata, SUMMARY_1_2)
assert_measured_files(covdata, MEASURED_FILES_1_2)
def test_ok_to_add_arcs_twice(self) -> None:
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_arcs(ARCS_4)
assert_line_counts(covdata, SUMMARY_3_4)
assert_measured_files(covdata, MEASURED_FILES_3_4)
def test_ok_to_add_empty_arcs(self) -> None:
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_arcs(ARCS_4)
covdata.add_arcs(dict.fromkeys(ARCS_3, set()))
assert_line_counts(covdata, SUMMARY_3_4)
assert_measured_files(covdata, MEASURED_FILES_3_4)
@pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData])
def test_cant_add_arcs_with_lines(self, klass: TCoverageData) -> None:
covdata = klass()
covdata.add_lines(LINES_1)
msg = "Can't add branch measurements to existing line data"
with pytest.raises(DataError, match=msg):
covdata.add_arcs(ARCS_3)
@pytest.mark.parametrize("klass", [CoverageData, DebugCoverageData])
def test_cant_add_lines_with_arcs(self, klass: TCoverageData) -> None:
covdata = klass()
covdata.add_arcs(ARCS_3)
msg = "Can't add line measurements to existing branch data"
with pytest.raises(DataError, match=msg):
covdata.add_lines(LINES_1)
def test_touch_file_with_lines(self) -> None:
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
covdata.touch_file("zzz.py")
assert_measured_files(covdata, MEASURED_FILES_1 + ["zzz.py"])
def test_touch_file_with_arcs(self) -> None:
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.touch_file("zzz.py")
assert_measured_files(covdata, MEASURED_FILES_3 + ["zzz.py"])
def test_set_query_contexts(self) -> None:
covdata = DebugCoverageData()
covdata.set_context("test_a")
covdata.add_lines(LINES_1)
covdata.set_query_contexts(["te.*a"])
assert covdata.lines("a.py") == [1, 2]
covdata.set_query_contexts(["other"])
assert covdata.lines("a.py") == []
def test_no_lines_vs_unmeasured_file(self) -> None:
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
covdata.touch_file("zzz.py")
assert covdata.lines("zzz.py") == []
assert covdata.lines("no_such_file.py") is None
def test_lines_with_contexts(self) -> None:
covdata = DebugCoverageData()
covdata.set_context("test_a")
covdata.add_lines(LINES_1)
assert covdata.lines("a.py") == [1, 2]
covdata.set_query_contexts(["test"])
assert covdata.lines("a.py") == [1, 2]
covdata.set_query_contexts(["other"])
assert covdata.lines("a.py") == []
def test_contexts_by_lineno_with_lines(self) -> None:
covdata = DebugCoverageData()
covdata.set_context("test_a")
covdata.add_lines(LINES_1)
expected = {1: ["test_a"], 2: ["test_a"]}
assert covdata.contexts_by_lineno("a.py") == expected
@pytest.mark.parametrize("lines", [LINES_1, dicts_from_sets(LINES_1)])
def test_no_duplicate_lines(self, lines: Mapping[str, Collection[TLineNo]]) -> None:
covdata = DebugCoverageData()
covdata.set_context("context1")
covdata.add_lines(lines)
covdata.set_context("context2")
covdata.add_lines(lines)
assert covdata.lines("a.py") == A_PY_LINES_1
@pytest.mark.parametrize("arcs", [ARCS_3, dicts_from_sets(ARCS_3)])
def test_no_duplicate_arcs(self, arcs: Mapping[str, Collection[TArc]]) -> None:
covdata = DebugCoverageData()
covdata.set_context("context1")
covdata.add_arcs(arcs)
covdata.set_context("context2")
covdata.add_arcs(arcs)
assert covdata.arcs("x.py") == X_PY_ARCS_3
def test_no_arcs_vs_unmeasured_file(self) -> None:
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.touch_file("zzz.py")
assert covdata.lines("zzz.py") == []
assert covdata.lines("no_such_file.py") is None
assert covdata.arcs("zzz.py") == []
assert covdata.arcs("no_such_file.py") is None
def test_arcs_with_contexts(self) -> None:
covdata = DebugCoverageData()
covdata.set_context("test_x")
covdata.add_arcs(ARCS_3)
assert covdata.arcs("x.py") == [(-1, 1), (1, 2), (2, 3), (3, -1)]
covdata.set_query_contexts(["test_.$"])
assert covdata.arcs("x.py") == [(-1, 1), (1, 2), (2, 3), (3, -1)]
covdata.set_query_contexts(["other"])
assert covdata.arcs("x.py") == []
def test_contexts_by_lineno_with_arcs(self) -> None:
covdata = DebugCoverageData()
covdata.set_context("test_x")
covdata.add_arcs(ARCS_3)
expected = {1: ["test_x"], 2: ["test_x"], 3: ["test_x"]}
assert covdata.contexts_by_lineno("x.py") == expected
def test_contexts_by_lineno_with_unknown_file(self) -> None:
covdata = DebugCoverageData()
covdata.set_context("test_x")
covdata.add_arcs(ARCS_3)
assert covdata.contexts_by_lineno("xyz.py") == {}
def test_context_by_lineno_with_query_contexts_with_lines(self) -> None:
covdata = DebugCoverageData()
covdata.set_context("test_1")
covdata.add_lines(LINES_1)
covdata.set_context("test_2")
covdata.add_lines(LINES_2)
covdata.set_query_context("test_1")
assert covdata.contexts_by_lineno("a.py") == dict.fromkeys([1, 2], ["test_1"])
def test_context_by_lineno_with_query_contexts_with_arcs(self) -> None:
covdata = DebugCoverageData()
covdata.set_context("test_1")
covdata.add_arcs(ARCS_3)
covdata.set_context("test_2")
covdata.add_arcs(ARCS_4)
covdata.set_query_context("test_1")
assert covdata.contexts_by_lineno("x.py") == dict.fromkeys([1, 2, 3], ["test_1"])
def test_file_tracer_name(self) -> None:
covdata = DebugCoverageData()
covdata.add_lines(
{
"p1.foo": [1, 2, 3],
"p2.html": [10, 11, 12],
"main.py": [20],
}
)
covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"})
assert covdata.file_tracer("p1.foo") == "p1.plugin"
assert covdata.file_tracer("p2.html") == "p2.plugin"
assert covdata.file_tracer("main.py") == ""
assert covdata.file_tracer("p3.not_here") is None
def test_ok_to_repeat_file_tracer(self) -> None:
covdata = DebugCoverageData()
covdata.add_lines(
{
"p1.foo": [1, 2, 3],
"p2.html": [10, 11, 12],
}
)
covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"})
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
assert covdata.file_tracer("p1.foo") == "p1.plugin"
def test_ok_to_set_empty_file_tracer(self) -> None:
covdata = DebugCoverageData()
covdata.add_lines(
{
"p1.foo": [1, 2, 3],
"p2.html": [10, 11, 12],
"main.py": [20],
}
)
covdata.add_file_tracers({"p1.foo": "p1.plugin", "main.py": ""})
assert covdata.file_tracer("p1.foo") == "p1.plugin"
assert covdata.file_tracer("main.py") == ""
def test_cant_change_file_tracer_name(self) -> None:
covdata = DebugCoverageData()
covdata.add_lines({"p1.foo": [1, 2, 3]})
covdata.add_file_tracers({"p1.foo": "p1.plugin"})
msg = "Conflicting file tracer name for 'p1.foo': 'p1.plugin' vs 'p1.plugin.foo'"
with pytest.raises(DataError, match=msg):
covdata.add_file_tracers({"p1.foo": "p1.plugin.foo"})
def test_update_lines(self) -> None:
covdata1 = DebugCoverageData(suffix="1")
covdata1.add_lines(LINES_1)
covdata2 = DebugCoverageData(suffix="2")
covdata2.add_lines(LINES_2)
covdata3 = DebugCoverageData(suffix="3")
covdata3.update(covdata1)
covdata3.update(covdata2)
assert_line_counts(covdata3, SUMMARY_1_2)
assert_measured_files(covdata3, MEASURED_FILES_1_2)
def test_update_arcs(self) -> None:
covdata1 = DebugCoverageData(suffix="1")
covdata1.add_arcs(ARCS_3)
covdata2 = DebugCoverageData(suffix="2")
covdata2.add_arcs(ARCS_4)
covdata3 = DebugCoverageData(suffix="3")
covdata3.update(covdata1)
covdata3.update(covdata2)
assert_line_counts(covdata3, SUMMARY_3_4)
assert_measured_files(covdata3, MEASURED_FILES_3_4)
def test_update_cant_mix_lines_and_arcs(self) -> None:
covdata1 = DebugCoverageData(suffix="1")
covdata1.add_lines(LINES_1)
covdata2 = DebugCoverageData(suffix="2")
covdata2.add_arcs(ARCS_3)
msg = "Can't combine branch coverage data with statement data"
with pytest.raises(DataError, match=msg):
covdata1.update(covdata2)
msg = "Can't combine statement coverage data with branch data"
with pytest.raises(DataError, match=msg):
covdata2.update(covdata1)
def test_update_file_tracers(self) -> None:
covdata1 = DebugCoverageData(suffix="1")
covdata1.add_lines(
{
"p1.html": [1, 2, 3, 4],
"p2.html": [5, 6, 7],
"main.py": [10, 11, 12],
}
)
covdata1.add_file_tracers(
{
"p1.html": "html.plugin",
"p2.html": "html.plugin2",
}
)
covdata2 = DebugCoverageData(suffix="2")
covdata2.add_lines(
{
"p1.html": [3, 4, 5, 6],
"p2.html": [7, 8, 9],
"p3.foo": [1000, 1001],
"main.py": [10, 11, 12],
}
)
covdata2.add_file_tracers(
{
"p1.html": "html.plugin",
"p2.html": "html.plugin2",
"p3.foo": "foo_plugin",
}
)
covdata3 = DebugCoverageData(suffix="3")
covdata3.update(covdata1)
covdata3.update(covdata2)
assert covdata3.file_tracer("p1.html") == "html.plugin"
assert covdata3.file_tracer("p2.html") == "html.plugin2"
assert covdata3.file_tracer("p3.foo") == "foo_plugin"
assert covdata3.file_tracer("main.py") == ""
def test_update_conflicting_file_tracers(self) -> None:
covdata1 = DebugCoverageData(suffix="1")
covdata1.add_lines({"p1.html": [1, 2, 3]})
covdata1.add_file_tracers({"p1.html": "html.plugin"})
covdata2 = DebugCoverageData(suffix="2")
covdata2.add_lines({"p1.html": [1, 2, 3]})
covdata2.add_file_tracers({"p1.html": "html.other_plugin"})
msg = "Conflicting file tracer name for 'p1.html': 'html.plugin' vs 'html.other_plugin'"
with pytest.raises(DataError, match=msg):
covdata1.update(covdata2)
msg = "Conflicting file tracer name for 'p1.html': 'html.other_plugin' vs 'html.plugin'"
with pytest.raises(DataError, match=msg):
covdata2.update(covdata1)
def test_update_file_tracer_vs_no_file_tracer(self) -> None:
covdata1 = DebugCoverageData(suffix="1")
covdata1.add_lines({"p1.html": [1, 2, 3]})
covdata1.add_file_tracers({"p1.html": "html.plugin"})
covdata2 = DebugCoverageData(suffix="2")
covdata2.add_lines({"p1.html": [1, 2, 3]})
msg = "Conflicting file tracer name for 'p1.html': 'html.plugin' vs ''"
with pytest.raises(DataError, match=msg):
covdata1.update(covdata2)
msg = "Conflicting file tracer name for 'p1.html': '' vs 'html.plugin'"
with pytest.raises(DataError, match=msg):
covdata2.update(covdata1)
def test_update_lines_empty(self) -> None:
covdata1 = DebugCoverageData(suffix="1")
covdata1.add_lines(LINES_1)
covdata2 = DebugCoverageData(suffix="2")
covdata1.update(covdata2)
assert_line_counts(covdata1, SUMMARY_1)
def test_update_arcs_empty(self) -> None:
covdata1 = DebugCoverageData(suffix="1")
covdata1.add_arcs(ARCS_3)
covdata2 = DebugCoverageData(suffix="2")
covdata1.update(covdata2)
assert_line_counts(covdata1, SUMMARY_3)
def test_asking_isnt_measuring(self) -> None:
# Asking about an unmeasured file shouldn't make it seem measured.
covdata = DebugCoverageData()
assert_measured_files(covdata, [])
assert covdata.arcs("missing.py") is None
assert_measured_files(covdata, [])
def test_add_to_hash_with_lines(self) -> None:
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
hasher = mock.Mock()
add_data_to_hash(covdata, "a.py", hasher)
assert hasher.method_calls == [
mock.call.update([1, 2]), # lines
mock.call.update(""), # file_tracer name
]
def test_add_to_hash_with_arcs(self) -> None:
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_file_tracers({"y.py": "hologram_plugin"})
hasher = mock.Mock()
add_data_to_hash(covdata, "y.py", hasher)
assert hasher.method_calls == [
mock.call.update([(-1, 17), (17, 23), (23, -1)]), # arcs
mock.call.update("hologram_plugin"), # file_tracer name
]
def test_add_to_lines_hash_with_missing_file(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/403
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
hasher = mock.Mock()
add_data_to_hash(covdata, "missing.py", hasher)
assert hasher.method_calls == [
mock.call.update([]),
mock.call.update(None),
]
def test_add_to_arcs_hash_with_missing_file(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/403
covdata = DebugCoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_file_tracers({"y.py": "hologram_plugin"})
hasher = mock.Mock()
add_data_to_hash(covdata, "missing.py", hasher)
assert hasher.method_calls == [
mock.call.update([]),
mock.call.update(None),
]
def test_empty_lines_are_still_lines(self) -> None:
covdata = DebugCoverageData()
covdata.add_lines({})
covdata.touch_file("abc.py")
assert not covdata.has_arcs()
def test_empty_arcs_are_still_arcs(self) -> None:
covdata = DebugCoverageData()
covdata.add_arcs({})
covdata.touch_file("abc.py")
assert covdata.has_arcs()
def test_cant_touch_in_empty_data(self) -> None:
covdata = DebugCoverageData()
msg = "Can't touch files in an empty CoverageData"
with pytest.raises(DataError, match=msg):
covdata.touch_file("abc.py")
def test_read_and_write_are_opposites(self) -> None:
covdata1 = DebugCoverageData()
covdata1.add_arcs(ARCS_3)
covdata1.write()
covdata2 = DebugCoverageData()
covdata2.read()
assert_arcs3_data(covdata2)
def test_thread_stress(self) -> None:
covdata = DebugCoverageData()
exceptions = []
def thread_main() -> None:
"""Every thread will try to add the same data."""
try:
covdata.add_lines(LINES_1)
except Exception as ex: # pragma: only failure
exceptions.append(ex)
threads = [threading.Thread(target=thread_main) for _ in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
assert_lines1_data(covdata)
assert not exceptions
def test_purge_files_lines(self) -> None:
covdata = DebugCoverageData()
covdata.add_lines(LINES_1)
covdata.add_lines(LINES_2)
assert_line_counts(covdata, SUMMARY_1_2)
covdata.purge_files(["a.py", "b.py"])
assert_line_counts(covdata, {"a.py": 0, "b.py": 0, "c.py": 1})
covdata.purge_files(["c.py"])
assert_line_counts(covdata, {"a.py": 0, "b.py": 0, "c.py": 0})
# It's OK to "purge" a file that wasn't measured.
covdata.purge_files(["xyz.py"])
assert_line_counts(covdata, {"a.py": 0, "b.py": 0, "c.py": 0})
def test_purge_files_arcs(self) -> None:
covdata = CoverageData()
covdata.add_arcs(ARCS_3)
covdata.add_arcs(ARCS_4)
assert_line_counts(covdata, SUMMARY_3_4)
covdata.purge_files(["x.py", "y.py"])
assert_line_counts(covdata, {"x.py": 0, "y.py": 0, "z.py": 1})
covdata.purge_files(["z.py"])
assert_line_counts(covdata, {"x.py": 0, "y.py": 0, "z.py": 0})
def test_cant_purge_in_empty_data(self) -> None:
covdata = DebugCoverageData()
msg = "Can't purge files in an empty CoverageData"
with pytest.raises(DataError, match=msg):
covdata.purge_files(["abc.py"])
| CoverageDataTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes5.py | {
"start": 6060,
"end": 6124
} | class ____(Protocol):
x: ClassVar[int]
y: int
| ParentClass4 |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/mysqlconnector.py | {
"start": 4447,
"end": 9855
} | class ____(MySQLDialect):
driver = "mysqlconnector"
supports_statement_cache = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_native_decimal = True
supports_native_bit = True
# not until https://bugs.mysql.com/bug.php?id=117548
supports_server_side_cursors = False
default_paramstyle = "format"
statement_compiler = MySQLCompiler_mysqlconnector
execution_ctx_cls = MySQLExecutionContext_mysqlconnector
preparer: type[MySQLIdentifierPreparer] = (
MySQLIdentifierPreparer_mysqlconnector
)
colspecs = util.update_copy(MySQLDialect.colspecs, {BIT: _myconnpyBIT})
@classmethod
def import_dbapi(cls) -> DBAPIModule:
return cast("DBAPIModule", __import__("mysql.connector").connector)
def do_ping(self, dbapi_connection: DBAPIConnection) -> bool:
dbapi_connection.ping(False)
return True
def create_connect_args(self, url: URL) -> ConnectArgsType:
opts = url.translate_connect_args(username="user")
opts.update(url.query)
util.coerce_kw_type(opts, "allow_local_infile", bool)
util.coerce_kw_type(opts, "autocommit", bool)
util.coerce_kw_type(opts, "buffered", bool)
util.coerce_kw_type(opts, "client_flag", int)
util.coerce_kw_type(opts, "compress", bool)
util.coerce_kw_type(opts, "connection_timeout", int)
util.coerce_kw_type(opts, "connect_timeout", int)
util.coerce_kw_type(opts, "consume_results", bool)
util.coerce_kw_type(opts, "force_ipv6", bool)
util.coerce_kw_type(opts, "get_warnings", bool)
util.coerce_kw_type(opts, "pool_reset_session", bool)
util.coerce_kw_type(opts, "pool_size", int)
util.coerce_kw_type(opts, "raise_on_warnings", bool)
util.coerce_kw_type(opts, "raw", bool)
util.coerce_kw_type(opts, "ssl_verify_cert", bool)
util.coerce_kw_type(opts, "use_pure", bool)
util.coerce_kw_type(opts, "use_unicode", bool)
# note that "buffered" is set to False by default in MySQL/connector
# python. If you set it to True, then there is no way to get a server
# side cursor because the logic is written to disallow that.
# leaving this at True until
# https://bugs.mysql.com/bug.php?id=117548 can be fixed
opts["buffered"] = True
# FOUND_ROWS must be set in ClientFlag to enable
# supports_sane_rowcount.
if self.dbapi is not None:
try:
from mysql.connector import constants # type: ignore
ClientFlag = constants.ClientFlag
client_flags = opts.get(
"client_flags", ClientFlag.get_default()
)
client_flags |= ClientFlag.FOUND_ROWS
opts["client_flags"] = client_flags
except Exception:
pass
return [], opts
@util.memoized_property
def _mysqlconnector_version_info(self) -> Optional[tuple[int, ...]]:
if self.dbapi and hasattr(self.dbapi, "__version__"):
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
if m:
return tuple(int(x) for x in m.group(1, 2, 3) if x is not None)
return None
def _detect_charset(self, connection: Connection) -> str:
return connection.connection.charset # type: ignore
def _extract_error_code(self, exception: BaseException) -> int:
return exception.errno # type: ignore
def is_disconnect(
self,
e: Exception,
connection: Optional[Union[PoolProxiedConnection, DBAPIConnection]],
cursor: Optional[DBAPICursor],
) -> bool:
errnos = (2006, 2013, 2014, 2045, 2055, 2048)
exceptions = (
self.loaded_dbapi.OperationalError, #
self.loaded_dbapi.InterfaceError,
self.loaded_dbapi.ProgrammingError,
)
if isinstance(e, exceptions):
return (
e.errno in errnos
or "MySQL Connection not available." in str(e)
or "Connection to MySQL is not available" in str(e)
)
else:
return False
def _compat_fetchall(
self,
rp: CursorResult[Unpack[TupleAny]],
charset: Optional[str] = None,
) -> Sequence[Row[Unpack[TupleAny]]]:
return rp.fetchall()
def _compat_fetchone(
self,
rp: CursorResult[Unpack[TupleAny]],
charset: Optional[str] = None,
) -> Optional[Row[Unpack[TupleAny]]]:
return rp.fetchone()
def get_isolation_level_values(
self, dbapi_conn: DBAPIConnection
) -> Sequence[IsolationLevel]:
return (
"SERIALIZABLE",
"READ UNCOMMITTED",
"READ COMMITTED",
"REPEATABLE READ",
"AUTOCOMMIT",
)
def detect_autocommit_setting(self, dbapi_conn: DBAPIConnection) -> bool:
return bool(dbapi_conn.autocommit)
def set_isolation_level(
self, dbapi_connection: DBAPIConnection, level: IsolationLevel
) -> None:
if level == "AUTOCOMMIT":
dbapi_connection.autocommit = True
else:
dbapi_connection.autocommit = False
super().set_isolation_level(dbapi_connection, level)
| MySQLDialect_mysqlconnector |
python | huggingface__transformers | src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py | {
"start": 28348,
"end": 34060
} | class ____(RecurrentGemmaPreTrainedModel):
def __init__(self, config: RecurrentGemmaConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[RecurrentGemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.final_norm = RecurrentGemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = False
self.register_buffer(
"normalizer", torch.tensor(self.config.hidden_size**0.5, dtype=torch.bfloat16), persistent=False
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cache_position: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = inputs_embeds
if use_cache and inputs_embeds.shape[1] != 1: # TODO let's maybe only call in the `generate`?
self._setup_cache(self.config, hidden_states.shape[0], hidden_states.device, hidden_states.dtype)
if cache_position is None:
cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
hidden_states = hidden_states * self.normalizer.type(hidden_states.dtype)
all_hidden_states = () if output_hidden_states else None
for i, residual_block in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
hidden_states = residual_block(hidden_states, position_ids, causal_mask, cache_position, use_cache)
hidden_states = self.final_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
)
# Ignore copy
def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
dtype, device = input_tensor.dtype, input_tensor.device
min_dtype = torch.finfo(dtype).min
sequence_length = input_tensor.shape[1]
target_length = max(self.config.attention_window_size, sequence_length)
diagonal = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
causal_mask = diagonal
if sequence_length != 1:
causal_mask = torch.triu(diagonal, diagonal=-1)
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
if attention_mask.dim() == 2:
# Crop the attention mask to the target length.
attention_mask = attention_mask[:, -target_length:]
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
if attention_mask is not None and attention_mask.device.type in ["cuda", "xpu", "npu"]:
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
# TODO: re-enable check: Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with LLAMA->RECURRENTGEMMA,Llama->RecurrentGemma,llama->gemma
@auto_docstring
| RecurrentGemmaModel |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/run_coordinator/sync_in_memory_run_coordinator.py | {
"start": 432,
"end": 1989
} | class ____(RunCoordinator, ConfigurableClass):
"""Immediately send runs to the run launcher."""
def __init__(self, inst_data: Optional[ConfigurableClassData] = None):
self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
self._logger = logging.getLogger("dagster.run_coordinator.sync_in_memory_run_coordinator")
super().__init__()
@property
def inst_data(self) -> Optional[ConfigurableClassData]:
return self._inst_data
@classmethod
def config_type(cls) -> UserConfigSchema:
return {}
@classmethod
def from_config_value(
cls, inst_data: Optional[ConfigurableClassData], config_value: Mapping[str, object]
) -> Self:
return cls(inst_data=inst_data, **config_value)
def submit_run(self, context: SubmitRunContext) -> DagsterRun:
dagster_run = context.dagster_run
if dagster_run.status == DagsterRunStatus.NOT_STARTED:
self._instance.launch_run(dagster_run.run_id, context.workspace)
else:
self._logger.warning(
f"submit_run called for run {dagster_run.run_id} with status "
f"{dagster_run.status.value}, skipping launch."
)
run = self._instance.get_run_by_id(dagster_run.run_id)
if run is None:
check.failed(f"Failed to reload run {dagster_run.run_id}")
return run
def cancel_run(self, run_id: str) -> bool:
return self._instance.run_launcher.terminate(run_id)
| SyncInMemoryRunCoordinator |
python | numba__numba | numba/core/controlflow.py | {
"start": 1956,
"end": 2651
} | class ____(collections.defaultdict):
"""A defaultdict with customized equality checks that ignore empty values.
Non-empty value is checked by: `bool(value_item) == True`.
"""
def __eq__(self, other):
if isinstance(other, _DictOfContainers):
mine = self._non_empty_items()
theirs = other._non_empty_items()
return mine == theirs
return NotImplemented
def __ne__(self, other):
ret = self.__eq__(other)
if ret is NotImplemented:
return ret
else:
return not ret
def _non_empty_items(self):
return [(k, vs) for k, vs in sorted(self.items()) if vs]
| _DictOfContainers |
python | pytorch__pytorch | test/dynamo/test_backward_higher_order_ops.py | {
"start": 2670,
"end": 4268
} | class ____(torch.nn.Module):
def forward(self, grad_1: "f32[2]"):
trace_wrapped: "f32[2]" = torch__dynamo__trace_wrapped_higher_order_op_self_invoke(grad_1); grad_1 = None
return trace_wrapped
""",
)
@mock.patch(
"torch._functorch.aot_autograd.AOT_COUNTER", new_callable=itertools.count
)
def test_invoke_in_pt2_compiled_autograd(self, _):
graph = None
def compiler_fn(gm):
def inner_compiler(gm_, example_inputs_):
nonlocal graph
self.assertEqual(graph, None)
graph = gm_
return inductor.compile(gm_, example_inputs_)
return torch.compile(
gm, backend=inner_compiler, fullgraph=True, dynamic=True
)
for backend in ["eager", "aot_eager", "inductor"]:
torch._dynamo.reset()
x = torch.tensor([0.5, 0.5], requires_grad=True)
y = torch.tensor([0.5, 0.5], requires_grad=True)
def fn(x, y):
x.register_hook(_multiply_invoke)
return x + y
fn = torch.compile(fn, backend=backend)
out = fn(x, y)
grad_out = torch.tensor([2.0, 2.0])
with compiled_autograd._enable(compiler_fn):
out.backward(grad_out)
actual = normalize_gm(graph.print_readable(False))
self.assertEqual(x.grad, grad_out * grad_out)
if backend == "aot_eager":
self.assertExpectedInline(
actual,
"""\
| _multiply_invoke |
python | plotly__plotly.py | plotly/graph_objs/layout/legend/_grouptitlefont.py | {
"start": 235,
"end": 10028
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.legend"
_path_str = "layout.legend.grouptitlefont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Grouptitlefont object
Sets the font for group titles in legend. Defaults to
`legend.font` with its size increased about 10%.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.legend.Grouptitlefont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Grouptitlefont
"""
super().__init__("grouptitlefont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.legend.Grouptitlefont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.legend.Grouptitlefont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Grouptitlefont |
python | getsentry__sentry | tests/sentry/auth/test_helper.py | {
"start": 3306,
"end": 7596
} | class ____(AuthIdentityHandlerTest, HybridCloudTestMixin):
@mock.patch("sentry.analytics.record")
def test_simple(self, mock_record: mock.MagicMock) -> None:
auth_identity = self.handler.handle_new_user()
user = auth_identity.user
assert user.email == self.email
with assume_test_silo_mode(SiloMode.REGION):
org_member = OrganizationMember.objects.get(
organization=self.organization, user_id=user.id
)
self.assert_org_member_mapping(org_member=org_member)
assert_last_analytics_event(
mock_record,
UserSignUpEvent(
user_id=user.id,
source="sso",
provider=self.provider,
referrer="in-app",
),
)
def test_associated_existing_member_invite_by_email(self) -> None:
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.create(
organization=self.organization, email=self.email
)
auth_identity = self.handler.handle_new_user()
with assume_test_silo_mode(SiloMode.REGION):
assigned_member = OrganizationMember.objects.get(
organization=self.organization, user_id=auth_identity.user_id
)
assert assigned_member.id == member.id
def test_demo_user_cannot_be_added_new_user(self) -> None:
with mock.patch("sentry.auth.helper.is_demo_user", return_value=True):
with self.assertRaisesMessage(
Exception,
"Demo user cannot be added to an organization that is not a demo organization.",
):
self.handler.handle_new_user()
def test_associated_existing_member_invite_request(self) -> None:
member = self.create_member(
organization=self.organization,
email=self.email,
invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value,
)
auth_identity = self.handler.handle_new_user()
with assume_test_silo_mode(SiloMode.REGION):
org_member = OrganizationMember.objects.get(
organization=self.organization,
user_id=auth_identity.user_id,
invite_status=InviteStatus.APPROVED.value,
)
self.assert_org_member_mapping(org_member=org_member)
self.assert_org_member_mapping_not_exists(org_member=member)
with assume_test_silo_mode(SiloMode.REGION):
assert not OrganizationMember.objects.filter(id=member.id).exists()
def test_associate_pending_invite(self) -> None:
# The org member invite should have a non matching email, but the
# member id and token will match from the session, allowing association
with assume_test_silo_mode(SiloMode.REGION):
member = OrganizationMember.objects.create(
organization=self.organization, email="different.email@example.com", token="abc"
)
self.request.session["invite_member_id"] = member.id
self.request.session["invite_token"] = member.token
self.save_session()
auth_identity = self.handler.handle_new_user()
with assume_test_silo_mode(SiloMode.REGION):
assigned_member = OrganizationMember.objects.get(
organization=self.organization, user_id=auth_identity.user.id
)
assert assigned_member.id == member.id
def test_demo_user_can_be_added_new_user_when_demo_org(self) -> None:
# Force demo user behavior, and mark org as demo org
with override_options(
{"demo-mode.enabled": True, "demo-mode.orgs": [self.organization.id]}
):
with mock.patch("sentry.auth.helper.is_demo_user", return_value=True):
# Should not raise when org is demo org
auth_identity = self.handler.handle_new_user()
with assume_test_silo_mode(SiloMode.REGION):
org_member = OrganizationMember.objects.get(
organization=self.organization, user_id=auth_identity.user.id
)
assert getattr(org_member.flags, "sso:linked")
@control_silo_test
| HandleNewUserTest |
python | davidhalter__jedi | jedi/inference/context.py | {
"start": 13350,
"end": 17164
} | class ____(CompiledContext):
code_lines = None
def get_value(self):
return self._value
@property
def string_names(self):
return self._value.string_names
def py__file__(self) -> Optional[Path]:
return self._value.py__file__() # type: ignore[no-any-return]
def _get_global_filters_for_name(context, name_or_none, position):
# For functions and classes the defaults don't belong to the
# function and get inferred in the value before the function. So
# make sure to exclude the function/class name.
if name_or_none is not None:
ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef', 'lambdef')
lambdef = None
if ancestor == 'lambdef':
# For lambdas it's even more complicated since parts will
# be inferred later.
lambdef = ancestor
ancestor = search_ancestor(name_or_none, 'funcdef', 'classdef')
if ancestor is not None:
colon = ancestor.children[-2]
if position is not None and position < colon.start_pos:
if lambdef is None or position < lambdef.children[-2].start_pos:
position = ancestor.start_pos
return get_global_filters(context, position, name_or_none)
def get_global_filters(context, until_position, origin_scope):
"""
Returns all filters in order of priority for name resolution.
For global name lookups. The filters will handle name resolution
themselves, but here we gather possible filters downwards.
>>> from jedi import Script
>>> script = Script('''
... x = ['a', 'b', 'c']
... def func():
... y = None
... ''')
>>> module_node = script._module_node
>>> scope = next(module_node.iter_funcdefs())
>>> scope
<Function: func@3-5>
>>> context = script._get_module_context().create_context(scope)
>>> filters = list(get_global_filters(context, (4, 0), None))
First we get the names from the function scope.
>>> print(filters[0]) # doctest: +ELLIPSIS
MergedFilter(<ParserTreeFilter: ...>, <GlobalNameFilter: ...>)
>>> sorted(str(n) for n in filters[0].values()) # doctest: +NORMALIZE_WHITESPACE
['<TreeNameDefinition: string_name=func start_pos=(3, 4)>',
'<TreeNameDefinition: string_name=x start_pos=(2, 0)>']
>>> filters[0]._filters[0]._until_position
(4, 0)
>>> filters[0]._filters[1]._until_position
Then it yields the names from one level "lower". In this example, this is
the module scope (including globals).
As a side note, you can see, that the position in the filter is None on the
globals filter, because there the whole module is searched.
>>> list(filters[1].values()) # package modules -> Also empty.
[]
>>> sorted(name.string_name for name in filters[2].values()) # Module attributes
['__doc__', '__name__', '__package__']
Finally, it yields the builtin filter, if `include_builtin` is
true (default).
>>> list(filters[3].values()) # doctest: +ELLIPSIS
[...]
"""
base_context = context
from jedi.inference.value.function import BaseFunctionExecutionContext
while context is not None:
# Names in methods cannot be resolved within the class.
yield from context.get_filters(
until_position=until_position,
origin_scope=origin_scope
)
if isinstance(context, (BaseFunctionExecutionContext, ModuleContext)):
# The position should be reset if the current scope is a function.
until_position = None
context = context.parent_context
b = next(base_context.inference_state.builtins_module.get_filters(), None)
assert b is not None
# Add builtins to the global scope.
yield b
| CompiledModuleContext |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E30_isort.py | {
"start": 50,
"end": 103
} | class ____(TypeError): ... # noqa: N818
| MissingCommand |
python | getsentry__sentry | src/sentry/backup/findings.py | {
"start": 864,
"end": 911
} | class ____(IntEnum):
pass
@unique
| FindingKind |
python | kubernetes-client__python | kubernetes/client/models/v1_scheduling.py | {
"start": 383,
"end": 5328
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'node_selector': 'dict(str, str)',
'tolerations': 'list[V1Toleration]'
}
attribute_map = {
'node_selector': 'nodeSelector',
'tolerations': 'tolerations'
}
def __init__(self, node_selector=None, tolerations=None, local_vars_configuration=None): # noqa: E501
"""V1Scheduling - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._node_selector = None
self._tolerations = None
self.discriminator = None
if node_selector is not None:
self.node_selector = node_selector
if tolerations is not None:
self.tolerations = tolerations
@property
def node_selector(self):
"""Gets the node_selector of this V1Scheduling. # noqa: E501
nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission. # noqa: E501
:return: The node_selector of this V1Scheduling. # noqa: E501
:rtype: dict(str, str)
"""
return self._node_selector
@node_selector.setter
def node_selector(self, node_selector):
"""Sets the node_selector of this V1Scheduling.
nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission. # noqa: E501
:param node_selector: The node_selector of this V1Scheduling. # noqa: E501
:type: dict(str, str)
"""
self._node_selector = node_selector
@property
def tolerations(self):
"""Gets the tolerations of this V1Scheduling. # noqa: E501
tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. # noqa: E501
:return: The tolerations of this V1Scheduling. # noqa: E501
:rtype: list[V1Toleration]
"""
return self._tolerations
@tolerations.setter
def tolerations(self, tolerations):
"""Sets the tolerations of this V1Scheduling.
tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass. # noqa: E501
:param tolerations: The tolerations of this V1Scheduling. # noqa: E501
:type: list[V1Toleration]
"""
self._tolerations = tolerations
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Scheduling):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Scheduling):
return True
return self.to_dict() != other.to_dict()
| V1Scheduling |
python | kamyu104__LeetCode-Solutions | Python/play-with-chips.py | {
"start": 29,
"end": 269
} | class ____(object):
def minCostToMoveChips(self, chips):
"""
:type chips: List[int]
:rtype: int
"""
count = [0]*2
for p in chips:
count[p%2] += 1
return min(count)
| Solution |
python | PyCQA__pylint | tests/functional/ext/typing/typing_deprecated_alias.py | {
"start": 2325,
"end": 2399
} | class ____:
my_var: List[int] # [deprecated-typing-alias]
| CustomDataClass |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_sequence.py | {
"start": 9704,
"end": 9979
} | class ____(fixtures.TestBase):
__requires__ = ("sequences",)
__sparse_driver_backend__ = True
def test_get_sequence_names_no_sequence(self, connection):
eq_(
inspect(connection).get_sequence_names(),
[],
)
| HasSequenceTestEmpty |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 35472,
"end": 40681
} | class ____(ModelOutput):
"""
Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
hidden_size)` is output.
past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.
decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
decoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
Router logits of the decoder model, useful to compute the auxiliary loss for Mixture of Experts models.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
encoder_router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_logits=True` is passed or when `config.add_router_probs=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.
Router logits of the encoder model, useful to compute the auxiliary loss and the z_loss for the sparse
modules.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
past_key_values: Optional[EncoderDecoderCache] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
decoder_router_logits: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_router_logits: Optional[tuple[torch.FloatTensor]] = None
@dataclass
| Seq2SeqMoEModelOutput |
python | getsentry__sentry | tests/sentry/api/test_permissions.py | {
"start": 472,
"end": 1802
} | class ____(DRFPermissionTestCase):
superuser_permission = SuperuserPermission()
staff_permission = StaffPermission()
superuser_staff_flagged_permission = SuperuserOrStaffFeatureFlaggedPermission()
def test_superuser_permission(self) -> None:
assert self.superuser_permission.has_permission(self.superuser_request, APIView())
def test_staff_permission(self) -> None:
assert self.staff_permission.has_permission(self.staff_request, APIView())
@override_options({"staff.ga-rollout": True})
def test_superuser_or_staff_feature_flagged_permission_active_option(self) -> None:
# With active superuser
assert not self.superuser_staff_flagged_permission.has_permission(
self.superuser_request, APIView()
)
# With active staff
assert self.superuser_staff_flagged_permission.has_permission(self.staff_request, APIView())
def test_superuser_or_staff_feature_flagged_permission_inactive_option(self) -> None:
# With active staff
assert not self.superuser_staff_flagged_permission.has_permission(
self.staff_request, APIView()
)
# With active superuser
assert self.superuser_staff_flagged_permission.has_permission(
self.superuser_request, APIView()
)
| PermissionsTest |
python | jina-ai__jina | tests/integration/crud/__init__.py | {
"start": 168,
"end": 3549
} | class ____(Executor):
"""Simple indexer class"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.logger = JinaLogger('CrudIndexer')
self._docs = DocumentArray()
self._dump_location = os.path.join(self.metas.workspace, 'docs.json')
if os.path.exists(self._dump_location):
self._docs = DocumentArray.load_json(self._dump_location)
self.logger.debug(f'Loaded {len(self._docs)} from {self._dump_location}')
else:
self.logger.warning(f'No data found at {self._dump_location}')
@requests(on='/index')
def index(self, docs: DocumentArray, **kwargs):
self._docs.extend(docs)
@requests(on='/update')
def update(self, docs: DocumentArray, **kwargs):
self.delete(docs)
self.index(docs)
def close(self) -> None:
self.logger.debug(f'Dumping {len(self._docs)} to {self._dump_location}')
self._docs.save_json(self._dump_location)
@requests(on='/delete')
def delete(self, docs: DocumentArray, **kwargs):
# TODO we can do del _docs[d.id] once
# tests.unit.types.arrays.test_documentarray.test_delete_by_id is fixed
ids_to_delete = [d.id for d in docs]
idx_to_delete = []
for i, doc in enumerate(self._docs):
if doc.id in ids_to_delete:
idx_to_delete.append(i)
for i in sorted(idx_to_delete, reverse=True):
del self._docs[i]
@requests(on='/search')
def search(self, docs: DocumentArray, parameters: Dict, **kwargs):
top_k = int(parameters.get('top_k', 1))
a = np.stack(docs[:, 'embedding'])
b = np.stack(self._docs[:, 'embedding'])
q_emb = _ext_A(_norm(a))
d_emb = _ext_B(_norm(b))
dists = _cosine(q_emb, d_emb)
idx, dist = self._get_sorted_top_k(dists, top_k)
for _q, _ids, _dists in zip(docs, idx, dist):
for _id, _dist in zip(_ids, _dists):
d = Document(self._docs[int(_id)], copy=True)
d.scores['cosine'].value = 1 - _dist
_q.matches.append(d)
@staticmethod
def _get_sorted_top_k(
dist: 'np.array', top_k: int
) -> Tuple['np.ndarray', 'np.ndarray']:
if top_k >= dist.shape[1]:
idx = dist.argsort(axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx, axis=1)
else:
idx_ps = dist.argpartition(kth=top_k, axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx_ps, axis=1)
idx_fs = dist.argsort(axis=1)
idx = np.take_along_axis(idx_ps, idx_fs, axis=1)
dist = np.take_along_axis(dist, idx_fs, axis=1)
return idx, dist
def _get_ones(x, y):
return np.ones((x, y))
def _ext_A(A):
nA, dim = A.shape
A_ext = _get_ones(nA, dim * 3)
A_ext[:, dim : 2 * dim] = A
A_ext[:, 2 * dim :] = A**2
return A_ext
def _ext_B(B):
nB, dim = B.shape
B_ext = _get_ones(dim * 3, nB)
B_ext[:dim] = (B**2).T
B_ext[dim : 2 * dim] = -2.0 * B.T
del B
return B_ext
def _euclidean(A_ext, B_ext):
sqdist = A_ext.dot(B_ext).clip(min=0)
return np.sqrt(sqdist)
def _norm(A):
return A / np.linalg.norm(A, ord=2, axis=1, keepdims=True)
def _cosine(A_norm_ext, B_norm_ext):
return A_norm_ext.dot(B_norm_ext).clip(min=0) / 2
| CrudIndexer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 999280,
"end": 999757
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("actor", "created_at", "subscribable")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
subscribable = sgqlc.types.Field(
sgqlc.types.non_null(Subscribable), graphql_name="subscribable"
)
| SubscribedEvent |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/utils/delete_from.py | {
"start": 4760,
"end": 5195
} | class ____(Exception):
"""For handling error if an error occurred when handling a yaml file during deletion of the resource."""
def __init__(self, api_exceptions: list):
self.api_exceptions = api_exceptions
def __str__(self):
msg = ""
for api_exception in self.api_exceptions:
msg += f"Error from server ({api_exception.reason}):{api_exception.body}\n"
return msg
| FailToDeleteError |
python | ansible__ansible | lib/ansible/modules/group.py | {
"start": 12267,
"end": 13867
} | class ____(Group):
"""
This is a FreeBSD Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
"""
platform = 'FreeBSD'
distribution = None
GROUPFILE = '/etc/group'
def group_del(self):
cmd = [self.module.get_bin_path('pw', True), 'groupdel', self.name]
return self.execute_command(cmd)
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('pw', True), 'groupadd', self.name]
if self.gid is not None:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
if self.gid_min is not None:
cmd.append('-K')
cmd.append('GID_MIN=' + str(self.gid_min))
if self.gid_max is not None:
cmd.append('-K')
cmd.append('GID_MAX=' + str(self.gid_max))
return self.execute_command(cmd)
def group_mod(self, **kwargs):
cmd = [self.module.get_bin_path('pw', True), 'groupmod', self.name]
info = self.group_info()
cmd_len = len(cmd)
if self.gid is not None and int(self.gid) != info[2]:
cmd.append('-g')
cmd.append(str(self.gid))
if self.non_unique:
cmd.append('-o')
# modify the group if cmd will do anything
if cmd_len != len(cmd):
if self.module.check_mode:
return (0, '', '')
return self.execute_command(cmd)
return (None, '', '')
| FreeBsdGroup |
python | keras-team__keras | keras/src/constraints/constraints.py | {
"start": 150,
"end": 2359
} | class ____:
"""Base class for weight constraints.
A `Constraint` instance works like a stateless function.
Users who subclass this
class should override the `__call__()` method, which takes a single
weight parameter and return a projected version of that parameter
(e.g. normalized or clipped). Constraints can be used with various Keras
layers via the `kernel_constraint` or `bias_constraint` arguments.
Here's a simple example of a non-negative weight constraint:
>>> class NonNegative(keras.constraints.Constraint):
...
... def __call__(self, w):
... return w * ops.cast(ops.greater_equal(w, 0.), dtype=w.dtype)
>>> weight = ops.convert_to_tensor((-1.0, 1.0))
>>> NonNegative()(weight)
[0., 1.]
Usage in a layer:
>>> keras.layers.Dense(4, kernel_constraint=NonNegative())
"""
def __call__(self, w):
"""Applies the constraint to the input weight variable.
By default, the inputs weight variable is not modified.
Users should override this method to implement their own projection
function.
Args:
w: Input weight variable.
Returns:
Projected variable (by default, returns unmodified inputs).
"""
return w
def get_config(self):
"""Returns a Python dict of the object config.
A constraint config is a Python dictionary (JSON-serializable) that can
be used to reinstantiate the same object.
Returns:
Python dict containing the configuration of the constraint object.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates a weight constraint from a configuration dictionary.
Example:
```python
constraint = UnitNorm()
config = constraint.get_config()
constraint = UnitNorm.from_config(config)
```
Args:
config: A Python dictionary, the output of `get_config()`.
Returns:
A `keras.constraints.Constraint` instance.
"""
return cls(**config)
@keras_export(["keras.constraints.MaxNorm", "keras.constraints.max_norm"])
| Constraint |
python | huggingface__transformers | src/transformers/models/starcoder2/modeling_starcoder2.py | {
"start": 18883,
"end": 21957
} | class ____(Starcoder2PreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Starcoder2Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, Starcoder2ForCausalLM
>>> model = Starcoder2ForCausalLM.from_pretrained("meta-starcoder2/Starcoder2-2-7b-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("meta-starcoder2/Starcoder2-2-7b-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| Starcoder2ForCausalLM |
python | crytic__slither | slither/detectors/operations/bad_prng.py | {
"start": 2991,
"end": 4662
} | class ____(AbstractDetector):
"""
Detect weak PRNG due to a modulo operation on block.timestamp, now or blockhash
"""
ARGUMENT = "weak-prng"
HELP = "Weak PRNG"
IMPACT = DetectorClassification.HIGH
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#weak-PRNG"
WIKI_TITLE = "Weak PRNG"
WIKI_DESCRIPTION = "Weak PRNG due to a modulo on `block.timestamp`, `now` or `blockhash`. These can be influenced by miners to some extent so they should be avoided."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Game {
uint reward_determining_number;
function guessing() external{
reward_determining_number = uint256(block.blockhash(10000)) % 10;
}
}
```
Eve is a miner. Eve calls `guessing` and re-orders the block containing the transaction.
As a result, Eve wins the game."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = (
"Do not use `block.timestamp`, `now` or `blockhash` as a source of randomness"
)
def _detect(self) -> List[Output]:
"""Detect bad PRNG due to the use of block.timestamp, now or blockhash (block.blockhash) as a source of randomness"""
results = []
for c in self.compilation_unit.contracts_derived:
values = detect_bad_PRNG(c)
for func, nodes in values:
for node in nodes:
info: List[AllSupportedOutput] = [func, ' uses a weak PRNG: "', node, '" \n']
res = self.generate_result(info)
results.append(res)
return results
| BadPRNG |
python | lxml__lxml | src/lxml/sax.py | {
"start": 737,
"end": 944
} | class ____(etree.LxmlError):
"""General SAX error.
"""
def _getNsTag(tag):
if tag[0] == '{' and '}' in tag:
return tuple(tag[1:].split('}', 1))
else:
return None, tag
| SaxError |
python | huggingface__transformers | src/transformers/models/omdet_turbo/processing_omdet_turbo.py | {
"start": 1499,
"end": 2104
} | class ____(ProcessingKwargs, total=False):
text_kwargs: OmDetTurboTextKwargs
_defaults = {
"text_kwargs": {
"add_special_tokens": True,
"padding": "max_length",
"truncation": True,
"max_length": 77,
"stride": 0,
"return_overflowing_tokens": False,
"return_special_tokens_mask": False,
"return_offsets_mapping": False,
"return_token_type_ids": False,
"return_length": False,
"verbose": True,
"task": None,
},
}
| OmDetTurboProcessorKwargs |
python | cython__cython | tests/run/pure_mode_cmethod_inheritance_T583.py | {
"start": 688,
"end": 1108
} | class ____(Derived):
'''
>>> derived = DerivedDerived()
>>> print(derived.noargs())
DerivedDerived
>>> print(derived.int_arg(1))
DerivedDerived
>>> print(derived._class())
DerivedDerived
'''
def noargs(self):
return "DerivedDerived"
def int_arg(self, i):
return "DerivedDerived"
@classmethod
def _class(tp):
return "DerivedDerived"
| DerivedDerived |
python | huggingface__transformers | tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py | {
"start": 51351,
"end": 53719
} | class ____(unittest.TestCase):
@slow
def test_inference_coco_en(self):
loc = "ydshieh/vit-gpt2-coco-en"
image_processor = ViTImageProcessor.from_pretrained(loc)
tokenizer = AutoTokenizer.from_pretrained(loc)
model = VisionEncoderDecoderModel.from_pretrained(loc)
model.to(torch_device)
model.eval()
# We will verify our results on an image of cute cats
img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
pixel_values = image_processor(images=img, return_tensors="pt").pixel_values.to(torch_device)
decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]]).to(torch_device)
with torch.no_grad():
logits = model(pixel_values, decoder_input_ids)[0].detach().cpu().numpy()
# verify the logits
expected_shape = (1, 1, model.config.decoder.vocab_size)
self.assertEqual(logits.shape, expected_shape)
EXPECTED_LOGIT_SLICE = np.array(
[
-38.705807,
-30.639929,
-31.41903,
-39.012012,
-38.38696,
-34.887207,
-33.290855,
-35.68447,
-38.508484,
-36.124645,
]
)
max_diff = np.amax(np.abs(logits[0, 0, :10] - EXPECTED_LOGIT_SLICE))
self.assertLessEqual(max_diff, 1e-4)
def generate_step(pixel_values):
outputs = model.generate(
pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True, output_scores=True
)
output_ids = outputs.sequences
preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
preds = [pred.strip() for pred in preds]
return preds, outputs.sequences_scores.detach().cpu().numpy()
preds, scores = generate_step(pixel_values)
EXPECTED_SCORES = np.array([-0.5956343])
max_diff = np.amax(np.abs(scores - EXPECTED_SCORES))
self.assertLessEqual(max_diff, 1e-4)
# should produce
# ["a cat laying on top of a couch next to another cat"]
self.assertEqual(preds, ["a cat laying on top of a couch next to another cat"])
@require_vision
@require_torch
@require_sentencepiece
| ViT2GPT2ModelIntegrationTest |
python | ansible__ansible | test/lib/ansible_test/_internal/http.py | {
"start": 277,
"end": 3041
} | class ____:
"""Make HTTP requests."""
def __init__(self, args: CommonConfig, always: bool = False) -> None:
self.args = args
self.always = always
def get(self, url: str) -> HttpResponse:
"""Perform an HTTP GET and return the response."""
return self.request('GET', url)
def delete(self, url: str) -> HttpResponse:
"""Perform an HTTP DELETE and return the response."""
return self.request('DELETE', url)
def put(self, url: str, data: t.Optional[str] = None, headers: t.Optional[dict[str, str]] = None) -> HttpResponse:
"""Perform an HTTP PUT and return the response."""
return self.request('PUT', url, data, headers)
def request(self, method: str, url: str, data: t.Optional[str] = None, headers: t.Optional[dict[str, str]] = None) -> HttpResponse:
"""Perform an HTTP request and return the response."""
if headers is None:
headers = {}
data_bytes = data.encode() if data else None
request = urllib.request.Request(method=method, url=url, data=data_bytes, headers=headers)
response: http.client.HTTPResponse
display.info(f'HTTP {method} {url}', verbosity=2)
attempts = 0
max_attempts = 3
sleep_seconds = 3
status_code = 200
reason = 'OK'
body_bytes = b''
while True:
attempts += 1
start = time.monotonic()
if self.args.explain and not self.always:
break
try:
try:
with urllib.request.urlopen(request) as response:
status_code = response.status
reason = response.reason
body_bytes = response.read()
except urllib.error.HTTPError as ex:
status_code = ex.status
reason = ex.reason
body_bytes = ex.read()
except Exception as ex: # pylint: disable=broad-exception-caught
if attempts >= max_attempts:
raise
# all currently implemented methods are idempotent, so retries are unconditionally supported
duration = time.monotonic() - start
display.warning(f'{type(ex).__module__}.{type(ex).__name__}: {ex} [{duration:.2f} seconds]')
time.sleep(sleep_seconds)
continue
break
duration = time.monotonic() - start
display.info(f'HTTP {method} {url} -> HTTP {status_code} ({reason}) [{len(body_bytes)} bytes, {duration:.2f} seconds]', verbosity=3)
body = body_bytes.decode()
return HttpResponse(method, url, status_code, body)
| HttpClient |
python | getsentry__sentry | src/sentry/seer/endpoints/trace_explorer_ai_setup.py | {
"start": 687,
"end": 1421
} | class ____(OrganizationPermission):
scope_map = {
"POST": ["org:read"],
}
def fire_setup_request(org_id: int, project_ids: list[int]) -> None:
"""
Sends a request to seer to create the initial cached prompt / setup the AI models
"""
body = orjson.dumps(
{
"org_id": org_id,
"project_ids": project_ids,
}
)
response = requests.post(
f"{settings.SEER_AUTOFIX_URL}/v1/assisted-query/create-cache",
data=body,
headers={
"content-type": "application/json;charset=utf-8",
**sign_with_seer_secret(body),
},
)
response.raise_for_status()
@region_silo_endpoint
| OrganizationTraceExplorerAIPermission |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dlp.py | {
"start": 103590,
"end": 107744
} | class ____(GoogleCloudBaseOperator):
"""
Updates the DeidentifyTemplate.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPUpdateDeidentifyTemplateOperator`
:param template_id: The ID of deidentify template to be updated.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param deidentify_template: New DeidentifyTemplate value.
:param update_mask: Mask to control which fields get updated.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"template_id",
"organization_id",
"project_id",
"deidentify_template",
"update_mask",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPDeidentifyTemplateDetailsLink(),)
def __init__(
self,
*,
template_id: str,
organization_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
deidentify_template: dict | DeidentifyTemplate | None = None,
update_mask: dict | FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.deidentify_template = deidentify_template
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
template = hook.update_deidentify_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
deidentify_template=self.deidentify_template,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPDeidentifyTemplateDetailsLink.persist(
context=context,
project_id=project_id,
template_name=self.template_id,
)
return DeidentifyTemplate.to_dict(template)
| CloudDLPUpdateDeidentifyTemplateOperator |
python | joke2k__faker | tests/providers/test_currency.py | {
"start": 7727,
"end": 8926
} | class ____:
"""Test de_DE currency provider"""
num_samples = 100
@classmethod
def setup_class(cls):
from faker.providers.currency.de_DE import Provider as DeDeCurrencyProvider
cls.provider = DeDeCurrencyProvider
cls.currencies = cls.provider.currencies
cls.currency_names = [currency_name for currency_code, currency_name in cls.currencies]
cls.currency_codes = [currency_code for currency_code, currency_name in cls.currencies]
def test_pricetag(self, faker, num_samples):
for _ in range(num_samples):
pricetag = faker.pricetag()
assert isinstance(pricetag, str)
def test_currency(self, faker, num_samples):
for _ in range(num_samples):
cur = faker.currency()
assert cur in self.provider.currencies
def test_currency_name(self, faker, num_samples):
for _ in range(num_samples):
name = faker.currency_name()
assert name in self.currency_names
def test_currency_code(self, faker, num_samples):
for _ in range(num_samples):
code = faker.currency_code()
assert code in self.currency_codes
| TestDeDe |
python | walkccc__LeetCode | solutions/1628. Design an Expression Tree With Evaluate Function/1628.py | {
"start": 277,
"end": 963
} | class ____(Node):
op = {
'+': lambda a, b: a + b,
'-': lambda a, b: a - b,
'*': lambda a, b: a * b,
'/': lambda a, b: int(a / b),
}
def __init__(
self,
val: str,
left: Optional['ExpNode'],
right: Optional['ExpNode'],
):
self.val = val
self.left = left
self.right = right
def evaluate(self) -> int:
if not self.left and not self.right:
return int(self.val)
return ExpNode.op[self.val](self.left.evaluate(), self.right.evaluate())
"""
This is the TreeBuilder class.
You can treat it as the driver code that takes the postinfix input
and returns the expression tree represnting it as a Node.
"""
| ExpNode |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.