language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/operators/test_kueue.py | {
"start": 1590,
"end": 7134
} | class ____:
def setup_method(self):
self.operator = KubernetesInstallKueueOperator(
task_id=TEST_TASK_ID,
kueue_version=KUEUE_VERSION,
kubernetes_conn_id=TEST_K8S_CONN_ID,
)
def test_template_fields(self):
expected_template_fields = {"kueue_version", "kubernetes_conn_id"}
assert set(KubernetesInstallKueueOperator.template_fields) == expected_template_fields
@mock.patch(KUEUE_OPERATORS_PATH.format("KubernetesHook"))
def test_hook(self, mock_hook):
mock_hook_instance = mock_hook.return_value
actual_hook = self.operator.hook
mock_hook.assert_called_once_with(conn_id=TEST_K8S_CONN_ID)
assert actual_hook == mock_hook_instance
@mock.patch(KUEUE_OPERATORS_PATH.format("KubernetesInstallKueueOperator.log"))
@mock.patch(KUEUE_OPERATORS_PATH.format("KubernetesHook"))
def test_execute(self, mock_hook, mock_log):
mock_get_yaml_content_from_file = mock_hook.return_value.get_yaml_content_from_file
mock_yaml_objects = mock_get_yaml_content_from_file.return_value
self.operator.execute(context=mock.MagicMock())
mock_get_yaml_content_from_file.assert_called_once_with(kueue_yaml_url=KUEUE_YAML_URL)
mock_hook.return_value.apply_from_yaml_file.assert_called_once_with(yaml_objects=mock_yaml_objects)
mock_hook.return_value.check_kueue_deployment_running.assert_called_once_with(
name="kueue-controller-manager",
namespace="kueue-system",
)
mock_log.info.assert_called_once_with("Kueue installed successfully!")
@mock.patch(KUEUE_OPERATORS_PATH.format("KubernetesInstallKueueOperator.log"))
@mock.patch(KUEUE_OPERATORS_PATH.format("KubernetesHook"))
def test_execute_already_exist(self, mock_hook, mock_log):
mock_get_yaml_content_from_file = mock_hook.return_value.get_yaml_content_from_file
mock_yaml_objects = mock_get_yaml_content_from_file.return_value
mock_apply_from_yaml_file = mock_hook.return_value.apply_from_yaml_file
api_exceptions = [mock.MagicMock(body=json.dumps({"reason": "AlreadyExists"})) for _ in range(4)]
mock_apply_from_yaml_file.side_effect = FailToCreateError(api_exceptions)
self.operator.execute(context=mock.MagicMock())
mock_get_yaml_content_from_file.assert_called_once_with(kueue_yaml_url=KUEUE_YAML_URL)
mock_apply_from_yaml_file.assert_called_once_with(yaml_objects=mock_yaml_objects)
mock_hook.return_value.check_kueue_deployment_running.assert_not_called()
mock_log.info.assert_called_once_with("Kueue is already enabled for the cluster")
@mock.patch(KUEUE_OPERATORS_PATH.format("KubernetesInstallKueueOperator.log"))
@mock.patch(KUEUE_OPERATORS_PATH.format("KubernetesHook"))
def test_execute_error(self, mock_hook, mock_log):
mock_get_yaml_content_from_file = mock_hook.return_value.get_yaml_content_from_file
mock_yaml_objects = mock_get_yaml_content_from_file.return_value
mock_apply_from_yaml_file = mock_hook.return_value.apply_from_yaml_file
api_exceptions = [
mock.MagicMock(body=json.dumps({"reason": "AlreadyExists"})),
mock.MagicMock(body=json.dumps({"reason": TEST_ERROR_CLASS, "body": TEST_ERROR_BODY})),
mock.MagicMock(body=json.dumps({"reason": TEST_ERROR_CLASS, "body": TEST_ERROR_BODY})),
]
mock_apply_from_yaml_file.side_effect = FailToCreateError(api_exceptions)
expected_error_message = f"{TEST_ERROR_BODY}\n{TEST_ERROR_BODY}"
with pytest.raises(AirflowException, match=expected_error_message):
self.operator.execute(context=mock.MagicMock())
mock_get_yaml_content_from_file.assert_called_once_with(kueue_yaml_url=KUEUE_YAML_URL)
mock_apply_from_yaml_file.assert_called_once_with(yaml_objects=mock_yaml_objects)
mock_hook.return_value.check_kueue_deployment_running.assert_not_called()
mock_log.info.assert_called_once_with("Kueue is already enabled for the cluster")
@mock.patch(KUEUE_OPERATORS_PATH.format("KubernetesInstallKueueOperator.log"))
@mock.patch(KUEUE_OPERATORS_PATH.format("KubernetesHook"))
def test_execute_non_json_response(self, mock_hook, mock_log):
"""Test handling of non-JSON API response bodies (e.g., 429 errors)."""
mock_get_yaml_content_from_file = mock_hook.return_value.get_yaml_content_from_file
mock_yaml_objects = mock_get_yaml_content_from_file.return_value
mock_apply_from_yaml_file = mock_hook.return_value.apply_from_yaml_file
# Create mock exceptions with non-JSON bodies (simulating 429 errors)
api_exceptions = [
mock.MagicMock(body="Too many requests, please try again later.", reason="TooManyRequests"),
mock.MagicMock(body="", reason="RateLimited"), # Empty body case
]
mock_apply_from_yaml_file.side_effect = FailToCreateError(api_exceptions)
expected_error_message = "Too many requests, please try again later.\nRateLimited"
with pytest.raises(AirflowException, match=expected_error_message):
self.operator.execute(context=mock.MagicMock())
mock_get_yaml_content_from_file.assert_called_once_with(kueue_yaml_url=KUEUE_YAML_URL)
mock_apply_from_yaml_file.assert_called_once_with(yaml_objects=mock_yaml_objects)
mock_hook.return_value.check_kueue_deployment_running.assert_not_called()
| TestKubernetesInstallKueueOperator |
python | astropy__astropy | astropy/io/ascii/tdat.py | {
"start": 1473,
"end": 1546
} | class ____(AstropyWarning):
"""Tdat Format Warning"""
| TdatFormatWarning |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 60821,
"end": 63278
} | class ____:
"""Test pt_BR address provider methods"""
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in PtBrAddressProvider.countries
def test_bairro(self, faker, num_samples):
for _ in range(num_samples):
bairro = faker.bairro()
assert isinstance(bairro, str)
assert bairro in PtBrAddressProvider.bairros
def test_neighborhood(self, faker, num_samples):
for _ in range(num_samples):
neighborhood = faker.neighborhood()
assert isinstance(neighborhood, str)
assert neighborhood in PtBrAddressProvider.bairros
def test_estado(self, faker, num_samples):
for _ in range(num_samples):
estado = faker.estado()
assert isinstance(estado, tuple)
assert estado in PtBrAddressProvider.estados
def test_estado_nome(self, faker, num_samples):
state_names = [state_name for state_abbr, state_name in PtBrAddressProvider.estados]
for _ in range(num_samples):
estado_nome = faker.estado_nome()
assert isinstance(estado_nome, str)
assert estado_nome in state_names
def test_estado_sigla(self, faker, num_samples):
state_abbrs = [state_abbr for state_abbr, state_name in PtBrAddressProvider.estados]
for _ in range(num_samples):
estado_sigla = faker.estado_sigla()
assert isinstance(estado_sigla, str)
assert estado_sigla in state_abbrs
def test_address(self, faker, num_samples):
for _ in range(num_samples):
street = faker.street_name()
assert isinstance(street, str)
city = faker.street_address()
assert isinstance(city, str)
address = faker.address()
assert isinstance(address, str)
def test_raw_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode(formatted=False)
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{8}", postcode)
def test_formatted_postcode(self, faker, num_samples):
for _ in range(num_samples):
postcode = faker.postcode()
assert isinstance(postcode, str)
assert re.fullmatch(r"\d{5}-?\d{3}", postcode)
| TestPtBr |
python | tensorflow__tensorflow | tensorflow/python/distribute/cluster_resolver/gce_cluster_resolver.py | {
"start": 1295,
"end": 7660
} | class ____(ClusterResolver):
"""ClusterResolver for Google Compute Engine.
This is an implementation of cluster resolvers for the Google Compute Engine
instance group platform. By specifying a project, zone, and instance group,
this will retrieve the IP address of all the instances within the instance
group and return a ClusterResolver object suitable for use for distributed
TensorFlow.
Note: this cluster resolver cannot retrieve `task_type`, `task_id` or
`rpc_layer`. To use it with some distribution strategies like
`tf.distribute.experimental.MultiWorkerMirroredStrategy`, you will need to
specify `task_type` and `task_id` in the constructor.
Usage example with tf.distribute.Strategy:
```Python
# On worker 0
cluster_resolver = GCEClusterResolver("my-project", "us-west1",
"my-instance-group",
task_type="worker", task_id=0)
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
cluster_resolver=cluster_resolver)
# On worker 1
cluster_resolver = GCEClusterResolver("my-project", "us-west1",
"my-instance-group",
task_type="worker", task_id=1)
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
cluster_resolver=cluster_resolver)
```
"""
def __init__(self,
project,
zone,
instance_group,
port,
task_type='worker',
task_id=0,
rpc_layer='grpc',
credentials='default',
service=None):
"""Creates a new GCEClusterResolver object.
This takes in a few parameters and creates a GCEClusterResolver project. It
will then use these parameters to query the GCE API for the IP addresses of
each instance in the instance group.
Args:
project: Name of the GCE project.
zone: Zone of the GCE instance group.
instance_group: Name of the GCE instance group.
port: Port of the listening TensorFlow server (default: 8470)
task_type: Name of the TensorFlow job this GCE instance group of VM
instances belong to.
task_id: The task index for this particular VM, within the GCE
instance group. In particular, every single instance should be assigned
a unique ordinal index within an instance group manually so that they
can be distinguished from each other.
rpc_layer: The RPC layer TensorFlow should use to communicate across
instances.
credentials: GCE Credentials. If nothing is specified, this defaults to
GoogleCredentials.get_application_default().
service: The GCE API object returned by the googleapiclient.discovery
function. (Default: discovery.build('compute', 'v1')). If you specify a
custom service object, then the credentials parameter will be ignored.
Raises:
ImportError: If the googleapiclient is not installed.
"""
self._project = project
self._zone = zone
self._instance_group = instance_group
self._task_type = task_type
self._task_id = task_id
self._rpc_layer = rpc_layer
self._port = port
self._credentials = credentials
if credentials == 'default':
if _GOOGLE_API_CLIENT_INSTALLED:
self._credentials = GoogleCredentials.get_application_default()
if service is None:
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('googleapiclient must be installed before using the '
'GCE cluster resolver')
self._service = discovery.build(
'compute', 'v1',
credentials=self._credentials)
else:
self._service = service
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest instance group info.
This returns a ClusterSpec object for use based on information from the
specified instance group. We will retrieve the information from the GCE APIs
every time this method is called.
Returns:
A ClusterSpec containing host information retrieved from GCE.
"""
request_body = {'instanceState': 'RUNNING'}
request = self._service.instanceGroups().listInstances(
project=self._project,
zone=self._zone,
instanceGroups=self._instance_group,
body=request_body,
orderBy='name')
worker_list = []
while request is not None:
response = request.execute()
items = response['items']
for instance in items:
instance_name = instance['instance'].split('/')[-1]
instance_request = self._service.instances().get(
project=self._project,
zone=self._zone,
instance=instance_name)
if instance_request is not None:
instance_details = instance_request.execute()
ip_address = instance_details['networkInterfaces'][0]['networkIP']
instance_url = '%s:%s' % (ip_address, self._port)
worker_list.append(instance_url)
request = self._service.instanceGroups().listInstances_next(
previous_request=request,
previous_response=response)
worker_list.sort()
return ClusterSpec({self._task_type: worker_list})
def master(self, task_type=None, task_id=None, rpc_layer=None):
task_type = task_type if task_type is not None else self._task_type
task_id = task_id if task_id is not None else self._task_id
if task_type is not None and task_id is not None:
master = self.cluster_spec().task_address(task_type, task_id)
if rpc_layer or self._rpc_layer:
return '%s://%s' % (rpc_layer or self._rpc_layer, master)
else:
return master
return ''
@property
def task_type(self):
return self._task_type
@property
def task_id(self):
return self._task_id
@task_type.setter
def task_type(self, task_type):
raise RuntimeError(
'You cannot reset the task_type of the GCEClusterResolver after it has '
'been created.')
@task_id.setter
def task_id(self, task_id):
self._task_id = task_id
@property
def rpc_layer(self):
return self._rpc_layer
@rpc_layer.setter
def rpc_layer(self, rpc_layer):
self._rpc_layer = rpc_layer
| GCEClusterResolver |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault2.py | {
"start": 2201,
"end": 2309
} | class ____[**P = [T1]]: ...
# This should generate an error because ParamSpec must be a list of types.
| ClassP5 |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/boost/package.py | {
"start": 217,
"end": 2150
} | class ____(Package):
"""Fake boost package."""
homepage = "http://www.boost.org"
url = "http://downloads.sourceforge.net/project/boost/boost/1.63.0/boost_1_63_0.tar.bz2"
version("1.63.0", md5="1c837ecd990bb022d07e7aab32b09847")
default_install_libs = set(
[
"atomic",
"chrono",
"date_time",
"filesystem",
"graph",
"iostreams",
"locale",
"log",
"math",
"program_options",
"random",
"regex",
"serialization",
"signals",
"system",
"test",
"thread",
"timer",
"wave",
]
)
# mpi/python are not installed by default because they pull in many
# dependencies and/or because there is a great deal of customization
# possible (and it would be difficult to choose sensible defaults)
default_noinstall_libs = set(["mpi", "python"])
all_libs = default_install_libs | default_noinstall_libs
for lib in all_libs:
variant(
lib,
default=(lib not in default_noinstall_libs),
description="Compile with {0} library".format(lib),
)
variant("debug", default=False, description="Switch to the debug version of Boost")
variant("shared", default=True, description="Additionally build shared libraries")
variant(
"multithreaded", default=True, description="Build multi-threaded versions of libraries"
)
variant(
"singlethreaded", default=False, description="Build single-threaded versions of libraries"
)
variant("icu", default=False, description="Build with Unicode and ICU suport")
variant("graph", default=False, description="Build the Boost Graph library")
variant("taggedlayout", default=False, description="Augment library names with build options")
| Boost |
python | pyodide__pyodide | src/py/_pyodide/_core_docs.py | {
"start": 27484,
"end": 28069
} | class ____(JsProxy):
"""A :py:class:`JsFetchResponse` object represents a :js:data:`Response` to a
:js:func:`fetch` request.
"""
bodyUsed: bool
ok: bool
redirected: bool
status: int
statusText: str
type: str
url: str
headers: Any
def clone(self) -> "JsFetchResponse":
raise NotImplementedError
async def arrayBuffer(self) -> JsBuffer:
raise NotImplementedError
async def text(self) -> str:
raise NotImplementedError
async def json(self) -> JsProxy:
raise NotImplementedError
| JsFetchResponse |
python | PyCQA__isort | isort/exceptions.py | {
"start": 4716,
"end": 5348
} | class ____(ISortError):
"""Raised when isort is told to sort assignments but the format of the assignment section
doesn't match isort's expectation.
"""
def __init__(self, code: str):
super().__init__(
"isort was told to sort a section of assignments, however the given code:\n\n"
f"{code}\n\n"
"Does not match isort's strict single line formatting requirement for assignment "
"sorting:\n\n"
"{variable_name} = {value}\n"
"{variable_name2} = {value2}\n"
"...\n\n"
)
self.code = code
| AssignmentsFormatMismatch |
python | sphinx-doc__sphinx | sphinx/transforms/i18n.py | {
"start": 14656,
"end": 24112
} | class ____(SphinxTransform):
"""Replace translatable nodes with their translated doctree."""
default_priority = 20
def apply(self, **kwargs: Any) -> None:
settings, source = self.document.settings, self.document['source']
msgstr = ''
textdomain = docname_to_domain(
self.env.current_document.docname, self.config.gettext_compact
)
# fetch translations
srcdir = self.env.srcdir
dirs = [srcdir / directory for directory in self.config.locale_dirs]
catalog, has_catalog = init_locale(dirs, self.config.language, textdomain)
if not has_catalog:
return
catalogues = [getattr(catalog, '_catalog', None)]
while (catalog := catalog._fallback) is not None: # type: ignore[attr-defined]
catalogues.append(getattr(catalog, '_catalog', None))
merged: dict[str, str] = {}
for catalogue in filter(None, reversed(catalogues)): # type: dict[str, str]
merged |= catalogue
# phase1: replace reference ids with translated names
for node, msg in extract_messages(self.document):
msgstr = merged.get(msg, '')
# There is no point in having noqa on literal blocks because
# they cannot contain references. Recognizing it would just
# completely prevent escaping the noqa. Outside of literal
# blocks, one can always write \#noqa.
if not isinstance(node, LITERAL_TYPE_NODES):
msgstr, _ = parse_noqa(msgstr)
if not msgstr.strip():
# as-of-yet untranslated
node['translated'] = False
continue
if msgstr == msg:
# identical source and translated messages
node['translated'] = True
continue
# Avoid "Literal block expected; none found." warnings.
# If msgstr ends with '::' then it cause warning message at
# parser.parse() processing.
# literal-block-warning is only appear in avobe case.
if msgstr.strip().endswith('::'):
msgstr += '\n\n dummy literal'
# dummy literal node will discard by 'patch = patch[0]'
# literalblock need literal block notation to avoid it become
# paragraph.
if isinstance(node, LITERAL_TYPE_NODES):
msgstr = '::\n\n' + indent(msgstr, ' ' * 3)
patch = _publish_msgstr(
msgstr,
source,
node.line, # type: ignore[arg-type]
config=self.config,
env=self.env,
registry=self.env._registry,
settings=settings,
)
# FIXME: no warnings about inconsistent references in this part
# XXX doctest and other block markup
if not isinstance(patch, nodes.paragraph):
continue # skip for now
updater = _NodeUpdater(node, patch, self.document, noqa=False)
processed = updater.update_title_mapping()
# glossary terms update refid
if isinstance(node, nodes.term):
for _id in node['ids']:
term, first_classifier = split_term_classifiers(msgstr)
patch = _publish_msgstr(
term or '',
source,
node.line, # type: ignore[arg-type]
config=self.config,
env=self.env,
registry=self.env._registry,
settings=settings,
)
updater.patch = make_glossary_term(
self.env,
patch,
first_classifier,
source,
node.line, # type: ignore[arg-type]
_id,
self.document,
)
processed = True
# update leaves with processed nodes
if processed:
updater.update_leaves()
node['translated'] = True # to avoid double translation
else:
node['translated'] = False
# phase2: translation
for node, msg in extract_messages(self.document):
if node.setdefault('translated', False): # to avoid double translation
continue # skip if the node is already translated by phase1
msgstr = merged.get(msg, '')
noqa = False
# See above.
if not isinstance(node, LITERAL_TYPE_NODES):
msgstr, noqa = parse_noqa(msgstr)
if not msgstr or msgstr == msg: # as-of-yet untranslated
node['translated'] = False
continue
# update translatable nodes
if isinstance(node, addnodes.translatable):
node.apply_translated_message(msg, msgstr)
continue
# update meta nodes
if isinstance(node, nodes.meta):
node['content'] = msgstr
node['translated'] = True
continue
if isinstance(node, nodes.image) and node.get('alt') == msg:
node['alt'] = msgstr
continue
# Avoid "Literal block expected; none found." warnings.
# If msgstr ends with '::' then it cause warning message at
# parser.parse() processing.
# literal-block-warning is only appear in avobe case.
if msgstr.strip().endswith('::'):
msgstr += '\n\n dummy literal'
# dummy literal node will discard by 'patch = patch[0]'
# literalblock need literal block notation to avoid it become
# paragraph.
if isinstance(node, LITERAL_TYPE_NODES):
msgstr = '::\n\n' + indent(msgstr, ' ' * 3)
# Structural Subelements phase1
# There is a possibility that only the title node is created.
# see: https://docutils.sourceforge.io/docs/ref/doctree.html#structural-subelements
if isinstance(node, nodes.title):
# This generates: <section ...><title>msgstr</title></section>
msgstr = msgstr + '\n' + '=' * len(msgstr) * 2
patch = _publish_msgstr(
msgstr,
source,
node.line, # type: ignore[arg-type]
config=self.config,
env=self.env,
registry=self.env._registry,
settings=settings,
)
# Structural Subelements phase2
if isinstance(node, nodes.title):
# get <title> node that placed as a first child
patch = patch.next_node() # type: ignore[assignment]
# ignore unexpected markups in translation message
unexpected: tuple[type[nodes.Element], ...] = (
nodes.paragraph, # expected form of translation
nodes.title, # generated by above "Subelements phase2"
)
# following types are expected if
# config.gettext_additional_targets is configured
unexpected += LITERAL_TYPE_NODES
unexpected += IMAGE_TYPE_NODES
if not isinstance(patch, unexpected):
continue # skip
updater = _NodeUpdater(node, patch, self.document, noqa)
updater.update_autofootnote_references()
updater.update_refnamed_references()
updater.update_refnamed_footnote_references()
updater.update_citation_references()
updater.update_pending_xrefs()
updater.update_leaves()
# for highlighting that expects .rawsource and .astext() are same.
if isinstance(node, LITERAL_TYPE_NODES):
node.rawsource = node.astext()
if isinstance(node, nodes.image) and node.get('alt') != msg:
node['uri'] = patch['uri']
node['translated'] = False
continue # do not mark translated
node['translated'] = True # to avoid double translation
if 'index' in self.config.gettext_additional_targets:
# Extract and translate messages for index entries.
for node, entries in traverse_translatable_index(self.document):
new_entries: list[tuple[str, str, str, str, str | None]] = []
for entry_type, value, target_id, main, _category_key in entries:
msg_parts = split_index_msg(entry_type, value)
msgstr_parts = []
for part in msg_parts:
msgstr = merged.get(part, '')
if not msgstr:
msgstr = part
msgstr_parts.append(msgstr)
new_entry = (
entry_type,
';'.join(msgstr_parts),
target_id,
main,
None,
)
new_entries.append(new_entry)
node['raw_entries'] = entries
node['entries'] = new_entries
| Locale |
python | aio-libs__aiohttp | aiohttp/helpers.py | {
"start": 25075,
"end": 25952
} | class ____(Protocol):
def set_exception(
self,
exc: type[BaseException] | BaseException,
exc_cause: BaseException = ...,
) -> None: ...
def set_exception(
fut: Union["asyncio.Future[_T]", ErrorableProtocol],
exc: type[BaseException] | BaseException,
exc_cause: BaseException = _EXC_SENTINEL,
) -> None:
"""Set future exception.
If the future is marked as complete, this function is a no-op.
:param exc_cause: An exception that is a direct cause of ``exc``.
Only set if provided.
"""
if asyncio.isfuture(fut) and fut.done():
return
exc_is_sentinel = exc_cause is _EXC_SENTINEL
exc_causes_itself = exc is exc_cause
if not exc_is_sentinel and not exc_causes_itself:
exc.__cause__ = exc_cause
fut.set_exception(exc)
@functools.total_ordering
| ErrorableProtocol |
python | fastai__fastai | fastai/text/models/core.py | {
"start": 3897,
"end": 5918
} | class ____(Module):
"Create an encoder over `module` that can process a full sentence."
def __init__(self,
bptt:int, # Backpropagation through time
module:nn.Module, # A module that can process up to [`bs`, `bptt`] tokens
pad_idx:int=1, # Padding token id
max_len:int=None # Maximal output length
):
store_attr('bptt,module,pad_idx,max_len')
def reset(self): getcallable(self.module, 'reset')()
def forward(self, input):
bs,sl = input.size()
self.reset()
mask = input == self.pad_idx
outs,masks = [],[]
for i in range(0, sl, self.bptt):
#Note: this expects that sequence really begins on a round multiple of bptt
real_bs = (input[:,i] != self.pad_idx).long().sum()
o = self.module(input[:real_bs,i: min(i+self.bptt, sl)])
if self.max_len is None or sl-i <= self.max_len:
outs.append(o)
masks.append(mask[:,i: min(i+self.bptt, sl)])
outs = torch.cat([_pad_tensor(o, bs) for o in outs], dim=1)
mask = torch.cat(masks, dim=1)
return outs,mask
# %% ../../../nbs/33_text.models.core.ipynb 21
def masked_concat_pool(
output:Tensor, # Output of sentence encoder
mask:Tensor, # Boolean mask as returned by sentence encoder
bptt:int # Backpropagation through time
) -> Tensor: # Concatenation of [last_hidden, max_pool, avg_pool]
"Pool `MultiBatchEncoder` outputs into one vector [last_hidden, max_pool, avg_pool]"
lens = output.shape[1] - mask.long().sum(dim=1)
last_lens = mask[:,-bptt:].long().sum(dim=1)
avg_pool = output.masked_fill(mask[:, :, None], 0).sum(dim=1)
avg_pool.div_(lens.type(avg_pool.dtype)[:,None])
max_pool = output.masked_fill(mask[:,:,None], -float('inf')).max(dim=1)[0]
x = torch.cat([output[torch.arange(0, output.size(0)),-last_lens-1], max_pool, avg_pool], 1) #Concat pooling.
return x
# %% ../../../nbs/33_text.models.core.ipynb 24
| SentenceEncoder |
python | scrapy__scrapy | scrapy/spidermiddlewares/referer.py | {
"start": 11612,
"end": 15111
} | class ____(BaseSpiderMiddleware):
def __init__(self, settings: BaseSettings | None = None): # pylint: disable=super-init-not-called
self.default_policy: type[ReferrerPolicy] = DefaultReferrerPolicy
if settings is not None:
settings_policy = _load_policy_class(settings.get("REFERRER_POLICY"))
assert settings_policy
self.default_policy = settings_policy
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
if not crawler.settings.getbool("REFERER_ENABLED"):
raise NotConfigured
mw = cls(crawler.settings)
# Note: this hook is a bit of a hack to intercept redirections
crawler.signals.connect(mw.request_scheduled, signal=signals.request_scheduled)
return mw
def policy(self, resp_or_url: Response | str, request: Request) -> ReferrerPolicy:
"""
Determine Referrer-Policy to use from a parent Response (or URL),
and a Request to be sent.
- if a valid policy is set in Request meta, it is used.
- if the policy is set in meta but is wrong (e.g. a typo error),
the policy from settings is used
- if the policy is not set in Request meta,
but there is a Referrer-policy header in the parent response,
it is used if valid
- otherwise, the policy from settings is used.
"""
policy_name = request.meta.get("referrer_policy")
if policy_name is None and isinstance(resp_or_url, Response):
policy_header = resp_or_url.headers.get("Referrer-Policy")
if policy_header is not None:
policy_name = to_unicode(policy_header.decode("latin1"))
if policy_name is None:
return self.default_policy()
cls = _load_policy_class(policy_name, warning_only=True)
return cls() if cls else self.default_policy()
def get_processed_request(
self, request: Request, response: Response | None
) -> Request | None:
if response is None:
# start requests
return request
referrer = self.policy(response, request).referrer(response.url, request.url)
if referrer is not None:
request.headers.setdefault("Referer", referrer)
return request
def request_scheduled(self, request: Request, spider: Spider) -> None:
# check redirected request to patch "Referer" header if necessary
redirected_urls = request.meta.get("redirect_urls", [])
if redirected_urls:
request_referrer = request.headers.get("Referer")
# we don't patch the referrer value if there is none
if request_referrer is not None:
# the request's referrer header value acts as a surrogate
# for the parent response URL
#
# Note: if the 3xx response contained a Referrer-Policy header,
# the information is not available using this hook
parent_url = safe_url_string(request_referrer)
policy_referrer = self.policy(parent_url, request).referrer(
parent_url, request.url
)
if policy_referrer != request_referrer.decode("latin1"):
if policy_referrer is None:
request.headers.pop("Referer")
else:
request.headers["Referer"] = policy_referrer
| RefererMiddleware |
python | kamyu104__LeetCode-Solutions | Python/number-of-different-integers-in-a-string.py | {
"start": 29,
"end": 500
} | class ____(object):
def numDifferentIntegers(self, word):
"""
:type word: str
:rtype: int
"""
result, num = set(), None
for i in xrange(len(word)+1):
c = word[i] if i < len(word) else ' '
if c.isdigit():
num = 10*num+int(c) if num is not None else int(c)
elif num is not None:
result.add(num)
num = None
return len(result)
| Solution |
python | run-llama__llama_index | llama-index-core/llama_index/core/llms/llm.py | {
"start": 4186,
"end": 30335
} | class ____(BaseLLM):
"""
The LLM class is the main class for interacting with language models.
Attributes:
system_prompt (Optional[str]):
System prompt for LLM calls.
messages_to_prompt (Callable):
Function to convert a list of messages to an LLM prompt.
completion_to_prompt (Callable):
Function to convert a completion to an LLM prompt.
output_parser (Optional[BaseOutputParser]):
Output parser to parse, validate, and correct errors programmatically.
pydantic_program_mode (PydanticProgramMode):
Pydantic program mode to use for structured prediction.
"""
system_prompt: Optional[str] = Field(
default=None, description="System prompt for LLM calls."
)
messages_to_prompt: MessagesToPromptCallable = Field(
description="Function to convert a list of messages to an LLM prompt.",
default=None,
exclude=True,
)
completion_to_prompt: CompletionToPromptCallable = Field(
description="Function to convert a completion to an LLM prompt.",
default=None,
exclude=True,
)
output_parser: Optional[BaseOutputParser] = Field(
description="Output parser to parse, validate, and correct errors programmatically.",
default=None,
exclude=True,
)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT
# deprecated
query_wrapper_prompt: Optional[BasePromptTemplate] = Field(
description="Query wrapper prompt for LLM calls.",
default=None,
exclude=True,
)
# -- Pydantic Configs --
@field_validator("messages_to_prompt")
@classmethod
def set_messages_to_prompt(
cls, messages_to_prompt: Optional[MessagesToPromptType]
) -> MessagesToPromptType:
return messages_to_prompt or generic_messages_to_prompt
@field_validator("completion_to_prompt")
@classmethod
def set_completion_to_prompt(
cls, completion_to_prompt: Optional[CompletionToPromptType]
) -> CompletionToPromptType:
return completion_to_prompt or default_completion_to_prompt
@model_validator(mode="after")
def check_prompts(self) -> "LLM":
if self.completion_to_prompt is None:
self.completion_to_prompt = default_completion_to_prompt
if self.messages_to_prompt is None:
self.messages_to_prompt = generic_messages_to_prompt
return self
# -- Utils --
def _log_template_data(
self, prompt: BasePromptTemplate, **prompt_args: Any
) -> None:
template_vars = {
k: v
for k, v in ChainMap(prompt.kwargs, prompt_args).items()
if k in prompt.template_vars
}
with self.callback_manager.event(
CBEventType.TEMPLATING,
payload={
EventPayload.TEMPLATE: prompt.get_template(llm=self),
EventPayload.TEMPLATE_VARS: template_vars,
EventPayload.SYSTEM_PROMPT: self.system_prompt,
EventPayload.QUERY_WRAPPER_PROMPT: self.query_wrapper_prompt,
},
):
pass
def _get_prompt(self, prompt: BasePromptTemplate, **prompt_args: Any) -> str:
formatted_prompt = prompt.format(
llm=self,
messages_to_prompt=self.messages_to_prompt,
completion_to_prompt=self.completion_to_prompt,
**prompt_args,
)
if self.output_parser is not None:
formatted_prompt = self.output_parser.format(formatted_prompt)
return self._extend_prompt(formatted_prompt)
def _get_messages(
self, prompt: BasePromptTemplate, **prompt_args: Any
) -> List[ChatMessage]:
messages = prompt.format_messages(llm=self, **prompt_args)
if self.output_parser is not None:
messages = self.output_parser.format_messages(messages)
return self._extend_messages(messages)
def _parse_output(self, output: str) -> str:
if self.output_parser is not None:
return str(self.output_parser.parse(output))
return output
def _extend_prompt(
self,
formatted_prompt: str,
) -> str:
"""Add system and query wrapper prompts to base prompt."""
extended_prompt = formatted_prompt
if self.system_prompt:
extended_prompt = self.system_prompt + "\n\n" + extended_prompt
if self.query_wrapper_prompt:
extended_prompt = self.query_wrapper_prompt.format(
query_str=extended_prompt
)
return extended_prompt
def _extend_messages(self, messages: List[ChatMessage]) -> List[ChatMessage]:
"""Add system prompt to chat message list."""
if self.system_prompt:
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=self.system_prompt),
*messages,
]
return messages
# -- Structured outputs --
    @dispatcher.span
    def structured_predict(
        self,
        output_cls: Type[Model],
        prompt: PromptTemplate,
        llm_kwargs: Optional[Dict[str, Any]] = None,
        **prompt_args: Any,
    ) -> Model:
        r"""
        Structured predict.
        Args:
            output_cls (BaseModel):
                Output class to use for structured prediction.
            prompt (PromptTemplate):
                Prompt template to use for structured prediction.
            llm_kwargs (Optional[Dict[str, Any]]):
                Arguments that are passed down to the LLM invoked by the program.
            prompt_args (Any):
                Additional arguments to format the prompt with.
        Returns:
            BaseModel: The structured prediction output.
        Examples:
            ```python
            from pydantic import BaseModel
            class Test(BaseModel):
                \"\"\"My test class.\"\"\"
                name: str
            from llama_index.core.prompts import PromptTemplate
            prompt = PromptTemplate("Please predict a Test with a random name related to {topic}.")
            output = llm.structured_predict(Test, prompt, topic="cats")
            print(output.name)
            ```
        """
        # NOTE(review): imported inside the function -- presumably to avoid a
        # circular import with program.utils; confirm before hoisting.
        from llama_index.core.program.utils import get_program_for_llm
        dispatcher.event(
            LLMStructuredPredictStartEvent(
                output_cls=output_cls, template=prompt, template_args=prompt_args
            )
        )
        # Build the structured-output program for this LLM, honoring the
        # configured pydantic_program_mode, then run it synchronously.
        program = get_program_for_llm(
            output_cls,
            prompt,
            self,
            pydantic_program_mode=self.pydantic_program_mode,
        )
        result = program(llm_kwargs=llm_kwargs, **prompt_args)
        # Non-streaming program calls must yield a single model, never a list.
        assert not isinstance(result, list)
        dispatcher.event(LLMStructuredPredictEndEvent(output=result))
        return result
    @dispatcher.span
    async def astructured_predict(
        self,
        output_cls: Type[Model],
        prompt: PromptTemplate,
        llm_kwargs: Optional[Dict[str, Any]] = None,
        **prompt_args: Any,
    ) -> Model:
        r"""
        Async Structured predict.
        Args:
            output_cls (BaseModel):
                Output class to use for structured prediction.
            prompt (PromptTemplate):
                Prompt template to use for structured prediction.
            llm_kwargs (Optional[Dict[str, Any]]):
                Arguments that are passed down to the LLM invoked by the program.
            prompt_args (Any):
                Additional arguments to format the prompt with.
        Returns:
            BaseModel: The structured prediction output.
        Examples:
            ```python
            from pydantic import BaseModel
            class Test(BaseModel):
                \"\"\"My test class.\"\"\"
                name: str
            from llama_index.core.prompts import PromptTemplate
            prompt = PromptTemplate("Please predict a Test with a random name related to {topic}.")
            output = await llm.astructured_predict(Test, prompt, topic="cats")
            print(output.name)
            ```
        """
        # NOTE(review): local import -- presumably avoids a circular import
        # with program.utils; confirm before hoisting.
        from llama_index.core.program.utils import get_program_for_llm
        dispatcher.event(
            LLMStructuredPredictStartEvent(
                output_cls=output_cls, template=prompt, template_args=prompt_args
            )
        )
        # Build the structured-output program, then await its async call.
        program = get_program_for_llm(
            output_cls,
            prompt,
            self,
            pydantic_program_mode=self.pydantic_program_mode,
        )
        result = await program.acall(llm_kwargs=llm_kwargs, **prompt_args)
        # Non-streaming program calls must yield a single model, never a list.
        assert not isinstance(result, list)
        dispatcher.event(LLMStructuredPredictEndEvent(output=result))
        return result
def _structured_stream_call(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Generator[
Union[Model, List[Model], "FlexibleModel", List["FlexibleModel"]], None, None
]:
from llama_index.core.program.utils import get_program_for_llm
program = get_program_for_llm(
output_cls,
prompt,
self,
pydantic_program_mode=self.pydantic_program_mode,
)
return program.stream_call(llm_kwargs=llm_kwargs, **prompt_args)
@dispatcher.span
def stream_structured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> Generator[Union[Model, "FlexibleModel"], None, None]:
r"""
Stream Structured predict.
Args:
output_cls (BaseModel):
Output class to use for structured prediction.
prompt (PromptTemplate):
Prompt template to use for structured prediction.
llm_kwargs (Optional[Dict[str, Any]]):
Arguments that are passed down to the LLM invoked by the program.
prompt_args (Any):
Additional arguments to format the prompt with.
Returns:
Generator: A generator returning partial copies of the model or list of models.
Examples:
```python
from pydantic import BaseModel
class Test(BaseModel):
\"\"\"My test class.\"\"\"
name: str
from llama_index.core.prompts import PromptTemplate
prompt = PromptTemplate("Please predict a Test with a random name related to {topic}.")
stream_output = llm.stream_structured_predict(Test, prompt, topic="cats")
for partial_output in stream_output:
# stream partial outputs until completion
print(partial_output.name)
```
"""
dispatcher.event(
LLMStructuredPredictStartEvent(
output_cls=output_cls, template=prompt, template_args=prompt_args
)
)
result = self._structured_stream_call(
output_cls, prompt, llm_kwargs, **prompt_args
)
for r in result:
dispatcher.event(LLMStructuredPredictInProgressEvent(output=r))
assert not isinstance(r, list)
yield r
dispatcher.event(LLMStructuredPredictEndEvent(output=r))
async def _structured_astream_call(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> AsyncGenerator[
Union[Model, List[Model], "FlexibleModel", List["FlexibleModel"]], None
]:
from llama_index.core.program.utils import get_program_for_llm
program = get_program_for_llm(
output_cls,
prompt,
self,
pydantic_program_mode=self.pydantic_program_mode,
)
return await program.astream_call(llm_kwargs=llm_kwargs, **prompt_args)
@dispatcher.span
async def astream_structured_predict(
self,
output_cls: Type[Model],
prompt: PromptTemplate,
llm_kwargs: Optional[Dict[str, Any]] = None,
**prompt_args: Any,
) -> AsyncGenerator[Union[Model, "FlexibleModel"], None]:
r"""
Async Stream Structured predict.
Args:
output_cls (BaseModel):
Output class to use for structured prediction.
prompt (PromptTemplate):
Prompt template to use for structured prediction.
llm_kwargs (Optional[Dict[str, Any]]):
Arguments that are passed down to the LLM invoked by the program.
prompt_args (Any):
Additional arguments to format the prompt with.
Returns:
Generator: A generator returning partial copies of the model or list of models.
Examples:
```python
from pydantic import BaseModel
class Test(BaseModel):
\"\"\"My test class.\"\"\"
name: str
from llama_index.core.prompts import PromptTemplate
prompt = PromptTemplate("Please predict a Test with a random name related to {topic}.")
stream_output = await llm.astream_structured_predict(Test, prompt, topic="cats")
async for partial_output in stream_output:
# stream partial outputs until completion
print(partial_output.name)
```
"""
async def gen() -> AsyncGenerator[Union[Model, "FlexibleModel"], None]:
dispatcher.event(
LLMStructuredPredictStartEvent(
output_cls=output_cls, template=prompt, template_args=prompt_args
)
)
result = await self._structured_astream_call(
output_cls, prompt, llm_kwargs, **prompt_args
)
async for r in result:
dispatcher.event(LLMStructuredPredictInProgressEvent(output=r))
assert not isinstance(r, list)
yield r
dispatcher.event(LLMStructuredPredictEndEvent(output=r))
return gen()
# -- Prompt Chaining --
@dispatcher.span
def predict(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> str:
"""
Predict for a given prompt.
Args:
prompt (BasePromptTemplate):
The prompt to use for prediction.
prompt_args (Any):
Additional arguments to format the prompt with.
Returns:
str: The prediction output.
Examples:
```python
from llama_index.core.prompts import PromptTemplate
prompt = PromptTemplate("Please write a random name related to {topic}.")
output = llm.predict(prompt, topic="cats")
print(output)
```
"""
dispatcher.event(
LLMPredictStartEvent(template=prompt, template_args=prompt_args)
)
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = self.chat(messages)
output = chat_response.message.content or ""
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
response = self.complete(formatted_prompt, formatted=True)
output = response.text
parsed_output = self._parse_output(output)
dispatcher.event(LLMPredictEndEvent(output=parsed_output))
return parsed_output
@dispatcher.span
def stream(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> TokenGen:
"""
Stream predict for a given prompt.
Args:
prompt (BasePromptTemplate):
The prompt to use for prediction.
prompt_args (Any):
Additional arguments to format the prompt with.
Yields:
str: Each streamed token.
Examples:
```python
from llama_index.core.prompts import PromptTemplate
prompt = PromptTemplate("Please write a random name related to {topic}.")
gen = llm.stream(prompt, topic="cats")
for token in gen:
print(token, end="", flush=True)
```
"""
self._log_template_data(prompt, **prompt_args)
dispatcher.event(
LLMPredictStartEvent(template=prompt, template_args=prompt_args)
)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = self.stream_chat(messages)
stream_tokens = stream_chat_response_to_tokens(chat_response)
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
stream_response = self.stream_complete(formatted_prompt, formatted=True)
stream_tokens = stream_completion_response_to_tokens(stream_response)
if prompt.output_parser is not None or self.output_parser is not None:
raise NotImplementedError("Output parser is not supported for streaming.")
return stream_tokens
@dispatcher.span
async def apredict(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> str:
"""
Async Predict for a given prompt.
Args:
prompt (BasePromptTemplate):
The prompt to use for prediction.
prompt_args (Any):
Additional arguments to format the prompt with.
Returns:
str: The prediction output.
Examples:
```python
from llama_index.core.prompts import PromptTemplate
prompt = PromptTemplate("Please write a random name related to {topic}.")
output = await llm.apredict(prompt, topic="cats")
print(output)
```
"""
dispatcher.event(
LLMPredictStartEvent(template=prompt, template_args=prompt_args)
)
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = await self.achat(messages)
output = chat_response.message.content or ""
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
response = await self.acomplete(formatted_prompt, formatted=True)
output = response.text
parsed_output = self._parse_output(output)
dispatcher.event(LLMPredictEndEvent(output=parsed_output))
return parsed_output
@dispatcher.span
async def astream(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> TokenAsyncGen:
"""
Async stream predict for a given prompt.
Args:
prompt (BasePromptTemplate):
The prompt to use for prediction.
prompt_args (Any):
Additional arguments to format the prompt with.
Yields:
str: An async generator that yields strings of tokens.
Examples:
```python
from llama_index.core.prompts import PromptTemplate
prompt = PromptTemplate("Please write a random name related to {topic}.")
gen = await llm.astream(prompt, topic="cats")
async for token in gen:
print(token, end="", flush=True)
```
"""
self._log_template_data(prompt, **prompt_args)
dispatcher.event(
LLMPredictStartEvent(template=prompt, template_args=prompt_args)
)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = await self.astream_chat(messages)
stream_tokens = await astream_chat_response_to_tokens(chat_response)
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
stream_response = await self.astream_complete(
formatted_prompt, formatted=True
)
stream_tokens = await astream_completion_response_to_tokens(stream_response)
if prompt.output_parser is not None or self.output_parser is not None:
raise NotImplementedError("Output parser is not supported for streaming.")
return stream_tokens
    @dispatcher.span
    def predict_and_call(
        self,
        tools: List["BaseTool"],
        user_msg: Optional[Union[str, ChatMessage]] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> "AgentChatResponse":
        """
        Predict and call the tool.
        By default uses a ReAct agent to do tool calling (through text prompting),
        but function calling LLMs will implement this differently.

        Args:
            tools (List[BaseTool]): Candidate tools the agent may select.
            user_msg (Optional[Union[str, ChatMessage]]): Latest user message;
                plain strings are wrapped as user-role chat messages.
            chat_history (Optional[List[ChatMessage]]): Prior messages placed
                before ``user_msg`` in the LLM input.
            verbose (bool): Forwarded to the agent and to tool execution.
            kwargs (Any): Optional overrides -- ``react_chat_formatter``,
                ``output_parser``, ``tool_retriever``, ``memory``.

        Returns:
            AgentChatResponse: Joined tool outputs, or an error-message
            response if stepping the agent or running a tool raised.
        """
        from llama_index.core.agent.workflow import ReActAgent
        from llama_index.core.chat_engine.types import AgentChatResponse
        from llama_index.core.memory import Memory
        from llama_index.core.tools.calling import call_tool_with_selection
        from llama_index.core.workflow import Context
        from workflows.context.state_store import DictState
        agent = ReActAgent(
            tools=tools,
            llm=self,
            verbose=verbose,
            formatter=kwargs.get("react_chat_formatter"),
            output_parser=kwargs.get("output_parser"),
            tool_retriever=kwargs.get("tool_retriever"),
        )
        memory = kwargs.get("memory", Memory.from_defaults())
        # Normalize user_msg: strings become user-role ChatMessages;
        # ChatMessages with string content pass through untouched.
        if isinstance(user_msg, ChatMessage) and isinstance(user_msg.content, str):
            pass
        elif isinstance(user_msg, str):
            user_msg = ChatMessage(content=user_msg, role=MessageRole.USER)
        # LLM input = prior history followed by the (optional) latest message.
        llm_input = []
        if chat_history:
            llm_input.extend(chat_history)
        if user_msg:
            llm_input.append(user_msg)
        ctx: Context[DictState] = Context(agent)
        try:
            # Run a single agent step synchronously to obtain tool selections.
            resp = asyncio_run(
                agent.take_step(
                    ctx=ctx, llm_input=llm_input, tools=tools or [], memory=memory
                )
            )
            tool_outputs = []
            for tool_call in resp.tool_calls:
                tool_output = call_tool_with_selection(
                    tool_call=tool_call,
                    tools=tools or [],
                    verbose=verbose,
                )
                tool_outputs.append(tool_output)
            output_text = "\n\n".join(
                [tool_output.content for tool_output in tool_outputs]
            )
            return AgentChatResponse(
                response=output_text,
                sources=tool_outputs,
            )
        except Exception as e:
            # Deliberate best-effort: failures become a chat response rather
            # than propagating to the caller.
            output = AgentChatResponse(
                response="An error occurred while running the tool: " + str(e),
                sources=[],
            )
            return output
    @dispatcher.span
    async def apredict_and_call(
        self,
        tools: List["BaseTool"],
        user_msg: Optional[Union[str, ChatMessage]] = None,
        chat_history: Optional[List[ChatMessage]] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> "AgentChatResponse":
        """
        Predict and call the tool.

        Async counterpart of ``predict_and_call``: runs a single ReAct agent
        step and executes the selected tools with the async tool-calling
        helper.

        Args:
            tools (List[BaseTool]): Candidate tools the agent may select.
            user_msg (Optional[Union[str, ChatMessage]]): Latest user message;
                plain strings are wrapped as user-role chat messages.
            chat_history (Optional[List[ChatMessage]]): Prior messages placed
                before ``user_msg`` in the LLM input.
            verbose (bool): Forwarded to the agent and to tool execution.
            kwargs (Any): Optional overrides -- ``react_chat_formatter``,
                ``output_parser``, ``tool_retriever``, ``memory``.

        Returns:
            AgentChatResponse: Joined tool outputs, or an error-message
            response if stepping the agent or running a tool raised.
        """
        from llama_index.core.agent.workflow import ReActAgent
        from llama_index.core.chat_engine.types import AgentChatResponse
        from llama_index.core.memory import Memory
        from llama_index.core.tools.calling import acall_tool_with_selection
        from llama_index.core.workflow import Context
        from workflows.context.state_store import DictState
        agent = ReActAgent(
            tools=tools,
            llm=self,
            verbose=verbose,
            formatter=kwargs.get("react_chat_formatter"),
            output_parser=kwargs.get("output_parser"),
            tool_retriever=kwargs.get("tool_retriever"),
        )
        memory = kwargs.get("memory", Memory.from_defaults())
        # Normalize user_msg: strings become user-role ChatMessages;
        # ChatMessages with string content pass through untouched.
        if isinstance(user_msg, ChatMessage) and isinstance(user_msg.content, str):
            pass
        elif isinstance(user_msg, str):
            user_msg = ChatMessage(content=user_msg, role=MessageRole.USER)
        # LLM input = prior history followed by the (optional) latest message.
        llm_input = []
        if chat_history:
            llm_input.extend(chat_history)
        if user_msg:
            llm_input.append(user_msg)
        ctx: Context[DictState] = Context(agent)
        try:
            # Run a single agent step to obtain the agent's tool selections.
            resp = await agent.take_step(
                ctx=ctx, llm_input=llm_input, tools=tools or [], memory=memory
            )
            tool_outputs = []
            for tool_call in resp.tool_calls:
                tool_output = await acall_tool_with_selection(
                    tool_call=tool_call,
                    tools=tools or [],
                    verbose=verbose,
                )
                tool_outputs.append(tool_output)
            output_text = "\n\n".join(
                [tool_output.content for tool_output in tool_outputs]
            )
            return AgentChatResponse(
                response=output_text,
                sources=tool_outputs,
            )
        except Exception as e:
            # Deliberate best-effort: failures become a chat response rather
            # than propagating to the caller.
            output = AgentChatResponse(
                response="An error occurred while running the tool: " + str(e),
                sources=[],
            )
            return output
    def as_structured_llm(
        self,
        output_cls: Type[BaseModel],
        **kwargs: Any,
    ) -> "StructuredLLM":
        """
        Return a StructuredLLM wrapper whose calls produce ``output_cls`` instances.

        Extra ``kwargs`` are forwarded to the ``StructuredLLM`` constructor.
        """
        # NOTE(review): imported locally -- presumably to avoid a circular
        # import with the structured_llm module; confirm before hoisting.
        from llama_index.core.llms.structured_llm import StructuredLLM
        return StructuredLLM(llm=self, output_cls=output_cls, **kwargs)
| LLM |
python | scipy__scipy | scipy/io/arff/tests/test_arffread.py | {
"start": 11882,
"end": 13094
} | class ____:
"""
Regression test for issue #10232:
Exception in loadarff with quoted nominal attributes.
"""
def setup_method(self):
self.data, self.meta = loadarff(test_quoted_nominal_spaces)
def test_attributes(self):
assert_equal(len(self.meta._attributes), 2)
age, smoker = self.meta._attributes.values()
assert_equal(age.name, 'age')
assert_equal(age.type_name, 'numeric')
assert_equal(smoker.name, 'smoker')
assert_equal(smoker.type_name, 'nominal')
assert_equal(smoker.values, [' yes', 'no '])
def test_data(self):
age_dtype_instance = np.float64
smoker_dtype_instance = '<S5'
age_expected = np.array([
18,
24,
44,
56,
89,
11,
], dtype=age_dtype_instance)
smoker_expected = np.array([
'no ',
' yes',
'no ',
'no ',
' yes',
'no ',
], dtype=smoker_dtype_instance)
assert_array_equal(self.data["age"], age_expected)
assert_array_equal(self.data["smoker"], smoker_expected)
| TestQuotedNominalSpaces |
python | getsentry__sentry | src/sentry/search/events/types.py | {
"start": 2752,
"end": 7948
} | class ____:
start: datetime | None = None
end: datetime | None = None
stats_period: str | None = None
query_string: str | None = None
# granularity is used with timeseries requests to specifiy bucket size
granularity_secs: int | None = None
# The None value in this sequence is because the filter params could include that
environments: Sequence[Environment | None] = field(default_factory=list)
projects: Sequence[Project] = field(default_factory=list)
user: RpcUser | None = None
teams: Iterable[Team] = field(default_factory=list)
organization: Organization | None = None
sampling_mode: SAMPLING_MODES | None = None
debug: bool = False
case_insensitive: bool = False
def __post_init__(self) -> None:
if self.start:
self.start = self.start.replace(tzinfo=timezone.utc)
if self.end:
self.end = self.end.replace(tzinfo=timezone.utc)
if self.start is None and self.end is None:
self.parse_stats_period()
if self.organization is None and len(self.projects) > 0:
self.organization = self.projects[0].organization
# Only used in the trend query builder
self.aliases: dict[str, Alias] | None = {}
def __repr__(self) -> str:
return f"<SnubaParams: start={self.start},end={self.end},environments={self.environment_ids},projects={self.project_ids}>"
def parse_stats_period(self) -> None:
if self.stats_period is not None:
self.end = django_timezone.now()
from sentry.api.utils import get_datetime_from_stats_period
self.start = get_datetime_from_stats_period(self.stats_period, self.end)
@property
def start_date(self) -> datetime:
# This and end_date are helper functions so callers don't have to check if either are defined for typing
if self.start is None:
raise InvalidSearchQuery("start is required")
return self.start
@property
def rpc_start_date(self) -> Timestamp:
timestamp = Timestamp()
timestamp.FromDatetime(self.start_date)
return timestamp
@property
def end_date(self) -> datetime:
if self.end is None:
raise InvalidSearchQuery("end is required")
return self.end
@property
def rpc_end_date(self) -> Timestamp:
timestamp = Timestamp()
timestamp.FromDatetime(self.end_date)
return timestamp
@property
def timeseries_granularity_secs(self) -> int:
if self.granularity_secs is None:
raise InvalidSearchQuery("granularity is required")
return self.granularity_secs
@property
def is_timeseries_request(self) -> bool:
return self.granularity_secs is not None
@property
def date_range(self) -> timedelta:
return self.end_date - self.start_date
@property
def environment_names(self) -> list[str]:
return (
[env.name if env is not None else "" for env in self.environments]
if self.environments
else []
)
@property
def environment_ids(self) -> list[int]:
return (
[env.id for env in self.environments if env is not None and env.id is not None]
if self.environments
else []
)
@property
def project_ids(self) -> list[int]:
# proj.id can be None if the project no longer exists
return sorted([proj.id for proj in self.projects if proj.id is not None])
@property
def project_slug_map(self) -> dict[str, int]:
return {proj.slug: proj.id for proj in self.projects}
@property
def project_id_map(self) -> dict[int, str]:
return {proj.id: proj.slug for proj in self.projects}
@property
def team_ids(self) -> list[int]:
return [team.id for team in self.teams]
@property
def interval(self) -> float:
return (self.end_date - self.start_date).total_seconds()
@property
def organization_id(self) -> int | None:
if self.organization is not None:
return self.organization.id
return None
@property
def filter_params(self) -> ParamsType:
# Compatibility function so we can switch over to this dataclass more easily
filter_params: ParamsType = {
"project_id": list(self.project_ids),
"projects": list(self.projects),
"project_objects": list(self.projects),
"environment": list(self.environment_names),
"team_id": list(self.team_ids),
"environment_objects": (
[env for env in self.environments if env is not None] if self.environments else []
),
}
if self.organization_id:
filter_params["organization_id"] = self.organization_id
if self.start:
filter_params["start"] = self.start
if self.end:
filter_params["end"] = self.end
if self.stats_period:
filter_params["statsPeriod"] = self.stats_period
return filter_params
def copy(self) -> SnubaParams:
return deepcopy(self)
@dataclass
| SnubaParams |
python | huggingface__transformers | src/transformers/models/speecht5/modeling_speecht5.py | {
"start": 31408,
"end": 32515
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.feat_out = nn.Linear(config.hidden_size, config.num_mel_bins * config.reduction_factor)
self.prob_out = nn.Linear(config.hidden_size, config.reduction_factor)
self.layers = nn.ModuleList(
[SpeechT5BatchNormConvLayer(config, i) for i in range(config.speech_decoder_postnet_layers)]
)
def forward(self, hidden_states: torch.Tensor):
outputs_before_postnet = self.feat_out(hidden_states).view(hidden_states.size(0), -1, self.config.num_mel_bins)
outputs_after_postnet = self.postnet(outputs_before_postnet)
logits = self.prob_out(hidden_states).view(hidden_states.size(0), -1)
return outputs_before_postnet, outputs_after_postnet, logits
def postnet(self, hidden_states: torch.Tensor):
layer_output = hidden_states.transpose(1, 2)
for layer in self.layers:
layer_output = layer(layer_output)
return hidden_states + layer_output.transpose(1, 2)
| SpeechT5SpeechDecoderPostnet |
python | getsentry__sentry | src/sentry/conf/types/taskworker.py | {
"start": 176,
"end": 665
} | class ____:
"""
crontab schedule value object
Used in configuration to define a task schedule.
:see sentry.taskworker.scheduler.schedules.CrontabSchedule for more details.
"""
minute: str = "*"
hour: str = "*"
day_of_week: str = "*"
day_of_month: str = "*"
month_of_year: str = "*"
def __str__(self) -> str:
return (
f"{self.minute} {self.hour} {self.day_of_month} {self.month_of_year} {self.day_of_week}"
)
| crontab |
python | sympy__sympy | sympy/solvers/ode/single.py | {
"start": 104995,
"end": 106741
} | class ____(SingleODESolver):
r"""
Gives solution of the Airy differential equation
.. math :: \frac{d^2y}{dx^2} + (a + b x) y(x) = 0
in terms of Airy special functions airyai and airybi.
Examples
========
>>> from sympy import dsolve, Function
>>> from sympy.abc import x
>>> f = Function("f")
>>> eq = f(x).diff(x, 2) - x*f(x)
>>> dsolve(eq)
Eq(f(x), C1*airyai(x) + C2*airybi(x))
"""
hint = "2nd_linear_airy"
has_integral = False
def _matches(self):
eq = self.ode_problem.eq_high_order_free
f = self.ode_problem.func
order = self.ode_problem.order
x = self.ode_problem.sym
df = f.diff(x)
a4 = Wild('a4', exclude=[x,f,df])
b4 = Wild('b4', exclude=[x,f,df])
match = self.ode_problem.get_linear_coefficients(eq, f, order)
does_match = False
if order == 2 and match and match[2] != 0:
if match[1].is_zero:
self.rn = cancel(match[0]/match[2]).match(a4+b4*x)
if self.rn and self.rn[b4] != 0:
self.rn = {'b':self.rn[a4],'m':self.rn[b4]}
does_match = True
return does_match
def _get_general_solution(self, *, simplify_flag: bool = True):
f = self.ode_problem.func.func
x = self.ode_problem.sym
(C1, C2) = self.ode_problem.get_numbered_constants(num=2)
b = self.rn['b']
m = self.rn['m']
if m.is_positive:
arg = - b/cbrt(m)**2 - cbrt(m)*x
elif m.is_negative:
arg = - b/cbrt(-m)**2 + cbrt(-m)*x
else:
arg = - b/cbrt(-m)**2 + cbrt(-m)*x
return [Eq(f(x), C1*airyai(arg) + C2*airybi(arg))]
| SecondLinearAiry |
python | ansible__ansible | lib/ansible/plugins/inventory/ini.py | {
"start": 3750,
"end": 19358
} | class ____(BaseFileInventoryPlugin):
"""
Takes an INI-format inventory file and builds a list of groups and subgroups
with their associated hosts and variable settings.
"""
NAME = 'ini'
_COMMENT_MARKERS = frozenset((u';', u'#'))
b_COMMENT_MARKERS = frozenset((b';', b'#'))
# template trust is applied internally to strings
def __init__(self) -> None:
super(InventoryModule, self).__init__()
self.patterns: dict[str, re.Pattern] = {}
self._origin: Origin | None = None
def verify_file(self, path):
# hardcode exclusion for TOML to prevent partial parsing of things we know we don't want
return super().verify_file(path) and os.path.splitext(path)[1] != '.toml'
def parse(self, inventory, loader, path: str, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
try:
# Read in the hosts, groups, and variables defined in the inventory file.
if self.loader:
(b_data, private) = self.loader._get_file_contents(path)
else:
b_path = to_bytes(path, errors='surrogate_or_strict')
with open(b_path, 'rb') as fh:
b_data = fh.read()
try:
# Faster to do to_text once on a long string than many
# times on smaller strings
data = to_text(b_data, errors='surrogate_or_strict').splitlines()
except UnicodeError:
# Handle non-utf8 in comment lines: https://github.com/ansible/ansible/issues/17593
data = []
for line in b_data.splitlines():
if line and line[0] in self.b_COMMENT_MARKERS:
# Replace is okay for comment lines
# data.append(to_text(line, errors='surrogate_then_replace'))
# Currently we only need these lines for accurate lineno in errors
data.append(u'')
else:
# Non-comment lines still have to be valid uf-8
data.append(to_text(line, errors='surrogate_or_strict'))
self._origin = Origin(path=path, line_num=0)
try:
self._parse(data)
finally:
self._origin = self._origin.replace(line_num=None)
except Exception as ex:
raise AnsibleParserError('Failed to parse inventory.', obj=self._origin) from ex
def _raise_error(self, message):
raise AnsibleError(message)
def _parse(self, lines):
"""
Populates self.groups from the given array of lines. Raises an error on
any parse failure.
"""
self._compile_patterns()
# We behave as though the first line of the inventory is '[ungrouped]',
# and begin to look for host definitions. We make a single pass through
# each line of the inventory, building up self.groups and adding hosts,
# subgroups, and setting variables as we go.
pending_declarations = {}
groupname = 'ungrouped'
state = 'hosts'
for line in lines:
self._origin = self._origin.replace(line_num=self._origin.line_num + 1)
line = line.strip()
# Skip empty lines and comments
if not line or line[0] in self._COMMENT_MARKERS:
continue
# Is this a [section] header? That tells us what group we're parsing
# definitions for, and what kind of definitions to expect.
m = self.patterns['section'].match(line)
if m:
(groupname, state) = m.groups()
groupname = to_safe_group_name(groupname)
state = state or 'hosts'
if state not in ['hosts', 'children', 'vars']:
title = ":".join(m.groups())
self._raise_error("Section [%s] has unknown type: %s" % (title, state))
# If we haven't seen this group before, we add a new Group.
if groupname not in self.inventory.groups:
# Either [groupname] or [groupname:children] is sufficient to declare a group,
# but [groupname:vars] is allowed only if the # group is declared elsewhere.
# We add the group anyway, but make a note in pending_declarations to check at the end.
#
# It's possible that a group is previously pending due to being defined as a child
# group, in that case we simply pass so that the logic below to process pending
# declarations will take the appropriate action for a pending child group instead of
# incorrectly handling it as a var state pending declaration
if state == 'vars' and groupname not in pending_declarations:
pending_declarations[groupname] = dict(line=self._origin.line_num, state=state, name=groupname)
self.inventory.add_group(groupname)
# When we see a declaration that we've been waiting for, we process and delete.
if groupname in pending_declarations and state != 'vars':
if pending_declarations[groupname]['state'] == 'children':
self._add_pending_children(groupname, pending_declarations)
elif pending_declarations[groupname]['state'] == 'vars':
del pending_declarations[groupname]
continue
elif line.startswith('[') and line.endswith(']'):
self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + " " +
"in the section entry, and that there are no other invalid characters")
# It's not a section, so the current state tells us what kind of
# definition it must be. The individual parsers will raise an
# error if we feed them something they can't digest.
# [groupname] contains host definitions that must be added to
# the current group.
if state == 'hosts':
hosts, port, variables = self._parse_host_definition(line)
self._populate_host_vars(hosts, variables, groupname, port)
# [groupname:vars] contains variable definitions that must be
# applied to the current group.
elif state == 'vars':
(k, v) = self._parse_variable_definition(line)
self.inventory.set_variable(groupname, k, v)
# [groupname:children] contains subgroup names that must be
# added as children of the current group. The subgroup names
# must themselves be declared as groups, but as before, they
# may only be declared later.
elif state == 'children':
child = self._parse_group_name(line)
if child not in self.inventory.groups:
if child not in pending_declarations:
pending_declarations[child] = dict(line=self._origin.line_num, state=state, name=child, parents=[groupname])
else:
pending_declarations[child]['parents'].append(groupname)
else:
self.inventory.add_child(groupname, child)
else:
# This can happen only if the state checker accepts a state that isn't handled above.
self._raise_error("Entered unhandled state: %s" % (state))
# Any entries in pending_declarations not removed by a group declaration above mean that there was an unresolved reference.
# We report only the first such error here.
for g in pending_declarations:
decl = pending_declarations[g]
self._origin = self._origin.replace(line_num=decl['line'])
if decl['state'] == 'vars':
raise ValueError(f"Section [{decl['name']}:vars] not valid for undefined group {decl['name']!r}.")
elif decl['state'] == 'children':
raise ValueError(f"Section [{decl['parents'][-1]}:children] includes undefined group {decl['name']!r}.")
def _add_pending_children(self, group, pending):
for parent in pending[group]['parents']:
self.inventory.add_child(parent, group)
if parent in pending and pending[parent]['state'] == 'children':
self._add_pending_children(parent, pending)
del pending[group]
def _parse_group_name(self, line):
"""
Takes a single line and tries to parse it as a group name. Returns the
group name if successful, or raises an error.
"""
m = self.patterns['groupname'].match(line)
if m:
return m.group(1)
self._raise_error("Expected group name, got: %s" % (line))
def _parse_variable_definition(self, line):
"""
Takes a string and tries to parse it as a variable definition. Returns
the key and value if successful, or raises an error.
"""
# TODO: We parse variable assignments as a key (anything to the left of
# an '='"), an '=', and a value (anything left) and leave the value to
# _parse_value to sort out. We should be more systematic here about
# defining what is acceptable, how quotes work, and so on.
if '=' in line:
(k, v) = [e.strip() for e in line.split("=", 1)]
return (self._origin.tag(k), self._parse_value(v))
self._raise_error("Expected key=value, got: %s" % (line))
def _parse_host_definition(self, line):
"""
Takes a single line and tries to parse it as a host definition. Returns
a list of Hosts if successful, or raises an error.
"""
# A host definition comprises (1) a non-whitespace hostname or range,
# optionally followed by (2) a series of key="some value" assignments.
# We ignore any trailing whitespace and/or comments. For example, here
# are a series of host definitions in a group:
#
# [groupname]
# alpha
# beta:2345 user=admin # we'll tell shlex
# gamma sudo=True user=root # to ignore comments
try:
tokens = shlex_split(line, comments=True)
except ValueError as e:
self._raise_error("Error parsing host definition '%s': %s" % (line, e))
(hostnames, port) = self._expand_hostpattern(tokens[0])
# Try to process anything remaining as a series of key=value pairs.
variables = {}
for t in tokens[1:]:
if '=' not in t:
self._raise_error("Expected key=value host variable assignment, got: %s" % (t))
(k, v) = t.split('=', 1)
variables[self._origin.tag(k)] = self._parse_value(v)
return hostnames, port, variables
def _expand_hostpattern(self, hostpattern):
"""
do some extra checks over normal processing
"""
# specification?
hostnames, port = super(InventoryModule, self)._expand_hostpattern(hostpattern)
if hostpattern.strip().endswith(':') and port is None:
raise AnsibleParserError("Invalid host pattern '%s' supplied, ending in ':' is not allowed, this character is reserved to provide a port." %
hostpattern)
for pattern in hostnames:
# some YAML parsing prevention checks
if pattern.strip() == '---':
raise AnsibleParserError("Invalid host pattern '%s' supplied, '---' is normally a sign this is a YAML file." % hostpattern)
return (hostnames, port)
def _parse_recursive_coerce_types_and_tag(self, value: t.Any) -> t.Any:
if isinstance(value, str):
return TrustedAsTemplate().tag(self._origin.tag(value))
if isinstance(value, (list, tuple, set)):
# NB: intentional coercion of tuple/set to list, deal with it
return self._origin.tag([self._parse_recursive_coerce_types_and_tag(v) for v in value])
if isinstance(value, dict):
# FIXME: enforce keys are strings
return self._origin.tag({self._origin.tag(k): self._parse_recursive_coerce_types_and_tag(v) for k, v in value.items()})
if value is ...: # literal_eval parses ellipsis, but it's not a supported variable type
value = TrustedAsTemplate().tag("...")
if isinstance(value, complex): # convert unsupported variable types recognized by literal_eval back to str
value = TrustedAsTemplate().tag(str(value))
value = to_text(value, nonstring='passthru', errors='surrogate_or_strict')
return self._origin.tag(value)
def _parse_value(self, v: str) -> t.Any:
"""
Attempt to transform the string value from an ini file into a basic python object
(int, dict, list, unicode string, etc).
"""
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", SyntaxWarning)
v = ast.literal_eval(v)
# Using explicit exceptions.
# Likely a string that literal_eval does not like. We will then just set it.
except ValueError:
# For some reason this was thought to be malformed.
pass
except SyntaxError:
# Is this a hash with an equals at the end?
pass
# this is mostly unnecessary, but prevents the (possible) case of bytes literals showing up in inventory
return self._parse_recursive_coerce_types_and_tag(v)
def _compile_patterns(self):
"""
Compiles the regular expressions required to parse the inventory and
stores them in self.patterns.
"""
# Section names are square-bracketed expressions at the beginning of a
# line, comprising (1) a group name optionally followed by (2) a tag
# that specifies the contents of the section. We ignore any trailing
# whitespace and/or comments. For example:
#
# [groupname]
# [somegroup:vars]
# [naughty:children] # only get coal in their stockings
self.patterns['section'] = re.compile(
to_text(r"""^\[
([^:\]\s]+) # group name (see groupname below)
(?::(\w+))? # optional : and tag name
\]
\s* # ignore trailing whitespace
(?:\#.*)? # and/or a comment till the
$ # end of the line
""", errors='surrogate_or_strict'), re.X
)
# FIXME: What are the real restrictions on group names, or rather, what
# should they be? At the moment, they must be non-empty sequences of non
# whitespace characters excluding ':' and ']', but we should define more
# precise rules in order to support better diagnostics.
self.patterns['groupname'] = re.compile(
to_text(r"""^
([^:\]\s]+)
\s* # ignore trailing whitespace
(?:\#.*)? # and/or a comment till the
$ # end of the line
""", errors='surrogate_or_strict'), re.X
)
| InventoryModule |
python | pytorch__pytorch | test/distributed/_shard/sharding_spec/test_sharding_spec.py | {
"start": 19870,
"end": 21822
} | class ____(ShardingSpec):
grid_size: int
placements: list[Union[torch.distributed._remote_device, str]]
def __post_init__(self):
for i, remote_device in enumerate(self.placements):
if not isinstance(remote_device, torch.distributed._remote_device):
self.placements[i] = torch.distributed._remote_device(remote_device)
def build_metadata(
self,
tensor_sizes: torch.Size,
tensor_properties: TensorProperties,
) -> ShardedTensorMetadata:
tensor_num_dim = len(tensor_sizes)
assert tensor_num_dim == 2, "only support 2-dim tensor for grid sharding"
shards_metadata = []
def chunk_num(dim_size, grid_size):
assert dim_size % grid_size == 0, "only support dim_size mod grid_size == 0"
return dim_size // grid_size
row_chunks = chunk_num(tensor_sizes[0], self.grid_size)
col_chunks = chunk_num(tensor_sizes[1], self.grid_size)
assert row_chunks * col_chunks == len(self.placements)
for row_idx in range(row_chunks):
for col_idx in range(col_chunks):
shards_metadata.append(
ShardMetadata(
shard_offsets=[
row_idx * self.grid_size,
col_idx * self.grid_size,
],
shard_sizes=[self.grid_size, self.grid_size],
placement=self.placements[row_idx * row_chunks + col_idx],
)
)
return ShardedTensorMetadata(
shards_metadata=shards_metadata,
size=tensor_sizes,
tensor_properties=tensor_properties,
)
def shard(
self, tensor: torch.Tensor, src_rank: int = 0, process_group=None
) -> ShardedTensor:
raise NotImplementedError("GridShardingSpec.shard not implemented yet!")
| GridShardingSpec |
python | django__django | tests/foreign_object/models/customers.py | {
"start": 704,
"end": 1046
} | class ____(models.Model):
company_code = models.CharField(max_length=1)
customer_code = models.IntegerField()
customer = models.ForeignObject(
Customer,
models.CASCADE,
related_name="contacts",
to_fields=["customer_id", "company"],
from_fields=["customer_code", "company_code"],
)
| Contact |
python | conda__conda | conda/plugins/prefix_data_loaders/pypi/pkg_format.py | {
"start": 15087,
"end": 15835
} | class ____(PythonDistribution):
"""
Python distribution installed via distutils.
Notes
-----
- https://www.python.org/dev/peps/pep-0376/
"""
MANIFEST_FILES = ("RECORD",)
REQUIRES_FILES = ()
MANDATORY_FILES = ("METADATA",)
# FIXME: Do this check? Disabled for tests where only Metadata file is stored
# MANDATORY_FILES = ('METADATA', 'RECORD', 'INSTALLER')
ENTRY_POINTS_FILES = ()
is_manageable = True
def __init__(self, prefix_path, anchor_file, python_version):
anchor_full_path = join(prefix_path, win_path_ok(dirname(anchor_file)))
super().__init__(anchor_full_path, python_version)
self.sp_reference = basename(dirname(anchor_file))
| PythonInstalledDistribution |
python | kamyu104__LeetCode-Solutions | Python/minimum-total-distance-traveled.py | {
"start": 105,
"end": 1104
} | class ____(object):
def minimumTotalDistance(self, robot, factory):
"""
:type robot: List[int]
:type factory: List[List[int]]
:rtype: int
"""
robot.sort(), factory.sort()
dp = [float("inf")]*(len(robot)+1) # dp[j] at i: min of factory[:i+1] and robot[:j]
dp[0] = 0
for i in xrange(len(factory)):
prefix = 0
dq = collections.deque([(dp[0]-prefix, 0)]) # pattern of min in the sliding window with size (limit+1)
for j in xrange(1, len(robot)+1):
prefix += abs(robot[j-1]-factory[i][0])
if j-dq[0][1] == factory[i][1]+1:
dq.popleft()
while dq and dq[-1][0] >= dp[j]-prefix:
dq.pop()
dq.append((dp[j]-prefix, j))
dp[j] = dq[0][0]+prefix
return dp[-1]
# Time: O(mlogm + nlogn + m * n * l), l is the max limit
# Space: O(n)
import collections
# sort, dp
| Solution |
python | huggingface__transformers | src/transformers/models/align/modeling_align.py | {
"start": 1684,
"end": 2317
} | class ____(ModelOutput):
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
"""
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for text model's outputs that also contains a pooling of the last hidden states.
"""
)
| AlignVisionModelOutput |
python | falconry__falcon | tests/test_before_hooks.py | {
"start": 684,
"end": 1759
} | class ____:
def __call__(self, req, resp, resource, params):
assert resource
validate_param(req, resp, resource, params, 'limit')
def validate_field(req, resp, resource, params, field_name='test'):
assert resource
try:
params[field_name] = int(params[field_name])
except ValueError:
raise falcon.HTTPBadRequest()
def parse_body(req, resp, resource, params):
assert resource
length = req.content_length
if length:
params['doc'] = json.load(io.TextIOWrapper(req.bounded_stream, 'utf-8'))
async def parse_body_async(req, resp, resource, params):
assert resource
length = req.content_length
if length:
data = await req.bounded_stream.read()
params['doc'] = json.loads(data.decode('utf-8'))
def bunnies(req, resp, resource, params):
assert resource
params['bunnies'] = 'fuzzy'
def frogs(req, resp, resource, params):
assert resource
if 'bunnies' in params:
params['bunnies'] = 'fluffy'
params['frogs'] = 'not fluffy'
| ResourceAwareValidateParam |
python | run-llama__llama_index | llama-index-core/llama_index/core/base/embeddings/base_sparse.py | {
"start": 2073,
"end": 11323
} | class ____(BaseModel, DispatcherSpanMixin):
"""Base class for embeddings."""
model_config = ConfigDict(
protected_namespaces=("pydantic_model_",), arbitrary_types_allowed=True
)
model_name: str = Field(
default="unknown", description="The name of the embedding model."
)
embed_batch_size: int = Field(
default=DEFAULT_EMBED_BATCH_SIZE,
description="The batch size for embedding calls.",
gt=0,
le=2048,
)
num_workers: Optional[int] = Field(
default=None,
description="The number of workers to use for async embedding calls.",
)
@classmethod
def class_name(cls) -> str:
return "BaseSparseEmbedding"
@model_serializer(mode="wrap")
def custom_model_dump(self, handler: Any) -> Dict[str, Any]:
data = handler(self)
# add class name
data["class_name"] = self.class_name()
# del api_key if it exists
data.pop("api_key", None)
return data
@abstractmethod
def _get_query_embedding(self, query: str) -> SparseEmbedding:
"""Embed the input query synchronously."""
@abstractmethod
async def _aget_query_embedding(self, query: str) -> SparseEmbedding:
"""Embed the input query asynchronously."""
@dispatcher.span
def get_query_embedding(self, query: str) -> SparseEmbedding:
"""Embed the input query."""
model_dict = self.model_dump()
dispatcher.event(
SparseEmbeddingStartEvent(
model_dict=model_dict,
)
)
query_embedding = self._get_query_embedding(query)
dispatcher.event(
SparseEmbeddingEndEvent(
chunks=[query],
embeddings=[query_embedding],
)
)
return query_embedding
@dispatcher.span
async def aget_query_embedding(self, query: str) -> SparseEmbedding:
"""Get query embedding."""
model_dict = self.model_dump()
dispatcher.event(
SparseEmbeddingStartEvent(
model_dict=model_dict,
)
)
query_embedding = await self._aget_query_embedding(query)
dispatcher.event(
SparseEmbeddingEndEvent(
chunks=[query],
embeddings=[query_embedding],
)
)
return query_embedding
def get_agg_embedding_from_queries(
self,
queries: List[str],
agg_fn: Optional[Callable[..., SparseEmbedding]] = None,
) -> SparseEmbedding:
"""Get aggregated embedding from multiple queries."""
query_embeddings = [self.get_query_embedding(query) for query in queries]
agg_fn = agg_fn or mean_agg
return agg_fn(query_embeddings)
async def aget_agg_embedding_from_queries(
self,
queries: List[str],
agg_fn: Optional[Callable[..., SparseEmbedding]] = None,
) -> SparseEmbedding:
"""Async get aggregated embedding from multiple queries."""
query_embeddings = [await self.aget_query_embedding(query) for query in queries]
agg_fn = agg_fn or mean_agg
return agg_fn(query_embeddings)
@abstractmethod
def _get_text_embedding(self, text: str) -> SparseEmbedding:
"""Embed the input text synchronously."""
@abstractmethod
async def _aget_text_embedding(self, text: str) -> SparseEmbedding:
"""Embed the input text asynchronously."""
def _get_text_embeddings(self, texts: List[str]) -> List[SparseEmbedding]:
"""
Embed the input sequence of text synchronously.
Subclasses can implement this method if batch queries are supported.
"""
# Default implementation just loops over _get_text_embedding
return [self._get_text_embedding(text) for text in texts]
async def _aget_text_embeddings(self, texts: List[str]) -> List[SparseEmbedding]:
"""
Embed the input sequence of text asynchronously.
Subclasses can implement this method if batch queries are supported.
"""
return await asyncio.gather(
*[self._aget_text_embedding(text) for text in texts]
)
@dispatcher.span
def get_text_embedding(self, text: str) -> SparseEmbedding:
"""Embed the input text."""
model_dict = self.model_dump()
dispatcher.event(
SparseEmbeddingStartEvent(
model_dict=model_dict,
)
)
text_embedding = self._get_text_embedding(text)
dispatcher.event(
SparseEmbeddingEndEvent(
chunks=[text],
embeddings=[text_embedding],
)
)
return text_embedding
@dispatcher.span
async def aget_text_embedding(self, text: str) -> SparseEmbedding:
"""Async get text embedding."""
model_dict = self.model_dump()
dispatcher.event(
SparseEmbeddingStartEvent(
model_dict=model_dict,
)
)
text_embedding = await self._aget_text_embedding(text)
dispatcher.event(
SparseEmbeddingEndEvent(
chunks=[text],
embeddings=[text_embedding],
)
)
return text_embedding
@dispatcher.span
def get_text_embedding_batch(
self,
texts: List[str],
show_progress: bool = False,
**kwargs: Any,
) -> List[SparseEmbedding]:
"""Get a list of text embeddings, with batching."""
cur_batch: List[str] = []
result_embeddings: List[SparseEmbedding] = []
queue_with_progress = enumerate(
get_tqdm_iterable(texts, show_progress, "Generating embeddings")
)
model_dict = self.model_dump()
for idx, text in queue_with_progress:
cur_batch.append(text)
if idx == len(texts) - 1 or len(cur_batch) == self.embed_batch_size:
# flush
dispatcher.event(
SparseEmbeddingStartEvent(
model_dict=model_dict,
)
)
embeddings = self._get_text_embeddings(cur_batch)
result_embeddings.extend(embeddings)
dispatcher.event(
SparseEmbeddingEndEvent(
chunks=cur_batch,
embeddings=embeddings,
)
)
cur_batch = []
return result_embeddings
@dispatcher.span
async def aget_text_embedding_batch(
self, texts: List[str], show_progress: bool = False
) -> List[SparseEmbedding]:
"""Asynchronously get a list of text embeddings, with batching."""
num_workers = self.num_workers
model_dict = self.model_dump()
cur_batch: List[str] = []
callback_payloads: List[List[str]] = []
result_embeddings: List[SparseEmbedding] = []
embeddings_coroutines: List[Coroutine] = []
for idx, text in enumerate(texts):
cur_batch.append(text)
if idx == len(texts) - 1 or len(cur_batch) == self.embed_batch_size:
# flush
dispatcher.event(
SparseEmbeddingStartEvent(
model_dict=model_dict,
)
)
callback_payloads.append(cur_batch)
embeddings_coroutines.append(self._aget_text_embeddings(cur_batch))
cur_batch = []
# flatten the results of asyncio.gather, which is a list of embeddings lists
nested_embeddings = []
if num_workers and num_workers > 1:
nested_embeddings = await run_jobs(
embeddings_coroutines,
show_progress=show_progress,
workers=self.num_workers,
desc="Generating embeddings",
)
else:
if show_progress:
try:
from tqdm.asyncio import tqdm_asyncio
nested_embeddings = await tqdm_asyncio.gather(
*embeddings_coroutines,
total=len(embeddings_coroutines),
desc="Generating embeddings",
)
except ImportError:
nested_embeddings = await asyncio.gather(*embeddings_coroutines)
else:
nested_embeddings = await asyncio.gather(*embeddings_coroutines)
result_embeddings = [
embedding for embeddings in nested_embeddings for embedding in embeddings
]
for text_batch, embeddings in zip(callback_payloads, nested_embeddings):
dispatcher.event(
SparseEmbeddingEndEvent(
chunks=text_batch,
embeddings=embeddings,
)
)
return result_embeddings
def similarity(
self,
embedding1: SparseEmbedding,
embedding2: SparseEmbedding,
) -> float:
"""Get sparse embedding similarity."""
return sparse_similarity(embedding1, embedding2)
| BaseSparseEmbedding |
python | scipy__scipy | benchmarks/benchmarks/fft_basic.py | {
"start": 7477,
"end": 8805
} | class ____(Benchmark):
params = [
["100x100", "313x100", "1000x100", "256x256", "512x512"],
['real', 'cmplx'],
['pocketfft', 'pyfftw', 'numpy', 'direct']
]
param_names = ['size', 'type', 'backend']
def setup(self, size, cmplx, backend):
import scipy.fft
size = list(map(int, size.split("x")))
if cmplx == 'cmplx':
self.x = random(size).astype(double)+random(size).astype(double)*1j
else:
self.x = random(size).astype(double)
self.fftn = scipy.fft.fftn
self.ifftn = scipy.fft.ifftn
if backend == 'pocketfft':
scipy.fft.set_global_backend('scipy')
elif backend == 'pyfftw':
if not has_pyfftw:
raise NotImplementedError
scipy.fft.set_global_backend(PyfftwBackend)
elif backend == 'numpy':
from scipy.fft._debug_backends import NumPyBackend
scipy.fft.set_global_backend(NumPyBackend)
elif backend == 'direct':
import scipy.fft._pocketfft
self.fftn = scipy.fft._pocketfft.fftn
self.ifftn = scipy.fft._pocketfft.ifftn
def time_fft(self, size, cmplx, module):
self.fftn(self.x)
def time_ifft(self, size, cmplx, module):
self.ifftn(self.x)
| FftnBackends |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/transfers/test_salesforce_to_s3.py | {
"start": 1915,
"end": 4438
} | class ____:
@mock.patch.object(S3Hook, "load_file")
@mock.patch.object(SalesforceHook, "write_object_to_file")
@mock.patch.object(SalesforceHook, "make_query")
def test_execute(self, mock_make_query, mock_write_object_to_file, mock_load_file):
mock_make_query.return_value = SALESFORCE_RESPONSE
operator = SalesforceToS3Operator(
task_id=TASK_ID,
salesforce_query=QUERY,
s3_bucket_name=S3_BUCKET,
s3_key=S3_KEY,
salesforce_conn_id=SALESFORCE_CONNECTION_ID,
export_format=EXPORT_FORMAT,
query_params=QUERY_PARAMS,
include_deleted=INCLUDE_DELETED,
coerce_to_timestamp=COERCE_TO_TIMESTAMP,
record_time_added=RECORD_TIME_ADDED,
aws_conn_id=AWS_CONNECTION_ID,
replace=REPLACE,
encrypt=ENCRYPT,
gzip=GZIP,
acl_policy=ACL_POLICY,
)
assert operator.task_id == TASK_ID
assert operator.salesforce_query == QUERY
assert operator.s3_bucket_name == S3_BUCKET
assert operator.s3_key == S3_KEY
assert operator.salesforce_conn_id == SALESFORCE_CONNECTION_ID
assert operator.export_format == EXPORT_FORMAT
assert operator.query_params == QUERY_PARAMS
assert operator.include_deleted == INCLUDE_DELETED
assert operator.coerce_to_timestamp == COERCE_TO_TIMESTAMP
assert operator.record_time_added == RECORD_TIME_ADDED
assert operator.aws_conn_id == AWS_CONNECTION_ID
assert operator.replace == REPLACE
assert operator.encrypt == ENCRYPT
assert operator.gzip == GZIP
assert operator.acl_policy == ACL_POLICY
assert f"s3://{S3_BUCKET}/{S3_KEY}" == operator.execute({})
mock_make_query.assert_called_once_with(
query=QUERY, include_deleted=INCLUDE_DELETED, query_params=QUERY_PARAMS
)
mock_write_object_to_file.assert_called_once_with(
query_results=SALESFORCE_RESPONSE["records"],
filename=mock.ANY,
fmt=EXPORT_FORMAT,
coerce_to_timestamp=COERCE_TO_TIMESTAMP,
record_time_added=RECORD_TIME_ADDED,
)
mock_load_file.assert_called_once_with(
bucket_name=S3_BUCKET,
key=S3_KEY,
filename=mock.ANY,
replace=REPLACE,
encrypt=ENCRYPT,
gzip=GZIP,
acl_policy=ACL_POLICY,
)
| TestSalesforceToGcsOperator |
python | google__jax | jax/_src/pallas/mosaic/core.py | {
"start": 7926,
"end": 8646
} | class ____(pallas_core.GridSpec):
num_scalar_prefetch: int
def __init__(
self,
num_scalar_prefetch: int,
grid: pallas_core.Grid = (),
in_specs: pallas_core.BlockSpecTree = no_block_spec,
out_specs: pallas_core.BlockSpecTree = no_block_spec,
scratch_shapes: pallas_core.ScratchShapeTree = ()
):
super().__init__(grid, in_specs, out_specs, scratch_shapes)
self.num_scalar_prefetch = num_scalar_prefetch
self.scratch_shapes = tuple(scratch_shapes)
def _make_scalar_ref_aval(self, aval):
return state.AbstractRef(jax_core.ShapedArray(aval.shape, aval.dtype),
MemorySpace.SMEM)
@dataclasses.dataclass(frozen=True)
| PrefetchScalarGridSpec |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/kw_only.py | {
"start": 165,
"end": 228
} | class ____:
a: int
_: KW_ONLY
b: str
@dataclass
| Test1 |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-to-visit-a-cell-in-a-grid.py | {
"start": 88,
"end": 1315
} | class ____(object):
def minimumTime(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
DIRECTIONS = ((1, 0), (0, 1), (-1, 0), (0, -1))
def dijkstra(start, target):
best = [[float("inf")]*len(grid[0]) for _ in xrange(len(grid))]
best[start[0]][start[1]] = 0
min_heap = [(0, start[0], start[1])]
while min_heap:
curr, i, j = heapq.heappop(min_heap)
if best[i][j] < curr:
continue
if (i, j) == target:
break
for di, dj in DIRECTIONS:
ni, nj = i+di, j+dj
if not (0 <= ni < len(grid) and 0 <= nj < len(grid[0]) and best[ni][nj] > max(grid[ni][nj]+int(grid[ni][nj]%2 == best[i][j]%2), curr+1)):
continue
best[ni][nj] = max(grid[ni][nj]+int(grid[ni][nj]%2 == best[i][j]%2), curr+1)
heapq.heappush(min_heap, (best[ni][nj], ni, nj))
return best[target[0]][target[1]]
if min(grid[0][1], grid[1][0]) > 1:
return -1
return dijkstra((0, 0), (len(grid)-1, len(grid[0])-1))
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/generator1.py | {
"start": 300,
"end": 336
} | class ____:
pass
s = True
| ClassA |
python | getsentry__sentry | tests/sentry/services/nodestore/bigtable/test_backend.py | {
"start": 472,
"end": 3094
} | class ____(BigtableKVStorage):
class Cell:
def __init__(self, value: bytes, timestamp: int) -> None:
self.value = value
self.timestamp = timestamp
class Row:
def __init__(self, table: MockedBigtableKVStorage.Table, row_key: str) -> None:
self.row_key = row_key.encode("utf8")
self.table = table
def delete(self) -> None:
self.table._rows.pop(self.row_key, None)
def set_cell(self, family: str, col: str, value: bytes, timestamp: int) -> None:
assert family == "x"
self.table._rows.setdefault(self.row_key, {})[col] = [
MockedBigtableKVStorage.Cell(value, timestamp)
]
def commit(self) -> Status:
# commits not implemented, changes are applied immediately
return Status(code=0)
@property
def cells(self) -> dict[str, dict[str, list[MockedBigtableKVStorage.Cell]]]:
return {"x": dict(self.table._rows.get(self.row_key) or ())}
class Table(table.Table):
def __init__(self) -> None:
self._rows: dict[bytes, dict[str, list[MockedBigtableKVStorage.Cell]]] = {}
def direct_row(self, key: str) -> MockedBigtableKVStorage.Row:
return MockedBigtableKVStorage.Row(self, key)
def read_row(
self, row_key: str, filter_: Any = None, retry: Any = DEFAULT_RETRY_READ_ROWS
) -> MockedBigtableKVStorage.Row:
return MockedBigtableKVStorage.Row(self, row_key)
def read_rows(
self,
start_key: str | None = None,
end_key: str | None = None,
limit: int | None = None,
filter_: Any = None,
end_inclusive: bool = False,
row_set: Any = None,
retry: Any = None,
) -> list[MockedBigtableKVStorage.Row]:
assert not row_set.row_ranges, "unsupported"
return [self.read_row(key) for key in row_set.row_keys]
def mutate_rows(
self, rows: list[Any], retry: Any = None, timeout: float | None = None
) -> list[Status]:
# commits not implemented, changes are applied immediately
return [Status(code=0) for row in rows]
def _get_table(self, admin: bool = False) -> MockedBigtableKVStorage.Table:
try:
table = self.__table
except AttributeError:
table = self.__table = MockedBigtableKVStorage.Table()
return table
def bootstrap(self, automatic_expiry: bool = True) -> None:
pass
| MockedBigtableKVStorage |
python | ray-project__ray | python/ray/serve/tests/test_fastapi.py | {
"start": 4080,
"end": 4120
} | class ____(BaseModel):
val: int
| Nested |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1009269,
"end": 1009785
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of UnlinkProjectV2FromRepository"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "repository")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
repository = sgqlc.types.Field("Repository", graphql_name="repository")
"""The repository the project is no longer linked to."""
| UnlinkProjectV2FromRepositoryPayload |
python | ray-project__ray | python/ray/data/_internal/execution/callbacks/insert_issue_detectors.py | {
"start": 324,
"end": 804
} | class ____(ExecutionCallback):
"""ExecutionCallback that handles issue detection."""
def before_execution_starts(self, executor: "StreamingExecutor"):
# Initialize issue detector in StreamingExecutor
executor._issue_detector_manager = IssueDetectorManager(executor)
def on_execution_step(self, executor: "StreamingExecutor"):
# Invoke all issue detectors
executor._issue_detector_manager.invoke_detectors()
| IssueDetectionExecutionCallback |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 10353,
"end": 11333
} | class ____:
class B:
def foo():
st_error = STError(
f"This string ({string_leaf.value}) appears to be pointless (i.e. has"
" no parent)."
)
def foo():
user_regex = _lazy_re_compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string
re.IGNORECASE)
def foo():
user_regex = _lazy_re_compile(
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" # dot-atom
'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', # quoted-string
xyz
)
def foo():
user_regex = _lazy_re_compile(
"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" # dot-atom
'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', # quoted-string
xyz
)
| A |
python | TheAlgorithms__Python | data_structures/binary_tree/symmetric_tree.py | {
"start": 268,
"end": 3636
} | class ____:
"""
A Node represents an element of a binary tree, which contains:
Attributes:
data: The value stored in the node (int).
left: Pointer to the left child node (Node or None).
right: Pointer to the right child node (Node or None).
Example:
>>> node = Node(1, Node(2), Node(3))
>>> node.data
1
>>> node.left.data
2
>>> node.right.data
3
"""
data: int
left: Node | None = None
right: Node | None = None
def make_symmetric_tree() -> Node:
r"""
Create a symmetric tree for testing.
The tree looks like this:
1
/ \
2 2
/ \ / \
3 4 4 3
Returns:
Node: Root node of a symmetric tree.
Example:
>>> tree = make_symmetric_tree()
>>> tree.data
1
>>> tree.left.data == tree.right.data
True
>>> tree.left.left.data == tree.right.right.data
True
"""
root = Node(1)
root.left = Node(2)
root.right = Node(2)
root.left.left = Node(3)
root.left.right = Node(4)
root.right.left = Node(4)
root.right.right = Node(3)
return root
def make_asymmetric_tree() -> Node:
r"""
Create an asymmetric tree for testing.
The tree looks like this:
1
/ \
2 2
/ \ / \
3 4 3 4
Returns:
Node: Root node of an asymmetric tree.
Example:
>>> tree = make_asymmetric_tree()
>>> tree.data
1
>>> tree.left.data == tree.right.data
True
>>> tree.left.left.data == tree.right.right.data
False
"""
root = Node(1)
root.left = Node(2)
root.right = Node(2)
root.left.left = Node(3)
root.left.right = Node(4)
root.right.left = Node(3)
root.right.right = Node(4)
return root
def is_symmetric_tree(tree: Node) -> bool:
"""
Check if a binary tree is symmetric (i.e., a mirror of itself).
Parameters:
tree: The root node of the binary tree.
Returns:
bool: True if the tree is symmetric, False otherwise.
Example:
>>> is_symmetric_tree(make_symmetric_tree())
True
>>> is_symmetric_tree(make_asymmetric_tree())
False
"""
if tree:
return is_mirror(tree.left, tree.right)
return True # An empty tree is considered symmetric.
def is_mirror(left: Node | None, right: Node | None) -> bool:
"""
Check if two subtrees are mirror images of each other.
Parameters:
left: The root node of the left subtree.
right: The root node of the right subtree.
Returns:
bool: True if the two subtrees are mirrors of each other, False otherwise.
Example:
>>> tree1 = make_symmetric_tree()
>>> is_mirror(tree1.left, tree1.right)
True
>>> tree2 = make_asymmetric_tree()
>>> is_mirror(tree2.left, tree2.right)
False
"""
if left is None and right is None:
# Both sides are empty, which is symmetric.
return True
if left is None or right is None:
# One side is empty while the other is not, which is not symmetric.
return False
if left.data == right.data:
# The values match, so check the subtrees recursively.
return is_mirror(left.left, right.right) and is_mirror(left.right, right.left)
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| Node |
python | pytorch__pytorch | test/quantization/core/test_quantized_op.py | {
"start": 365388,
"end": 368650
} | class ____(TestCase):
@given(batch_size=st.integers(1, 64),
channels=st.integers(1, 64),
width=st.integers(16, 128),
qtype=st.sampled_from(hu._ALL_QINT_TYPES))
def test_reflection_pad1d(self, batch_size, channels, width, qtype):
padding = width // 4
x = torch.arange(batch_size * channels * width).to(torch.float)
x = x.resize(batch_size, channels, width)
# Per-Tensor test
scale, zp = _calculate_dynamic_qparams(x, qtype)
qx = torch.quantize_per_tensor(x, scale, zp, qtype)
padding_op = torch.nn.ReflectionPad1d(padding)
y_ref = padding_op(x)
qy_ref = torch.quantize_per_tensor(y_ref, scale, zp, qtype)
qy_hat = padding_op(qx)
self.assertEqual(qy_ref, qy_hat)
# Out variant
qy_hat = torch._C._nn.reflection_pad1d(qx, padding, out=qy_hat)
self.assertEqual(qy_ref, qy_hat)
@given(batch_size=st.integers(1, 64),
channels=st.integers(1, 64),
height=st.integers(16, 128),
width=st.integers(16, 128),
qtype=st.sampled_from(hu._ALL_QINT_TYPES))
def test_reflection_pad2d(self, batch_size, channels, height, width, qtype):
padding = (width // 4, width // 4, height // 4, height // 4)
x = torch.arange(batch_size * channels * height * width).to(torch.float)
x = x.resize(batch_size, channels, height, width)
# Per-Tensor test
scale, zp = _calculate_dynamic_qparams(x, qtype)
qx = torch.quantize_per_tensor(x, scale, zp, qtype)
padding_op = torch.nn.ReflectionPad2d(padding)
y_ref = padding_op(x)
qy_ref = torch.quantize_per_tensor(y_ref, scale, zp, qtype)
qy_hat = padding_op(qx)
self.assertEqual(qy_ref, qy_hat)
# Out variant
qy_hat = torch._C._nn.reflection_pad2d(qx, padding, out=qy_hat)
self.assertEqual(qy_ref, qy_hat)
@given(batch_size=st.integers(1, 64),
channels=st.integers(1, 64),
hwd=st.integers(1, 16), # For 3D, max input size would be 16x16x16
d=st.sampled_from([1, 2, 3]),
value=st.floats(-5, 5, allow_nan=False, allow_infinity=False),
qtype=st.sampled_from(hu._ALL_QINT_TYPES))
def test_constant_padNd(self, batch_size, channels, d, hwd, value, qtype):
padding = hwd // 4
shape = [batch_size, channels, hwd]
op = torch.nn.ConstantPad1d
if d >= 2:
shape.append(hwd)
op = torch.nn.ConstantPad2d
if d == 3:
shape.append(hwd)
op = torch.nn.ConstantPad3d
numel = np.prod(shape)
x = torch.arange(numel).to(torch.float)
x = x.resize(*shape)
# Per-Tensor test
scale, zp = _calculate_dynamic_qparams(x, qtype)
qx = torch.quantize_per_tensor(x, scale, zp, qtype)
padding_op = op(padding, value)
y_ref = padding_op(x)
qy_ref = torch.quantize_per_tensor(y_ref, scale, zp, qtype)
qy_hat = padding_op(qx)
self.assertEqual(qy_ref, qy_hat)
@unittest.skipUnless('qnnpack' in supported_qengines,
"This Pytorch Build has not been built with or does not support QNNPACK")
| TestPadding |
python | huggingface__transformers | tests/peft_integration/test_peft_integration.py | {
"start": 1324,
"end": 1639
} | class ____:
peft_test_model_ids = ("peft-internal-testing/tiny-OPTForCausalLM-lora",)
transformers_test_model_ids = ("hf-internal-testing/tiny-random-OPTForCausalLM",)
transformers_test_model_classes = (AutoModelForCausalLM, OPTForCausalLM)
# TODO: run it with CI after PEFT release.
@slow
| PeftTesterMixin |
python | anthropics__anthropic-sdk-python | src/anthropic/types/tool_choice_none_param.py | {
"start": 219,
"end": 306
} | class ____(TypedDict, total=False):
type: Required[Literal["none"]]
| ToolChoiceNoneParam |
python | coleifer__peewee | tests/libs/mock.py | {
"start": 11927,
"end": 27671
} | class ____(Base):
"""A non-callable version of `Mock`"""
def __new__(cls, *args, **kw):
# every instance has its own class
# so we can create magic methods on the
# class without stomping on other mocks
new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
instance = object.__new__(new)
return instance
def __init__(
self, spec=None, wraps=None, name=None, spec_set=None,
parent=None, _spec_state=None, _new_name='', _new_parent=None,
**kwargs
):
if _new_parent is None:
_new_parent = parent
__dict__ = self.__dict__
__dict__['_mock_parent'] = parent
__dict__['_mock_name'] = name
__dict__['_mock_new_name'] = _new_name
__dict__['_mock_new_parent'] = _new_parent
if spec_set is not None:
spec = spec_set
spec_set = True
self._mock_add_spec(spec, spec_set)
__dict__['_mock_children'] = {}
__dict__['_mock_wraps'] = wraps
__dict__['_mock_delegate'] = None
__dict__['_mock_called'] = False
__dict__['_mock_call_args'] = None
__dict__['_mock_call_count'] = 0
__dict__['_mock_call_args_list'] = _CallList()
__dict__['_mock_mock_calls'] = _CallList()
__dict__['method_calls'] = _CallList()
if kwargs:
self.configure_mock(**kwargs)
_super(NonCallableMock, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state
)
def attach_mock(self, mock, attribute):
"""
Attach a mock as an attribute of this one, replacing its name and
parent. Calls to the attached mock will be recorded in the
`method_calls` and `mock_calls` attributes of this one."""
mock._mock_parent = None
mock._mock_new_parent = None
mock._mock_name = ''
mock._mock_new_name = None
setattr(self, attribute, mock)
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
def _mock_add_spec(self, spec, spec_set):
_spec_class = None
if spec is not None and not _is_list(spec):
if isinstance(spec, ClassTypes):
_spec_class = spec
else:
_spec_class = _get_class(spec)
spec = dir(spec)
__dict__ = self.__dict__
__dict__['_spec_class'] = _spec_class
__dict__['_spec_set'] = spec_set
__dict__['_mock_methods'] = spec
def __get_return_value(self):
ret = self._mock_return_value
if self._mock_delegate is not None:
ret = self._mock_delegate.return_value
if ret is DEFAULT:
ret = self._get_child_mock(
_new_parent=self, _new_name='()'
)
self.return_value = ret
return ret
def __set_return_value(self, value):
if self._mock_delegate is not None:
self._mock_delegate.return_value = value
else:
self._mock_return_value = value
_check_and_set_parent(self, value, None, '()')
__return_value_doc = "The value to be returned when the mock is called."
return_value = property(__get_return_value, __set_return_value,
__return_value_doc)
@property
def __class__(self):
if self._spec_class is None:
return type(self)
return self._spec_class
called = _delegating_property('called')
call_count = _delegating_property('call_count')
call_args = _delegating_property('call_args')
call_args_list = _delegating_property('call_args_list')
mock_calls = _delegating_property('mock_calls')
def __get_side_effect(self):
sig = self._mock_delegate
if sig is None:
return self._mock_side_effect
return sig.side_effect
def __set_side_effect(self, value):
value = _try_iter(value)
sig = self._mock_delegate
if sig is None:
self._mock_side_effect = value
else:
sig.side_effect = value
side_effect = property(__get_side_effect, __set_side_effect)
def reset_mock(self):
"Restore the mock object to its initial state."
self.called = False
self.call_args = None
self.call_count = 0
self.mock_calls = _CallList()
self.call_args_list = _CallList()
self.method_calls = _CallList()
for child in self._mock_children.values():
if isinstance(child, _SpecState):
continue
child.reset_mock()
ret = self._mock_return_value
if _is_instance_mock(ret) and ret is not self:
ret.reset_mock()
def configure_mock(self, **kwargs):
"""Set attributes on the mock through keyword arguments.
Attributes plus return values and side effects can be set on child
mocks using standard dot notation and unpacking a dictionary in the
method call:
>>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
>>> mock.configure_mock(**attrs)"""
for arg, val in sorted(kwargs.items(),
# we sort on the number of dots so that
# attributes are set before we set attributes on
# attributes
key=lambda entry: entry[0].count('.')):
args = arg.split('.')
final = args.pop()
obj = self
for entry in args:
obj = getattr(obj, entry)
setattr(obj, final, val)
def __getattr__(self, name):
if name == '_mock_methods':
raise AttributeError(name)
elif self._mock_methods is not None:
if name not in self._mock_methods or name in _all_magics:
raise AttributeError("Mock object has no attribute %r" % name)
elif _is_magic(name):
raise AttributeError(name)
result = self._mock_children.get(name)
if result is _deleted:
raise AttributeError(name)
elif result is None:
wraps = None
if self._mock_wraps is not None:
# XXXX should we get the attribute without triggering code
# execution?
wraps = getattr(self._mock_wraps, name)
result = self._get_child_mock(
parent=self, name=name, wraps=wraps, _new_name=name,
_new_parent=self
)
self._mock_children[name] = result
elif isinstance(result, _SpecState):
result = create_autospec(
result.spec, result.spec_set, result.instance,
result.parent, result.name
)
self._mock_children[name] = result
return result
def __repr__(self):
_name_list = [self._mock_new_name]
_parent = self._mock_new_parent
last = self
dot = '.'
if _name_list == ['()']:
dot = ''
seen = set()
while _parent is not None:
last = _parent
_name_list.append(_parent._mock_new_name + dot)
dot = '.'
if _parent._mock_new_name == '()':
dot = ''
_parent = _parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
if id(_parent) in seen:
break
seen.add(id(_parent))
_name_list = list(reversed(_name_list))
_first = last._mock_name or 'mock'
if len(_name_list) > 1:
if _name_list[1] not in ('()', '().'):
_first += '.'
_name_list[0] = _first
name = ''.join(_name_list)
name_string = ''
if name not in ('mock', 'mock.'):
name_string = ' name=%r' % name
spec_string = ''
if self._spec_class is not None:
spec_string = ' spec=%r'
if self._spec_set:
spec_string = ' spec_set=%r'
spec_string = spec_string % self._spec_class.__name__
return "<%s%s%s id='%s'>" % (
type(self).__name__,
name_string,
spec_string,
id(self)
)
def __dir__(self):
"""Filter the output of `dir(mock)` to only useful members.
XXXX
"""
extras = self._mock_methods or []
from_type = dir(type(self))
from_dict = list(self.__dict__)
if FILTER_DIR:
from_type = [e for e in from_type if not e.startswith('_')]
from_dict = [e for e in from_dict if not e.startswith('_') or
_is_magic(e)]
return sorted(set(extras + from_type + from_dict +
list(self._mock_children)))
def __setattr__(self, name, value):
if name in _allowed_names:
# property setters go through here
return object.__setattr__(self, name, value)
elif (self._spec_set and self._mock_methods is not None and
name not in self._mock_methods and
name not in self.__dict__):
raise AttributeError("Mock object has no attribute '%s'" % name)
elif name in _unsupported_magics:
msg = 'Attempting to set unsupported magic method %r.' % name
raise AttributeError(msg)
elif name in _all_magics:
if self._mock_methods is not None and name not in self._mock_methods:
raise AttributeError("Mock object has no attribute '%s'" % name)
if not _is_instance_mock(value):
setattr(type(self), name, _get_method(name, value))
original = value
value = lambda *args, **kw: original(self, *args, **kw)
else:
# only set _new_name and not name so that mock_calls is tracked
# but not method calls
_check_and_set_parent(self, value, None, name)
setattr(type(self), name, value)
self._mock_children[name] = value
elif name == '__class__':
self._spec_class = value
return
else:
if _check_and_set_parent(self, value, name, name):
self._mock_children[name] = value
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name in _all_magics and name in type(self).__dict__:
delattr(type(self), name)
if name not in self.__dict__:
# for magic methods that are still MagicProxy objects and
# not set on the instance itself
return
if name in self.__dict__:
object.__delattr__(self, name)
obj = self._mock_children.get(name, _missing)
if obj is _deleted:
raise AttributeError(name)
if obj is not _missing:
del self._mock_children[name]
self._mock_children[name] = _deleted
def _format_mock_call_signature(self, args, kwargs):
name = self._mock_name or 'mock'
return _format_call_signature(name, args, kwargs)
def _format_mock_failure_message(self, args, kwargs):
message = 'Expected call: %s\nActual call: %s'
expected_string = self._format_mock_call_signature(args, kwargs)
call_args = self.call_args
if len(call_args) == 3:
call_args = call_args[1:]
actual_string = self._format_mock_call_signature(*call_args)
return message % (expected_string, actual_string)
def assert_called_with(_mock_self, *args, **kwargs):
"""assert that the mock was called with the specified arguments.
Raises an AssertionError if the args and keyword args passed in are
different to the last call to the mock."""
self = _mock_self
if self.call_args is None:
expected = self._format_mock_call_signature(args, kwargs)
raise AssertionError('Expected call: %s\nNot called' % (expected,))
if self.call_args != (args, kwargs):
msg = self._format_mock_failure_message(args, kwargs)
raise AssertionError(msg)
def assert_called_once_with(_mock_self, *args, **kwargs):
"""assert that the mock was called exactly once and with the specified
arguments."""
self = _mock_self
if not self.call_count == 1:
msg = ("Expected to be called once. Called %s times." %
self.call_count)
raise AssertionError(msg)
return self.assert_called_with(*args, **kwargs)
def assert_has_calls(self, calls, any_order=False):
"""assert the mock has been called with the specified calls.
The `mock_calls` list is checked for the calls.
If `any_order` is False (the default) then the calls must be
sequential. There can be extra calls before or after the
specified calls.
If `any_order` is True then the calls can be in any order, but
they must all appear in `mock_calls`."""
if not any_order:
if calls not in self.mock_calls:
raise AssertionError(
'Calls not found.\nExpected: %r\n'
'Actual: %r' % (calls, self.mock_calls)
)
return
all_calls = list(self.mock_calls)
not_found = []
for kall in calls:
try:
all_calls.remove(kall)
except ValueError:
not_found.append(kall)
if not_found:
raise AssertionError(
'%r not all found in call list' % (tuple(not_found),)
)
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one."""
kall = call(*args, **kwargs)
if kall not in self.call_args_list:
expected_string = self._format_mock_call_signature(args, kwargs)
raise AssertionError(
'%s call not found' % expected_string
)
def _get_child_mock(self, **kw):
"""Create the child mocks for attributes and return value.
By default child mocks will be the same type as the parent.
Subclasses of Mock may want to override this to customize the way
child mocks are made.
For non-callable mocks the callable variant will be used (rather than
any custom subclass)."""
_type = type(self)
if not issubclass(_type, CallableMixin):
if issubclass(_type, NonCallableMagicMock):
klass = MagicMock
elif issubclass(_type, NonCallableMock) :
klass = Mock
else:
klass = _type.__mro__[1]
return klass(**kw)
def _try_iter(obj):
if obj is None:
return obj
if _is_exception(obj):
return obj
if _callable(obj):
return obj
try:
return iter(obj)
except TypeError:
# XXXX backwards compatibility
# but this will blow up on first call - so maybe we should fail early?
return obj
| NonCallableMock |
python | spyder-ide__spyder | spyder/api/shellconnect/main_widget.py | {
"start": 541,
"end": 1392
} | class ____(EmptyMessageWidget):
"""Widget to show when the kernel's shell failed to start."""
def __init__(self, parent, shellwidget):
# Initialize EmptyMessageWidget with the content we want to show for
# errors
super().__init__(
parent,
icon_filename=(
"console-remote-off"
if shellwidget.is_remote()
else "console-off"
),
text=_("No connected console"),
description=_(
"The current console has no active kernel, so there is no "
"content to show here"
),
adjust_on_resize=True,
)
# This is necessary to show this widget in case ShellConnectMainWidget
# shows an empty message.
self.is_empty = False
| _ErroredMessageWidget |
python | walkccc__LeetCode | solutions/1210. Minimum Moves to Reach Target with Rotations/1210.py | {
"start": 81,
"end": 1884
} | class ____:
def minimumMoves(self, grid: list[list[int]]) -> int:
n = len(grid)
ans = 0
# the state of (x, y, pos)
# pos := 0 (horizontal) / 1 (vertical)
q = collections.deque([(0, 0, Pos.HORIZONTAL)])
seen = {(0, 0, Pos.HORIZONTAL)}
def canMoveRight(x: int, y: int, pos: Pos) -> bool:
if pos == Pos.HORIZONTAL:
return y + 2 < n and not grid[x][y + 2]
return y + 1 < n and not grid[x][y + 1] and not grid[x + 1][y + 1]
def canMoveDown(x: int, y: int, pos: Pos) -> bool:
if pos == Pos.VERTICAL:
return x + 2 < n and not grid[x + 2][y]
return x + 1 < n and not grid[x + 1][y] and not grid[x + 1][y + 1]
def canRotateClockwise(x: int, y: int, pos: Pos) -> bool:
return (pos == Pos.HORIZONTAL and x + 1 < n and
not grid[x + 1][y + 1] and not grid[x + 1][y])
def canRotateCounterclockwise(x: int, y: int, pos: Pos) -> bool:
return (pos == Pos.VERTICAL and y + 1 < n and
not grid[x + 1][y + 1] and not grid[x][y + 1])
while q:
for _ in range(len(q)):
x, y, pos = q.popleft()
if x == n - 1 and y == n - 2 and pos == Pos.HORIZONTAL:
return ans
if canMoveRight(x, y, pos) and (x, y + 1, pos) not in seen:
q.append((x, y + 1, pos))
seen.add((x, y + 1, pos))
if canMoveDown(x, y, pos) and (x + 1, y, pos) not in seen:
q.append((x + 1, y, pos))
seen.add((x + 1, y, pos))
newPos = Pos.VERTICAL if pos == Pos.HORIZONTAL else Pos.HORIZONTAL
if ((canRotateClockwise(x, y, pos) or
canRotateCounterclockwise(x, y, pos)) and
(x, y, newPos) not in seen):
q.append((x, y, newPos))
seen.add((x, y, newPos))
ans += 1
return -1
| Solution |
python | django__django | tests/gis_tests/geoapp/models.py | {
"start": 1528,
"end": 1684
} | class ____(NamedModel):
geom = models.GeometryField(dim=3)
class Meta:
required_db_features = {"supports_3d_storage"}
| ThreeDimensionalFeature |
python | pytorch__pytorch | torch/_inductor/select_algorithm.py | {
"start": 84698,
"end": 87966
} | class ____(ir.TritonTemplateCallerBase):
def __init__(
self,
name,
input_nodes,
layout,
make_kernel_render,
description,
bmreq,
log_info: Optional[
dict[str, Union[PrimitiveInfoType, list[PrimitiveInfoType]]]
] = None,
mutated_inputs=None,
workspace_arg: Optional[WorkspaceArg] = None,
allowed_prologue_inps: Optional[OrderedSet[str]] = None,
hint_override: Optional[int] = None,
) -> None:
super().__init__(name, input_nodes, layout, description)
self.make_kernel_render = make_kernel_render
self.bmreq: TritonBenchmarkRequest = bmreq
if log_info is None:
log_info = {}
self.log_info: dict[str, Any] = log_info
self.log_info.update(
{
"backend": "Triton",
"num_stages": self.bmreq.num_stages,
"num_warps": self.bmreq.num_warps,
}
)
self.mutated_inputs = mutated_inputs
self.workspace_arg = workspace_arg
self.allowed_prologue_inps = (
allowed_prologue_inps if allowed_prologue_inps is not None else OrderedSet()
)
self.hint_override = hint_override
def benchmark(self, *args, out):
assert self.bmreq is not None
if config.profile_bandwidth_with_do_bench_using_profiling:
algo = self.bmreq.make_run_fn(*args, out=out)
return do_bench_using_profiling(algo)
return self.bmreq.benchmark(*args, out=out)
def precompile(self):
assert self.bmreq is not None
self.bmreq.precompile()
def __str__(self) -> str:
return f"TritonTemplateCaller({self.bmreq.module_path}, {self.description})"
def call_name(self):
return f"template_kernels.{self.name}"
def hash_key(self):
return "-".join(
[
self.name.rsplit("_", 1)[0],
self.bmreq.module_cache_key,
]
)
def output_node(self):
return ir.TensorBox.create(
ir.TritonTemplateBuffer(
layout=self.layout,
inputs=self.input_nodes,
make_kernel_render=self.make_kernel_render,
mutated_inputs=self.mutated_inputs,
allowed_prologue_inps=self.allowed_prologue_inps,
)
)
def info_dict(self) -> dict[str, Union[PrimitiveInfoType, list[PrimitiveInfoType]]]:
"""Information returned here is logged to the autotune log file when that is enabled."""
return self.log_info
def get_make_kernel_render(self):
return self.make_kernel_render
def autoheuristic_id(self):
type_name = "triton"
info = self.info_dict()
# TODO(AlnisM): Does tile_shape always exist?
tile = info["tile_shape"]
tile_vals = eval(tile) # type: ignore[arg-type]
BLOCK_M = tile_vals[0]
BLOCK_K = tile_vals[1]
BLOCK_N = tile_vals[2]
num_stages = info["num_stages"]
num_warps = info["num_warps"]
return f"type={type_name}_BLOCK-M={BLOCK_M}_BLOCK-K={BLOCK_K}_BLOCK-N={BLOCK_N}_numstages={num_stages}_numwarps={num_warps}"
| TritonTemplateCaller |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_N.py | {
"start": 2771,
"end": 4094
} | class ____(Benchmark):
r"""
NewFunction02 objective function.
This class defines the NewFunction02 global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{NewFunction02}}(x) = \left | {\sin\left(\sqrt{\lvert{x_{1}^{2}
+ x_{2}}\rvert}\right)} \right |^{0.5} + (x_{1} + x_{2})/100
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -0.19933159253` for
:math:`x = [-9.94103375, -9.99771235]`
.. [1] Mishra, S. Global Optimization by Differential Evolution and
Particle Swarm Methods: Evaluation on Some Benchmark Functions.
Munich Personal RePEc Archive, 2006, 1005
TODO Line 368
TODO WARNING, minimum value is estimated from running many optimisations and
choosing the best.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[-9.94114736324, -9.99997128772]]
self.fglob = -0.199409030092
def fun(self, x, *args):
self.nfev += 1
return ((abs(sin(sqrt(abs(x[0] ** 2 + x[1]))))) ** 0.5
+ 0.01 * (x[0] + x[1]))
#Newfunction 3 from Gavana is entered as Mishra05.
| NewFunction02 |
python | sympy__sympy | sympy/solvers/ode/single.py | {
"start": 1621,
"end": 1755
} | class ____(NotImplementedError):
"""Raised if a SingleODESolver is asked to solve an ODE it does not match"""
pass
| ODEMatchError |
python | pytorch__pytorch | torch/_higher_order_ops/triton_kernel_wrap.py | {
"start": 32853,
"end": 41930
} | class ____:
fn: Callable[..., Any]
cache: dict[tuple[Any], Any]
def __init__(self, fn: Callable[..., Any]) -> None:
self.fn = fn
self.reset()
def __call__(
self,
functions: dict[str, dict[Intermediate, list[Op]]],
fn_name: str,
*args: Any,
) -> list[bool]:
key: tuple[Any, ...] = (fn_name, *args)
if key not in self.cache:
self.cache[key] = None
self.cache[key] = self.fn(functions, fn_name, *args)
if self.cache[key] is None:
raise RuntimeError("Recursion is not supported")
return self.cache[key]
def reset(self) -> None:
self.cache = {}
@MemoizeWithCycleCheck
def get_tma_stores(
functions: dict[str, dict[Intermediate, list[Op]]], fn_name: str
) -> set[Union[Intermediate, Param]]:
"""
Identifies all intermediates and parameters that are written to by a
`tt.experimental_descriptor_store`. It tracks only the specific values
written to via experimental_descriptor_store and the input values to
`tt.reinterpret_tensor_descriptor` used to construct the direct inputs
to tt.experimental_descriptor_store - not any recursive values
used to construct those values.
For example: for
tt.reinterpret_tensor_descriptor(Intermediate(idx=0), ...)
Intermediate(idx=1) = tt.experimental_descriptor_store(Intermediate(idx=0), ...)
this function will return [Intermediate(idx=0), Intermediate(idx=1)],
However
Intermediate(idx=4) = arith.addptr(Intermediate(idx=2), Intermediate(idx=3))
Intermediate(idx=5) = tt.experimental_descriptor_store(Intermediate(idx=4), ...)
tt.experimental_descriptor_store(Intermediate(idx=5), ...)
this function will mark only idx=4 and idx=5 (but not idx=2 or idx=3)
If an intermediate/parameter is passed into a function and is written to
via experimental_descriptor_store within that function, the argument to the
function will also be marked.
"""
result: set[Union[Intermediate, Param]] = set()
ops = functions[fn_name]
for op_list in ops.values():
for op in op_list:
if op.name == "tt.call":
assert op.fn_call_name in functions
# pyrefly: ignore [bad-argument-type]
tma_stores = get_tma_stores(functions, op.fn_call_name)
for i, inp in enumerate(op.args):
if Param(idx=i) in tma_stores:
result.add(inp)
elif op.name == "tt.experimental_descriptor_store":
assert len(op.args) >= 1
result.add(op.args[0])
elif op.name == "tt.descriptor_store":
assert len(op.args) >= 1
result.add(op.args[0])
for val in list(result):
if val in ops:
if not isinstance(val, Intermediate):
continue
for op in ops[val]:
if op.name == "tt.reinterpret_tensor_descriptor":
assert len(op.args) >= 1
result.add(op.args[0])
return result
@MemoizeWithCycleCheck
def analyze_kernel_mutations(
functions: dict[str, dict[Intermediate, list[Op]]], fn_name: str, num_args: int
) -> list[bool]:
"""
Analyzes the graph to detect all sinks from a predefined list of sinks
by using triton's MemWrite trait list. NOTE: What if triton exposed this?
From each sink, it traverses the CFG backwards to identify all the input
pointers that are mutated.
"""
# Name of mutation op to mutated parameter indices
# List from Triton Github include/triton/Dialect/Triton/IR/TritonOps.td
# All the OPs that have MemWrite trait.
# What if Triton exposed this?
MUTATION_OPS = {
"tt.store": [0],
"tt.atomic_cas": [0],
"tt.atomic_rmw": [0],
"tt.experimental_descriptor_store": [0],
"tt.experimental_tensormap_create": [0],
"tt.descriptor_store": [0],
}
# Ops that we want to bail out on
UNKNOWN_OPS = {"tt.elementwise_inline_asm"}
stack: list[Union[Param, Intermediate]] = []
visited = set()
ops = functions[fn_name]
tma_stores = get_tma_stores(functions, fn_name)
for op_list in ops.values():
for op in op_list:
# If we encounter an operation with effects that cannot be reliably analyzed
# (e.g. `tt.elementwise_inline_asm`), we assume it does not mutate any input parameters.
if op.name in UNKNOWN_OPS:
if op.name == "tt.elementwise_inline_asm" and op.is_pure:
continue
raise RuntimeError(
f"ttir analysis hit an op we do not know how to analyze: {op.name}"
)
if op.name == "tt.experimental_tensormap_create":
# Note: this is how we implement experimental_descriptor_store mutation analysis.
# for on-device TMA.
# experimental_tensormap_store(a, b, ...) stores b to the location specified
# by descriptor in the memory of a.
# To track this, we first find all the intermediates/params to which we store via
# experimental_tensormap_store (get_tma_stores, called above). Then, during this
# analysis we wait to find the corresponding experimental_tensormap_create (if it
# exists), at which point we will mark the global_ptr as mutated (as done below).
assert len(op.args) >= 2
if op.args[0] in tma_stores:
stack.append(op.args[1])
if op.name == "tt.call":
assert op.fn_call_name in functions
mutations = analyze_kernel_mutations(
functions,
# pyrefly: ignore [bad-argument-type]
op.fn_call_name,
len(op.args),
)
stack.extend(arg for arg, mutated in zip(op.args, mutations) if mutated)
else:
stack.extend(op.args[idx] for idx in MUTATION_OPS.get(op.name, []))
# The following is an iterative DFS algorithm
mutated = [False] * num_args
while stack:
arg = stack.pop()
if arg in visited:
continue
visited.add(arg)
if isinstance(arg, Param):
if arg.idx >= num_args:
# This is an argument defined in the kernel, not passed in
continue
mutated[arg.idx] = True
elif isinstance(arg, Intermediate) and not arg.fake():
for op in ops[arg]:
# Skip arguments to load
if op.name != "tt.load":
stack.extend(op.args)
return mutated
def identify_mutated_tensors(
kernel: "TritonKernelType",
kwargs: dict[str, Any],
tma_descriptor_metadata: TMADescriptorMetadata,
) -> list[str]:
"""
Given a triton kernel and the arguments for this kernel, this function
1) Retrieves the TTIR converted version of the kernel from Triton's API.
2) Parses the TTIR and creates a control flow graph
3) Analyzes the graph to detect all input tensor mutations
"""
ttir_module = None
functions = None
try:
ttir_module, ordered_tensor_names = generate_ttir(
kernel, kwargs, tma_descriptor_metadata
)
# extract functions from TTIR using MLIR bindings exposed by Triton code
functions = ttir_to_functions(ttir_module)
assert functions is not None
kernel_name = next(iter(functions.keys()))
# Triton codegen modifies the name
# pyrefly: ignore [missing-attribute]
assert kernel.fn.__name__ in kernel_name
# Reset the cache between top level invocations
# The cache for analyze kernel mutations is mainly used for cycle
# detection, so each top level invocation needs a clean cache
analyze_kernel_mutations.reset()
get_tma_stores.reset()
mutations = analyze_kernel_mutations(
functions, kernel_name, len(ordered_tensor_names)
)
return [
ordered_tensor_names[i] for i, mutated in enumerate(mutations) if mutated
]
except Exception:
log.warning(
"Encountered an exception in identify_mutated_tensors, assuming every input is mutated",
exc_info=True,
)
if ttir_module is not None:
log.debug("TTIR:\n%s", str(ttir_module))
if functions is not None:
log.debug("functions:")
for name, fn in functions.items():
log.debug("===\t%s\t===", name)
for ret, ops in fn.items():
log.debug("%s\t=>\t%s", ret, ops)
return [key for key, value in kwargs.items() if isinstance(value, Tensor)]
###############################################################################
# Triton Kernel Wrappers
# Used for wrapping a Triton Kernel
| MemoizeWithCycleCheck |
python | walkccc__LeetCode | solutions/206. Reverse Linked List/206-2.py | {
"start": 0,
"end": 231
} | class ____:
def reverseList(self, head: ListNode | None) -> ListNode | None:
prev = None
curr = head
while curr:
next = curr.next
curr.next = prev
prev = curr
curr = next
return prev
| Solution |
python | jschneier__django-storages | tests/test_sftp.py | {
"start": 7943,
"end": 8974
} | class ____(TestCase):
def setUp(self):
self.storage = sftpstorage.SFTPStorage(host="foo")
self.file = sftpstorage.SFTPStorageFile("bar", self.storage, "wb")
@patch(
"storages.backends.sftpstorage.SFTPStorage.sftp",
**{
"stat.return_value.st_size": 42,
},
)
def test_size(self, mock_sftp):
self.assertEqual(self.file.size, 42)
@patch(
"storages.backends.sftpstorage.SFTPStorage.sftp",
**{
"open.return_value.read.return_value": b"foo",
},
)
def test_read(self, mock_sftp):
self.assertEqual(self.file.read(), b"foo")
self.assertTrue(mock_sftp.open.called)
def test_write(self):
self.file.write(b"foo")
self.assertEqual(self.file.file.read(), b"foo")
@patch("storages.backends.sftpstorage.SFTPStorage.sftp")
def test_close(self, mock_sftp):
self.file.write(b"foo")
self.file.close()
self.assertTrue(mock_sftp.putfo.called)
| SFTPStorageFileTest |
python | doocs__leetcode | solution/0900-0999/0922.Sort Array By Parity II/Solution.py | {
"start": 0,
"end": 304
} | class ____:
def sortArrayByParityII(self, nums: List[int]) -> List[int]:
n, j = len(nums), 1
for i in range(0, n, 2):
if nums[i] % 2:
while nums[j] % 2:
j += 2
nums[i], nums[j] = nums[j], nums[i]
return nums
| Solution |
python | PyCQA__pylint | tests/functional/ext/private_import/private_import.py | {
"start": 4303,
"end": 4483
} | class ____:
"""Ensure that an import statement precedes this case."""
def get_example(self):
example: Example = Example().save()
return example
| Regression6624 |
python | google__jax | tests/pallas/tpu_fusible_matmul_test.py | {
"start": 30776,
"end": 35513
} | class ____(jtu.JaxTestCase):
def setUp(self):
if not jtu.is_device_tpu_at_least(4):
self.skipTest('Only works with TPU v4+')
super().setUp()
def test_matmul_bf16_out(self):
if not jtu.is_device_tpu_at_least(4):
self.skipTest('TPU v4+ required')
dtype = jnp.bfloat16
k0, k1 = jax.random.split(jax.random.key(0), 2)
x = jax.random.normal(k0, (512, 256), dtype)
y = jax.random.normal(k1, (256, 384), dtype)
def matmul(impl, x, y):
z = impl(x, y)
return z
impl = fuser.fuse(
functools.partial(
matmul,
fusible_matmul,
)
)
ref = functools.partial(matmul, dot_ref)
# XLA should be bitwise equivalent.
self.assertAllClose(
jax.jit(impl)(x, y),
jax.jit(ref)(x, y),
atol=0,
)
def test_matmul_bf16_activation(self):
dtype = jnp.bfloat16
k0, k1 = jax.random.split(jax.random.key(0), 2)
x = jax.random.normal(k0, (512, 256), dtype)
y = jax.random.normal(k1, (256, 384), dtype)
def matmul(impl, x, y):
z = impl(x, y)
return jnp.exp(jnp.tanh(z))
ref = functools.partial(matmul, dot_ref)
out_ref = jit_no_excess_precision(ref)(x, y)
impl = fuser.fuse(functools.partial(matmul, fusible_matmul))
out = jax.jit(impl)(x, y)
self.assertAllClose(out, out_ref, atol=0)
def test_matmul_f32_out_simple(self):
dtype = jnp.bfloat16
k0, k1 = jax.random.split(jax.random.key(0), 2)
x = jax.random.normal(k0, (512, 256), dtype)
y = jax.random.normal(k1, (256, 384), dtype)
def matmul(impl, x, y):
z = impl(x, y)
return z
ref = functools.partial(matmul, mm_ref)
out_ref = jit_no_excess_precision(ref)(x, y)
impl = fuser.fuse(
functools.partial(
matmul,
functools.partial(fusible_matmul, bk=256, bn=128),
)
)
out = jax.jit(impl)(x, y)
atol = 0
if jtu.is_device_tpu_at_least(6):
# 256 MXU changes some tols.
atol = 1e-5
self.assertAllClose(out, out_ref, atol=atol)
def test_matmul_f32_out_fused_downcast(self):
    """Fused downcast-to-input-dtype epilogue is bit-identical to dot_ref."""
    key_lhs, key_rhs = jax.random.split(jax.random.key(0), 2)
    lhs = jax.random.normal(key_lhs, (2048, 2048), jnp.bfloat16)
    rhs = jax.random.normal(key_rhs, (2048, 2048), jnp.bfloat16)
    block_sizes = dict(bm=512, bk=256, bn=1024)

    def run(dot_impl, a, b):
        # Matmul, then cast the result back down to the input dtype.
        return dot_impl(a, b).astype(a.dtype)

    expected = jit_no_excess_precision(
        functools.partial(run, functools.partial(dot_ref, **block_sizes))
    )(lhs, rhs)
    fused = fuser.fuse(
        functools.partial(run, functools.partial(fusible_matmul, **block_sizes))
    )
    self.assertArraysEqual(jax.jit(fused)(lhs, rhs), expected)
def test_matmul_out_bf16_with_f32_activation(self):
    """Activation applied before the bf16 downcast matches dot_ref exactly."""
    if not jtu.is_device_tpu_at_least(4):
        self.skipTest('TPU v4+ required')
    key_lhs, key_rhs = jax.random.split(jax.random.key(0), 2)
    lhs = jax.random.normal(key_lhs, (2048, 2048), jnp.bfloat16)
    rhs = jax.random.normal(key_rhs, (2048, 2048), jnp.bfloat16)
    block_sizes = dict(bm=512, bk=256, bn=1024)

    def run(dot_impl, a, b):
        # Activation runs on the matmul output, then the result is cast
        # down to the input dtype.
        return jnp.exp(jnp.tanh(dot_impl(a, b))).astype(a.dtype)

    expected = jit_no_excess_precision(
        functools.partial(run, functools.partial(dot_ref, **block_sizes))
    )(lhs, rhs)
    fused = fuser.fuse(
        functools.partial(run, functools.partial(fusible_matmul, **block_sizes))
    )
    self.assertArraysEqual(jax.jit(fused)(lhs, rhs), expected)
def test_matmul_out_bf16_with_bf16_activation(self):
    """Same activation-plus-downcast pipeline, without the TPU v4 gate."""
    key_lhs, key_rhs = jax.random.split(jax.random.key(0), 2)
    lhs = jax.random.normal(key_lhs, (2048, 2048), jnp.bfloat16)
    rhs = jax.random.normal(key_rhs, (2048, 2048), jnp.bfloat16)
    block_sizes = dict(bm=512, bk=256, bn=1024)

    def run(dot_impl, a, b):
        # Activation on the matmul output, result cast to the input dtype.
        return jnp.exp(jnp.tanh(dot_impl(a, b))).astype(a.dtype)

    expected = jit_no_excess_precision(
        functools.partial(run, functools.partial(dot_ref, **block_sizes))
    )(lhs, rhs)
    fused = fuser.fuse(
        functools.partial(run, functools.partial(fusible_matmul, **block_sizes))
    )
    self.assertArraysEqual(jax.jit(fused)(lhs, rhs), expected)
# Script entry point: run the tests via absltest, using JAX's custom test
# loader (jtu.JaxTestLoader) rather than the default unittest loader.
if __name__ == '__main__':
    absltest.main(testLoader=jtu.JaxTestLoader())
| ExcessPrecisionTest |
python | redis__redis-py | tests/test_pubsub.py | {
"start": 37422,
"end": 41808
} | class ____:
def mysetup(self, r, method):
self.messages = queue.Queue()
self.pubsub = r.pubsub()
self.state = 0
self.cond = threading.Condition()
if method == "get_message":
self.get_message = self.loop_step_get_message
else:
self.get_message = self.loop_step_listen
self.thread = threading.Thread(target=self.loop)
self.thread.daemon = True
self.thread.start()
# get the initial connect message
message = self.messages.get(timeout=1)
assert message == {
"channel": b"foo",
"data": 1,
"pattern": None,
"type": "subscribe",
}
def wait_for_reconnect(self):
self.cond.wait_for(lambda: self.pubsub.connection._sock is not None, timeout=2)
assert self.pubsub.connection._sock is not None # we didn't time out
assert self.state == 3
message = self.messages.get(timeout=1)
assert message == {
"channel": b"foo",
"data": 1,
"pattern": None,
"type": "subscribe",
}
def mycleanup(self):
# kill thread
with self.cond:
self.state = 4 # quit
self.cond.notify()
self.thread.join()
def test_reconnect_socket_error(self, r: redis.Redis, method):
"""
Test that a socket error will cause reconnect
"""
self.mysetup(r, method)
try:
# now, disconnect the connection, and wait for it to be re-established
with self.cond:
self.state = 1
with mock.patch.object(self.pubsub.connection, "_parser") as mockobj:
mockobj.read_response.side_effect = socket.error
mockobj.can_read.side_effect = socket.error
# wait until thread notices the disconnect until we undo the patch
self.cond.wait_for(lambda: self.state >= 2)
assert (
self.pubsub.connection._sock is None
) # it is in a disconnected state
self.wait_for_reconnect()
finally:
self.mycleanup()
def test_reconnect_disconnect(self, r: redis.Redis, method):
"""
Test that a manual disconnect() will cause reconnect
"""
self.mysetup(r, method)
try:
# now, disconnect the connection, and wait for it to be re-established
with self.cond:
self.state = 1
self.pubsub.connection.disconnect()
assert self.pubsub.connection._sock is None
# wait for reconnect
self.wait_for_reconnect()
finally:
self.mycleanup()
def loop(self):
# reader loop, performing state transitions as it
# discovers disconnects and reconnects
self.pubsub.subscribe("foo")
while True:
time.sleep(0.01) # give main thread chance to get lock
with self.cond:
old_state = self.state
try:
if self.state == 4:
break
# print ('state, %s, sock %s' % (state, pubsub.connection._sock))
got_msg = self.get_message()
assert got_msg
if self.state in (1, 2):
self.state = 3 # successful reconnect
except redis.ConnectionError:
assert self.state in (1, 2)
self.state = 2
finally:
self.cond.notify()
# assert that we noticed a connect error, or automatically
# reconnected without error
if old_state == 1:
assert self.state in (2, 3)
def loop_step_get_message(self):
# get a single message via listen()
message = self.pubsub.get_message(timeout=0.1)
if message is not None:
self.messages.put(message)
return True
return False
def loop_step_listen(self):
# get a single message via listen()
for message in self.pubsub.listen():
self.messages.put(message)
return True
@pytest.mark.onlynoncluster
| TestPubSubAutoReconnect |
python | readthedocs__readthedocs.org | readthedocs/api/v3/mixins.py | {
"start": 487,
"end": 1565
} | class ____:
"""
Set the change_reason on the model changed through this API view.
The view should inherit one of:
- CreateModelMixin
- UpdateModelMixin
- DestroyModelMixin
Unlike the original methods,
these return the instance that was created/updated,
so they are easy to override without having to save the object twice.
"""
change_reason = None
def get_change_reason(self):
if self.change_reason:
return self.change_reason
klass = self.__class__.__name__
return f"origin=api-v3 class={klass}"
def perform_create(self, serializer):
obj = serializer.save()
safe_update_change_reason(obj, self.get_change_reason())
return obj
def perform_update(self, serializer):
set_change_reason(serializer.instance, self.get_change_reason())
obj = serializer.save()
return obj
def perform_destroy(self, instance):
set_change_reason(instance, self.get_change_reason())
super().perform_destroy(instance)
| UpdateChangeReasonMixin |
python | huggingface__transformers | tests/models/t5/test_modeling_t5.py | {
"start": 33619,
"end": 36116
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (T5EncoderModel, T5ForTokenClassification) if is_torch_available() else ()
test_resize_embeddings = False
pipeline_model_mapping = (
{
"token-classification": T5ForTokenClassification,
}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = T5EncoderOnlyModelTester(self)
self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_model_fp16_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
def test_with_token_classification_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_with_token_classification_head(*config_and_inputs)
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if tokenizer_name is None:
return True
# `T5EncoderOnlyModelTest` is not working well with slow tokenizers (for some models) and we don't want to touch the file
# `src/transformers/data/processors/squad.py` (where this test fails for this model)
if pipeline_test_case_name == "TokenClassificationPipelineTests" and not tokenizer_name.endswith("Fast"):
return True
return False
def use_task_specific_params(model, task):
task_params = model.config.task_specific_params[task]
# Get all valid GenerationConfig attributes
temp_config = GenerationConfig()
generation_config_attrs = set(temp_config.to_dict().keys())
for key, value in task_params.items():
if key in generation_config_attrs:
setattr(model.generation_config, key, value)
else:
setattr(model.config, key, value)
@require_torch
@require_accelerate
@require_tokenizers
@slow
| T5EncoderOnlyModelTest |
python | pytorch__pytorch | torch/_inductor/select_algorithm.py | {
"start": 4828,
"end": 7573
} | class ____:
"""
Some parts of a template need to be generated at the end, but
inserted into the template at the start. This allows doing a bunch
of replacements after the initial render.
"""
HookFn = Callable[[], str]
def __init__(
self, code: str, replacement_hooks: dict[str, Optional[HookFn]]
) -> None:
super().__init__()
self._code: str = code
self.replacement_hooks: dict[str, Optional[PartialRender.HookFn]] = (
replacement_hooks
)
@property
def code(self) -> str:
"""
The fully rendered code. Will **error** if any hooks have yet to be
finalized.
"""
remaining_active_hooks = [
key for key, fn in self.replacement_hooks.items() if fn is not None
]
assert len(remaining_active_hooks) == 0, (
f"The following hooks have not yet been finalized:\n {remaining_active_hooks=}"
)
return self._code
def finalize_hook(self, hook_key: str, strict: bool = True) -> None:
"""
Finalize a hook by name.
:param strict: If ``True``, raise an error if the hook wasn't found.
NOTE: Will **error** if the hook has already been finalized.
"""
if hook_key not in self.replacement_hooks:
if strict:
raise RuntimeError(
f"{hook_key} not registered in self.replacement_hooks"
)
else:
return
hook = self.replacement_hooks[hook_key]
assert hook is not None, f"Hook key {hook_key} can only be called once"
self._code = self._code.replace(hook_key, hook())
self.replacement_hooks[hook_key] = None
def finalize_remaining(self) -> str:
"""
Finalize the remaining active hooks. This function can be used in cases
where the caller uses `finalize_hook` rather than `finalize_all`.
Note: `finalize_all` errors if a hook that has already been finalized
is attempted to be called again. This function only attempts to
finalize active hooks.
"""
for key, fn in self.replacement_hooks.items():
if fn is not None:
self.finalize_hook(key)
return self.code
def finalize_all(self) -> str:
"""
Finalize all active hooks.
NOTE: unlike ``finalize_remaining``, this method will **error** if any
hook has already been finalized.
"""
for key in self.replacement_hooks:
self.finalize_hook(key)
return self.code
# This is used to store info needed for lowering each subgraph in triton
# templates
@dataclasses.dataclass()
| PartialRender |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/api_fastapi/datamodels/test_roles.py | {
"start": 1099,
"end": 5074
} | class ____:
def test_rolebody_accepts_actions_alias_and_maps_to_permissions(self):
data = {
"name": "viewer",
"actions": [
{"action": {"name": "can_read"}, "resource": {"name": "DAG"}},
{"action": {"name": "can_read"}, "resource": {"name": "Connection"}},
],
}
body = RoleBody.model_validate(data)
assert body.name == "viewer"
# Field(validation_alias="actions") should populate `permissions`
assert len(body.permissions) == 2
assert body.permissions[0].action.name == "can_read"
assert body.permissions[0].resource.name == "DAG"
def test_rolebody_defaults_permissions_to_empty_when_actions_missing(self):
body = RoleBody.model_validate({"name": "empty"})
assert body.name == "empty"
assert body.permissions == []
def test_rolebody_name_min_length_enforced(self):
with pytest.raises(ValidationError):
RoleBody.model_validate({"name": "", "actions": []})
def test_roleresponse_serializes_permissions_under_actions_alias(self):
ar = ActionResourceResponse(
action=ActionResponse(name="can_read"),
resource=ResourceResponse(name="DAG"),
)
rr = RoleResponse(name="viewer", permissions=[ar])
dumped = rr.model_dump(by_alias=True)
# Field(serialization_alias="actions") should rename `permissions` -> `actions`
assert "actions" in dumped
assert "permissions" not in dumped
assert dumped["name"] == "viewer"
assert dumped["actions"][0]["action"]["name"] == "can_read"
assert dumped["actions"][0]["resource"]["name"] == "DAG"
def test_roleresponse_model_validate_from_simple_namespace(self):
# Service returns plain objects; ensure model_validate handles them
obj = types.SimpleNamespace(
name="viewer",
permissions=[
types.SimpleNamespace(
action=types.SimpleNamespace(name="can_read"),
resource=types.SimpleNamespace(name="DAG"),
)
],
)
rr = RoleResponse.model_validate(obj)
assert rr.name == "viewer"
assert rr.permissions
first = rr.permissions[0]
assert first.action.name == "can_read"
def test_rolecollection_response_dump_and_counts(self):
ar = ActionResourceResponse(
action=ActionResponse(name="can_read"),
resource=ResourceResponse(name="DAG"),
)
rc = RoleCollectionResponse(
roles=[RoleResponse(name="viewer", permissions=[ar])],
total_entries=1,
)
dumped = rc.model_dump(by_alias=True)
assert dumped["total_entries"] == 1
assert isinstance(dumped["roles"], list)
assert dumped["roles"][0]["name"] == "viewer"
assert "actions" in dumped["roles"][0]
assert "permissions" not in dumped["roles"][0]
def test_rolecollection_model_validate_from_objects(self):
obj = types.SimpleNamespace(
roles=[
types.SimpleNamespace(
name="admin",
permissions=[
types.SimpleNamespace(
action=types.SimpleNamespace(name="can_read"),
resource=types.SimpleNamespace(name="DAG"),
)
],
)
],
total_entries=1,
)
rc = RoleCollectionResponse.model_validate(obj)
assert rc.total_entries == 1
assert len(rc.roles) == 1
assert rc.roles[0].name == "admin"
assert rc.roles[0].permissions[0].action.name == "can_read"
def test_rolecollection_missing_total_entries_raises(self):
with pytest.raises(ValidationError):
RoleCollectionResponse.model_validate({"roles": []})
| TestRoleModels |
python | streamlit__streamlit | lib/tests/streamlit/elements/arrow_dataframe_test.py | {
"start": 20207,
"end": 20687
} | class ____(DeltaGeneratorTestCase):
"""Test Public Streamlit Public APIs."""
def test_table(self):
"""Test st.table."""
from streamlit.dataframe_util import convert_arrow_bytes_to_pandas_df
df = pd.DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"])
st.table(df)
proto = self.get_delta_from_queue().new_element.arrow_table
pd.testing.assert_frame_equal(convert_arrow_bytes_to_pandas_df(proto.data), df)
| StArrowTableAPITest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-jira/integration_tests/fixtures/data_generator/streams.py | {
"start": 9391,
"end": 10467
} | class ____(IssueWorklogs, GeneratorMixin):
"""
https://developer.atlassian.com/cloud/jira/platform/rest/v3/api-group-issue-worklogs/#api-rest-api-3-issue-issueidorkey-worklog-id-get
"""
def generate(self):
issues_stream = Issues(authenticator=self._session.auth, domain=self._domain)
for issue in issues_stream.read_records(sync_mode=SyncMode.full_refresh):
for index in range(random.randrange(1, 6)):
payload = json.dumps(
{
"timeSpentSeconds": random.randrange(600, 12000),
"comment": {
"type": "doc",
"version": 1,
"content": [{"type": "paragraph", "content": [{"text": f"I did some work here. {index}", "type": "text"}]}],
},
"started": "2021-04-15T01:48:52.747+0000",
}
)
self.generate_record(payload, stream_slice={"key": issue["key"]})
| IssueWorklogsGenerator |
python | h5py__h5py | h5py/tests/test_objects.py | {
"start": 414,
"end": 2792
} | class ____(TestCase):
def test_invalid(self):
# Check for segfault on close
oid = o.ObjectID(0)
del oid
oid = o.ObjectID(1)
del oid
def test_equality(self):
# Identifier-based equality
oid1 = o.ObjectID(42)
oid2 = o.ObjectID(42)
oid3 = o.ObjectID(43)
self.assertEqual(oid1, oid2)
self.assertNotEqual(oid1, oid3)
def test_hash(self):
# Default objects are not hashable
oid = o.ObjectID(42)
with self.assertRaises(TypeError):
hash(oid)
@pytest.mark.thread_unsafe(reason="fork() from a thread may deadlock")
def test_phil_fork_with_threads(self):
# Test that handling of the phil Lock after fork is correct.
# We simulate a deadlock in the forked process by explicitly
# waiting for the phil Lock to be acquired in a different thread
# before forking.
# On Windows forking (and the register_at_fork handler)
# are not available, skip this test.
if not hasattr(os, "fork"):
raise SkipTest("os.fork not available")
thread_acquired_phil_event = threading.Event()
def f():
o.phil.acquire()
try:
thread_acquired_phil_event.set()
time.sleep(1)
finally:
o.phil.release()
thread = threading.Thread(target=f)
thread.start()
try:
# wait for the thread running "f" to have acquired the phil lock
thread_acquired_phil_event.wait()
# now fork the current (main) thread while the other thread holds the lock
pid = os.fork()
if pid == 0:
# child process
# If we handle the phil lock correctly, this should not deadlock,
# and we should be able to acquire the lock here.
if o.phil.acquire(blocking=False):
o.phil.release()
os._exit(0)
else:
os._exit(1)
else:
# parent process
# wait for the child process to finish
_, status = os.waitpid(pid, 0)
assert os.WIFEXITED(status)
assert os.WEXITSTATUS(status) == 0
finally:
thread.join()
| TestObjects |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0141_create_addonsconfig.py | {
"start": 645,
"end": 873
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("projects", "0140_addons_options_base_version"),
]
operations = [
migrations.RunPython(forwards_func),
]
| Migration |
python | modin-project__modin | modin/core/execution/dask/implementations/pandas_on_dask/partitioning/virtual_partition.py | {
"start": 8322,
"end": 10394
} | class ____(PandasOnDaskDataframeVirtualPartition):
axis = 1
def _deploy_dask_func(
deployer,
axis,
f_to_deploy,
f_args,
f_kwargs,
*args,
extract_metadata=True,
**kwargs,
):
"""
Execute a function on an axis partition in a worker process.
This is ALWAYS called on either ``PandasDataframeAxisPartition.deploy_axis_func``
or ``PandasDataframeAxisPartition.deploy_func_between_two_axis_partitions``, which both
serve to deploy another dataframe function on a Dask worker process.
Parameters
----------
deployer : callable
A `PandasDataFrameAxisPartition.deploy_*` method that will call `deploy_f`.
axis : {0, 1}
The axis to perform the function along.
f_to_deploy : callable or RayObjectID
The function to deploy.
f_args : list or tuple
Positional arguments to pass to ``f_to_deploy``.
f_kwargs : dict
Keyword arguments to pass to ``f_to_deploy``.
*args : list
Positional arguments to pass to ``func``.
extract_metadata : bool, default: True
Whether to return metadata (length, width, ip) of the result. Passing `False` may relax
the load on object storage as the remote function would return 4 times fewer futures.
Passing `False` makes sense for temporary results where you know for sure that the
metadata will never be requested.
**kwargs : dict
Keyword arguments to pass to ``func``.
Returns
-------
list
The result of the function ``func`` and metadata for it.
"""
result = deployer(axis, f_to_deploy, f_args, f_kwargs, *args, **kwargs)
if not extract_metadata:
return result
ip = get_ip()
if isinstance(result, pandas.DataFrame):
return result, len(result), len(result.columns), ip
elif all(isinstance(r, pandas.DataFrame) for r in result):
return [i for r in result for i in [r, len(r), len(r.columns), ip]]
else:
return [i for r in result for i in [r, None, None, ip]]
| PandasOnDaskDataframeRowPartition |
python | google__jax | jax/_src/state/types.py | {
"start": 6365,
"end": 7452
} | class ____:
permutation: tuple[int, ...] = dataclasses.field(metadata=dict(static=True))
@classmethod
def from_ref_new_permutation(
cls, ref_or_view: Any, *perm: int
) -> RefTransposer:
if len(perm) == 1 and isinstance(perm[0], tuple):
perm = perm[0]
if len(perm) != ref_or_view.ndim:
raise ValueError(
f"Permutation {perm} does not match the rank of the ref"
f" ({ref_or_view.ndim})"
)
return cls(perm)
def transform_shape(
self, shape: tuple[int | Array, ...] | None
) -> tuple[int | Array, ...] | None:
if shape is None:
return None
return tuple(shape[i] for i in self.permutation)
def transform_dtype(self, dtype):
return dtype
def transform_sharding(self, sharding):
# If there are no explicit axes, do nothing.
if all(p is None for p in sharding.spec):
return sharding
raise NotImplementedError
def pretty_print(self, context: core.JaxprPpContext) -> pp.Doc:
del context # Unused.
return pp.text(f"{{transpose({list(self.permutation)})}}")
| RefTransposer |
python | PrefectHQ__prefect | src/prefect/server/orchestration/core_policy.py | {
"start": 4792,
"end": 6025
} | class ____(TaskRunOrchestrationPolicy):
"""
Orchestration rules that run against task-run-state transitions in priority order,
specifically for clients doing client-side orchestration.
"""
@staticmethod
def priority() -> list[
Union[
type[BaseUniversalTransform[orm_models.TaskRun, core.TaskRunPolicy]],
type[BaseOrchestrationRule[orm_models.TaskRun, core.TaskRunPolicy]],
]
]:
return cast(
list[
Union[
type[
BaseUniversalTransform[orm_models.TaskRun, core.TaskRunPolicy]
],
type[BaseOrchestrationRule[orm_models.TaskRun, core.TaskRunPolicy]],
]
],
[
CacheRetrieval,
HandleTaskTerminalStateTransitions,
PreventRunningTasksFromStoppedFlows,
CopyScheduledTime,
WaitForScheduledTime,
RetryFailedTasks,
RenameReruns,
UpdateFlowRunTrackerOnTasks,
CacheInsertion,
ReleaseTaskConcurrencySlots,
],
)
| ClientSideTaskOrchestrationPolicy |
python | kamyu104__LeetCode-Solutions | Python/design-hashset.py | {
"start": 1026,
"end": 1935
} | class ____(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.__data = [LinkedList() for _ in xrange(10000)]
def add(self, key):
"""
:type key: int
:rtype: void
"""
l = self.__data[key % len(self.__data)]
node = l.find(key)
if not node:
l.insert(ListNode(key, 0))
def remove(self, key):
"""
:type key: int
:rtype: void
"""
l = self.__data[key % len(self.__data)]
node = l.find(key)
if node:
l.delete(node)
def contains(self, key):
"""
Returns true if this set did not already contain the specified element
:type key: int
:rtype: bool
"""
l = self.__data[key % len(self.__data)]
node = l.find(key)
return node is not None
| MyHashSet |
python | bokeh__bokeh | tests/unit/bokeh/document/test_events__document.py | {
"start": 2102,
"end": 2172
} | class ____(Model):
data = ColumnData(Any, Any, default={})
| OtherModel |
python | palantir__python-language-server | pyls/python_ls.py | {
"start": 644,
"end": 3190
} | class ____(socketserver.StreamRequestHandler, object):
"""A wrapper class that is used to construct a custom handler class."""
delegate = None
def setup(self):
super(_StreamHandlerWrapper, self).setup()
# pylint: disable=no-member
self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile)
def handle(self):
try:
self.delegate.start()
except OSError as e:
if os.name == 'nt':
# Catch and pass on ConnectionResetError when parent process
# dies
# pylint: disable=no-member, undefined-variable
if isinstance(e, WindowsError) and e.winerror == 10054:
pass
# pylint: disable=no-member
self.SHUTDOWN_CALL()
def start_tcp_lang_server(bind_addr, port, check_parent_process, handler_class):
if not issubclass(handler_class, PythonLanguageServer):
raise ValueError('Handler class must be an instance of PythonLanguageServer')
def shutdown_server(check_parent_process, *args):
# pylint: disable=unused-argument
if check_parent_process:
log.debug('Shutting down server')
# Shutdown call must be done on a thread, to prevent deadlocks
stop_thread = threading.Thread(target=server.shutdown)
stop_thread.start()
# Construct a custom wrapper class around the user's handler_class
wrapper_class = type(
handler_class.__name__ + 'Handler',
(_StreamHandlerWrapper,),
{'DELEGATE_CLASS': partial(handler_class,
check_parent_process=check_parent_process),
'SHUTDOWN_CALL': partial(shutdown_server, check_parent_process)}
)
server = socketserver.TCPServer((bind_addr, port), wrapper_class, bind_and_activate=False)
server.allow_reuse_address = True
try:
server.server_bind()
server.server_activate()
log.info('Serving %s on (%s, %s)', handler_class.__name__, bind_addr, port)
server.serve_forever()
finally:
log.info('Shutting down')
server.server_close()
def start_io_lang_server(rfile, wfile, check_parent_process, handler_class):
if not issubclass(handler_class, PythonLanguageServer):
raise ValueError('Handler class must be an instance of PythonLanguageServer')
log.info('Starting %s IO language server', handler_class.__name__)
server = handler_class(rfile, wfile, check_parent_process)
server.start()
| _StreamHandlerWrapper |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 930543,
"end": 931016
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of ReopenDiscussion"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "discussion")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
discussion = sgqlc.types.Field("Discussion", graphql_name="discussion")
"""The discussion that was reopened."""
| ReopenDiscussionPayload |
python | django__django | django/templatetags/static.py | {
"start": 225,
"end": 2421
} | class ____(template.Node):
def __repr__(self):
return "<PrefixNode for %r>" % self.name
def __init__(self, varname=None, name=None):
if name is None:
raise template.TemplateSyntaxError(
"Prefix nodes must be given a name to return."
)
self.varname = varname
self.name = name
@classmethod
def handle_token(cls, parser, token, name):
"""
Class method to parse prefix node and return a Node.
"""
# token.split_contents() isn't useful here because tags using this
# method don't accept variable as arguments.
tokens = token.contents.split()
if len(tokens) > 1 and tokens[1] != "as":
raise template.TemplateSyntaxError(
"First argument in '%s' must be 'as'" % tokens[0]
)
if len(tokens) > 1:
varname = tokens[2]
else:
varname = None
return cls(varname, name)
@classmethod
def handle_simple(cls, name):
try:
from django.conf import settings
except ImportError:
prefix = ""
else:
prefix = iri_to_uri(getattr(settings, name, ""))
return prefix
def render(self, context):
prefix = self.handle_simple(self.name)
if self.varname is None:
return prefix
context[self.varname] = prefix
return ""
@register.tag
def get_static_prefix(parser, token):
"""
Populate a template variable with the static prefix,
``settings.STATIC_URL``.
Usage::
{% get_static_prefix [as varname] %}
Examples::
{% get_static_prefix %}
{% get_static_prefix as static_prefix %}
"""
return PrefixNode.handle_token(parser, token, "STATIC_URL")
@register.tag
def get_media_prefix(parser, token):
"""
Populate a template variable with the media prefix,
``settings.MEDIA_URL``.
Usage::
{% get_media_prefix [as varname] %}
Examples::
{% get_media_prefix %}
{% get_media_prefix as media_prefix %}
"""
return PrefixNode.handle_token(parser, token, "MEDIA_URL")
| PrefixNode |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/property16.py | {
"start": 217,
"end": 337
} | class ____(Generic[T]):
@property
def prop1(self) -> T: ...
@property
def prop2(self) -> Self: ...
| Parent |
python | spack__spack | lib/spack/spack/build_environment.py | {
"start": 49276,
"end": 58411
} | class ____:
"""Class used to manage builds launched by Spack.
Each build is launched in its own child process, and the main Spack process
tracks each child with a ``BuildProcess`` object. ``BuildProcess`` is used to:
- Start and monitor an active child process.
- Clean up its processes and resources when the child process completes.
- Kill the child process if needed.
See also ``start_build_process()`` and ``complete_build_process()``.
"""
def __init__(
self,
*,
target: Callable,
args: Tuple[Any, ...],
pkg: "spack.package_base.PackageBase",
read_pipe: Connection,
timeout: Optional[int],
) -> None:
self.p = multiprocessing.Process(target=target, args=args)
self.pkg = pkg
self.read_pipe = read_pipe
self.timeout = timeout
def start(self) -> None:
self.p.start()
def poll(self) -> bool:
"""Check if there is data available to receive from the read pipe."""
return self.read_pipe.poll()
def complete(self):
"""Wait (if needed) for child process to complete
and return its exit status.
See ``complete_build_process()``.
"""
return complete_build_process(self)
def is_alive(self) -> bool:
return self.p.is_alive()
def join(self, *, timeout: Optional[int] = None):
self.p.join(timeout=timeout)
def terminate(self):
# Opportunity for graceful termination
self.p.terminate()
self.p.join(timeout=1)
# If the process didn't gracefully terminate, forcefully kill
if self.p.is_alive():
# TODO (python 3.6 removal): use self.p.kill() instead, consider removing this class
assert isinstance(self.p.pid, int), f"unexpected value for PID: {self.p.pid}"
os.kill(self.p.pid, signal.SIGKILL)
self.p.join()
@property
def pid(self):
return self.p.pid
@property
def exitcode(self):
return self.p.exitcode
def start_build_process(
pkg: "spack.package_base.PackageBase",
function: Callable,
kwargs: Dict[str, Any],
*,
timeout: Optional[int] = None,
) -> BuildProcess:
"""Create a child process to do part of a spack build.
Args:
pkg: package whose environment we should set up the
child process for.
function: argless function to run in the child
process.
kwargs: additional keyword arguments to pass to ``function()``
timeout: maximum time allowed to finish the execution of function
Usage::
def child_fun():
# do stuff
process = build_env.start_build_process(pkg, child_fun)
complete_build_process(process)
The child process is run with the build environment set up by
spack.build_environment. This allows package authors to have full
control over the environment, etc. without affecting other builds
that might be executed in the same spack call.
"""
read_pipe, write_pipe = multiprocessing.Pipe(duplex=False)
input_fd = None
jobserver_fd1 = None
jobserver_fd2 = None
serialized_pkg = spack.subprocess_context.PackageInstallContext(pkg)
try:
# Forward sys.stdin when appropriate, to allow toggling verbosity
if sys.platform != "win32" and sys.stdin.isatty() and hasattr(sys.stdin, "fileno"):
input_fd = Connection(os.dup(sys.stdin.fileno()))
mflags = os.environ.get("MAKEFLAGS")
if mflags is not None:
m = re.search(r"--jobserver-[^=]*=(\d),(\d)", mflags)
if m:
jobserver_fd1 = Connection(int(m.group(1)))
jobserver_fd2 = Connection(int(m.group(2)))
p = BuildProcess(
target=_setup_pkg_and_run,
args=(
serialized_pkg,
function,
kwargs,
write_pipe,
input_fd,
jobserver_fd1,
jobserver_fd2,
),
read_pipe=read_pipe,
timeout=timeout,
pkg=pkg,
)
p.start()
# We close the writable end of the pipe now to be sure that p is the
# only process which owns a handle for it. This ensures that when p
# closes its handle for the writable end, read_pipe.recv() will
# promptly report the readable end as being ready.
write_pipe.close()
except InstallError as e:
e.pkg = pkg
raise
finally:
# Close the input stream in the parent process
if input_fd is not None:
input_fd.close()
return p
def complete_build_process(process: BuildProcess):
"""
Wait for the child process to complete and handles its exit status.
If something goes wrong, the child process catches the error and
passes it to the parent wrapped in a ChildError. The parent is
expected to handle (or re-raise) the ChildError.
"""
def exitcode_msg(process):
typ = "exit" if process.exitcode >= 0 else "signal"
return f"{typ} {abs(process.exitcode)}"
try:
# Check if information from the read pipe has been received.
child_result = process.read_pipe.recv()
except EOFError:
raise InstallError(f"The process has stopped unexpectedly ({exitcode_msg(process)})")
finally:
timeout = process.timeout
process.join(timeout=timeout)
if process.is_alive():
warnings.warn(f"Terminating process, since the timeout of {timeout}s was exceeded")
process.terminate()
# If returns a StopPhase, raise it
if isinstance(child_result, spack.error.StopPhase):
raise child_result
# let the caller know which package went wrong.
if isinstance(child_result, InstallError):
child_result.pkg = process.pkg
if isinstance(child_result, ChildError):
# If the child process raised an error, print its output here rather
# than waiting until the call to SpackError.die() in main(). This
# allows exception handling output to be logged from within Spack.
# see spack.main.SpackCommand.
child_result.print_context()
raise child_result
# Fallback. Usually caught beforehand in EOFError above.
if process.exitcode != 0:
raise InstallError(f"The process failed unexpectedly ({exitcode_msg(process)})")
return child_result
#: Base classes whose proper subclasses provide useful error context in
#: ``get_package_context``; failures inside the bases themselves are skipped.
CONTEXT_BASES = (spack.package_base.PackageBase, spack.builder.BaseBuilder)


def get_package_context(traceback, context=3):
    """Return some context for an error message when the build fails.

    Args:
        traceback: A traceback from some exception raised during
            install
        context (int): Lines of context to show before and after the line
            where the error happened

    Returns:
        A list of formatted source lines around the failure point, or None
        when the failure did not happen inside a package/builder subclass.

    This function inspects the stack to find where we failed in the
    package file, and it adds detailed context to the long_message
    from there.
    """

    def make_stack(tb, stack=None):
        """Tracebacks come out of the system in caller -> callee order. Return
        an array in callee -> caller order so we can traverse it."""
        if stack is None:
            stack = []
        if tb is not None:
            make_stack(tb.tb_next, stack)
        stack.append(tb)
        return stack

    stack = make_stack(traceback)
    basenames = tuple(base.__name__ for base in CONTEXT_BASES)
    for tb in stack:
        frame = tb.tb_frame
        if "self" in frame.f_locals:
            # Find the first proper subclass of the PackageBase or BaseBuilder, but
            # don't provide context if the code is actually in the base classes.
            obj = frame.f_locals["self"]
            func = getattr(obj, tb.tb_frame.f_code.co_name, "")
            if func and hasattr(func, "__qualname__"):
                typename, *_ = func.__qualname__.partition(".")
                if isinstance(obj, CONTEXT_BASES) and typename not in basenames:
                    break
    else:
        return None

    # We found obj, the Package implementation we care about.
    # Point out the location in the install method where we failed.
    # Use the actual source file path (previously a "(unknown)" placeholder
    # even though ``filename`` was computed and then never used).
    filename = inspect.getfile(frame.f_code)
    lines = [f"{filename}:{frame.f_lineno}, in {frame.f_code.co_name}:"]

    # Build a message showing context in the install method.
    sourcelines, start = inspect.getsourcelines(frame)

    # Calculate lineno of the error relative to the start of the function.
    fun_lineno = frame.f_lineno - start
    start_ctx = max(0, fun_lineno - context)
    sourcelines = sourcelines[start_ctx : fun_lineno + context + 1]

    for i, line in enumerate(sourcelines):
        is_error = start_ctx + i == fun_lineno
        # Add start to get lineno relative to start of file, not function.
        marked = f"  {'>> ' if is_error else '   '}{start + start_ctx + i:-6d}{line.rstrip()}"
        if is_error:
            marked = colorize("@R{%s}" % cescape(marked))
        lines.append(marked)

    return lines
| BuildProcess |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/array_ops_test.py | {
"start": 40784,
"end": 42950
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
"""Test that strided slice's custom gradient produces correct gradients."""
@parameterized.parameters(set((True, context.executing_eagerly())))
@test_util.disable_xla(
"b/210077724: Auto-clustering with where op isn't supported. Has loose "
"output shape bounds")
def testGradient(self, use_tape):
with test_util.device(use_gpu=True):
var = variables.Variable(
array_ops.reshape(
math_ops.range(1, 97, 1, dtype=dtypes.float32), shape=(6, 4, 4)))
self.evaluate(var.initializer)
raw = np.array(range(1, 97, 1)).reshape((6, 4, 4))
grad = GradSliceChecker(self, var, raw, use_tape)
_ = grad[2:6:2, 1:3, 1:3]
_ = grad[3:0:-2, 1:3, 1:3]
_ = grad[3:0:-2, array_ops.newaxis, 1:3, 2, array_ops.newaxis]
_ = grad[3:0:-2, 1:3, 2]
_ = grad[:, -1, :]
_ = grad[:, -2, :]
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"out of bounds"):
_ = grad[:, -200, :]
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"out of bounds"):
_ = grad[:, 200, :]
# Test numpy array type mask
_ = grad[raw > 51]
# Test tensor type mask
_ = grad[ops.convert_to_tensor(raw) <= 76]
@parameterized.parameters(set((True, context.executing_eagerly())))
def testGradientZero(self, use_tape):
with test_util.device(use_gpu=True):
var = variables.Variable(8.)
self.evaluate(var.initializer)
grad = GradSliceChecker(self, var, np.array(8), use_tape)
_ = grad[tuple()]
@parameterized.parameters(set((True, context.executing_eagerly())))
def testInt64Indices(self, use_tape):
with test_util.AbstractGradientTape(use_tape=use_tape) as tape:
a = math_ops.range(3, dtype=dtypes.float32)
tape.watch(a)
index = constant_op.constant(1, dtype=dtypes.int64)
b = 2. * a[index]
grad = tape.gradient(b, a)
self.assertAllEqual(self.evaluate(grad), [0., 2., 0.])
| StridedSliceGradTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingTypeEquals1.py | {
"start": 774,
"end": 962
} | class ____(A):
pass
def func4(a: str | A):
if type(a) == B:
reveal_type(a, expected_text="B")
else:
reveal_type(a, expected_text="str | A")
T = TypeVar("T")
| B |
python | getsentry__sentry | src/sentry/auth/providers/saml2/provider.py | {
"start": 3502,
"end": 5100
} | class ____(BaseView):
@method_decorator(csrf_exempt)
def dispatch(self, request: HttpRequest, organization_slug: str) -> HttpResponseBase:
from sentry.auth.helper import AuthHelper
pipeline = AuthHelper.get_for_request(request)
# SP initiated authentication, request helper is provided
if pipeline:
from sentry.web.frontend.auth_provider_login import AuthProviderLoginView
sso_login = AuthProviderLoginView()
return sso_login.handle(request)
# IdP initiated authentication. The organization_slug must be valid and
# an auth provider must exist for this organization to proceed with
# IdP initiated SAML auth.
org_context = organization_service.get_organization_by_slug(
slug=organization_slug, only_visible=False
)
if org_context is None:
messages.add_message(request, messages.ERROR, ERR_NO_SAML_SSO)
return self.redirect(reverse("sentry-login"))
try:
auth_provider = AuthProvider.objects.get(organization_id=org_context.organization.id)
except AuthProvider.DoesNotExist:
messages.add_message(request, messages.ERROR, ERR_NO_SAML_SSO)
return self.redirect(reverse("sentry-login"))
pipeline = AuthHelper(
request=request,
organization=(org_context.organization),
auth_provider=auth_provider,
flow=FLOW_LOGIN,
)
pipeline.initialize()
return pipeline.current_step()
@control_silo_view
| SAML2AcceptACSView |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/key_binding/key_processor.py | {
"start": 1935,
"end": 14744
} | class ____:
"""
Statemachine that receives :class:`KeyPress` instances and according to the
key bindings in the given :class:`KeyBindings`, calls the matching handlers.
::
p = KeyProcessor(key_bindings)
# Send keys into the processor.
p.feed(KeyPress(Keys.ControlX, '\x18'))
p.feed(KeyPress(Keys.ControlC, '\x03')
# Process all the keys in the queue.
p.process_keys()
# Now the ControlX-ControlC callback will be called if this sequence is
# registered in the key bindings.
:param key_bindings: `KeyBindingsBase` instance.
"""
def __init__(self, key_bindings: KeyBindingsBase) -> None:
self._bindings = key_bindings
self.before_key_press = Event(self)
self.after_key_press = Event(self)
self._flush_wait_task: Task[None] | None = None
self.reset()
def reset(self) -> None:
self._previous_key_sequence: list[KeyPress] = []
self._previous_handler: Binding | None = None
# The queue of keys not yet send to our _process generator/state machine.
self.input_queue: deque[KeyPress] = deque()
# The key buffer that is matched in the generator state machine.
# (This is at at most the amount of keys that make up for one key binding.)
self.key_buffer: list[KeyPress] = []
#: Readline argument (for repetition of commands.)
#: https://www.gnu.org/software/bash/manual/html_node/Readline-Arguments.html
self.arg: str | None = None
# Start the processor coroutine.
self._process_coroutine = self._process()
self._process_coroutine.send(None) # type: ignore
def _get_matches(self, key_presses: list[KeyPress]) -> list[Binding]:
"""
For a list of :class:`KeyPress` instances. Give the matching handlers
that would handle this.
"""
keys = tuple(k.key for k in key_presses)
# Try match, with mode flag
return [b for b in self._bindings.get_bindings_for_keys(keys) if b.filter()]
def _is_prefix_of_longer_match(self, key_presses: list[KeyPress]) -> bool:
"""
For a list of :class:`KeyPress` instances. Return True if there is any
handler that is bound to a suffix of this keys.
"""
keys = tuple(k.key for k in key_presses)
# Get the filters for all the key bindings that have a longer match.
# Note that we transform it into a `set`, because we don't care about
# the actual bindings and executing it more than once doesn't make
# sense. (Many key bindings share the same filter.)
filters = {
b.filter for b in self._bindings.get_bindings_starting_with_keys(keys)
}
# When any key binding is active, return True.
return any(f() for f in filters)
def _process(self) -> Generator[None, KeyPress, None]:
"""
Coroutine implementing the key match algorithm. Key strokes are sent
into this generator, and it calls the appropriate handlers.
"""
buffer = self.key_buffer
retry = False
while True:
flush = False
if retry:
retry = False
else:
key = yield
if key is _Flush:
flush = True
else:
buffer.append(key)
# If we have some key presses, check for matches.
if buffer:
matches = self._get_matches(buffer)
if flush:
is_prefix_of_longer_match = False
else:
is_prefix_of_longer_match = self._is_prefix_of_longer_match(buffer)
# When eager matches were found, give priority to them and also
# ignore all the longer matches.
eager_matches = [m for m in matches if m.eager()]
if eager_matches:
matches = eager_matches
is_prefix_of_longer_match = False
# Exact matches found, call handler.
if not is_prefix_of_longer_match and matches:
self._call_handler(matches[-1], key_sequence=buffer[:])
del buffer[:] # Keep reference.
# No match found.
elif not is_prefix_of_longer_match and not matches:
retry = True
found = False
# Loop over the input, try longest match first and shift.
for i in range(len(buffer), 0, -1):
matches = self._get_matches(buffer[:i])
if matches:
self._call_handler(matches[-1], key_sequence=buffer[:i])
del buffer[:i]
found = True
break
if not found:
del buffer[:1]
def feed(self, key_press: KeyPress, first: bool = False) -> None:
"""
Add a new :class:`KeyPress` to the input queue.
(Don't forget to call `process_keys` in order to process the queue.)
:param first: If true, insert before everything else.
"""
if first:
self.input_queue.appendleft(key_press)
else:
self.input_queue.append(key_press)
def feed_multiple(self, key_presses: list[KeyPress], first: bool = False) -> None:
"""
:param first: If true, insert before everything else.
"""
if first:
self.input_queue.extendleft(reversed(key_presses))
else:
self.input_queue.extend(key_presses)
def process_keys(self) -> None:
"""
Process all the keys in the `input_queue`.
(To be called after `feed`.)
Note: because of the `feed`/`process_keys` separation, it is
possible to call `feed` from inside a key binding.
This function keeps looping until the queue is empty.
"""
app = get_app()
def not_empty() -> bool:
# When the application result is set, stop processing keys. (E.g.
# if ENTER was received, followed by a few additional key strokes,
# leave the other keys in the queue.)
if app.is_done:
# But if there are still CPRResponse keys in the queue, these
# need to be processed.
return any(k for k in self.input_queue if k.key == Keys.CPRResponse)
else:
return bool(self.input_queue)
def get_next() -> KeyPress:
if app.is_done:
# Only process CPR responses. Everything else is typeahead.
cpr = [k for k in self.input_queue if k.key == Keys.CPRResponse][0]
self.input_queue.remove(cpr)
return cpr
else:
return self.input_queue.popleft()
is_flush = False
while not_empty():
# Process next key.
key_press = get_next()
is_flush = key_press is _Flush
is_cpr = key_press.key == Keys.CPRResponse
if not is_flush and not is_cpr:
self.before_key_press.fire()
try:
self._process_coroutine.send(key_press)
except Exception:
# If for some reason something goes wrong in the parser, (maybe
# an exception was raised) restart the processor for next time.
self.reset()
self.empty_queue()
raise
if not is_flush and not is_cpr:
self.after_key_press.fire()
# Skip timeout if the last key was flush.
if not is_flush:
self._start_timeout()
def empty_queue(self) -> list[KeyPress]:
"""
Empty the input queue. Return the unprocessed input.
"""
key_presses = list(self.input_queue)
self.input_queue.clear()
# Filter out CPRs. We don't want to return these.
key_presses = [k for k in key_presses if k.key != Keys.CPRResponse]
return key_presses
def _call_handler(self, handler: Binding, key_sequence: list[KeyPress]) -> None:
app = get_app()
was_recording_emacs = app.emacs_state.is_recording
was_recording_vi = bool(app.vi_state.recording_register)
was_temporary_navigation_mode = app.vi_state.temporary_navigation_mode
arg = self.arg
self.arg = None
event = KeyPressEvent(
weakref.ref(self),
arg=arg,
key_sequence=key_sequence,
previous_key_sequence=self._previous_key_sequence,
is_repeat=(handler == self._previous_handler),
)
# Save the state of the current buffer.
if handler.save_before(event):
event.app.current_buffer.save_to_undo_stack()
# Call handler.
from prompt_toolkit.buffer import EditReadOnlyBuffer
try:
handler.call(event)
self._fix_vi_cursor_position(event)
except EditReadOnlyBuffer:
# When a key binding does an attempt to change a buffer which is
# read-only, we can ignore that. We sound a bell and go on.
app.output.bell()
if was_temporary_navigation_mode:
self._leave_vi_temp_navigation_mode(event)
self._previous_key_sequence = key_sequence
self._previous_handler = handler
# Record the key sequence in our macro. (Only if we're in macro mode
# before and after executing the key.)
if handler.record_in_macro():
if app.emacs_state.is_recording and was_recording_emacs:
recording = app.emacs_state.current_recording
if recording is not None: # Should always be true, given that
# `was_recording_emacs` is set.
recording.extend(key_sequence)
if app.vi_state.recording_register and was_recording_vi:
for k in key_sequence:
app.vi_state.current_recording += k.data
def _fix_vi_cursor_position(self, event: KeyPressEvent) -> None:
"""
After every command, make sure that if we are in Vi navigation mode, we
never put the cursor after the last character of a line. (Unless it's
an empty line.)
"""
app = event.app
buff = app.current_buffer
preferred_column = buff.preferred_column
if (
vi_navigation_mode()
and buff.document.is_cursor_at_the_end_of_line
and len(buff.document.current_line) > 0
):
buff.cursor_position -= 1
# Set the preferred_column for arrow up/down again.
# (This was cleared after changing the cursor position.)
buff.preferred_column = preferred_column
def _leave_vi_temp_navigation_mode(self, event: KeyPressEvent) -> None:
"""
If we're in Vi temporary navigation (normal) mode, return to
insert/replace mode after executing one action.
"""
app = event.app
if app.editing_mode == EditingMode.VI:
# Not waiting for a text object and no argument has been given.
if app.vi_state.operator_func is None and self.arg is None:
app.vi_state.temporary_navigation_mode = False
def _start_timeout(self) -> None:
"""
Start auto flush timeout. Similar to Vim's `timeoutlen` option.
Start a background coroutine with a timer. When this timeout expires
and no key was pressed in the meantime, we flush all data in the queue
and call the appropriate key binding handlers.
"""
app = get_app()
timeout = app.timeoutlen
if timeout is None:
return
async def wait() -> None:
"Wait for timeout."
# This sleep can be cancelled. In that case we don't flush.
await sleep(timeout)
if len(self.key_buffer) > 0:
# (No keys pressed in the meantime.)
flush_keys()
def flush_keys() -> None:
"Flush keys."
self.feed(_Flush)
self.process_keys()
# Automatically flush keys.
if self._flush_wait_task:
self._flush_wait_task.cancel()
self._flush_wait_task = app.create_background_task(wait())
def send_sigint(self) -> None:
"""
Send SIGINT. Immediately call the SIGINT key handler.
"""
self.feed(KeyPress(key=Keys.SIGINT), first=True)
self.process_keys()
| KeyProcessor |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/parentheses/opening_parentheses_comment_value.py | {
"start": 1177,
"end": 1526
} | class ____( # e 9
x): pass
f1 = [ # f 1
x]
[ # f 2
x]
f3 = { # f3
x}
{ # f 4
x}
# Non-empty parentheses: These are not allowed without a value
def f1[ # f1
T
](): pass
f2 = ( # f2
i for i in range(10)
)
f3 = [ # f3
i for i in range(10)
]
f4 = { # f4
i for i in range(10)
}
f5 = { # f5
i: i**2 for i in range(10)
}
| E9 |
python | astropy__astropy | astropy/coordinates/baseframe.py | {
"start": 4224,
"end": 4829
} | class ____(NamedTuple):
"""
This :class:`~typing.NamedTuple` is used with the
``frame_specific_representation_info`` attribute to tell frames what
attribute names (and default units) to use for a particular representation.
``reprname`` and ``framename`` should be strings, while ``defaultunit`` can
be either an astropy unit, the string ``'recommended'`` (which is degrees
for Angles, nothing otherwise), or None (to indicate that no unit mapping
should be done).
"""
reprname: str
framename: str
defaultunit: str | Unit = "recommended"
| RepresentationMapping |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance15.py | {
"start": 223,
"end": 837
} | class ____(Operator, Generic[OpType]):
def __init__(
self,
step: OpType,
) -> None:
if isinstance(step, BasePipeline):
reveal_type(step, expected_text="BasePipeline[Unknown]*")
else:
reveal_type(step, expected_text="Operator*")
T1 = TypeVar("T1", int, str)
def do_nothing1(x: T1) -> T1:
if isinstance(x, int):
return x
return x
T2 = TypeVar("T2")
def func2(arg: T2) -> T2:
if isinstance(arg, str):
reveal_type(arg, expected_text="str*")
reveal_type(arg, expected_text="str* | object*")
return arg
| BasePipeline |
python | Lightning-AI__lightning | examples/pytorch/domain_templates/reinforce_learn_Qnet.py | {
"start": 3580,
"end": 4380
} | class ____(IterableDataset):
"""Iterable Dataset containing the ExperienceBuffer which will be updated with new experiences during training.
>>> RLDataset(ReplayBuffer(5)) # doctest: +ELLIPSIS
<...reinforce_learn_Qnet.RLDataset object at ...>
"""
def __init__(self, buffer: ReplayBuffer, sample_size: int = 200) -> None:
"""
Args:
buffer: replay buffer
sample_size: number of experiences to sample at a time
"""
self.buffer = buffer
self.sample_size = sample_size
def __iter__(self) -> Iterator:
states, actions, rewards, dones, new_states = self.buffer.sample(self.sample_size)
for i in range(len(dones)):
yield states[i], actions[i], rewards[i], dones[i], new_states[i]
| RLDataset |
python | PyCQA__pylint | doc/data/messages/a/arguments-renamed/good.py | {
"start": 147,
"end": 395
} | class ____(Fruit):
def brew(self, ingredient_name: str):
print(f"Brewing an orange with {ingredient_name}")
for fruit, ingredient_name in [[Orange(), "thyme"], [Apple(), "cinnamon"]]:
fruit.brew(ingredient_name=ingredient_name)
| Orange |
python | python-visualization__folium | folium/map.py | {
"start": 2810,
"end": 4113
} | class ____(Evented):
"""An abstract class for everything that is a Layer on the map.
It will be used to define whether an object will be included in
LayerControls.
Parameters
----------
name : string, default None
The name of the Layer, as it will appear in LayerControls
overlay : bool, default False
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
show: bool, default True
Whether the layer will be shown on opening.
"""
def __init__(
self,
name: Optional[str] = None,
overlay: bool = False,
control: bool = True,
show: bool = True,
):
super().__init__()
self.layer_name = name if name is not None else self.get_name()
self.overlay = overlay
self.control = control
self.show = show
def render(self, **kwargs):
if self.show:
self.add_child(
ElementAddToElement(
element_name=self.get_name(),
element_parent_name=self._parent.get_name(),
),
name=self.get_name() + "_add",
)
super().render(**kwargs)
| Layer |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/external.py | {
"start": 7152,
"end": 10248
} | class ____(graphene.ObjectType):
id = graphene.NonNull(graphene.ID)
name = graphene.NonNull(graphene.String)
locationOrLoadError = graphene.Field(GrapheneRepositoryLocationOrLoadError)
loadStatus = graphene.NonNull(GrapheneRepositoryLocationLoadStatus)
displayMetadata = non_null_list(GrapheneRepositoryMetadata)
updatedTimestamp = graphene.NonNull(graphene.Float)
versionKey = graphene.NonNull(graphene.String)
defsStateInfo = graphene.Field(lambda: GrapheneDefsStateInfo)
permissions = graphene.Field(non_null_list(GraphenePermission))
featureFlags = non_null_list(GrapheneFeatureFlag)
definitionsSource = graphene.NonNull(GrapheneDefinitionsSource)
class Meta:
name = "WorkspaceLocationEntry"
def __init__(self, location_entry: CodeLocationEntry):
self._location_entry = check.inst_param(location_entry, "location_entry", CodeLocationEntry)
super().__init__(
name=self._location_entry.origin.location_name,
definitionsSource=self._location_entry.definitions_source,
)
def resolve_id(self, _):
return self.name
def resolve_locationOrLoadError(self, _: ResolveInfo):
if self._location_entry.code_location:
return GrapheneRepositoryLocation(
self._location_entry.code_location.name,
self._location_entry.code_location,
)
error = self._location_entry.load_error
return GraphenePythonError(error) if error else None
def resolve_loadStatus(self, _):
return GrapheneRepositoryLocationLoadStatus.from_python_status(
self._location_entry.load_status
)
def resolve_displayMetadata(self, _):
metadata = self._location_entry.display_metadata
return [
GrapheneRepositoryMetadata(key=key, value=value)
for key, value in metadata.items()
if value is not None
]
def resolve_defsStateInfo(self, graphene_info: ResolveInfo):
if not self._location_entry.code_location:
return None
defs_state_info = self._location_entry.code_location.get_defs_state_info()
if not defs_state_info:
return None
return GrapheneDefsStateInfo(defs_state_info)
def resolve_updatedTimestamp(self, _) -> float:
return self._location_entry.update_timestamp
def resolve_versionKey(self, _) -> str:
return self._location_entry.version_key
def resolve_permissions(self, graphene_info):
permissions = graphene_info.context.permissions_for_location(location_name=self.name)
return [GraphenePermission(permission, value) for permission, value in permissions.items()]
def resolve_featureFlags(self, graphene_info):
feature_flags = get_feature_flags_for_location(self._location_entry)
return [
GrapheneFeatureFlag(name=feature_flag_name.value, enabled=feature_flag_enabled)
for feature_flag_name, feature_flag_enabled in feature_flags.items()
]
| GrapheneWorkspaceLocationEntry |
python | apache__airflow | providers/http/src/airflow/providers/http/exceptions.py | {
"start": 871,
"end": 973
} | class ____(AirflowException):
"""Exception raised for HTTP error in Http hook."""
| HttpErrorException |
python | astropy__astropy | astropy/io/registry/tests/test_registries.py | {
"start": 25256,
"end": 38734
} | class ____(TestUnifiedIORegistryBase):
"""Test :class:`astropy.io.registry.UnifiedOutputRegistry`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedOutputRegistry
# ===========================================
def test_inherited_write_registration(self, registry):
# check that multi-generation inheritance works properly,
# meaning that a child inherits from parents before
# grandparents, see astropy/astropy#7156
class Child1(EmptyData):
pass
class Child2(Child1):
pass
def _write():
return EmptyData()
def _write1():
return Child1()
# check that writer gets inherited
registry.register_writer("test", EmptyData, _write)
assert registry.get_writer("test", Child2) is _write
# check that nearest ancestor is identified
# (i.e. that the writer for Child2 is the registered method
# for Child1, and not Table)
registry.register_writer("test", Child1, _write1)
assert registry.get_writer("test", Child2) is _write1
# ===========================================
@SKIPIF_OPTIMIZED_PYTHON
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
fmt, cls = fmtcls1
with registry.delay_doc_updates(EmptyData):
registry.register_writer(*fmtcls1, empty_writer)
# test that the doc has not yet been updated.
# if a the format was registered in a different way, then
# test that this method is not present.
if "Format" in EmptyData.read.__doc__:
docs = EmptyData.write.__doc__.split("\n")
ihd = [i for i, s in enumerate(docs) if ("Format" in s)][0]
ifmt = docs[ihd].index("Format")
iwrite = docs[ihd].index("Write") + 1
# there might not actually be anything here, which is also good
if docs[-2] != docs[-1]:
assert fmt in docs[-1][ifmt : ifmt + len(fmt) + 1]
assert docs[-1][iwrite : iwrite + 3] != "Yes"
# now test it's updated
docs = EmptyData.write.__doc__.split("\n")
ihd = [i for i, s in enumerate(docs) if ("Format" in s)][0]
ifmt = docs[ihd].index("Format") + 1
iwrite = docs[ihd].index("Write") + 2
assert fmt in docs[-2][ifmt : ifmt + len(fmt) + 1]
assert docs[-2][iwrite : iwrite + 3] == "Yes"
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
raise AssertionError()
def test_identify_write_format(self, registry, fmtcls1):
"""Test ``registry.identify_format()``."""
fmt, cls = fmtcls1
args = ("write", cls, None, None, (None,), {})
# test there is no format to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
# doesn't actually matter if register a writer, it returns True for all
registry.register_identifier(fmt, cls, empty_identifier)
formats = registry.identify_format(*args)
assert fmt in formats
# -----------------------
def test_register_writer(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_writer()``."""
# initial check it's not registered
assert fmtcls1 not in registry._writers
assert fmtcls2 not in registry._writers
# register
registry.register_writer(*fmtcls1, empty_writer)
registry.register_writer(*fmtcls2, empty_writer)
assert fmtcls1 in registry._writers
assert fmtcls2 in registry._writers
def test_register_writer_invalid(self, registry, fmtcls):
"""Test calling ``registry.register_writer()`` twice."""
fmt, cls = fmtcls
registry.register_writer(fmt, cls, empty_writer)
with pytest.raises(IORegistryError) as exc:
registry.register_writer(fmt, cls, empty_writer)
assert (
str(exc.value) == f"Writer for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_writer_force(self, registry, fmtcls1):
registry.register_writer(*fmtcls1, empty_writer)
registry.register_writer(*fmtcls1, empty_writer, force=True)
assert fmtcls1 in registry._writers
# -----------------------
def test_unregister_writer(self, registry, fmtcls1):
"""Test ``registry.unregister_writer()``."""
registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in registry._writers
registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in registry._writers
def test_unregister_writer_invalid(self, registry, fmtcls):
"""Test ``registry.unregister_writer()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.unregister_writer(fmt, cls)
assert (
str(exc.value)
== f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_get_writer(self, registry, fmtcls1):
"""Test ``registry.get_writer()``."""
with pytest.raises(IORegistryError):
registry.get_writer(*fmtcls1)
registry.register_writer(*fmtcls1, empty_writer)
writer = registry.get_writer(*fmtcls1)
assert writer is empty_writer
def test_get_writer_invalid(self, registry, fmtcls1):
"""Test invalid ``registry.get_writer()``."""
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
registry.get_writer(fmt, cls)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_write_noformat(self, registry, fmtcls1):
"""Test ``registry.write()`` when there isn't a writer."""
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_noformat_arbitrary(self, registry, original, fmtcls1):
"""Test that all identifier functions can accept arbitrary input"""
registry._identifiers.update(original["identifiers"])
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(object(), registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_noformat_arbitrary_file(self, tmp_path, registry, original):
"""Tests that all identifier functions can accept arbitrary files"""
registry._writers.update(original["writers"])
testfile = tmp_path / "foo.example"
with pytest.raises(IORegistryError) as exc:
Table().write(testfile, registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_toomanyformats(self, registry, fmtcls1, fmtcls2):
registry.register_identifier(*fmtcls1, lambda o, *x, **y: True)
registry.register_identifier(*fmtcls2, lambda o, *x, **y: True)
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(registry=registry)
assert (
str(exc.value)
== f"Format is ambiguous - options are: {fmtcls1[0]}, {fmtcls2[0]}"
)
def test_write_uses_priority(self, registry, fmtcls1, fmtcls2):
fmt1, cls1 = fmtcls1
fmt2, cls2 = fmtcls2
counter = Counter()
def counting_writer1(*args, **kwargs):
counter[fmt1] += 1
def counting_writer2(*args, **kwargs):
counter[fmt2] += 1
registry.register_writer(fmt1, cls1, counting_writer1, priority=1)
registry.register_writer(fmt2, cls2, counting_writer2, priority=2)
registry.register_identifier(fmt1, cls1, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls2, lambda o, *x, **y: True)
cls1().write(registry=registry)
assert counter[fmt2] == 1
assert counter[fmt1] == 0
def test_write_format_nowriter(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
cls().write(format=fmt, registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
def test_write_identifier(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: x[0].startswith("a"))
registry.register_identifier(fmt2, cls, lambda o, *x, **y: x[0].startswith("b"))
# Now check that we got past the identifier and are trying to get
# the reader. The registry.get_writer will fail but the error message
# will tell us if the identifier worked.
with pytest.raises(IORegistryError) as exc:
cls().write("abc", registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt1}' and class '{cls.__name__}'"
)
with pytest.raises(IORegistryError) as exc:
cls().write("bac", registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt2}' and class '{cls.__name__}'"
)
def test_write_return(self, registry, fmtcls1):
"""Most writers will return None, but other values are not forbidden."""
fmt, cls = fmtcls1
registry.register_writer(fmt, cls, empty_writer)
res = cls.write(cls(), format=fmt, registry=registry)
assert res == "status: success"
# ===========================================
# Compat tests
def test_compat_register_writer(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._writers
compat.register_writer(*fmtcls1, empty_writer, registry=registry)
assert fmtcls1 in registry._writers
registry.unregister_writer(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
try:
compat.register_writer(*fmtcls1, empty_writer)
except Exception:
pass
else:
assert fmtcls1 in default_registry._writers
finally:
default_registry._writers.pop(fmtcls1)
def test_compat_unregister_writer(self, registry, fmtcls1):
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in registry._writers
compat.unregister_writer(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._writers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
compat.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
def test_compat_get_writer(self, registry, fmtcls1):
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
writer = compat.get_writer(*fmtcls1, registry=registry)
assert writer is empty_writer
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
writer = compat.get_writer(*fmtcls1)
assert writer is empty_writer
default_registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
def test_compat_write(self, registry, fmtcls1):
fmt, cls = fmtcls1
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
res = compat.write(cls(), format=fmt, registry=registry)
assert res == "status: success"
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
res = compat.write(cls(), format=fmt)
assert res == "status: success"
default_registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
| TestUnifiedOutputRegistry |
python | bokeh__bokeh | tests/unit/bokeh/server/test_auth_provider.py | {
"start": 4377,
"end": 6556
} | class ____(RequestHandler): pass
""", func, suffix='.py')
def test_get_user(self) -> None:
def func(filename: str):
am = bsa.AuthModule(filename)
assert am.get_user is not None
assert am.get_user('handler') == 10
with_file_contents("""
def get_user(handler): return 10
login_url = "/foo"
""", func, suffix='.py')
async def test_get_user_async(self) -> None:
async def func(filename: str):
am = bsa.AuthModule(filename)
assert am.get_user_async is not None
assert await am.get_user_async('handler') == 10
await with_file_contents_async("""
async def get_user_async(handler): return 10
login_url = "/foo"
""", func, suffix='.py')
def test_login_url(self) -> None:
def func(filename: str):
am = bsa.AuthModule(filename)
assert am.login_url == "/foo"
assert am.get_login_url is None
assert am.login_handler is None
assert am.logout_url is None
assert am.logout_handler is None
with_file_contents("""
def get_user(handler): return 10
login_url = "/foo"
""", func, suffix='.py')
def test_get_login_url(self) -> None:
def func(filename: str):
am = bsa.AuthModule(filename)
assert am.login_url is None
assert am.get_login_url('handler') == 20
assert am.login_handler is None
assert am.logout_url is None
assert am.logout_handler is None
with_file_contents("""
def get_user(handler): return 10
def get_login_url(handler): return 20
""", func, suffix='.py')
def test_login_handler(self) -> None:
def func(filename: str):
am = bsa.AuthModule(filename)
assert am.login_url == "/foo"
assert am.get_login_url is None
assert issubclass(am.login_handler, RequestHandler)
assert am.logout_url is None
assert am.logout_handler is None
with_file_contents("""
def get_user(handler): return 10
login_url = "/foo"
from tornado.web import RequestHandler
| LogoutHandler |
python | pytorch__pytorch | torch/distributed/checkpoint/_consolidate_hf_safetensors.py | {
"start": 1537,
"end": 27138
} | class ____:
"""
Dataclass to store information about an input safetensors file.
Attributes:
metadata_size: Size of the metadata section in bytes
metadata: Json metadata from the safetensors file
"""
metadata_size: int = 0
metadata: Any = None
def _parse_input_metadata(
input_files_data: dict[str, _InputFileData],
output_files_data: dict[str, _OutputFileData],
) -> None:
"""
Parse metadata from input safetensors files to determine the full tensor shapes and types.
This function analyzes the metadata from all input files to determine the complete shape
of each tensor after consolidation. It updates the output_files_data with this information.
Args:
input_files_data: dict of metadata from input safetensors files
output_files_data: Dictionary mapping output file paths to their metadata
Raises:
ValueError: If no DCP custom metadata is found in a safetensors file
"""
from safetensors.torch import _getdtype # type: ignore[import]
# Dictionary to track the full size of each tensor across all shards
fqn_to_size_mapping: dict[str, tuple[list[int], str]] = {}
for file_data in input_files_data.values():
safetensors_metadata = file_data.metadata
dcp_sharding_info = _get_dcp_custom_metadata(safetensors_metadata)
if not dcp_sharding_info:
raise ValueError(
"No DCP custom metadata found in safetensors file. The file must be saved with DCP to be consolidated."
)
for key, val in safetensors_metadata.items():
if key == DEFAULT_EXTRA_METADATA_KEY:
continue
# Get the shape of this tensor shard and its offset in the full tensor
sizes = val[SHAPE_KEY]
offsets = dcp_sharding_info[key][SAVED_OFFSETS_KEY]
if key not in fqn_to_size_mapping:
# First time seeing this tensor - calculate its full size by adding offsets to dimensions
cur_size = [size + offset for size, offset in zip(sizes, offsets)]
fqn_to_size_mapping[key] = (cur_size, val[DTYPE_KEY])
else:
# We've seen this tensor before - update its size if this shard extends beyond current known dimensions
cur_size = fqn_to_size_mapping[key][0]
for i in range(len(sizes)):
cur_size[i] = max(cur_size[i], sizes[i] + offsets[i])
# Now that we know the full size of each tensor, populate the output file data
for fqn, tensor_info in fqn_to_size_mapping.items():
tensor_size = tensor_info[0]
dtype_str = tensor_info[1]
for output_data in output_files_data.values():
# Add this tensor to the output file if it's already assigned there
if fqn in output_data.fqn_data:
output_data.fqn_data[fqn] = _FqnData(
shape_in_file=tensor_size,
dtype_size=torch.finfo(_getdtype(dtype_str)).bits
// 8, # Convert bits to bytes
dtype_str=dtype_str,
)
def _write_metadata(
output_files_data: dict[str, _OutputFileData],
) -> None:
"""
Write metadata to the beginning of each output safetensors file.
This function writes the metadata section to each output file, including information
about tensor shapes, data types, and offsets. It also updates the offset_in_file
field for each tensor in the output_files_data.
Args:
output_files_data: Dictionary mapping output file paths to their metadata
"""
# Process each output file
for file_path, output_data in output_files_data.items():
with open(file_path, "wb") as f:
metadata = {}
curr_offset = 0
# Calculate offsets for each tensor in the file
for fqn, fqn_data in output_data.fqn_data.items():
# Calculate the end offset by multiplying all dimensions and the data type size
end_offset = (
curr_offset
+ math.prod(fqn_data.shape_in_file) * fqn_data.dtype_size
)
# Store metadata for this tensor
metadata[fqn] = {
SHAPE_KEY: fqn_data.shape_in_file,
DTYPE_KEY: fqn_data.dtype_str,
DATA_OFFSETS_KEY: [
curr_offset,
end_offset,
], # Start and end byte offsets
}
# Store the offset for later use when writing the actual tensor data
fqn_data.offset_in_file = curr_offset
# Update current offset for the next tensor
curr_offset = end_offset
# Convert metadata to JSON and encode as bytes
json_metadata = json.dumps(metadata)
json_bytes = json_metadata.encode("utf-8")
# Write the metadata size as an 8-byte unsigned integer (little-endian)
size_in_bytes = len(json_bytes)
header_len = struct.pack("<Q", size_in_bytes)
# Write the header length and metadata to the file
f.write(header_len)
f.write(json_bytes)
# Store the total metadata size (header + JSON) for later use
output_data.metadata_size = f.tell()
def _read_tensor_data_mmap(
file_path: str,
start_offset: int,
end_offset: int,
metadata_size: int,
) -> bytes:
"""
Read tensor data from a safetensors file using memory mapping for efficiency.
Args:
file_path: Path to the safetensors file
start_offset: Start offset of tensor data within the data section
end_offset: End offset of tensor data within the data section
metadata_size: Size of the metadata header
Returns:
Raw tensor data as bytes
"""
# Use mmap for efficient access
with open(file_path, "rb") as f:
with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
absolute_start = metadata_size + start_offset
absolute_end = metadata_size + end_offset
return bytes(mm[absolute_start:absolute_end])
def _process_output_file(
output_file: str,
output_data: _OutputFileData,
input_files_data: dict[str, _InputFileData],
) -> None:
"""
Process a single output file by writing tensor data from input files using memory mapping.
This function is designed to be run in parallel for different output files.
Args:
output_file: Path to the output file
output_data: Metadata for the output file
input_files_data: Dictionary mapping input file paths to their metadata
"""
sorted_tensors = sorted(
output_data.fqn_data.items(), key=lambda x: x[1].offset_in_file
)
with open(output_file, "r+b") as output_stream:
output_stream.seek(0, os.SEEK_END)
# Process each tensor in sequential output order
for tensor_fqn, tensor_fqn_data in sorted_tensors:
full_tensor_mv = memoryview(
bytearray(
math.prod(tensor_fqn_data.shape_in_file)
* tensor_fqn_data.dtype_size
)
)
# Process each input safetensors file
for safetensors_file in input_files_data:
file_metadata = input_files_data[safetensors_file].metadata
input_metadata_size = input_files_data[safetensors_file].metadata_size
if tensor_fqn not in file_metadata:
continue
metadata = file_metadata[tensor_fqn]
data_offsets = metadata[DATA_OFFSETS_KEY]
# Use memory mapping to read tensor data efficiently
data_to_write = _read_tensor_data_mmap(
safetensors_file,
data_offsets[0],
data_offsets[1],
input_metadata_size,
)
# Get the offsets of this tensor shard within the full tensor
fqn_custom_metadata = _get_dcp_custom_metadata(file_metadata)[
tensor_fqn
] # type: ignore[index]
offsets_of_tensor_being_read = fqn_custom_metadata[SAVED_OFFSETS_KEY] # type: ignore[index]
# Write this tensor shard to the appropriate position in the output file
_write_sub_tensor_to_file_optimized(
full_tensor_mv,
data_to_write,
tensor_fqn_data.dtype_size, # Size of each element in bytes
tensor_fqn_data.shape_in_file, # Full tensor shape
offsets_of_tensor_being_read, # Where this shard belongs in the full tensor
metadata[SHAPE_KEY], # Shape of this shard
)
output_stream.write(full_tensor_mv)
def _write_data(
input_files_data: dict[str, _InputFileData],
output_files_data: dict[str, _OutputFileData],
num_threads: int = 1,
) -> None:
"""
Write tensor data from input files to the output files using memory mapping.
This function reads tensor data from each input file and writes it to the appropriate
position in the output files based on the tensor's offsets. When num_threads > 1,
the work is split across threads with each thread handling a different output file.
Args:
input_files_data: Dictionary mapping input file paths to their metadata
output_files_data: Dictionary mapping output file paths to their metadata
num_threads: Number of threads to use for parallel processing
"""
if num_threads <= 1 or len(output_files_data) <= 1:
# Sequential processing
for output_file, output_data in output_files_data.items():
_process_output_file(output_file, output_data, input_files_data)
else:
# Parallel processing with ThreadPoolExecutor
with concurrent.futures.ThreadPoolExecutor(
max_workers=min(num_threads, len(output_files_data))
) as executor:
futures = []
for output_file, output_data in output_files_data.items():
futures.append(
executor.submit(
_process_output_file,
output_file,
output_data,
input_files_data,
)
)
# Wait for all futures to complete
for future in concurrent.futures.as_completed(futures):
# Handle any exceptions that might have occurred
try:
future.result()
except Exception as e:
print(f"Error processing output file: {e}")
raise
def _write_sub_tensor_to_file_optimized(
full_tensor_mv: memoryview,
sub_tensor_bytes: bytes,
element_size: int,
tensor_shape: list[int],
sub_tensor_offsets: list[int],
sub_tensor_shape: list[int],
) -> None:
"""
Optimized version that writes the maximum number of contiguous bytes possible.
Uses a unified algorithm that calculates the maximum contiguous bytes that can be
written in each iteration and continues until the entire subtensor is written.
Handles all sharding patterns efficiently:
- Full sub-tensor at once for row-wise sharding
- Row-by-row for column-wise sharding
- Optimized chunks for other patterns
Args:
full_tensor_mv: Buffer to write the full tensor to
sub_tensor_bytes: Raw tensor data as bytes
element_size: Size of each element in bytes
tensor_shape: Shape of the full tensor
sub_tensor_offsets: Starting offsets of the sub-tensor within the full tensor
sub_tensor_shape: Shape of the sub-tensor
"""
# Handle empty tensors
if not tensor_shape or not sub_tensor_shape:
return
# Calculate tensor strides for efficient indexing
tensor_strides = [1]
for i in range(len(tensor_shape) - 1, 0, -1):
tensor_strides.insert(0, tensor_strides[0] * tensor_shape[i])
sub_tensor_strides = [1]
for i in range(len(sub_tensor_shape) - 1, 0, -1):
sub_tensor_strides.insert(0, sub_tensor_strides[0] * sub_tensor_shape[i])
total_elements = math.prod(sub_tensor_shape)
elements_written = 0
while elements_written < total_elements:
# Convert linear index to multi-dimensional indices
temp_idx = elements_written
indices = []
for dim_size in reversed(sub_tensor_shape):
indices.append(temp_idx % dim_size)
temp_idx //= dim_size
indices.reverse()
# Calculate maximum contiguous elements we can write from this position
max_contiguous = _calculate_max_contiguous_elements(
indices, sub_tensor_shape, tensor_shape
)
# Calculate source position in bytes
src_pos = sum(idx * stride for idx, stride in zip(indices, sub_tensor_strides))
src_byte_offset = src_pos * element_size
# Calculate destination position in bytes
dest_indices = [
idx + offset for idx, offset in zip(indices, sub_tensor_offsets)
]
dest_pos = sum(
idx * stride for idx, stride in zip(dest_indices, tensor_strides)
)
dest_byte_offset = dest_pos * element_size
# Write the contiguous chunk
bytes_to_write = max_contiguous * element_size
chunk_data = sub_tensor_bytes[
src_byte_offset : src_byte_offset + bytes_to_write
]
full_tensor_mv[dest_byte_offset : dest_byte_offset + bytes_to_write] = (
chunk_data
)
elements_written += max_contiguous
def _calculate_max_contiguous_elements(
indices: list[int],
sub_tensor_shape: list[int],
tensor_shape: list[int],
) -> int:
"""
Calculate the maximum number of contiguous elements that can be written from current position.
This determines the largest chunk by checking how elements are laid out in memory
and finding natural boundaries where contiguity breaks.
Args:
indices: Current position indices in the sub-tensor
sub_tensor_shape: Shape of the sub-tensor being written
tensor_shape: Shape of the full tensor
Raises:
ValueError: If input lists are empty, have mismatched lengths, or contain invalid values
"""
# Validate input lists are not empty
if not indices or not sub_tensor_shape or not tensor_shape:
raise ValueError("Input lists cannot be empty")
# Validate all lists have the same length (same number of dimensions)
if not (len(indices) == len(sub_tensor_shape) == len(tensor_shape)):
raise ValueError(
f"All input lists must have the same length. Got indices: {len(indices)}, "
f"sub_tensor_shape: {len(sub_tensor_shape)}, tensor_shape: {len(tensor_shape)}"
)
# Validate indices are within bounds of sub_tensor_shape
for i, (idx, sub_dim) in enumerate(zip(indices, sub_tensor_shape)):
if idx >= sub_dim:
raise ValueError(
f"Index {idx} at dimension {i} is out of bounds for sub-tensor shape {sub_tensor_shape}"
)
# Validate sub_tensor dimensions don't exceed tensor dimensions
for i, (sub_dim, tensor_dim) in enumerate(zip(sub_tensor_shape, tensor_shape)):
if sub_dim > tensor_dim:
raise ValueError(
f"Sub-tensor dimension {sub_dim} at position {i} exceeds tensor dimension {tensor_dim}"
)
# Start with elements remaining in the last dimension
max_contiguous = sub_tensor_shape[-1] - indices[-1]
# Check if we can extend across multiple dimensions
# We can write across dimension boundaries if we're writing complete "rows"
# and the layout in destination tensor maintains contiguity
# For 2D case: check if we can write multiple complete rows
if len(sub_tensor_shape) >= 2:
# If we're at the start of a row and can write complete rows
if indices[-1] == 0: # At start of last dimension (column)
rows_remaining = sub_tensor_shape[-2] - indices[-2] # Rows left to write
# Check if writing complete rows maintains contiguity in destination
# This is true for row-wise sharding or when sub-tensor spans full width
if sub_tensor_shape[-1] == tensor_shape[-1]: # Full width
max_contiguous = rows_remaining * sub_tensor_shape[-1]
# For higher dimensions, check if we can extend further
if len(sub_tensor_shape) >= 3 and indices[-2] == 0:
# Check if we can write complete 2D slices
remaining_in_dim = sub_tensor_shape[-3] - indices[-3]
if (
sub_tensor_shape[-1] == tensor_shape[-1]
and sub_tensor_shape[-2] == tensor_shape[-2]
):
max_contiguous = (
remaining_in_dim * sub_tensor_shape[-2] * sub_tensor_shape[-1]
)
return max_contiguous
def _write_overall_metadata_file(
output_dir: str,
output_files_data: dict[str, _OutputFileData],
) -> None:
"""
Write the overall metadata file that maps tensor names to their file locations.
This creates a model.safetensors.index.json file that HuggingFace models use
to locate tensors across multiple files.
Args:
output_dir: Directory where the metadata file will be written
output_files_data: Dictionary mapping output file paths to their metadata
"""
total_size = 0
weight_map = {}
for output_path, value in output_files_data.items():
for fqn, fqn_data in value.fqn_data.items():
total_size += math.prod(fqn_data.shape_in_file) * fqn_data.dtype_size
weight_map[fqn] = os.path.basename(output_path)
metadata_to_write: dict[str, Any] = {}
metadata_to_write["metadata"] = {"total_size": total_size}
metadata_to_write["weight_map"] = weight_map
metadata_path = os.path.join(output_dir, f"{_metadata_fn}")
with open(metadata_path, "w") as metadata_file:
json.dump(metadata_to_write, metadata_file, indent=2)
def _consolidate_safetensors_files(
input_dir: str,
output_dir: str,
fqn_to_file_mapping: dict[str, str],
num_threads: int,
) -> dict[str, _OutputFileData]:
output_files_data: dict[str, _OutputFileData] = {}
# Create multiple output files based on the provided mapping
for fqn, filename in fqn_to_file_mapping.items():
output_path = os.path.join(output_dir, filename)
if output_path not in output_files_data:
output_files_data[output_path] = _OutputFileData(fqn_data={fqn: _FqnData()})
else:
output_files_data[output_path].fqn_data[fqn] = _FqnData()
# Find all safetensors files in the input directory
safetensors_files = glob.glob(os.path.join(input_dir, f"*{SUFFIX}"))
# Read metadata from all input files
input_files_data: dict[str, _InputFileData] = {}
for safetensor_file in safetensors_files:
with open(safetensor_file, "rb") as f:
metadata, size = _get_safetensors_file_metadata(f)
input_files_data[safetensor_file] = _InputFileData(
metadata_size=size, metadata=metadata
)
# Step 1: Parse metadata to determine tensor shapes and types
_parse_input_metadata(input_files_data, output_files_data)
# Step 2: Write metadata headers to output files
_write_metadata(output_files_data)
# Step 3: Write actual tensor data from input files to output files
_write_data(input_files_data, output_files_data, num_threads)
return output_files_data
def consolidate_safetensors_files(
input_dir: str,
output_dir: str,
fqn_to_index_mapping: dict[str, int],
num_threads: int = 1,
) -> None:
"""
Main function to consolidate sharded safetensors files into one or more output files.
This function orchestrates the entire consolidation process:
1. Sets up the output file structure based on the fqn_to_index_mapping
2. Finds all safetensors files in the input directory
3. Parses metadata from all input files
4. Writes metadata to the output files
5. Writes tensor data from input files to output files
6. Writes overall model.index.safetensors.json file with weight map
Args:
input_dir: Directory containing sharded safetensors files
output_dir: Directory where consolidated files will be written
fqn_to_index_mapping: Optional mapping of tensor names to output file indices.
If None, all tensors will be consolidated into a single file.
num_threads: Number of threads to use for parallel processing of saving data to output files.
"""
start_time = time.time()
logger.info(
"Consolidating safetensors files from %s to %s. Beginning at time %f",
input_dir,
output_dir,
start_time,
)
max_index = max(fqn_to_index_mapping.values())
fqn_to_file_mapping = {
fqn: _gen_file_name(idx, max_index) for fqn, idx in fqn_to_index_mapping.items()
}
output_files_data = _consolidate_safetensors_files(
input_dir, output_dir, fqn_to_file_mapping, num_threads
)
# Step 4: Write overall model.index.safetensors.json file with weight map
_write_overall_metadata_file(output_dir, output_files_data)
logger.info("Done consolidating. Took %.2f secs.", time.time() - start_time)
def consolidate_safetensors_files_on_every_rank(
input_dir: str,
output_dir: str,
fqn_to_index_mapping: dict[str, int],
num_threads: int = 1,
process_group: Optional[dist.ProcessGroup] = None,
) -> None:
"""
Consolidate sharded safetensors files across multiple ranks, with each rank handling a subset of output files.
This function distributes the consolidation work by assigning output files to different ranks.
All tensors with the same index in fqn_to_index_mapping are processed by the same rank,
as they belong to the same output file.
If process_group is provided, rank and world_size will be derived from it. Otherwise,
they will be automatically detected from the distributed environment if available.
Args:
input_dir: Directory containing sharded safetensors files
output_dir: Directory where consolidated files will be written
fqn_to_index_mapping: Mapping of tensor names to output file indices
num_threads: Number of threads to use for parallel processing on each rank
process_group: PyTorch distributed process group (default: None, will use default group)
"""
start_time = time.time()
# Derive rank and world_size from process_group or default distributed environment
if dist.is_available() and dist.is_initialized():
rank = dist.get_rank(group=process_group)
world_size = dist.get_world_size(group=process_group)
else:
# Default to single process mode if distributed is not initialized
rank = 0
world_size = 1
logger.warning(
"Distributed environment not initialized. Running in single process mode."
)
logger.info(
"Rank %d/%d: Consolidating safetensors files from %s to %s",
rank,
world_size,
input_dir,
output_dir,
)
# Find all unique indices in the mapping
unique_indices = set(fqn_to_index_mapping.values())
# Distribute indices across ranks
indices_for_this_rank = []
for idx in unique_indices:
# Simple distribution: index % world_size == rank
if idx % world_size == rank:
indices_for_this_rank.append(idx)
logger.info(
"Rank %d: Assigned %d output files out of %d total files",
rank,
len(indices_for_this_rank),
len(unique_indices),
)
# Filter the fqn_to_index_mapping to only include tensors for this rank
filtered_mapping = {
fqn: idx
for fqn, idx in fqn_to_index_mapping.items()
if idx in indices_for_this_rank
}
if filtered_mapping:
# Convert index mapping to filename mapping
max_index = max(unique_indices)
filtered_filename_mapping = {}
for fqn, idx in filtered_mapping.items():
filename = _gen_file_name(idx, max_index)
filtered_filename_mapping[fqn] = filename
# Call the existing consolidation function with the filtered mapping
_consolidate_safetensors_files(
input_dir=input_dir,
output_dir=output_dir,
fqn_to_file_mapping=filtered_filename_mapping,
num_threads=num_threads,
)
logger.info(
"Rank %d: Done consolidating. Processed %d unique indices in %.2f secs.",
rank,
len(indices_for_this_rank),
time.time() - start_time,
)
# Wait for all ranks to complete
if dist.is_available() and dist.is_initialized():
logger.info("Rank %d: Waiting for all ranks to complete...", rank)
dist.barrier()
logger.info("Rank %d: All ranks have completed.", rank)
if rank == 0:
logger.info("Total time taken: %.2f secs.", time.time() - start_time)
| _InputFileData |
python | plotly__plotly.py | plotly/graph_objs/layout/xaxis/title/_font.py | {
"start": 235,
"end": 9890
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.xaxis.title"
_path_str = "layout.xaxis.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this axis' title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.xaxis.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.xaxis.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.xaxis.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 513591,
"end": 513879
} | class ____(NumBinopNode):
# Binary operation taking integer arguments.
def c_types_okay(self, type1, type2):
#print "IntBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_int or type1.is_enum) \
and (type2.is_int or type2.is_enum)
| IntBinopNode |
python | run-llama__llama_index | llama-index-core/llama_index/core/prompts/base.py | {
"start": 1478,
"end": 4583
} | class ____(BaseModel, ABC): # type: ignore[no-redef]
model_config = ConfigDict(arbitrary_types_allowed=True)
metadata: Dict[str, Any]
template_vars: List[str]
kwargs: Dict[str, str]
output_parser: Optional[BaseOutputParser]
template_var_mappings: Optional[Dict[str, Any]] = Field(
default_factory=dict, # type: ignore
description="Template variable mappings (Optional).",
)
function_mappings: Optional[Dict[str, AnnotatedCallable]] = Field(
default_factory=dict, # type: ignore
description=(
"Function mappings (Optional). This is a mapping from template "
"variable names to functions that take in the current kwargs and "
"return a string."
),
)
def _map_template_vars(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""For keys in template_var_mappings, swap in the right keys."""
template_var_mappings = self.template_var_mappings or {}
return {template_var_mappings.get(k, k): v for k, v in kwargs.items()}
def _map_function_vars(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""
For keys in function_mappings, compute values and combine w/ kwargs.
Users can pass in functions instead of fixed values as format variables.
For each function, we call the function with the current kwargs,
get back the value, and then use that value in the template
for the corresponding format variable.
"""
function_mappings = self.function_mappings or {}
# first generate the values for the functions
new_kwargs = {}
for k, v in function_mappings.items():
# TODO: figure out what variables to pass into each function
# is it the kwargs specified during query time? just the fixed kwargs?
# all kwargs?
new_kwargs[k] = v(**kwargs)
# then, add the fixed variables only if not in new_kwargs already
# (implying that function mapping will override fixed variables)
for k, v in kwargs.items():
if k not in new_kwargs:
new_kwargs[k] = v
return new_kwargs
def _map_all_vars(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""
Map both template and function variables.
We (1) first call function mappings to compute functions,
and then (2) call the template_var_mappings.
"""
# map function
new_kwargs = self._map_function_vars(kwargs)
# map template vars (to point to existing format vars in string template)
return self._map_template_vars(new_kwargs)
@abstractmethod
def partial_format(self, **kwargs: Any) -> "BasePromptTemplate": ...
@abstractmethod
def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str: ...
@abstractmethod
def format_messages(
self, llm: Optional[BaseLLM] = None, **kwargs: Any
) -> List[ChatMessage]: ...
@abstractmethod
def get_template(self, llm: Optional[BaseLLM] = None) -> str: ...
| BasePromptTemplate |
python | pydantic__pydantic | pydantic/plugin/__init__.py | {
"start": 2608,
"end": 3581
} | class ____(Protocol):
"""Base class for plugin callbacks protocols.
You shouldn't implement this protocol directly, instead use one of the subclasses with adds the correctly
typed `on_error` method.
"""
on_enter: Callable[..., None]
"""`on_enter` is changed to be more specific on all subclasses"""
def on_success(self, result: Any) -> None:
"""Callback to be notified of successful validation.
Args:
result: The result of the validation.
"""
return
def on_error(self, error: ValidationError) -> None:
"""Callback to be notified of validation errors.
Args:
error: The validation error.
"""
return
def on_exception(self, exception: Exception) -> None:
"""Callback to be notified of validation exceptions.
Args:
exception: The exception raised during validation.
"""
return
| BaseValidateHandlerProtocol |
python | walkccc__LeetCode | solutions/1209. Remove All Adjacent Duplicates in String II/1209.py | {
"start": 0,
"end": 335
} | class ____:
def removeDuplicates(self, s: str, k: int) -> str:
stack = []
for c in s:
if not stack or stack[-1][0] != c:
stack.append([c, 1])
else: # stack[-1][0] == c
stack[-1][1] += 1
if stack[-1][1] == k:
stack.pop()
return ''.join(c * count for c, count in stack)
| Solution |
python | langchain-ai__langchain | libs/partners/huggingface/langchain_huggingface/llms/huggingface_endpoint.py | {
"start": 682,
"end": 16438
} | class ____(LLM):
"""Hugging Face Endpoint. This works with any model that supports text generation (i.e. text completion) task.
To use this class, you should have installed the `huggingface_hub` package, and
the environment variable `HUGGINGFACEHUB_API_TOKEN` set with your API token,
or given as a named parameter to the constructor.
Example:
```python
# Basic Example (no streaming)
model = HuggingFaceEndpoint(
endpoint_url="http://localhost:8010/",
max_new_tokens=512,
top_k=10,
top_p=0.95,
typical_p=0.95,
temperature=0.01,
repetition_penalty=1.03,
huggingfacehub_api_token="my-api-key",
)
print(model.invoke("What is Deep Learning?"))
# Streaming response example
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
callbacks = [StreamingStdOutCallbackHandler()]
model = HuggingFaceEndpoint(
endpoint_url="http://localhost:8010/",
max_new_tokens=512,
top_k=10,
top_p=0.95,
typical_p=0.95,
temperature=0.01,
repetition_penalty=1.03,
callbacks=callbacks,
streaming=True,
huggingfacehub_api_token="my-api-key",
)
print(model.invoke("What is Deep Learning?"))
# Basic Example (no streaming) with Mistral-Nemo-Base-2407 model using a third-party provider (Novita).
model = HuggingFaceEndpoint(
repo_id="mistralai/Mistral-Nemo-Base-2407",
provider="novita",
max_new_tokens=100,
do_sample=False,
huggingfacehub_api_token="my-api-key",
)
print(model.invoke("What is Deep Learning?"))
```
""" # noqa: E501
endpoint_url: str | None = None
"""Endpoint URL to use. If repo_id is not specified then this needs to given or
should be pass as env variable in `HF_INFERENCE_ENDPOINT`"""
repo_id: str | None = None
"""Repo to use. If endpoint_url is not specified then this needs to given"""
provider: str | None = None
"""Name of the provider to use for inference with the model specified in `repo_id`.
e.g. "cerebras". if not specified, Defaults to "auto" i.e. the first of the
providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
available providers can be found in the [huggingface_hub documentation](https://huggingface.co/docs/huggingface_hub/guides/inference#supported-providers-and-tasks)."""
huggingfacehub_api_token: str | None = Field(
default_factory=from_env("HUGGINGFACEHUB_API_TOKEN", default=None)
)
max_new_tokens: int = 512
"""Maximum number of generated tokens"""
top_k: int | None = None
"""The number of highest probability vocabulary tokens to keep for
top-k-filtering."""
top_p: float | None = 0.95
"""If set to < 1, only the smallest set of most probable tokens with probabilities
that add up to `top_p` or higher are kept for generation."""
typical_p: float | None = 0.95
"""Typical Decoding mass. See [Typical Decoding for Natural Language
Generation](https://arxiv.org/abs/2202.00666) for more information."""
temperature: float | None = 0.8
"""The value used to module the logits distribution."""
repetition_penalty: float | None = None
"""The parameter for repetition penalty. 1.0 means no penalty.
See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details."""
return_full_text: bool = False
"""Whether to prepend the prompt to the generated text"""
truncate: int | None = None
"""Truncate inputs tokens to the given size"""
stop_sequences: list[str] = Field(default_factory=list)
"""Stop generating tokens if a member of `stop_sequences` is generated"""
seed: int | None = None
"""Random sampling seed"""
inference_server_url: str = ""
"""text-generation-inference instance base url"""
timeout: int = 120
"""Timeout in seconds"""
streaming: bool = False
"""Whether to generate a stream of tokens asynchronously"""
do_sample: bool = False
"""Activate logits sampling"""
watermark: bool = False
"""Watermarking with [A Watermark for Large Language Models]
(https://arxiv.org/abs/2301.10226)"""
server_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Holds any text-generation-inference server parameters not explicitly specified"""
model_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `call` not explicitly specified"""
model: str
client: Any = None
async_client: Any = None
task: str | None = None
"""Task to call the model with. Should be a task that returns `generated_text`."""
model_config = ConfigDict(
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
msg = f"Found {field_name} supplied twice."
raise ValueError(msg)
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please make sure that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
msg = (
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
raise ValueError(msg)
values["model_kwargs"] = extra
# to correctly create the InferenceClient and AsyncInferenceClient
# in validate_environment, we need to populate values["model"].
# from InferenceClient docstring:
# model (`str`, `optional`):
# The model to run inference with. Can be a model id hosted on the Hugging
# Face Hub, e.g. `bigcode/starcoder`
# or a URL to a deployed Inference Endpoint. Defaults to `None`, in which
# case a recommended model is
# automatically selected for the task.
# this string could be in 3 places of descending priority:
# 2. values["model"] or values["endpoint_url"] or values["repo_id"]
# (equal priority - don't allow both set)
# 3. values["HF_INFERENCE_ENDPOINT"] (if none above set)
model = values.get("model")
endpoint_url = values.get("endpoint_url")
repo_id = values.get("repo_id")
if sum([bool(model), bool(endpoint_url), bool(repo_id)]) > 1:
msg = (
"Please specify either a `model` OR an `endpoint_url` OR a `repo_id`,"
"not more than one."
)
raise ValueError(msg)
values["model"] = (
model or endpoint_url or repo_id or os.environ.get("HF_INFERENCE_ENDPOINT")
)
if not values["model"]:
msg = (
"Please specify a `model` or an `endpoint_url` or a `repo_id` for the "
"model."
)
raise ValueError(msg)
return values
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that package is installed and that the API token is valid."""
huggingfacehub_api_token = self.huggingfacehub_api_token or os.getenv(
"HF_TOKEN"
)
from huggingface_hub import ( # type: ignore[import]
AsyncInferenceClient, # type: ignore[import]
InferenceClient, # type: ignore[import]
)
# Instantiate clients with supported kwargs
sync_supported_kwargs = set(inspect.signature(InferenceClient).parameters)
self.client = InferenceClient(
model=self.model,
timeout=self.timeout,
api_key=huggingfacehub_api_token,
provider=self.provider, # type: ignore[arg-type]
**{
key: value
for key, value in self.server_kwargs.items()
if key in sync_supported_kwargs
},
)
async_supported_kwargs = set(inspect.signature(AsyncInferenceClient).parameters)
self.async_client = AsyncInferenceClient(
model=self.model,
timeout=self.timeout,
api_key=huggingfacehub_api_token,
provider=self.provider, # type: ignore[arg-type]
**{
key: value
for key, value in self.server_kwargs.items()
if key in async_supported_kwargs
},
)
ignored_kwargs = (
set(self.server_kwargs.keys())
- sync_supported_kwargs
- async_supported_kwargs
)
if len(ignored_kwargs) > 0:
logger.warning(
f"Ignoring following parameters as they are not supported by the "
f"InferenceClient or AsyncInferenceClient: {ignored_kwargs}."
)
return self
@property
def _default_params(self) -> dict[str, Any]:
"""Get the default parameters for calling text generation inference API."""
return {
"max_new_tokens": self.max_new_tokens,
"top_k": self.top_k,
"top_p": self.top_p,
"typical_p": self.typical_p,
"temperature": self.temperature,
"repetition_penalty": self.repetition_penalty,
"return_full_text": self.return_full_text,
"truncate": self.truncate,
"stop": self.stop_sequences,
"seed": self.seed,
"do_sample": self.do_sample,
"watermark": self.watermark,
**self.model_kwargs,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
"endpoint_url": self.endpoint_url,
"task": self.task,
"provider": self.provider,
"model_kwargs": _model_kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "huggingface_endpoint"
def _invocation_params(
self, runtime_stop: list[str] | None, **kwargs: Any
) -> dict[str, Any]:
params = {**self._default_params, **kwargs}
params["stop"] = params["stop"] + (runtime_stop or [])
return params
def _call(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> str:
"""Call out to HuggingFace Hub's inference endpoint."""
invocation_params = self._invocation_params(stop, **kwargs)
if self.streaming:
completion = ""
for chunk in self._stream(
prompt, run_manager=run_manager, **invocation_params
):
completion += chunk.text
return completion
response_text = self.client.text_generation(
prompt=prompt,
model=self.model,
**invocation_params,
)
# Maybe the generation has stopped at one of the stop sequences:
# then we remove this stop sequence from the end of the generated text
for stop_seq in invocation_params["stop"]:
if response_text[-len(stop_seq) :] == stop_seq:
response_text = response_text[: -len(stop_seq)]
return response_text
async def _acall(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> str:
invocation_params = self._invocation_params(stop, **kwargs)
if self.streaming:
completion = ""
async for chunk in self._astream(
prompt, run_manager=run_manager, **invocation_params
):
completion += chunk.text
return completion
response_text = await self.async_client.text_generation(
prompt=prompt,
**invocation_params,
model=self.model,
stream=False,
)
# Maybe the generation has stopped at one of the stop sequences:
# then remove this stop sequence from the end of the generated text
for stop_seq in invocation_params["stop"]:
if response_text[-len(stop_seq) :] == stop_seq:
response_text = response_text[: -len(stop_seq)]
return response_text
def _stream(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
invocation_params = self._invocation_params(stop, **kwargs)
for response in self.client.text_generation(
prompt, **invocation_params, stream=True
):
# identify stop sequence in generated text, if any
stop_seq_found: str | None = None
for stop_seq in invocation_params["stop"]:
if stop_seq in response:
stop_seq_found = stop_seq
# identify text to yield
text: str | None = None
if stop_seq_found:
text = response[: response.index(stop_seq_found)]
else:
text = response
# yield text, if any
if text:
chunk = GenerationChunk(text=text)
if run_manager:
run_manager.on_llm_new_token(chunk.text)
yield chunk
# break if stop sequence found
if stop_seq_found:
break
async def _astream(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
invocation_params = self._invocation_params(stop, **kwargs)
async for response in await self.async_client.text_generation(
prompt, **invocation_params, stream=True
):
# identify stop sequence in generated text, if any
stop_seq_found: str | None = None
for stop_seq in invocation_params["stop"]:
if stop_seq in response:
stop_seq_found = stop_seq
# identify text to yield
text: str | None = None
if stop_seq_found:
text = response[: response.index(stop_seq_found)]
else:
text = response
# yield text, if any
if text:
chunk = GenerationChunk(text=text)
if run_manager:
await run_manager.on_llm_new_token(chunk.text)
yield chunk
# break if stop sequence found
if stop_seq_found:
break
| HuggingFaceEndpoint |
python | bokeh__bokeh | src/bokeh/sphinxext/_internal/bokeh_jinja.py | {
"start": 2102,
"end": 3950
} | class ____(BokehDirective):
has_content = True
required_arguments = 1
option_spec = {
"noindex": lambda x: True, # directives.flag weirdly returns None
}
def run(self):
template_path = self.arguments[0]
module_path, template_name = template_path.rsplit(".", 1)
try:
module = importlib.import_module(module_path)
except ImportError:
SphinxError(f"Unable to import Bokeh template module: {module_path}")
template = getattr(module, template_name, None)
if template is None:
SphinxError(f"Unable to find Bokeh template: {template_path}")
template_text = open(template.filename).read()
m = _DOCPAT.match(template_text)
doc = m.group(1) if m else None
filename = basename(template.filename)
rst_text = JINJA_DETAIL.render(
name=template_name,
module=module_path,
objrepr=repr(template),
noindex=self.options.get("noindex", False),
doc="" if doc is None else textwrap.dedent(doc),
filename=filename,
template_text=_DOCPAT.sub("", template_text),
)
return self.parse(rst_text, "<bokeh-jinja>")
def setup(app):
""" Required Sphinx extension setup function. """
app.add_directive_to_domain("py", "bokeh-jinja", BokehJinjaDirective)
return PARALLEL_SAFE
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
_DOCPAT = re.compile(r"\{\#(.+?)\#\}", flags=re.MULTILINE | re.DOTALL)
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
| BokehJinjaDirective |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 259970,
"end": 260645
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("CreatedCommitContributionEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("CreatedCommitContribution"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| CreatedCommitContributionConnection |
python | PyCQA__isort | isort/parse.py | {
"start": 4121,
"end": 25413
} | class ____(NamedTuple):
in_lines: list[str]
lines_without_imports: list[str]
import_index: int
place_imports: dict[str, list[str]]
import_placements: dict[str, str]
as_map: dict[str, dict[str, list[str]]]
imports: dict[str, dict[str, Any]]
categorized_comments: "CommentsDict"
change_count: int
original_line_count: int
line_separator: str
sections: Any
verbose_output: list[str]
trailing_commas: set[str]
# Ignore DeepSource cyclomatic complexity check for this function. It is one
# the main entrypoints so sort of expected to be complex.
# skipcq: PY-R1000
def file_contents(contents: str, config: Config = DEFAULT_CONFIG) -> ParsedContent:
"""Parses a python file taking out and categorizing imports."""
line_separator: str = config.line_ending or _infer_line_separator(contents)
in_lines = contents.splitlines()
if contents and contents[-1] in ("\n", "\r"):
in_lines.append("")
out_lines = []
original_line_count = len(in_lines)
finder = partial(place.module, config=config)
line_count = len(in_lines)
place_imports: dict[str, list[str]] = {}
import_placements: dict[str, str] = {}
as_map: dict[str, dict[str, list[str]]] = {
"straight": defaultdict(list),
"from": defaultdict(list),
}
imports: OrderedDict[str, dict[str, Any]] = OrderedDict()
verbose_output: list[str] = []
for section in chain(config.sections, config.forced_separate):
imports[section] = {"straight": OrderedDict(), "from": OrderedDict()}
categorized_comments: CommentsDict = {
"from": {},
"straight": {},
"nested": {},
"above": {"straight": {}, "from": {}},
}
trailing_commas: set[str] = set()
index = 0
import_index = -1
in_quote = ""
while index < line_count:
line = in_lines[index]
index += 1
statement_index = index
(skipping_line, in_quote) = skip_line(
line, in_quote=in_quote, index=index, section_comments=config.section_comments
)
if (
line in config.section_comments or line in config.section_comments_end
) and not skipping_line:
if import_index == -1: # pragma: no branch
import_index = index - 1
continue
if "isort:imports-" in line and line.startswith("#"):
section = line.split("isort:imports-")[-1].split()[0].upper()
place_imports[section] = []
import_placements[line] = section
elif "isort: imports-" in line and line.startswith("#"):
section = line.split("isort: imports-")[-1].split()[0].upper()
place_imports[section] = []
import_placements[line] = section
if skipping_line:
out_lines.append(line)
continue
lstripped_line = line.lstrip()
if (
config.float_to_top
and import_index == -1
and line
and not in_quote
and not lstripped_line.startswith("#")
and not lstripped_line.startswith("'''")
and not lstripped_line.startswith('"""')
):
if not lstripped_line.startswith("import") and not lstripped_line.startswith("from"):
import_index = index - 1
while import_index and not in_lines[import_index - 1]:
import_index -= 1
else:
commentless = line.split("#", 1)[0].strip()
if (
("isort:skip" in line or "isort: skip" in line)
and "(" in commentless
and ")" not in commentless
):
import_index = index
starting_line = line
while "isort:skip" in starting_line or "isort: skip" in starting_line:
commentless = starting_line.split("#", 1)[0]
if (
"(" in commentless
and not commentless.rstrip().endswith(")")
and import_index < line_count
):
while import_index < line_count and not commentless.rstrip().endswith(
")"
):
commentless = in_lines[import_index].split("#", 1)[0]
import_index += 1
else:
import_index += 1
if import_index >= line_count:
break
starting_line = in_lines[import_index]
line, *end_of_line_comment = line.split("#", 1)
if ";" in line:
statements = [line.strip() for line in line.split(";")]
else:
statements = [line]
if end_of_line_comment:
statements[-1] = f"{statements[-1]}#{end_of_line_comment[0]}"
for statement in statements:
line, raw_line = normalize_line(statement)
type_of_import = import_type(line, config) or ""
raw_lines = [raw_line]
if not type_of_import:
out_lines.append(raw_line)
continue
if import_index == -1:
import_index = index - 1
nested_comments = {}
import_string, comment = parse_comments(line)
comments = [comment] if comment else []
line_parts = [part for part in strip_syntax(import_string).strip().split(" ") if part]
if type_of_import == "from" and len(line_parts) == 2 and comments:
nested_comments[line_parts[-1]] = comments[0]
if "(" in line.split("#", 1)[0] and index < line_count:
while not line.split("#")[0].strip().endswith(")") and index < line_count:
line, new_comment = parse_comments(in_lines[index])
index += 1
if new_comment:
comments.append(new_comment)
stripped_line = strip_syntax(line).strip()
if (
type_of_import == "from"
and stripped_line
and " " not in stripped_line.replace(" as ", "")
and new_comment
):
nested_comments[stripped_line] = comments[-1]
import_string += line_separator + line
raw_lines.append(line)
else:
while line.strip().endswith("\\"):
line, new_comment = parse_comments(in_lines[index])
line = line.lstrip()
index += 1
if new_comment:
comments.append(new_comment)
# Still need to check for parentheses after an escaped line
if (
"(" in line.split("#")[0]
and ")" not in line.split("#")[0]
and index < line_count
):
stripped_line = strip_syntax(line).strip()
if (
type_of_import == "from"
and stripped_line
and " " not in stripped_line.replace(" as ", "")
and new_comment
):
nested_comments[stripped_line] = comments[-1]
import_string += line_separator + line
raw_lines.append(line)
while not line.split("#")[0].strip().endswith(")") and index < line_count:
line, new_comment = parse_comments(in_lines[index])
index += 1
if new_comment:
comments.append(new_comment)
stripped_line = strip_syntax(line).strip()
if (
type_of_import == "from"
and stripped_line
and " " not in stripped_line.replace(" as ", "")
and new_comment
):
nested_comments[stripped_line] = comments[-1]
import_string += line_separator + line
raw_lines.append(line)
stripped_line = strip_syntax(line).strip()
if (
type_of_import == "from"
and stripped_line
and " " not in stripped_line.replace(" as ", "")
and new_comment
):
nested_comments[stripped_line] = comments[-1]
if import_string.strip().endswith(
(" import", " cimport")
) or line.strip().startswith(("import ", "cimport ")):
import_string += line_separator + line
else:
import_string = import_string.rstrip().rstrip("\\") + " " + line.lstrip()
if type_of_import == "from":
cimports: bool
import_string = (
import_string.replace("import(", "import (")
.replace("\\", " ")
.replace("\n", " ")
)
if "import " not in import_string:
out_lines.extend(raw_lines)
continue
if " cimport " in import_string:
parts = import_string.split(" cimport ")
cimports = True
else:
parts = import_string.split(" import ")
cimports = False
from_import = parts[0].split(" ")
import_string = (" cimport " if cimports else " import ").join(
[from_import[0] + " " + "".join(from_import[1:]), *parts[1:]]
)
just_imports = [
item.replace("{|", "{ ").replace("|}", " }")
for item in strip_syntax(import_string).split()
]
attach_comments_to: list[Any] | None = None
direct_imports = just_imports[1:]
straight_import = True
top_level_module = ""
if "as" in just_imports and (just_imports.index("as") + 1) < len(just_imports):
straight_import = False
while "as" in just_imports:
nested_module = None
as_index = just_imports.index("as")
if type_of_import == "from":
nested_module = just_imports[as_index - 1]
top_level_module = just_imports[0]
module = top_level_module + "." + nested_module
as_name = just_imports[as_index + 1]
direct_imports.remove(nested_module)
direct_imports.remove(as_name)
direct_imports.remove("as")
if nested_module == as_name and config.remove_redundant_aliases:
pass
elif as_name not in as_map["from"][module]: # pragma: no branch
as_map["from"][module].append(as_name)
full_name = f"{nested_module} as {as_name}"
associated_comment = nested_comments.get(full_name)
if associated_comment:
categorized_comments["nested"].setdefault(top_level_module, {})[
full_name
] = associated_comment
if associated_comment in comments: # pragma: no branch
comments.pop(comments.index(associated_comment))
else:
module = just_imports[as_index - 1]
as_name = just_imports[as_index + 1]
if module == as_name and config.remove_redundant_aliases:
pass
elif as_name not in as_map["straight"][module]:
as_map["straight"][module].append(as_name)
if comments and attach_comments_to is None:
if nested_module and config.combine_as_imports:
attach_comments_to = categorized_comments["from"].setdefault(
f"{top_level_module}.__combined_as__", []
)
else:
if type_of_import == "from" or (
config.remove_redundant_aliases and as_name == module.split(".")[-1]
):
attach_comments_to = categorized_comments["straight"].setdefault(
module, []
)
else:
attach_comments_to = categorized_comments["straight"].setdefault(
f"{module} as {as_name}", []
)
del just_imports[as_index : as_index + 2]
if type_of_import == "from":
import_from = just_imports.pop(0)
placed_module = finder(import_from)
if config.verbose and not config.only_modified:
print(f"from-type place_module for {import_from} returned {placed_module}")
elif config.verbose:
verbose_output.append(
f"from-type place_module for {import_from} returned {placed_module}"
)
if placed_module == "":
warn(
f"could not place module {import_from} of line {line} --"
" Do you need to define a default section?",
stacklevel=2,
)
if placed_module and placed_module not in imports:
raise MissingSection(import_module=import_from, section=placed_module)
root = imports[placed_module][type_of_import]
for import_name in just_imports:
associated_comment = nested_comments.get(import_name)
if associated_comment:
categorized_comments["nested"].setdefault(import_from, {})[import_name] = (
associated_comment
)
if associated_comment in comments: # pragma: no branch
comments.pop(comments.index(associated_comment))
if (
config.force_single_line
and comments
and attach_comments_to is None
and len(just_imports) == 1
):
nested_from_comments = categorized_comments["nested"].setdefault(
import_from, {}
)
existing_comment = nested_from_comments.get(just_imports[0], "")
nested_from_comments[just_imports[0]] = (
f"{existing_comment}{'; ' if existing_comment else ''}{'; '.join(comments)}"
)
comments = []
if comments and attach_comments_to is None:
attach_comments_to = categorized_comments["from"].setdefault(import_from, [])
if len(out_lines) > max(import_index, 1) - 1:
last = out_lines[-1].rstrip() if out_lines else ""
while (
last.startswith("#")
and not last.endswith('"""')
and not last.endswith("'''")
and "isort:imports-" not in last
and "isort: imports-" not in last
and not config.treat_all_comments_as_code
and last.strip() not in config.treat_comments_as_code
):
categorized_comments["above"]["from"].setdefault(import_from, []).insert(
0, out_lines.pop(-1)
)
if out_lines:
last = out_lines[-1].rstrip()
else:
last = ""
if statement_index - 1 == import_index: # pragma: no cover
import_index -= len(
categorized_comments["above"]["from"].get(import_from, [])
)
if import_from not in root:
root[import_from] = OrderedDict(
(module, module in direct_imports) for module in just_imports
)
else:
root[import_from].update(
(module, root[import_from].get(module, False) or module in direct_imports)
for module in just_imports
)
if comments and attach_comments_to is not None:
attach_comments_to.extend(comments)
if (
just_imports
and just_imports[-1]
and "," in import_string.split(just_imports[-1])[-1]
):
trailing_commas.add(import_from)
else:
if comments and attach_comments_to is not None:
attach_comments_to.extend(comments)
comments = []
for module in just_imports:
if comments:
categorized_comments["straight"][module] = comments
comments = []
if len(out_lines) > max(import_index, +1, 1) - 1:
last = out_lines[-1].rstrip() if out_lines else ""
while (
last.startswith("#")
and not last.endswith('"""')
and not last.endswith("'''")
and "isort:imports-" not in last
and "isort: imports-" not in last
and not config.treat_all_comments_as_code
and last.strip() not in config.treat_comments_as_code
):
categorized_comments["above"]["straight"].setdefault(module, []).insert(
0, out_lines.pop(-1)
)
if out_lines:
last = out_lines[-1].rstrip()
else:
last = ""
if index - 1 == import_index:
import_index -= len(
categorized_comments["above"]["straight"].get(module, [])
)
placed_module = finder(module)
if config.verbose and not config.only_modified:
print(f"else-type place_module for {module} returned {placed_module}")
elif config.verbose:
verbose_output.append(
f"else-type place_module for {module} returned {placed_module}"
)
if placed_module == "":
warn(
f"could not place module {module} of line {line} --"
" Do you need to define a default section?",
stacklevel=2,
)
imports.setdefault("", {"straight": OrderedDict(), "from": OrderedDict()})
if placed_module and placed_module not in imports:
raise MissingSection(import_module=module, section=placed_module)
straight_import |= imports[placed_module][type_of_import].get(module, False)
imports[placed_module][type_of_import][module] = straight_import
change_count = len(out_lines) - original_line_count
return ParsedContent(
in_lines=in_lines,
lines_without_imports=out_lines,
import_index=import_index,
place_imports=place_imports,
import_placements=import_placements,
as_map=as_map,
imports=imports,
categorized_comments=categorized_comments,
change_count=change_count,
original_line_count=original_line_count,
line_separator=line_separator,
sections=config.sections,
verbose_output=verbose_output,
trailing_commas=trailing_commas,
)
| ParsedContent |
python | doocs__leetcode | solution/1500-1599/1533.Find the Index of the Large Integer/Solution.py | {
"start": 503,
"end": 1051
} | class ____:
def getIndex(self, reader: 'ArrayReader') -> int:
left, right = 0, reader.length() - 1
while left < right:
t1, t2, t3 = (
left,
left + (right - left) // 3,
left + ((right - left) // 3) * 2 + 1,
)
cmp = reader.compareSub(t1, t2, t2 + 1, t3)
if cmp == 0:
left = t3 + 1
elif cmp == 1:
right = t2
else:
left, right = t2 + 1, t3
return left
| Solution |
python | networkx__networkx | networkx/classes/tests/test_multigraph.py | {
"start": 5848,
"end": 14303
} | class ____(BaseMultiGraphTester, _TestGraph):
    """Tests for nx.MultiGraph: keyed parallel edges, constructors, mutation.

    Inherits the shared graph test suites and overrides/extends the cases
    whose expected adjacency shape differs for multigraphs (an extra key
    level: ``adj[u][v][key] -> data``).
    """

    def setup_method(self):
        """Build a fresh hand-wired K3 multigraph fixture before each test."""
        self.Graph = nx.MultiGraph
        # build K3
        # Each keyed edge-data dict is shared by both directions of the
        # undirected adjacency, mirroring MultiGraph's internal storage.
        ed1, ed2, ed3 = ({0: {}}, {0: {}}, {0: {}})
        self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed1, 2: ed3}, 2: {0: ed2, 1: ed3}}
        self.k3edges = [(0, 1), (0, 2), (1, 2)]
        self.k3nodes = [0, 1, 2]
        self.K3 = self.Graph()
        self.K3._adj = self.k3adj
        self.K3._node = {}
        self.K3._node[0] = {}
        self.K3._node[1] = {}
        self.K3._node[2] = {}

    def test_data_input(self):
        """Dict-of-lists input becomes keyed (key 0) parallel-edge adjacency."""
        G = self.Graph({1: [2], 2: [1]}, name="test")
        assert G.name == "test"
        expected = [(1, {2: {0: {}}}), (2, {1: {0: {}}})]
        assert sorted(G.adj.items()) == expected

    def test_data_multigraph_input(self):
        """multigraph_input True/None reads dict-of-dict-of-keydicts directly;
        False treats the inner keydict as a single edge's data dict."""
        # standard case with edge keys and edge data
        edata0 = {"w": 200, "s": "foo"}
        edata1 = {"w": 201, "s": "bar"}
        keydict = {0: edata0, 1: edata1}
        dododod = {"a": {"b": keydict}}
        multiple_edge = [("a", "b", 0, edata0), ("a", "b", 1, edata1)]
        single_edge = [("a", "b", 0, keydict)]
        G = self.Graph(dododod, multigraph_input=True)
        assert list(G.edges(keys=True, data=True)) == multiple_edge
        # None means "auto-detect"; this input shape detects as multigraph.
        G = self.Graph(dododod, multigraph_input=None)
        assert list(G.edges(keys=True, data=True)) == multiple_edge
        G = self.Graph(dododod, multigraph_input=False)
        assert list(G.edges(keys=True, data=True)) == single_edge

        # test round-trip to_dict_of_dict and MultiGraph constructor
        G = self.Graph(dododod, multigraph_input=True)
        H = self.Graph(nx.to_dict_of_dicts(G))
        assert nx.is_isomorphic(G, H) is True  # test that default is True
        for mgi in [True, False]:
            H = self.Graph(nx.to_dict_of_dicts(G), multigraph_input=mgi)
            # Only the multigraph interpretation round-trips isomorphically.
            assert nx.is_isomorphic(G, H) == mgi

    # Set up cases for when incoming_graph_data is not multigraph_input.
    # These are class-level so they can feed @pytest.mark.parametrize below.
    etraits = {"w": 200, "s": "foo"}
    egraphics = {"color": "blue", "shape": "box"}
    edata = {"traits": etraits, "graphics": egraphics}
    dodod1 = {"a": {"b": edata}}
    dodod2 = {"a": {"b": etraits}}
    dodod3 = {"a": {"b": {"traits": etraits, "s": "foo"}}}
    dol = {"a": ["b"]}
    multiple_edge = [("a", "b", "traits", etraits), ("a", "b", "graphics", egraphics)]
    single_edge = [("a", "b", 0, {})]  # type: ignore[var-annotated]
    single_edge1 = [("a", "b", 0, edata)]
    single_edge2 = [("a", "b", 0, etraits)]
    single_edge3 = [("a", "b", 0, {"traits": etraits, "s": "foo"})]
    cases = [  # (dod, mgi, edges)
        (dodod1, True, multiple_edge),
        (dodod1, False, single_edge1),
        (dodod2, False, single_edge2),
        (dodod3, False, single_edge3),
        (dol, False, single_edge),
    ]

    @pytest.mark.parametrize("dod, mgi, edges", cases)
    def test_non_multigraph_input(self, dod, mgi, edges):
        """Constructor and to_networkx_graph agree on the resulting edges."""
        G = self.Graph(dod, multigraph_input=mgi)
        assert list(G.edges(keys=True, data=True)) == edges
        G = nx.to_networkx_graph(dod, create_using=self.Graph, multigraph_input=mgi)
        assert list(G.edges(keys=True, data=True)) == edges

    mgi_none_cases = [
        (dodod1, multiple_edge),
        (dodod2, single_edge2),
        (dodod3, single_edge3),
    ]

    @pytest.mark.parametrize("dod, edges", mgi_none_cases)
    def test_non_multigraph_input_mgi_none(self, dod, edges):
        # test constructor without to_networkx_graph for mgi=None
        G = self.Graph(dod)
        assert list(G.edges(keys=True, data=True)) == edges

    # Inputs that cannot be interpreted as multigraph dict-of-dict-of-keydicts.
    raise_cases = [dodod2, dodod3, dol]

    @pytest.mark.parametrize("dod", raise_cases)
    def test_non_multigraph_input_raise(self, dod):
        # cases where NetworkXError is raised
        pytest.raises(nx.NetworkXError, self.Graph, dod, multigraph_input=True)
        pytest.raises(
            nx.NetworkXError,
            nx.to_networkx_graph,
            dod,
            create_using=self.Graph,
            multigraph_input=True,
        )

    def test_getitem(self):
        """G[node] returns the keyed neighbor dict; bad lookups raise."""
        G = self.K3
        assert G[0] == {1: {0: {}}, 2: {0: {}}}
        with pytest.raises(KeyError):
            G.__getitem__("j")
        with pytest.raises(TypeError):
            # Unhashable node (a list) raises TypeError, not KeyError.
            G.__getitem__(["A"])

    def test_remove_node(self):
        """Removing a node drops all incident edges; a missing node raises."""
        G = self.K3
        G.remove_node(0)
        assert G.adj == {1: {2: {0: {}}}, 2: {1: {0: {}}}}
        with pytest.raises(nx.NetworkXError):
            G.remove_node(-1)

    def test_add_edge(self):
        """add_edge stores data under auto key 0 in both directions."""
        G = self.Graph()
        G.add_edge(0, 1)
        assert G.adj == {0: {1: {0: {}}}, 1: {0: {0: {}}}}
        G = self.Graph()
        G.add_edge(*(0, 1))
        assert G.adj == {0: {1: {0: {}}}, 1: {0: {0: {}}}}
        G = self.Graph()
        with pytest.raises(ValueError):
            # None is not allowed as a node.
            G.add_edge(None, "anything")

    def test_add_edge_conflicting_key(self):
        """An auto-generated key must not clobber an existing explicit key."""
        G = self.Graph()
        G.add_edge(0, 1, key=1)
        G.add_edge(0, 1)
        assert G.number_of_edges() == 2
        G = self.Graph()
        G.add_edges_from([(0, 1, 1, {})])
        G.add_edges_from([(0, 1)])
        assert G.number_of_edges() == 2

    def test_add_edges_from(self):
        """add_edges_from accepts 2/3/4-tuples; a 3rd item may be edge data
        (dict or key/value pairs) or an explicit edge key."""
        G = self.Graph()
        G.add_edges_from([(0, 1), (0, 1, {"weight": 3})])
        assert G.adj == {
            0: {1: {0: {}, 1: {"weight": 3}}},
            1: {0: {0: {}, 1: {"weight": 3}}},
        }
        # Keyword attrs apply to every edge; per-edge data overrides them.
        G.add_edges_from([(0, 1), (0, 1, {"weight": 3})], weight=2)
        assert G.adj == {
            0: {1: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}},
            1: {0: {0: {}, 1: {"weight": 3}, 2: {"weight": 2}, 3: {"weight": 3}}},
        }
        G = self.Graph()
        edges = [
            (0, 1, {"weight": 3}),
            (0, 1, (("weight", 2),)),  # data as an iterable of key/value pairs
            (0, 1, 5),                 # 3rd item is a key, not data
            (0, 1, "s"),               # keys may be non-integers
        ]
        G.add_edges_from(edges)
        keydict = {0: {"weight": 3}, 1: {"weight": 2}, 5: {}, "s": {}}
        assert G._adj == {0: {1: keydict}, 1: {0: keydict}}

        # too few in tuple
        with pytest.raises(nx.NetworkXError):
            G.add_edges_from([(0,)])
        # too many in tuple
        with pytest.raises(nx.NetworkXError):
            G.add_edges_from([(0, 1, 2, 3, 4)])
        # not a tuple
        with pytest.raises(TypeError):
            G.add_edges_from([0])

    def test_multigraph_add_edges_from_four_tuple_misordered(self):
        """add_edges_from expects 4-tuples of the format (u, v, key, data_dict).

        Ensure 4-tuples of form (u, v, data_dict, key) raise exception.
        """
        G = nx.MultiGraph()
        with pytest.raises(TypeError):
            # key/data values flipped in 4-tuple
            G.add_edges_from([(0, 1, {"color": "red"}, 0)])

    def test_remove_edge(self):
        """remove_edge without a key removes one parallel edge; bad node or
        missing key raises NetworkXError."""
        G = self.K3
        G.remove_edge(0, 1)
        assert G.adj == {0: {2: {0: {}}}, 1: {2: {0: {}}}, 2: {0: {0: {}}, 1: {0: {}}}}
        with pytest.raises(nx.NetworkXError):
            G.remove_edge(-1, 0)
        with pytest.raises(nx.NetworkXError):
            G.remove_edge(0, 2, key=1)

    def test_remove_edges_from(self):
        """remove_edges_from accepts 2/3/4-tuples and ignores absent edges."""
        G = self.K3.copy()
        G.remove_edges_from([(0, 1)])
        kd = {0: {}}
        assert G.adj == {0: {2: kd}, 1: {2: kd}, 2: {0: kd, 1: kd}}
        G.remove_edges_from([(0, 0)])  # silent fail
        # K3 gains a parallel (0, 1) edge; the copies below include it.
        self.K3.add_edge(0, 1)
        G = self.K3.copy()
        G.remove_edges_from(list(G.edges(data=True, keys=True)))
        assert G.adj == {0: {}, 1: {}, 2: {}}
        G = self.K3.copy()
        G.remove_edges_from(list(G.edges(data=False, keys=True)))
        assert G.adj == {0: {}, 1: {}, 2: {}}
        G = self.K3.copy()
        G.remove_edges_from(list(G.edges(data=False, keys=False)))
        assert G.adj == {0: {}, 1: {}, 2: {}}
        G = self.K3.copy()
        # Keyless (1, 2) removes one parallel edge; keyed forms are exact.
        G.remove_edges_from([(0, 1, 0), (0, 2, 0, {}), (1, 2)])
        assert G.adj == {0: {1: {1: {}}}, 1: {0: {1: {}}}, 2: {}}

    def test_remove_multiedge(self):
        """Keyed removal deletes exactly that parallel edge; keyless removes one."""
        G = self.K3
        G.add_edge(0, 1, key="parallel edge")
        G.remove_edge(0, 1, key="parallel edge")
        assert G.adj == {
            0: {1: {0: {}}, 2: {0: {}}},
            1: {0: {0: {}}, 2: {0: {}}},
            2: {0: {0: {}}, 1: {0: {}}},
        }
        G.remove_edge(0, 1)
        kd = {0: {}}
        assert G.adj == {0: {2: kd}, 1: {2: kd}, 2: {0: kd, 1: kd}}
        with pytest.raises(nx.NetworkXError):
            G.remove_edge(-1, 0)
| TestMultiGraph |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.