language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
scikit-learn__scikit-learn
|
sklearn/utils/tests/test_estimator_checks.py
|
{
"start": 3682,
"end": 3848
}
|
class ____(ClassifierMixin, BaseEstimator):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
|
BaseBadClassifier
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/api/sensor.py
|
{
"start": 428,
"end": 1705
}
|
class ____:
"""API for sensor operations."""
client: IGraphQLClient
def list_sensors(
self,
repository_location_name: Optional[str] = None,
repository_name: Optional[str] = None,
) -> "DgApiSensorList":
"""List all sensors, optionally filtered by repository location and name."""
return list_sensors_via_graphql(
self.client,
repository_location_name=repository_location_name,
repository_name=repository_name,
)
def get_sensor(
self,
sensor_name: str,
repository_location_name: str,
repository_name: str,
) -> "DgApiSensor":
"""Get sensor by name and repository details."""
return get_sensor_via_graphql(
self.client,
sensor_name=sensor_name,
repository_location_name=repository_location_name,
repository_name=repository_name,
)
def get_sensor_by_name(self, sensor_name: str) -> "DgApiSensor":
"""Get sensor by name, searching across all repositories."""
from dagster_dg_cli.api_layer.graphql_adapter.sensor import get_sensor_by_name_via_graphql
return get_sensor_by_name_via_graphql(self.client, sensor_name=sensor_name)
|
DgApiSensorApi
|
python
|
getsentry__sentry
|
tests/tools/mypy_helpers/test_plugin.py
|
{
"start": 6856,
"end": 8479
}
|
class ____(Service):
X = "hello world"
def f(self) -> int:
return 5
backend = LazyServiceWrapper(MyService, "some.path", {})
# should proxy attributes properly
assert_type(backend.X, str)
assert_type(backend.f(), int)
# should represent self types properly
assert_type(backend._backend, str)
assert_type(backend._wrapped, _EmptyType | MyService)
"""
expected = """\
<string>:12: error: Expression is of type "Any", not "str" [assert-type]
<string>:13: error: Expression is of type "Any", not "int" [assert-type]
Found 2 errors in 1 file (checked 1 source file)
"""
ret, out = call_mypy(src, plugins=[])
assert ret
assert out == expected
ret, out = call_mypy(src)
assert ret == 0
def test_base_cache_adjusted_version_type() -> None:
src = """\
from django.core.cache import cache
cache.set(key='123', value='456', version='deadbeef')
"""
expected = """\
<string>:3: error: Argument "version" to "set" of "BaseCache" has incompatible type "str"; expected "int | None" [arg-type]
Found 1 error in 1 file (checked 1 source file)
"""
ret, out = call_mypy(src, plugins=[])
assert ret
assert out == expected
ret, out = call_mypy(src)
assert ret == 0
def test_base_cache_incr_decr_version_removed() -> None:
src = """\
from django.core.cache import cache
cache.incr_version('123')
"""
expected = """\
<string>:3: error: removed method [misc]
Found 1 error in 1 file (checked 1 source file)
"""
ret, out = call_mypy(src, plugins=[])
assert ret == 0
ret, out = call_mypy(src)
assert ret
assert out == expected
|
MyService
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/user_defined.py
|
{
"start": 73064,
"end": 77305
}
|
class ____(UserDefinedObjectVariable):
class HashWrapper:
"""This class is hashed if a dataclass is used as a key in a dict.
It's necessary to avoid side effects from calling the __init__ of the dataclass class when hashing"""
def __init__(self, c, fields):
self.cls = c
self.fields = tuple(fields.items())
def __eq__(self, other):
return (
type(self) is type(other)
and self.cls == other.cls
and self.fields == other.fields
)
def __hash__(self):
return hash((self.cls, self.fields))
@staticmethod
def create(tx, value, source):
from dataclasses import fields
assert is_frozen_dataclass(value)
field_map = {}
for field in fields(value):
if hasattr(value, field.name):
field_map[field.name] = VariableTracker.build(
tx,
getattr(value, field.name),
source and AttrSource(source, field.name),
)
return FrozenDataClassVariable(value, fields=field_map, source=source)
def __init__(self, value, fields=None, **kwargs) -> None:
super().__init__(value, **kwargs)
if fields is None:
fields = {}
self.fields = fields
def as_python_constant(self):
# NOTE: this is an intentionally limited version of
# `as_python_constant` for `nonstrict_trace` implementation.
from dataclasses import fields
import torch.utils._pytree as pytree
if not istype(
self.value, (pytree.TreeSpec, pytree.LeafSpec, pytree.ConstantNode)
):
# TODO loosen this restriction and fix `as_proxy`.
raise NotImplementedError(
"currently can't reconstruct arbitrary frozen dataclass instances"
)
args = []
kwargs = {}
for field in fields(self.value):
if field.init:
data = self.fields[field.name].as_python_constant()
if getattr(field, "kw_only", False):
kwargs[field.name] = data
else:
args.append(data)
# This is safe because we know the TreeSpec classes constructors don't
# have external side effects.
ctor = self.python_type()
return ctor(*args, **kwargs)
def as_proxy(self):
from dataclasses import fields
args = []
kwargs = {}
for field in fields(self.value):
proxy = self.fields[field.name].as_proxy()
if hasattr(field, "kw_only") and field.kw_only:
kwargs[field.name] = proxy
else:
args.append(proxy)
# TODO this isn't really safe, because
# 1. it could invoke a user defined `__post_init__`.
# 2. it could invoke a user defined `__init__` if the class _subclasses_
# a frozen dataclass.
# Either of the above could end up mutating external state.
ctor = self.python_type()
return ctor(*args, **kwargs)
def reconstruct(self, codegen: "PyCodegen") -> None:
# Handle specific pytree classes
import torch.utils._pytree as pytree
if isinstance(self.value, pytree.TreeSpec) and self.value.is_leaf():
# Create a new LeafSpec instance by calling the constructor
codegen.add_push_null(
lambda: codegen.load_import_from("torch.utils._pytree", "LeafSpec")
)
codegen.extend_output(create_call_function(0, False))
return
# For other frozen dataclasses, fall back to the base class behavior
super().reconstruct(codegen)
# NB: This is called during __init__ for a frozen dataclass
# use this to accumulate the most up-to-date field values
def method_setattr_standard(self, tx: "InstructionTranslator", name, value):
self.fields[name.as_python_constant()] = value
return super().method_setattr_standard(tx, name, value)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.value_type.__name__})"
|
FrozenDataClassVariable
|
python
|
huggingface__transformers
|
src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py
|
{
"start": 10915,
"end": 14176
}
|
class ____(Ernie4_5_MoePreTrainedModel):
def __init__(self, config: Ernie4_5_MoeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Ernie4_5_MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Ernie4_5_MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Ernie4_5_MoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
position_embeddings=position_embeddings,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
|
Ernie4_5_MoeModel
|
python
|
pallets__werkzeug
|
src/werkzeug/exceptions.py
|
{
"start": 13819,
"end": 14206
}
|
class ____(HTTPException):
"""*410* `Gone`
Raise if a resource existed previously and went away without new location.
"""
code = 410
description = (
"The requested URL is no longer available on this server and"
" there is no forwarding address. If you followed a link from a"
" foreign page, please contact the author of this page."
)
|
Gone
|
python
|
pyparsing__pyparsing
|
pyparsing/diagram/__init__.py
|
{
"start": 2389,
"end": 2983
}
|
class ____(railroad.Group):
"""
Custom railroad item to compose a:
- :class:`railroad.Group` containing a
- :class:`railroad.OneOrMore` containing a
- :class:`railroad.Choice` of the elements in the
:class:`railroad.Each`
with the group label indicating that all must be matched
"""
all_label = "[ALL]"
def __init__(self, *items) -> None:
choice_item = railroad.Choice(len(items) - 1, *items)
one_or_more_item = railroad.OneOrMore(item=choice_item)
super().__init__(one_or_more_item, label=self.all_label)
|
EachItem
|
python
|
bokeh__bokeh
|
src/bokeh/document/events.py
|
{
"start": 12691,
"end": 15785
}
|
class ____(DocumentPatchedEvent):
''' A concrete event representing efficiently replacing *all*
existing data for a :class:`~bokeh.models.sources.ColumnDataSource`
'''
kind = "ColumnDataChanged"
def __init__(self, document: Document, model: Model, attr: str, data: DataDict | None = None,
cols: list[str] | None = None, setter: Setter | None = None, callback_invoker: Invoker | None = None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
column_source (ColumnDataSource) :
cols (list[str]) :
optional explicit list of column names to update. If None, all
columns will be updated (default: None)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super().__init__(document, setter, callback_invoker)
self.model = model
self.attr = attr
self.data = data
self.cols = cols
def dispatch(self, receiver: Any) -> None:
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._column_data_changed`` if it exists.
'''
super().dispatch(receiver)
if hasattr(receiver, '_column_data_changed'):
cast(ColumnDataChangedMixin, receiver)._column_data_changed(self)
def to_serializable(self, serializer: Serializer) -> ColumnDataChanged:
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'ColumnDataChanged'
'column_source' : <reference to a CDS>
'data' : <new data to steam to column_source>
'cols' : <specific columns to update>
}
Args:
serializer (Serializer):
'''
data = self.data if self.data is not None else getattr(self.model, self.attr)
cols = self.cols
if cols is not None:
data = {col: value for col in cols if (value := data.get(col)) is not None}
return ColumnDataChanged(
kind = self.kind,
model = self.model.ref,
attr = self.attr,
data = serializer.encode(data),
cols = serializer.encode(cols),
)
@staticmethod
def _handle_event(doc: Document, event: ColumnDataChangedEvent) -> None:
model = event.model
attr = event.attr
data = event.data
model.set_from_json(attr, data, setter=event.setter)
|
ColumnDataChangedEvent
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/destination-astra/destination_astra/indexer.py
|
{
"start": 785,
"end": 3968
}
|
class ____(Indexer):
config: AstraIndexingModel
def __init__(self, config: AstraIndexingModel, embedding_dimensions: int):
super().__init__(config)
self.client = AstraClient(
config.astra_db_endpoint, config.astra_db_app_token, config.astra_db_keyspace, embedding_dimensions, "cosine"
)
self.embedding_dimensions = embedding_dimensions
def _create_collection(self):
if self.client.find_collection(self.config.collection) is False:
self.client.create_collection(self.config.collection)
def pre_sync(self, catalog: ConfiguredAirbyteCatalog):
self._create_collection()
for stream in catalog.streams:
if stream.destination_sync_mode == DestinationSyncMode.overwrite:
self.client.delete_documents(
collection_name=self.config.collection, filter={METADATA_STREAM_FIELD: create_stream_identifier(stream.stream)}
)
def index(self, document_chunks, namespace, stream):
docs = []
for i in range(len(document_chunks)):
chunk = document_chunks[i]
metadata = chunk.metadata
if chunk.page_content is not None:
metadata["text"] = chunk.page_content
doc = {
"_id": str(uuid.uuid4()),
"$vector": chunk.embedding,
**metadata,
}
docs.append(doc)
serial_batches = create_chunks(docs, batch_size=PARALLELISM_LIMIT)
for batch in serial_batches:
results = [chunk for chunk in batch]
self.client.insert_documents(collection_name=self.config.collection, documents=results)
def delete(self, delete_ids, namespace, stream):
if len(delete_ids) > 0:
self.client.delete_documents(collection_name=self.config.collection, filter={METADATA_RECORD_ID_FIELD: {"$in": delete_ids}})
def check(self) -> Optional[str]:
try:
self._create_collection()
collections = self.client.find_collections()
collection = next(filter(lambda f: f["name"] == self.config.collection, collections), None)
if collection is None:
return f"{self.config.collection} collection does not exist."
actual_dimension = collection["options"]["vector"]["dimension"]
if actual_dimension != self.embedding_dimensions:
return f"Your embedding configuration will produce vectors with dimension {self.embedding_dimensions:d}, but your collection is configured with dimension {actual_dimension:d}. Make sure embedding and indexing configurations match."
except Exception as e:
if isinstance(e, urllib3.exceptions.MaxRetryError):
if "Failed to resolve 'apps.astra.datastax.com'" in str(e.reason):
return "Failed to resolve environment, please check whether the credential is correct."
if isinstance(e, urllib3.exceptions.HTTPError):
return str(e)
formatted_exception = format_exception(e)
return formatted_exception
return None
|
AstraIndexer
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-shopify/source_shopify/streams/base_streams.py
|
{
"start": 1260,
"end": 5137
}
|
class ____(HttpStream, ABC):
# define default logger
logger = logging.getLogger("airbyte")
# Latest Stable Release
api_version = "2025-01"
# Page size
limit = 250
primary_key = "id"
order_field = "updated_at"
filter_field = "updated_at_min"
def __init__(self, config: Dict) -> None:
super().__init__(authenticator=config["authenticator"])
self._transformer = DataTypeEnforcer(self.get_json_schema())
self.config = config
@property
@abstractmethod
def data_field(self) -> str:
"""The name of the field in the response which contains the data"""
@property
def url_base(self) -> str:
return f"https://{self.config['shop']}.myshopify.com/admin/api/{self.api_version}/"
@property
def default_filter_field_value(self) -> Union[int, str]:
# certain streams are using `since_id` field as `filter_field`, which requires to use `int` type,
# but many other use `str` values for this, we determine what to use based on `filter_field` value
# by default, we use the user defined `Start Date` as initial value, or 0 for `id`-dependent streams.
return 0 if self.filter_field == "since_id" else (self.config.get("start_date") or "")
def path(self, **kwargs) -> str:
return f"{self.data_field}.json"
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
next_page = response.links.get("next", None)
if next_page:
return dict(parse_qsl(urlparse(next_page.get("url")).query))
else:
return None
def request_params(self, next_page_token: Optional[Mapping[str, Any]] = None, **kwargs) -> MutableMapping[str, Any]:
params = {"limit": self.limit}
if next_page_token:
params.update(**next_page_token)
else:
params["order"] = f"{self.order_field} asc"
params[self.filter_field] = self.default_filter_field_value
return params
@limiter.balance_rate_limit()
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
if response.status_code is requests.codes.OK:
try:
json_response = response.json()
records = json_response.get(self.data_field, []) if self.data_field is not None else json_response
yield from self.produce_records(records)
except RequestException as e:
self.logger.warning(f"Unexpected error in `parse_response`: {e}, the actual response data: {response.text}")
yield {}
def produce_records(
self, records: Optional[Union[Iterable[Mapping[str, Any]], Mapping[str, Any]]] = None
) -> Iterable[Mapping[str, Any]]:
# transform method was implemented according to issue 4841
# Shopify API returns price fields as a string and it should be converted to number
# this solution designed to convert string into number, but in future can be modified for general purpose
if isinstance(records, dict):
# for cases when we have a single record as dict
# add shop_url to the record to make querying easy
records["shop_url"] = self.config["shop"]
yield self._transformer.transform(records)
else:
# for other cases
for record in records:
# add shop_url to the record to make querying easy
record["shop_url"] = self.config["shop"]
yield self._transformer.transform(record)
def get_error_handler(self) -> Optional[ErrorHandler]:
known_errors = ShopifyNonRetryableErrors(self.name)
error_mapping = DEFAULT_ERROR_MAPPING | known_errors
return HttpStatusErrorHandler(self.logger, max_retries=5, error_mapping=error_mapping)
|
ShopifyStream
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/models/test_hparams.py
|
{
"start": 9512,
"end": 9654
}
|
class ____(BoringModel):
def __init__(self, batch_size=64):
super().__init__()
self.save_hyperparameters()
|
CustomBoringModel
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/core_api/datamodels/config.py
|
{
"start": 885,
"end": 1191
}
|
class ____(StrictBaseModel):
"""Config option."""
key: str
value: str | tuple[str, str]
@property
def text_format(self):
if isinstance(self.value, tuple):
return f"{self.key} = {self.value[0]} {self.value[1]}"
return f"{self.key} = {self.value}"
|
ConfigOption
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/context.py
|
{
"start": 117847,
"end": 118181
}
|
class ____(_ORMColumnEntity):
translate_raw_column = False
def setup_compile_state(self, compile_state):
pass
def row_processor(self, context, result):
def getter(row):
return context.load_options._identity_token
return getter, self._label_name, self._extra_entities
|
_IdentityTokenEntity
|
python
|
pytorch__pytorch
|
test/test_jit_disabled.py
|
{
"start": 1434,
"end": 1818
}
|
class ____(torch.jit.ScriptModule):
def __init__(self, x):
super().__init__()
self.x = torch.jit.Attribute(x, torch.Tensor)
def forward(self, input):
return input
s = Foo(torch.ones(2, 3))
print(s.x)
"""
self.compare_enabled_disabled(_program_string)
def test_script_module_construction(self):
_program_string = """
import torch
|
Foo
|
python
|
scipy__scipy
|
benchmarks/benchmarks/stats_sampling.py
|
{
"start": 2526,
"end": 3123
}
|
class ____:
def __init__(self):
self.mode = 0
def pdf(self, x):
return 0.2 * (0.05 + 0.45 * (1 + np.sin(2*np.pi*x)))
def dpdf(self, x):
return 0.2 * 0.45 * (2*np.pi) * np.cos(2*np.pi*x)
def cdf(self, x):
return x/10. + 0.5 + 0.09/(2*np.pi) * (np.cos(10*np.pi) -
np.cos(2*np.pi*x))
def support(self):
return -5, 5
def __repr__(self):
return 'sin10'
allcontdists = [contdist1(), contdist2(), contdist3(), contdist3(10000.),
contdist4(), contdist5()]
|
contdist5
|
python
|
doocs__leetcode
|
solution/3400-3499/3445.Maximum Difference Between Even and Odd Frequency II/Solution.py
|
{
"start": 0,
"end": 832
}
|
class ____:
def maxDifference(self, S: str, k: int) -> int:
s = list(map(int, S))
ans = -inf
for a in range(5):
for b in range(5):
if a == b:
continue
curA = curB = 0
preA = preB = 0
t = [[inf, inf], [inf, inf]]
l = -1
for r, x in enumerate(s):
curA += x == a
curB += x == b
while r - l >= k and curB - preB >= 2:
t[preA & 1][preB & 1] = min(t[preA & 1][preB & 1], preA - preB)
l += 1
preA += s[l] == a
preB += s[l] == b
ans = max(ans, curA - curB - t[curA & 1 ^ 1][curB & 1])
return ans
|
Solution
|
python
|
ansible__ansible
|
test/integration/targets/old_style_vars_plugins/roles/a/vars_plugins/auto_role_vars.py
|
{
"start": 86,
"end": 256
}
|
class ____(BaseVarsPlugin):
# Implicitly
# REQUIRES_ENABLED = False
def get_vars(self, loader, path, entities):
return {'auto_role_var': True}
|
VarsModule
|
python
|
django__django
|
tests/signing/tests.py
|
{
"start": 7892,
"end": 8695
}
|
class ____(SimpleTestCase):
def test_timestamp_signer(self):
value = "hello"
with freeze_time(123456789):
signer = signing.TimestampSigner(key="predictable-key")
ts = signer.sign(value)
self.assertNotEqual(ts, signing.Signer(key="predictable-key").sign(value))
self.assertEqual(signer.unsign(ts), value)
with freeze_time(123456800):
self.assertEqual(signer.unsign(ts, max_age=12), value)
# max_age parameter can also accept a datetime.timedelta object
self.assertEqual(
signer.unsign(ts, max_age=datetime.timedelta(seconds=11)), value
)
with self.assertRaises(signing.SignatureExpired):
signer.unsign(ts, max_age=10)
|
TestTimestampSigner
|
python
|
pypa__warehouse
|
warehouse/admin/flags.py
|
{
"start": 177,
"end": 814
}
|
class ____(enum.Enum):
DISABLE_ORGANIZATIONS = "disable-organizations"
DISABLE_PEP740 = "disable-pep740"
DISALLOW_DELETION = "disallow-deletion"
DISALLOW_NEW_PROJECT_REGISTRATION = "disallow-new-project-registration"
DISALLOW_NEW_UPLOAD = "disallow-new-upload"
DISALLOW_NEW_USER_REGISTRATION = "disallow-new-user-registration"
DISALLOW_OIDC = "disallow-oidc"
DISALLOW_GITHUB_OIDC = "disallow-github-oidc"
DISALLOW_GITLAB_OIDC = "disallow-gitlab-oidc"
DISALLOW_GOOGLE_OIDC = "disallow-google-oidc"
DISALLOW_ACTIVESTATE_OIDC = "disallow-activestate-oidc"
READ_ONLY = "read-only"
|
AdminFlagValue
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typedDictClosed3.py
|
{
"start": 1562,
"end": 1625
}
|
class ____(ParentClosed4):
b: NotRequired[int]
|
ChildClosed4_4
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_subquery_relations.py
|
{
"start": 108005,
"end": 111027
}
|
class ____(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
a2_id = Column(ForeignKey("a2.id"))
a2 = relationship("A2")
b = relationship("B")
class A2(Base):
__tablename__ = "a2"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
b = relationship("B")
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
c1_m2o_id = Column(ForeignKey("c1_m2o.id"))
c2_m2o_id = Column(ForeignKey("c2_m2o.id"))
c1_o2m = relationship("C1o2m")
c2_o2m = relationship("C2o2m")
c1_m2o = relationship("C1m2o")
c2_m2o = relationship("C2m2o")
class C1o2m(Base):
__tablename__ = "c1_o2m"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
class C2o2m(Base):
__tablename__ = "c2_o2m"
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey("b.id"))
class C1m2o(Base):
__tablename__ = "c1_m2o"
id = Column(Integer, primary_key=True)
class C2m2o(Base):
__tablename__ = "c2_m2o"
id = Column(Integer, primary_key=True)
@classmethod
def insert_data(cls, connection):
A, A2, B, C1o2m, C2o2m, C1m2o, C2m2o = cls.classes(
"A", "A2", "B", "C1o2m", "C2o2m", "C1m2o", "C2m2o"
)
s = Session(connection)
b = B(
c1_o2m=[C1o2m()], c2_o2m=[C2o2m()], c1_m2o=C1m2o(), c2_m2o=C2m2o()
)
s.add(A(b=b, a2=A2(b=b)))
s.commit()
def test_o2m(self):
A, A2, B, C1o2m, C2o2m = self.classes("A", "A2", "B", "C1o2m", "C2o2m")
s = fixture_session()
# A -J-> B -L-> C1
# A -J-> B -S-> C2
# A -J-> A2 -J-> B -S-> C1
# A -J-> A2 -J-> B -L-> C2
q = s.query(A).options(
joinedload(A.b).subqueryload(B.c2_o2m),
joinedload(A.a2).joinedload(A2.b).subqueryload(B.c1_o2m),
)
a1 = q.all()[0]
is_true("c1_o2m" in a1.b.__dict__)
is_true("c2_o2m" in a1.b.__dict__)
def test_m2o(self):
A, A2, B, C1m2o, C2m2o = self.classes("A", "A2", "B", "C1m2o", "C2m2o")
s = fixture_session()
# A -J-> B -L-> C1
# A -J-> B -S-> C2
# A -J-> A2 -J-> B -S-> C1
# A -J-> A2 -J-> B -L-> C2
q = s.query(A).options(
joinedload(A.b).subqueryload(B.c2_m2o),
joinedload(A.a2).joinedload(A2.b).subqueryload(B.c1_m2o),
)
a1 = q.all()[0]
is_true("c1_m2o" in a1.b.__dict__)
is_true("c2_m2o" in a1.b.__dict__)
|
TestExistingRowPopulation
|
python
|
pydantic__pydantic
|
tests/mypy/outputs/mypy-plugin_ini/plugin_strict_fields.py
|
{
"start": 1340,
"end": 1605
}
|
class ____(ModelStrictMode):
model_config = {'strict': False}
# expected error: b
ModelOverrideStrictMode(a='1', b='2', c='3')
# MYPY: error: Argument "b" to "ModelOverrideStrictMode" has incompatible type "str"; expected "int" [arg-type]
|
ModelOverrideStrictMode
|
python
|
pytorch__pytorch
|
torch/nn/modules/pooling.py
|
{
"start": 51461,
"end": 52019
}
|
class ____(Module):
__constants__ = ["output_size", "return_indices"]
return_indices: bool
def __init__(
self, output_size: _size_any_opt_t, return_indices: bool = False
) -> None:
super().__init__()
self.output_size = output_size
self.return_indices = return_indices
def extra_repr(self) -> str:
return f"output_size={self.output_size}"
# FIXME (by @ssnl): Improve adaptive pooling docs: specify what the input and
# output shapes are, and how the operation computes output.
|
_AdaptiveMaxPoolNd
|
python
|
google__jax
|
tests/api_test.py
|
{
"start": 237401,
"end": 241733
}
|
class ____(jtu.JaxTestCase):
def test_scalar_literals(self):
jaxpr = api.make_jaxpr(lambda x: x + 2)(42)
self.assertLen(jaxpr.jaxpr.constvars, 0)
def test_abstract_inputs(self):
jaxpr = api.make_jaxpr(lambda x: x + 2.)(
types.SimpleNamespace(shape=(), dtype=np.dtype(np.float32)))
self.assertEqual(jaxpr.in_avals[0].shape, ())
self.assertEqual(jaxpr.in_avals[0].dtype, np.float32)
def test_const(self):
def fun(x):
return (x, 1., np.zeros(1, dtype=jnp.float32))
dtype = "f64" if config.enable_x64.value else "f32"
if config.use_simplified_jaxpr_constants.value:
expected = f"{{ lambda ; a:f32[]. let in (a, 1.0:{dtype}[], [...]:f32[1]) }}"
else:
expected = f"{{ lambda a:f32[1]; b:f32[]. let in (b, 1.0:{dtype}[], a) }}"
jaxpr = api.make_jaxpr(fun)(jnp.float32(0.))
self.assertMultiLineStrippedEqual(expected, str(jaxpr))
@config.use_simplified_jaxpr_constants(True)
def test_non_scalar_const(self):
def fun(x):
return (x, np.zeros(3, dtype=jnp.float32))
expected = "{ lambda ; a:f32[]. let in (a, [...]:f32[3]) }"
jaxpr = api.make_jaxpr(fun)(jnp.float32(0.))
self.assertMultiLineStrippedEqual(expected, str(jaxpr))
def test_cond(self):
def f(x):
return lax.cond(x >= 0.,
x + 1.,
lambda xt: xt + x,
x + 2.,
lambda xf: xf - x)
expected = """{ lambda ; a:f32[]. let
b:bool[] = ge a 0.0:f32[]
c:f32[] = add a 1.0:f32[]
d:f32[] = add a 2.0:f32[]
e:i32[] = convert_element_type[new_dtype=int32 weak_type=False] b
f:f32[] = cond[
branches=(
{ lambda ; g:f32[] h:f32[] i:f32[]. let j:f32[] = sub i g in (j,) }
{ lambda ; k:f32[] l:f32[] m:f32[]. let n:f32[] = add l k in (n,) }
)
] e a c d
in (f,) }"""
jaxpr = api.make_jaxpr(f)(jnp.float32(3.))
self.assertMultiLineStrippedEqual(expected, str(jaxpr))
def test_make_jaxpr_static_argnums(self):
def f(x, y):
return x + y
jaxpr = api.make_jaxpr(f, static_argnums=(1,))(2, 3)
self.assertIn('3', str(jaxpr))
def test_make_jaxpr_return_shape(self):
_, shape_tree = api.make_jaxpr(lambda x: (x + 1, jnp.zeros(2, jnp.float32)),
return_shape=True)(jnp.int32(1))
expected = (api.ShapeDtypeStruct(shape=(), dtype=jnp.int32),
api.ShapeDtypeStruct(shape=(2,), dtype=jnp.float32))
self.assertEqual(shape_tree, expected)
def test_make_jaxpr_axis_env(self):
def f(x):
return x - lax.psum(x, 'i')
jaxpr = api.make_jaxpr(f, axis_env=[('i', 4)])(2)
self.assertIn('psum', str(jaxpr))
def test_weak_type_jit_invariance(self):
y = jnp.broadcast_to(3., (3,))
self.assertTrue(y.aval.weak_type)
def f():
return lax.convert_element_type(y, 'float32')
self.assertEqual(f().aval.weak_type, api.jit(f)().aval.weak_type)
def test_elide_trivial_convert_element_types(self):
# since we apply convert_element_type to a numpy.ndarray, the primitive is
# still bound and thus would appear in the jaxpr if we didn't clean it up
if config.enable_x64.value:
x = np.arange(3, dtype='float64')
else:
x = np.arange(3, dtype='float32')
cet = partial(lax.convert_element_type, new_dtype=x.dtype)
jaxpr = api.make_jaxpr(lambda: cet(cet(cet(x))))()
self.assertLen(jaxpr.eqns, 0)
def test_elide_trivial_broadcasts(self):
# since we apply broadcast to a numpy.ndarray, the primitive is still bound
# and thus would appear in the jaxpr if we didn't clean it up
jaxpr = api.make_jaxpr(lambda: lax.broadcast(np.float32(3), ()))()
self.assertLen(jaxpr.jaxpr.eqns, 0)
def test_convert_element_type_literal_constant_folding(self):
# this convert_element_type is nontrivial, but because it's on a scalar we
# constant-fold it
cet = partial(lax.convert_element_type, new_dtype='float16')
jaxpr = api.make_jaxpr(lambda: cet(3.))()
self.assertLen(jaxpr.eqns, 0)
def test_eqn_repr_with_no_lhs(self):
def f(x):
jax.debug.print("{}", x)
return x
jaxpr = jax.make_jaxpr(f)(np.int32(0))
self.assertEqual(jaxpr.eqns[0].primitive, debugging.debug_print_p)
self.assertStartsWith(str(jaxpr.eqns[0]), "debug_print[")
|
JaxprTest
|
python
|
realpython__materials
|
python-dict-attribute/class_inheritance.py
|
{
"start": 76,
"end": 264
}
|
class ____(Parent):
def __init__(self):
super().__init__()
self.child_attr = "child"
parent = Parent()
print(parent.__dict__)
child = Child()
print(child.__dict__)
|
Child
|
python
|
pypa__pip
|
src/pip/_vendor/msgpack/fallback.py
|
{
"start": 20192,
"end": 32390
}
|
class ____:
"""
MessagePack Packer
Usage::
packer = Packer()
astream.write(packer.pack(a))
astream.write(packer.pack(b))
Packer's constructor has some keyword arguments:
:param default:
When specified, it should be callable.
Convert user type to builtin type that Packer supports.
See also simplejson's document.
:param bool use_single_float:
Use single precision float type for float. (default: False)
:param bool autoreset:
Reset buffer after each pack and return its content as `bytes`. (default: True).
If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
:param bool use_bin_type:
Use bin type introduced in msgpack spec 2.0 for bytes.
It also enables str8 type for unicode. (default: True)
:param bool strict_types:
If set to true, types will be checked to be exact. Derived classes
from serializable types will not be serialized and will be
treated as unsupported type and forwarded to default.
Additionally tuples will not be serialized as lists.
This is useful when trying to implement accurate serialization
for python types.
:param bool datetime:
If set to true, datetime with tzinfo is packed into Timestamp type.
Note that the tzinfo is stripped in the timestamp.
You can get UTC datetime with `timestamp=3` option of the Unpacker.
:param str unicode_errors:
The error handler for encoding unicode. (default: 'strict')
DO NOT USE THIS!! This option is kept for very specific usage.
:param int buf_size:
Internal buffer size. This option is used only for C implementation.
"""
def __init__(
self,
*,
default=None,
use_single_float=False,
autoreset=True,
use_bin_type=True,
strict_types=False,
datetime=False,
unicode_errors=None,
buf_size=None,
):
self._strict_types = strict_types
self._use_float = use_single_float
self._autoreset = autoreset
self._use_bin_type = use_bin_type
self._buffer = BytesIO()
self._datetime = bool(datetime)
self._unicode_errors = unicode_errors or "strict"
if default is not None and not callable(default):
raise TypeError("default must be callable")
self._default = default
def _pack(
self,
obj,
nest_limit=DEFAULT_RECURSE_LIMIT,
check=isinstance,
check_type_strict=_check_type_strict,
):
default_used = False
if self._strict_types:
check = check_type_strict
list_types = list
else:
list_types = (list, tuple)
while True:
if nest_limit < 0:
raise ValueError("recursion limit exceeded")
if obj is None:
return self._buffer.write(b"\xc0")
if check(obj, bool):
if obj:
return self._buffer.write(b"\xc3")
return self._buffer.write(b"\xc2")
if check(obj, int):
if 0 <= obj < 0x80:
return self._buffer.write(struct.pack("B", obj))
if -0x20 <= obj < 0:
return self._buffer.write(struct.pack("b", obj))
if 0x80 <= obj <= 0xFF:
return self._buffer.write(struct.pack("BB", 0xCC, obj))
if -0x80 <= obj < 0:
return self._buffer.write(struct.pack(">Bb", 0xD0, obj))
if 0xFF < obj <= 0xFFFF:
return self._buffer.write(struct.pack(">BH", 0xCD, obj))
if -0x8000 <= obj < -0x80:
return self._buffer.write(struct.pack(">Bh", 0xD1, obj))
if 0xFFFF < obj <= 0xFFFFFFFF:
return self._buffer.write(struct.pack(">BI", 0xCE, obj))
if -0x80000000 <= obj < -0x8000:
return self._buffer.write(struct.pack(">Bi", 0xD2, obj))
if 0xFFFFFFFF < obj <= 0xFFFFFFFFFFFFFFFF:
return self._buffer.write(struct.pack(">BQ", 0xCF, obj))
if -0x8000000000000000 <= obj < -0x80000000:
return self._buffer.write(struct.pack(">Bq", 0xD3, obj))
if not default_used and self._default is not None:
obj = self._default(obj)
default_used = True
continue
raise OverflowError("Integer value out of range")
if check(obj, (bytes, bytearray)):
n = len(obj)
if n >= 2**32:
raise ValueError("%s is too large" % type(obj).__name__)
self._pack_bin_header(n)
return self._buffer.write(obj)
if check(obj, str):
obj = obj.encode("utf-8", self._unicode_errors)
n = len(obj)
if n >= 2**32:
raise ValueError("String is too large")
self._pack_raw_header(n)
return self._buffer.write(obj)
if check(obj, memoryview):
n = obj.nbytes
if n >= 2**32:
raise ValueError("Memoryview is too large")
self._pack_bin_header(n)
return self._buffer.write(obj)
if check(obj, float):
if self._use_float:
return self._buffer.write(struct.pack(">Bf", 0xCA, obj))
return self._buffer.write(struct.pack(">Bd", 0xCB, obj))
if check(obj, (ExtType, Timestamp)):
if check(obj, Timestamp):
code = -1
data = obj.to_bytes()
else:
code = obj.code
data = obj.data
assert isinstance(code, int)
assert isinstance(data, bytes)
L = len(data)
if L == 1:
self._buffer.write(b"\xd4")
elif L == 2:
self._buffer.write(b"\xd5")
elif L == 4:
self._buffer.write(b"\xd6")
elif L == 8:
self._buffer.write(b"\xd7")
elif L == 16:
self._buffer.write(b"\xd8")
elif L <= 0xFF:
self._buffer.write(struct.pack(">BB", 0xC7, L))
elif L <= 0xFFFF:
self._buffer.write(struct.pack(">BH", 0xC8, L))
else:
self._buffer.write(struct.pack(">BI", 0xC9, L))
self._buffer.write(struct.pack("b", code))
self._buffer.write(data)
return
if check(obj, list_types):
n = len(obj)
self._pack_array_header(n)
for i in range(n):
self._pack(obj[i], nest_limit - 1)
return
if check(obj, dict):
return self._pack_map_pairs(len(obj), obj.items(), nest_limit - 1)
if self._datetime and check(obj, _DateTime) and obj.tzinfo is not None:
obj = Timestamp.from_datetime(obj)
default_used = 1
continue
if not default_used and self._default is not None:
obj = self._default(obj)
default_used = 1
continue
if self._datetime and check(obj, _DateTime):
raise ValueError(f"Cannot serialize {obj!r} where tzinfo=None")
raise TypeError(f"Cannot serialize {obj!r}")
def pack(self, obj):
try:
self._pack(obj)
except:
self._buffer = BytesIO() # force reset
raise
if self._autoreset:
ret = self._buffer.getvalue()
self._buffer = BytesIO()
return ret
def pack_map_pairs(self, pairs):
self._pack_map_pairs(len(pairs), pairs)
if self._autoreset:
ret = self._buffer.getvalue()
self._buffer = BytesIO()
return ret
def pack_array_header(self, n):
if n >= 2**32:
raise ValueError
self._pack_array_header(n)
if self._autoreset:
ret = self._buffer.getvalue()
self._buffer = BytesIO()
return ret
def pack_map_header(self, n):
if n >= 2**32:
raise ValueError
self._pack_map_header(n)
if self._autoreset:
ret = self._buffer.getvalue()
self._buffer = BytesIO()
return ret
def pack_ext_type(self, typecode, data):
if not isinstance(typecode, int):
raise TypeError("typecode must have int type.")
if not 0 <= typecode <= 127:
raise ValueError("typecode should be 0-127")
if not isinstance(data, bytes):
raise TypeError("data must have bytes type")
L = len(data)
if L > 0xFFFFFFFF:
raise ValueError("Too large data")
if L == 1:
self._buffer.write(b"\xd4")
elif L == 2:
self._buffer.write(b"\xd5")
elif L == 4:
self._buffer.write(b"\xd6")
elif L == 8:
self._buffer.write(b"\xd7")
elif L == 16:
self._buffer.write(b"\xd8")
elif L <= 0xFF:
self._buffer.write(b"\xc7" + struct.pack("B", L))
elif L <= 0xFFFF:
self._buffer.write(b"\xc8" + struct.pack(">H", L))
else:
self._buffer.write(b"\xc9" + struct.pack(">I", L))
self._buffer.write(struct.pack("B", typecode))
self._buffer.write(data)
def _pack_array_header(self, n):
if n <= 0x0F:
return self._buffer.write(struct.pack("B", 0x90 + n))
if n <= 0xFFFF:
return self._buffer.write(struct.pack(">BH", 0xDC, n))
if n <= 0xFFFFFFFF:
return self._buffer.write(struct.pack(">BI", 0xDD, n))
raise ValueError("Array is too large")
def _pack_map_header(self, n):
if n <= 0x0F:
return self._buffer.write(struct.pack("B", 0x80 + n))
if n <= 0xFFFF:
return self._buffer.write(struct.pack(">BH", 0xDE, n))
if n <= 0xFFFFFFFF:
return self._buffer.write(struct.pack(">BI", 0xDF, n))
raise ValueError("Dict is too large")
def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
self._pack_map_header(n)
for k, v in pairs:
self._pack(k, nest_limit - 1)
self._pack(v, nest_limit - 1)
def _pack_raw_header(self, n):
if n <= 0x1F:
self._buffer.write(struct.pack("B", 0xA0 + n))
elif self._use_bin_type and n <= 0xFF:
self._buffer.write(struct.pack(">BB", 0xD9, n))
elif n <= 0xFFFF:
self._buffer.write(struct.pack(">BH", 0xDA, n))
elif n <= 0xFFFFFFFF:
self._buffer.write(struct.pack(">BI", 0xDB, n))
else:
raise ValueError("Raw is too large")
def _pack_bin_header(self, n):
if not self._use_bin_type:
return self._pack_raw_header(n)
elif n <= 0xFF:
return self._buffer.write(struct.pack(">BB", 0xC4, n))
elif n <= 0xFFFF:
return self._buffer.write(struct.pack(">BH", 0xC5, n))
elif n <= 0xFFFFFFFF:
return self._buffer.write(struct.pack(">BI", 0xC6, n))
else:
raise ValueError("Bin is too large")
def bytes(self):
"""Return internal buffer contents as bytes object"""
return self._buffer.getvalue()
def reset(self):
"""Reset internal buffer.
This method is useful only when autoreset=False.
"""
self._buffer = BytesIO()
def getbuffer(self):
"""Return view of internal buffer."""
if _USING_STRINGBUILDER:
return memoryview(self.bytes())
else:
return self._buffer.getbuffer()
|
Packer
|
python
|
ray-project__ray
|
release/train_tests/benchmark/recsys/recsys_factory.py
|
{
"start": 1794,
"end": 2151
}
|
class ____(BaseModel):
embedding_dim: int = 128
num_embeddings_per_feature: List[int] = CRITEO_NUM_EMBEDDINGS_PER_FEATURE
over_arch_layer_sizes: List[int] = [1024, 1024, 512, 256, 1]
dense_arch_layer_sizes: List[int] = [512, 256, 128]
interaction_type: str = "dcn"
dcn_num_layers: int = 3
dcn_low_rank_dim: int = 512
|
TorchRecConfig
|
python
|
huggingface__transformers
|
src/transformers/models/t5/modeling_t5.py
|
{
"start": 5230,
"end": 5899
}
|
class ____(nn.Module):
def __init__(self, config: T5Config):
super().__init__()
if config.is_gated_act:
self.DenseReluDense = T5DenseGatedActDense(config)
else:
self.DenseReluDense = T5DenseActDense(config)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
|
T5LayerFF
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/_resource.py
|
{
"start": 637,
"end": 1080
}
|
class ____:
_client: AsyncAPIClient
def __init__(self, client: AsyncAPIClient) -> None:
self._client = client
self._get = client.get
self._post = client.post
self._patch = client.patch
self._put = client.put
self._delete = client.delete
self._get_api_list = client.get_api_list
async def _sleep(self, seconds: float) -> None:
await anyio.sleep(seconds)
|
AsyncAPIResource
|
python
|
networkx__networkx
|
networkx/algorithms/centrality/tests/test_subgraph.py
|
{
"start": 275,
"end": 3729
}
|
class ____:
def test_subgraph_centrality(self):
answer = {0: 1.5430806348152433, 1: 1.5430806348152433}
result = subgraph_centrality(nx.path_graph(2))
for k, v in result.items():
assert answer[k] == pytest.approx(v, abs=1e-7)
answer1 = {
"1": 1.6445956054135658,
"Albert": 2.4368257358712189,
"Aric": 2.4368257358712193,
"Dan": 3.1306328496328168,
"Franck": 2.3876142275231915,
}
G1 = nx.Graph(
[
("Franck", "Aric"),
("Aric", "Dan"),
("Dan", "Albert"),
("Albert", "Franck"),
("Dan", "1"),
("Franck", "Albert"),
]
)
result1 = subgraph_centrality(G1)
for k, v in result1.items():
assert answer1[k] == pytest.approx(v, abs=1e-7)
result1 = subgraph_centrality_exp(G1)
for k, v in result1.items():
assert answer1[k] == pytest.approx(v, abs=1e-7)
def test_subgraph_centrality_big_graph(self):
g199 = nx.complete_graph(199)
g200 = nx.complete_graph(200)
comm199 = nx.subgraph_centrality(g199)
comm199_exp = nx.subgraph_centrality_exp(g199)
comm200 = nx.subgraph_centrality(g200)
comm200_exp = nx.subgraph_centrality_exp(g200)
def test_communicability_betweenness_centrality_small(self):
result = communicability_betweenness_centrality(nx.path_graph(2))
assert result == {0: 0, 1: 0}
result = communicability_betweenness_centrality(nx.path_graph(1))
assert result == {0: 0}
result = communicability_betweenness_centrality(nx.path_graph(0))
assert result == {}
answer = {0: 0.1411224421177313, 1: 1.0, 2: 0.1411224421177313}
result = communicability_betweenness_centrality(nx.path_graph(3))
for k, v in result.items():
assert answer[k] == pytest.approx(v, abs=1e-7)
result = communicability_betweenness_centrality(nx.complete_graph(3))
for k, v in result.items():
assert 0.49786143366223296 == pytest.approx(v, abs=1e-7)
def test_communicability_betweenness_centrality(self):
answer = {
0: 0.07017447951484615,
1: 0.71565598701107991,
2: 0.71565598701107991,
3: 0.07017447951484615,
}
result = communicability_betweenness_centrality(nx.path_graph(4))
for k, v in result.items():
assert answer[k] == pytest.approx(v, abs=1e-7)
answer1 = {
"1": 0.060039074193949521,
"Albert": 0.315470761661372,
"Aric": 0.31547076166137211,
"Dan": 0.68297778678316201,
"Franck": 0.21977926617449497,
}
G1 = nx.Graph(
[
("Franck", "Aric"),
("Aric", "Dan"),
("Dan", "Albert"),
("Albert", "Franck"),
("Dan", "1"),
("Franck", "Albert"),
]
)
result1 = communicability_betweenness_centrality(G1)
for k, v in result1.items():
assert answer1[k] == pytest.approx(v, abs=1e-7)
def test_estrada_index(self):
answer = 1041.2470334195475
result = estrada_index(nx.karate_club_graph())
assert answer == pytest.approx(result, abs=1e-7)
|
TestSubgraph
|
python
|
pytorch__pytorch
|
torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py
|
{
"start": 1225,
"end": 1921
}
|
class ____(nn.Module):
r"""
Borrowing the ``Policy`` class from the Reinforcement Learning example.
Copying the code to make these two examples independent.
See https://github.com/pytorch/examples/tree/master/reinforcement_learning
"""
def __init__(self) -> None:
super().__init__()
self.affine1 = nn.Linear(4, 128)
self.dropout = nn.Dropout(p=0.6)
self.affine2 = nn.Linear(128, 2)
self.saved_log_probs = []
self.rewards = []
def forward(self, x):
x = self.affine1(x)
x = self.dropout(x)
x = F.relu(x)
action_scores = self.affine2(x)
return F.softmax(action_scores, dim=1)
|
Policy
|
python
|
openai__openai-python
|
src/openai/resources/fine_tuning/alpha/graders.py
|
{
"start": 9484,
"end": 9801
}
|
class ____:
def __init__(self, graders: Graders) -> None:
self._graders = graders
self.run = _legacy_response.to_raw_response_wrapper(
graders.run,
)
self.validate = _legacy_response.to_raw_response_wrapper(
graders.validate,
)
|
GradersWithRawResponse
|
python
|
tensorflow__tensorflow
|
tensorflow/python/saved_model/save_test.py
|
{
"start": 59548,
"end": 59866
}
|
class ____(test.TestCase):
def test_toggle_flag(self):
self.assertTrue(flags.config().saved_model_fingerprinting.value())
flags.config().saved_model_fingerprinting.reset(False)
self.assertFalse(flags.config().saved_model_fingerprinting.value())
if __name__ == "__main__":
test.main()
|
FingerprintingTests
|
python
|
psf__black
|
tests/data/cases/preview_long_strings__regression.py
|
{
"start": 34216,
"end": 34820
}
|
class ____:
class B:
def foo():
if not hasattr(module, name):
raise ValueError(
"Could not find object %s in %s.\n"
"Please note that you cannot serialize things like inner "
"classes. Please move the object into the main module "
"body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
% (name, module_name, get_docs_version())
)
|
A
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/ps_values_test.py
|
{
"start": 2678,
"end": 4294
}
|
class ____(test.TestCase, parameterized.TestCase):
def testAssignOutOfScope(self, distribution):
with distribution.scope():
aggregating = variables_lib.Variable(1.)
self.assertIsInstance(aggregating, ps_values.AggregatingVariable)
self.evaluate(aggregating.assign(3.))
self.assertEqual(self.evaluate(aggregating.read_value()), 3.)
self.assertEqual(self.evaluate(aggregating._v.read_value()), 3.)
def testAssignAdd(self, distribution):
with distribution.scope():
v = variable_v1.VariableV1(
1, aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def assign():
return v.assign_add(2)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(assign)))
self.assertAllEqual([3], per_replica_results)
def testAsyncCheckpointAggregatingVariable(self, distribution):
with self.test_session():
with distribution.scope():
x = variables_lib.Variable(1.)
self.assertIsInstance(x, ps_values.AggregatingVariable)
self.evaluate(x.initializer)
async_checkpoint_test_helper(self, x)
def testAsyncCheckpointCachingVariable(self, distribution):
del distribution
with self.test_session():
v = variables_lib.Variable(1.)
x = ps_values.CachingVariable(v)
self.assertIsInstance(x, ps_values.CachingVariable)
self.evaluate(x.initializer)
async_checkpoint_test_helper(self, x)
if __name__ == "__main__":
test.main()
|
AggregatingVariableTest
|
python
|
ray-project__ray
|
python/ray/util/state/api.py
|
{
"start": 2820,
"end": 54583
}
|
class ____(SubmissionClient):
"""State API Client issues REST GET requests to the server for resource states."""
def __init__(
self,
address: Optional[str] = None,
cookies: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
):
"""Initialize a StateApiClient and check the connection to the cluster.
Args:
address: Ray bootstrap address (e.g. `127.0.0.0:6379`, `auto`), or Ray
Client address (e.g. `ray://<head-node-ip>:10001`), or Ray dashboard
address (e.g. `http://<head-node-ip>:8265`).
If not provided, it will be detected automatically from any running
local Ray cluster.
cookies: Cookies to use when sending requests to the HTTP job server.
headers: Headers to use when sending requests to the HTTP job server, used
for cases like authentication to a remote cluster.
"""
if requests is None:
raise RuntimeError(
"The Ray state CLI & SDK require the ray[default] "
"installation: `pip install 'ray[default']``"
)
if not headers:
headers = {"Content-Type": "application/json"}
# Resolve API server URL
api_server_url = get_address_for_submission_client(address)
super().__init__(
address=api_server_url,
create_cluster_if_needed=False,
headers=headers,
cookies=cookies,
)
@classmethod
def _make_param(cls, options: Union[ListApiOptions, GetApiOptions]) -> Dict:
options_dict = {}
for field in fields(options):
# TODO(rickyyx): We will need to find a way to pass server side timeout
# TODO(rickyyx): We will have to convert filter option
# slightly differently for now. But could we do k,v pair rather than this?
# I see we are also converting dict to XXXApiOptions later on, we could
# probably organize the marshaling a bit better.
if field.name == "filters":
options_dict["filter_keys"] = []
options_dict["filter_predicates"] = []
options_dict["filter_values"] = []
for filter in options.filters:
if len(filter) != 3:
raise ValueError(
f"The given filter has incorrect input type, {filter}. "
"Provide (key, predicate, value) tuples."
)
filter_k, filter_predicate, filter_val = filter
options_dict["filter_keys"].append(filter_k)
options_dict["filter_predicates"].append(filter_predicate)
options_dict["filter_values"].append(filter_val)
continue
option_val = getattr(options, field.name)
if option_val is not None:
options_dict[field.name] = option_val
return options_dict
def _make_http_get_request(
self,
endpoint: str,
params: Dict,
timeout: float,
_explain: bool = False,
) -> Dict:
with warnings_on_slow_request(
address=self._address, endpoint=endpoint, timeout=timeout, explain=_explain
):
# Send a request.
response = None
try:
response = self._do_request(
"GET",
endpoint,
timeout=timeout,
params=params,
)
# If we have a valid JSON error, don't raise a generic exception but
# instead let the caller parse it to raise a more precise exception.
if (
response.status_code == 500
and "application/json"
not in response.headers.get("Content-Type", "")
):
response.raise_for_status()
except requests.exceptions.RequestException as e:
err_str = f"Failed to make request to {self._address}{endpoint}. "
# Best-effort to give hints to users on potential reasons of connection
# failure.
err_str += (
"Failed to connect to API server. Please check the API server "
"log for details. Make sure dependencies are installed with "
"`pip install ray[default]`. Please also check dashboard is "
"available, and included when starting ray cluster, "
"i.e. `ray start --include-dashboard=True --head`. "
)
if response is None:
raise ServerUnavailable(err_str)
err_str += f"Response(url={response.url},status={response.status_code})"
raise RayStateApiException(err_str) from e
try:
# Process the response.
response = response.json()
except requests.exceptions.JSONDecodeError as e:
raise RayStateApiException(
f"Failed to parse Response(url={response.url}, "
f"status={response.status_code}, text='{response.text[:_MAX_HTTP_RESPONSE_EXCEPTION_TEXT]}')"
) from e
if response["result"] is False:
raise RayStateApiException(
"API server internal error. See dashboard.log file for more details. "
f"Error: {response['msg']}"
)
# Dictionary of `ListApiResponse` or `SummaryApiResponse`
return response["data"]["result"]
def get(
self,
resource: StateResource,
id: str,
options: Optional[GetApiOptions],
_explain: bool = False,
) -> Optional[
Union[
ActorState,
PlacementGroupState,
NodeState,
WorkerState,
TaskState,
List[ObjectState],
JobState,
]
]:
"""Get resources states by id
Args:
resource_name: Resource names, i.e. 'workers', 'actors', 'nodes',
'placement_groups', 'tasks', 'objects'.
'jobs' and 'runtime-envs' are not supported yet.
id: ID for the resource, i.e. 'node_id' for nodes.
options: Get options. See `GetApiOptions` for details.
_explain: Print the API information such as API
latency or failed query information.
Returns:
None if not found, and if found:
- ActorState for actors
- PlacementGroupState for placement groups
- NodeState for nodes
- WorkerState for workers
- TaskState for tasks
- JobState for jobs
Empty list for objects if not found, or list of ObjectState for objects
Raises:
Exception: This doesn't catch any exceptions raised when the underlying request
call raises exceptions. For example, it could raise `requests.Timeout`
when timeout occurs.
ValueError:
if the resource could not be GET by id, i.e. jobs and runtime-envs.
"""
# TODO(rickyyx): Make GET not using filters on list operation
params = self._make_param(options)
RESOURCE_ID_KEY_NAME = {
StateResource.NODES: "node_id",
StateResource.ACTORS: "actor_id",
StateResource.PLACEMENT_GROUPS: "placement_group_id",
StateResource.WORKERS: "worker_id",
StateResource.TASKS: "task_id",
StateResource.OBJECTS: "object_id",
StateResource.JOBS: "submission_id",
}
if resource not in RESOURCE_ID_KEY_NAME:
raise ValueError(f"Can't get {resource.name} by id.")
params["filter_keys"] = [RESOURCE_ID_KEY_NAME[resource]]
params["filter_predicates"] = ["="]
params["filter_values"] = [id]
params["detail"] = True
endpoint = f"/api/v0/{resource.value}"
list_api_response = self._make_http_get_request(
endpoint=endpoint,
params=params,
timeout=options.timeout,
_explain=_explain,
)
result = list_api_response["result"]
# Empty result
if len(result) == 0:
return None
result = [dict_to_state(d, resource) for d in result]
if resource == StateResource.OBJECTS:
# NOTE(rickyyx):
# There might be multiple object entries for a single object id
# because a single object could be referenced at different places
# e.g. pinned as local variable, used as parameter
return result
if resource == StateResource.TASKS:
# There might be multiple task attempts given a task id due to
# task retries.
if len(result) == 1:
return result[0]
return result
# For the rest of the resources, there should only be a single entry
# for a particular id.
assert len(result) == 1
return result[0]
def _print_api_warning(
self,
resource: StateResource,
api_response: dict,
warn_data_source_not_available: bool = True,
warn_data_truncation: bool = True,
warn_limit: bool = True,
warn_server_side_warnings: bool = True,
):
"""Print the API warnings.
Args:
resource: Resource names, i.e. 'jobs', 'actors', 'nodes',
see `StateResource` for details.
api_response: The dictionarified `ListApiResponse` or `SummaryApiResponse`.
warn_data_source_not_available: Warn when some data sources
are not available.
warn_data_truncation: Warn when results were truncated at
the data source.
warn_limit: Warn when results were limited.
warn_server_side_warnings: Warn when the server side generates warnings
(E.g., when callsites not enabled for listing objects)
"""
# Print warnings if anything was given.
if warn_data_source_not_available:
warning_msgs = api_response.get("partial_failure_warning", None)
if warning_msgs:
warnings.warn(warning_msgs)
if warn_data_truncation:
# Print warnings if data is truncated at the data source.
num_after_truncation = api_response["num_after_truncation"]
total = api_response["total"]
if total > num_after_truncation:
# NOTE(rickyyx): For now, there's not much users
# could do (neither can we), with hard truncation.
# Unless we allow users to set a higher
# `RAY_MAX_LIMIT_FROM_DATA_SOURCE`, the data will
# always be truncated at the data source.
warnings.warn(
(
"The returned data may contain incomplete result. "
f"{num_after_truncation} ({total} total from the cluster) "
f"{resource.value} are retrieved from the data source. "
f"{total - num_after_truncation} entries have been truncated. "
f"Max of {num_after_truncation} entries are retrieved "
"from data source to prevent over-sized payloads."
),
)
if warn_limit:
# Print warnings if return data is limited at the API server due to
# limit enforced at the server side
num_filtered = api_response["num_filtered"]
data = api_response["result"]
if num_filtered > len(data):
warnings.warn(
(
f"Limit last {len(data)} entries "
f"(Total {num_filtered}). Use `--filter` to reduce "
"the amount of data to return or "
"setting a higher limit with `--limit` to see all data. "
),
)
if warn_server_side_warnings:
# Print the additional warnings.
warnings_to_print = api_response.get("warnings", [])
if warnings_to_print:
for warning_to_print in warnings_to_print:
warnings.warn(warning_to_print)
def _raise_on_missing_output(self, resource: StateResource, api_response: dict):
"""Raise an exception when the API resopnse contains a missing output.
Output can be missing if (1) Failures on some of data source queries (e.g.,
`ray list tasks` queries all raylets, and if some of queries fail, it will
contain missing output. If all queries fail, it will just fail). (2) Data
is truncated because the output is too large.
Args:
resource: Resource names, i.e. 'jobs', 'actors', 'nodes',
see `StateResource` for details.
api_response: The dictionarified `ListApiResponse` or `SummaryApiResponse`.
"""
# Raise an exception if there are partial failures that cause missing output.
warning_msgs = api_response.get("partial_failure_warning", None)
if warning_msgs:
raise RayStateApiException(
f"Failed to retrieve all {resource.value} from the cluster because"
"they are not reachable due to query failures to the data sources. "
"To avoid raising an exception and allow having missing output, "
"set `raise_on_missing_output=False`. "
)
# Raise an exception is there is data truncation that cause missing output.
total = api_response["total"]
num_after_truncation = api_response["num_after_truncation"]
if total != num_after_truncation:
raise RayStateApiException(
f"Failed to retrieve all {total} {resource.value} from the cluster "
"because they are not reachable due to data truncation. It happens "
"when the returned data is too large "
# When the data is truncated, the truncation
# threshold == num_after_truncation. We cannot set this to env
# var because the CLI side might not have the correct env var.
f"(> {num_after_truncation}) "
"To avoid raising an exception and allow having missing output, "
"set `raise_on_missing_output=False`. "
)
def list(
self,
resource: StateResource,
options: ListApiOptions,
raise_on_missing_output: bool,
_explain: bool = False,
) -> List[
Union[
ActorState,
JobState,
NodeState,
TaskState,
ObjectState,
PlacementGroupState,
RuntimeEnvState,
WorkerState,
ClusterEventState,
]
]:
"""List resources states
Args:
resource: Resource names, i.e. 'jobs', 'actors', 'nodes',
see `StateResource` for details.
options: List options. See `ListApiOptions` for details.
raise_on_missing_output: When True, raise an exception if the output
is incomplete. Output can be incomplete if
(1) there's a partial network failure when the source is distributed.
(2) data is truncated because it is too large.
Set it to False to avoid throwing an exception on missing data.
_explain: Print the API information such as API
latency or failed query information.
Returns:
A list of queried result from `ListApiResponse`,
Raises:
Exception: This doesn't catch any exceptions raised when the
underlying request call raises exceptions. For example, it could
raise `requests.Timeout` when timeout occurs.
"""
if options.has_conflicting_filters():
# return early with empty list when there are conflicting filters
return []
endpoint = f"/api/v0/{resource.value}"
params = self._make_param(options)
list_api_response = self._make_http_get_request(
endpoint=endpoint,
params=params,
timeout=options.timeout,
_explain=_explain,
)
if raise_on_missing_output:
self._raise_on_missing_output(resource, list_api_response)
if _explain:
self._print_api_warning(resource, list_api_response)
return [dict_to_state(d, resource) for d in list_api_response["result"]]
def summary(
self,
resource: SummaryResource,
*,
options: SummaryApiOptions,
raise_on_missing_output: bool,
_explain: bool = False,
) -> Dict:
"""Summarize resources states
Args:
resource_name: Resource names,
see `SummaryResource` for details.
options: summary options. See `SummaryApiOptions` for details.
raise_on_missing_output: Raise an exception if the output has missing data.
Output can have missing data if (1) there's a partial network failure
when the source is distributed. (2) data is truncated
because it is too large.
_explain: Print the API information such as API
latency or failed query information.
Returns:
A dictionary of queried result from `SummaryApiResponse`.
Raises:
Exception: This doesn't catch any exceptions raised when the
underlying request call raises exceptions. For example, it could
raise `requests.Timeout` when timeout occurs.
"""
params = {"timeout": options.timeout}
endpoint = f"/api/v0/{resource.value}/summarize"
summary_api_response = self._make_http_get_request(
endpoint=endpoint,
params=params,
timeout=options.timeout,
_explain=_explain,
)
if raise_on_missing_output:
self._raise_on_missing_output(resource, summary_api_response)
if _explain:
# There's no limit applied to summary, so we shouldn't warn.
self._print_api_warning(resource, summary_api_response, warn_limit=False)
return summary_api_response["result"]["node_id_to_summary"]
@DeveloperAPI
def get_actor(
id: str,
address: Optional[str] = None,
timeout: int = DEFAULT_RPC_TIMEOUT,
_explain: bool = False,
) -> Optional[ActorState]:
"""Get an actor by id.
Args:
id: Id of the actor
address: Ray bootstrap address, could be `auto`, `localhost:6379`.
If None, it will be resolved automatically from an initialized ray.
timeout: Max timeout value for the state API requests made.
_explain: Print the API information such as API latency or
failed query information.
Returns:
None if actor not found, or
:class:`ActorState <ray.util.state.common.ActorState>`.
Raises:
RayStateApiException: if the CLI failed to query the data.
""" # noqa: E501
return StateApiClient(address=address).get(
StateResource.ACTORS, id, GetApiOptions(timeout=timeout), _explain=_explain
)
@DeveloperAPI
def get_job(
id: str,
address: Optional[str] = None,
timeout: int = DEFAULT_RPC_TIMEOUT,
_explain: bool = False,
) -> Optional[JobState]:
"""Get a submission job detail by id.
Args:
id: Submission ID obtained from job API.
address: Ray bootstrap address, could be `auto`, `localhost:6379`.
If None, it will be resolved automatically from an initialized ray.
timeout: Max timeout value for the state API requests made.
_explain: Print the API information such as API latency or
failed query information.
Returns:
None if job not found, or
:class:`JobState <ray.util.state.common.JobState>`.
Raises:
RayStateApiException: if the CLI failed to query the data.
""" # noqa: E501
return StateApiClient(address=address).get(
StateResource.JOBS,
id,
GetApiOptions(timeout=timeout),
_explain=_explain,
)
@DeveloperAPI
def get_placement_group(
    id: str,
    address: Optional[str] = None,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    _explain: bool = False,
) -> Optional[PlacementGroupState]:
    """Get a placement group by id.

    Args:
        id: Id of the placement group
        address: Ray bootstrap address, could be `auto`, `localhost:6379`.
            If None, it will be resolved automatically from an initialized ray.
        timeout: Max timeout value for the state APIs requests made.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        None if the placement group is not found, or
        :class:`~ray.util.state.common.PlacementGroupState`.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    return StateApiClient(address=address).get(
        StateResource.PLACEMENT_GROUPS,
        id,
        GetApiOptions(timeout=timeout),
        _explain=_explain,
    )
@DeveloperAPI
def get_node(
    id: str,
    address: Optional[str] = None,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    _explain: bool = False,
) -> Optional[NodeState]:
    """Get a node by id.

    Args:
        id: Id of the node.
        address: Ray bootstrap address, could be `auto`, `localhost:6379`.
            If None, it will be resolved automatically from an initialized ray.
        timeout: Max timeout value for the state APIs requests made.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        None if the node is not found, or
        :class:`NodeState <ray.util.state.common.NodeState>`.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    return StateApiClient(address=address).get(
        StateResource.NODES,
        id,
        GetApiOptions(timeout=timeout),
        _explain=_explain,
    )
@DeveloperAPI
def get_worker(
    id: str,
    address: Optional[str] = None,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    _explain: bool = False,
) -> Optional[WorkerState]:
    """Get a worker by id.

    Args:
        id: Id of the worker
        address: Ray bootstrap address, could be `auto`, `localhost:6379`.
            If None, it will be resolved automatically from an initialized ray.
        timeout: Max timeout value for the state APIs requests made.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        None if the worker is not found, or
        :class:`WorkerState <ray.util.state.common.WorkerState>`.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    return StateApiClient(address=address).get(
        StateResource.WORKERS,
        id,
        GetApiOptions(timeout=timeout),
        _explain=_explain,
    )
@DeveloperAPI
def get_task(
    id: Union[str, "ray.ObjectRef"],
    address: Optional[str] = None,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    _explain: bool = False,
) -> Optional[TaskState]:
    """Get task attempts of a task by id.

    Args:
        id: String id of the task or ObjectRef that corresponds to task
        address: Ray bootstrap address, could be `auto`, `localhost:6379`.
            If None, it will be resolved automatically from an initialized ray.
        timeout: Max timeout value for the state APIs requests made.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        None if task not found, or a list of
        :class:`~ray.util.state.common.TaskState`
        from the task attempts.

        NOTE(review): the annotated return type is ``Optional[TaskState]``
        while this section describes a list of attempts — confirm which one
        the server actually returns and align the annotation/doc.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    # An ObjectRef identifies the task that produced it; normalize either
    # accepted input form to the task id's hex string.
    str_id: str
    if isinstance(id, str):
        str_id = id
    else:
        str_id = id.task_id().hex()
    return StateApiClient(address=address).get(
        StateResource.TASKS,
        str_id,
        GetApiOptions(timeout=timeout),
        _explain=_explain,
    )
@DeveloperAPI
def get_objects(
    id: str,
    address: Optional[str] = None,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    _explain: bool = False,
) -> List[ObjectState]:
    """Fetch all object entries matching an object id.

    More than one entry may be returned, since the same object can be
    referenced at different places.

    Args:
        id: Id of the object.
        address: Ray bootstrap address (e.g. `auto`, `localhost:6379`).
            Resolved automatically from an initialized ray when None.
        timeout: Max timeout value for the state API requests made.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        List of :class:`~ray.util.state.common.ObjectState`.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    # Thin wrapper: build a one-off client and delegate to the generic getter.
    client = StateApiClient(address=address)
    options = GetApiOptions(timeout=timeout)
    return client.get(StateResource.OBJECTS, id, options, _explain=_explain)
@DeveloperAPI
def list_actors(
    address: Optional[str] = None,
    filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None,
    limit: int = DEFAULT_LIMIT,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    detail: bool = False,
    raise_on_missing_output: bool = True,
    _explain: bool = False,
) -> List[ActorState]:
    """List actors in the cluster.

    Args:
        address: Ray bootstrap address (e.g. `auto`, `localhost:6379`).
            Resolved automatically from an initialized ray when None.
        filters: List of (key, predicate, value) tuples, where the predicate
            is `=` or `!=`. E.g., `("id", "=", "abcd")`. String filter
            values are case-insensitive.
        limit: Max number of entries returned by the state backend.
        timeout: Max timeout value for the state API requests made.
        detail: When True, more detailed info (specified in `ActorState`)
            is queried and returned. See
            :class:`ActorState <ray.util.state.common.ActorState>`.
        raise_on_missing_output: When True, raise if the output is missing
            data due to truncation or an unavailable data source.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        List of :class:`ActorState <ray.util.state.common.ActorState>`.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    client = StateApiClient(address=address)
    list_options = ListApiOptions(
        limit=limit,
        timeout=timeout,
        filters=filters,
        detail=detail,
    )
    return client.list(
        StateResource.ACTORS,
        options=list_options,
        raise_on_missing_output=raise_on_missing_output,
        _explain=_explain,
    )
@DeveloperAPI
def list_placement_groups(
    address: Optional[str] = None,
    filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None,
    limit: int = DEFAULT_LIMIT,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    detail: bool = False,
    raise_on_missing_output: bool = True,
    _explain: bool = False,
) -> List[PlacementGroupState]:
    """List placement groups in the cluster.

    Args:
        address: Ray bootstrap address (e.g. `auto`, `localhost:6379`).
            Resolved automatically from an initialized ray when None.
        filters: List of (key, predicate, value) tuples, where the predicate
            is `=` or `!=`. E.g., `("state", "=", "abcd")`. String filter
            values are case-insensitive.
        limit: Max number of entries returned by the state backend.
        timeout: Max timeout value for the state API requests made.
        detail: When True, more detailed info (specified in
            `PlacementGroupState`) is queried and returned. See
            :class:`~ray.util.state.common.PlacementGroupState`.
        raise_on_missing_output: When True, raise if the output is missing
            data due to truncation or an unavailable data source.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        List of :class:`~ray.util.state.common.PlacementGroupState`.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    client = StateApiClient(address=address)
    list_options = ListApiOptions(
        limit=limit,
        timeout=timeout,
        filters=filters,
        detail=detail,
    )
    return client.list(
        StateResource.PLACEMENT_GROUPS,
        options=list_options,
        raise_on_missing_output=raise_on_missing_output,
        _explain=_explain,
    )
@DeveloperAPI
def list_nodes(
    address: Optional[str] = None,
    filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None,
    limit: int = DEFAULT_LIMIT,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    detail: bool = False,
    raise_on_missing_output: bool = True,
    _explain: bool = False,
) -> List[NodeState]:
    """List nodes in the cluster.

    Args:
        address: Ray bootstrap address (e.g. `auto`, `localhost:6379`).
            Resolved automatically from an initialized ray when None.
        filters: List of (key, predicate, value) tuples, where the predicate
            is `=` or `!=`. E.g., `("node_name", "=", "abcd")`. String
            filter values are case-insensitive.
        limit: Max number of entries returned by the state backend.
        timeout: Max timeout value for the state API requests made.
        detail: When True, more detailed info (specified in `NodeState`)
            is queried and returned. See
            :class:`NodeState <ray.util.state.common.NodeState>`.
        raise_on_missing_output: When True, raise if the output is missing
            data due to truncation or an unavailable data source.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        List of dictionarified
        :class:`NodeState <ray.util.state.common.NodeState>`.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    client = StateApiClient(address=address)
    list_options = ListApiOptions(
        limit=limit,
        timeout=timeout,
        filters=filters,
        detail=detail,
    )
    return client.list(
        StateResource.NODES,
        options=list_options,
        raise_on_missing_output=raise_on_missing_output,
        _explain=_explain,
    )
@DeveloperAPI
def list_jobs(
    address: Optional[str] = None,
    filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None,
    limit: int = DEFAULT_LIMIT,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    detail: bool = False,
    raise_on_missing_output: bool = True,
    _explain: bool = False,
) -> List[JobState]:
    """List jobs submitted to the cluster by :ref:`ray job submission <jobs-overview>`.

    Args:
        address: Ray bootstrap address (e.g. `auto`, `localhost:6379`).
            Resolved automatically from an initialized ray when None.
        filters: List of (key, predicate, value) tuples, where the predicate
            is `=` or `!=`. E.g., `("status", "=", "abcd")`. String filter
            values are case-insensitive.
        limit: Max number of entries returned by the state backend.
        timeout: Max timeout value for the state API requests made.
        detail: When True, more detailed info (specified in `JobState`)
            is queried and returned. See
            :class:`JobState <ray.util.state.common.JobState>`.
        raise_on_missing_output: When True, raise if the output is missing
            data due to truncation or an unavailable data source.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        List of dictionarified
        :class:`JobState <ray.util.state.common.JobState>`.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    client = StateApiClient(address=address)
    list_options = ListApiOptions(
        limit=limit,
        timeout=timeout,
        filters=filters,
        detail=detail,
    )
    return client.list(
        StateResource.JOBS,
        options=list_options,
        raise_on_missing_output=raise_on_missing_output,
        _explain=_explain,
    )
@DeveloperAPI
def list_workers(
    address: Optional[str] = None,
    filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None,
    limit: int = DEFAULT_LIMIT,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    detail: bool = False,
    raise_on_missing_output: bool = True,
    _explain: bool = False,
) -> List[WorkerState]:
    """List workers in the cluster.

    Args:
        address: Ray bootstrap address (e.g. `auto`, `localhost:6379`).
            Resolved automatically from an initialized ray when None.
        filters: List of (key, predicate, value) tuples, where the predicate
            is `=` or `!=`. E.g., `("is_alive", "=", "True")`. String
            filter values are case-insensitive.
        limit: Max number of entries returned by the state backend.
        timeout: Max timeout value for the state API requests made.
        detail: When True, more detailed info (specified in `WorkerState`)
            is queried and returned. See
            :class:`WorkerState <ray.util.state.common.WorkerState>`.
        raise_on_missing_output: When True, raise if the output is missing
            data due to truncation or an unavailable data source.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        List of :class:`WorkerState <ray.util.state.common.WorkerState>`.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    client = StateApiClient(address=address)
    list_options = ListApiOptions(
        limit=limit,
        timeout=timeout,
        filters=filters,
        detail=detail,
    )
    return client.list(
        StateResource.WORKERS,
        options=list_options,
        raise_on_missing_output=raise_on_missing_output,
        _explain=_explain,
    )
@DeveloperAPI
def list_tasks(
    address: Optional[str] = None,
    filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None,
    limit: int = DEFAULT_LIMIT,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    detail: bool = False,
    raise_on_missing_output: bool = True,
    _explain: bool = False,
) -> List[TaskState]:
    """List tasks in the cluster.

    Args:
        address: Ray bootstrap address (e.g. `auto`, `localhost:6379`).
            Resolved automatically from an initialized ray when None.
        filters: List of (key, predicate, value) tuples, where the predicate
            is `=` or `!=`. E.g., `("is_alive", "=", "True")`. String
            filter values are case-insensitive.
        limit: Max number of entries returned by the state backend.
        timeout: Max timeout value for the state API requests made.
        detail: When True, more detailed info (specified in `TaskState`)
            is queried and returned. See
            :class:`TaskState <ray.util.state.common.TaskState>`.
        raise_on_missing_output: When True, raise if the output is missing
            data due to truncation or an unavailable data source.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        List of :class:`TaskState <ray.util.state.common.TaskState>`.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    client = StateApiClient(address=address)
    list_options = ListApiOptions(
        limit=limit,
        timeout=timeout,
        filters=filters,
        detail=detail,
    )
    return client.list(
        StateResource.TASKS,
        options=list_options,
        raise_on_missing_output=raise_on_missing_output,
        _explain=_explain,
    )
@DeveloperAPI
def list_objects(
    address: Optional[str] = None,
    filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None,
    limit: int = DEFAULT_LIMIT,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    detail: bool = False,
    raise_on_missing_output: bool = True,
    _explain: bool = False,
) -> List[ObjectState]:
    """List objects in the cluster.

    Args:
        address: Ray bootstrap address (e.g. `auto`, `localhost:6379`).
            Resolved automatically from an initialized ray when None.
        filters: List of (key, predicate, value) tuples, where the predicate
            is `=` or `!=`. E.g., `("ip", "=", "0.0.0.0")`. String filter
            values are case-insensitive.
        limit: Max number of entries returned by the state backend.
        timeout: Max timeout value for the state API requests made.
        detail: When True, more detailed info (specified in `ObjectState`)
            is queried and returned. See
            :class:`ObjectState <ray.util.state.common.ObjectState>`.
        raise_on_missing_output: When True, raise if the output is missing
            data due to truncation or an unavailable data source.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        List of :class:`ObjectState <ray.util.state.common.ObjectState>`.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    client = StateApiClient(address=address)
    list_options = ListApiOptions(
        limit=limit,
        timeout=timeout,
        filters=filters,
        detail=detail,
    )
    return client.list(
        StateResource.OBJECTS,
        options=list_options,
        raise_on_missing_output=raise_on_missing_output,
        _explain=_explain,
    )
@DeveloperAPI
def list_runtime_envs(
    address: Optional[str] = None,
    filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None,
    limit: int = DEFAULT_LIMIT,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    detail: bool = False,
    raise_on_missing_output: bool = True,
    _explain: bool = False,
) -> List[RuntimeEnvState]:
    """List runtime environments in the cluster.

    Args:
        address: Ray bootstrap address (e.g. `auto`, `localhost:6379`).
            Resolved automatically from an initialized ray when None.
        filters: List of (key, predicate, value) tuples, where the predicate
            is `=` or `!=`. E.g., `("node_id", "=", "abcdef")`. String
            filter values are case-insensitive.
        limit: Max number of entries returned by the state backend.
        timeout: Max timeout value for the state API requests made.
        detail: When True, more detailed info (specified in
            `RuntimeEnvState`) is queried and returned. See
            :class:`RuntimeEnvState <ray.util.state.common.RuntimeEnvState>`.
        raise_on_missing_output: When True, raise if the output is missing
            data due to truncation or an unavailable data source.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        List of
        :class:`RuntimeEnvState <ray.util.state.common.RuntimeEnvState>`.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    client = StateApiClient(address=address)
    list_options = ListApiOptions(
        limit=limit,
        timeout=timeout,
        filters=filters,
        detail=detail,
    )
    return client.list(
        StateResource.RUNTIME_ENVS,
        options=list_options,
        raise_on_missing_output=raise_on_missing_output,
        _explain=_explain,
    )
@DeveloperAPI
def list_cluster_events(
    address: Optional[str] = None,
    filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None,
    limit: int = DEFAULT_LIMIT,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    detail: bool = False,
    raise_on_missing_output: bool = True,
    _explain: bool = False,
) -> List[Dict]:
    """List cluster events reported to the state backend.

    Args:
        address: Ray bootstrap address, could be `auto`, `localhost:6379`.
            If None, it will be resolved automatically from an initialized ray.
        filters: List of tuples of filter key, predicate (=, or !=), and
            the filter value. String filter values are case-insensitive.
        limit: Max number of entries returned by the state backend.
        timeout: Max timeout value for the state APIs requests made.
        detail: When True, more detailed info will be queried and returned.
        raise_on_missing_output: When True, exceptions will be raised if
            there is missing data due to truncation/data source unavailable.
        _explain: Print the API information such as API latency or
            failed query information.

    Returns:
        List of dictionarified cluster events.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    return StateApiClient(address=address).list(
        StateResource.CLUSTER_EVENTS,
        options=ListApiOptions(
            limit=limit, timeout=timeout, filters=filters, detail=detail
        ),
        raise_on_missing_output=raise_on_missing_output,
        _explain=_explain,
    )
"""
Log APIs
"""
@DeveloperAPI
def get_log(
    address: Optional[str] = None,
    node_id: Optional[str] = None,
    node_ip: Optional[str] = None,
    filename: Optional[str] = None,
    actor_id: Optional[str] = None,
    task_id: Optional[str] = None,
    pid: Optional[int] = None,
    follow: bool = False,
    tail: int = -1,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    suffix: str = "out",
    encoding: Optional[str] = "utf-8",
    errors: Optional[str] = "strict",
    submission_id: Optional[str] = None,
    attempt_number: int = 0,
    _interval: Optional[float] = None,
    filter_ansi_code: bool = False,
) -> Generator[str, None, None]:
    """Retrieve log file based on file name or some entities ids (pid, actor id, task id).

    Examples:
        .. testcode::
            :hide:

            import ray
            import time
            ray.shutdown()
            ray.init()
            # Wait for the node to be registered to the dashboard
            time.sleep(5)

        .. testcode::

            import ray
            from ray.util.state import get_log

            # Node id could be retrieved from list_nodes() or ray.nodes()
            node_id = ray.nodes()[0]["NodeID"]
            filename = "raylet.out"
            for l in get_log(filename=filename, node_id=node_id):
                print(l)

        .. testoutput::
            :options: +MOCK

            [2023-05-19 12:35:18,347 I 4259 68399276] (raylet) io_service_pool.cc:35: IOServicePool is running with 1 io_service.
            [2023-05-19 12:35:18,348 I 4259 68399276] (raylet) store_runner.cc:32: Allowing the Plasma store to use up to 2.14748GB of memory.
            [2023-05-19 12:35:18,348 I 4259 68399276] (raylet) store_runner.cc:48: Starting object store with directory /tmp, fallback /tmp/ray, and huge page support disabled

    Args:
        address: Ray bootstrap address, could be `auto`, `localhost:6379`.
            If not specified, it will be retrieved from the initialized ray cluster.
        node_id: Id of the node containing the logs .
        node_ip: Ip of the node containing the logs. (At least one of the node_id and
            node_ip have to be supplied when identifying a node).
        filename: Name of the file (relative to the ray log directory) to be retrieved.
        actor_id: Id of the actor if getting logs from an actor.
        task_id: Id of the task if getting logs from a non concurrent actor.
            For concurrent actor, please query the log with actor_id.
        pid: PID of the worker if getting logs generated by a worker. When querying
            with pid, either node_id or node_ip must be supplied.
        follow: When set to True, logs will be streamed and followed.
        tail: Number of lines to get from the end of the log file. Set to -1 for getting
            the entire log.
        timeout: Max timeout for requests made when getting the logs.
        suffix: The suffix of the log file if query by id of tasks/workers/actors. Default to "out".
        encoding: The encoding used to decode the content of the log file. Default is
            "utf-8". Use None to get binary data directly.
        errors: The error handling scheme to use for decoding errors. Default is
            "strict". See https://docs.python.org/3/library/codecs.html#error-handlers
        submission_id: Job submission ID if getting log from a submission job.
        attempt_number: The attempt number of the task if getting logs generated by a task.
        _interval: The interval in secs to print new logs when `follow=True`.
        filter_ansi_code: A boolean flag for determining whether to filter ANSI escape codes.
            Setting to `True` removes ANSI escape codes from the output. The default value is `False`.

    Return:
        A Generator of log line, None for SendType and ReturnType.

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    # Resolve the dashboard (API server) HTTP endpoint from the bootstrap address.
    api_server_url = ray_address_to_api_server_url(address)
    # "stream" keeps the HTTP connection open to follow the file; "file"
    # returns the (tail of the) file content once.
    media_type = "stream" if follow else "file"

    options = GetLogOptions(
        node_id=node_id,
        node_ip=node_ip,
        filename=filename,
        actor_id=actor_id,
        task_id=task_id,
        pid=pid,
        lines=tail,
        interval=_interval,
        media_type=media_type,
        timeout=timeout,
        suffix=suffix,
        submission_id=submission_id,
        attempt_number=attempt_number,
    )

    # Serialize only the options that were actually set (None fields are
    # omitted from the query string).
    options_dict = {}
    for field in fields(options):
        option_val = getattr(options, field.name)
        if option_val is not None:
            options_dict[field.name] = option_val
    # NOTE(review): `filter_ansi_code` is annotated as a bool with default
    # False, so this check is effectively always true; it only guards callers
    # that explicitly pass None.
    if filter_ansi_code is not None:
        options_dict["filter_ansi_code"] = filter_ansi_code

    with requests.get(
        f"{api_server_url}/api/v0/logs/{media_type}?"
        f"{urllib.parse.urlencode(options_dict)}",
        stream=True,
    ) as r:
        if r.status_code != 200:
            raise RayStateApiException(r.text)
        # Chunks arrive as bytes; decode them unless the caller asked for raw
        # bytes by passing encoding=None.
        for chunk in r.iter_content(chunk_size=None):
            if encoding is not None:
                chunk = chunk.decode(encoding=encoding, errors=errors)
            yield chunk
@DeveloperAPI
def list_logs(
    address: Optional[str] = None,
    node_id: Optional[str] = None,
    node_ip: Optional[str] = None,
    glob_filter: Optional[str] = None,
    timeout: int = DEFAULT_RPC_TIMEOUT,
) -> Dict[str, List[str]]:
    """Listing log files available.

    Args:
        address: Ray bootstrap address, could be `auto`, `localhost:6379`.
            If not specified, it will be retrieved from the initialized ray cluster.
        node_id: Id of the node containing the logs.
        node_ip: Ip of the node containing the logs.
        glob_filter: Name of the file (relative to the ray log directory) to be
            retrieved. E.g. `glob_filter="*worker*"` for all worker logs.
        timeout: Max timeout for requests made when getting the logs.

    Return:
        A dictionary where the keys are log groups (e.g. gcs, raylet, worker), and
        values are list of log filenames.

    Raises:
        RayStateApiException: if the CLI failed to query the data, or ConnectionError if
        failed to resolve the ray address.
    """  # noqa: E501
    # NOTE(review): `assert` is stripped under `python -O`; input validation
    # via an explicit raise would be more robust.
    assert (
        node_ip is not None or node_id is not None
    ), "At least one of node ip and node id is required"

    # Resolve the dashboard (API server) HTTP endpoint from the bootstrap address.
    api_server_url = ray_address_to_api_server_url(address)

    if not glob_filter:
        glob_filter = "*"

    options_dict = {}
    if node_ip:
        options_dict["node_ip"] = node_ip
    if node_id:
        options_dict["node_id"] = node_id
    if glob_filter:
        options_dict["glob"] = glob_filter
    options_dict["timeout"] = timeout

    r = requests.get(
        f"{api_server_url}/api/v0/logs?{urllib.parse.urlencode(options_dict)}"
    )
    # TODO(rickyx): we could do better at error handling here.
    r.raise_for_status()

    # The API signals application-level failure in the JSON body even on a
    # 200 response.
    response = r.json()
    if response["result"] is False:
        raise RayStateApiException(
            "API server internal error. See dashboard.log file for more details. "
            f"Error: {response['msg']}"
        )

    return response["data"]["result"]
"""
Summary APIs
"""
@DeveloperAPI
def summarize_tasks(
    address: Optional[str] = None,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    raise_on_missing_output: bool = True,
    _explain: bool = False,
) -> Dict:
    """Summarize the tasks in cluster.

    Args:
        address: Ray bootstrap address, could be `auto`, `localhost:6379`.
            If None, it will be resolved automatically from an initialized ray.
        timeout: Max timeout for requests made when getting the states.
        raise_on_missing_output: When True, exceptions will be raised if
            there is missing data due to truncation/data source unavailable.
        _explain: Print the API information such as API latency or
            failed query information.

    Return:
        Dictionarified
        :class:`~ray.util.state.common.TaskSummaries`

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    return StateApiClient(address=address).summary(
        SummaryResource.TASKS,
        options=SummaryApiOptions(timeout=timeout),
        raise_on_missing_output=raise_on_missing_output,
        _explain=_explain,
    )
@DeveloperAPI
def summarize_actors(
    address: Optional[str] = None,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    raise_on_missing_output: bool = True,
    _explain: bool = False,
) -> Dict:
    """Summarize the actors in cluster.

    Args:
        address: Ray bootstrap address (e.g. `auto`, `localhost:6379`).
            Resolved automatically from an initialized ray when None.
        timeout: Max timeout for requests made when getting the states.
        raise_on_missing_output: When True, raise if the output is missing
            data due to truncation or an unavailable data source.
        _explain: Print the API information such as API latency or
            failed query information.

    Return:
        Dictionarified :class:`~ray.util.state.common.ActorSummaries`

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    client = StateApiClient(address=address)
    options = SummaryApiOptions(timeout=timeout)
    return client.summary(
        SummaryResource.ACTORS,
        options=options,
        raise_on_missing_output=raise_on_missing_output,
        _explain=_explain,
    )
@DeveloperAPI
def summarize_objects(
    address: Optional[str] = None,
    timeout: int = DEFAULT_RPC_TIMEOUT,
    raise_on_missing_output: bool = True,
    _explain: bool = False,
) -> Dict:
    """Summarize the objects in cluster.

    Args:
        address: Ray bootstrap address (e.g. `auto`, `localhost:6379`).
            Resolved automatically from an initialized ray when None.
        timeout: Max timeout for requests made when getting the states.
        raise_on_missing_output: When True, raise if the output is missing
            data due to truncation or an unavailable data source.
        _explain: Print the API information such as API latency or
            failed query information.

    Return:
        Dictionarified :class:`~ray.util.state.common.ObjectSummaries`

    Raises:
        RayStateApiException: if the CLI failed to query the data.
    """  # noqa: E501
    client = StateApiClient(address=address)
    options = SummaryApiOptions(timeout=timeout)
    return client.summary(
        SummaryResource.OBJECTS,
        options=options,
        raise_on_missing_output=raise_on_missing_output,
        _explain=_explain,
    )
|
StateApiClient
|
python
|
tensorflow__tensorflow
|
tensorflow/core/function/capture/capture_container.py
|
{
"start": 1807,
"end": 12184
}
|
class ____(object):
"""A container for all capture usages within FuncGraph."""
  def __init__(self):
    """Initializes an empty capture container."""
    # By-ref captures: keyed by a caller-supplied (or generated) hashable key.
    # The external side stores a zero-arg callable re-evaluated at call time
    # (see `_capture_by_ref`).
    self._by_ref_internal = py_collections.OrderedDict()
    self._by_ref_external = py_collections.OrderedDict()
    self._by_ref_tracetype = py_collections.OrderedDict()
    # By-val captures: keyed by id() of the captured external tensor
    # (see `capture_by_value` / `_create_placeholder_helper`).
    self._by_val_internal = MutationAwareDict()
    self._by_val_external = MutationAwareDict()
    self._by_val_tracetype = py_collections.OrderedDict()

    # Set of external ops on which the graph has a control dependency
    self.control = object_identity.ObjectIdentitySet()

    # Cached properties derived from the above.
    self._cached_by_val_capture_tuples = []
    self._cached_capture_types = py_collections.OrderedDict()
def clear(self):
self._by_ref_internal.clear()
self._by_ref_external.clear()
self._by_ref_tracetype.clear()
self._by_val_internal.clear()
self._by_val_external.clear()
  def capture_by_value(
      self, graph: Any, tensor: core.Tensor, name: Optional[str] = None
  ) -> core.Tensor:
    """Captures `tensor` if it's external to this graph.

    If `tensor` is from a different graph, returns a placeholder for it.
    `tensor` and the placeholder will appear in self.captures, and the
    placeholder will appear in self.inputs. Multiple calls to this method with
    the same `tensor` argument will return the same placeholder. If `tensor` is
    from this graph, returns `tensor`.

    Args:
      graph: The FuncGraph that captures this tensor.
      tensor: Tensor. May be from this FuncGraph or a different graph.
      name: Optional name if a placeholder is created.

    Returns:
      Tensor from this FuncGraph.

    Raises:
      InaccessibleTensorError: if any tensors are accessed in a manner that
      bypasses the mechanisms required for the data dependencies to be correctly
      wired.
    """
    if isinstance(tensor, core.Value):
      if name is None:
        # A unique (within the program execution) integer.
        name = str(pywrap_tfe.TFE_Py_UID())

      # Small EagerTensors are captured with Const ops
      if (
          tensor.dtype in dtypes.TF_VALUE_DTYPES
          and functools.reduce(lambda a, b: a * b, tensor.shape, 1)
          <= _EAGER_CONST_THRESHOLD
      ):
        # Reuse an existing capture for this exact eager tensor, if any.
        graph_const = self.by_val_internal.get(id(tensor))
        if graph_const is None:
          graph_const = tensor._capture_as_const(name)  # pylint: disable=protected-access
          if graph_const is None:
            # Some eager tensors, e.g. parallel tensors, are not convertible to
            # a single constant. We'll use a placeholder for this case.
            graph_const = self._create_placeholder_helper(graph, tensor, name)
          self.add_or_replace(
              key=id(tensor),
              external=tensor,
              internal=graph_const,
              is_by_ref=False,
          )
          graph.inputs.append(graph_const)
        # Record on the gradient tape so gradients flow to the original tensor.
        graph_const._record_tape(tensor)  # pylint: disable=protected-access
        return graph_const

      # Large EagerTensors and resources are captured with Placeholder ops
      return self._create_placeholder_helper(graph, tensor, name)

    if tensor.graph is not graph:
      graph._validate_in_scope(tensor)  # pylint: disable=protected-access
      if name is None:
        # Symbolic tensors carry a producing op; fall back to its name.
        assert tensor.op is not None, (
            tensor.__class__,
            dir(tensor),
            tensor.__class__.__name__,
        )
        name = tensor.op.name
      # cond/while graphs override _capture_helper() so cannot call
      # self.create_placeholder_helper() here directly.
      return graph._capture_helper(tensor, name)  # pylint: disable=protected-access

    # Already in this graph: nothing to capture.
    return tensor
def add_or_replace(
self,
key: Hashable,
external: Any,
internal: core.Tensor,
tracetype: Any = None,
is_by_ref: bool = False,
) -> None:
"""Replace a already exsiting capture, otherwise add it."""
if is_by_ref:
self._by_ref_external[key] = external
self._by_ref_internal[key] = internal
self._by_ref_tracetype[key] = tracetype
else:
self._by_val_internal[key] = internal
self._by_val_external[key] = external
if tracetype is not None:
self._by_val_tracetype[key] = tracetype
else:
self._by_val_tracetype[key] = trace_type.from_value(external)
def pop(self, key: Hashable, is_by_ref: bool = False) -> Any:
if is_by_ref:
return (
self._by_ref_external.pop(key, None),
self._by_ref_internal.pop(key, None),
self._by_ref_tracetype.pop(key, None),
)
else:
return (
self._by_val_external.pop(key, None),
self._by_val_internal.pop(key, None),
self._by_val_tracetype.pop(key, None),
)
def reset_captures(self, tensors, placeholders):
"""Set the captures with the provided list of captures & placeholder."""
self._by_val_external = MutationAwareDict()
self._by_val_internal = MutationAwareDict()
self._by_val_tracetype = MutationAwareDict()
for external, internal in zip(tensors, placeholders):
key = id(external)
self._by_val_external[key] = external
self._by_val_internal[key] = internal
self._by_val_tracetype[key] = trace_type.from_value(external)
  # TODO(panzf): make the method public after supporting lam() returns
  # non-tensor values. Currently, this method is only used by
  # FuncGraph._experimental_capture_side_input_by_ref(), which contains the
  # logics for converting non-tensor values to tensor.
  def _capture_by_ref(
      self, graph: Any, lam: Callable[[], Any], key: Hashable = None
  ) -> Any:
    """Used during tracing process to create/retrive by-ref captures.

    Args:
      graph: The FuncGraph that captures this tensor.
      lam: A callable that takes no arguments and returns tensor captures.
      key: A hashable identifier.

    Returns:
      Tensor from this FuncGraph.
    """
    # Check if the capture exists in self._by_ref
    if key is not None and key in self._by_ref_internal:
      return self._by_ref_internal[key]
    if key is None:
      # Generate a fresh integer key that does not collide with existing ones.
      key = len(self._by_ref_internal)
      while key in self._by_ref_internal:
        key += 1

    # Trace the current value once to derive the capture's trace type and
    # build a graph-internal placeholder for it.
    value_nested = lam()
    capture_trace_type = trace_type.from_value(value_nested)
    ctx = trace_type.InternalPlaceholderContext(graph)
    internal = capture_trace_type.placeholder_value(ctx)

    def lam_fn():
      # Re-evaluate the lambda at call time and flatten the result to tensors
      # according to the traced type.
      # pytype: disable=attribute-error
      value = lam()
      return capture_trace_type.to_tensors(value)
      # pytype: enable=attribute-error

    self._by_ref_external[key] = lam_fn
    self._by_ref_internal[key] = internal
    self._by_ref_tracetype[key] = capture_trace_type
    return self._by_ref_internal[key]
def merge_by_ref_with(self, other: "FunctionCaptures") -> None:
"""Add by-ref captures from `other` to `self` if not exist."""
assert isinstance(other, FunctionCaptures)
for key in other.by_ref_external:
if key not in self._by_ref_external:
self._by_ref_external[key] = other.by_ref_external[key]
self._by_ref_tracetype[key] = other.by_ref_tracetype[key]
# TODO(panzf): Return structured values instead of flat tensors.
def get_by_ref_snapshot(self) -> Mapping[Hashable, Any]:
"""Get a snapshot of current values of by-ref captures."""
snapshot = {}
for key in self._by_ref_external:
func = self._by_ref_external[key]
try:
value = func()
except (AttributeError, RuntimeError):
# b/269680071 In case of by-ref captures are unavailable at dispatch
# time, use the predefined trace_type instead.
value = self._by_ref_tracetype[key]
snapshot[key] = value
return snapshot
def _create_placeholder_helper(
self, graph: Any, tensor: core.Tensor, name: str
):
"""A helper function to create capture placeholder."""
placeholder = self._by_val_internal.get(id(tensor))
if placeholder is None:
tracing_ctx = trace_type.InternalTracingContext()
spec = trace_type.from_value(tensor, tracing_ctx)
spec._name = name # pylint: disable=protected-access
if isinstance(tensor, core.Value) and tensor.is_packed:
composite_device_name = tensor.device
else:
composite_device_name = None
placeholder_ctx = trace_type.InternalPlaceholderContext(
graph,
with_none_control_dependencies=True,
composite_device_name=composite_device_name,
)
placeholder = spec.placeholder_value(placeholder_ctx)
self.add_or_replace(
key=id(tensor), external=tensor, internal=placeholder, is_by_ref=False
)
graph.inputs.append(placeholder)
placeholder._record_tape(tensor) # pylint: disable=protected-access
return placeholder
def _recompute_cached_properties(self):
"""Regenerates cached properties if there have been mutations."""
self._by_val_internal.mutated = False
self._by_val_external.mutated = False
assert len(self._by_val_internal) == len(self._by_val_external)
self._cached_by_val_capture_tuples = []
for key in self._by_val_internal:
assert key in self._by_val_external
internal = self._by_val_internal[key]
external = self._by_val_external[key]
self._cached_by_val_capture_tuples.append((external, internal))
self._cached_capture_types = py_collections.OrderedDict(
list(self._by_val_tracetype.items())
+ list(self._by_ref_tracetype.items())
)
@property
def capture_types(self):
if self._by_val_internal.mutated or self._by_val_external.mutated:
self._recompute_cached_properties()
return self._cached_capture_types
@property
def by_val_capture_tuples(self):
if self._by_val_internal.mutated or self._by_val_external.mutated:
self._recompute_cached_properties()
return self._cached_by_val_capture_tuples
@property
def by_ref_internal(self):
return self._by_ref_internal
@property
def by_ref_external(self):
return self._by_ref_external
@property
def by_ref_tracetype(self):
return self._by_ref_tracetype
@property
def by_val_internal(self):
return self._by_val_internal
@property
def by_val_external(self):
return self._by_val_external
@property
def by_val_tracetype(self):
return self._by_val_tracetype
|
FunctionCaptures
|
python
|
GoogleCloudPlatform__python-docs-samples
|
appengine/standard/ndb/overview/main.py
|
{
"start": 1298,
"end": 2893
}
|
class ____(webapp2.RequestHandler):
def get(self):
self.response.out.write("<html><body>")
guestbook_name = self.request.get("guestbook_name")
ancestor_key = ndb.Key("Book", guestbook_name or "*notitle*")
greetings = Greeting.query_book(ancestor_key).fetch(20)
# [END gae_ndb_overview_query]
greeting_blockquotes = []
for greeting in greetings:
greeting_blockquotes.append(
"<blockquote>%s</blockquote>" % cgi.escape(greeting.content)
)
self.response.out.write(
textwrap.dedent(
"""\
<html>
<body>
{blockquotes}
<form action="/sign?{sign}" method="post">
<div>
<textarea name="content" rows="3" cols="60">
</textarea>
</div>
<div>
<input type="submit" value="Sign Guestbook">
</div>
</form>
<hr>
<form>
Guestbook name:
<input value="{guestbook_name}" name="guestbook_name">
<input type="submit" value="switch">
</form>
</body>
</html>"""
).format(
blockquotes="\n".join(greeting_blockquotes),
sign=urllib.urlencode({"guestbook_name": guestbook_name}),
guestbook_name=cgi.escape(guestbook_name),
)
)
# [START gae_ndb_overview_submit]
|
MainPage
|
python
|
kamyu104__LeetCode-Solutions
|
Python/restore-the-array.py
|
{
"start": 36,
"end": 681
}
|
class ____(object):
def numberOfArrays(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
MOD = 10**9 + 7
klen = len(str(k))
dp = [0]*(klen+1)
dp[len(s)%len(dp)] = 1
for i in reversed(xrange(len(s))):
dp[i%len(dp)] = 0
if s[i] == '0':
continue
curr = 0
for j in xrange(i, min(i+klen, len(s))):
curr = 10*curr + int(s[j])
if curr > k:
break
dp[i%len(dp)] = (dp[i%len(dp)] + dp[(j+1)%len(dp)])%MOD
return dp[0]
|
Solution
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 618582,
"end": 618936
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("direction", "field")
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
field = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="field")
|
SortBy
|
python
|
huggingface__transformers
|
src/transformers/models/sam3/modeling_sam3.py
|
{
"start": 60374,
"end": 66052
}
|
class ____(nn.Module):
"""DETR decoder layer with self-attention, text cross-attention, and vision cross-attention."""
def __init__(self, config: Sam3DETRDecoderConfig):
super().__init__()
self.config = config
self.self_attn = Sam3Attention(config)
self.self_attn_dropout = nn.Dropout(config.dropout)
self.self_attn_layer_norm = nn.LayerNorm(config.hidden_size)
self.text_cross_attn = Sam3Attention(config)
self.text_cross_attn_dropout = nn.Dropout(config.dropout)
self.text_cross_attn_layer_norm = nn.LayerNorm(config.hidden_size)
self.vision_cross_attn = Sam3Attention(config)
self.vision_cross_attn_dropout = nn.Dropout(config.dropout)
self.vision_cross_attn_layer_norm = nn.LayerNorm(config.hidden_size)
self.mlp = Sam3MLP(config)
self.mlp_layer_norm = nn.LayerNorm(config.hidden_size)
self.mlp_dropout = nn.Dropout(config.dropout)
def forward(
self,
hidden_states: torch.Tensor,
query_pos: torch.Tensor,
text_features: torch.Tensor,
vision_features: torch.Tensor,
vision_pos_encoding: torch.Tensor,
text_mask: Optional[torch.Tensor] = None,
vision_cross_attn_mask: Optional[torch.Tensor] = None,
presence_token: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
Forward pass for decoder layer.
Args:
hidden_states: Query features [batch_size, num_queries, hidden_size]
query_pos: Query position embeddings [batch_size, num_queries, hidden_size]
text_features: Text features [batch_size, seq_len, hidden_size]
vision_features: Vision features [batch_size, height*width, hidden_size]
vision_pos_encoding: Vision position encoding [batch_size, height*width, hidden_size]
text_mask: Text padding mask [batch_size, seq_len] where True=valid, False=padding
vision_cross_attn_mask: Vision cross-attention mask [batch_size, num_heads, num_queries, height*width]
presence_token: Optional presence token [batch_size, 1, hidden_size]
Returns:
Tuple of (updated hidden states, updated presence token)
"""
# Concatenate presence token if provided
if presence_token is not None:
hidden_states = torch.cat([presence_token, hidden_states], dim=1)
query_pos = torch.cat([torch.zeros_like(presence_token), query_pos], dim=1)
# Self-attention with query position encoding
residual = hidden_states
query_with_pos = hidden_states + query_pos
attn_output, _ = self.self_attn(
query=query_with_pos,
key=query_with_pos,
value=hidden_states,
attention_mask=None,
**kwargs,
)
hidden_states = residual + self.self_attn_dropout(attn_output)
hidden_states = self.self_attn_layer_norm(hidden_states)
# Text cross-attention: queries attend to text features
residual = hidden_states
query_with_pos = hidden_states + query_pos
text_cross_attn_mask = None
if text_mask is not None:
text_cross_attn_mask = create_bidirectional_mask(
config=self.config,
input_embeds=hidden_states,
attention_mask=text_mask,
encoder_hidden_states=text_features,
)
attn_output, _ = self.text_cross_attn(
query=query_with_pos,
key=text_features,
value=text_features,
attention_mask=text_cross_attn_mask,
**kwargs,
)
hidden_states = residual + self.text_cross_attn_dropout(attn_output)
hidden_states = self.text_cross_attn_layer_norm(hidden_states)
# Expand vision cross-attention mask for presence token if needed
combined_vision_mask = vision_cross_attn_mask
if presence_token is not None and combined_vision_mask is not None:
batch_size, num_heads = combined_vision_mask.shape[:2]
presence_mask = torch.zeros(
batch_size,
num_heads,
1,
combined_vision_mask.shape[-1],
device=combined_vision_mask.device,
dtype=combined_vision_mask.dtype,
)
combined_vision_mask = torch.cat([presence_mask, combined_vision_mask], dim=2)
# Vision cross-attention: queries attend to vision features (with RPB)
residual = hidden_states
query_with_pos = hidden_states + query_pos
key_with_pos = vision_features + vision_pos_encoding
attn_output, _ = self.vision_cross_attn(
query=query_with_pos,
key=key_with_pos,
value=vision_features,
attention_mask=combined_vision_mask,
**kwargs,
)
hidden_states = residual + self.vision_cross_attn_dropout(attn_output)
hidden_states = self.vision_cross_attn_layer_norm(hidden_states)
# MLP
residual = hidden_states
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.mlp_dropout(hidden_states)
hidden_states = self.mlp_layer_norm(hidden_states)
# Extract presence token if it was added
presence_token_out = None
if presence_token is not None:
presence_token_out = hidden_states[:, :1]
hidden_states = hidden_states[:, 1:]
return hidden_states, presence_token_out
|
Sam3DetrDecoderLayer
|
python
|
google__pytype
|
pytype/abstract/class_mixin.py
|
{
"start": 6012,
"end": 28991
}
|
class ____(metaclass=mixin.MixinMeta): # pylint: disable=undefined-variable
"""Mix-in to mark all class-like values."""
overloads: Sequence[str] = (
"_get_class",
"call",
"compute_mro",
"get_own_new",
"get_special_attribute",
"update_official_name",
)
def __new__(cls, *unused_args, **unused_kwds) -> "Class":
"""Prevent direct instantiation."""
assert cls is not Class, "Cannot instantiate Class"
return object.__new__(cls)
def init_mixin(self, metaclass: "Class|None") -> None:
"""Mix-in equivalent of __init__."""
if metaclass is None:
metaclass = self._get_inherited_metaclass()
if metaclass:
self.cls = metaclass
# Key-value store of metadata for overlays to use.
self.metadata = {}
self.decorators = []
self._instance_cache = {}
self._init_abstract_methods()
self._init_protocol_attributes()
self._init_overrides_bool()
self._all_formal_type_parameters = datatypes.AliasingDict()
self._all_formal_type_parameters_loaded = False
# Call these methods in addition to __init__ when constructing instances.
self.additional_init_methods = []
if self.is_test_class():
self.additional_init_methods.append("setUp")
def _get_class(self):
return self.ctx.convert.type_type
def bases(self) -> list[cfg.Variable]:
return []
@property
def all_formal_type_parameters(
self,
) -> "datatypes.AliasingDict[str, _instance_base.SimpleValue]":
self._load_all_formal_type_parameters()
return self._all_formal_type_parameters
def _load_all_formal_type_parameters(self) -> None:
"""Load _all_formal_type_parameters."""
if self._all_formal_type_parameters_loaded:
return
bases = [
abstract_utils.get_atomic_value(
base, default=self.ctx.convert.unsolvable
)
for base in self.bases()
]
for base in bases:
abstract_utils.parse_formal_type_parameters(
base, self.full_name, self._all_formal_type_parameters
)
self._all_formal_type_parameters_loaded = True
def get_own_attributes(self):
"""Get the attributes defined by this class."""
raise NotImplementedError(self.__class__.__name__)
def has_protocol_base(self) -> bool:
"""Returns whether this class inherits directly from typing.Protocol.
Subclasses that may inherit from Protocol should override this method.
"""
return False
def _init_protocol_attributes(self) -> None:
"""Compute this class's protocol attributes."""
if isinstance(self, _abstract.ParameterizedClass):
self.protocol_attributes = self.base_cls.protocol_attributes
return
if not self.has_protocol_base():
self.protocol_attributes = set()
return
if isinstance(self, _abstract.PyTDClass) and self.pytd_cls.name.startswith(
"typing."
):
protocol_attributes = set()
if self.pytd_cls.name == "typing.Mapping":
# Append Mapping-specific attributes to forbid matching against classes
# that satisfy the Mapping ABC but don't contain mapping_attrs.
mapping_attrs = {
"__contains__",
"keys",
"items",
"values",
"get",
"__eq__",
"__ne__",
}
protocol_attributes |= mapping_attrs
# In typing.pytd, we've experimentally marked some classes such as
# Sequence, which contains a mix of abstract and non-abstract methods, as
# protocols, with only the abstract methods being required.
protocol_attributes |= self.abstract_methods
self.protocol_attributes = protocol_attributes
return
# For the algorithm to run, protocol_attributes needs to be populated with
# the protocol attributes defined by this class. We'll overwrite the
# attribute with the full set of protocol attributes later.
self.protocol_attributes = self.get_own_attributes()
protocol_attributes = set()
for cls in reversed(self.mro):
if not isinstance(cls, Class):
continue
if cls.is_protocol:
# Add protocol attributes defined by this class.
protocol_attributes |= {a for a in cls.protocol_attributes if a in cls}
else:
# Remove attributes implemented by this class.
protocol_attributes = {a for a in protocol_attributes if a not in cls}
self.protocol_attributes = protocol_attributes
def _init_overrides_bool(self) -> None:
"""Compute and cache whether the class sets its own boolean value."""
# A class's instances can evaluate to False if it defines __bool__ or
# __len__.
if isinstance(self, _abstract.ParameterizedClass):
self.overrides_bool = self.base_cls.overrides_bool
return
for cls in self.mro:
if isinstance(cls, Class):
if any(x in cls.get_own_attributes() for x in ("__bool__", "__len__")):
self.overrides_bool = True
return
self.overrides_bool = False
def get_own_abstract_methods(self):
"""Get the abstract methods defined by this class."""
raise NotImplementedError(self.__class__.__name__)
def _init_abstract_methods(self) -> None:
"""Compute this class's abstract methods."""
# For the algorithm to run, abstract_methods needs to be populated with the
# abstract methods defined by this class. We'll overwrite the attribute
# with the full set of abstract methods later.
self.abstract_methods = self.get_own_abstract_methods()
abstract_methods = set()
for cls in reversed(self.mro):
if not isinstance(cls, Class):
continue
# Remove methods implemented by this class.
abstract_methods = {
m
for m in abstract_methods
if m not in cls or m in cls.abstract_methods
}
# Add abstract methods defined by this class.
abstract_methods |= {m for m in cls.abstract_methods if m in cls}
self.abstract_methods = abstract_methods
def _has_explicit_abcmeta(self) -> bool:
return any(base.full_name == "abc.ABCMeta" for base in self.cls.mro)
def _has_implicit_abcmeta(self) -> bool:
"""Whether the class should be considered implicitly abstract."""
# Protocols must be marked as abstract to get around the
# [ignored-abstractmethod] check for interpreter classes.
if not isinstance(self, _abstract.InterpreterClass):
return False
# We check self._bases (immediate bases) instead of self.mro because our
# builtins and typing stubs are inconsistent about implementing abstract
# methods, and we don't want [not-instantiable] errors all over the place
# because a class has Protocol buried in its MRO.
for var in self._bases:
if any(
base.full_name == "typing.Protocol"
or isinstance(base, Class)
and base.is_protocol
for base in var.data
):
return True
return False
@property
def is_abstract(self) -> bool:
return (
self._has_explicit_abcmeta() or self._has_implicit_abcmeta()
) and bool(self.abstract_methods)
def is_test_class(self) -> bool:
return any(
base.full_name in ("unittest.TestCase", "unittest.case.TestCase")
for base in self.mro
)
@property
def is_enum(self) -> bool:
return any(cls.full_name == "enum.EnumMeta" for cls in self.cls.mro)
@property
def is_protocol(self) -> bool:
return bool(self.protocol_attributes)
@property
def is_typed_dict_class(self) -> bool:
return (
self.full_name == "typing.TypedDict"
or self.__class__.__name__ == "TypedDictClass"
)
def get_annotated_local(self, name: str) -> abstract_utils.Local | None:
ann = abstract_utils.get_annotations_dict(self.members)
return ann and ann.annotated_locals.get(name)
def _get_inherited_metaclass(self) -> "Class | None":
for base in self.mro[1:]:
if (
isinstance(base, Class)
and base.cls != self.ctx.convert.unsolvable
and base.cls.full_name != "builtins.type"
):
return base.cls
return None
def call_metaclass_init(self, node: cfg.CFGNode) -> cfg.CFGNode:
"""Call the metaclass's __init__ method if it does anything interesting."""
if self.cls.full_name == "builtins.type":
return node
elif (
isinstance(self.cls, Class)
and "__dataclass_transform__" in self.cls.metadata
):
# A metaclass with @dataclass_transform just needs to apply the attribute
# to the current class.
self.metadata["__dataclass_transform__"] = True
return node
node, init = self.ctx.attribute_handler.get_attribute(
node, self.cls, "__init__"
)
if not init or not any(
isinstance(f, _abstract.SignedFunction) for f in init.data
):
# Only SignedFunctions (InterpreterFunction and SimpleFunction) have
# interesting side effects.
return node
args = function.Args(
posargs=(
self.to_variable(node),
self.ctx.convert.build_string(node, self.name),
self.ctx.convert.build_tuple(node, self.bases()),
self.ctx.new_unsolvable(node),
)
)
log.debug(
"Calling __init__ on metaclass %s of class %s", self.cls.name, self.name
)
node, _ = function.call_function(self.ctx, node, init, args)
return node
def call_init_subclass(self, node: cfg.CFGNode) -> cfg.CFGNode:
"""Call init_subclass(cls) for all base classes."""
for cls in self.mro:
node = cls.init_subclass(node, self)
return node
def get_own_new(
self, node: cfg.CFGNode, value: cfg.Binding
) -> tuple[cfg.CFGNode, cfg.Variable | None]:
"""Get this value's __new__ method, if it isn't object.__new__.
Args:
node: The current node.
value: A cfg.Binding containing this value.
Returns:
A tuple of (1) a node and (2) either a cfg.Variable of the special
__new__ method, or None.
"""
node, new = self.ctx.attribute_handler.get_attribute(
node, value.data, "__new__"
)
if new is None:
return node, None
if len(new.bindings) == 1:
f = new.bindings[0].data
if isinstance(
f, _abstract.AMBIGUOUS_OR_EMPTY
) or self.ctx.convert.object_type.is_object_new(f):
# Instead of calling object.__new__, our abstract classes directly
# create instances of themselves.
return node, None
return node, new
def _call_new_and_init(
self, node: cfg.CFGNode, value: cfg.Binding, args: function.Args
) -> tuple[cfg.CFGNode, cfg.Variable | None]:
"""Call __new__ if it has been overridden on the given value."""
node, new = self.get_own_new(node, value)
if new is None:
return node, None
cls = value.AssignToNewVariable(node)
new_args = args.replace(posargs=(cls,) + args.posargs)
node, variable = function.call_function(self.ctx, node, new, new_args)
for val in variable.bindings:
# If val.data is a class, call_init mistakenly calls val.data's __init__
# method rather than that of val.data.cls.
if not isinstance(val.data, Class) and self == val.data.cls:
node = self.call_init(node, val, args)
return node, variable
def _call_method(
self,
node: cfg.CFGNode,
value: cfg.Binding,
method_name: str,
args: function.Args,
) -> cfg.CFGNode:
node, bound_method = self.ctx.vm.get_bound_method(
node, value.data, method_name, value
)
if bound_method:
call_repr = f"{self.name}.{method_name}(..._)"
log.debug("calling %s", call_repr)
node, ret = function.call_function(self.ctx, node, bound_method, args)
log.debug("%s returned %r", call_repr, ret)
return node
def call_init(
self, node: cfg.CFGNode, value: cfg.Binding, args: function.Args
) -> cfg.CFGNode:
node = self._call_method(node, value, "__init__", args)
# Call any additional initalizers the class has registered.
for method in self.additional_init_methods:
node = self._call_method(node, value, method, function.Args(()))
return node
def _new_instance(
self, container, node: cfg.CFGNode, args: function.Args | None
):
"""Returns a (possibly cached) instance of 'self'."""
del args # unused
# We allow only one "instance" per code location, regardless of call stack.
key = self.ctx.vm.current_opcode or node
assert key
if key not in self._instance_cache:
self._instance_cache[key] = _abstract.Instance(self, self.ctx, container) # pytype: disable=wrong-arg-types
return self._instance_cache[key]
def _check_not_instantiable(self) -> None:
"""Report [not-instantiable] if the class cannot be instantiated."""
# We report a not-instantiable error if all of the following are true:
# - The class is abstract.
# - It was not created from an explicit type annotation.
# - The instantiation is not occurring inside one of the class's own
# methods.
# We check the last condition by seeing whether ctx.vm.frame.func is an
# InterpreterFunction whose name starts with "<class>."
if not self.is_abstract or self.from_annotation:
return
if self.ctx.vm.frame and self.ctx.vm.frame.func:
calling_func = self.ctx.vm.frame.func.data
if isinstance(
calling_func, _abstract.InterpreterFunction
) and calling_func.name.startswith(f"{self.name}."):
return
self.ctx.errorlog.not_instantiable(self.ctx.vm.frames, self)
def call(
self,
node: cfg.CFGNode,
func: cfg.Binding,
args: function.Args,
alias_map: datatypes.UnionFind | None = None,
) -> tuple[cfg.CFGNode, cfg.Variable]:
del alias_map # unused
self._check_not_instantiable()
node, variable = self._call_new_and_init(node, func, args)
if variable is None:
value = self._new_instance(None, node, args)
variable = self.ctx.program.NewVariable()
val = variable.AddBinding(value, [func], node)
node = self.call_init(node, val, args)
return node, variable
def get_special_attribute(
self, node: cfg.CFGNode, name: str, valself: cfg.Variable
):
"""Fetch a special attribute."""
if name == "__getitem__" and valself is None:
# See vm_utils._call_binop_on_bindings: valself == None is a special value
# that indicates an annotation.
# TODO(rechen): In Python 3.8 and below, typeshed has a custom __getitem__
# defined on InitVar's metaclass, preventing pytype from recognizing it as
# a type annotation. We can remove the check for _InitVarMeta once we
# support only 3.9+.
if self.cls.full_name not in (
"builtins.type",
"dataclasses._InitVarMeta",
):
# This class has a custom metaclass; check if it defines __getitem__.
_, att = self.ctx.attribute_handler.get_attribute(
node, self.cls, name, self.to_binding(node)
)
if att:
return att
# Treat this class as a parameterized container in an annotation. We do
# not need to worry about the class not being a container: in that case,
# AnnotationContainer's param length check reports an appropriate error.
container = self.to_annotation_container()
return container.get_special_attribute(node, name, valself)
return Class.super(self.get_special_attribute)(node, name, valself)
def has_dynamic_attributes(self) -> bool:
return any(a in self for a in abstract_utils.DYNAMIC_ATTRIBUTE_MARKERS)
def compute_is_dynamic(self) -> bool:
# This needs to be called after self.mro is set.
return any(
c.has_dynamic_attributes() for c in self.mro if isinstance(c, Class)
)
def compute_mro(self):
"""Compute the class precedence list (mro) according to C3."""
bases = abstract_utils.get_mro_bases(self.bases())
bases = [[self]] + [list(base.mro) for base in bases] + [list(bases)]
base2cls = {}
newbases = []
for row in bases:
baselist = []
for base in row:
if isinstance(base, _abstract.ParameterizedClass):
base2cls[base.base_cls] = base
baselist.append(base.base_cls)
else:
base2cls[base] = base
baselist.append(base)
newbases.append(baselist)
# calc MRO and replace them with original base classes
return tuple(base2cls[base] for base in mro.MROMerge(newbases))
def _get_mro_attrs_for_attrs(
self, cls_attrs: list[Attribute], metadata_key: str
) -> list[Attribute]:
"""Traverse the MRO and collect base class attributes for metadata_key."""
# For dataclasses, attributes preserve the ordering from the reversed MRO,
# but derived classes can override the type of an attribute. For attrs,
# derived attributes follow a more complicated scheme which we reproduce
# below.
#
# We take the dataclass behaviour as default, and special-case attrs.
#
# TODO(mdemello): See https://github.com/python-attrs/attrs/issues/428 -
# there are two separate behaviours, based on a `collect_by_mro` argument.
base_attrs = []
taken_attr_names = {a.name for a in cls_attrs}
for base_cls in self.mro[1:]:
if not isinstance(base_cls, Class):
continue
sub_attrs = base_cls.metadata.get(metadata_key, None)
if sub_attrs is None:
continue
for a in sub_attrs:
if a.name not in taken_attr_names:
taken_attr_names.add(a.name)
base_attrs.append(a)
return base_attrs + cls_attrs
def _recompute_attrs_type_from_mro(
self,
all_attrs: dict[str, Attribute],
type_params: "dict[str | int, _base.BaseValue]",
) -> None:
"""Traverse the MRO and apply Generic type params to class attributes.
This IS REQUIRED for dataclass instances that inherits from a Generic.
Args:
all_attrs: All __init__ attributes of a class.
type_params: List of ParameterizedClass instances that will override
TypeVar attributes in all_attrs.
"""
for typ_name, typ_obj in type_params.items():
for attr in all_attrs.values():
if typ_name == attr.typ.cls.name:
attr.typ = typ_obj
def _get_attrs_from_mro(
self, cls_attrs: list[Attribute], metadata_key: str
) -> Sequence[Attribute]:
"""Traverse the MRO and collect base class attributes for metadata_key."""
if metadata_key == "__attrs_attrs__":
# attrs are special-cased
return self._get_mro_attrs_for_attrs(cls_attrs, metadata_key)
all_attrs = {}
sub_attrs = []
type_params = {}
attributes_to_ignore = set()
for base_cls in reversed(self.mro[1:]):
if not isinstance(base_cls, Class):
continue
# Some third-party dataclass implementations add implicit fields that
# should not be considered inherited attributes.
attributes_to_ignore.update(getattr(base_cls, "IMPLICIT_FIELDS", ()))
# Any subclass of a Parameterized dataclass must inherit attributes from
# its parent's init.
# See https://github.com/google/pytype/issues/1104
if isinstance(base_cls, _abstract.ParameterizedClass):
type_params = base_cls.formal_type_parameters
base_cls = base_cls.base_cls
if metadata_key in base_cls.metadata:
sub_attrs.append([
a
for a in base_cls.metadata[metadata_key]
if a.name not in attributes_to_ignore
])
sub_attrs.append(cls_attrs)
for attrs in sub_attrs:
for a in attrs:
all_attrs[a.name] = a
self._recompute_attrs_type_from_mro(all_attrs, type_params)
return list(all_attrs.values())
def record_attr_ordering(self, own_attrs: list[Attribute]) -> None:
"""Records the order of attrs to write in the output pyi."""
self.metadata["attr_order"] = own_attrs
def compute_attr_metadata(
self, own_attrs: list[Attribute], decorator: str
) -> Sequence[Attribute]:
"""Sets combined metadata based on inherited and own attrs.
Args:
own_attrs: The attrs defined explicitly in this class
decorator: The fully qualified decorator name
Returns:
The list of combined attrs.
"""
# We want this to crash if 'decorator' is not in _METADATA_KEYS
assert decorator in _METADATA_KEYS, f"No metadata key for {decorator}"
key = _METADATA_KEYS[decorator]
attrs = self._get_attrs_from_mro(own_attrs, key)
# Stash attributes in class metadata for subclasses.
self.metadata[key] = attrs
return attrs
def update_official_name(self, name: str) -> None:
"""Update the official name."""
if (
self._official_name is None
or name == self.name
or (self._official_name != self.name and name < self._official_name)
):
# The lexical comparison is to ensure that, in the case of multiple calls
# to this method, the official name does not depend on the call order.
self._official_name = name
for member_var in self.members.values():
for member in member_var.data:
if isinstance(member, Class):
member.update_official_name(f"{name}.{member.name}")
def _convert_str_tuple(self, field_name: str) -> tuple[str, ...] | None:
"""Convert __slots__ and similar fields from a Variable to a tuple."""
field_var = self.members.get(field_name)
if field_var is None:
return None
if len(field_var.bindings) != 1:
# Ambiguous slots
return None # Treat "unknown __slots__" and "no __slots__" the same.
val = field_var.data[0]
if isinstance(val, mixin.PythonConstant):
if isinstance(val.pyval, (list, tuple)):
entries = val.pyval
else:
return None # Happens e.g. __slots__ = {"foo", "bar"}. Not an error.
else:
return None # Happens e.g. for __slots__ = dir(Foo)
try:
names = [abstract_utils.get_atomic_python_constant(v) for v in entries]
except abstract_utils.ConversionError:
return None # Happens e.g. for __slots__ = ["x" if b else "y"]
# Slot names should be strings.
for s in names:
if not isinstance(s, str):
self.ctx.errorlog.bad_slots(
self.ctx.vm.frames, f"Invalid {field_name} entry: {str(s)!r}"
)
return None
return tuple(self._mangle(s) for s in names)
def _mangle(self, name: str) -> str:
"""Do name-mangling on an attribute name.
See https://goo.gl/X85fHt. Python automatically converts a name like
"__foo" to "_ClassName__foo" in the bytecode. (But "forgets" to do so in
other places, e.g. in the strings of __slots__.)
Arguments:
name: The name of an attribute of the current class. E.g. "__foo".
Returns:
The mangled name. E.g. "_MyClass__foo".
"""
if name.startswith("__") and not name.endswith("__"):
return "_" + self.name + name
else:
return name
|
Class
|
python
|
Netflix__metaflow
|
metaflow/datastore/content_addressed_store.py
|
{
"start": 183,
"end": 9205
}
|
class ____(object):
"""
This class is not meant to be overridden and is meant to be common across
different datastores.
"""
save_blobs_result = namedtuple("save_blobs_result", "uri key")
def __init__(self, prefix, storage_impl):
"""
Initialize a ContentAddressedStore
A content-addressed store stores data using a name/key that is a hash
of the content. This means that duplicate content is only stored once.
Parameters
----------
prefix : string
Prefix that will be prepended when storing a file
storage_impl : type
Implementation for the backing storage implementation to use
"""
self._prefix = prefix
self._storage_impl = storage_impl
self.TYPE = self._storage_impl.TYPE
self._blob_cache = None
def set_blob_cache(self, blob_cache):
self._blob_cache = blob_cache
def save_blobs(self, blob_iter, raw=False, len_hint=0, is_transfer=False):
"""
Saves blobs of data to the datastore
The blobs of data are saved as is if raw is True. If raw is False, the
datastore may process the blobs and they should then only be loaded
using load_blob
NOTE: The idea here is that there are two modes to access the file once
it is saved to the datastore:
- if raw is True, you would be able to access it directly using the
URI returned; the bytes that are passed in as 'blob' would be
returned directly by reading the object at that URI. You would also
be able to access it using load_blob passing the key returned
- if raw is False, no URI would be returned (the URI would be None)
and you would only be able to access the object using load_blob.
- The API also specifically takes a list to allow for parallel writes
if available in the datastore. We could also make a single
save_blob' API and save_blobs but this seems superfluous
Parameters
----------
blob_iter : Iterator
Iterator over bytes objects to save
raw : bool, default False
Whether to save the bytes directly or process them, by default False
len_hint : int, default 0
Hint of the number of blobs that will be produced by the
iterator, by default 0
is_transfer : bool, default False
If True, this indicates we are saving blobs directly from the output of another
content addressed store's
Returns
-------
List of save_blobs_result:
The list order is the same as the blobs passed in. The URI will be
None if raw is False.
"""
results = []
def packing_iter():
for blob in blob_iter:
if is_transfer:
key, blob_data, meta = blob
path = self._storage_impl.path_join(self._prefix, key[:2], key)
# Transfer data is always raw/decompressed, so mark it as such
meta_corrected = {"cas_raw": True, "cas_version": 1}
results.append(
self.save_blobs_result(
uri=self._storage_impl.full_uri(path),
key=key,
)
)
yield path, (BytesIO(blob_data), meta_corrected)
continue
sha = sha1(blob).hexdigest()
path = self._storage_impl.path_join(self._prefix, sha[:2], sha)
results.append(
self.save_blobs_result(
uri=self._storage_impl.full_uri(path) if raw else None,
key=sha,
)
)
if not self._storage_impl.is_file([path])[0]:
# only process blobs that don't exist already in the
# backing datastore
meta = {"cas_raw": raw, "cas_version": 1}
if raw:
yield path, (BytesIO(blob), meta)
else:
yield path, (self._pack_v1(blob), meta)
# We don't actually want to overwrite but by saying =True, we avoid
# checking again saving some operations. We are already sure we are not
# sending duplicate files since we already checked.
self._storage_impl.save_bytes(packing_iter(), overwrite=True, len_hint=len_hint)
return results
def load_blobs(self, keys, force_raw=False, is_transfer=False):
"""
Mirror function of save_blobs
This function is guaranteed to return the bytes passed to save_blob for
the keys
Parameters
----------
keys : List of string
Key describing the object to load
force_raw : bool, default False
Support for backward compatibility with previous datastores. If
True, this will force the key to be loaded as is (raw). By default,
False
is_transfer : bool, default False
If True, this indicates we are loading blobs to transfer them directly
to another datastore. We will, in this case, also transfer the metadata
and do minimal processing. This is for internal use only.
Returns
-------
Returns an iterator of (string, bytes) tuples; the iterator may return keys
in a different order than were passed in. If is_transfer is True, the tuple
has three elements with the third one being the metadata.
"""
load_paths = []
for key in keys:
blob = None
if self._blob_cache:
blob = self._blob_cache.load_key(key)
if blob is not None:
if is_transfer:
# Cached blobs are decompressed/processed bytes regardless of original format
yield key, blob, {"cas_raw": False, "cas_version": 1}
else:
yield key, blob
else:
path = self._storage_impl.path_join(self._prefix, key[:2], key)
load_paths.append((key, path))
with self._storage_impl.load_bytes([p for _, p in load_paths]) as loaded:
for path_key, file_path, meta in loaded:
key = self._storage_impl.path_split(path_key)[-1]
# At this point, we either return the object as is (if raw) or
# decode it according to the encoding version
with open(file_path, "rb") as f:
if force_raw or (meta and meta.get("cas_raw", False)):
blob = f.read()
else:
if meta is None:
# Previous version of the datastore had no meta
# information
unpack_code = self._unpack_backward_compatible
else:
version = meta.get("cas_version", -1)
if version == -1:
raise DataException(
"Could not extract encoding version for '%s'" % path
)
unpack_code = getattr(self, "_unpack_v%d" % version, None)
if unpack_code is None:
raise DataException(
"Unknown encoding version %d for '%s' -- "
"the artifact is either corrupt or you "
"need to update Metaflow to the latest "
"version" % (version, path)
)
try:
blob = unpack_code(f)
except Exception as e:
raise DataException(
"Could not unpack artifact '%s': %s" % (path, e)
)
if self._blob_cache:
self._blob_cache.store_key(key, blob)
if is_transfer:
yield key, blob, meta # Preserve exact original metadata from storage
else:
yield key, blob
def _unpack_backward_compatible(self, blob):
# This is the backward compatible unpack
# (if the blob doesn't have a version encoded)
return self._unpack_v1(blob)
def _pack_v1(self, blob):
buf = BytesIO()
with gzip.GzipFile(fileobj=buf, mode="wb", compresslevel=3) as f:
f.write(blob)
buf.seek(0)
return buf
def _unpack_v1(self, blob):
with gzip.GzipFile(fileobj=blob, mode="rb") as f:
return f.read()
|
ContentAddressedStore
|
python
|
doocs__leetcode
|
solution/2600-2699/2615.Sum of Distances/Solution.py
|
{
"start": 0,
"end": 552
}
|
class ____:
def distance(self, nums: List[int]) -> List[int]:
d = defaultdict(list)
for i, x in enumerate(nums):
d[x].append(i)
ans = [0] * len(nums)
for idx in d.values():
left, right = 0, sum(idx) - len(idx) * idx[0]
for i in range(len(idx)):
ans[idx[i]] = left + right
if i + 1 < len(idx):
left += (idx[i + 1] - idx[i]) * (i + 1)
right -= (idx[i + 1] - idx[i]) * (len(idx) - i - 1)
return ans
|
Solution
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_south_carolina_zip.py
|
{
"start": 1798,
"end": 4187
}
|
class ____(ColumnMapExpectation):
"""Expect values in this column to be valid South Carolina zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_south_carolina_zip": ["29001", "29207", "29430", "29945"],
"invalid_south_carolina_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_south_carolina_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_south_carolina_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_south_carolina_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidSouthCarolinaZip().print_diagnostic_checklist()
|
ExpectColumnValuesToBeValidSouthCarolinaZip
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_unused_arguments/ARG.py
|
{
"start": 3336,
"end": 3952
}
|
class ____:
def __new__(cls, x):
print("Hello, world!")
def __init__(self, x) -> None:
print("Hello, world!")
def __str__(self) -> str:
return "Hello, world!"
def __exit__(self, exc_type, exc_value, traceback) -> None:
print("Hello, world!")
def __init_subclass__(cls, x) -> None:
print("Hello, world!")
def __class_getitem__(cls, x):
print("Hello, world!")
###
# Used arguments on chained cast.
###
def f(x: None) -> None:
_ = cast(Any, _identity)(x=x)
###
# Unused arguments with `locals`.
###
def f(bar: str):
print(locals())
|
C
|
python
|
scikit-learn__scikit-learn
|
sklearn/svm/_classes.py
|
{
"start": 58252,
"end": 66296
}
|
class ____(OutlierMixin, BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <outlier_detection>`.
Parameters
----------
kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \
default='rbf'
Specifies the kernel type to be used in the algorithm.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, default=3
Degree of the polynomial kernel function ('poly').
Must be non-negative. Ignored by all other kernels.
gamma : {'scale', 'auto'} or float, default='scale'
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
- if ``gamma='scale'`` (default) is passed then it uses
1 / (n_features * X.var()) as value of gamma,
- if 'auto', uses 1 / n_features
- if float, must be non-negative.
.. versionchanged:: 0.22
The default value of ``gamma`` changed from 'auto' to 'scale'.
coef0 : float, default=0.0
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, default=1e-3
Tolerance for stopping criterion.
nu : float, default=0.5
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
shrinking : bool, default=True
Whether to use the shrinking heuristic.
See the :ref:`User Guide <shrinking_svm>`.
cache_size : float, default=200
Specify the size of the kernel cache (in MB).
verbose : bool, default=False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, default=-1
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
coef_ : ndarray of shape (1, n_features)
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
dual_coef_ : ndarray of shape (1, n_SV)
Coefficients of the support vectors in the decision function.
fit_status_ : int
0 if correctly fitted, 1 otherwise (will raise warning)
intercept_ : ndarray of shape (1,)
Constant in the decision function.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
Number of iterations run by the optimization routine to fit the model.
.. versionadded:: 1.1
n_support_ : ndarray of shape (n_classes,), dtype=int32
Number of support vectors for each class.
offset_ : float
Offset used to define the decision function from the raw scores.
We have the relation: decision_function = score_samples - `offset_`.
The offset is the opposite of `intercept_` and is provided for
consistency with other outlier detection algorithms.
.. versionadded:: 0.20
shape_fit_ : tuple of int of shape (n_dimensions_of_X,)
Array dimensions of training vector ``X``.
support_ : ndarray of shape (n_SV,)
Indices of support vectors.
support_vectors_ : ndarray of shape (n_SV, n_features)
Support vectors.
See Also
--------
sklearn.linear_model.SGDOneClassSVM : Solves linear One-Class SVM using
Stochastic Gradient Descent.
sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection using
Local Outlier Factor (LOF).
sklearn.ensemble.IsolationForest : Isolation Forest Algorithm.
Examples
--------
>>> from sklearn.svm import OneClassSVM
>>> X = [[0], [0.44], [0.45], [0.46], [1]]
>>> clf = OneClassSVM(gamma='auto').fit(X)
>>> clf.predict(X)
array([-1, 1, 1, 1, -1])
>>> clf.score_samples(X)
array([1.7798, 2.0547, 2.0556, 2.0561, 1.7332])
For a more extended example,
see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py`
"""
_impl = "one_class"
_parameter_constraints: dict = {**BaseLibSVM._parameter_constraints}
for unused_param in ["C", "class_weight", "epsilon", "probability", "random_state"]:
_parameter_constraints.pop(unused_param)
def __init__(
self,
*,
kernel="rbf",
degree=3,
gamma="scale",
coef0=0.0,
tol=1e-3,
nu=0.5,
shrinking=True,
cache_size=200,
verbose=False,
max_iter=-1,
):
super().__init__(
kernel,
degree,
gamma,
coef0,
tol,
0.0,
nu,
0.0,
shrinking,
False,
cache_size,
None,
verbose,
max_iter,
random_state=None,
)
def fit(self, X, y=None, sample_weight=None):
"""Detect the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Set of samples, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Fitted estimator.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super().fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight)
self.offset_ = -self._intercept_
return self
def decision_function(self, X):
"""Signed distance to the separating hyperplane.
Signed distance is positive for an inlier and negative for an outlier.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
dec : ndarray of shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X).ravel()
return dec
def score_samples(self, X):
"""Raw scoring function of the samples.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
score_samples : ndarray of shape (n_samples,)
Returns the (unshifted) scoring function of the samples.
"""
return self.decision_function(X) + self.offset_
def predict(self, X):
"""Perform classification on samples in X.
For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples_test, n_samples_train)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : ndarray of shape (n_samples,)
Class labels for samples in X.
"""
y = super().predict(X)
return np.asarray(y, dtype=np.intp)
|
OneClassSVM
|
python
|
jmcnamara__XlsxWriter
|
examples/inheritance1.py
|
{
"start": 985,
"end": 1706
}
|
class ____(Workbook):
"""
Subclass of the XlsxWriter Workbook class to override the default
Worksheet class with our custom class.
"""
def add_worksheet(self, name=None):
# Overwrite add_worksheet() to create a MyWorksheet object.
worksheet = super().add_worksheet(name, MyWorksheet)
return worksheet
# Create a new MyWorkbook object.
workbook = MyWorkbook("inheritance1.xlsx")
# The code from now on will be the same as a normal "Workbook" program.
worksheet = workbook.add_worksheet()
# Write some data to test the subclassing.
worksheet.write("A1", "Hello")
worksheet.write("A2", "World")
worksheet.write("A3", 123)
worksheet.write("A4", 345)
workbook.close()
|
MyWorkbook
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_table38.py
|
{
"start": 306,
"end": 1236
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table38.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write(1, 0, 1)
worksheet.write(2, 0, 2)
worksheet.write(3, 0, 3)
worksheet.write(4, 0, 4)
worksheet.write(5, 0, 5)
worksheet.write(1, 1, 10)
worksheet.write(2, 1, 15)
worksheet.write(3, 1, 20)
worksheet.write(4, 1, 10)
worksheet.write(5, 1, 15)
worksheet.set_column("A:B", 10.288)
worksheet.add_table("A1:B6", {"description": "Alt text", "title": "Alt title"})
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
mahmoud__boltons
|
boltons/fileutils.py
|
{
"start": 11014,
"end": 22396
}
|
class ____:
"""``AtomicSaver`` is a configurable `context manager`_ that provides
a writable :class:`file` which will be moved into place as long as
no exceptions are raised within the context manager's block. These
"part files" are created in the same directory as the destination
path to ensure atomic move operations (i.e., no cross-filesystem
moves occur).
Args:
dest_path (str): The path where the completed file will be
written.
overwrite (bool): Whether to overwrite the destination file if
it exists at completion time. Defaults to ``True``.
file_perms (int): Integer representation of file permissions
for the newly-created file. Defaults are, when the
destination path already exists, to copy the permissions
from the previous file, or if the file did not exist, to
respect the user's configured `umask`_, usually resulting
in octal 0644 or 0664.
text_mode (bool): Whether to open the destination file in text
mode (i.e., ``'w'`` not ``'wb'``). Defaults to ``False`` (``wb``).
part_file (str): Name of the temporary *part_file*. Defaults
to *dest_path* + ``.part``. Note that this argument is
just the filename, and not the full path of the part
file. To guarantee atomic saves, part files are always
created in the same directory as the destination path.
overwrite_part (bool): Whether to overwrite the *part_file*,
should it exist at setup time. Defaults to ``False``,
which results in an :exc:`OSError` being raised on
pre-existing part files. Be careful of setting this to
``True`` in situations when multiple threads or processes
could be writing to the same part file.
rm_part_on_exc (bool): Remove *part_file* on exception cases.
Defaults to ``True``, but ``False`` can be useful for
recovery in some cases. Note that resumption is not
automatic and by default an :exc:`OSError` is raised if
the *part_file* exists.
Practically, the AtomicSaver serves a few purposes:
* Avoiding overwriting an existing, valid file with a partially
written one.
* Providing a reasonable guarantee that a part file only has one
writer at a time.
* Optional recovery of partial data in failure cases.
.. _context manager: https://docs.python.org/2/reference/compound_stmts.html#with
.. _umask: https://en.wikipedia.org/wiki/Umask
"""
_default_file_perms = RW_PERMS
# TODO: option to abort if target file modify date has changed since start?
def __init__(self, dest_path, **kwargs):
self.dest_path = dest_path
self.overwrite = kwargs.pop('overwrite', True)
self.file_perms = kwargs.pop('file_perms', None)
self.overwrite_part = kwargs.pop('overwrite_part', False)
self.part_filename = kwargs.pop('part_file', None)
self.rm_part_on_exc = kwargs.pop('rm_part_on_exc', True)
self.text_mode = kwargs.pop('text_mode', False)
self.buffering = kwargs.pop('buffering', -1)
if kwargs:
raise TypeError(f'unexpected kwargs: {kwargs.keys()!r}')
self.dest_path = os.path.abspath(self.dest_path)
self.dest_dir = os.path.dirname(self.dest_path)
if not self.part_filename:
self.part_path = dest_path + '.part'
else:
self.part_path = os.path.join(self.dest_dir, self.part_filename)
self.mode = 'w+' if self.text_mode else 'w+b'
self.open_flags = _TEXT_OPENFLAGS if self.text_mode else _BIN_OPENFLAGS
self.part_file = None
def _open_part_file(self):
do_chmod = True
file_perms = self.file_perms
if file_perms is None:
try:
# try to copy from file being replaced
stat_res = os.stat(self.dest_path)
file_perms = stat.S_IMODE(stat_res.st_mode)
except OSError:
# default if no destination file exists
file_perms = self._default_file_perms
do_chmod = False # respect the umask
fd = os.open(self.part_path, self.open_flags, file_perms)
set_cloexec(fd)
self.part_file = os.fdopen(fd, self.mode, self.buffering)
# if default perms are overridden by the user or previous dest_path
# chmod away the effects of the umask
if do_chmod:
try:
os.chmod(self.part_path, file_perms)
except OSError:
self.part_file.close()
raise
return
def setup(self):
"""Called on context manager entry (the :keyword:`with` statement),
the ``setup()`` method creates the temporary file in the same
directory as the destination file.
``setup()`` tests for a writable directory with rename permissions
early, as the part file may not be written to immediately (not
using :func:`os.access` because of the potential issues of
effective vs. real privileges).
If the caller is not using the :class:`AtomicSaver` as a
context manager, this method should be called explicitly
before writing.
"""
if os.path.lexists(self.dest_path):
if not self.overwrite:
raise OSError(errno.EEXIST,
'Overwrite disabled and file already exists',
self.dest_path)
if self.overwrite_part and os.path.lexists(self.part_path):
os.unlink(self.part_path)
self._open_part_file()
return
def __enter__(self):
self.setup()
return self.part_file
def __exit__(self, exc_type, exc_val, exc_tb):
if self.part_file:
# Ensure data is flushed and synced to disk before closing
self.part_file.flush()
os.fsync(self.part_file.fileno())
self.part_file.close()
if exc_type:
if self.rm_part_on_exc:
try:
os.unlink(self.part_path)
except Exception:
pass # avoid masking original error
return
try:
atomic_rename(self.part_path, self.dest_path,
overwrite=self.overwrite)
except OSError:
if self.rm_part_on_exc:
try:
os.unlink(self.part_path)
except Exception:
pass # avoid masking original error
raise # could not save destination file
return
def iter_find_files(directory, patterns, ignored=None, include_dirs=False, max_depth=None):
"""Returns a generator that yields file paths under a *directory*,
matching *patterns* using `glob`_ syntax (e.g., ``*.txt``). Also
supports *ignored* patterns.
Args:
directory (str): Path that serves as the root of the
search. Yielded paths will include this as a prefix.
patterns (str or list): A single pattern or list of
glob-formatted patterns to find under *directory*.
ignored (str or list): A single pattern or list of
glob-formatted patterns to ignore.
include_dirs (bool): Whether to include directories that match
patterns, as well. Defaults to ``False``.
max_depth (int): traverse up to this level of subdirectory.
I.e., 0 for the specified *directory* only, 1 for *directory*
and one level of subdirectory.
For example, finding Python files in the current directory:
>>> _CUR_DIR = os.path.dirname(os.path.abspath(__file__))
>>> filenames = sorted(iter_find_files(_CUR_DIR, '*.py'))
>>> os.path.basename(filenames[-1])
'urlutils.py'
Or, Python files while ignoring emacs lockfiles:
>>> filenames = iter_find_files(_CUR_DIR, '*.py', ignored='.#*')
.. _glob: https://en.wikipedia.org/wiki/Glob_%28programming%29
"""
if isinstance(patterns, str):
patterns = [patterns]
pats_re = re.compile('|'.join([fnmatch.translate(p) for p in patterns]))
if not ignored:
ignored = []
elif isinstance(ignored, str):
ignored = [ignored]
ign_re = re.compile('|'.join([fnmatch.translate(p) for p in ignored]))
start_depth = len(directory.split(os.path.sep))
for root, dirs, files in os.walk(directory):
if max_depth is not None and (len(root.split(os.path.sep)) - start_depth) > max_depth:
continue
if include_dirs:
for basename in dirs:
if pats_re.match(basename):
if ignored and ign_re.match(basename):
continue
filename = os.path.join(root, basename)
yield filename
for basename in files:
if pats_re.match(basename):
if ignored and ign_re.match(basename):
continue
filename = os.path.join(root, basename)
yield filename
return
def copy_tree(src, dst, symlinks=False, ignore=None):
"""The ``copy_tree`` function is an exact copy of the built-in
:func:`shutil.copytree`, with one key difference: it will not
raise an exception if part of the tree already exists. It achieves
this by using :func:`mkdir_p`.
As of Python 3.8, you may pass :func:`shutil.copytree` the
`dirs_exist_ok=True` flag to achieve the same effect.
Args:
src (str): Path of the source directory to copy.
dst (str): Destination path. Existing directories accepted.
symlinks (bool): If ``True``, copy symlinks rather than their
contents.
ignore (callable): A callable that takes a path and directory
listing, returning the files within the listing to be ignored.
For more details, check out :func:`shutil.copytree` and
:func:`shutil.copy2`.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
mkdir_p(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as e:
errors.extend(e.args[0])
except OSError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
copytree = copy_tree # alias for drop-in replacement of shutil
# like open(os.devnull) but with even fewer side effects
|
AtomicSaver
|
python
|
facebook__pyre-check
|
api/query.py
|
{
"start": 2939,
"end": 10232
}
|
class ____(NamedTuple):
fully_qualified_name: str
path: Optional[str]
line: int
column: int
stop_line: int
stop_column: int
full_error_message: str
def _defines(pyre_connection: PyreConnection, modules: Iterable[str]) -> List[Define]:
query = "defines({})".format(",".join(modules))
result = pyre_connection.query_server(query)
return [
Define(
name=element["name"],
parameters=[
DefineParameter(
name=parameter["name"], annotation=parameter["annotation"]
)
for parameter in element["parameters"]
],
return_annotation=element["return_annotation"],
)
for element in result["response"]
]
def defines(
pyre_connection: PyreConnection,
modules: Iterable[str],
batch_size: Optional[int] = None,
) -> List[Define]:
modules = list(modules)
if batch_size is None:
return _defines(pyre_connection, modules)
if batch_size <= 0:
raise ValueError(
"batch_size must a positive integer, provided: `{}`".format(batch_size)
)
found_defines: List[Define] = []
module_chunks = [
modules[index : index + batch_size]
for index in range(0, len(modules), batch_size)
]
for modules in module_chunks:
found_defines.extend(_defines(pyre_connection, modules))
return found_defines
def get_class_hierarchy(pyre_connection: PyreConnection) -> ClassHierarchy:
result = pyre_connection.query_server("dump_class_hierarchy()")
return ClassHierarchy(
{
key: edges
for annotation_and_edges in result["response"]
for key, edges in annotation_and_edges.items()
}
)
def get_cached_class_hierarchy(
pyre_connection: PyreConnection, pyre_cache: Optional[PyreCache]
) -> ClassHierarchy:
cached_class_hierarchy = (
pyre_cache.class_hierarchy if pyre_cache is not None else None
)
if cached_class_hierarchy is not None:
return cached_class_hierarchy
class_hierarchy = get_class_hierarchy(pyre_connection)
if pyre_cache is not None:
pyre_cache.class_hierarchy = class_hierarchy
return class_hierarchy
def _annotations_per_file(data: PyreQueryResult) -> Dict[str, List[Annotation]]:
def make_position(mapping: Dict[str, int]) -> Position:
return Position(column=mapping["column"], line=mapping["line"])
return {
response["response"][0]["path"]: [
Annotation(
locations_and_annotations["annotation"],
make_position(locations_and_annotations["location"]["start"]),
make_position(locations_and_annotations["location"]["stop"]),
)
for locations_and_annotations in response["response"][0]["types"]
]
for response in data["response"]
if "response" in response
}
def get_types(
pyre_connection: PyreConnection, *paths: str
) -> Dict[str, List[Annotation]]:
types_sequence = ",".join([f"types('{path}')" for path in paths])
result = pyre_connection.query_server(f"batch({types_sequence})")
return _annotations_per_file(result)
def get_superclasses(pyre_connection: PyreConnection, class_name: str) -> List[str]:
query = f"superclasses({class_name})"
result = pyre_connection.query_server(query)
return result["response"][0][class_name]
def _get_batch(
iterable: Iterable[T], batch_size: Optional[int]
) -> Generator[Iterable[T], None, None]:
if not batch_size:
yield iterable
elif batch_size <= 0:
raise ValueError(
"batch_size must a positive integer, provided: `{}`".format(batch_size)
)
else:
iterator = iter(iterable)
batch = list(islice(iterator, batch_size))
while batch:
yield batch
batch = list(islice(iterator, batch_size))
def _get_attributes(
pyre_connection: PyreConnection, class_name: str
) -> List[Attributes]:
query = f"attributes({class_name})"
response = pyre_connection.query_server(query)["response"]
return [
Attributes(
name=attribute["name"],
annotation=attribute["annotation"],
kind=attribute["kind"],
final=attribute["final"],
)
for attribute in response["attributes"]
]
def get_attributes(
pyre_connection: PyreConnection,
class_names: Iterable[str],
batch_size: Optional[int] = None,
) -> Dict[str, List[Attributes]]:
all_responses = {}
for batch in _get_batch(class_names, batch_size):
query = "batch({})".format(", ".join([f"attributes({name})" for name in batch]))
responses = pyre_connection.query_server(query)["response"]
for class_name, response in zip(batch, responses):
if "response" in response:
all_responses[class_name] = [
Attributes(
name=attribute["name"],
annotation=attribute["annotation"],
kind=attribute["kind"],
final=attribute["final"],
)
for attribute in response["response"]["attributes"]
]
else:
LOG.warning(
f"Error resolving query for `{class_name=}` in get_attributes `{response=}`"
)
all_responses[class_name] = []
return all_responses
def get_call_graph(
pyre_connection: PyreConnection,
) -> Optional[Dict[str, List[CallGraphTarget]]]:
response = pyre_connection.query_server("dump_call_graph()")["response"]
call_graph = {}
for function, calls in response.items():
call_graph[function] = [CallGraphTarget(call) for call in calls]
return call_graph
def _parse_location(location_json: Dict[str, Any]) -> Location:
return Location(
path=location_json["path"],
start=_parse_position(location_json["start"]),
stop=_parse_position(location_json["stop"]),
)
def _parse_position(position_json: Dict[str, Any]) -> Position:
return Position(line=position_json["line"], column=position_json["column"])
def get_invalid_taint_models(
pyre_connection: PyreConnection,
verify_dsl: bool = False,
) -> List[InvalidModel]:
errors: List[InvalidModel] = []
# TODO(T143503449): Combine into one f-string after fbcode pyre version upgrade
response = pyre_connection.query_server(
"validate_taint_models(verify_dsl=True)"
if verify_dsl
else "validate_taint_models()"
)
if "response" in response and "errors" in response["response"]:
found_errors = response["response"]["errors"]
for error in found_errors:
errors.append(
InvalidModel(
full_error_message=error["description"],
path=error["path"],
line=error["line"],
column=error["column"],
stop_line=error["stop_line"],
stop_column=error["stop_column"],
fully_qualified_name="",
)
)
return errors
|
InvalidModel
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/unions3.py
|
{
"start": 558,
"end": 633
}
|
class ____(type):
def __ror__(cls: _T, other: type) -> _T: ...
|
Metaclass2
|
python
|
django__django
|
tests/generic_views/views.py
|
{
"start": 2300,
"end": 2647
}
|
class ____(AuthorList):
paginate_by = 5
def get_paginator(
self, queryset, page_size, orphans=0, allow_empty_first_page=True
):
return super().get_paginator(
queryset,
page_size,
orphans=2,
allow_empty_first_page=allow_empty_first_page,
)
|
AuthorListCustomPaginator
|
python
|
pydantic__pydantic
|
tests/mypy/outputs/mypy-plugin-strict_ini/plugin_fail_baseConfig.py
|
{
"start": 2055,
"end": 2271
}
|
class ____(BaseModel):
class Config:
from_attributes: Any = {} # not sensible, but should still be handled gracefully
# MYPY: error: Invalid value for "Config.from_attributes" [pydantic-config]
|
BadConfig1
|
python
|
realpython__materials
|
inheritance-and-composition/choosing/employees.py
|
{
"start": 820,
"end": 1622
}
|
class ____(AsDictionaryMixin):
def __init__(self, id):
self.id = id
info = employee_database.get_employee_info(self.id)
self.name = info.get("name")
self.address = get_employee_address(self.id)
self._role = get_role(info.get("role"))
self._payroll = get_policy(self.id)
def work(self, hours):
duties = self._role.perform_duties(hours)
print(f"Employee {self.id} - {self.name}:")
print(f"- {duties}")
print("")
self._payroll.track_work(hours)
def calculate_payroll(self):
return self._payroll.calculate_payroll()
def apply_payroll_policy(self, new_policy):
new_policy.apply_to_policy(self._payroll)
self._payroll = new_policy
employee_database = _EmployeeDatabase()
|
Employee
|
python
|
tensorflow__tensorflow
|
tensorflow/python/compiler/tensorrt/test/const_broadcast_test.py
|
{
"start": 1012,
"end": 2532
}
|
class ____(trt_test.TfTrtIntegrationTestBase):
"""Test for Constant broadcasting in TF-TRT."""
def GraphFn(self, x):
"""Return the expected graph to convert."""
dtype = x.dtype
filt1 = constant_op.constant(
0.3, shape=(3, 3, 2, 1), dtype=dtype, name='filt1')
y1 = nn.conv2d(x, filt1, strides=[1, 1, 1, 1], padding='SAME', name='y1')
z1 = nn.relu(y1, name='z1')
filt2 = constant_op.constant(
0.3, shape=(3, 3, 1, 1), dtype=dtype, name='filt2')
y2 = nn.conv2d(z1, filt2, strides=[1, 1, 1, 1], padding='SAME', name='y2')
z2 = nn.relu(y2, name='z')
filt3 = constant_op.constant(
0.3, shape=(3, 3, 1, 1), dtype=dtype, name='filt3')
y3 = nn.conv2d(z2, filt3, strides=[1, 1, 1, 1], padding='SAME', name='y3')
return nn.relu(y3, name='output_0')
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[5, 12, 12, 2]],
[[5, 12, 12, 1]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ['TRTEngineOp_000']
def ExpectedAbsoluteTolerance(self, run_params):
"""The absolute tolerance to compare floating point results."""
return 1.e-04 if run_params.precision_mode == 'FP32' else 1.e-02
def ExpectedRelativeTolerance(self, run_params):
"""The relative tolerance to compare floating point results."""
return 1.e-04 if run_params.precision_mode == 'FP32' else 1.e-02
if __name__ == '__main__':
test.main()
|
ConstBroadcastTest
|
python
|
getsentry__sentry
|
src/sudo/views.py
|
{
"start": 1102,
"end": 4291
}
|
class ____(View):
"""
The default view for the sudo mode page. The role of this page is to
prompt the user for their password again, and if successful, redirect
them back to ``next``.
"""
form_class = SudoForm
template_name = "sudo/sudo.html"
extra_context: dict[str, str] | None = None
def handle_sudo(self, request: HttpRequest, context: dict[str, Any]) -> bool:
return request.method == "POST" and context["form"].is_valid()
def grant_sudo_privileges(self, request: HttpRequest, redirect_to: str) -> HttpResponseRedirect:
grant_sudo_privileges(request)
# Restore the redirect destination from the GET request
redirect_to = request.session.pop(REDIRECT_TO_FIELD_NAME, redirect_to)
# Double check we're not redirecting to other sites
if not url_has_allowed_host_and_scheme(redirect_to, allowed_hosts=(request.get_host(),)):
redirect_to = resolve_url(REDIRECT_URL)
return HttpResponseRedirect(redirect_to)
@method_decorator(sensitive_post_parameters())
@method_decorator(never_cache)
@method_decorator(csrf_protect)
@method_decorator(login_required)
def dispatch(self, request: HttpRequest, *args: object, **kwargs: object) -> HttpResponseBase:
redirect_to = request.GET.get(REDIRECT_FIELD_NAME, REDIRECT_URL)
# Make sure we're not redirecting to other sites
if not url_has_allowed_host_and_scheme(redirect_to, allowed_hosts=(request.get_host(),)):
redirect_to = resolve_url(REDIRECT_URL)
if request.is_sudo():
return HttpResponseRedirect(redirect_to)
if request.method == "GET":
request.session[REDIRECT_TO_FIELD_NAME] = redirect_to
context = {
"form": self.form_class(request.user, request.POST or None),
"request": request,
REDIRECT_FIELD_NAME: redirect_to,
}
if self.handle_sudo(request, context):
return self.grant_sudo_privileges(request, redirect_to)
if self.extra_context is not None:
context.update(self.extra_context)
return TemplateResponse(request, self.template_name, context)
def sudo(request: HttpRequest, **kwargs: object) -> HttpResponseBase:
return SudoView(**kwargs).dispatch(request)
def redirect_to_sudo(next_url: str, sudo_url: str | None = None) -> HttpResponseRedirect:
"""
Redirects the user to the login page, passing the given 'next' page
"""
if sudo_url is None:
sudo_obj = URL
else:
sudo_obj = sudo_url
try:
# django 1.10 and greater can't resolve the string 'sudo.views.sudo' to a URL
# https://docs.djangoproject.com/en/1.10/releases/1.10/#removed-features-1-10
sudo_obj = import_string(sudo_obj)
except (ImportError, ImproperlyConfigured):
pass # wasn't a dotted path
sudo_url_parts = list(urlparse(resolve_url(sudo_obj)))
querystring = QueryDict(sudo_url_parts[4], mutable=True)
querystring[REDIRECT_FIELD_NAME] = next_url
sudo_url_parts[4] = querystring.urlencode(safe="/")
return HttpResponseRedirect(urlunparse(sudo_url_parts))
|
SudoView
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/unions5.py
|
{
"start": 213,
"end": 700
}
|
class ____:
a: int
# This should generate an error
a1: type[Class1] | type[Class2] = Class1 | Class2
# This should generate an error
a2: type[Class1] | type[Class2] = Union[Class1, Class2]
b1 = Class1 | Class2
# This should generate an error
print(b1.a)
# This should generate an error
b1()
b2 = Union[Class1, Class2]
# This should generate an error
print(b2.a)
# This should generate an error
b2()
c1: UnionType
c1 = int | str
c1 = Union[int, str]
c1 = Optional[int]
|
Class2
|
python
|
spack__spack
|
lib/spack/spack/vendor/jinja2/nodes.py
|
{
"start": 33069,
"end": 33168
}
|
class ____(Stmt):
"""An artificial scope."""
fields = ("body",)
body: t.List[Node]
|
Scope
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE807.py
|
{
"start": 564,
"end": 809
}
|
class ____(BaseTable):
foo = fields.ListField(list)
bar = fields.ListField(dict)
lambda *args, **kwargs: []
lambda *args, **kwargs: {}
lambda *args: []
lambda *args: {}
lambda **kwargs: []
lambda **kwargs: {}
lambda: {**unwrap}
|
FooTable
|
python
|
django__django
|
tests/proxy_models/models.py
|
{
"start": 4120,
"end": 4253
}
|
class ____(ProxyBug):
"""
A proxy of proxy model with related field
"""
class Meta:
proxy = True
|
ProxyProxyBug
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/indices/query/query_transform/feedback_transform.py
|
{
"start": 955,
"end": 4484
}
|
class ____(BaseQueryTransform):
"""
Transform the query given the evaluation feedback.
Args:
eval(Evaluation): An evaluation object.
llm(LLM): An LLM.
resynthesize_query(bool): Whether to resynthesize the query.
resynthesis_prompt(BasePromptTemplate): A prompt for resynthesizing the query.
"""
def __init__(
self,
llm: Optional[LLM] = None,
resynthesize_query: bool = False,
resynthesis_prompt: Optional[BasePromptTemplate] = None,
) -> None:
super().__init__()
self.llm = llm or Settings.llm
self.should_resynthesize_query = resynthesize_query
self.resynthesis_prompt = resynthesis_prompt or DEFAULT_RESYNTHESIS_PROMPT
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {"resynthesis_prompt": self.resynthesis_prompt}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "resynthesis_prompt" in prompts:
self.resynthesis_prompt = prompts["resynthesis_prompt"]
def _run(self, query_bundle: QueryBundle, metadata: Dict) -> QueryBundle:
orig_query_str = query_bundle.query_str
if metadata.get("evaluation") and isinstance(
metadata.get("evaluation"), Evaluation
):
self.evaluation = metadata.get("evaluation")
if self.evaluation is None or not isinstance(self.evaluation, Evaluation):
raise ValueError("Evaluation is not set.")
if self.evaluation.response is None or self.evaluation.feedback is None:
raise ValueError("Evaluation result must contain response and feedback.")
if self.evaluation.feedback == "YES" or self.evaluation.feedback == "NO":
new_query = (
orig_query_str
+ "\n----------------\n"
+ self._construct_feedback(response=self.evaluation.response)
)
else:
if self.should_resynthesize_query:
new_query_str = self._resynthesize_query(
orig_query_str, self.evaluation.response, self.evaluation.feedback
)
else:
new_query_str = orig_query_str
new_query = (
self._construct_feedback(response=self.evaluation.response)
+ "\n"
+ "Here is some feedback from the evaluator about the response given.\n"
+ self.evaluation.feedback
+ "\n"
+ "Now answer the question.\n"
+ new_query_str
)
return QueryBundle(new_query, custom_embedding_strs=[orig_query_str])
@staticmethod
def _construct_feedback(response: Optional[str]) -> str:
"""Construct feedback from response."""
if response is None:
return ""
else:
return "Here is a previous bad answer.\n" + response
def _resynthesize_query(
self, query_str: str, response: str, feedback: Optional[str]
) -> str:
"""Resynthesize query given feedback."""
if feedback is None:
return query_str
else:
new_query_str = self.llm.predict(
self.resynthesis_prompt,
query_str=query_str,
response=response,
feedback=feedback,
)
logger.debug("Resynthesized query: %s", new_query_str)
return new_query_str
|
FeedbackQueryTransformation
|
python
|
getsentry__sentry
|
tests/acceptance/test_organization_group_index.py
|
{
"start": 602,
"end": 6702
}
|
class ____(AcceptanceTestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(
organization=self.org, name="Mariachi Band", members=[self.user]
)
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.other_project = self.create_project(
organization=self.org, teams=[self.team], name="Sumatra"
)
self.login_as(self.user)
self.page = IssueListPage(self.browser, self.client)
self.dismiss_assistant()
def create_issues(self) -> None:
self.event_a = self.store_event(
data={
"event_id": "a" * 32,
"message": "oh no",
"timestamp": (event_time - timedelta(hours=1)).isoformat(),
"fingerprint": ["group-1"],
},
project_id=self.project.id,
)
add_group_to_inbox(self.event_a.group, GroupInboxReason.NEW)
self.event_b = self.store_event(
data={
"event_id": "b" * 32,
"message": "oh snap",
"timestamp": event_time.isoformat(),
"fingerprint": ["group-2"],
},
project_id=self.project.id,
)
add_group_to_inbox(self.event_b.group, GroupInboxReason.NEW)
def test_with_onboarding(self) -> None:
self.project.update(first_event=None)
self.page.visit_issue_list(self.org.slug)
self.browser.wait_until_test_id("awaiting-events")
def test_with_no_results(self) -> None:
self.project.update(first_event=django_timezone.now())
self.page.visit_issue_list(self.org.slug, query="?query=assigned%3Ame")
self.browser.wait_until_test_id("empty-state")
@patch("django.utils.timezone.now")
def test_with_results(self, mock_now: MagicMock) -> None:
mock_now.return_value = datetime.now(timezone.utc)
self.create_issues()
self.page.visit_issue_list(self.org.slug)
self.page.wait_for_stream()
groups = self.browser.elements('[data-test-id="event-issue-header"]')
assert len(groups) == 2
assert "oh snap" in groups[0].text
assert "oh no" in groups[1].text
@patch("django.utils.timezone.now")
def test_resolve_issues_removal(self, mock_now: MagicMock) -> None:
mock_now.return_value = datetime.now(timezone.utc)
self.create_issues()
group1 = self.event_a.group
self.page.visit_issue_list(self.org.slug)
self.page.wait_for_stream()
self.page.select_issue(1)
self.page.resolve_issues()
group1.update(status=GroupStatus.RESOLVED, substatus=None)
self.page.wait_for_issue_removal()
groups = self.browser.elements('[data-test-id="event-issue-header"]')
assert len(groups) == 1
@patch("django.utils.timezone.now")
def test_archive_issues(self, mock_now: MagicMock) -> None:
mock_now.return_value = datetime.now(timezone.utc)
self.create_issues()
group1 = self.event_a.group
self.page.visit_issue_list(self.org.slug)
self.page.wait_for_stream()
self.page.select_issue(1)
self.page.archive_issues()
group1.update(status=GroupStatus.IGNORED, substatus=None)
self.page.wait_for_issue_removal()
groups = self.browser.elements('[data-test-id="event-issue-header"]')
assert len(groups) == 1
@patch("django.utils.timezone.now")
def test_delete_issues(self, mock_now: MagicMock) -> None:
mock_now.return_value = datetime.now(timezone.utc)
self.create_issues()
group1 = self.event_a.group
self.page.visit_issue_list(self.org.slug)
self.page.wait_for_stream()
self.page.select_issue(1)
self.page.delete_issues()
group1.update(status=GroupStatus.PENDING_DELETION, substatus=None)
self.page.wait_for_issue_removal()
groups = self.browser.elements('[data-test-id="event-issue-header"]')
assert len(groups) == 1
@patch("django.utils.timezone.now")
def test_merge_issues(self, mock_now: MagicMock) -> None:
mock_now.return_value = datetime.now(timezone.utc)
self.create_issues()
group1 = self.event_a.group
group2 = self.event_b.group
self.page.visit_issue_list(self.org.slug)
self.page.wait_for_stream()
self.page.select_issue(1)
self.page.select_issue(2)
self.page.merge_issues()
group1.update(status=GroupStatus.PENDING_MERGE, substatus=None)
group2.update(status=GroupStatus.PENDING_MERGE, substatus=None)
self.page.wait_for_issue_removal()
groups = self.browser.elements('[data-test-id="event-issue-header"]')
assert len(groups) == 1
@patch("django.utils.timezone.now")
def test_inbox_results(self, mock_now: MagicMock) -> None:
mock_now.return_value = datetime.now(timezone.utc)
self.create_issues()
# Disable for_review_guide
AssistantActivity.objects.create(
user=self.user, guide_id=9, viewed_ts=django_timezone.now()
)
self.page.visit_issue_list(
self.org.slug,
query="?query=is%3Aunresolved+is%3Afor_review+assigned_or_suggested%3A[me, none]",
)
self.page.wait_for_stream()
groups = self.browser.elements('[data-test-id="event-issue-header"]')
assert len(groups) == 2
self.page.select_issue(1)
self.page.mark_reviewed_issues()
self.page.visit_issue_list(
self.org.slug,
query="?query=is%3Aunresolved+is%3Afor_review+assigned_or_suggested%3A[me, none]",
)
self.page.wait_for_stream()
groups = self.browser.elements('[data-test-id="event-issue-header"]')
assert len(groups) == 1
|
OrganizationGroupIndexTest
|
python
|
joke2k__faker
|
faker/providers/bank/en_GB/__init__.py
|
{
"start": 42,
"end": 192
}
|
class ____(BankProvider):
"""Implement bank provider for ``en_GB`` locale."""
bban_format = "????##############"
country_code = "GB"
|
Provider
|
python
|
pytorch__pytorch
|
test/distributed/tensor/experimental/test_register_sharding.py
|
{
"start": 529,
"end": 5611
}
|
class ____(DTensorTestBase):
@with_comms
def test_softmax_fwd(self):
# After registering the custom softmax sharding strategy,
# the original entry would have been replaced.
# The following line is for showcasing purpose only.
DTensor._op_dispatcher.sharding_propagator.op_strategy_funcs.pop(
aten._softmax.default, None
)
@register_sharding(aten._softmax.default)
def custom_softmax_sharding(
x: DTensorSpec,
dim: int,
half_to_float: torch.dtype,
):
softmax_dim = dim if dim >= 0 else dim + x.ndim
acceptable_shardings = []
all_replicate = ([Replicate()], [Replicate(), None, None])
acceptable_shardings.append(all_replicate)
for sharding_dim in range(x.ndim):
if sharding_dim != softmax_dim:
all_sharded = (
[Shard(sharding_dim)],
[Shard(sharding_dim), None, None],
)
acceptable_shardings.append(all_sharded)
return acceptable_shardings
# check if the RuntimeSchemaInfo is derived correctly
schema_info = DTensor._op_dispatcher.sharding_propagator.op_to_schema_info[
aten._softmax.default
]
self.assertEqual(schema_info.static_argnum, 1)
device_mesh = self.build_device_mesh()
x = torch.rand(8, 12, 16, device=self.device_type)
dims = range(3) # used to convert -1 to the actual dim
softmax_dims = [-1, 0, 1]
shard_dims = [0, 1, 2]
test_list = list(itertools.product(softmax_dims, shard_dims))
for softmax_dim, shard_dim in test_list:
local_y = torch.nn.functional.softmax(
x, dim=softmax_dim, dtype=torch.float32
)
dist_x = distribute_tensor(x, device_mesh, [Shard(shard_dim)])
dist_y = torch.nn.functional.softmax(
dist_x, dim=softmax_dim, dtype=torch.float32
)
if dims[shard_dim] == dims[softmax_dim]:
self.assertTrue(dist_y.placements[0].is_replicate())
self.assertEqual(dist_y.to_local(), local_y)
else:
self.assertTrue(dist_y.placements[0].is_shard(dim=shard_dim))
self.assertEqual(dist_y.full_tensor(), local_y)
@with_comms
def test_argmax(self):
@register_sharding(aten.argmax.default)
def custom_argmax_sharding(x, dim, keepdim):
acceptable_shardings = []
all_replicate = ([Replicate()], [Replicate(), None, None])
acceptable_shardings.append(all_replicate)
if keepdim:
for sharding_dim in range(x.ndim):
if sharding_dim != dim:
all_sharded = (
[Shard(sharding_dim)],
[Shard(sharding_dim), None, None],
)
acceptable_shardings.append(all_sharded)
return acceptable_shardings
# check if the RuntimeSchemaInfo is derived correctly
# when the first int arg is optional
schema_info = DTensor._op_dispatcher.sharding_propagator.op_to_schema_info[
aten.argmax.default
]
self.assertEqual(schema_info.static_argnum, 1)
device_mesh = self.build_device_mesh()
x = torch.rand(8, 12, device=self.device_type)
dist_x = distribute_tensor(x, device_mesh, [Shard(0)])
local_y = torch.argmax(x, dim=1, keepdim=True)
dist_y = torch.argmax(dist_x, dim=1, keepdim=True)
self.assertTrue(dist_y.placements[0].is_shard(dim=0))
self.assertEqual(dist_y.full_tensor(), local_y)
@with_comms
def test_register_sharding_for_tensor_kwargs(self):
mesh = self.build_device_mesh()
x = torch.randn(4, 4, device=self.device_type)
x_dt = distribute_tensor(x, mesh, [Replicate()])
@register_sharding(aten.min.dim_min)
def min_dim_strategy(x, dim, keepdim, min, min_indices):
all_replicate = (
[Replicate(), Replicate()],
[Replicate(), None, None, Replicate(), Replicate()],
)
return [all_replicate]
value = torch.randn(4, 1, device=self.device_type)
indices = torch.randn(4, 1, device=self.device_type).long()
value_dt = distribute_tensor(value, mesh, [Replicate()])
indices_dt = distribute_tensor(indices, mesh, [Replicate()])
result = torch.min(x_dt, dim=1, keepdim=True, out=(value_dt, indices_dt))
self.assertIsInstance(result[0], DTensor)
self.assertIsInstance(result[1], DTensor)
expected_values, expected_indices = torch.min(x, dim=1, keepdim=True)
self.assertEqual(result[0].full_tensor(), expected_values)
self.assertEqual(result[1].full_tensor(), expected_indices)
if __name__ == "__main__":
run_tests()
|
TestRegisterSharding
|
python
|
huggingface__transformers
|
src/transformers/models/gemma3n/modeling_gemma3n.py
|
{
"start": 37503,
"end": 38924
}
|
class ____(nn.Module):
def __init__(self, config: Gemma3nAudioConfig):
super().__init__()
self.config = config
self.register_buffer("gradient_clipping", torch.tensor(self.config.gradient_clipping), persistent=False)
self.pre_layer_norm = Gemma3nRMSNorm(self.config.hidden_size)
self.ffw_layer_1 = nn.Linear(self.config.hidden_size, self.config.hidden_size * 4, bias=False)
self.ffw_layer_2 = nn.Linear(self.config.hidden_size * 4, self.config.hidden_size, bias=False)
self.post_layer_norm = Gemma3nRMSNorm(self.config.hidden_size)
self.post_layer_scale = torch.tensor(self.config.conf_residual_weight)
def forward(self, audio_encodings: torch.Tensor) -> torch.Tensor:
residual = audio_encodings
audio_encodings = torch.clamp(audio_encodings, -self.gradient_clipping, self.gradient_clipping)
audio_encodings = self.pre_layer_norm(audio_encodings)
audio_encodings: torch.Tensor = self.ffw_layer_1(audio_encodings)
audio_encodings = nn.functional.silu(audio_encodings)
audio_encodings: torch.Tensor = self.ffw_layer_2(audio_encodings)
audio_encodings = torch.clamp(audio_encodings, -self.gradient_clipping, self.gradient_clipping)
audio_encodings = self.post_layer_norm(audio_encodings)
return residual + (audio_encodings * self.post_layer_scale)
|
Gemma3nAudioConformerFeedForward
|
python
|
django__django
|
tests/template_tests/syntax_tests/i18n/test_blocktranslate.py
|
{
"start": 26638,
"end": 28520
}
|
class ____(MultipleLocaleActivationTestCase):
tag_name = "blocktranslate"
def get_template(self, template_string):
return Template(
template_string.replace(
"{{% blocktranslate ", "{{% {}".format(self.tag_name)
).replace(
"{{% endblocktranslate %}}", "{{% end{} %}}".format(self.tag_name)
)
)
def test_single_locale_activation(self):
"""
Simple baseline behavior with one locale for all the supported i18n
constructs.
"""
with translation.override("fr"):
self.assertEqual(
self.get_template(
"{% load i18n %}{% blocktranslate %}Yes{% endblocktranslate %}"
).render(Context({})),
"Oui",
)
def test_multiple_locale_btrans(self):
with translation.override("de"):
t = self.get_template(
"{% load i18n %}{% blocktranslate %}No{% endblocktranslate %}"
)
with translation.override(self._old_language), translation.override("nl"):
self.assertEqual(t.render(Context({})), "Nee")
def test_multiple_locale_deactivate_btrans(self):
with translation.override("de", deactivate=True):
t = self.get_template(
"{% load i18n %}{% blocktranslate %}No{% endblocktranslate %}"
)
with translation.override("nl"):
self.assertEqual(t.render(Context({})), "Nee")
def test_multiple_locale_direct_switch_btrans(self):
with translation.override("de"):
t = self.get_template(
"{% load i18n %}{% blocktranslate %}No{% endblocktranslate %}"
)
with translation.override("nl"):
self.assertEqual(t.render(Context({})), "Nee")
|
MultipleLocaleActivationBlockTranslateTests
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/shape/_label.py
|
{
"start": 235,
"end": 17369
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.shape"
_path_str = "layout.shape.label"
_valid_props = {
"font",
"padding",
"text",
"textangle",
"textposition",
"texttemplate",
"texttemplatefallback",
"xanchor",
"yanchor",
}
@property
def font(self):
"""
Sets the shape label text font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.shape.label.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.layout.shape.label.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def padding(self):
"""
Sets padding (in px) between edge of label and edge of shape.
The 'padding' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["padding"]
@padding.setter
def padding(self, val):
self["padding"] = val
@property
def text(self):
"""
Sets the text to display with shape. It is also used for legend
item if `name` is not provided.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def textangle(self):
"""
Sets the angle at which the label text is drawn with respect to
the horizontal. For lines, angle "auto" is the same angle as
the line. For all other shapes, angle "auto" is horizontal.
The 'textangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["textangle"]
@textangle.setter
def textangle(self, val):
self["textangle"] = val
@property
def textposition(self):
"""
Sets the position of the label text relative to the shape.
Supported values for rectangles, circles and paths are *top
left*, *top center*, *top right*, *middle left*, *middle
center*, *middle right*, *bottom left*, *bottom center*, and
*bottom right*. Supported values for lines are "start",
"middle", and "end". Default: *middle center* for rectangles,
circles, and paths; "middle" for lines.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top left', 'top center', 'top right', 'middle left',
'middle center', 'middle right', 'bottom left', 'bottom
center', 'bottom right', 'start', 'middle', 'end']
Returns
-------
Any
"""
return self["textposition"]
@textposition.setter
def textposition(self, val):
self["textposition"] = val
@property
def texttemplate(self):
"""
Template string used for rendering the shape's label. Note that
this will override `text`. Variables are inserted using
%{variable}, for example "x0: %{x0}". Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{x0:$.2f}". See
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{x0|%m %b %Y}". See https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. A single multiplication or division
operation may be applied to numeric variables, and combined
with d3 number formatting, for example "Length in cm:
%{x0*2.54}", "%{slope*60:.1f} meters per second." For log axes,
variable values are given in log units. For date axes, x/y
coordinate variables and center variables use datetimes, while
all other variable values use values in ms. Finally, the
template string has access to variables `x0`, `x1`, `y0`, `y1`,
`slope`, `dx`, `dy`, `width`, `height`, `length`, `xcenter` and
`ycenter`. Variables that can't be found will be replaced with
the specifier. For example, a template of "data: %{x}, %{y}"
will result in a value of "data: 1, %{y}" if x is 1 and y is
missing. Variables with an undefined value will be replaced
with the fallback value.
The 'texttemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
@property
def texttemplatefallback(self):
"""
Fallback string that's displayed when a variable referenced in
a template is missing. If the boolean value 'false' is passed
in, the specifier with the missing variable will be displayed.
The 'texttemplatefallback' property accepts values of any type
Returns
-------
Any
"""
return self["texttemplatefallback"]
@texttemplatefallback.setter
def texttemplatefallback(self, val):
self["texttemplatefallback"] = val
@property
def xanchor(self):
"""
Sets the label's horizontal position anchor This anchor binds
the specified `textposition` to the "left", "center" or "right"
of the label text. For example, if `textposition` is set to
*top right* and `xanchor` to "right" then the right-most
portion of the label text lines up with the right-most edge of
the shape.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
@property
def yanchor(self):
"""
Sets the label's vertical position anchor This anchor binds the
specified `textposition` to the "top", "middle" or "bottom" of
the label text. For example, if `textposition` is set to *top
right* and `yanchor` to "top" then the top-most portion of the
label text lines up with the top-most edge of the shape.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets the shape label text font.
padding
Sets padding (in px) between edge of label and edge of
shape.
text
Sets the text to display with shape. It is also used
for legend item if `name` is not provided.
textangle
Sets the angle at which the label text is drawn with
respect to the horizontal. For lines, angle "auto" is
the same angle as the line. For all other shapes, angle
"auto" is horizontal.
textposition
Sets the position of the label text relative to the
shape. Supported values for rectangles, circles and
paths are *top left*, *top center*, *top right*,
*middle left*, *middle center*, *middle right*, *bottom
left*, *bottom center*, and *bottom right*. Supported
values for lines are "start", "middle", and "end".
Default: *middle center* for rectangles, circles, and
paths; "middle" for lines.
texttemplate
Template string used for rendering the shape's label.
Note that this will override `text`. Variables are
inserted using %{variable}, for example "x0: %{x0}".
Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{x0:$.2f}".
See
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day: %{x0|%m
%b %Y}". See https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. A single multiplication or
division operation may be applied to numeric variables,
and combined with d3 number formatting, for example
"Length in cm: %{x0*2.54}", "%{slope*60:.1f} meters per
second." For log axes, variable values are given in log
units. For date axes, x/y coordinate variables and
center variables use datetimes, while all other
variable values use values in ms. Finally, the template
string has access to variables `x0`, `x1`, `y0`, `y1`,
`slope`, `dx`, `dy`, `width`, `height`, `length`,
`xcenter` and `ycenter`. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value.
texttemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
xanchor
Sets the label's horizontal position anchor This anchor
binds the specified `textposition` to the "left",
"center" or "right" of the label text. For example, if
`textposition` is set to *top right* and `xanchor` to
"right" then the right-most portion of the label text
lines up with the right-most edge of the shape.
yanchor
Sets the label's vertical position anchor This anchor
binds the specified `textposition` to the "top",
"middle" or "bottom" of the label text. For example, if
`textposition` is set to *top right* and `yanchor` to
"top" then the top-most portion of the label text lines
up with the top-most edge of the shape.
"""
def __init__(
self,
arg=None,
font=None,
padding=None,
text=None,
textangle=None,
textposition=None,
texttemplate=None,
texttemplatefallback=None,
xanchor=None,
yanchor=None,
**kwargs,
):
"""
Construct a new Label object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.shape.Label`
font
Sets the shape label text font.
padding
Sets padding (in px) between edge of label and edge of
shape.
text
Sets the text to display with shape. It is also used
for legend item if `name` is not provided.
textangle
Sets the angle at which the label text is drawn with
respect to the horizontal. For lines, angle "auto" is
the same angle as the line. For all other shapes, angle
"auto" is horizontal.
textposition
Sets the position of the label text relative to the
shape. Supported values for rectangles, circles and
paths are *top left*, *top center*, *top right*,
*middle left*, *middle center*, *middle right*, *bottom
left*, *bottom center*, and *bottom right*. Supported
values for lines are "start", "middle", and "end".
Default: *middle center* for rectangles, circles, and
paths; "middle" for lines.
texttemplate
Template string used for rendering the shape's label.
Note that this will override `text`. Variables are
inserted using %{variable}, for example "x0: %{x0}".
Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{x0:$.2f}".
See
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day: %{x0|%m
%b %Y}". See https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. A single multiplication or
division operation may be applied to numeric variables,
and combined with d3 number formatting, for example
"Length in cm: %{x0*2.54}", "%{slope*60:.1f} meters per
second." For log axes, variable values are given in log
units. For date axes, x/y coordinate variables and
center variables use datetimes, while all other
variable values use values in ms. Finally, the template
string has access to variables `x0`, `x1`, `y0`, `y1`,
`slope`, `dx`, `dy`, `width`, `height`, `length`,
`xcenter` and `ycenter`. Variables that can't be found
will be replaced with the specifier. For example, a
template of "data: %{x}, %{y}" will result in a value
of "data: 1, %{y}" if x is 1 and y is missing.
Variables with an undefined value will be replaced with
the fallback value.
texttemplatefallback
Fallback string that's displayed when a variable
referenced in a template is missing. If the boolean
value 'false' is passed in, the specifier with the
missing variable will be displayed.
xanchor
Sets the label's horizontal position anchor This anchor
binds the specified `textposition` to the "left",
"center" or "right" of the label text. For example, if
`textposition` is set to *top right* and `xanchor` to
"right" then the right-most portion of the label text
lines up with the right-most edge of the shape.
yanchor
Sets the label's vertical position anchor This anchor
binds the specified `textposition` to the "top",
"middle" or "bottom" of the label text. For example, if
`textposition` is set to *top right* and `yanchor` to
"top" then the top-most portion of the label text lines
up with the top-most edge of the shape.
Returns
-------
Label
"""
super().__init__("label")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.shape.Label
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.shape.Label`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("padding", arg, padding)
self._set_property("text", arg, text)
self._set_property("textangle", arg, textangle)
self._set_property("textposition", arg, textposition)
self._set_property("texttemplate", arg, texttemplate)
self._set_property("texttemplatefallback", arg, texttemplatefallback)
self._set_property("xanchor", arg, xanchor)
self._set_property("yanchor", arg, yanchor)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Label
|
python
|
FactoryBoy__factory_boy
|
tests/test_alchemy.py
|
{
"start": 4539,
"end": 5260
}
|
class ____(TransactionTestCase):
def test_one_defined(self):
obj1 = WithMultipleGetOrCreateFieldsFactory()
obj2 = WithMultipleGetOrCreateFieldsFactory(slug=obj1.slug)
self.assertEqual(obj1, obj2)
def test_both_defined(self):
obj1 = WithMultipleGetOrCreateFieldsFactory()
with self.assertRaises(sqlalchemy.exc.IntegrityError):
WithMultipleGetOrCreateFieldsFactory(slug=obj1.slug, text="alt")
def test_unique_field_not_in_get_or_create(self):
WithMultipleGetOrCreateFieldsFactory(title='Title')
with self.assertRaises(sqlalchemy.exc.IntegrityError):
WithMultipleGetOrCreateFieldsFactory(title='Title')
|
MultipleGetOrCreateFieldsTest
|
python
|
facebook__pyre-check
|
client/commands/tests/analyze_test.py
|
{
"start": 496,
"end": 12740
}
|
class ____(testslide.TestCase):
def test_serialize_arguments(self) -> None:
def assert_serialized(
arguments: analyze.Arguments, items: Iterable[Tuple[str, object]]
) -> None:
serialized = arguments.serialize()
for key, value in items:
if key not in serialized:
self.fail(f"Cannot find key `{key}` in serialized arguments")
else:
self.assertEqual(value, serialized[key])
assert_serialized(
analyze.Arguments(
base_arguments=backend_arguments.BaseArguments(
log_path="/log",
global_root="/project",
source_paths=backend_arguments.SimpleSourcePath(
[search_path.SimpleElement("source")]
),
),
dump_call_graph="/call-graph",
dump_model_query_results="/model-query",
find_missing_flows="obscure",
maximum_model_source_tree_width=10,
maximum_model_sink_tree_width=11,
maximum_model_tito_tree_width=12,
maximum_tree_depth_after_widening=4,
maximum_return_access_path_width=13,
maximum_return_access_path_depth_after_widening=5,
maximum_tito_collapse_depth=6,
maximum_tito_positions=40,
maximum_overrides_to_analyze=50,
maximum_tito_depth=5,
maximum_trace_length=4,
no_verify=True,
verify_dsl=True,
repository_root="/root",
rule_filter=[1, 2],
source_filter=["X"],
sink_filter=["Y", "Z"],
transform_filter=["T"],
save_results_to="/output/results.json",
output_format="sharded-json",
strict=True,
taint_model_paths=["/taint/models"],
use_cache=True,
build_cache_only=True,
check_invariants=True,
limit_entrypoints=True,
compact_ocaml_heap=True,
saved_state_arguments=command_arguments.PysaSavedStateArguments(
watchman_root=Path("/root"),
project_name="my_project",
preset="some_preset",
cache_critical_files=["*.py"],
),
compute_coverage=True,
higher_order_call_graph_max_iterations=10,
maximum_target_depth=4,
maximum_parameterized_targets_at_call_site=1000,
),
[
("log_path", "/log"),
("global_root", "/project"),
("source_paths", {"kind": "simple", "paths": ["source"]}),
("dump_call_graph", "/call-graph"),
("dump_model_query_results", "/model-query"),
("find_missing_flows", "obscure"),
("infer_self_tito", True),
("infer_argument_tito", False),
("maximum_model_source_tree_width", 10),
("maximum_model_sink_tree_width", 11),
("maximum_model_tito_tree_width", 12),
("maximum_tree_depth_after_widening", 4),
("maximum_return_access_path_width", 13),
("maximum_return_access_path_depth_after_widening", 5),
("maximum_tito_collapse_depth", 6),
("maximum_tito_positions", 40),
("maximum_overrides_to_analyze", 50),
("maximum_tito_depth", 5),
("maximum_trace_length", 4),
("no_verify", True),
("verify_dsl", True),
("repository_root", "/root"),
("rule_filter", [1, 2]),
("source_filter", ["X"]),
("sink_filter", ["Y", "Z"]),
("transform_filter", ["T"]),
("save_results_to", "/output/results.json"),
("output_format", "sharded-json"),
("strict", True),
("taint_model_paths", ["/taint/models"]),
("use_cache", True),
("build_cache_only", True),
("check_invariants", True),
("limit_entrypoints", True),
("compact_ocaml_heap", True),
(
"saved_state",
{
"watchman_root": "/root",
"project_name": "my_project",
"preset": "some_preset",
"cache_critical_files": ["*.py"],
},
),
("compute_coverage", True),
("higher_order_call_graph_max_iterations", 10),
("maximum_target_depth", 4),
("maximum_parameterized_targets_at_call_site", 1000),
],
)
def test_create_analyze_arguments(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(
root_path,
[".pyre", "blocks", "search", "taint_models", "local/src"],
)
setup.write_configuration_file(
root_path,
{
"ignore_all_errors": ["blocks", "nonexistent"],
"exclude": ["exclude"],
"extensions": [".ext"],
"workers": 42,
"search_path": ["search"],
"optional_search_path": ["nonexistent"],
},
)
setup.write_configuration_file(
root_path, {"source_directories": ["src"]}, relative="local"
)
analyze_configuration = frontend_configuration.OpenSource(
configuration.create_configuration(
command_arguments.CommandArguments(
local_configuration="local",
dot_pyre_directory=root_path / ".pyre",
strict=True,
),
root_path,
)
)
self.assertEqual(
analyze.create_analyze_arguments(
analyze_configuration,
command_arguments.AnalyzeArguments(
debug=True,
dump_call_graph="/call-graph",
dump_model_query_results="/model-query",
find_missing_flows=command_arguments.MissingFlowsKind.TYPE,
infer_self_tito=False,
infer_argument_tito=True,
maximum_model_source_tree_width=10,
maximum_model_sink_tree_width=11,
maximum_model_tito_tree_width=12,
maximum_tree_depth_after_widening=4,
maximum_return_access_path_width=13,
maximum_return_access_path_depth_after_widening=5,
maximum_tito_collapse_depth=6,
maximum_tito_positions=40,
maximum_overrides_to_analyze=50,
maximum_tito_depth=5,
maximum_trace_length=4,
no_verify=True,
verify_dsl=True,
repository_root="/root",
rule=[1, 2],
source=["X"],
sink=["Y", "Z"],
transform=["T"],
save_results_to="/result.json",
output_format=command_arguments.TaintOutputFormat.SHARDED_JSON,
taint_models_path=[str(root_path / "taint_models")],
use_cache=True,
build_cache_only=True,
check_invariants=True,
limit_entrypoints=True,
compact_ocaml_heap=True,
saved_state_arguments=command_arguments.PysaSavedStateArguments(
watchman_root=Path("/root"),
project_name="test_project",
preset="some_preset",
cache_critical_files=["*.py"],
),
compute_coverage=True,
higher_order_call_graph_max_iterations=10,
maximum_target_depth=4,
maximum_parameterized_targets_at_call_site=1000,
),
),
analyze.Arguments(
base_arguments=backend_arguments.BaseArguments(
log_path=str(root_path / ".pyre/local"),
global_root=str(root_path),
checked_directory_allowlist=[
str(root_path / "local/src"),
],
checked_directory_blocklist=[
str(root_path / "blocks"),
str(root_path / "nonexistent"),
],
debug=True,
excludes=[
"exclude",
],
extensions=[".ext"],
relative_local_root="local",
number_of_workers=42,
parallel=True,
python_version=analyze_configuration.get_python_version(),
search_paths=[
search_path.SimpleElement(str(root_path / "search"))
],
source_paths=backend_arguments.SimpleSourcePath(
[search_path.SimpleElement(str(root_path / "local/src"))]
),
),
dump_call_graph="/call-graph",
dump_model_query_results="/model-query",
find_missing_flows="type",
infer_self_tito=False,
infer_argument_tito=True,
maximum_model_source_tree_width=10,
maximum_model_sink_tree_width=11,
maximum_model_tito_tree_width=12,
maximum_tree_depth_after_widening=4,
maximum_return_access_path_width=13,
maximum_return_access_path_depth_after_widening=5,
maximum_tito_collapse_depth=6,
maximum_tito_positions=40,
maximum_overrides_to_analyze=50,
maximum_tito_depth=5,
maximum_trace_length=4,
no_verify=True,
verify_dsl=True,
repository_root="/root",
rule_filter=[1, 2],
source_filter=["X"],
sink_filter=["Y", "Z"],
transform_filter=["T"],
save_results_to="/result.json",
output_format="sharded-json",
strict=True,
taint_model_paths=[str(root_path / "taint_models")],
use_cache=True,
build_cache_only=True,
check_invariants=True,
limit_entrypoints=True,
compact_ocaml_heap=True,
saved_state_arguments=command_arguments.PysaSavedStateArguments(
watchman_root=Path("/root"),
project_name="test_project",
preset="some_preset",
cache_critical_files=["*.py"],
),
compute_coverage=True,
higher_order_call_graph_max_iterations=10,
maximum_target_depth=4,
maximum_parameterized_targets_at_call_site=1000,
),
)
|
ArgumentTest
|
python
|
Pylons__pyramid
|
tests/pkgs/eventonly/__init__.py
|
{
"start": 77,
"end": 321
}
|
class ____:
def __init__(self, val, config):
self.val = val
def text(self):
return f'path_startswith = {self.val}'
phash = text
def __call__(self, event):
return getattr(event.response, 'yup', False)
|
Yup
|
python
|
astropy__astropy
|
astropy/samp/utils.py
|
{
"start": 4182,
"end": 4824
}
|
class ____:
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _HubAsClientMethod(self.__send, f"{self.__name}.{name}")
def __call__(self, *args):
return self.__send(self.__name, args)
def get_num_args(f):
"""
Find the number of arguments a function or method takes (excluding ``self``).
"""
if inspect.ismethod(f):
return f.__func__.__code__.co_argcount - 1
elif inspect.isfunction(f):
return f.__code__.co_argcount
else:
raise TypeError("f should be a function or a method")
|
_HubAsClientMethod
|
python
|
getsentry__sentry
|
src/sentry/migrations/0923_dashboard_starred_backfill_orgs.py
|
{
"start": 1206,
"end": 2574
}
|
class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0922_dashboard_starred_add_position_column_and_constraint"),
]
operations = [
migrations.RunPython(
code=backfill_dashboard_organization,
reverse_code=migrations.RunPython.noop,
hints={"tables": ["sentry_dashboardfavoriteuser"]},
),
]
|
Migration
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/distributions/special_math_test.py
|
{
"start": 8020,
"end": 8434
}
|
class ____(NdtrTest):
_use_log = True
_grid32 = GridSpec(
min=sm.LOGNDTR_FLOAT32_LOWER, max=sm.LOGNDTR_FLOAT32_UPPER, shape=[100])
_grid64 = GridSpec(
min=sm.LOGNDTR_FLOAT64_LOWER, max=sm.LOGNDTR_FLOAT64_UPPER, shape=[100])
# Differences show up as soon as we're in the tail, so add some atol.
_error32 = ErrorSpec(rtol=0.1, atol=1e-7)
_error64 = ErrorSpec(rtol=0.1, atol=1e-7)
|
LogNdtrTestMid
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/saving/utils_v1/export_output.py
|
{
"start": 1178,
"end": 3358
}
|
class ____(object):
"""Represents an output of a model that can be served.
These typically correspond to model heads.
"""
__metaclass__ = abc.ABCMeta
_SEPARATOR_CHAR = '/'
@abc.abstractmethod
def as_signature_def(self, receiver_tensors):
"""Generate a SignatureDef proto for inclusion in a MetaGraphDef.
The SignatureDef will specify outputs as described in this ExportOutput,
and will use the provided receiver_tensors as inputs.
Args:
receiver_tensors: a `Tensor`, or a dict of string to `Tensor`, specifying
input nodes that will be fed.
"""
pass
def _check_output_key(self, key, error_label):
# For multi-head models, the key can be a tuple.
if isinstance(key, tuple):
key = self._SEPARATOR_CHAR.join(key)
if not isinstance(key, str):
raise ValueError(
'{} output key must be a string; got {}.'.format(error_label, key))
return key
def _wrap_and_check_outputs(
self, outputs, single_output_default_name, error_label=None):
"""Wraps raw tensors as dicts and checks type.
Note that we create a new dict here so that we can overwrite the keys
if necessary.
Args:
outputs: A `Tensor` or a dict of string to `Tensor`.
single_output_default_name: A string key for use in the output dict
if the provided `outputs` is a raw tensor.
error_label: descriptive string for use in error messages. If none,
single_output_default_name will be used.
Returns:
A dict of tensors
Raises:
ValueError: if the outputs dict keys are not strings or tuples of strings
or the values are not Tensors.
"""
if not isinstance(outputs, dict):
outputs = {single_output_default_name: outputs}
output_dict = {}
for key, value in outputs.items():
error_name = error_label or single_output_default_name
key = self._check_output_key(key, error_name)
if not isinstance(value, tensor.Tensor):
raise ValueError(
'{} output value must be a Tensor; got {}.'.format(
error_name, value))
output_dict[key] = value
return output_dict
|
ExportOutput
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/py_pip/package.py
|
{
"start": 217,
"end": 458
}
|
class ____(Package):
"""Only needed because other mock packages use PythonPackage"""
homepage = "http://www.example.com"
url = "http://www.example.com/pip-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
|
PyPip
|
python
|
bokeh__bokeh
|
src/bokeh/core/property/wrappers.py
|
{
"start": 4406,
"end": 5586
}
|
class ____:
""" A base class for property container classes that support change
notifications on mutating operations.
This class maintains an internal list of property owners, and also
provides a private mechanism for methods wrapped with
:func:`~bokeh.core.property.wrappers.notify_owners` to update
those owners when mutating changes occur.
"""
_owners: set[tuple[HasProps, PropertyDescriptor[Any]]]
def __init__(self, *args, **kwargs) -> None:
self._owners = set()
super().__init__(*args, **kwargs)
def _register_owner(self, owner: HasProps, descriptor: PropertyDescriptor[Any]) -> None:
self._owners.add((owner, descriptor))
def _unregister_owner(self, owner: HasProps, descriptor: PropertyDescriptor[Any]) -> None:
self._owners.discard((owner, descriptor))
def _notify_owners(self, old: Any, hint: DocumentPatchedEvent | None = None) -> None:
for (owner, descriptor) in self._owners:
descriptor._notify_mutated(owner, old, hint=hint)
def _saved_copy(self) -> Any:
raise RuntimeError("Subtypes must implement this to make a backup copy")
|
PropertyValueContainer
|
python
|
sqlalchemy__sqlalchemy
|
examples/versioned_rows/versioned_map.py
|
{
"start": 5291,
"end": 6674
}
|
class ____(Base):
"""Relate ConfigData objects to associated ConfigValue objects."""
__tablename__ = "config_value_association"
config_id = Column(ForeignKey("config.id"), primary_key=True)
"""Reference the primary key of the ConfigData object."""
config_value_id = Column(ForeignKey("config_value.id"), primary_key=True)
"""Reference the primary key of the ConfigValue object."""
config_value = relationship("ConfigValue", lazy="joined", innerjoin=True)
"""Reference the related ConfigValue object."""
def __init__(self, config_value):
self.config_value = config_value
def new_version(self, session):
"""Expire all pending state, as ConfigValueAssociation is immutable."""
session.expire(self)
@property
def name(self):
return self.config_value.name
@property
def value(self):
return self.config_value.value
@value.setter
def value(self, value):
"""Intercept set events.
Create a new ConfigValueAssociation upon change,
replacing this one in the parent ConfigData's dictionary.
If no net change, do nothing.
"""
if value != self.config_value.value:
self.config_data.elements[self.name] = ConfigValueAssociation(
ConfigValue(self.config_value.name, value)
)
|
ConfigValueAssociation
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/matchValue1.py
|
{
"start": 713,
"end": 1977
}
|
class ____:
class_var_1: "MyClass"
def __eq__(self, object: "MyClass") -> bool: ...
def test_unknown(value_to_match):
match value_to_match:
case MyEnum1.V1 as a1:
reveal_type(a1, expected_text="Unknown")
reveal_type(value_to_match, expected_text="Unknown")
def test_enum(value_to_match: MyEnum1):
match value_to_match:
case MyEnum1.V1 as a1:
reveal_type(a1, expected_text="Literal[MyEnum1.V1]")
reveal_type(value_to_match, expected_text="Literal[MyEnum1.V1]")
case y:
reveal_type(y, expected_text="Literal[MyEnum1.V2]")
reveal_type(value_to_match, expected_text="Literal[MyEnum1.V2]")
def test_class_var(value_to_match: str):
match value_to_match:
case MyClass.class_var_1 as a1:
reveal_type(a1, expected_text="Never")
reveal_type(value_to_match, expected_text="Never")
TInt = TypeVar("TInt", bound=MyEnum1)
def test_union(value_to_match: TInt | MyEnum1) -> TInt | MyEnum1:
match value_to_match:
case MyEnum1.V1 as a1:
reveal_type(a1, expected_text="Literal[MyEnum1.V1]")
reveal_type(value_to_match, expected_text="Literal[MyEnum1.V1]")
return value_to_match
|
MyClass
|
python
|
openai__openai-python
|
src/openai/resources/videos.py
|
{
"start": 28709,
"end": 29443
}
|
class ____:
def __init__(self, videos: Videos) -> None:
self._videos = videos
self.create = _legacy_response.to_raw_response_wrapper(
videos.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
videos.retrieve,
)
self.list = _legacy_response.to_raw_response_wrapper(
videos.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
videos.delete,
)
self.download_content = _legacy_response.to_raw_response_wrapper(
videos.download_content,
)
self.remix = _legacy_response.to_raw_response_wrapper(
videos.remix,
)
|
VideosWithRawResponse
|
python
|
openai__openai-python
|
src/openai/types/moderation_image_url_input_param.py
|
{
"start": 240,
"end": 375
}
|
class ____(TypedDict, total=False):
url: Required[str]
"""Either a URL of the image or the base64 encoded image data."""
|
ImageURL
|
python
|
getsentry__sentry
|
src/sentry/auth/providers/oauth2.py
|
{
"start": 4998,
"end": 8344
}
|
class ____(Provider, abc.ABC):
is_partner = False
@abc.abstractmethod
def get_client_id(self) -> str:
raise NotImplementedError
@abc.abstractmethod
def get_client_secret(self) -> str:
raise NotImplementedError
def get_auth_pipeline(self) -> list[AuthView]:
return [
OAuth2Login(client_id=self.get_client_id()),
OAuth2Callback(client_id=self.get_client_id(), client_secret=self.get_client_secret()),
]
@abc.abstractmethod
def get_refresh_token_url(self) -> str:
raise NotImplementedError
def get_refresh_token_params(self, refresh_token: str) -> Mapping[str, str | None]:
return {
"client_id": self.get_client_id(),
"client_secret": self.get_client_secret(),
"grant_type": "refresh_token",
"refresh_token": refresh_token,
}
def get_oauth_data(self, payload: Mapping[str, Any]) -> Mapping[str, Any]:
data = {"access_token": payload["access_token"], "token_type": payload["token_type"]}
if "expires_in" in payload:
data["expires"] = int(time()) + int(payload["expires_in"])
if "refresh_token" in payload:
data["refresh_token"] = payload["refresh_token"]
return data
@abc.abstractmethod
def build_identity(self, state: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Example implementation:
data = state['data']
return {
'id': '',
'email': '',
'name': '',
'data': self.get_oauth_data(data),
}
"""
raise NotImplementedError
def update_identity(
self, new_data: dict[str, Any], current_data: Mapping[str, Any]
) -> Mapping[str, Any]:
# we want to maintain things like refresh_token that might not
# exist on a refreshed state
if "refresh_token" in current_data:
new_data.setdefault("refresh_token", current_data["refresh_token"])
return new_data
def refresh_identity(self, auth_identity: AuthIdentity) -> None:
refresh_token = auth_identity.data.get("refresh_token")
if not refresh_token:
raise IdentityNotValid("Missing refresh token")
data = self.get_refresh_token_params(refresh_token=refresh_token)
req = safe_urlopen(self.get_refresh_token_url(), data=data)
try:
body = safe_urlread(req)
payload = orjson.loads(body)
except Exception:
payload = {}
error = payload.get("error", "unknown_error")
error_description = payload.get("error_description", "no description available")
formatted_error = f"HTTP {req.status_code} ({error}): {error_description}"
if req.status_code == 401:
raise IdentityNotValid(formatted_error)
if req.status_code == 400:
# this may not be common, but at the very least Google will return
# an invalid grant when a user is suspended
if error == "invalid_grant":
raise IdentityNotValid(formatted_error)
if req.status_code != 200:
raise Exception(formatted_error)
auth_identity.data.update(self.get_oauth_data(payload))
auth_identity.update(data=auth_identity.data)
|
OAuth2Provider
|
python
|
pytorch__pytorch
|
torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py
|
{
"start": 2882,
"end": 4126
}
|
class ____:
r"""
An observer has exclusive access to its own environment. Each observer
captures the state from its environment, and send the state to the agent to
select an action. Then, the observer applies the action to its environment
and reports the reward to the agent.
"""
def __init__(self) -> None:
self.id = rpc.get_worker_info().id
self.env = DummyEnv()
self.env.seed(SEED)
def run_episode(self, agent_rref, n_steps):
r"""
Run one episode of n_steps.
Arguments:
agent_rref (RRef): an RRef referencing the agent object.
n_steps (int): number of steps in this episode
"""
state, _ep_reward = self.env.reset(), 0
for _ in range(n_steps):
# send the state to the agent to get an action
action = _remote_method(Agent.select_action, agent_rref, self.id, state)
# apply the action to the environment, and get the reward
state, reward, done, _ = self.env.step(action)
# report the reward to the agent for training purpose
_remote_method(Agent.report_reward, agent_rref, self.id, reward)
if done:
break
|
Observer
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-confluence/llama_index/readers/confluence/event.py
|
{
"start": 1891,
"end": 2239
}
|
class ____(BaseEvent):
"""Event emitted when an attachment is successfully processed."""
page_id: str
attachment_id: str
attachment_name: str
attachment_type: str
attachment_size: int
attachment_link: str
@classmethod
def class_name(cls) -> str:
return "AttachmentProcessedEvent"
|
AttachmentProcessedEvent
|
python
|
huggingface__transformers
|
src/transformers/models/prophetnet/modeling_prophetnet.py
|
{
"start": 47842,
"end": 52258
}
|
class ____(ProphetNetPreTrainedModel):
def __init__(self, config: ProphetNetConfig):
super().__init__(config)
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = ProphetNetPositionalEmbeddings(config)
self.embeddings_layer_norm = LayerNorm(config.hidden_size)
self.layers = nn.ModuleList([ProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)])
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.word_embeddings
def set_input_embeddings(self, value):
self.word_embeddings = value
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, ProphetNetEncoder
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
>>> model = ProphetNetEncoder.from_pretrained("patrickvonplaten/prophetnet-large-uncased-standalone")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is None and inputs_embeds is None:
raise ValueError("Either input_ids or inputs_embeds has to be passed.")
elif input_ids is not None and inputs_embeds is not None:
raise ValueError("Make sure to only pass input_ids or inputs_embeds.")
elif input_ids is not None and inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
# prepare attention mask
if attention_mask is not None:
extended_attention_mask = (
1.0 - attention_mask[:, None, None, :].repeat(1, self.config.num_encoder_attention_heads, 1, 1)
) * torch.finfo(self.dtype).min
extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype)
else:
extended_attention_mask = None
position_embeddings, position_ids = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device)
hidden_states = inputs_embeds + position_embeddings
hidden_states = self.embeddings_layer_norm(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.config.dropout, training=self.training)
encoder_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_hidden_states = encoder_hidden_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states,
attention_mask=extended_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_hidden_states = encoder_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions
)
@auto_docstring(
custom_intro="""
The standalone decoder part of the ProphetNetModel.
"""
)
|
ProphetNetEncoder
|
python
|
pytorch__pytorch
|
torch/utils/data/datapipes/iter/filelister.py
|
{
"start": 414,
"end": 2554
}
|
class ____(IterDataPipe[str]):
r"""
Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory.
Multiple root directories can be provided (functional name: ``list_files``).
Args:
root: Root directory or a sequence of root directories
masks: Unix style filter string or string list for filtering file name(s)
recursive: Whether to return pathname from nested directories or not
abspath: Whether to return relative pathname or absolute pathname
non_deterministic: Whether to return pathname in sorted order or not.
If ``False``, the results yielded from each root directory will be sorted
length: Nominal length of the datapipe
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import FileLister
>>> dp = FileLister(root=".", recursive=True)
>>> list(dp)
['example.py', './data/data.tar']
"""
def __init__(
self,
root: str | Sequence[str] | IterDataPipe = ".",
masks: str | list[str] = "",
*,
recursive: bool = False,
abspath: bool = False,
non_deterministic: bool = False,
length: int = -1,
) -> None:
super().__init__()
if isinstance(root, str):
root = [root]
if not isinstance(root, IterDataPipe):
root = IterableWrapperIterDataPipe(root)
self.datapipe: IterDataPipe = root
self.masks: str | list[str] = masks
self.recursive: bool = recursive
self.abspath: bool = abspath
self.non_deterministic: bool = non_deterministic
self.length: int = length
def __iter__(self) -> Iterator[str]:
for path in self.datapipe:
yield from get_file_pathnames_from_root(
path, self.masks, self.recursive, self.abspath, self.non_deterministic
)
def __len__(self) -> int:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
|
FileListerIterDataPipe
|
python
|
pallets__jinja
|
src/jinja2/nodes.py
|
{
"start": 29280,
"end": 29508
}
|
class ____(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ("name",)
name: str
|
EnvironmentAttribute
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-github/tests/test_github_repository_reader.py
|
{
"start": 39254,
"end": 44023
}
|
class ____:
"""Test edge cases and error handling."""
def test_invalid_filter_type_raises_error(self):
"""Test that invalid filter types raise ValueError."""
gh_client = GithubClient(GITHUB_TOKEN)
with pytest.raises(ValueError, match="Unknown filter type"):
reader = GithubRepositoryReader(gh_client, "run-llama", "llama_index")
reader._filter_file_paths = (["test.py"], "INVALID_TYPE")
reader._check_filter_file_paths("test.py")
def test_process_file_callback_exception_handling(self, monkeypatch):
"""Test that exceptions in process_file_callback are handled gracefully."""
def broken_callback(file_path: str, file_size: int) -> tuple[bool, str]:
raise Exception("Callback error")
async def mock_get_commit(self, *args, **kwargs):
return GitCommitResponseModel.from_json(COMMIT_JSON)
async def mock_get_branch(self, *args, **kwargs):
return GitBranchResponseModel.from_json(BRANCH_JSON)
tree_json = {
"sha": "test_tree_sha",
"url": "https://api.github.com/test/tree",
"tree": [
{
"path": "test_file.txt",
"mode": "100644",
"type": "blob",
"sha": "test_blob_sha",
"size": 100,
"url": "https://api.github.com/test/blob",
}
],
"truncated": False,
}
async def mock_get_tree(self, *args, **kwargs):
return GitTreeResponseModel.from_json(json.dumps(tree_json))
monkeypatch.setattr(GithubClient, "get_commit", mock_get_commit)
monkeypatch.setattr(GithubClient, "get_branch", mock_get_branch)
monkeypatch.setattr(GithubClient, "get_tree", mock_get_tree)
gh_client = GithubClient(GITHUB_TOKEN)
reader = GithubRepositoryReader(
gh_client,
"run-llama",
"llama_index",
process_file_callback=broken_callback,
fail_on_error=False,
)
# This should not raise an exception due to fail_on_error=False
# The callback exception should be caught and handled
documents = reader.load_data(branch="main")
# Should succeed and return empty list since the file was skipped due to callback error
assert isinstance(documents, list)
def test_fail_on_error_true_with_processing_error(self, monkeypatch):
"""Test that fail_on_error=True propagates processing errors."""
async def mock_get_commit(self, *args, **kwargs):
return GitCommitResponseModel.from_json(COMMIT_JSON)
async def mock_get_branch(self, *args, **kwargs):
return GitBranchResponseModel.from_json(BRANCH_JSON)
tree_json = {
"sha": "test_tree_sha",
"url": "https://api.github.com/test/tree",
"tree": [
{
"path": "test_file.txt",
"mode": "100644",
"type": "blob",
"sha": "test_blob_sha",
"size": 100,
"url": "https://api.github.com/test/blob",
}
],
"truncated": False,
}
async def mock_get_tree(self, *args, **kwargs):
return GitTreeResponseModel.from_json(json.dumps(tree_json))
async def mock_get_blob_fail(self, *args, **kwargs):
blob_json = {
"sha": "test_blob_sha",
"node_id": "test_node",
"size": 40,
"url": "https://api.github.com/test/blob",
"content": "invalid_base64!!!", # Invalid base64
"encoding": "base64",
}
from llama_index.readers.github.repository.github_client import (
GitBlobResponseModel,
)
return GitBlobResponseModel.from_json(json.dumps(blob_json))
monkeypatch.setattr(GithubClient, "get_commit", mock_get_commit)
monkeypatch.setattr(GithubClient, "get_branch", mock_get_branch)
monkeypatch.setattr(GithubClient, "get_tree", mock_get_tree)
monkeypatch.setattr(GithubClient, "get_blob", mock_get_blob_fail)
gh_client = GithubClient(GITHUB_TOKEN)
reader = GithubRepositoryReader(
gh_client, "run-llama", "llama_index", fail_on_error=True
)
# This should continue processing despite base64 decode errors
# since those are handled separately from processing errors
documents = reader.load_data(branch="main")
assert isinstance(documents, list)
|
TestGithubRepositoryReaderEdgeCases
|
python
|
numba__numba
|
numba/tests/test_maxmin.py
|
{
"start": 164,
"end": 747
}
|
class ____(unittest.TestCase):
def test_max3(self):
pyfunc = domax3
argtys = (types.int32, types.float32, types.double)
cfunc = njit(argtys)(pyfunc)
a = 1
b = 2
c = 3
self.assertEqual(pyfunc(a, b, c), cfunc(a, b, c))
def test_min3(self):
pyfunc = domin3
argtys = (types.int32, types.float32, types.double)
cfunc = njit(argtys)(pyfunc)
a = 1
b = 2
c = 3
self.assertEqual(pyfunc(a, b, c), cfunc(a, b, c))
if __name__ == '__main__':
unittest.main()
|
TestMaxMin
|
python
|
apache__thrift
|
lib/py/src/transport/TTransport.py
|
{
"start": 7151,
"end": 9135
}
|
class ____(TTransportBase, CReadableTransport):
"""Class that wraps another transport and frames its I/O when writing."""
def __init__(self, trans,):
self.__trans = trans
self.__rbuf = BytesIO(b'')
self.__wbuf = BytesIO()
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.readFrame()
return self.__rbuf.read(sz)
def readFrame(self):
buff = self.__trans.readAll(4)
sz, = unpack('!i', buff)
self.__rbuf = BytesIO(self.__trans.readAll(sz))
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
wout = self.__wbuf.getvalue()
wsz = len(wout)
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = BytesIO()
# N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive copies
buf = pack("!i", wsz) + wout
self.__trans.write(buf)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, prefix, reqlen):
# self.__rbuf will already be empty here because fastbinary doesn't
# ask for a refill until the previous buffer is empty. Therefore,
# we can start reading new frames immediately.
while len(prefix) < reqlen:
self.readFrame()
prefix += self.__rbuf.getvalue()
self.__rbuf = BytesIO(prefix)
return self.__rbuf
|
TFramedTransport
|
python
|
sqlalchemy__sqlalchemy
|
test/typing/plain_files/ext/hybrid/hybrid_three.py
|
{
"start": 768,
"end": 1667
}
|
class ____(Base):
__tablename__ = "user"
id: Mapped[int] = mapped_column(primary_key=True)
name: Mapped[str] = mapped_column(String(100))
accounts: Mapped[List[SavingsAccount]] = relationship()
@hybrid_property
def _balance_getter(self) -> Optional[Decimal]:
if self.accounts:
return self.accounts[0].balance
else:
return None
@_balance_getter.setter
def _balance_setter(self, value: Optional[Decimal]) -> None:
assert value is not None
if not self.accounts:
account = SavingsAccount(owner=self)
else:
account = self.accounts[0]
account.balance = value
@_balance_setter.expression
def balance(cls) -> SQLColumnExpression[Optional[Decimal]]:
return cast(
"SQLColumnExpression[Optional[Decimal]]", SavingsAccount.balance
)
|
UserStyleOne
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/layout/controls.py
|
{
"start": 17066,
"end": 17224
}
|
class ____(NamedTuple):
fragments: StyleAndTextTuples
source_to_display: Callable[[int], int]
display_to_source: Callable[[int], int]
|
_ProcessedLine
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/offsetbox.py
|
{
"start": 49512,
"end": 53671
}
|
class ____:
"""
Helper base class for a draggable artist (legend, offsetbox).
Derived classes must override the following methods::
def save_offset(self):
'''
Called when the object is picked for dragging; should save the
reference position of the artist.
'''
def update_offset(self, dx, dy):
'''
Called during the dragging; (*dx*, *dy*) is the pixel offset from
the point where the mouse drag started.
'''
Optionally, you may override the following method::
def finalize_offset(self):
'''Called when the mouse is released.'''
In the current implementation of `.DraggableLegend` and
`DraggableAnnotation`, `update_offset` places the artists in display
coordinates, and `finalize_offset` recalculates their position in axes
coordinate and set a relevant attribute.
"""
def __init__(self, ref_artist, use_blit=False):
self.ref_artist = ref_artist
if not ref_artist.pickable():
ref_artist.set_picker(self._picker)
self.got_artist = False
self._use_blit = use_blit and self.canvas.supports_blit
callbacks = self.canvas.callbacks
self._disconnectors = [
functools.partial(
callbacks.disconnect, callbacks._connect_picklable(name, func))
for name, func in [
("pick_event", self.on_pick),
("button_release_event", self.on_release),
("motion_notify_event", self.on_motion),
]
]
@staticmethod
def _picker(artist, mouseevent):
# A custom picker to prevent dragging on mouse scroll events
if mouseevent.name == "scroll_event":
return False, {}
return artist.contains(mouseevent)
# A property, not an attribute, to maintain picklability.
canvas = property(lambda self: self.ref_artist.get_figure(root=True).canvas)
cids = property(lambda self: [
disconnect.args[0] for disconnect in self._disconnectors[:2]])
def on_motion(self, evt):
if self._check_still_parented() and self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
if self._use_blit:
self.canvas.restore_region(self.background)
self.ref_artist.draw(
self.ref_artist.get_figure(root=True)._get_renderer())
self.canvas.blit()
else:
self.canvas.draw()
def on_pick(self, evt):
if self._check_still_parented():
if evt.artist == self.ref_artist:
self.mouse_x = evt.mouseevent.x
self.mouse_y = evt.mouseevent.y
self.save_offset()
self.got_artist = True
if self.got_artist and self._use_blit:
self.ref_artist.set_animated(True)
self.canvas.draw()
fig = self.ref_artist.get_figure(root=False)
self.background = self.canvas.copy_from_bbox(fig.bbox)
self.ref_artist.draw(fig._get_renderer())
self.canvas.blit()
def on_release(self, event):
if self._check_still_parented() and self.got_artist:
self.finalize_offset()
self.got_artist = False
if self._use_blit:
self.canvas.restore_region(self.background)
self.ref_artist.draw(self.ref_artist.figure._get_renderer())
self.canvas.blit()
self.ref_artist.set_animated(False)
def _check_still_parented(self):
if self.ref_artist.get_figure(root=False) is None:
self.disconnect()
return False
else:
return True
def disconnect(self):
"""Disconnect the callbacks."""
for disconnector in self._disconnectors:
disconnector()
def save_offset(self):
pass
def update_offset(self, dx, dy):
pass
def finalize_offset(self):
pass
|
DraggableBase
|
python
|
giampaolo__psutil
|
tests/test_system.py
|
{
"start": 21923,
"end": 27129
}
|
class ____(PsutilTestCase):
def test_disk_usage(self):
usage = psutil.disk_usage(os.getcwd())
assert usage._fields == ('total', 'used', 'free', 'percent')
assert usage.total > 0, usage
assert usage.used > 0, usage
assert usage.free > 0, usage
assert usage.total > usage.used, usage
assert usage.total > usage.free, usage
assert 0 <= usage.percent <= 100, usage.percent
shutil_usage = shutil.disk_usage(os.getcwd())
tolerance = 5 * 1024 * 1024 # 5MB
assert usage.total == shutil_usage.total
assert abs(usage.free - shutil_usage.free) < tolerance
if not MACOS_12PLUS:
# see https://github.com/giampaolo/psutil/issues/2147
assert abs(usage.used - shutil_usage.used) < tolerance
# if path does not exist OSError ENOENT is expected across
# all platforms
fname = self.get_testfn()
with pytest.raises(FileNotFoundError):
psutil.disk_usage(fname)
@pytest.mark.skipif(not ASCII_FS, reason="not an ASCII fs")
def test_disk_usage_unicode(self):
# See: https://github.com/giampaolo/psutil/issues/416
with pytest.raises(UnicodeEncodeError):
psutil.disk_usage(UNICODE_SUFFIX)
def test_disk_usage_bytes(self):
psutil.disk_usage(b'.')
def test_disk_partitions(self):
def check_ntuple(nt):
assert isinstance(nt.device, str)
assert isinstance(nt.mountpoint, str)
assert isinstance(nt.fstype, str)
assert isinstance(nt.opts, str)
# all = False
ls = psutil.disk_partitions(all=False)
assert ls
for disk in ls:
check_ntuple(disk)
if WINDOWS and 'cdrom' in disk.opts:
continue
if not POSIX:
assert os.path.exists(disk.device), disk
else:
# we cannot make any assumption about this, see:
# http://goo.gl/p9c43
disk.device # noqa: B018
# on modern systems mount points can also be files
assert os.path.exists(disk.mountpoint), disk
assert disk.fstype, disk
# all = True
ls = psutil.disk_partitions(all=True)
assert ls
for disk in psutil.disk_partitions(all=True):
check_ntuple(disk)
if not WINDOWS and disk.mountpoint:
try:
os.stat(disk.mountpoint)
except OSError as err:
if GITHUB_ACTIONS and MACOS and err.errno == errno.EIO:
continue
# http://mail.python.org/pipermail/python-dev/
# 2012-June/120787.html
if err.errno not in {errno.EPERM, errno.EACCES}:
raise
else:
assert os.path.exists(disk.mountpoint), disk
# ---
def find_mount_point(path):
path = os.path.abspath(path)
while not os.path.ismount(path):
path = os.path.dirname(path)
return path.lower()
mount = find_mount_point(__file__)
mounts = [
x.mountpoint.lower()
for x in psutil.disk_partitions(all=True)
if x.mountpoint
]
assert mount in mounts
@pytest.mark.skipif(
LINUX and not os.path.exists('/proc/diskstats'),
reason="/proc/diskstats not available on this linux version",
)
@pytest.mark.skipif(
CI_TESTING and not psutil.disk_io_counters(), reason="unreliable on CI"
) # no visible disks
def test_disk_io_counters(self):
def check_ntuple(nt):
assert nt[0] == nt.read_count
assert nt[1] == nt.write_count
assert nt[2] == nt.read_bytes
assert nt[3] == nt.write_bytes
if not (OPENBSD or NETBSD):
assert nt[4] == nt.read_time
assert nt[5] == nt.write_time
if LINUX:
assert nt[6] == nt.read_merged_count
assert nt[7] == nt.write_merged_count
assert nt[8] == nt.busy_time
elif FREEBSD:
assert nt[6] == nt.busy_time
for name in nt._fields:
assert getattr(nt, name) >= 0, nt
ret = psutil.disk_io_counters(perdisk=False)
assert ret is not None, "no disks on this system?"
check_ntuple(ret)
ret = psutil.disk_io_counters(perdisk=True)
# make sure there are no duplicates
assert len(ret) == len(set(ret))
for key in ret:
assert key, key
check_ntuple(ret[key])
def test_disk_io_counters_no_disks(self):
# Emulate a case where no disks are installed, see:
# https://github.com/giampaolo/psutil/issues/1062
with mock.patch(
'psutil._psplatform.disk_io_counters', return_value={}
) as m:
assert psutil.disk_io_counters(perdisk=False) is None
assert psutil.disk_io_counters(perdisk=True) == {}
assert m.called
|
TestDiskAPIs
|
python
|
getsentry__sentry
|
src/sentry/grouping/component.py
|
{
"start": 10526,
"end": 10612
}
|
class ____(BaseGroupingComponent[str]):
id: str = "message"
|
MessageGroupingComponent
|
python
|
bokeh__bokeh
|
src/bokeh/core/query.py
|
{
"start": 7318,
"end": 7642
}
|
class ____(_Operator):
''' Predicate to test if property values are greater than some value.
Construct and ``GT`` predicate as a dict with ``GT`` as the key,
and a value to compare against.
.. code-block:: python
# matches any models with .size > 10
dict(size={ GT: 10 })
'''
pass
|
GT
|
python
|
oauthlib__oauthlib
|
oauthlib/openid/connect/core/exceptions.py
|
{
"start": 1223,
"end": 1794
}
|
class ____(OpenIDClientError):
"""
The End-User is REQUIRED to select a session at the Authorization Server.
The End-User MAY be authenticated at the Authorization Server with
different associated accounts, but the End-User did not select a session.
This error MAY be returned when the prompt parameter value in the
Authentication Request is none, but the Authentication Request cannot be
completed without displaying a user interface to prompt for a session to
use.
"""
error = 'account_selection_required'
|
AccountSelectionRequired
|
python
|
pytorch__pytorch
|
tools/testing/target_determination/heuristics/public_bindings.py
|
{
"start": 336,
"end": 1277
}
|
class ____(HeuristicInterface):
# Literally just a heuristic for test_public_bindings. Pretty much anything
# that changes the public API can affect this testp
test_public_bindings = "test_public_bindings"
additional_files = ["test/allowlist_for_publicAPI.json"]
def __init__(self, **kwargs: dict[str, Any]) -> None:
super().__init__(**kwargs)
def get_prediction_confidence(self, tests: list[str]) -> TestPrioritizations:
test_ratings = {}
try:
changed_files = query_changed_files()
except Exception as e:
warn(f"Can't query changed test files due to {e}")
changed_files = []
if any(
file.startswith("torch/") or file in self.additional_files
for file in changed_files
):
test_ratings[TestRun(self.test_public_bindings)] = 1.0
return TestPrioritizations(tests, test_ratings)
|
PublicBindings
|
python
|
pytorch__pytorch
|
torch/_dynamo/replay_record.py
|
{
"start": 953,
"end": 1086
}
|
class ____:
module: ModuleType
accessed_attrs: dict[str, Any] = field(default_factory=dict)
@dataclasses.dataclass
|
ModuleRecord
|
python
|
TheAlgorithms__Python
|
graphs/prim.py
|
{
"start": 237,
"end": 3506
}
|
class ____:
"""Class Vertex."""
def __init__(self, id_):
"""
Arguments:
id - input an id to identify the vertex
Attributes:
neighbors - a list of the vertices it is linked to
edges - a dict to store the edges's weight
"""
self.id = str(id_)
self.key = None
self.pi = None
self.neighbors = []
self.edges = {} # {vertex:distance}
def __lt__(self, other):
"""Comparison rule to < operator."""
return self.key < other.key
def __repr__(self):
"""Return the vertex id."""
return self.id
def add_neighbor(self, vertex):
"""Add a pointer to a vertex at neighbor's list."""
self.neighbors.append(vertex)
def add_edge(self, vertex, weight):
"""Destination vertex and weight."""
self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1])
graph[b - 1].add_neighbor(graph[a - 1])
# add the edges:
graph[a - 1].add_edge(graph[b - 1], edge)
graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
"""Prim's Algorithm.
Runtime:
O(mn) with `m` edges and `n` vertices
Return:
List with the edges of a Minimum Spanning Tree
Usage:
prim(graph, graph[0])
"""
a = []
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
q = graph[:]
while q:
u = min(q)
q.remove(u)
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
for i in range(1, len(graph)):
a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
"""Prim's Algorithm with min heap.
Runtime:
O((m + n)log n) with `m` edges and `n` vertices
Yield:
Edges of a Minimum Spanning Tree
Usage:
prim(graph, graph[0])
"""
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
h = list(graph)
hq.heapify(h)
while h:
u = hq.heappop(h)
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
hq.heapify(h)
for i in range(1, len(graph)):
yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
"""
# Creates a list to store x vertices.
>>> x = 5
>>> G = [Vertex(n) for n in range(x)]
>>> connect(G, 1, 2, 15)
>>> connect(G, 1, 3, 12)
>>> connect(G, 2, 4, 13)
>>> connect(G, 2, 5, 5)
>>> connect(G, 3, 2, 6)
>>> connect(G, 3, 4, 6)
>>> connect(G, 0, 0, 0) # Generate the minimum spanning tree:
>>> G_heap = G[:]
>>> MST = prim(G, G[0])
>>> MST_heap = prim_heap(G, G[0])
>>> for i in MST:
... print(i)
(2, 3)
(3, 1)
(4, 3)
(5, 2)
>>> for i in MST_heap:
... print(i)
(2, 3)
(3, 1)
(4, 3)
(5, 2)
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
|
Vertex
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.