language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/releases/endpoints/organization_release_files.py | {
"start": 604,
"end": 4341
} | class ____(OrganizationReleasesBaseEndpoint, ReleaseFilesMixin):
publish_status = {
"GET": ApiPublishStatus.UNKNOWN,
"POST": ApiPublishStatus.UNKNOWN,
}
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.IP: RateLimit(limit=40, window=1),
RateLimitCategory.USER: RateLimit(limit=40, window=1),
RateLimitCategory.ORGANIZATION: RateLimit(limit=40, window=1),
},
"POST": {
RateLimitCategory.IP: RateLimit(limit=40, window=1),
RateLimitCategory.USER: RateLimit(limit=40, window=1),
RateLimitCategory.ORGANIZATION: RateLimit(limit=40, window=1),
},
}
)
def get(self, request: Request, organization, version) -> Response:
"""
List an Organization Release's Files
````````````````````````````````````
Retrieve a list of files for a given release.
:pparam string organization_id_or_slug: the id or slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:qparam string query: If set, only files with these partial names will be returned.
:qparam string checksum: If set, only files with these exact checksums will be returned.
:auth: required
"""
try:
release = Release.objects.get(organization_id=organization.id, version=version)
except Release.DoesNotExist:
raise ResourceDoesNotExist
if not self.has_release_permission(request, organization, release):
raise ResourceDoesNotExist
return self.get_releasefiles(request, release, organization.id)
def post(self, request: Request, organization, version) -> Response:
"""
Upload a New Organization Release File
``````````````````````````````````````
Upload a new file for the given release.
Unlike other API requests, files must be uploaded using the
traditional multipart/form-data content-type.
Requests to this endpoint should use the region-specific domain
eg. `us.sentry.io` or `de.sentry.io`
The optional 'name' attribute should reflect the absolute path
that this file will be referenced as. For example, in the case of
JavaScript you might specify the full web URI.
:pparam string organization_id_or_slug: the id or slug of the organization the
release belongs to.
:pparam string version: the version identifier of the release.
:param string name: the name (full path) of the file.
:param file file: the multipart encoded file.
:param string dist: the name of the dist.
:param string header: this parameter can be supplied multiple times
to attach headers to the file. Each header
is a string in the format ``key:value``. For
instance it can be used to define a content
type.
:auth: required
"""
try:
release = Release.objects.get(organization_id=organization.id, version=version)
except Release.DoesNotExist:
raise ResourceDoesNotExist
logger = logging.getLogger("sentry.files")
logger.info("organizationreleasefile.start")
if not self.has_release_permission(request, organization, release):
raise ResourceDoesNotExist
return self.post_releasefile(request, release, logger)
| OrganizationReleaseFilesEndpoint |
python | huggingface__transformers | src/transformers/models/nemotron/configuration_nemotron.py | {
"start": 923,
"end": 7851
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`NemotronModel`]. It is used to instantiate an Nemotron
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Nemotron-8B.
e.g. [nvidia/nemotron-3-8b-base-4k-hf](https://huggingface.co/nvidia/nemotron-3-8b-base-4k-hf).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256000):
Vocabulary size of the Nemotron model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`NemotronModel`]
hidden_size (`int`, *optional*, defaults to 6144):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 24576):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 48):
Number of attention heads for each attention layer in the Transformer decoder.
head_dim (`int`, *optional*):
Projection weights dimension in multi-head attention. Set to hidden_size // num_attention_heads if None
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.0134):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 2):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 3):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj and down_proj layers in the MLP layers.
```python
>>> from transformers import NemotronModel, NemotronConfig
>>> # Initializing a Nemotron nemotron-15b style configuration
>>> configuration = NemotronConfig()
>>> # Initializing a model from the nemotron-15b style configuration
>>> model = NemotronModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "nemotron"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size: Optional[int] = 256000,
hidden_size: Optional[int] = 6144,
intermediate_size: Optional[int] = 24576,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 48,
head_dim: Optional[int] = None,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "relu2",
max_position_embeddings: Optional[int] = 4096,
initializer_range: Optional[float] = 0.0134,
norm_eps: Optional[int] = 1e-5,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 2,
eos_token_id: Optional[int] = 3,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
mlp_bias: Optional[bool] = False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.head_dim = head_dim if head_dim is not None else hidden_size // num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.norm_eps = norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.rope_parameters = rope_parameters
kwargs.setdefault("partial_rotary_factor", 0.5) # assign default for BC
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["NemotronConfig"]
| NemotronConfig |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 466099,
"end": 467041
} | class ____(UnopNode):
# 'not' operator
#
# operand ExprNode
operator = '!'
type = PyrexTypes.c_bint_type
def calculate_constant_result(self):
self.constant_result = not self.operand.constant_result
def compile_time_value(self, denv):
operand = self.operand.compile_time_value(denv)
try:
return not operand
except Exception as e:
self.compile_time_value_error(e)
def infer_unop_type(self, env, operand_type):
return PyrexTypes.c_bint_type
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
operand_type = self.operand.type
if operand_type.is_cpp_class:
self.analyse_cpp_operation(env)
else:
self.operand = self.operand.coerce_to_boolean(env)
return self
def calculate_result_code(self):
return "(!%s)" % self.operand.result()
| NotNode |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/io_management/input_managers.py | {
"start": 2554,
"end": 3782
} | class ____(BetterPandasIOManager):
def load_input(self, context: dg.InputContext) -> np.ndarray: # pyright: ignore[reportIncompatibleMethodOverride]
file_path = self._get_path(context.upstream_output)
array = np.genfromtxt(file_path, delimiter=",", dtype=None)
return array
@dg.op(ins={"np_array_input": dg.In(input_manager_key="better_numpy_manager")})
def better_analyze_as_numpy(np_array_input: np.ndarray):
assert isinstance(np_array_input, np.ndarray)
@dg.job(
resource_defs={
"numpy_manager": MyBetterNumpyLoader(),
"io_manager": BetterPandasIOManager(),
}
)
def my_better_job():
df = produce_pandas_output()
better_analyze_as_numpy(df)
# end_better_input_manager
# start_load_unconnected_via_fn
@dg.input_manager
def simple_table_1_manager():
return read_dataframe_from_table(name="table_1")
@dg.op(ins={"dataframe": dg.In(input_manager_key="simple_load_input_manager")})
def my_op(dataframe):
"""Do some stuff."""
dataframe.head()
@dg.job(resource_defs={"simple_load_input_manager": simple_table_1_manager})
def simple_load_table_job():
my_op()
# end_load_unconnected_via_fn
# start_load_unconnected_input
| MyBetterNumpyLoader |
python | readthedocs__readthedocs.org | readthedocs/api/v3/views.py | {
"start": 17901,
"end": 18677
} | class ____(
APIv3Settings,
NestedViewSetMixin,
ProjectQuerySetMixin,
FlexFieldsMixin,
ListModelMixin,
RetrieveModelMixin,
UpdateMixin,
UpdateModelMixin,
GenericViewSet,
):
model = Notification
lookup_field = "pk"
lookup_url_kwarg = "notification_pk"
serializer_class = NotificationSerializer
filterset_class = NotificationFilter
# We need to show build notifications to anonymous users
# on public builds (the queryset will filter them out).
# We allow project admins to edit notifications.
permission_classes = [ReadOnlyPermission | (IsAuthenticated & IsProjectAdmin)]
def get_queryset(self):
build = self._get_parent_build()
return build.notifications.all()
| NotificationsBuildViewSet |
python | django__django | tests/unmanaged_models/models.py | {
"start": 2560,
"end": 2651
} | class ____(models.Model):
class Meta:
db_table = "unmanaged_models_proxy2"
| Proxy2 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 130755,
"end": 131522
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of AddProjectCard"""
__schema__ = github_schema
__field_names__ = ("project_column_id", "content_id", "note", "client_mutation_id")
project_column_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectColumnId")
"""The Node ID of the ProjectColumn."""
content_id = sgqlc.types.Field(ID, graphql_name="contentId")
"""The content of the card. Must be a member of the ProjectCardItem
union
"""
note = sgqlc.types.Field(String, graphql_name="note")
"""The note on the card."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| AddProjectCardInput |
python | conda__conda | conda/exceptions.py | {
"start": 19133,
"end": 19279
} | class ____(CondaError, ImportError):
def __init__(self, message: str):
msg = f"{message}"
super().__init__(msg)
| CondaImportError |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 2779,
"end": 7837
} | class ____(metaclass=DataTypeClass):
"""Base class for all Polars data types."""
def _string_repr(self) -> str:
return _dtype_str_repr(self)
@overload # type: ignore[override]
def __eq__(self, other: pl.DataTypeExpr) -> pl.Expr: ...
@overload
def __eq__(self, other: PolarsDataType) -> bool: ...
def __eq__(self, other: pl.DataTypeExpr | PolarsDataType) -> pl.Expr | bool:
if isinstance(other, pl.DataTypeExpr):
return self.to_dtype_expr() == other
elif type(other) is DataTypeClass:
return issubclass(other, type(self))
else:
return isinstance(other, type(self))
def __hash__(self) -> int:
return hash(self.__class__)
def __repr__(self) -> str:
return self.__class__.__name__
@classmethod
def base_type(cls) -> DataTypeClass:
"""
Return this DataType's fundamental/root type class.
Examples
--------
>>> pl.Datetime("ns").base_type()
Datetime
>>> pl.List(pl.Int32).base_type()
List
>>> pl.Struct([pl.Field("a", pl.Int64), pl.Field("b", pl.Boolean)]).base_type()
Struct
"""
return cls
@classinstmethod
def is_(self, other: PolarsDataType) -> bool:
"""
Check if this DataType is the same as another DataType.
This is a stricter check than `self == other`, as it enforces an exact
match of all dtype attributes for nested and/or uninitialised dtypes.
Parameters
----------
other
the other Polars dtype to compare with.
Examples
--------
>>> pl.List == pl.List(pl.Int32)
True
>>> pl.List.is_(pl.List(pl.Int32))
False
"""
return self == other and hash(self) == hash(other)
@classmethod
def is_numeric(cls) -> bool:
"""Check whether the data type is a numeric type."""
return issubclass(cls, NumericType)
@classmethod
def is_decimal(cls) -> bool:
"""Check whether the data type is a decimal type."""
return issubclass(cls, Decimal)
@classmethod
def is_integer(cls) -> bool:
"""Check whether the data type is an integer type."""
return issubclass(cls, IntegerType)
@classmethod
def is_object(cls) -> bool:
"""Check whether the data type is an object type."""
return issubclass(cls, ObjectType)
@classmethod
def is_signed_integer(cls) -> bool:
"""Check whether the data type is a signed integer type."""
return issubclass(cls, SignedIntegerType)
@classmethod
def is_unsigned_integer(cls) -> bool:
"""Check whether the data type is an unsigned integer type."""
return issubclass(cls, UnsignedIntegerType)
@classmethod
def is_float(cls) -> bool:
"""Check whether the data type is a floating point type."""
return issubclass(cls, FloatType)
@classmethod
def is_temporal(cls) -> bool:
"""Check whether the data type is a temporal type."""
return issubclass(cls, TemporalType)
@classmethod
def is_nested(cls) -> bool:
"""Check whether the data type is a nested type."""
return issubclass(cls, NestedType)
@classmethod
def from_python(cls, py_type: PythonDataType) -> PolarsDataType:
"""
Return the Polars data type corresponding to a given Python type.
Notes
-----
Not every Python type has a corresponding Polars data type; in general
you should declare Polars data types explicitly to exactly specify
the desired type and its properties (such as scale/unit).
Examples
--------
>>> pl.DataType.from_python(int)
Int64
>>> pl.DataType.from_python(float)
Float64
>>> from datetime import tzinfo
>>> pl.DataType.from_python(tzinfo) # doctest: +SKIP
TypeError: cannot parse input <class 'datetime.tzinfo'> into Polars data type
"""
from polars.datatypes._parse import parse_into_dtype
return parse_into_dtype(py_type)
@classinstmethod
def to_python(self) -> PythonDataType:
"""
Return the Python type corresponding to this Polars data type.
Examples
--------
>>> pl.Int16().to_python()
<class 'int'>
>>> pl.Float32().to_python()
<class 'float'>
>>> pl.Array(pl.Date(), 10).to_python()
<class 'list'>
"""
from polars.datatypes import dtype_to_py_type
return dtype_to_py_type(self)
@classinstmethod
def to_dtype_expr(self) -> pl.DataTypeExpr:
"""
Return a :class:`DataTypeExpr` with a static :class:`DataType`.
Examples
--------
>>> pl.Int16().to_dtype_expr().collect_dtype({})
Int16
"""
from polars._plr import PyDataTypeExpr
return pl.DataTypeExpr._from_pydatatype_expr(PyDataTypeExpr.from_dtype(self))
| DataType |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/side_channel/incoming_message.py | {
"start": 40,
"end": 3366
} | class ____:
"""
Utility class for reading the message written to a SideChannel.
Values must be read in the order they were written.
"""
def __init__(self, buffer: bytes, offset: int = 0):
"""
Create a new IncomingMessage from the bytes.
"""
self.buffer = buffer
self.offset = offset
def read_bool(self, default_value: bool = False) -> bool:
"""
Read a boolean value from the message buffer.
:param default_value: Default value to use if the end of the message is reached.
:return: The value read from the message, or the default value if the end was reached.
"""
if self._at_end_of_buffer():
return default_value
val = struct.unpack_from("<?", self.buffer, self.offset)[0]
self.offset += 1
return val
def read_int32(self, default_value: int = 0) -> int:
"""
Read an integer value from the message buffer.
:param default_value: Default value to use if the end of the message is reached.
:return: The value read from the message, or the default value if the end was reached.
"""
if self._at_end_of_buffer():
return default_value
val = struct.unpack_from("<i", self.buffer, self.offset)[0]
self.offset += 4
return val
def read_float32(self, default_value: float = 0.0) -> float:
"""
Read a float value from the message buffer.
:param default_value: Default value to use if the end of the message is reached.
:return: The value read from the message, or the default value if the end was reached.
"""
if self._at_end_of_buffer():
return default_value
val = struct.unpack_from("<f", self.buffer, self.offset)[0]
self.offset += 4
return val
def read_float32_list(self, default_value: List[float] = None) -> List[float]:
"""
Read a list of float values from the message buffer.
:param default_value: Default value to use if the end of the message is reached.
:return: The value read from the message, or the default value if the end was reached.
"""
if self._at_end_of_buffer():
return [] if default_value is None else default_value
list_len = self.read_int32()
output = []
for _ in range(list_len):
output.append(self.read_float32())
return output
def read_string(self, default_value: str = "") -> str:
"""
Read a string value from the message buffer.
:param default_value: Default value to use if the end of the message is reached.
:return: The value read from the message, or the default value if the end was reached.
"""
if self._at_end_of_buffer():
return default_value
encoded_str_len = self.read_int32()
val = self.buffer[self.offset : self.offset + encoded_str_len].decode("ascii")
self.offset += encoded_str_len
return val
def get_raw_bytes(self) -> bytes:
"""
Get a copy of the internal bytes used by the message.
"""
return bytearray(self.buffer)
def _at_end_of_buffer(self) -> bool:
return self.offset >= len(self.buffer)
| IncomingMessage |
python | PyCQA__bandit | tests/unit/core/test_issue.py | {
"start": 226,
"end": 4728
} | class ____(testtools.TestCase):
def test_issue_create(self):
new_issue = _get_issue_instance()
self.assertIsInstance(new_issue, issue.Issue)
def test_issue_str(self):
test_issue = _get_issue_instance()
expect = (
"Issue: 'Test issue' from B999:bandit_plugin:"
" CWE: %s,"
" Severity: MEDIUM "
"Confidence: MEDIUM at code.py:1:8"
)
self.assertEqual(
expect % str(issue.Cwe(issue.Cwe.MULTIPLE_BINDS)), str(test_issue)
)
def test_issue_as_dict(self):
test_issue = _get_issue_instance()
test_issue_dict = test_issue.as_dict(with_code=False)
self.assertIsInstance(test_issue_dict, dict)
self.assertEqual("code.py", test_issue_dict["filename"])
self.assertEqual("bandit_plugin", test_issue_dict["test_name"])
self.assertEqual("B999", test_issue_dict["test_id"])
self.assertEqual("MEDIUM", test_issue_dict["issue_severity"])
self.assertEqual(
{
"id": 605,
"link": "https://cwe.mitre.org/data/definitions/605.html",
},
test_issue_dict["issue_cwe"],
)
self.assertEqual("MEDIUM", test_issue_dict["issue_confidence"])
self.assertEqual("Test issue", test_issue_dict["issue_text"])
self.assertEqual(1, test_issue_dict["line_number"])
self.assertEqual([], test_issue_dict["line_range"])
self.assertEqual(8, test_issue_dict["col_offset"])
self.assertEqual(16, test_issue_dict["end_col_offset"])
def test_issue_filter_severity(self):
levels = [bandit.LOW, bandit.MEDIUM, bandit.HIGH]
issues = [_get_issue_instance(level, bandit.HIGH) for level in levels]
for level in levels:
rank = constants.RANKING.index(level)
for i in issues:
test = constants.RANKING.index(i.severity)
result = i.filter(level, bandit.UNDEFINED)
self.assertTrue((test >= rank) == result)
def test_issue_filter_confidence(self):
levels = [bandit.LOW, bandit.MEDIUM, bandit.HIGH]
issues = [_get_issue_instance(bandit.HIGH, level) for level in levels]
for level in levels:
rank = constants.RANKING.index(level)
for i in issues:
test = constants.RANKING.index(i.confidence)
result = i.filter(bandit.UNDEFINED, level)
self.assertTrue((test >= rank) == result)
def test_matches_issue(self):
issue_a = _get_issue_instance()
issue_b = _get_issue_instance(severity=bandit.HIGH)
issue_c = _get_issue_instance(confidence=bandit.LOW)
issue_d = _get_issue_instance()
issue_d.text = "ABCD"
issue_e = _get_issue_instance()
issue_e.fname = "file1.py"
issue_f = issue_a
issue_g = _get_issue_instance()
issue_g.test = "ZZZZ"
issue_h = issue_a
issue_h.lineno = 12345
# positive tests
self.assertEqual(issue_a, issue_a)
self.assertEqual(issue_a, issue_f)
self.assertEqual(issue_f, issue_a)
# severity doesn't match
self.assertNotEqual(issue_a, issue_b)
# confidence doesn't match
self.assertNotEqual(issue_a, issue_c)
# text doesn't match
self.assertNotEqual(issue_a, issue_d)
# filename doesn't match
self.assertNotEqual(issue_a, issue_e)
# plugin name doesn't match
self.assertNotEqual(issue_a, issue_g)
# line number doesn't match but should pass because we don't test that
self.assertEqual(issue_a, issue_h)
@mock.patch("linecache.getline")
def test_get_code(self, getline):
getline.return_value = b"\x08\x30"
new_issue = issue.Issue(
bandit.MEDIUM, cwe=issue.Cwe.MULTIPLE_BINDS, lineno=1
)
try:
new_issue.get_code()
except UnicodeDecodeError:
self.fail("Bytes not properly decoded in issue.get_code()")
def _get_issue_instance(
severity=bandit.MEDIUM,
cwe=issue.Cwe.MULTIPLE_BINDS,
confidence=bandit.MEDIUM,
):
new_issue = issue.Issue(severity, cwe, confidence, "Test issue")
new_issue.fname = "code.py"
new_issue.test = "bandit_plugin"
new_issue.test_id = "B999"
new_issue.lineno = 1
new_issue.col_offset = 8
new_issue.end_col_offset = 16
return new_issue
| IssueTests |
python | wepe__MachineLearning | DeepLearning Tutorials/FaceRecognition_CNN(olivettifaces)/use_CNN_olivettifaces.py | {
"start": 1408,
"end": 2213
} | class ____(object):
def __init__(self, input, params_W,params_b,n_in, n_out):
self.W = params_W
self.b = params_b
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
self.params = [self.W, self.b]
def negative_log_likelihood(self, y):
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
if y.ndim != self.y_pred.ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type)
)
if y.dtype.startswith('int'):
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
| LogisticRegression |
python | ray-project__ray | rllib/examples/envs/classes/transformed_action_space_env.py | {
"start": 51,
"end": 2044
} | class ____(gym.ActionWrapper):
def __init__(self, env, low, high):
super().__init__(env)
self._low = low
self._high = high
self.action_space = type(env.action_space)(
self._low, self._high, env.action_space.shape, env.action_space.dtype
)
def action(self, action):
return (action - self._low) / (self._high - self._low) * (
self.env.action_space.high - self.env.action_space.low
) + self.env.action_space.low
def transform_action_space(env_name_or_creator) -> Type[gym.Env]:
"""Wrapper for gym.Envs to have their action space transformed.
Args:
env_name_or_creator (Union[str, Callable[]]: String specifier or
env_maker function.
Returns:
New transformed_action_space_env function that returns an environment
wrapped by the ActionTransform wrapper. The constructor takes a
config dict with `_low` and `_high` keys specifying the new action
range (default -1.0 to 1.0). The reset of the config dict will be
passed on to the underlying/wrapped env's constructor.
.. testcode::
:skipif: True
# By gym string:
pendulum_300_to_500_cls = transform_action_space("Pendulum-v1")
# Create a transformed pendulum env.
pendulum_300_to_500 = pendulum_300_to_500_cls({"_low": -15.0})
pendulum_300_to_500.action_space
.. testoutput::
gym.spaces.Box(-15.0, 1.0, (1, ), "float32")
"""
def transformed_action_space_env(config):
if isinstance(env_name_or_creator, str):
inner_env = gym.make(env_name_or_creator)
else:
inner_env = env_name_or_creator(config)
_low = config.pop("low", -1.0)
_high = config.pop("high", 1.0)
env = ActionTransform(inner_env, _low, _high)
return env
return transformed_action_space_env
TransformedActionPendulum = transform_action_space("Pendulum-v1")
| ActionTransform |
python | scrapy__scrapy | tests/test_request_left.py | {
"start": 226,
"end": 733
} | class ____(Spider):
name = "signal_catcher"
def __init__(self, crawler, url, *args, **kwargs):
super().__init__(*args, **kwargs)
crawler.signals.connect(self.on_request_left, signal=request_left_downloader)
self.caught_times = 0
self.start_urls = [url]
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
return cls(crawler, *args, **kwargs)
def on_request_left(self, request, spider):
self.caught_times += 1
| SignalCatcherSpider |
python | facebook__pyre-check | tools/upgrade/commands/command.py | {
"start": 1817,
"end": 6683
} | class ____(Command):
def __init__(
self, command_arguments: CommandArguments, repository: Repository
) -> None:
super().__init__(repository)
self._command_arguments: CommandArguments = command_arguments
self._comment: Optional[str] = command_arguments.comment
self._max_line_length: Optional[int] = command_arguments.max_line_length
self._truncate: bool = command_arguments.truncate
self._unsafe: bool = command_arguments.unsafe
self._force_format_unsuppressed: bool = (
command_arguments.force_format_unsuppressed
)
self._lint: bool = command_arguments.lint
self._no_commit: bool = command_arguments.no_commit
self._should_clean: bool = command_arguments.should_clean
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
super(ErrorSuppressingCommand, ErrorSuppressingCommand).add_arguments(parser)
parser.add_argument("--comment", help="Custom comment after fixme comments")
parser.add_argument(
"--max-line-length",
default=88,
type=int,
help="Enforce maximum line length on new comments "
+ "(default: %(default)s, use 0 to set no maximum line length)",
)
parser.add_argument(
"--truncate",
action="store_true",
help="Truncate error messages to maximum line length.",
)
parser.add_argument(
"--unsafe",
action="store_true",
help="Don't check syntax when applying fixmes.",
)
parser.add_argument(
"--force-format-unsuppressed", action="store_true", help=argparse.SUPPRESS
)
parser.add_argument(
"--lint",
action="store_true",
help="Run lint to ensure added fixmes comply with black formatting. \
Doubles the runtime of pyre-ugprade.",
)
parser.add_argument("--no-commit", action="store_true", help=argparse.SUPPRESS)
parser.add_argument(
"--do-not-run-buck-clean",
action="store_false",
dest="should_clean",
default=True,
help=argparse.SUPPRESS,
)
def _apply_suppressions(self, errors: Errors) -> None:
try:
errors.suppress(
self._comment,
self._max_line_length,
self._truncate,
self._unsafe,
)
except PartialErrorSuppression as partial_error_suppression:
if not self._force_format_unsuppressed:
raise partial_error_suppression
self._repository.force_format(partial_error_suppression.unsuppressed_paths)
errors.suppress(
self._comment,
self._max_line_length,
self._truncate,
self._unsafe,
)
def _get_and_suppress_errors(
self,
configuration: Configuration,
error_source: ErrorSource = ErrorSource.GENERATE,
upgrade_version: bool = False,
only_fix_error_code: Optional[int] = None,
fixme_threshold: Optional[int] = None,
fixme_threshold_fallback_mode: LocalMode = LocalMode.IGNORE,
) -> None:
LOG.info("Processing %s", configuration.get_directory())
if not configuration.is_local:
return
if upgrade_version:
if configuration.version:
configuration.remove_version()
configuration.write()
else:
return
errors = (
Errors.from_stdin(only_fix_error_code)
if error_source == ErrorSource.STDIN and not upgrade_version
else configuration.get_errors(
only_fix_error_code, should_clean=self._should_clean
)
)
if len(errors) == 0:
return
if fixme_threshold is None:
self._apply_suppressions(errors)
else:
for path, path_errors in errors.paths_to_errors.items():
path_errors = list(path_errors)
if len(path_errors) > fixme_threshold:
LOG.info(
"%d errors found in `%s`. Adding file-level ignore.",
len(path_errors),
path,
)
add_local_mode(path, fixme_threshold_fallback_mode)
else:
self._apply_suppressions(Errors(path_errors))
# Lint and re-run pyre once to resolve most formatting issues
if self._lint and self._repository.format():
errors = configuration.get_errors(only_fix_error_code, should_clean=False)
self._apply_suppressions(errors)
| ErrorSuppressingCommand |
python | gawel__pyquery | tests/test_pyquery.py | {
"start": 11870,
"end": 12005
} | class ____(TestCase):
def test_typeerror_on_invalid_value(self):
self.assertRaises(TypeError, pq, object())
| TestConstruction |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-measurespace/llama_index/tools/measurespace/base.py | {
"start": 173,
"end": 7777
} | class ____(BaseToolSpec):
"""Measure Space tool spec."""
spec_functions = [
"get_hourly_weather_forecast",
"get_daily_weather_forecast",
"get_daily_climate_forecast",
"get_daily_air_quality_forecast",
"get_latitude_longitude_from_location",
"get_location_from_latitude_longitude",
]
def __init__(self, api_keys: Dict[str, str], unit: str = "metric") -> None:
"""Initialize with parameters."""
try:
import measure_space_api as msa
except ImportError:
raise ImportError(
"The Measure Space tool requires the measure-space-api package to be installed. "
"Please install it using `pip install measure-space-api`."
)
self.api_keys = api_keys
self.unit = unit
self.msa = msa
def _get_api_key(self, api_name: str):
"""
Get API keys.
Args:
api_name (str): API service name
"""
api_key = self.api_keys.get(api_name)
if not api_key:
raise ValueError(
f"API key is required for {api_name} service. Please get your API key from measurespace.io/pricing."
)
return api_key
def _format_output(self, wx: Dict) -> List[str]:
"""
Format output to a list of string with the following format.
['total precipitation: 1 mm, wind speed: 10 m/s', 'total precipitation: 1 mm, wind speed: 10 m/s']
Args:
wx (Dict): API output in json format
"""
wx_list = []
for i in range(len(wx["time"])):
tmp_list = []
for key, value in wx.items():
if key != "time":
a_name, a_unit = self.msa.get_metadata(key, self.unit)
tmp_list.append(f"{a_name}: {value[i]} {a_unit}")
if tmp_list:
wx_list.append(",".join(tmp_list))
return wx_list
def get_hourly_weather_forecast(self, location: str) -> List[Document]:
"""
Get hourly weather forecast for given location.
Args:
location (str): location name
"""
api_key = self._get_api_key("hourly_weather")
geocoding_api_key = self._get_api_key("geocoding")
params = {"variables": "tp, t2m, windSpeed, windDegree, r2"}
wx = self.msa.get_hourly_weather(
api_key,
geocoding_api_key,
location,
params,
)
# Get variable metadata
for x in ["latitude", "longitude"]:
if x in wx:
del wx[x]
output = self._format_output(wx)
documents = []
for i in range(len(wx["time"])):
documents.append(
Document(
text=output[i],
metadata={
"Hourly weather for location": location,
"Date and time": wx["time"][i],
},
)
)
return documents
def get_daily_weather_forecast(self, location: str) -> List[Document]:
"""
Get daily weather forecast for given location.
Args:
location (str): location name
"""
api_key = self._get_api_key("daily_weather")
geocoding_api_key = self._get_api_key("geocoding")
params = {"variables": "tp, minT, maxT, meanwindSpeed, meanwindDegree, meanRH"}
wx = self.msa.get_daily_weather(
api_key,
geocoding_api_key,
location,
params,
)
# Get variable metadata
for x in ["latitude", "longitude"]:
if x in wx:
del wx[x]
output = self._format_output(wx)
documents = []
for i in range(len(wx["time"])):
documents.append(
Document(
text=output[i],
metadata={
"Daily weather for location": location,
"Date": wx["time"][i],
},
)
)
return documents
def get_daily_climate_forecast(self, location: str) -> List[Document]:
"""
Get hourly climate forecast for given location.
Args:
location (str): location name
"""
api_key = self._get_api_key("daily_climate")
geocoding_api_key = self._get_api_key("geocoding")
params = {"variables": "t2m, tmin, tmax, sh2"}
wx = self.msa.get_daily_climate(
api_key,
geocoding_api_key,
location,
params,
)
# Get variable metadata
for x in ["latitude", "longitude"]:
if x in wx:
del wx[x]
output = self._format_output(wx)
documents = []
for i in range(len(wx["time"])):
documents.append(
Document(
text=output[i],
metadata={
"Daily climate for location": location,
"Date": wx["time"][i],
},
)
)
return documents
def get_daily_air_quality_forecast(self, location: str) -> List[Document]:
"""
Get daily air quality forecast for given location.
Args:
location (str): location name
"""
api_key = self._get_api_key("daily_air_quality")
geocoding_api_key = self._get_api_key("geocoding")
params = {"variables": "AQI, maxPM10, maxPM25"}
wx = self.msa.get_daily_air_quality(
api_key,
geocoding_api_key,
location,
params,
)
# Get variable metadata
for x in ["latitude", "longitude"]:
if x in wx:
del wx[x]
output = self._format_output(wx)
documents = []
for i in range(len(wx["time"])):
documents.append(
Document(
text=output[i],
metadata={
"Daily air quality for location": location,
"Date": wx["time"][i],
},
)
)
return documents
def get_latitude_longitude_from_location(self, location: str) -> List[Document]:
"""
Get latitude and longitude from given location.
Args:
location (str): location name
"""
api_key = self._get_api_key("geocoding")
latitude, longitude = self.msa.get_lat_lon_from_city(
api_key=api_key, location_name=location
)
return [
Document(
text=f"latitude: {latitude}, longitude: {longitude}",
metadata={"Latitude and longitude for location": location},
)
]
def get_location_from_latitude_longitude(
self, latitude: float, longitude: float
) -> List[Document]:
"""
Get nearest location name from given latitude and longitude.
Args:
latitude (float): latitude
longitude (float): longitude
"""
api_key = self._get_api_key("geocoding")
res = self.msa.get_city_from_lat_lon(api_key, latitude, longitude)
return [
Document(
text=f"Location name: {res}",
metadata="Nearest location for given longitude and latitude",
)
]
| MeasureSpaceToolSpec |
python | tensorflow__tensorflow | tensorflow/python/distribute/mirrored_strategy_test.py | {
"start": 46749,
"end": 49285
} | class ____(test.TestCase):
def testAssignReplicaLocalVarSumAggregation(self, distribution):
def model_fn():
v_sum = variable_v1.VariableV1(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
return v_sum
with distribution.scope():
sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
self.assertTrue(distribute_utils.is_sync_on_read(sync_on_read_var))
self.evaluate(variables.global_variables_initializer())
# Each replica has a value of 1.0 assigned to it in replica context.
# When we read the value using `read_var` we should see the SUM of each of
# values on each of the replicas.
self.assertEqual(2.0, self.evaluate(
distribution.extended.read_var(sync_on_read_var)))
# Assigning 6.0 in cross replica context will assign a value of
# 6.0/num_replicas to each replica.
tlv_ops = sync_on_read_var.assign(6.0)
self.evaluate(tlv_ops)
# On reading the sync on read var we should get the assigned value back.
# The value on all the replicas are added before being returned by
# `read_var`.
self.assertEqual(6.0, self.evaluate(
distribution.extended.read_var(sync_on_read_var)))
def testAssignReplicaLocalVarMeanAggregation(self, distribution):
def model_fn():
v_sum = variable_v1.VariableV1(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.MEAN)
return v_sum
with distribution.scope():
sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
self.assertTrue(distribute_utils.is_sync_on_read(sync_on_read_var))
self.evaluate(variables.global_variables_initializer())
# Each replica has a value of 1.0 assigned to it in replica context.
# When we read the value using `read_var` we should see the MEAN of values
# on all replicas which is the value assigned in replica context.
self.assertEqual(1.0, self.evaluate(
distribution.extended.read_var(sync_on_read_var)))
tlv_ops = sync_on_read_var.assign(6.0)
self.evaluate(tlv_ops)
# On reading the sync on read var we should get the MEAN of all values
# which is equal to the value assigned.
self.assertEqual(6.0, self.evaluate(
distribution.extended.read_var(sync_on_read_var)))
| SyncOnReadVariableAssignTest |
python | django-import-export__django-import-export | tests/core/tests/resources.py | {
"start": 1695,
"end": 1820
} | class ____(resources.ModelResource):
class Meta:
model = WithDefault
fields = ("name",)
| WithDefaultResource |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/files.py | {
"start": 12668,
"end": 24157
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncFilesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
"""
return AsyncFilesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
"""
return AsyncFilesWithStreamingResponse(self)
def list(
self,
*,
after_id: str | Omit = omit,
before_id: str | Omit = omit,
limit: int | Omit = omit,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[FileMetadata, AsyncPage[FileMetadata]]:
"""List Files
Args:
after_id: ID of the object to use as a cursor for pagination.
When provided, returns the
page of results immediately after this object.
before_id: ID of the object to use as a cursor for pagination. When provided, returns the
page of results immediately before this object.
limit: Number of items to return per page.
Defaults to `20`. Ranges from `1` to `1000`.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
return self._get_api_list(
"/v1/files?beta=true",
page=AsyncPage[FileMetadata],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after_id": after_id,
"before_id": before_id,
"limit": limit,
},
file_list_params.FileListParams,
),
),
model=FileMetadata,
)
async def delete(
self,
file_id: str,
*,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> DeletedFile:
"""
Delete File
Args:
file_id: ID of the File.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
return await self._delete(
f"/v1/files/{file_id}?beta=true",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=DeletedFile,
)
async def download(
self,
file_id: str,
*,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncBinaryAPIResponse:
"""
Download File
Args:
file_id: ID of the File.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {"Accept": "application/binary", **(extra_headers or {})}
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
return await self._get(
f"/v1/files/{file_id}/content?beta=true",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=AsyncBinaryAPIResponse,
)
async def retrieve_metadata(
self,
file_id: str,
*,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileMetadata:
"""
Get File Metadata
Args:
file_id: ID of the File.
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not file_id:
raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
return await self._get(
f"/v1/files/{file_id}?beta=true",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileMetadata,
)
async def upload(
self,
*,
file: FileTypes,
betas: List[AnthropicBetaParam] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> FileMetadata:
"""
Upload File
Args:
file: The file to upload
betas: Optional header to specify the beta version(s) you want to use.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
extra_headers = {
**strip_not_given(
{
"anthropic-beta": ",".join(chain((str(e) for e in betas), ["files-api-2025-04-14"]))
if is_given(betas)
else not_given
}
),
**(extra_headers or {}),
}
extra_headers = {"anthropic-beta": "files-api-2025-04-14", **(extra_headers or {})}
body = deepcopy_minimal({"file": file})
files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers["Content-Type"] = "multipart/form-data"
return await self._post(
"/v1/files?beta=true",
body=await async_maybe_transform(body, file_upload_params.FileUploadParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=FileMetadata,
)
| AsyncFiles |
python | scrapy__scrapy | tests/test_http2_client_protocol.py | {
"start": 5205,
"end": 5970
} | class ____(LeafResource):
"""Sends all the headers received as a response"""
def render_GET(self, request: TxRequest):
request.setHeader("Content-Type", "application/json; charset=UTF-8")
request.setHeader("Content-Encoding", "UTF-8")
headers = {}
for k, v in request.requestHeaders.getAllRawHeaders():
headers[str(k, "utf-8")] = str(v[0], "utf-8")
return bytes(json.dumps(headers), "utf-8")
def make_request_dfd(client: H2ClientProtocol, request: Request) -> Deferred[Response]:
return client.request(request, DummySpider())
async def make_request(client: H2ClientProtocol, request: Request) -> Response:
return await maybe_deferred_to_future(make_request_dfd(client, request))
| RequestHeaders |
python | numba__numba | numba/tests/test_struct_ref.py | {
"start": 2979,
"end": 7579
} | class ____(MemoryLeakMixin, TestCase):
def test_structref_type(self):
sr = types.StructRef([('a', types.int64)])
self.assertEqual(sr.field_dict['a'], types.int64)
sr = types.StructRef([('a', types.int64), ('b', types.float64)])
self.assertEqual(sr.field_dict['a'], types.int64)
self.assertEqual(sr.field_dict['b'], types.float64)
# bad case
with self.assertRaisesRegex(ValueError,
"expecting a str for field name"):
types.StructRef([(1, types.int64)])
with self.assertRaisesRegex(ValueError,
"expecting a Numba Type for field type"):
types.StructRef([('a', 123)])
def test_invalid_uses(self):
with self.assertRaisesRegex(ValueError, "cannot register"):
structref.register(types.StructRef)
with self.assertRaisesRegex(ValueError, "cannot register"):
structref.define_boxing(types.StructRef, MyStruct)
def test_MySimplerStructType(self):
vs = np.arange(10, dtype=np.intp)
ctr = 13
first_expected = vs + vs
first_got = ctor_by_intrinsic(vs, ctr)
# the returned instance is a structref.StructRefProxy
# but not a MyStruct
self.assertNotIsInstance(first_got, MyStruct)
self.assertPreciseEqual(first_expected, get_values(first_got))
second_expected = first_expected + (ctr * ctr)
second_got = compute_fields(first_got)
self.assertPreciseEqual(second_expected, second_got)
def test_MySimplerStructType_wrapper_has_no_attrs(self):
vs = np.arange(10, dtype=np.intp)
ctr = 13
wrapper = ctor_by_intrinsic(vs, ctr)
self.assertIsInstance(wrapper, structref.StructRefProxy)
with self.assertRaisesRegex(AttributeError, 'values'):
wrapper.values
with self.assertRaisesRegex(AttributeError, 'counter'):
wrapper.counter
def test_MyStructType(self):
vs = np.arange(10, dtype=np.float64)
ctr = 11
first_expected_arr = vs.copy()
first_got = ctor_by_class(vs, ctr)
self.assertIsInstance(first_got, MyStruct)
self.assertPreciseEqual(first_expected_arr, first_got.values)
second_expected = first_expected_arr + ctr
second_got = compute_fields(first_got)
self.assertPreciseEqual(second_expected, second_got)
self.assertEqual(first_got.counter, ctr)
def test_MyStructType_mixed_types(self):
# structref constructor is generic
@njit
def mixed_type(x, y, m, n):
return MyStruct(x, y), MyStruct(m, n)
a, b = mixed_type(1, 2.3, 3.4j, (4,))
self.assertEqual(a.values, 1)
self.assertEqual(a.counter, 2.3)
self.assertEqual(b.values, 3.4j)
self.assertEqual(b.counter, (4,))
def test_MyStructType_in_dict(self):
td = typed.Dict()
td['a'] = MyStruct(1, 2.3)
self.assertEqual(td['a'].values, 1)
self.assertEqual(td['a'].counter, 2.3)
# overwrite
td['a'] = MyStruct(2, 3.3)
self.assertEqual(td['a'].values, 2)
self.assertEqual(td['a'].counter, 3.3)
# mutate
td['a'].values += 10
self.assertEqual(td['a'].values, 12) # changed
self.assertEqual(td['a'].counter, 3.3) # unchanged
# insert
td['b'] = MyStruct(4, 5.6)
def test_MyStructType_in_dict_mixed_type_error(self):
self.disable_leak_check()
td = typed.Dict()
td['a'] = MyStruct(1, 2.3)
self.assertEqual(td['a'].values, 1)
self.assertEqual(td['a'].counter, 2.3)
# ERROR: store different types
with self.assertRaisesRegex(errors.TypingError,
r"Cannot cast numba.MyStructType"):
# because first field is not a float;
# the second field is now an integer.
td['b'] = MyStruct(2.3, 1)
def test_MyStructType_hash_no_typeof_recursion(self):
# Tests that __hash__ is not called prematurely in typeof
# causing infinite recursion (see #8241).
st = MyStruct(1, 2)
typeof(st)
self.assertEqual(hash(st), 3)
@overload_method(MyStructType, "testme")
def _ol_mystructtype_testme(self, arg):
def impl(self, arg):
return self.values * arg + self.counter
return impl
@overload_attribute(MyStructType, "prop")
def _ol_mystructtype_prop(self):
def get(self):
return self.values, self.counter
return get
| TestStructRefBasic |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/datamodels/dagrun.py | {
"start": 1026,
"end": 1245
} | class ____(StrictBaseModel):
"""Schema for Trigger DAG Run API request."""
logical_date: UtcDateTime | None = None
conf: dict = Field(default_factory=dict)
reset_dag_run: bool = False
| TriggerDAGRunPayload |
python | sqlalchemy__sqlalchemy | test/orm/test_dataclasses.py | {
"start": 774,
"end": 7235
} | class ____(fixtures.MappedTest, testing.AssertsCompiledSQL):
@classmethod
def define_tables(cls, metadata):
Table(
"accounts",
metadata,
Column("account_id", Integer, primary_key=True),
Column("widget_count", Integer, nullable=False),
)
Table(
"widgets",
metadata,
Column("widget_id", Integer, primary_key=True),
Column(
"account_id",
Integer,
ForeignKey("accounts.account_id"),
nullable=False,
),
Column("type", String(30), nullable=False),
Column("name", String(30), nullable=False),
Column("magic", Boolean),
)
@classmethod
def setup_classes(cls):
@dataclasses.dataclass
class Widget:
name: Optional[str] = None
@dataclasses.dataclass
class SpecialWidget(Widget):
magic: bool = False
@dataclasses.dataclass
class Account:
account_id: int
widgets: List[Widget] = dataclasses.field(default_factory=list)
widget_count: int = dataclasses.field(init=False)
def __post_init__(self):
self.widget_count = len(self.widgets)
def add_widget(self, widget: Widget):
self.widgets.append(widget)
self.widget_count += 1
cls.classes.Account = Account
cls.classes.Widget = Widget
cls.classes.SpecialWidget = SpecialWidget
@classmethod
def setup_mappers(cls):
accounts = cls.tables.accounts
widgets = cls.tables.widgets
Account = cls.classes.Account
Widget = cls.classes.Widget
SpecialWidget = cls.classes.SpecialWidget
cls.mapper_registry.map_imperatively(
Widget,
widgets,
polymorphic_on=widgets.c.type,
polymorphic_identity="normal",
)
cls.mapper_registry.map_imperatively(
SpecialWidget,
widgets,
inherits=Widget,
polymorphic_identity="special",
)
cls.mapper_registry.map_imperatively(
Account, accounts, properties={"widgets": relationship(Widget)}
)
def check_account_dataclass(self, obj):
assert dataclasses.is_dataclass(obj)
account_id, widgets, widget_count = dataclasses.fields(obj)
eq_(account_id.name, "account_id")
eq_(widget_count.name, "widget_count")
eq_(widgets.name, "widgets")
def check_widget_dataclass(self, obj):
assert dataclasses.is_dataclass(obj)
(name,) = dataclasses.fields(obj)
eq_(name.name, "name")
def check_special_widget_dataclass(self, obj):
assert dataclasses.is_dataclass(obj)
name, magic = dataclasses.fields(obj)
eq_(name.name, "name")
eq_(magic.name, "magic")
def data_fixture(self):
Account = self.classes.Account
Widget = self.classes.Widget
SpecialWidget = self.classes.SpecialWidget
return Account(
account_id=42,
widgets=[Widget("Foo"), SpecialWidget("Bar", magic=True)],
)
def check_data_fixture(self, account):
Widget = self.classes.Widget
SpecialWidget = self.classes.SpecialWidget
self.check_account_dataclass(account)
eq_(account.account_id, 42)
eq_(account.widget_count, 2)
eq_(len(account.widgets), 2)
foo, bar = account.widgets
self.check_widget_dataclass(foo)
assert isinstance(foo, Widget)
eq_(foo.name, "Foo")
self.check_special_widget_dataclass(bar)
assert isinstance(bar, SpecialWidget)
eq_(bar.name, "Bar")
eq_(bar.magic, True)
def test_classes_are_still_dataclasses(self):
self.check_account_dataclass(self.classes.Account)
self.check_widget_dataclass(self.classes.Widget)
self.check_special_widget_dataclass(self.classes.SpecialWidget)
def test_construction(self):
SpecialWidget = self.classes.SpecialWidget
account = self.data_fixture()
self.check_data_fixture(account)
widget = SpecialWidget()
eq_(widget.name, None)
eq_(widget.magic, False)
def test_equality(self):
Widget = self.classes.Widget
SpecialWidget = self.classes.SpecialWidget
eq_(Widget("Foo"), Widget("Foo"))
assert Widget("Foo") != Widget("Bar")
assert Widget("Foo") != SpecialWidget("Foo")
def test_asdict_and_astuple_widget(self):
Widget = self.classes.Widget
widget = Widget("Foo")
eq_(dataclasses.asdict(widget), {"name": "Foo"})
eq_(dataclasses.astuple(widget), ("Foo",))
def test_asdict_and_astuple_special_widget(self):
SpecialWidget = self.classes.SpecialWidget
widget = SpecialWidget("Bar", magic=True)
eq_(dataclasses.asdict(widget), {"name": "Bar", "magic": True})
eq_(dataclasses.astuple(widget), ("Bar", True))
def test_round_trip(self):
Account = self.classes.Account
account = self.data_fixture()
with fixture_session() as session:
session.add(account)
session.commit()
with fixture_session() as session:
a = session.get(Account, 42)
self.check_data_fixture(a)
def test_appending_to_relationship(self):
Account = self.classes.Account
Widget = self.classes.Widget
account = self.data_fixture()
with Session(testing.db) as session, session.begin():
session.add(account)
account.add_widget(Widget("Xyzzy"))
with Session(testing.db) as session:
a = session.get(Account, 42)
eq_(a.widget_count, 3)
eq_(len(a.widgets), 3)
def test_filtering_on_relationship(self):
Account = self.classes.Account
Widget = self.classes.Widget
account = self.data_fixture()
with Session(testing.db) as session:
session.add(account)
session.commit()
with Session(testing.db) as session:
a = (
session.query(Account)
.join(Account.widgets)
.filter(Widget.name == "Foo")
.one()
)
self.check_data_fixture(a)
| DataclassesTest |
python | conda__conda | conda/auxlib/entity.py | {
"start": 25516,
"end": 31357
} | class ____(metaclass=EntityType):
__fields__ = odict()
_lazy_validate = False
def __init__(self, **kwargs):
for key, field in self.__fields__.items():
try:
setattr(self, key, kwargs[key])
except KeyError:
alias = next((ls for ls in field._aliases if ls in kwargs), None)
if alias is not None:
setattr(self, key, kwargs[alias])
elif key in getattr(self, KEY_OVERRIDES_MAP):
# handle case of fields inherited from subclass but overrode on class object
setattr(self, key, getattr(self, KEY_OVERRIDES_MAP)[key])
elif field.required and field.default is NULL:
raise ValidationError(
key,
msg="{} requires a {} field. Instantiated with "
"{}".format(self.__class__.__name__, key, kwargs),
)
except ValidationError:
if kwargs[key] is not None or field.required:
raise
if not self._lazy_validate:
self.validate()
@classmethod
def from_objects(cls, *objects, **override_fields):
"""Construct a new object of type ``cls`` from existing objects or dicts.
Allows the creation of new objects of concrete :class:`Entity` subclasses by
combining information from several sources. This can be any combination of
objects and dictionaries passed in as positional arguments. When looking for
the value of the fields of the :class:`Entity` subclass, the first object
that provides an attribute (or, in the case of a dict an entry) that has the
name of the field or one of its aliases will take precedence. Any keyword
arguments passed in will override this and take precedence.
Args:
cls(:class:`Entity` subclass): The class to create, usually determined by call, e.g. ``PrefixRecord.from_objects(...)``.
*objects(tuple(object or dict)): Any combination of objects and dicts in order of decending precedence.
**override_fields(dict(str, object)): Any individual fields overriding possible contents from ``*objects``.
"""
init_vars = {}
search_maps = tuple(AttrDict(o) if isinstance(o, dict) else o
for o in ((override_fields,) + objects))
for key, field in cls.__fields__.items():
try:
init_vars[key] = find_or_raise(key, search_maps, field._aliases)
except AttributeError:
pass
return cls(**init_vars)
@classmethod
def from_json(cls, json_str):
return cls(**json.loads(json_str))
@classmethod
def load(cls, data_dict):
return cls(**data_dict)
def validate(self):
# TODO: here, validate should only have to determine if the required keys are set
try:
reduce(
lambda _, name: getattr(self, name),
(name for name, field in self.__fields__.items() if field.required),
)
except TypeError as e:
if str(e) == "reduce() of empty sequence with no initial value":
pass
except AttributeError as e:
raise ValidationError(None, msg=e)
def __repr__(self):
def _valid(key):
# TODO: re-enable once aliases are implemented
# if key.startswith('_'):
# return False
if '__' in key:
return False
try:
getattr(self, key)
return True
except AttributeError:
return False
def _val(key):
val = getattr(self, key)
return repr(val.value) if isinstance(val, Enum) else repr(val)
def _sort_helper(key):
field = self.__fields__.get(key)
return field._order_helper if field is not None else -1
kwarg_str = ", ".join(
f"{key}={_val(key)}" for key in sorted(self.__dict__, key=_sort_helper) if _valid(key)
)
return f"{self.__class__.__name__}({kwarg_str})"
@classmethod
def __register__(cls):
pass
def json(self, indent=None, separators=None, **kwargs):
return json.dumps(self, indent=indent, separators=separators, **kwargs)
def pretty_json(self, indent=2, separators=(',', ': '), **kwargs):
return json.dumps(self, indent=indent, separators=separators, **kwargs)
def dump(self):
return odict((field.name, field.dump(self, self.__class__, value))
for field, value in ((field, getattr(self, field.name, NULL))
for field in self.__dump_fields())
if value is not NULL and not (value is field.default
and not field.default_in_dump))
@classmethod
def __dump_fields(cls):
if "__dump_fields_cache" not in cls.__dict__:
cls.__dump_fields_cache = tuple(
field for field in cls.__fields__.values() if field.in_dump
)
return cls.__dump_fields_cache
def __eq__(self, other):
if self.__class__ != other.__class__:
return False
rando_default = 19274656290 # need an arbitrary but definite value if field does not exist
return all(getattr(self, field, rando_default) == getattr(other, field, rando_default)
for field in self.__fields__)
def __hash__(self):
return sum(hash(getattr(self, field, None)) for field in self.__fields__)
@property
def _initd(self):
return getattr(self, f"_{self.__class__.__name__}__initd", None)
| Entity |
python | viewflow__viewflow | tests/json/test_json__basics.py | {
"start": 2578,
"end": 2747
} | class ____(models.Model):
data = models.JSONField()
name = jsonstore.CharField(max_length=250)
address = jsonstore.CharField(max_length=250, blank=True)
| Person |
python | great-expectations__great_expectations | great_expectations/compatibility/bigquery.py | {
"start": 2093,
"end": 2723
} | class ____:
"""Namespace for Bigquery dialect types"""
INTEGER = INTEGER
NUMERIC = NUMERIC
STRING = STRING
BIGNUMERIC = BIGNUMERIC
BYTES = BYTES
BOOL = BOOL
BOOLEAN = BOOLEAN
TIMESTAMP = TIMESTAMP
TIME = TIME
FLOAT = FLOAT
DATE = DATE
DATETIME = DATETIME
try:
from sqlalchemy_bigquery import GEOGRAPHY
BIGQUERY_GEO_SUPPORT = True
except (ImportError, AttributeError):
GEOGRAPHY = SQLALCHEMY_BIGQUERY_NOT_IMPORTED
try:
from sqlalchemy_bigquery import parse_url
except (ImportError, AttributeError):
parse_url = SQLALCHEMY_BIGQUERY_NOT_IMPORTED
| BIGQUERY_TYPES |
python | wandb__wandb | tests/unit_tests/test_launch/test_runner/test_kubernetes.py | {
"start": 8551,
"end": 48640
} | class ____:
def __init__(self):
self.jobs = dict()
async def create_namespaced_custom_object(
self, group, version, namespace, plural, body
):
self.jobs[body["metadata"]["name"]] = body
return body
async def delete_namespaced_custom_object(
self, group, version, namespace, plural, name, body
):
del self.jobs[name]
async def read_namespaced_custom_object(
self, group, version, namespace, plural, name, body
):
return self.jobs[name]
async def get_namespaced_custom_object_status(
self, group, version, namespace, plural, name, body
):
return self.jobs[name]
async def list_namespaced_custom_object(
self, group, version, namespace, plural, field_selector=None
):
return [self.jobs[name] for name in self.jobs]
@pytest.fixture
def mock_event_streams(monkeypatch):
"""Patches the kubernetes event stream with a mock and returns it."""
job_stream = MockEventStream()
pod_stream = MockEventStream()
def _select_stream(_, api_call, *args, **kwargs):
if api_call.__name__ == "list_namespaced_pod":
return pod_stream
elif api_call.__name__ == "list_namespaced_job":
return job_stream
elif api_call.__name__ == "list_namespaced_custom_object":
return job_stream
raise Exception(
f"Event stream requested for unsupported API call: {api_call.__name__} "
)
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_monitor.SafeWatch.stream",
_select_stream,
)
return job_stream, pod_stream
@pytest.fixture
def mock_batch_api(monkeypatch):
"""Patches the kubernetes batch api with a mock and returns it."""
batch_api = MockBatchApi()
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_runner.client.BatchV1Api",
lambda *args, **kwargs: batch_api,
)
return batch_api
@pytest.fixture
def mock_core_api(monkeypatch):
"""Patches the kubernetes core api with a mock and returns it."""
core_api = MockCoreV1Api()
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_runner.client.CoreV1Api",
lambda *args, **kwargs: core_api,
)
return core_api
@pytest.fixture
def mock_custom_api(monkeypatch):
"""Patches the kubernetes custom api with a mock and returns it."""
custom_api = MockCustomObjectsApi()
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_runner.client.CustomObjectsApi",
lambda *args, **kwargs: custom_api,
)
return custom_api
@pytest.fixture
def mock_kube_context_and_api_client(monkeypatch):
"""Patches the kubernetes context and api client with a mock and returns it."""
async def _mock_get_kube_context_and_api_client(*args, **kwargs):
return (None, None)
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_runner.get_kube_context_and_api_client",
_mock_get_kube_context_and_api_client,
)
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_monitor.get_kube_context_and_api_client",
_mock_get_kube_context_and_api_client,
)
@pytest.fixture
def mock_maybe_create_image_pullsecret(monkeypatch):
"""Patches the kubernetes context and api client with a mock and returns it."""
async def _mock_maybe_create_image_pullsecret(*args, **kwargs):
return None
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_runner.maybe_create_imagepull_secret",
_mock_maybe_create_image_pullsecret,
)
@pytest.fixture
def mock_create_from_dict(monkeypatch):
"""Patches the kubernetes create_from_dict with a mock and returns it."""
function_mock = MagicMock()
function_mock.return_value = [MockDict({"metadata": {"name": "test-job"}})]
async def _mock_create_from_yaml(*args, **kwargs):
return function_mock(*args, **kwargs)
monkeypatch.setattr(
"kubernetes_asyncio.utils.create_from_dict",
lambda *args, **kwargs: _mock_create_from_yaml(*args, **kwargs),
)
return function_mock
@pytest.mark.asyncio
@pytest.mark.xfail(reason="This test is flaky")
async def test_launch_kube_works(
monkeypatch,
mock_event_streams,
mock_batch_api,
mock_kube_context_and_api_client,
mock_maybe_create_image_pullsecret,
mock_create_from_dict,
test_api,
manifest,
clean_monitor,
clean_agent,
):
"""Test that we can launch a kubernetes job."""
mock_batch_api.jobs = {"test-job": MockDict(manifest)}
project = LaunchProject(
docker_config={"docker_image": "test_image"},
target_entity="test_entity",
target_project="test_project",
resource_args={"kubernetes": manifest},
launch_spec={},
overrides={
"args": ["--test_arg", "test_value"],
"command": ["test_entry"],
},
resource="kubernetes",
api=test_api,
git_info={},
job="",
uri="https://wandb.ai/test_entity/test_project/runs/test_run",
run_id="test_run_id",
name="test_run",
)
runner = KubernetesRunner(
test_api, {"SYNCHRONOUS": False}, MagicMock(), MagicMock()
)
submitted_run = await runner.run(project, "hello-world")
await asyncio.sleep(1)
assert str(await submitted_run.get_status()) == "unknown"
job_stream, pod_stream = mock_event_streams
await pod_stream.add( # This event does nothing. Added for code coverage of the path where there is no status.
MockDict(
{
"type": "MODIFIED",
"object": {
"metadata": {"labels": {"job-name": "test-job"}},
"status": {"phase": "Pending"},
},
}
)
)
await pod_stream.add(
MockDict(
{
"type": "ADDED",
"object": {
"metadata": {"labels": {"job-name": "test-job"}},
"status": {"phase": "Pending"},
},
}
)
)
await asyncio.sleep(0.1)
assert str(await submitted_run.get_status()) == "unknown"
await pod_stream.add(
MockDict(
{
"type": "MODIFIED",
"object": {
"metadata": {
"name": "test-pod",
"labels": {"job-name": "test-job"},
},
"status": {
"phase": "Pending",
"container_statuses": [
{
"name": "master",
"state": {"waiting": {"reason": "ContainerCreating"}},
}
],
},
},
}
)
)
await asyncio.sleep(0.1)
assert str(await submitted_run.get_status()) == "running"
await job_stream.add(
MockDict(
{
"type": "MODIFIED",
"object": {
"metadata": {"name": "test-job"},
"status": {"succeeded": 1},
},
}
)
)
await asyncio.sleep(0.1)
assert str(await submitted_run.get_status()) == "finished"
assert mock_create_from_dict.call_count == 1
submitted_manifest = mock_create_from_dict.call_args_list[0][0][1]
assert submitted_manifest["spec"]["template"]["spec"]["containers"][0]["args"] == [
"--test_arg",
"test_value",
]
assert (
submitted_manifest["spec"]["template"]["spec"]["containers"][0][
"imagePullPolicy"
]
== "IfNotPresent"
)
# Test cancel
assert "test-job" in mock_batch_api.jobs
await submitted_run.cancel()
assert "test-job" not in mock_batch_api.jobs
def _raise_api_exception(*args, **kwargs):
raise ApiException()
mock_batch_api.delete_namespaced_job = _raise_api_exception
with pytest.raises(LaunchError):
await submitted_run.cancel()
@pytest.mark.asyncio
async def test_launch_crd_works(
monkeypatch,
mock_event_streams,
mock_batch_api,
mock_custom_api,
mock_kube_context_and_api_client,
mock_create_from_dict,
test_api,
volcano_spec,
clean_monitor,
clean_agent,
):
"""Test that we can launch a kubernetes job."""
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_runner.maybe_create_imagepull_secret",
lambda *args, **kwargs: None,
)
mock_batch_api.jobs = {"test-job": MockDict(volcano_spec)}
project = LaunchProject(
docker_config={"docker_image": "test_image"},
target_entity="test_entity",
target_project="test_project",
resource_args={"kubernetes": volcano_spec},
launch_spec={},
overrides={
"args": ["--test_arg", "test_value"],
"command": ["test_entry"],
},
resource="kubernetes",
api=test_api,
git_info={},
job="",
uri="https://wandb.ai/test_entity/test_project/runs/test_run",
run_id="test_run_id",
name="test_run",
)
runner = KubernetesRunner(
test_api, {"SYNCHRONOUS": False}, MagicMock(), MagicMock()
)
submitted_run = await runner.run(project, MagicMock())
assert str(await submitted_run.get_status()) == "unknown"
job_stream, pod_stream = mock_event_streams
# add container creating event
await pod_stream.add(
MockDict(
{
"type": "MODIFIED",
"object": {
"metadata": {
"name": "test-pod",
"labels": {"job-name": "test-job"},
},
"status": {
"phase": "Pending",
"container_statuses": [
{
"name": "master",
"state": {"waiting": {"reason": "ContainerCreating"}},
}
],
},
},
}
)
)
await asyncio.sleep(1)
assert str(await submitted_run.get_status()) == "running"
await job_stream.add(
MockDict(
{
"type": "MODIFIED",
"object": {
"metadata": {"name": "test-job"},
"status": {"state": {"phase": "Running"}},
},
}
)
)
await asyncio.sleep(1)
assert str(await submitted_run.get_status()) == "running"
await job_stream.add(
MockDict(
{
"type": "MODIFIED",
"object": {
"metadata": {"name": "test-job"},
"status": {
"conditions": [
{
"type": "Succeeded",
"status": "True",
"lastTransitionTime": "2021-09-06T20:04:12Z",
}
]
},
},
}
)
)
await asyncio.sleep(1)
assert str(await submitted_run.get_status()) == "finished"
@pytest.mark.asyncio
async def test_launch_crd_pod_schedule_warning(
monkeypatch,
mock_event_streams,
mock_batch_api,
mock_custom_api,
mock_kube_context_and_api_client,
mock_create_from_dict,
test_api,
volcano_spec,
clean_monitor,
clean_agent,
):
mock_batch_api.jobs = {"test-job": MockDict(volcano_spec)}
test_api.update_run_queue_item_warning = MagicMock(return_value=True)
job_tracker = MagicMock()
job_tracker.run_queue_item_id = "test-rqi"
project = LaunchProject(
docker_config={"docker_image": "test_image"},
target_entity="test_entity",
target_project="test_project",
resource_args={"kubernetes": volcano_spec},
launch_spec={},
overrides={
"args": ["--test_arg", "test_value"],
"command": ["test_entry"],
},
resource="kubernetes",
api=test_api,
git_info={},
job="",
uri="https://wandb.ai/test_entity/test_project/runs/test_run",
run_id="test_run_id",
name="test_run",
)
runner = KubernetesRunner(
test_api, {"SYNCHRONOUS": False}, MagicMock(), MagicMock()
)
submitted_run = await runner.run(project, "hello-world")
await asyncio.sleep(1)
_, pod_stream = mock_event_streams
await pod_stream.add(
MockDict(
{
"type": "WARNING",
"object": {
"metadata": {
"owner_references": [{"name": "test-job"}],
"labels": {},
},
"status": {
"phase": "Pending",
"conditions": [
{
"type": "PodScheduled",
"status": "False",
"reason": "Unschedulable",
"message": "Test message",
}
],
},
},
}
)
)
await asyncio.sleep(0.1)
status = await submitted_run.get_status()
assert status.messages == ["Test message"]
@pytest.mark.skipif(
platform.system() == "Windows",
reason="Launch does not support Windows and this test is failing on Windows.",
)
@pytest.mark.asyncio
async def test_launch_kube_base_image_works(
monkeypatch,
mock_event_streams,
mock_batch_api,
mock_kube_context_and_api_client,
mock_maybe_create_image_pullsecret,
mock_create_from_dict,
test_api,
manifest,
clean_monitor,
clean_agent,
tmpdir,
):
"""Test that runner works as expected with base image jobs."""
monkeypatch.setattr(
wandb.sdk.launch.runner.kubernetes_runner,
"SOURCE_CODE_PVC_MOUNT_PATH",
tmpdir,
)
monkeypatch.setattr(
wandb.sdk.launch.runner.kubernetes_runner,
"SOURCE_CODE_PVC_NAME",
"wandb-source-code-pvc",
)
mock_batch_api.jobs = {"test-job": MockDict(manifest)}
project = LaunchProject(
target_entity="test_entity",
target_project="test_project",
resource_args={"kubernetes": manifest},
launch_spec={},
overrides={
"args": ["--test_arg", "test_value"],
"command": ["test_entry"],
},
resource="kubernetes",
api=test_api,
git_info={},
job="",
uri="https://wandb.ai/test_entity/test_project/runs/test_run",
run_id="test_run_id",
name="test_run",
docker_config={},
)
project._job_artifact = MagicMock()
project.set_job_base_image("test_base_image")
runner = KubernetesRunner(
test_api, {"SYNCHRONOUS": False}, MagicMock(), MagicMock()
)
await runner.run(project, "test_base_image")
manifest = mock_create_from_dict.call_args_list[0][0][1]
pod = manifest["spec"]["template"]["spec"]
container = pod["containers"][0]
assert container["workingDir"] == "/mnt/wandb"
assert container["volumeMounts"] == [
{
"mountPath": "/mnt/wandb",
"subPath": project.get_image_source_string(),
"name": "wandb-source-code-volume",
}
]
assert pod["volumes"] == [
{
"name": "wandb-source-code-volume",
"persistentVolumeClaim": {"claimName": "wandb-source-code-pvc"},
}
]
@pytest.mark.skip(
reason="This test is flaky, please remove the skip once the flakyness is fixed."
)
@pytest.mark.skipif(
platform.system() == "Windows",
reason="Launch does not support Windows and this test is failing on Windows.",
)
@pytest.mark.asyncio
async def test_launch_crd_base_image_works(
monkeypatch,
mock_event_streams,
mock_custom_api,
mock_kube_context_and_api_client,
test_api,
volcano_spec,
tmpdir,
):
"""Test that runner works as expected with base image jobs."""
monkeypatch.setattr(
wandb.sdk.launch.runner.kubernetes_runner,
"SOURCE_CODE_PVC_MOUNT_PATH",
tmpdir,
)
monkeypatch.setattr(
wandb.sdk.launch.runner.kubernetes_runner,
"SOURCE_CODE_PVC_NAME",
"wandb-source-code-pvc",
)
mock_batch_api.jobs = {"test-job": MockDict(volcano_spec)}
project = LaunchProject(
docker_config={},
target_entity="test_entity",
target_project="test_project",
resource_args={"kubernetes": volcano_spec},
launch_spec={},
overrides={
"args": ["--test_arg", "test_value"],
"command": ["test_entry"],
},
resource="kubernetes",
api=test_api,
git_info={},
job="",
uri="https://wandb.ai/test_entity/test_project/runs/test_run",
run_id="test_run_id",
name="test_run",
)
project._job_artifact = MagicMock()
project.set_job_base_image("test_base_image")
runner = KubernetesRunner(
test_api, {"SYNCHRONOUS": False}, MagicMock(), MagicMock()
)
await runner.run(project, "test_base_image")
job = mock_custom_api.jobs["test-job"]
pod = job["tasks"][0]["template"]["spec"]
container = pod["containers"][0]
assert container["workingDir"] == "/mnt/wandb"
assert container["volumeMounts"] == [
{
"mountPath": "/mnt/wandb",
"subPath": project.get_image_source_string(),
"name": "wandb-source-code-volume",
}
]
assert pod["volumes"] == [
{
"name": "wandb-source-code-volume",
"persistentVolumeClaim": {"claimName": "wandb-source-code-pvc"},
}
]
@pytest.mark.timeout(320)
@pytest.mark.asyncio
async def test_launch_kube_failed(
monkeypatch,
mock_batch_api,
mock_kube_context_and_api_client,
mock_create_from_dict,
mock_maybe_create_image_pullsecret,
mock_event_streams,
test_api,
manifest,
clean_monitor,
clean_agent,
):
"""Test that we can launch a kubernetes job."""
mock_batch_api.jobs = {"test-job": manifest}
project = LaunchProject(
docker_config={"docker_image": "test_image"},
target_entity="test_entity",
target_project="test_project",
resource_args={"kubernetes": manifest},
launch_spec={},
overrides={
"args": ["--test_arg", "test_value"],
"command": ["test_entry"],
},
resource="kubernetes",
api=test_api,
git_info={},
job="",
uri="https://wandb.ai/test_entity/test_project/runs/test_run",
run_id="test_run_id",
name="test_run",
)
runner = KubernetesRunner(
test_api, {"SYNCHRONOUS": False}, MagicMock(), MagicMock()
)
job_stream, _ = mock_event_streams
await job_stream.add(
MockDict(
{
"type": "MODIFIED",
"object": {
"metadata": {"name": "test-job"},
"status": {"failed": 1},
},
}
)
)
submitted_run = await runner.run(project, "test_image")
await submitted_run.wait()
assert str(await submitted_run.get_status()) == "failed"
@pytest.mark.timeout(320)
@pytest.mark.asyncio
async def test_launch_kube_api_secret_failed(
monkeypatch,
mock_batch_api,
mock_kube_context_and_api_client,
mock_create_from_dict,
mock_maybe_create_image_pullsecret,
mock_event_streams,
test_api,
manifest,
clean_monitor,
clean_agent,
):
async def mock_maybe_create_imagepull_secret(*args, **kwargs):
return None
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_runner.maybe_create_imagepull_secret",
mock_maybe_create_imagepull_secret,
)
mock_la = MagicMock()
mock_la.initialized = MagicMock(return_value=True)
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_runner.LaunchAgent", mock_la
)
async def mock_create_namespaced_secret(*args, **kwargs):
raise Exception("Test exception")
mock_core_api = MagicMock()
mock_core_api.create_namespaced_secret = mock_create_namespaced_secret
monkeypatch.setattr(
"wandb.sdk.launch.runner.kubernetes_runner.kubernetes_asyncio.client.CoreV1Api",
mock_core_api,
)
monkeypatch.setattr("wandb.termwarn", MagicMock())
mock_batch_api.jobs = {"test-job": MockDict(manifest)}
project = LaunchProject(
docker_config={"docker_image": "test_image"},
target_entity="test_entity",
target_project="test_project",
resource_args={"kubernetes": manifest},
launch_spec={"_wandb_api_key": "test_key"},
overrides={
"args": ["--test_arg", "test_value"],
"command": ["test_entry"],
},
resource="kubernetes",
api=test_api,
git_info={},
job="",
uri="https://wandb.ai/test_entity/test_project/runs/test_run",
run_id="test_run_id",
name="test_run",
)
runner = KubernetesRunner(
test_api, {"SYNCHRONOUS": False}, MagicMock(), MagicMock()
)
with pytest.raises(LaunchError):
await runner.run(project, MagicMock())
assert wandb.termwarn.call_count == 6
assert wandb.termwarn.call_args_list[0][0][0].startswith(
"Exception when ensuring Kubernetes API key secret"
)
@pytest.mark.asyncio
async def test_launch_kube_pod_schedule_warning(
monkeypatch,
mock_event_streams,
mock_batch_api,
mock_kube_context_and_api_client,
mock_maybe_create_image_pullsecret,
mock_create_from_dict,
test_api,
manifest,
clean_monitor,
clean_agent,
):
mock_batch_api.jobs = {"test-job": MockDict(manifest)}
job_tracker = MagicMock()
job_tracker.run_queue_item_id = "test-rqi"
project = LaunchProject(
docker_config={"docker_image": "test_image"},
target_entity="test_entity",
target_project="test_project",
resource_args={"kubernetes": manifest},
launch_spec={},
overrides={
"args": ["--test_arg", "test_value"],
"command": ["test_entry"],
},
resource="kubernetes",
api=test_api,
git_info={},
job="",
uri="https://wandb.ai/test_entity/test_project/runs/test_run",
run_id="test_run_id",
name="test_run",
)
runner = KubernetesRunner(
test_api, {"SYNCHRONOUS": False}, MagicMock(), MagicMock()
)
submitted_run = await runner.run(project, "hello-world")
await asyncio.sleep(1)
_, pod_stream = mock_event_streams
await pod_stream.add(
MockDict(
{
"type": "WARNING",
"object": {
"metadata": {"labels": {"job-name": "test-job"}},
"status": {
"phase": "Pending",
"conditions": [
{
"type": "PodScheduled",
"status": "False",
"reason": "Unschedulable",
"message": "Test message",
}
],
},
},
}
)
)
await asyncio.sleep(0.1)
status = await submitted_run.get_status()
assert status.messages == ["Test message"]
@pytest.mark.asyncio
async def test_maybe_create_imagepull_secret_given_creds():
mock_registry = MagicMock()
async def _mock_get_username_password():
return ("testuser", "testpass")
mock_registry.get_username_password.return_value = _mock_get_username_password()
mock_registry.uri = "test.com"
api = MockCoreV1Api()
await maybe_create_imagepull_secret(
api,
mock_registry,
"12345678",
"wandb",
)
namespace, secret = api.secrets[0]
assert namespace == "wandb"
assert secret.metadata.name == "regcred-12345678"
assert secret.data[".dockerconfigjson"] == base64.b64encode(
json.dumps(
{
"auths": {
"test.com": {
"auth": "dGVzdHVzZXI6dGVzdHBhc3M=", # testuser:testpass
"email": "deprecated@wandblaunch.com",
}
}
}
).encode("utf-8")
).decode("utf-8")
@pytest.mark.asyncio
async def test_create_api_key_secret():
api = MockCoreV1Api()
await ensure_api_key_secret(api, "wandb-api-key-testagent", "wandb", "testsecret")
namespace, secret = api.secrets[0]
assert namespace == "wandb"
assert secret.metadata.name == "wandb-api-key-testagent"
assert secret.data["password"] == base64.b64encode(b"testsecret").decode()
@pytest.mark.asyncio
async def test_create_api_key_secret_exists():
api = MockCoreV1Api()
# Create secret with same name but different data, assert it gets overwritten
secret_data = "bad data"
labels = {"wandb.ai/created-by": "launch-agent"}
secret = client.V1Secret(
data=secret_data,
metadata=client.V1ObjectMeta(
name="wandb-api-key-testagent", namespace="wandb", labels=labels
),
kind="Secret",
type="kubernetes.io/basic-auth",
)
await api.create_namespaced_secret("wandb", secret)
await ensure_api_key_secret(api, "wandb-api-key-testagent", "wandb", "testsecret")
namespace, secret = api.secrets[0]
assert namespace == "wandb"
assert secret.metadata.name == "wandb-api-key-testagent"
assert secret.data["password"] == base64.b64encode(b"testsecret").decode()
assert api.calls["delete"] == 1
# Test monitor class.
def job_factory(name, statuses, type="MODIFIED"):
"""Factory for creating job events."""
return MockDict(
{
"type": f"{type}",
"object": {
"status": {f"{status}": 1 for status in statuses},
"metadata": {"name": name},
},
}
)
def pod_factory(event_type, job_name, condition_types, condition_reasons, phase=None):
"""Factory for creating pod events.
Args:
event_type (str): The type of event to create.
condition_types (list): The types of conditions to create.
condition_reasons (list): The reasons of conditions to create.
Returns:
dict: The pod event.
"""
return MockDict(
{
"type": event_type,
"object": {
"metadata": {
"labels": {"job-name": job_name},
},
"status": {
"phase": phase,
"conditions": [
{
"type": condition_type,
"reason": condition_reason,
}
for condition_type, condition_reason in zip(
condition_types, condition_reasons
)
],
},
},
}
)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"reason",
["EvictionByEvictionAPI", "PreemptionByScheduler", "TerminationByKubelet"],
)
async def test_monitor_preempted(
mock_event_streams,
mock_kube_context_and_api_client,
mock_batch_api,
mock_core_api,
reason,
clean_monitor,
clean_agent,
):
"""Test if the monitor thread detects a preempted job."""
await LaunchKubernetesMonitor.ensure_initialized()
LaunchKubernetesMonitor.monitor_namespace("wandb")
_, pod_event_stream = mock_event_streams
await pod_event_stream.add(pod_factory("ADDED", "test-job", [], []))
await asyncio.sleep(0.1)
await pod_event_stream.add(
pod_factory("MODIFIED", "test-job", ["DisruptionTarget"], [reason])
)
await asyncio.sleep(0.1)
assert LaunchKubernetesMonitor.get_status("test-job").state == "preempted"
@pytest.mark.asyncio
async def test_monitor_succeeded(
mock_event_streams,
mock_kube_context_and_api_client,
mock_batch_api,
mock_core_api,
clean_monitor,
clean_agent,
):
"""Test if the monitor thread detects a succeeded job."""
await LaunchKubernetesMonitor.ensure_initialized()
LaunchKubernetesMonitor.monitor_namespace("wandb")
job_event_stream, pod_event_stream = mock_event_streams
await asyncio.sleep(0.1)
await pod_event_stream.add(pod_factory("ADDED", "job-name", [], []))
await asyncio.sleep(0.1)
await job_event_stream.add(job_factory("job-name", ["succeeded"]))
await asyncio.sleep(0.1)
assert LaunchKubernetesMonitor.get_status("job-name").state == "finished"
@pytest.mark.asyncio
async def test_monitor_failed(
mock_event_streams,
mock_kube_context_and_api_client,
mock_batch_api,
mock_core_api,
clean_monitor,
clean_agent,
):
"""Test if the monitor thread detects a failed job."""
await LaunchKubernetesMonitor.ensure_initialized()
LaunchKubernetesMonitor.monitor_namespace("wandb")
job_event_stream, pod_event_stream = mock_event_streams
await asyncio.sleep(0.1)
await pod_event_stream.add(pod_factory("ADDED", "job-name", [], []))
await asyncio.sleep(0.1)
await job_event_stream.add(job_factory("job-name", ["failed"]))
await asyncio.sleep(0.1)
assert LaunchKubernetesMonitor.get_status("job-name").state == "failed"
@pytest.mark.asyncio
async def test_monitor_running(
mock_event_streams,
mock_kube_context_and_api_client,
mock_batch_api,
mock_core_api,
clean_monitor,
clean_agent,
):
"""Test if the monitor thread detects a running job."""
await LaunchKubernetesMonitor.ensure_initialized()
LaunchKubernetesMonitor.monitor_namespace("wandb")
job_event_stream, pod_event_stream = mock_event_streams
await asyncio.sleep(0.1)
await pod_event_stream.add(pod_factory("ADDED", "job-name", [], []))
await asyncio.sleep(0.1)
await job_event_stream.add(job_factory("job-name", ["active"]))
await pod_event_stream.add(
pod_factory("MODIFIED", "job-name", [""], [""], phase="Running")
)
await asyncio.sleep(0.1)
assert LaunchKubernetesMonitor.get_status("job-name").state == "running"
@pytest.mark.asyncio
async def test_monitor_job_deleted(
mock_event_streams,
mock_kube_context_and_api_client,
mock_batch_api,
mock_core_api,
clean_monitor,
clean_agent,
):
"""Test if the monitor thread detects a job being deleted."""
await LaunchKubernetesMonitor.ensure_initialized()
LaunchKubernetesMonitor.monitor_namespace("wandb")
job_event_stream, pod_event_stream = mock_event_streams
await asyncio.sleep(0.1)
await pod_event_stream.add(pod_factory("ADDED", "job-name", [], []))
await asyncio.sleep(0.1)
await job_event_stream.add(job_factory("job-name", ["active"], type="DELETED"))
await asyncio.sleep(0.1)
assert LaunchKubernetesMonitor.get_status("job-name").state == "failed"
# Test util functions
def condition_factory(
condition_type, condition_status, condition_reason, transition_time
):
"""Factory for creating conditions."""
return MockDict(
{
"type": condition_type,
"status": condition_status,
"reason": condition_reason,
"lastTransitionTime": transition_time,
}
)
@pytest.mark.parametrize(
"conditions, expected",
[
(
[condition_factory("Running", "True", "", "2023-09-06T20:04:11Z")],
"running",
),
(
[
condition_factory("Running", "False", "", "2023-09-06T20:04:11Z"),
condition_factory("Succeeded", "True", "", "2023-09-06T20:04:11Z"),
],
"finished",
),
(
[
condition_factory("Running", "True", "", "2023-09-06T20:04:11Z"),
condition_factory("Terminating", "True", "", "2023-09-06T20:04:11Z"),
],
"stopping",
),
([condition_factory("Running", False, "", "2023-09-06T20:04:11Z")], None),
],
)
def test_state_from_conditions(conditions, expected):
"""Test that we extract CRD state from conditions correctly."""
state = _state_from_conditions(conditions)
if isinstance(state, str):
assert state == expected
else:
assert state == expected and state is None
def container_status_factory(reason):
"""Factory for creating container statuses."""
return MockDict({"state": {"waiting": {"reason": reason}}})
@pytest.mark.parametrize(
"conditions, expected",
[
(
[container_status_factory("ContainerCreating")],
True,
),
(
[container_status_factory("PodInitializing")],
False,
),
],
)
def test_is_container_creating(conditions, expected):
pod = MockDict({"container_statuses": conditions})
assert _is_container_creating(pod) == expected
@pytest.mark.parametrize(
"status_dict,expected",
[
({}, None),
({"ready": 1}, "running"),
({"active": 1}, "starting"),
],
)
def test_state_from_replicated_status(status_dict, expected):
"""Test that we extract replicated job state from status correctly."""
state = _state_from_replicated_status(status_dict)
assert state == expected
def test_custom_resource_helper():
"""Test that the custom resource helper class works as expected."""
resource = CustomResource("batch.volcano.sh", "v1alpha1", "jobs")
assert resource.group == "batch.volcano.sh"
assert resource.version == "v1alpha1"
assert resource.plural == "jobs"
assert str(resource) == "batch.volcano.sh/v1alpha1/jobs"
assert hash(resource) == hash(str(resource))
@pytest.mark.asyncio
async def test_log_error_callback(monkeypatch):
"""Test that our callback logs exceptions for crashed tasks."""
monkeypatch.setattr("wandb.termerror", MagicMock())
async def _error_raiser():
raise LaunchError("test error")
task = asyncio.create_task(_error_raiser())
task.add_done_callback(_log_err_task_callback)
with pytest.raises(LaunchError):
await task
assert wandb.termerror.call_count == 2
assert wandb.termerror.call_args_list[0][0][0].startswith("Exception in task")
# Tests for KubernetesSubmittedRun
@pytest.mark.asyncio
@pytest.mark.parametrize(
"pods,logs,expected",
[
(
MockPodList(
[
MockDict(
{
"metadata": MockDict(
{
"name": "test_pod",
"labels": {"job-name": "test_job"},
}
)
}
)
]
),
"test_log",
"test_log",
),
(MockPodList([]), "test_log", None),
(Exception(), "test_log", None),
(
MockPodList(
[
MockDict(
{
"metadata": MockDict(
{
"name": "test_pod",
"labels": {"job-name": "test_job"},
}
)
}
)
]
),
Exception(),
None,
),
],
)
async def test_kubernetes_submitted_run_get_logs(pods, logs, expected):
core_api = MagicMock()
async def _mock_list_namespaced_pod(*args, **kwargs):
if isinstance(pods, Exception):
raise pods
return pods
async def _mock_read_namespaced_pod_log(*args, **kwargs):
if isinstance(logs, Exception):
raise logs
return logs
core_api.list_namespaced_pod = _mock_list_namespaced_pod
core_api.read_namespaced_pod_log = _mock_read_namespaced_pod_log
submitted_run = KubernetesSubmittedRun(
batch_api=MagicMock(),
core_api=core_api,
apps_api=MagicMock(),
network_api=MagicMock(),
namespace="wandb",
name="test_run",
)
# Assert that we get the logs back.
assert await submitted_run.get_logs() == expected
@pytest.mark.asyncio
async def test_launch_additional_services(
monkeypatch,
mock_event_streams,
mock_batch_api,
mock_kube_context_and_api_client,
mock_maybe_create_image_pullsecret,
mock_create_from_dict,
test_api,
manifest,
clean_monitor,
clean_agent,
):
target_entity = "test_entity"
target_project = "test_project"
run_id = "test_run_id"
expected_deployment_name = "deploy-test-entity-test-project-test-run-id"
expected_pod_name = "pod-test-entity-test-project-test-run-id"
expected_label = "auxiliary-resource"
expected_auxiliary_resource_label = "aux-test-entity-test-project-test-run-id"
additional_service = {
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name": f"deploy-{target_entity}-{target_project}-{run_id}",
"labels": {
"wandb.ai/label": expected_label,
},
},
"spec": {
"template": {
"spec": {
"containers": [
{
"name": f"pod-{target_entity}-{target_project}-{run_id}",
}
]
},
},
},
}
manifest["wait_for_ready"] = False
project = LaunchProject(
docker_config={"docker_image": "test_image"},
target_entity=target_entity,
target_project=target_project,
resource_args={"kubernetes": manifest},
launch_spec={
"additional_services": [
{
"config": additional_service,
"name": "additional_service",
}
]
},
overrides={},
resource="kubernetes",
api=test_api,
git_info={},
job="",
uri=f"https://wandb.ai/{target_entity}/{target_project}/runs/{run_id}",
run_id=run_id,
name="test_run",
)
runner = KubernetesRunner(
test_api, {"SYNCHRONOUS": False}, MagicMock(), MagicMock()
)
await runner.run(project, "test_image")
calls = mock_create_from_dict.call_args_list
assert len(calls) == 2 # one for the main job, one for the additional service
additional_service_call = next(
c for c in calls if c[0][1].get("kind") == "Deployment"
)
assert (
additional_service_call[0][1].get("metadata").get("name")
== expected_deployment_name
)
assert (
additional_service_call[0][1]
.get("spec")
.get("template")
.get("spec")
.get("containers")[0]
.get("name")
== expected_pod_name
)
labels = additional_service_call[0][1].get("metadata").get("labels")
assert "wandb.ai/label" in labels
assert labels["wandb.ai/label"] == expected_label
assert WANDB_K8S_LABEL_AUXILIARY_RESOURCE in labels
assert (
labels[WANDB_K8S_LABEL_AUXILIARY_RESOURCE] == expected_auxiliary_resource_label
)
| MockCustomObjectsApi |
python | ansible__ansible | lib/ansible/errors/__init__.py | {
"start": 6436,
"end": 6780
} | class ____(AnsibleError):
"""Invalid options were passed."""
# FIXME: This exception is used for many non-CLI related errors.
# The few cases which are CLI related should really be handled by argparse instead, at which point the exit code here can be removed.
_exit_code = ExitCode.INVALID_CLI_OPTION
| AnsibleOptionsError |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 11857,
"end": 12000
} | class ____(models.Model):
library = models.ForeignKey("Library", on_delete=models.CASCADE, null=True)
history = HistoricalRecords()
| State |
python | getsentry__sentry | tests/flagpole/test_conditions.py | {
"start": 6672,
"end": 8526
} | class ____:
def test_is_equal_string(self) -> None:
value = "foo"
condition = EqualsCondition(property="foo", value=value)
assert condition.match(context=EvaluationContext({"foo": "foo"}), segment_name="test")
not_condition = NotEqualsCondition(property="foo", value=value)
assert not not_condition.match(
context=EvaluationContext({"foo": "foo"}), segment_name="test"
)
def test_is_not_equals(self) -> None:
values = "bar"
condition = EqualsCondition(property="foo", value=values)
assert not condition.match(context=EvaluationContext({"foo": "foo"}), segment_name="test")
not_condition = NotEqualsCondition(property="foo", value=values)
assert not_condition.match(context=EvaluationContext({"foo": "foo"}), segment_name="test")
def test_is_equal_case_insensitive(self) -> None:
values = "bAr"
condition = EqualsCondition(property="foo", value=values)
assert condition.match(context=EvaluationContext({"foo": "BaR"}), segment_name="test")
not_condition = NotEqualsCondition(property="foo", value=values)
assert not not_condition.match(
context=EvaluationContext({"foo": "BaR"}), segment_name="test"
)
def test_equality_type_mismatch_strings(self) -> None:
values = ["foo", "bar"]
condition = EqualsCondition(property="foo", value=values, operator="equals")
with pytest.raises(ConditionTypeMismatchException):
condition.match(context=EvaluationContext({"foo": "foo"}), segment_name="test")
not_condition = NotEqualsCondition(property="foo", value=values)
with pytest.raises(ConditionTypeMismatchException):
not_condition.match(context=EvaluationContext({"foo": "foo"}), segment_name="test")
| TestEqualsConditions |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_table02.py | {
"start": 315,
"end": 1111
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet()
worksheet2 = workbook.add_worksheet()
worksheet1.set_column("B:J", 10.288)
worksheet2.set_column("C:L", 10.288)
worksheet1.add_table("B3:E11")
worksheet1.add_table("G10:J16")
worksheet1.add_table("C18:F25")
worksheet2.add_table("I4:L11")
worksheet2.add_table("C16:H23")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 15471,
"end": 15892
} | class ____(LocalizableStreamlitException):
"""Exception raised when an invalid ID component is provided."""
def __init__(self, part: str, delimiter: str) -> None:
super().__init__(
"The `{part}` of a bidirectional component's ID must not contain "
"the delimiter sequence `{delimiter}`.",
part=part,
delimiter=delimiter,
)
| BidiComponentInvalidIdError |
python | pyca__cryptography | tests/hazmat/primitives/test_scrypt.py | {
"start": 1850,
"end": 7780
} | class ____:
@pytest.mark.parametrize("params", vectors)
def test_derive(self, backend, params):
_skip_if_memory_limited(_MEM_LIMIT, params)
password = params["password"]
work_factor = int(params["n"])
block_size = int(params["r"])
parallelization_factor = int(params["p"])
length = int(params["length"])
salt = params["salt"]
derived_key = params["derived_key"]
scrypt = Scrypt(
salt,
length,
work_factor,
block_size,
parallelization_factor,
backend,
)
assert binascii.hexlify(scrypt.derive(password)) == derived_key
def test_salt_not_bytes(self, backend):
work_factor = 1024
block_size = 8
parallelization_factor = 16
length = 64
salt = 1
with pytest.raises(TypeError):
Scrypt(
salt, # type: ignore[arg-type]
length,
work_factor,
block_size,
parallelization_factor,
backend,
)
def test_scrypt_malloc_failure(self, backend):
password = b"NaCl"
work_factor = 1024**3
block_size = 589824
parallelization_factor = 16
length = 64
salt = b"NaCl"
scrypt = Scrypt(
salt,
length,
work_factor,
block_size,
parallelization_factor,
backend,
)
with pytest.raises(MemoryError):
scrypt.derive(password)
def test_password_not_bytes(self, backend):
password = 1
work_factor = 1024
block_size = 8
parallelization_factor = 16
length = 64
salt = b"NaCl"
scrypt = Scrypt(
salt,
length,
work_factor,
block_size,
parallelization_factor,
backend,
)
with pytest.raises(TypeError):
scrypt.derive(password) # type: ignore[arg-type]
def test_buffer_protocol(self, backend):
password = bytearray(b"password")
work_factor = 256
block_size = 8
parallelization_factor = 16
length = 10
salt = b"NaCl"
scrypt = Scrypt(
salt,
length,
work_factor,
block_size,
parallelization_factor,
backend,
)
assert scrypt.derive(password) == b"\xf4\x92\x86\xb2\x06\x0c\x848W\x87"
@pytest.mark.parametrize("params", vectors)
def test_verify(self, backend, params):
_skip_if_memory_limited(_MEM_LIMIT, params)
password = params["password"]
work_factor = int(params["n"])
block_size = int(params["r"])
parallelization_factor = int(params["p"])
length = int(params["length"])
salt = params["salt"]
derived_key = params["derived_key"]
scrypt = Scrypt(
salt,
length,
work_factor,
block_size,
parallelization_factor,
backend,
)
scrypt.verify(password, binascii.unhexlify(derived_key))
def test_invalid_verify(self, backend):
password = b"password"
work_factor = 1024
block_size = 8
parallelization_factor = 16
length = 64
salt = b"NaCl"
derived_key = b"fdbabe1c9d3472007856e7190d01e9fe7c6ad7cbc8237830e773"
scrypt = Scrypt(
salt,
length,
work_factor,
block_size,
parallelization_factor,
backend,
)
with pytest.raises(InvalidKey):
scrypt.verify(password, binascii.unhexlify(derived_key))
def test_already_finalized(self, backend):
password = b"password"
work_factor = 1024
block_size = 8
parallelization_factor = 16
length = 64
salt = b"NaCl"
scrypt = Scrypt(
salt,
length,
work_factor,
block_size,
parallelization_factor,
backend,
)
scrypt.derive(password)
with pytest.raises(AlreadyFinalized):
scrypt.derive(password)
def test_invalid_n(self, backend):
# n is less than 2
with pytest.raises(ValueError):
Scrypt(b"NaCl", 64, 1, 8, 16, backend)
# n is not a power of 2
with pytest.raises(ValueError):
Scrypt(b"NaCl", 64, 3, 8, 16, backend)
def test_invalid_r(self, backend):
with pytest.raises(ValueError):
Scrypt(b"NaCl", 64, 2, 0, 16, backend)
def test_invalid_p(self, backend):
with pytest.raises(ValueError):
Scrypt(b"NaCl", 64, 2, 8, 0, backend)
def test_derive_into(self, backend):
scrypt = Scrypt(b"NaCl", 64, 1024, 8, 16, backend)
buf = bytearray(64)
n = scrypt.derive_into(b"password", buf)
assert n == 64
# Verify the output matches what derive would produce
scrypt2 = Scrypt(b"NaCl", 64, 1024, 8, 16, backend)
expected = scrypt2.derive(b"password")
assert buf == expected
@pytest.mark.parametrize(
("buflen", "outlen"), [(63, 64), (65, 64), (32, 64), (128, 64)]
)
def test_derive_into_buffer_incorrect_size(self, buflen, outlen, backend):
scrypt = Scrypt(b"NaCl", outlen, 1024, 8, 16, backend)
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
scrypt.derive_into(b"password", buf)
def test_derive_into_already_finalized(self, backend):
scrypt = Scrypt(b"NaCl", 64, 1024, 8, 16, backend)
buf = bytearray(64)
scrypt.derive_into(b"password", buf)
with pytest.raises(AlreadyFinalized):
scrypt.derive_into(b"password", buf)
| TestScrypt |
python | doocs__leetcode | solution/2900-2999/2930.Number of Strings Which Can Be Rearranged to Contain Substring/Solution2.py | {
"start": 0,
"end": 421
} | class ____:
def stringCount(self, n: int) -> int:
mod = 10**9 + 7
a = b = pow(25, n, mod)
c = pow(25, n, mod) + n * pow(25, n - 1, mod)
ab = pow(24, n, mod)
ac = bc = (pow(24, n, mod) + n * pow(24, n - 1, mod)) % mod
abc = (pow(23, n, mod) + n * pow(23, n - 1, mod)) % mod
tot = pow(26, n, mod)
return (tot - (a + b + c - ab - ac - bc + abc)) % mod
| Solution |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 88729,
"end": 93358
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(8638464332)
# Expected cdf values were computed with mpmath. For given x and c,
# x = mpmath.mpf(x)
# c = mpmath.mpf(c)
# cdf = mpmath.gammainc(c, 0, mpmath.exp(x),
# regularized=True)
@pytest.mark.parametrize('x, c, cdf',
[(1, 2, 0.7546378854206702),
(-1, 14, 6.768116452566383e-18),
(-745.1, 0.001, 0.4749605142005238),
(-800, 0.001, 0.44958802911019136),
(-725, 0.1, 3.4301205868273265e-32),
(-740, 0.75, 1.0074360436599631e-241)])
def test_cdf_ppf(self, x, c, cdf):
p = stats.loggamma.cdf(x, c)
assert_allclose(p, cdf, rtol=1e-13)
y = stats.loggamma.ppf(cdf, c)
assert_allclose(y, x, rtol=1e-13)
# Expected sf values were computed with mpmath. For given x and c,
# x = mpmath.mpf(x)
# c = mpmath.mpf(c)
# sf = mpmath.gammainc(c, mpmath.exp(x), mpmath.inf,
# regularized=True)
@pytest.mark.parametrize('x, c, sf',
[(4, 1.5, 1.6341528919488565e-23),
(6, 100, 8.23836829202024e-74),
(-800, 0.001, 0.5504119708898086),
(-743, 0.0025, 0.8437131370024089)])
def test_sf_isf(self, x, c, sf):
s = stats.loggamma.sf(x, c)
assert_allclose(s, sf, rtol=1e-13)
y = stats.loggamma.isf(sf, c)
assert_allclose(y, x, rtol=1e-13)
def test_logpdf(self):
# Test logpdf with x=-500, c=2. ln(gamma(2)) = 0, and
# exp(-500) ~= 7e-218, which is far smaller than the ULP
# of c*x=-1000, so logpdf(-500, 2) = c*x - exp(x) - ln(gamma(2))
# should give -1000.0.
lp = stats.loggamma.logpdf(-500, 2)
assert_allclose(lp, -1000.0, rtol=1e-14)
def test_logcdf(self):
x = 4.0
c = 4.5
logcdf = stats.loggamma.logcdf(x, c)
# Reference value computed with mpmath.
ref = -2.1429747073164531e-19
assert_allclose(logcdf, ref, rtol=5e-15)
def test_logsf(self):
x = -25.0
c = 3.5
logsf = stats.loggamma.logsf(x, c)
# Reference value computed with mpmath.
ref = -8.58200139319556e-40
assert_allclose(logsf, ref, rtol=5e-15)
def test_stats(self):
# The following precomputed values are from the table in section 2.2
# of "A Statistical Study of Log-Gamma Distribution", by Ping Shing
# Chan (thesis, McMaster University, 1993).
table = np.array([
# c, mean, var, skew, exc. kurt.
0.5, -1.9635, 4.9348, -1.5351, 4.0000,
1.0, -0.5772, 1.6449, -1.1395, 2.4000,
12.0, 2.4427, 0.0869, -0.2946, 0.1735,
]).reshape(-1, 5)
for c, mean, var, skew, kurt in table:
computed = stats.loggamma.stats(c, moments='msvk')
assert_array_almost_equal(computed, [mean, var, skew, kurt],
decimal=4)
@pytest.mark.parametrize('c', [0.1, 0.001])
def test_rvs(self, c):
# Regression test for gh-11094.
x = stats.loggamma.rvs(c, size=100000, random_state=self.rng)
# Before gh-11094 was fixed, the case with c=0.001 would
# generate many -inf values.
assert np.isfinite(x).all()
# Crude statistical test. About half the values should be
# less than the median and half greater than the median.
med = stats.loggamma.median(c)
btest = stats.binomtest(np.count_nonzero(x < med), len(x))
ci = btest.proportion_ci(confidence_level=0.999)
assert ci.low < 0.5 < ci.high
@pytest.mark.parametrize("c, ref",
[(1e-8, 19.420680753952364),
(1, 1.5772156649015328),
(1e4, -3.186214986116763),
(1e10, -10.093986931748889),
(1e100, -113.71031611649761)])
def test_entropy(self, c, ref):
# Reference values were calculated with mpmath
# from mpmath import mp
# mp.dps = 500
# def loggamma_entropy_mpmath(c):
# c = mp.mpf(c)
# return float(mp.log(mp.gamma(c)) + c * (mp.one - mp.digamma(c)))
assert_allclose(stats.loggamma.entropy(c), ref, rtol=1e-14)
| TestLoggamma |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 578554,
"end": 579145
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("ReleaseEdge"), graphql_name="edges")
nodes = sgqlc.types.Field(sgqlc.types.list_of("Release"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| ReleaseConnection |
python | coleifer__peewee | peewee.py | {
"start": 46627,
"end": 49140
} | class ____(ColumnBase):
no_coerce_functions = set(('sum', 'count', 'avg', 'cast', 'array_agg'))
def __init__(self, name, arguments, coerce=True, python_value=None):
self.name = name
self.arguments = arguments
self._filter = None
self._order_by = None
self._python_value = python_value
if name and name.lower() in self.no_coerce_functions:
self._coerce = False
else:
self._coerce = coerce
def __getattr__(self, attr):
def decorator(*args, **kwargs):
return Function(attr, args, **kwargs)
return decorator
@Node.copy
def filter(self, where=None):
self._filter = where
@Node.copy
def order_by(self, *ordering):
self._order_by = ordering
@Node.copy
def python_value(self, func=None):
self._python_value = func
def over(self, partition_by=None, order_by=None, start=None, end=None,
frame_type=None, window=None, exclude=None):
if isinstance(partition_by, Window) and window is None:
window = partition_by
if window is not None:
node = WindowAlias(window)
else:
node = Window(partition_by=partition_by, order_by=order_by,
start=start, end=end, frame_type=frame_type,
exclude=exclude, _inline=True)
return NodeList((self, SQL('OVER'), node))
def __sql__(self, ctx):
ctx.literal(self.name)
if not len(self.arguments):
ctx.literal('()')
else:
args = self.arguments
# If this is an ordered aggregate, then we will modify the last
# argument to append the ORDER BY ... clause. We do this to avoid
# double-wrapping any expression args in parentheses, as NodeList
# has a special check (hack) in place to work around this.
if self._order_by:
args = list(args)
args[-1] = NodeList((args[-1], SQL('ORDER BY'),
CommaNodeList(self._order_by)))
with ctx(in_function=True, function_arg_count=len(self.arguments)):
ctx.sql(EnclosedNodeList([
(arg if isinstance(arg, Node) else Value(arg, False))
for arg in args]))
if self._filter:
ctx.literal(' FILTER (WHERE ').sql(self._filter).literal(')')
return ctx
fn = Function(None, None)
| Function |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 64103,
"end": 64603
} | class ____(sgqlc.types.Enum):
"""The possible roles of a collaborator on a project.
Enumeration Choices:
* `ADMIN`: The collaborator can view, edit, and maange the
settings of the project
* `NONE`: The collaborator has no direct access to the project
* `READER`: The collaborator can view the project
* `WRITER`: The collaborator can view and edit the project
"""
__schema__ = github_schema
__choices__ = ("ADMIN", "NONE", "READER", "WRITER")
| ProjectV2Roles |
python | sympy__sympy | sympy/plotting/series.py | {
"start": 69280,
"end": 71877
} | class ____(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self, *args, **kwargs):
super().__init__(**kwargs)
self.use_cm = kwargs.get("use_cm", False)
# NOTE: why should SurfaceOver2DRangeSeries support is polar?
# After all, the same result can be achieve with
# ParametricSurfaceSeries. For example:
# sin(r) for (r, 0, 2 * pi) and (theta, 0, pi/2) can be parameterized
# as (r * cos(theta), r * sin(theta), sin(t)) for (r, 0, 2 * pi) and
# (theta, 0, pi/2).
# Because it is faster to evaluate (important for interactive plots).
self.is_polar = kwargs.get("is_polar", kwargs.get("polar", False))
self.surface_color = kwargs.get("surface_color", None)
self.color_func = kwargs.get("color_func", lambda x, y, z: z)
if callable(self.surface_color):
self.color_func = self.surface_color
self.surface_color = None
def _set_surface_label(self, label):
exprs = self.expr
self._label = str(exprs) if label is None else label
self._latex_label = latex(exprs) if label is None else label
# if the expressions is a lambda function and no label
# has been provided, then its better to do the following to avoid
# surprises on the backend
is_lambda = (callable(exprs) if not hasattr(exprs, "__iter__")
else any(callable(e) for e in exprs))
if is_lambda and (self._label == str(exprs)):
self._label = ""
self._latex_label = ""
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
nargs = arity(c)
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if nargs == 1:
return f(variables[0])
elif nargs == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if nargs == 1:
return f(variables[0])
elif nargs == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
if isinstance(self, SurfaceOver2DRangeSeries):
return c*np.ones(min(self.nb_of_points_x, self.nb_of_points_y))
else:
return c*np.ones(min(self.nb_of_points_u, self.nb_of_points_v))
| SurfaceBaseSeries |
python | gevent__gevent | src/greentest/3.10/test_httplib.py | {
"start": 61830,
"end": 71253
} | class ____(TestCase):
def setUp(self):
if not hasattr(client, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
from test.ssl_servers import make_https_server
return make_https_server(self, certfile=certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def test_networked(self):
# Default settings: requires a valid cert from a trusted CA
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
h = client.HTTPSConnection('self-signed.pythontest.net', 443)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_networked_noverification(self):
# Switch off cert verification
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
context = ssl._create_unverified_context()
h = client.HTTPSConnection('self-signed.pythontest.net', 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
h.close()
self.assertIn('nginx', resp.getheader('server'))
resp.close()
@support.system_must_validate_cert
def test_networked_trusted_by_default_cert(self):
# Default settings: requires a valid cert from a trusted CA
support.requires('network')
with socket_helper.transient_internet('www.python.org'):
h = client.HTTPSConnection('www.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
content_type = resp.getheader('content-type')
resp.close()
h.close()
self.assertIn('text/html', content_type)
def test_networked_good_cert(self):
# We feed the server's cert as a validating cert
import ssl
support.requires('network')
selfsigned_pythontestdotnet = 'self-signed.pythontest.net'
with socket_helper.transient_internet(selfsigned_pythontestdotnet):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(context.check_hostname, True)
context.load_verify_locations(CERT_selfsigned_pythontestdotnet)
try:
h = client.HTTPSConnection(selfsigned_pythontestdotnet, 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
except ssl.SSLError as ssl_err:
ssl_err_str = str(ssl_err)
# In the error message of [SSL: CERTIFICATE_VERIFY_FAILED] on
# modern Linux distros (Debian Buster, etc) default OpenSSL
# configurations it'll fail saying "key too weak" until we
# address https://bugs.python.org/issue36816 to use a proper
# key size on self-signed.pythontest.net.
if re.search(r'(?i)key.too.weak', ssl_err_str):
raise unittest.SkipTest(
f'Got {ssl_err_str} trying to connect '
f'to {selfsigned_pythontestdotnet}. '
'See https://bugs.python.org/issue36816.')
raise
server_string = resp.getheader('server')
resp.close()
h.close()
self.assertIn('nginx', server_string)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_unknown_cert(self):
# The custom cert isn't known to the default trust bundle
import ssl
server = self.make_server(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
import ssl
server = self.make_server(CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port, context=context)
self.addCleanup(h.close)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.addCleanup(resp.close)
self.assertEqual(resp.status, 404)
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
import ssl
server = self.make_server(CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_fakehostname)
h = client.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# Same with explicit check_hostname=True
with warnings_helper.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# With check_hostname=False, the mismatching is ignored
context.check_hostname = False
with warnings_helper.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=False)
h.request('GET', '/nonexistent')
resp = h.getresponse()
resp.close()
h.close()
self.assertEqual(resp.status, 404)
# The context's check_hostname setting is used if one isn't passed to
# HTTPSConnection.
context.check_hostname = False
h = client.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
resp.close()
h.close()
# Passing check_hostname to HTTPSConnection should override the
# context's setting.
with warnings_helper.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not available')
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:443", "www.python.org", 443),
("www.python.org:", "www.python.org", 443),
("www.python.org", "www.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
443)):
c = client.HTTPSConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_tls13_pha(self):
import ssl
if not ssl.HAS_TLSv1_3:
self.skipTest('TLS 1.3 support required')
# just check status of PHA flag
h = client.HTTPSConnection('localhost', 443)
self.assertTrue(h._context.post_handshake_auth)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertFalse(context.post_handshake_auth)
h = client.HTTPSConnection('localhost', 443, context=context)
self.assertIs(h._context, context)
self.assertFalse(h._context.post_handshake_auth)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'key_file, cert_file and check_hostname are deprecated',
DeprecationWarning)
h = client.HTTPSConnection('localhost', 443, context=context,
cert_file=CERT_localhost)
self.assertTrue(h._context.post_handshake_auth)
| HTTPSTest |
python | django__django | django/tasks/exceptions.py | {
"start": 241,
"end": 339
} | class ____(ImproperlyConfigured):
"""The provided Task backend is invalid."""
| InvalidTaskBackend |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_header_image02.py | {
"start": 315,
"end": 1110
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("header_image02.xlsx")
self.ignore_elements = {
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_header(
"&L&G&C&G",
{
"image_left": self.image_dir + "red.jpg",
"image_center": self.image_dir + "blue.jpg",
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | wandb__wandb | tests/system_tests/test_launch/test_launch_add.py | {
"start": 360,
"end": 18546
} | class ____:
def __init__(self, name):
self.name = name
@pytest.fixture
def push_to_run_queue_by_name_spy(wandb_backend_spy):
gql = wandb_backend_spy.gql
responder = gql.Capture()
wandb_backend_spy.stub_gql(
gql.Matcher(operation="pushToRunQueueByName"),
responder,
)
return responder
@pytest.fixture
def push_to_run_queue_spy(wandb_backend_spy):
gql = wandb_backend_spy.gql
responder = gql.Capture()
wandb_backend_spy.stub_gql(
gql.Matcher(operation="pushToRunQueue"),
responder,
)
return responder
@pytest.fixture
def mocked_fetchable_git_repo():
m = mock.Mock()
def fixture_open(path, mode="r"):
"""Return an opened fixture file."""
return open(fixture_path(path), mode)
def fixture_path(path):
print(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.pardir,
os.pardir,
"unit_tests_old",
"assets",
"fixtures",
path,
)
)
return os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.pardir,
os.pardir,
"unit_tests_old",
"assets",
"fixtures",
path,
)
def populate_dst_dir(dst_dir):
repo = mock.Mock()
reference = MockBranch("master")
repo.references = [reference]
def create_remote(o, r):
origin = mock.Mock()
origin.refs = {"master": mock.Mock()}
return origin
repo.create_remote = create_remote
repo.heads = {"master": mock.Mock()}
with open(os.path.join(dst_dir, "train.py"), "w") as f:
f.write(fixture_open("train.py").read())
with open(os.path.join(dst_dir, "requirements.txt"), "w") as f:
f.write(fixture_open("requirements.txt").read())
with open(os.path.join(dst_dir, "patch.txt"), "w") as f:
f.write("test")
return repo
m.Repo.init = mock.Mock(side_effect=populate_dst_dir)
mock_branch = MockBranch("master")
m.Repo.references = [mock_branch]
with mock.patch.dict("sys.modules", git=m):
yield m
def test_launch_add_delete_queued_run(
use_local_wandb_backend,
user,
test_settings,
):
_ = use_local_wandb_backend
queue = "default"
proj = "test2"
docker_image = "test/test:test"
entry_point = ["python", "/examples/examples/launch/launch-quickstart/train.py"]
settings = test_settings({"project": LAUNCH_DEFAULT_PROJECT})
api = InternalApi()
with wandb.init(settings=settings):
api.create_run_queue(
entity=user,
project=LAUNCH_DEFAULT_PROJECT,
queue_name=queue,
access="PROJECT",
)
queued_run = launch_add(
docker_image=docker_image,
entity=user,
project=proj,
queue_name=queue,
entry_point=entry_point,
config={"resource": "local-process"},
project_queue=LAUNCH_DEFAULT_PROJECT,
)
assert queued_run.state == "pending"
queued_run.delete()
def test_launch_add_default_specify(
user,
push_to_run_queue_by_name_spy,
push_to_run_queue_spy,
mocked_fetchable_git_repo,
):
proj = "test_project1"
docker_image = "test/test:test"
entry_point = ["python", "train.py"]
args = {
"docker_image": docker_image,
"project": proj,
"entity": user,
"queue_name": "default",
"entry_point": entry_point,
"resource": "local-container",
}
with wandb.init(settings=wandb.Settings(project=LAUNCH_DEFAULT_PROJECT)):
queued_run = launch_add(**args)
assert queued_run.id
assert queued_run.state == "pending"
assert queued_run.entity == args["entity"]
assert queued_run.project == args["project"]
assert queued_run.queue_name == args["queue_name"]
assert queued_run.project_queue == LAUNCH_DEFAULT_PROJECT
# below should fail for non-existent default queue,
# then fallback to legacy method
assert push_to_run_queue_by_name_spy.total_calls == 1
assert push_to_run_queue_spy.total_calls == 1
def test_launch_add_default_specify_project_queue(
push_to_run_queue_by_name_spy,
push_to_run_queue_spy,
user,
mocked_fetchable_git_repo,
):
proj = "test_project1"
docker_image = "test/test:test"
entry_point = ["python", "train.py"]
args = {
"docker_image": docker_image,
"project": proj,
"entity": user,
"queue_name": "default",
"entry_point": entry_point,
"resource": "local-container",
"project_queue": proj,
}
with wandb.init(settings=wandb.Settings(project=proj)):
queued_run = launch_add(**args)
assert queued_run.id
assert queued_run.state == "pending"
assert queued_run.entity == args["entity"]
assert queued_run.project == args["project"]
assert queued_run.queue_name == args["queue_name"]
assert queued_run.project_queue == proj
# below should fail for non-existent default queue,
# then fallback to legacy method
assert push_to_run_queue_by_name_spy.total_calls == 1
assert push_to_run_queue_spy.total_calls == 1
def test_push_to_runqueue_exists(
push_to_run_queue_by_name_spy,
push_to_run_queue_spy,
user,
mocked_fetchable_git_repo,
):
proj = "test_project2"
queue = "existing-queue"
uri = "https://github.com/FooBar/examples.git"
entry_point = ["python", "train.py"]
args = {
"uri": uri,
"project": proj,
"entity": user,
"queue": "default",
"entry_point": entry_point,
"resource": "local-process",
}
with wandb.init(settings=wandb.Settings(project=LAUNCH_DEFAULT_PROJECT)):
api = wandb.sdk.internal.internal_api.Api()
api.create_run_queue(
entity=user, project=LAUNCH_DEFAULT_PROJECT, queue_name=queue, access="USER"
)
result = api.push_to_run_queue(queue, args, None, LAUNCH_DEFAULT_PROJECT)
assert result["runQueueItemId"]
assert push_to_run_queue_by_name_spy.total_calls == 1
assert push_to_run_queue_spy.total_calls == 0
def test_push_to_default_runqueue_notexist(
use_local_wandb_backend,
user,
mocked_fetchable_git_repo,
):
_ = use_local_wandb_backend
api = wandb.sdk.internal.internal_api.Api()
proj = "test_project54"
uri = "https://github.com/FooBar/examples.git"
entry_point = ["python", "train.py"]
launch_spec = {
"uri": uri,
"entity": user,
"project": proj,
"entry_point": entry_point,
"resource": "local-process",
}
with wandb.init(settings=wandb.Settings(project=LAUNCH_DEFAULT_PROJECT)):
res = api.push_to_run_queue(
"nonexistent-queue",
launch_spec,
None,
LAUNCH_DEFAULT_PROJECT,
)
assert not res
def test_push_to_runqueue_old_server(
use_local_wandb_backend,
user,
monkeypatch,
mocked_fetchable_git_repo,
test_settings,
):
_ = use_local_wandb_backend
proj = "test_project0"
queue = "existing-queue"
uri = "https://github.com/FooBar/examples.git"
entry_point = ["python", "train.py"]
args = {
"uri": uri,
"project": proj,
"entity": user,
"queue": "default",
"entry_point": entry_point,
"resource": "local-process",
}
settings = test_settings({"project": LAUNCH_DEFAULT_PROJECT})
monkeypatch.setattr(
"wandb.sdk.internal.internal_api.Api.push_to_run_queue_by_name",
lambda *args: None,
)
with wandb.init(settings=settings):
api = wandb.sdk.internal.internal_api.Api()
api.create_run_queue(
entity=user, project=LAUNCH_DEFAULT_PROJECT, queue_name=queue, access="USER"
)
result = api.push_to_run_queue(queue, args, None, LAUNCH_DEFAULT_PROJECT)
assert result["runQueueItemId"]
def test_launch_add_with_priority(
push_to_run_queue_by_name_spy,
push_to_run_queue_spy,
runner,
user,
monkeypatch,
):
def patched_push_to_run_queue_introspection(*args, **kwargs):
args[0].server_supports_template_variables = True
args[0].server_push_to_run_queue_supports_priority = True
return (True, True)
monkeypatch.setattr(
wandb.sdk.internal.internal_api.Api,
"push_to_run_queue_introspection",
patched_push_to_run_queue_introspection,
)
def patched_create_run_queue_introspection(*args, **kwargs):
args[0].server_create_run_queue_supports_drc = True
args[0].server_create_run_queue_supports_priority = True
return (True, True, True)
monkeypatch.setattr(
wandb.sdk.internal.internal_api.Api,
"create_run_queue_introspection",
patched_create_run_queue_introspection,
)
queue_name = "prio_queue"
proj = "test1"
queue_config = {}
base_config = {}
with runner.isolated_filesystem():
api = PublicApi(api_key=user)
api.create_run_queue(
entity=user,
name=queue_name,
type="local-container",
config=queue_config,
prioritization_mode="V0",
)
_ = launch_add(
project=proj,
entity=user,
queue_name=queue_name,
docker_image="abc:latest",
config=base_config,
priority=0,
)
assert push_to_run_queue_by_name_spy.total_calls == 1
assert push_to_run_queue_spy.total_calls == 0
def test_launch_add_with_priority_to_no_prio_queue_raises(
use_local_wandb_backend,
runner,
user,
monkeypatch,
):
_ = use_local_wandb_backend
def patched_push_to_run_queue_introspection(*args, **kwargs):
args[0].server_supports_template_variables = True
args[0].server_push_to_run_queue_supports_priority = True
return (True, True)
monkeypatch.setattr(
wandb.sdk.internal.internal_api.Api,
"push_to_run_queue_introspection",
patched_push_to_run_queue_introspection,
)
# Backend returns 4xx if you attempt to push an item with
# non-default priority to a queue that doesn't support priority
def patched_push_to_run_queue_by_name(*args, **kwargs):
return None
monkeypatch.setattr(
wandb.sdk.internal.internal_api.Api,
"push_to_run_queue_by_name",
patched_push_to_run_queue_by_name,
)
queue_name = "no_prio_queue"
proj = "test1"
queue_config = {}
base_config = {}
with runner.isolated_filesystem():
api = PublicApi(api_key=user)
api.create_run_queue(
entity=user,
name=queue_name,
type="local-container",
config=queue_config,
)
with pytest.raises(LaunchError):
_ = launch_add(
project=proj,
entity=user,
queue_name=queue_name,
docker_image="abc:latest",
config=base_config,
priority=0,
)
def test_launch_add_template_variables(
push_to_run_queue_by_name_spy,
push_to_run_queue_spy,
runner,
user,
):
queue_name = "tvqueue"
proj = "test1"
queue_config = {"e": ["{{var1}}"]}
queue_template_variables = {
"var1": {"schema": {"type": "string", "enum": ["a", "b"]}}
}
template_variables = {"var1": "a"}
base_config = {"template_variables": {"var1": "b"}}
with runner.isolated_filesystem():
api = PublicApi(api_key=user)
api.create_run_queue(
entity=user,
name=queue_name,
type="local-container",
config=queue_config,
template_variables=queue_template_variables,
)
_ = launch_add(
template_variables=template_variables,
project=proj,
entity=user,
queue_name=queue_name,
docker_image="abc:latest",
config=base_config,
)
assert push_to_run_queue_spy.total_calls == 0
requests = push_to_run_queue_by_name_spy.requests
assert len(requests) == 1
assert requests[0].variables["templateVariableValues"] == '{"var1": "a"}'
def test_launch_add_template_variables_legacy_push(
push_to_run_queue_by_name_spy,
push_to_run_queue_spy,
runner,
user,
monkeypatch,
):
queue_name = "tvqueue"
proj = "test1"
queue_config = {"e": ["{{var1}}"]}
queue_template_variables = {
"var1": {"schema": {"type": "string", "enum": ["a", "b"]}}
}
template_variables = {"var1": "a"}
monkeypatch.setattr(
wandb.sdk.internal.internal_api.Api,
"push_to_run_queue_by_name",
lambda *args, **kwargs: None,
)
with runner.isolated_filesystem():
api = PublicApi(api_key=user)
api.create_run_queue(
entity=user,
name=queue_name,
type="local-container",
config=queue_config,
template_variables=queue_template_variables,
)
_ = launch_add(
template_variables=template_variables,
project=proj,
entity=user,
queue_name=queue_name,
docker_image="abc:latest",
)
assert push_to_run_queue_spy.total_calls == 1
assert push_to_run_queue_by_name_spy.total_calls == 0
def test_launch_add_template_variables_not_supported(user, monkeypatch):
queue_name = "tvqueue"
proj = "test1"
queue_config = {"e": ["{{var1}}"]}
template_variables = {"var1": "a"}
def patched_push_to_run_queue_introspection(*args, **kwargs):
args[0].server_supports_template_variables = False
return (False, False)
monkeypatch.setattr(
wandb.sdk.internal.internal_api.Api,
"push_to_run_queue_introspection",
patched_push_to_run_queue_introspection,
)
api = PublicApi(api_key=user)
api.create_run_queue(
entity=user,
name=queue_name,
type="local-container",
config=queue_config,
)
with pytest.raises(UnsupportedError):
_ = launch_add(
template_variables=template_variables,
project=proj,
entity=user,
queue_name=queue_name,
docker_image="abc:latest",
)
def test_launch_add_template_variables_not_supported_legacy_push(
runner, user, monkeypatch
):
queue_name = "tvqueue"
proj = "test1"
queue_config = {"e": ["{{var1}}"]}
template_variables = {"var1": "a"}
def patched_push_to_run_queue_introspection(*args, **kwargs):
args[0].server_supports_template_variables = False
return (False, False)
monkeypatch.setattr(
wandb.sdk.internal.internal_api.Api,
"push_to_run_queue_introspection",
patched_push_to_run_queue_introspection,
)
monkeypatch.setattr(
wandb.sdk.internal.internal_api.Api,
"push_to_run_queue_by_name",
lambda *args, **kwargs: None,
)
with runner.isolated_filesystem():
api = PublicApi(api_key=user)
api.create_run_queue(
entity=user,
name=queue_name,
type="local-container",
config=queue_config,
)
with pytest.raises(UnsupportedError):
_ = launch_add(
template_variables=template_variables,
project=proj,
entity=user,
queue_name=queue_name,
docker_image="abc:latest",
)
def test_display_updated_runspec(
use_local_wandb_backend,
user,
test_settings,
monkeypatch,
):
_ = use_local_wandb_backend
queue = "default"
proj = "test1"
entry_point = ["python", "/examples/examples/launch/launch-quickstart/train.py"]
settings = test_settings({"project": proj})
api = InternalApi()
def push_with_drc(
api, queue_name, launch_spec, template_variables, project_queue, priority
):
# mock having a DRC
res = api.push_to_run_queue(
queue_name, launch_spec, template_variables, project_queue, priority
)
res["runSpec"] = launch_spec
res["runSpec"]["resource_args"] = {"kubernetes": {"volume": "x/awda/xxx"}}
return res
monkeypatch.setattr(
wandb.sdk.launch._launch_add,
"push_to_queue",
lambda *args, **kwargs: push_with_drc(*args, **kwargs),
)
with wandb.init(settings=settings):
api.create_run_queue(
entity=user,
project=proj,
queue_name=queue,
access="PROJECT",
)
_ = launch_add(
docker_image="test/test:test",
entity=user,
project=proj,
entry_point=entry_point,
repository="testing123",
config={"resource": "kubernetes"},
project_queue=proj,
)
def test_container_queued_run(monkeypatch, user):
def patched_push_to_run_queue_by_name(*args, **kwargs):
return {"runQueueItemId": "1"}
monkeypatch.setattr(
wandb.sdk.internal.internal_api.Api,
"push_to_run_queue_by_name",
lambda *arg, **kwargs: patched_push_to_run_queue_by_name(*arg, **kwargs),
)
monkeypatch.setattr(
wandb.PublicApi,
"_artifact",
lambda *arg, **kwargs: "artifact",
)
queued_run = launch_add(job="test/test/test-job:v0")
assert queued_run
def test_job_dne(monkeypatch, user):
def patched_push_to_run_queue_by_name(*args, **kwargs):
return {"runQueueItemId": "1"}
monkeypatch.setattr(
wandb.sdk.internal.internal_api.Api,
"push_to_run_queue_by_name",
lambda *arg, **kwargs: patched_push_to_run_queue_by_name(*arg, **kwargs),
)
with pytest.raises(LaunchError):
launch_add(job="test/test/test-job:v0")
| MockBranch |
python | GoogleCloudPlatform__python-docs-samples | functions/v2/ocr/main_test.py | {
"start": 725,
"end": 3870
} | class ____:
@mock.patch.object(main, "publisher")
@mock.patch.object(main, "translate_client")
@mock.patch.object(main, "vision_client")
def test_detect_text(
self, mock_vision_client, mock_translate_client, mock_publisher
):
mock_annotation = mock.MagicMock()
mock_annotation.description = "sample text"
mock_annotations = mock.MagicMock()
mock_annotations.text_annotations = [mock_annotation]
mock_vision_client.text_detection.return_value = mock_annotations
mock_translate_client.detect_language.return_value = {"language": "en"}
mock_future = concurrent.futures.Future()
mock_future.set_result(True)
mock_publisher.publish.return_value = mock_future
main.detect_text("sample-bucket", "sample-file")
@mock.patch.object(main, "detect_text")
def test_process_image(self, m):
m.return_value = None
attributes = {
"type": "google.cloud.storage.object.v1.finalized",
"source": "https://example.com/event-producer",
}
data = {"bucket": "sample-bucket", "name": "sample-file"}
event = CloudEvent(attributes, data)
main.process_image(event)
@mock.patch.object(main, "publisher")
@mock.patch.object(main, "translate_client")
def test_translate_text(self, mock_translate_client, mock_publisher):
mock_translate_client.translate.return_value = {"translatedText": ""}
mock_future = concurrent.futures.Future()
mock_future.set_result(True)
mock_publisher.publish.return_value = mock_future
attributes = {
"type": "google.cloud.pubsub.topic.v1.messagePublished",
"source": "https://example.com/event-producer",
}
data = {
"message": {
"data": base64.b64encode(
json.dumps(
{
"text": "menu",
"filename": "sample-file",
"lang": "es",
"src_lang": "en",
}
).encode("utf-8")
)
}
}
event = CloudEvent(attributes, data)
main.translate_text(event)
@mock.patch.object(main, "storage_client")
def test_save_result(self, m):
bucket = m.bucket.return_value
file = bucket.file.return_value
file.save.return_value = None
attributes = {
"type": "google.cloud.pubsub.topic.v1.messagePublished",
"source": "https://example.com/event-producer",
}
data = {
"message": {
"data": base64.b64encode(
json.dumps(
{
"text": "menu",
"filename": "sample-file",
"lang": "fr",
}
).encode("utf-8")
)
}
}
event = CloudEvent(attributes, data)
main.save_result(event)
| TestGCFPyOCRSample |
python | google__jax | jax/_src/dtypes.py | {
"start": 1948,
"end": 2302
} | class ____(extended):
"""Scalar class for PRNG Key dtypes.
This is an abstract class that should never be instantiated, but rather
exists for the sake of `jnp.issubdtype`.
Examples:
>>> from jax import random
>>> from jax import dtypes
>>> key = random.key(0)
>>> jnp.issubdtype(key.dtype, dtypes.prng_key)
True
"""
| prng_key |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 798033,
"end": 806040
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"app",
"company_url",
"configuration_resource_path",
"configuration_url",
"documentation_url",
"extended_description",
"extended_description_html",
"full_description",
"full_description_html",
"has_published_free_trial_plans",
"has_terms_of_service",
"has_verified_owner",
"how_it_works",
"how_it_works_html",
"installation_url",
"installed_for_viewer",
"is_archived",
"is_draft",
"is_paid",
"is_public",
"is_rejected",
"is_unverified",
"is_unverified_pending",
"is_verification_pending_from_draft",
"is_verification_pending_from_unverified",
"is_verified",
"logo_background_color",
"logo_url",
"name",
"normalized_short_description",
"pricing_url",
"primary_category",
"privacy_policy_url",
"resource_path",
"screenshot_urls",
"secondary_category",
"short_description",
"slug",
"status_url",
"support_email",
"support_url",
"terms_of_service_url",
"url",
"viewer_can_add_plans",
"viewer_can_approve",
"viewer_can_delist",
"viewer_can_edit",
"viewer_can_edit_categories",
"viewer_can_edit_plans",
"viewer_can_redraft",
"viewer_can_reject",
"viewer_can_request_approval",
"viewer_has_purchased",
"viewer_has_purchased_for_all_organizations",
"viewer_is_listing_admin",
)
app = sgqlc.types.Field(App, graphql_name="app")
company_url = sgqlc.types.Field(URI, graphql_name="companyUrl")
configuration_resource_path = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="configurationResourcePath"
)
configuration_url = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="configurationUrl"
)
documentation_url = sgqlc.types.Field(URI, graphql_name="documentationUrl")
extended_description = sgqlc.types.Field(String, graphql_name="extendedDescription")
extended_description_html = sgqlc.types.Field(
sgqlc.types.non_null(HTML), graphql_name="extendedDescriptionHTML"
)
full_description = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="fullDescription"
)
full_description_html = sgqlc.types.Field(
sgqlc.types.non_null(HTML), graphql_name="fullDescriptionHTML"
)
has_published_free_trial_plans = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="hasPublishedFreeTrialPlans"
)
has_terms_of_service = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="hasTermsOfService"
)
has_verified_owner = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="hasVerifiedOwner"
)
how_it_works = sgqlc.types.Field(String, graphql_name="howItWorks")
how_it_works_html = sgqlc.types.Field(
sgqlc.types.non_null(HTML), graphql_name="howItWorksHTML"
)
installation_url = sgqlc.types.Field(URI, graphql_name="installationUrl")
installed_for_viewer = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="installedForViewer"
)
is_archived = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isArchived"
)
is_draft = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isDraft")
is_paid = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isPaid")
is_public = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isPublic"
)
is_rejected = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isRejected"
)
is_unverified = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isUnverified"
)
is_unverified_pending = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isUnverifiedPending"
)
is_verification_pending_from_draft = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isVerificationPendingFromDraft"
)
is_verification_pending_from_unverified = sgqlc.types.Field(
sgqlc.types.non_null(Boolean),
graphql_name="isVerificationPendingFromUnverified",
)
is_verified = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isVerified"
)
logo_background_color = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="logoBackgroundColor"
)
logo_url = sgqlc.types.Field(
URI,
graphql_name="logoUrl",
args=sgqlc.types.ArgDict(
(("size", sgqlc.types.Arg(Int, graphql_name="size", default=400)),)
),
)
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
normalized_short_description = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="normalizedShortDescription"
)
pricing_url = sgqlc.types.Field(URI, graphql_name="pricingUrl")
primary_category = sgqlc.types.Field(
sgqlc.types.non_null(MarketplaceCategory), graphql_name="primaryCategory"
)
privacy_policy_url = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="privacyPolicyUrl"
)
resource_path = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="resourcePath"
)
screenshot_urls = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(String)), graphql_name="screenshotUrls"
)
secondary_category = sgqlc.types.Field(
MarketplaceCategory, graphql_name="secondaryCategory"
)
short_description = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="shortDescription"
)
slug = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="slug")
status_url = sgqlc.types.Field(URI, graphql_name="statusUrl")
support_email = sgqlc.types.Field(String, graphql_name="supportEmail")
support_url = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="supportUrl"
)
terms_of_service_url = sgqlc.types.Field(URI, graphql_name="termsOfServiceUrl")
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
viewer_can_add_plans = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanAddPlans"
)
viewer_can_approve = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanApprove"
)
viewer_can_delist = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanDelist"
)
viewer_can_edit = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanEdit"
)
viewer_can_edit_categories = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanEditCategories"
)
viewer_can_edit_plans = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanEditPlans"
)
viewer_can_redraft = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanRedraft"
)
viewer_can_reject = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanReject"
)
viewer_can_request_approval = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerCanRequestApproval"
)
viewer_has_purchased = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerHasPurchased"
)
viewer_has_purchased_for_all_organizations = sgqlc.types.Field(
sgqlc.types.non_null(Boolean),
graphql_name="viewerHasPurchasedForAllOrganizations",
)
viewer_is_listing_admin = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerIsListingAdmin"
)
| MarketplaceListing |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess4.py | {
"start": 359,
"end": 395
} | class ____(Mixin1):
item = "hi"
| B1 |
python | pytorch__pytorch | torch/testing/_internal/common_fsdp.py | {
"start": 2427,
"end": 2620
} | class ____(Enum):
# No FSDP wrapping
NO_FSDP = auto()
# FSDP recursive wrapping
RECURSIVE = auto()
# TODO: FSDP non-recursive wrapping
# NONRECURSIVE = auto()
| FSDPInitMode |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/table.py | {
"start": 1577,
"end": 1806
} | class ____(graphene.ObjectType):
schema = graphene.NonNull(GrapheneTableSchema)
records = non_null_list(graphene.String) # each element is one record serialized as JSON
class Meta:
name = "Table"
| GrapheneTable |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/snowflake_datasource.py | {
"start": 13351,
"end": 33846
} | class ____(SQLDatasource):
"""Adds a Snowflake datasource to the data context.
Args:
name: The name of this Snowflake datasource.
connection_string: The SQLAlchemy connection string used to connect to the Snowflake database.
For example: "snowflake://<user_login_name>:<password>@<account_identifier>"
assets: An optional dictionary whose keys are TableAsset or QueryAsset names and whose values
are TableAsset or QueryAsset objects.
""" # noqa: E501 # FIXME CoP
type: Literal["snowflake"] = "snowflake" # type: ignore[assignment] # FIXME CoP
# TODO: rename this to `connection` for v1?
connection_string: Union[ConnectionDetails, KeyPairConnectionDetails, ConfigUri, SnowflakeDsn] # type: ignore[assignment] # Deviation from parent class as individual args are supported for connection
# TODO: add props for user, password, etc?
@property
def account(self) -> AccountIdentifier | None:
"""Convenience property to get the `account` regardless of the connection string format."""
if isinstance(
self.connection_string, (ConnectionDetails, KeyPairConnectionDetails, SnowflakeDsn)
):
return self.connection_string.account
subbed_str: str | None = _get_config_substituted_connection_string(
self, warning_msg="Unable to determine account"
)
if not subbed_str:
return None
hostname = urllib.parse.urlparse(subbed_str).hostname
if hostname:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=GxDatasourceWarning)
return AccountIdentifier(hostname)
return None
@property
def schema_(self) -> str | None:
"""
Convenience property to get the `schema` regardless of the connection string format.
`schema_` to avoid conflict with Pydantic models schema property.
"""
if isinstance(
self.connection_string, (ConnectionDetails, KeyPairConnectionDetails, SnowflakeDsn)
):
return to_lower_if_not_quoted(self.connection_string.schema_)
subbed_str: str | None = _get_config_substituted_connection_string(
self, warning_msg="Unable to determine schema"
)
if not subbed_str:
return None
url_path: str = urllib.parse.urlparse(subbed_str).path
return to_lower_if_not_quoted(_get_database_and_schema_from_path(url_path)["schema"])
@property
def database(self) -> str | None:
"""Convenience property to get the `database` regardless of the connection string format."""
if isinstance(
self.connection_string, (ConnectionDetails, KeyPairConnectionDetails, SnowflakeDsn)
):
return self.connection_string.database
subbed_str: str | None = _get_config_substituted_connection_string(
self, warning_msg="Unable to determine database"
)
if not subbed_str:
return None
url_path: str = urllib.parse.urlparse(subbed_str).path
return _get_database_and_schema_from_path(url_path)["database"]
@property
def warehouse(self) -> str | None:
"""
Convenience property to get the `warehouse` regardless of the connection string format.
"""
if isinstance(
self.connection_string, (ConnectionDetails, KeyPairConnectionDetails, SnowflakeDsn)
):
return self.connection_string.warehouse
subbed_str: str | None = _get_config_substituted_connection_string(
self, warning_msg="Unable to determine warehouse"
)
if not subbed_str:
return None
return urllib.parse.parse_qs(urllib.parse.urlparse(subbed_str).query).get(
"warehouse", [None]
)[0]
@property
def role(self) -> str | None:
"""Convenience property to get the `role` regardless of the connection string format."""
if isinstance(
self.connection_string, (ConnectionDetails, KeyPairConnectionDetails, SnowflakeDsn)
):
return self.connection_string.role
subbed_str: str | None = _get_config_substituted_connection_string(
self, warning_msg="Unable to determine role"
)
if not subbed_str:
return None
return urllib.parse.parse_qs(urllib.parse.urlparse(subbed_str).query).get("role", [None])[0]
@property
def private_key(self) -> str | ConfigStr | None:
"""Convenience property to get the `private_key` regardless of
the connection string format.
"""
if isinstance(self.connection_string, KeyPairConnectionDetails):
return self.connection_string.private_key
elif "private_key" in self.kwargs:
return self.kwargs["private_key"]
else:
return None
@override
def test_connection(self, test_assets: bool = True) -> None:
"""Test the connection for the SnowflakeDatasource.
Args:
test_assets: If assets have been passed to the SQLDatasource,
whether to test them as well.
Raises:
TestConnectionError: If the connection test fails.
"""
try:
super().test_connection(test_assets=test_assets)
except TestConnectionError as e:
if self.account and not self.account.match:
raise TestConnectionError(
message=e.__class__.__name__,
addendum=AccountIdentifier.MSG_TEMPLATE.format(
value=self.account,
formats=AccountIdentifier.FORMATS,
),
) from e
raise
@deprecated_method_or_class(
version="1.0.0a4",
message="`schema_name` is deprecated. The schema now comes from the datasource.",
)
@public_api
@override
def add_table_asset(
self,
name: str,
table_name: str = "",
schema_name: Optional[str] = MISSING, # type: ignore[assignment] # sentinel value
batch_metadata: Optional[BatchMetadata] = None,
) -> TableAsset:
"""Adds a table asset to this datasource.
Args:
name: The name of this table asset.
table_name: The table where the data resides.
schema_name: The schema that holds the table. Will use the datasource schema if not
provided.
batch_metadata: BatchMetadata we want to associate with this DataAsset and all batches
derived from it.
Returns:
The table asset that is added to the datasource.
The type of this object will match the necessary type for this datasource.
"""
if schema_name is MISSING:
# using MISSING to indicate that the user did not provide a value
schema_name = self.schema_
else:
# deprecated-v0.18.16
warnings.warn(
"The `schema_name argument` is deprecated and will be removed in a future release."
" The schema now comes from the datasource.",
category=DeprecationWarning,
)
if schema_name != self.schema_:
warnings.warn(
f"schema_name {schema_name} does not match datasource schema {self.schema_}",
category=GxDatasourceWarning,
)
return super().add_table_asset(
name=name,
table_name=table_name,
schema_name=schema_name,
batch_metadata=batch_metadata,
)
@pydantic.root_validator(pre=True)
def _convert_root_connection_detail_fields(cls, values: dict) -> dict:
"""
Convert root level connection detail fields to a ConnectionDetails compatible object.
This preserves backwards compatibility with the previous implementation of SnowflakeDatasource.
It also allows for users to continue to provide connection details in the
`context.data_sources.add_snowflake()` factory functions without nesting it in a
`connection_string` dict.
""" # noqa: E501 # FIXME CoP
connection_detail_fields: set[str] = {
"schema", # field name in ConnectionDetails is schema_ (with underscore)
*ConnectionDetails.__fields__.keys(),
*KeyPairConnectionDetails.__fields__.keys(),
}
connection_string: Any | None = values.get("connection_string")
provided_fields = tuple(values.keys())
connection_details = {}
for field_name in provided_fields:
if field_name in connection_detail_fields:
if connection_string:
raise ValueError( # noqa: TRY003 # FIXME CoP
"Provided both connection detail keyword args and `connection_string`."
)
connection_details[field_name] = values.pop(field_name)
if connection_details:
values["connection_string"] = connection_details
return values
@pydantic.validator("connection_string", pre=True)
def _check_config_template(cls, connection_string: Any) -> Any:
"""
If connection_string has a config template, parse it as a ConfigUri, ignore other errors.
"""
if isinstance(connection_string, str):
if ConfigUri.str_contains_config_template(connection_string):
LOGGER.debug("`connection_string` contains config template")
return pydantic.parse_obj_as(ConfigUri, connection_string)
return connection_string
@pydantic.root_validator
def _check_xor_input_args(cls, values: dict) -> dict:
# keeping this validator isn't strictly necessary, but it provides a better error message
connection_string: str | ConfigUri | ConnectionDetails | KeyPairConnectionDetails | None = (
values.get("connection_string")
)
if connection_string:
# Method 1 - connection string
is_connection_string: bool = isinstance(
connection_string, (str, ConfigStr, SnowflakeDsn)
)
# Method 2 - individual args (account, user, and password are bare minimum)
has_min_connection_detail_values: bool = isinstance(
connection_string, ConnectionDetails
) and bool(
connection_string.account and connection_string.user and connection_string.password
)
# Method 3 - individual args for key pair auth
# (account, user, and private_key are bare minimum)
has_min_keypair_detail_values: bool = isinstance(
connection_string, KeyPairConnectionDetails
) and bool(
connection_string.account
and connection_string.user
and connection_string.private_key
)
if (
is_connection_string
or has_min_connection_detail_values
or has_min_keypair_detail_values
):
return values
raise ValueError( # noqa: TRY003 # FIXME CoP
"Must provide either a connection string or"
f" a combination of {', '.join(ConnectionDetails.required_fields())} as keyword args"
f" or a combination of {', '.join(KeyPairConnectionDetails.required_fields())} "
f"as keyword args."
)
@pydantic.validator("connection_string")
def _check_for_required_query_params(
cls,
connection_string: ConnectionDetails | KeyPairConnectionDetails | SnowflakeDsn | ConfigUri,
) -> ConnectionDetails | KeyPairConnectionDetails | SnowflakeDsn | ConfigUri:
"""
If connection_string is a SnowflakeDsn,
check for required query parameters according to `REQUIRED_QUERY_PARAMS`.
"""
if not isinstance(connection_string, (SnowflakeDsn, ConfigUri)):
return connection_string
missing_keys: set[str] = set(REQUIRED_QUERY_PARAMS)
if connection_string.query:
query_params: dict[str, list[str]] = urllib.parse.parse_qs(connection_string.query)
for key in REQUIRED_QUERY_PARAMS:
if key in query_params:
missing_keys.remove(key)
if missing_keys:
raise _UrlMissingQueryError(
msg=f"missing {', '.join(sorted(missing_keys))}",
)
return connection_string
@pydantic.validator("kwargs")
def _validate_and_process_kwargs(cls, kwargs: dict) -> dict:
"""
Validate and process kwargs.
- Warn if private_key is found in kwargs['connect_args']
- Base64 encode private_key if present
"""
if connect_args := kwargs.get("connect_args", {}):
if "private_key" in connect_args:
msg = (
"Passing 'private_key' via kwargs['connect_args'] is deprecated. "
"Please pass 'private_key' as a keyword argument directly to add_snowflake(), "
"update_snowflake(), or add_or_update_snowflake(). "
"See https://docs.greatexpectations.io/docs/reference/datasources/"
"snowflake for more information."
)
# deprecated-v1.8.0
warnings.warn(
msg,
category=DeprecationWarning,
stacklevel=4,
)
if private_key := connect_args.get("private_key"):
# test if it's already base64 encoded
if _is_b64_encoded(private_key):
LOGGER.info("private_key is already base64 encoded")
else:
LOGGER.info("private_key is not base64 encoded, encoding now")
connect_args["private_key"] = base64.standard_b64encode(private_key)
return kwargs
class Config:
@staticmethod
def schema_extra(schema: dict, model: type[SnowflakeDatasource]) -> None:
"""
Customize jsonschema for SnowflakeDatasource.
https://docs.pydantic.dev/1.10/usage/schema/#schema-customization
Change connection_string to be a string or a dict, but not both.
"""
connection_string_prop = schema["properties"]["connection_string"].pop("anyOf")
schema["properties"]["connection_string"].update({"oneOf": connection_string_prop})
def _get_connect_args(self) -> dict[str, str | bool]:
excluded_fields: set[str] = set(SQLDatasource.__fields__.keys())
# dump as json dict to force serialization of things like AnyUrl
return self._json_dict(exclude=excluded_fields, exclude_none=True)
def _get_snowflake_partner_application(self) -> str:
"""
This is used to set the application query parameter in the Snowflake connection URL,
which provides attribution to GX for the Snowflake Partner program.
"""
# This import is here to avoid a circular import
from great_expectations.data_context import CloudDataContext
if isinstance(self._data_context, CloudDataContext):
return SNOWFLAKE_PARTNER_APPLICATION_CLOUD
return SNOWFLAKE_PARTNER_APPLICATION_OSS
def _get_url_args(self) -> dict[str, str | bool]:
excluded_fields: set[str] = set(SQLDatasource.__fields__.keys())
# dump as json dict to force serialization of things like AnyUrl
return self._json_dict(exclude=excluded_fields, exclude_none=True)
@override
def get_execution_engine(self) -> SqlAlchemyExecutionEngine:
"""
Overrides get_execution_engine in Datasource
Standard behavior is to assume all top-level Datasource config (unless part of `cls._EXTRA_EXCLUDED_EXEC_ENG_ARGS`)
should be passed to the GX ExecutionEngine constructor.
for SQLAlchemy this would lead to creating 2 different `sqlalchemy.engine.Engine` objects
one for the Datasource and one for the ExecutionEngine. This is wasteful and causes multiple connections to
the database to be created.
For Snowflake specifically we may represent the connection_string as a dict, which is not supported by SQLAlchemy.
""" # noqa: E501 # FIXME CoP
gx_execution_engine_type: Type[SqlAlchemyExecutionEngine] = self.execution_engine_type
connection_string: str | None = (
self.connection_string if isinstance(self.connection_string, str) else None
)
gx_exec_engine = gx_execution_engine_type(
self.name,
connection_string=connection_string,
engine=self.get_engine(),
create_temp_table=self.create_temp_table,
data_context=self._data_context,
)
self._execution_engine = gx_exec_engine
return gx_exec_engine
@override
def get_engine(self) -> sqlalchemy.Engine:
if self.connection_string != self._cached_connection_string or not self._engine:
try:
model_dict = self.dict(
exclude=self._get_exec_engine_excludes(),
config_provider=self._config_provider,
)
_check_config_substitutions_needed(
self, model_dict, raise_warning_if_provider_not_present=True
)
kwargs = model_dict.pop("kwargs", {})
connection_string: str | dict = model_dict.pop("connection_string")
if isinstance(connection_string, str):
url = sa.engine.url.make_url(connection_string)
url = url.update_query_dict(
query_parameters={"application": self._get_snowflake_partner_application()}
)
self._engine = self._build_engine_with_connect_args(url=url, **kwargs)
else:
self._engine = self._build_engine_with_connect_args(
application=self._get_snowflake_partner_application(),
**connection_string,
**kwargs,
)
except Exception as e:
# connection_string passed pydantic validation, but fails to create sqla engine
# Possible case is a missing plugin (e.g. snowflake-sqlalchemy)
if IS_SNOWFLAKE_INSTALLED:
raise SQLAlchemyCreateEngineError(
cause=e, addendum=str(SNOWFLAKE_NOT_IMPORTED)
) from e
raise SQLAlchemyCreateEngineError(cause=e) from e
# connection string isn't strictly required for Snowflake, so we conditionally cache
if isinstance(self.connection_string, str):
self._cached_connection_string = self.connection_string
return self._engine
def _build_engine_with_connect_args(
    self,
    url: SnowflakeURL | None = None,
    connect_args: dict[str, Any] | None = None,
    **kwargs,
) -> sqlalchemy.Engine:
    """Create a SQLAlchemy engine from a pre-built URL or from URL components.

    Args:
        url: A fully formed Snowflake URL. When omitted, the URL is assembled
            from ``self._get_url_args()`` merged with ``**kwargs``.
        connect_args: Driver-level arguments forwarded to ``sa.create_engine``.
            A ``private_key`` entry (assumed base64 encoded) takes precedence
            over password authentication.
        **kwargs: Extra URL arguments; only consulted when ``url`` is omitted.

    Returns:
        A newly created SQLAlchemy engine.
    """
    url_args: dict[str, Any] = {}
    if not url:
        url_args = self._get_url_args()
        url_args.update(kwargs)
        # Percent-encode the password so special characters survive URL parsing.
        pw = url_args.get("password")
        if isinstance(pw, str):
            url_args["password"] = quote(pw)
        url = SnowflakeURL(**url_args)
    engine_kwargs: dict[Literal["url", "connect_args"], Any] = {}
    if connect_args:
        private_key = connect_args.get("private_key")
        if private_key:
            # Key-pair auth supersedes the password.
            # TODO: update models + validation to handle this
            url_args.pop("password", None)
            LOGGER.info(
                "private_key detected,"
                " ignoring password and using private_key for authentication"
            )
            # assume the private_key is base64 encoded
            connect_args["private_key"] = base64.standard_b64decode(private_key)
        engine_kwargs["connect_args"] = connect_args
    engine_kwargs["url"] = url
    return sa.create_engine(**engine_kwargs)  # type: ignore[misc] # FIXME CoP
| SnowflakeDatasource |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed6.py | {
"start": 746,
"end": 1025
} | class ____(TypedDict, extra_items=int):
num: int
def func4(p1: IntDict2):
# This should generate an error.
d1: dict[str, int] = p1
# This should generate an error.
m1: MutableMapping[str, int] = p1
# This should generate an error.
func1(p1)
| IntDict2 |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/typing_extensions/test_backported_types.py | {
"start": 6697,
"end": 6751
} | class ____(TypedDict, total=True):
author: str
| Story |
python | pandas-dev__pandas | pandas/tests/indexes/datetimes/test_indexing.py | {
"start": 546,
"end": 3606
} | class ____:
def test_getitem_slice_keeps_name(self):
# GH4226
st = Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles")
et = Timestamp("2013-07-02 00:00:00", tz="America/Los_Angeles")
dr = date_range(st, et, freq="h", name="timebucket")
assert dr[1:].name == dr.name
@pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
def test_getitem(self, tz):
idx = date_range("2011-01-01", "2011-01-31", freq="D", tz=tz, name="idx")
result = idx[0]
assert result == Timestamp("2011-01-01", tz=idx.tz)
result = idx[0:5]
expected = date_range(
"2011-01-01", "2011-01-05", freq="D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = date_range(
"2011-01-01", "2011-01-09", freq="2D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = date_range(
"2011-01-12", "2011-01-24", freq="3D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
dtype=idx.dtype,
freq="-1D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
@pytest.mark.parametrize("freq", ["B", "C"])
def test_dti_business_getitem(self, freq):
rng = bdate_range(START, END, freq=freq)
smaller = rng[:5]
exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq=freq)
tm.assert_index_equal(smaller, exp)
assert smaller.freq == exp.freq
assert smaller.freq == rng.freq
sliced = rng[::5]
assert sliced.freq == to_offset(freq) * 5
fancy_indexed = rng[[4, 3, 2, 1, 0]]
assert len(fancy_indexed) == 5
assert isinstance(fancy_indexed, DatetimeIndex)
assert fancy_indexed.freq is None
# 32-bit vs. 64-bit platforms
assert rng[4] == rng[np_long(4)]
@pytest.mark.parametrize("freq", ["B", "C"])
def test_dti_business_getitem_matplotlib_hackaround(self, freq):
rng = bdate_range(START, END, freq=freq)
with pytest.raises(ValueError, match="Multi-dimensional indexing"):
# GH#30588 multi-dimensional indexing deprecated
rng[:, None]
def test_getitem_int_list(self):
dti = date_range(start="1/1/2005", end="12/1/2005", freq="ME")
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
assert v1 == Timestamp("2/28/2005")
assert v2 == Timestamp("4/30/2005")
assert v3 == Timestamp("6/30/2005")
# getitem with non-slice drops freq
assert dti2.freq is None
| TestGetItem |
python | doocs__leetcode | solution/0800-0899/0893.Groups of Special-Equivalent Strings/Solution.py | {
"start": 0,
"end": 180
} | class ____:
def numSpecialEquivGroups(self, words: List[str]) -> int:
s = {''.join(sorted(word[::2]) + sorted(word[1::2])) for word in words}
return len(s)
| Solution |
python | pytorch__pytorch | test/inductor/test_minifier.py | {
"start": 7476,
"end": 10901
} | class ____(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(10, 16)
self.relu = torch.nn.ReLU()
self.sigmoid = torch.nn.Sigmoid()
def forward(self, inp, *, k):
x = inp["x"]
y = inp["y"]
x = self.fc1(x)
y = self.fc1(y)
k = self.fc1(k)
x = self.relu(x)
x = self.sigmoid(x)
return x + y + k
with torch.no_grad():
model = Model().to("{device}")
val = torch.randn(8, 10).to("{device}")
example_inputs = ({{"x": val.clone(), "y": val.clone()}},)
kwargs = {{"k": val.clone()}}
ep = torch.export.export(
model, example_inputs, kwargs
)
torch._inductor.aoti_compile_and_package(ep)
"""
return self._run_full_test(
run_code, "aot_inductor", expected_error, isolate=False
)
def _aoti_check_relu_repro(self, res):
assert res is not None
ep_file_path = res.get_exported_program_path()
assert ep_file_path is not None
gm = export_load(ep_file_path).module(check_guards=False)
self.assertExpectedInline(
str(gm.code).strip(),
"""\
def forward(self, linear):
linear, = fx_pytree.tree_flatten_spec(([linear], {}), self._in_spec)
relu = torch.ops.aten.relu.default(linear); linear = None
return pytree.tree_unflatten((relu,), self._out_spec)""",
)
@unittest.skipIf(IS_JETSON, "Fails on Jetson")
@inductor_config.patch(
"cpp.inject_relu_bug_TESTING_ONLY",
"compile_error",
)
def test_aoti_cpu_compile_error(self):
res = self._test_aoti("cpu", "CppCompileError")
self._aoti_check_relu_repro(res)
@unittest.skipIf(IS_JETSON, "Fails on Jetson")
@inductor_config.patch(
"cpp.inject_relu_bug_TESTING_ONLY",
"compile_error",
)
def test_aoti_cpu_compile_error_unflatten(self):
res = self._test_aoti_unflattened_inputs("cpu", "CppCompileError")
self._aoti_check_relu_repro(res)
@requires_gpu
@inductor_config.patch(
"triton.inject_relu_bug_TESTING_ONLY",
"compile_error",
)
def test_aoti_gpu_compile_error(self):
res = self._test_aoti(GPU_TYPE, "SyntaxError")
self._aoti_check_relu_repro(res)
@requires_gpu
@inductor_config.patch(
"triton.inject_relu_bug_TESTING_ONLY",
"compile_error",
)
def test_aoti_gpu_compile_error_unflatten(self):
res = self._test_aoti_unflattened_inputs(GPU_TYPE, "SyntaxError")
self._aoti_check_relu_repro(res)
@unittest.skipIf(IS_JETSON, "Fails on Jetson")
@inductor_config.patch("cpp.inject_relu_bug_TESTING_ONLY", "accuracy")
def test_aoti_cpu_accuracy_error(self):
res = self._test_aoti("cpu", "AccuracyError")
self._aoti_check_relu_repro(res)
@requires_gpu
@inductor_config.patch("triton.inject_relu_bug_TESTING_ONLY", "accuracy")
def test_aoti_gpu_accuracy_error(self):
res = self._test_aoti(GPU_TYPE, "AccuracyError")
self._aoti_check_relu_repro(res)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
# Skip CI tests on mac since CPU inductor does not seem to work due to C++ compile errors,
# also skip on ASAN due to https://github.com/pytorch/pytorch/issues/98262
if not IS_MACOS and not TEST_WITH_ASAN:
run_tests()
| Model |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/schemas/test_user_schema.py | {
"start": 1815,
"end": 2328
} | class ____:
@pytest.fixture(autouse=True)
def setup_attrs(self, configured_app) -> None:
self.app = configured_app
self.client = self.app.test_client()
self.role = self.app.appbuilder.sm.find_role("TestRole")
self.session = self.app.appbuilder.session
def teardown_method(self):
user = self.session.scalars(select(User).where(User.email == TEST_EMAIL)).first()
if user:
self.session.delete(user)
self.session.commit()
| TestUserBase |
python | run-llama__llama_index | llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py | {
"start": 2180,
"end": 2275
} | class ____(RunErrorEvent, Event):
type: EventType = EventType.RUN_ERROR
| RunErrorWorkflowEvent |
python | celery__celery | t/unit/tasks/test_tasks.py | {
"start": 1225,
"end": 1456
} | class ____(Task):
autoretry_for = (Exception,)
dont_autoretry_for = (TypeError,)
retry_kwargs = {'max_retries': 5}
retry_backoff = True
retry_backoff_max = 700
retry_jitter = False
| TaskWithRetryButForTypeError |
python | tornadoweb__tornado | tornado/test/websocket_test.py | {
"start": 6001,
"end": 6103
} | class ____(TestWebSocketHandler):
def open(self):
raise Exception("boom")
| ErrorInOpenHandler |
python | tensorflow__tensorflow | tensorflow/core/platform/ram_file_system_test.py | {
"start": 1175,
"end": 4248
} | class ____(test_util.TensorFlowTestCase):
def test_create_and_delete_directory(self):
file_io.create_dir_v2('ram://testdirectory')
file_io.delete_recursively_v2('ram://testdirectory')
def test_create_and_delete_directory_tree_recursive(self):
file_io.create_dir_v2('ram://testdirectory')
file_io.create_dir_v2('ram://testdirectory/subdir1')
file_io.create_dir_v2('ram://testdirectory/subdir2')
file_io.create_dir_v2('ram://testdirectory/subdir1/subdir3')
with gfile.GFile('ram://testdirectory/subdir1/subdir3/a.txt', 'w') as f:
f.write('Hello, world.')
file_io.delete_recursively_v2('ram://testdirectory')
self.assertEqual(gfile.Glob('ram://testdirectory/*'), [])
def test_write_file(self):
with gfile.GFile('ram://a.txt', 'w') as f:
f.write('Hello, world.')
f.write('Hello, world.')
with gfile.GFile('ram://a.txt', 'r') as f:
self.assertEqual(f.read(), 'Hello, world.' * 2)
def test_append_file_with_seek(self):
with gfile.GFile('ram://c.txt', 'w') as f:
f.write('Hello, world.')
with gfile.GFile('ram://c.txt', 'w+') as f:
f.seek(offset=0, whence=2)
f.write('Hello, world.')
with gfile.GFile('ram://c.txt', 'r') as f:
self.assertEqual(f.read(), 'Hello, world.' * 2)
def test_list_dir(self):
for i in range(10):
with gfile.GFile('ram://a/b/%d.txt' % i, 'w') as f:
f.write('')
with gfile.GFile('ram://c/b/%d.txt' % i, 'w') as f:
f.write('')
matches = ['%d.txt' % i for i in range(10)]
self.assertEqual(gfile.ListDirectory('ram://a/b/'), matches)
def test_glob(self):
for i in range(10):
with gfile.GFile('ram://a/b/%d.txt' % i, 'w') as f:
f.write('')
with gfile.GFile('ram://c/b/%d.txt' % i, 'w') as f:
f.write('')
matches = ['ram://a/b/%d.txt' % i for i in range(10)]
self.assertEqual(gfile.Glob('ram://a/b/*'), matches)
matches = []
self.assertEqual(gfile.Glob('ram://b/b/*'), matches)
matches = ['ram://c/b/%d.txt' % i for i in range(10)]
self.assertEqual(gfile.Glob('ram://c/b/*'), matches)
def test_file_exists(self):
with gfile.GFile('ram://exists/a/b/c.txt', 'w') as f:
f.write('')
self.assertTrue(gfile.Exists('ram://exists/a'))
self.assertTrue(gfile.Exists('ram://exists/a/b'))
self.assertTrue(gfile.Exists('ram://exists/a/b/c.txt'))
self.assertFalse(gfile.Exists('ram://exists/b'))
self.assertFalse(gfile.Exists('ram://exists/a/c'))
self.assertFalse(gfile.Exists('ram://exists/a/b/k'))
def test_savedmodel(self):
if platform.system() == 'Windows':
self.skipTest('RAM FS not fully supported on Windows.')
class MyModule(module.Module):
@def_function.function(input_signature=[])
def foo(self):
return constant_op.constant([1])
saved_model.save(MyModule(), 'ram://my_module')
loaded = saved_model.load('ram://my_module')
self.assertAllEqual(loaded.foo(), [1])
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| RamFilesystemTest |
python | doocs__leetcode | lcof2/剑指 Offer II 032. 有效的变位词/Solution.py | {
"start": 0,
"end": 169
} | class ____:
def isAnagram(self, s: str, t: str) -> bool:
if len(s) != len(t) or s == t:
return False
return Counter(s) == Counter(t)
| Solution |
python | django__django | tests/check_framework/test_model_checks.py | {
"start": 9243,
"end": 13556
} | class ____(TestCase):
def test_collision_in_same_model(self):
class Model(models.Model):
class Meta:
constraints = [
models.CheckConstraint(condition=models.Q(id__gt=0), name="foo"),
models.CheckConstraint(condition=models.Q(id__lt=100), name="foo"),
]
self.assertEqual(
checks.run_checks(app_configs=self.apps.get_app_configs()),
[
Error(
"constraint name 'foo' is not unique for model "
"check_framework.Model.",
id="models.E031",
),
],
)
def test_collision_in_different_models(self):
constraint = models.CheckConstraint(condition=models.Q(id__gt=0), name="foo")
class Model1(models.Model):
class Meta:
constraints = [constraint]
class Model2(models.Model):
class Meta:
constraints = [constraint]
self.assertEqual(
checks.run_checks(app_configs=self.apps.get_app_configs()),
[
Error(
"constraint name 'foo' is not unique among models: "
"check_framework.Model1, check_framework.Model2.",
id="models.E032",
),
],
)
def test_collision_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
constraints = [
models.CheckConstraint(condition=models.Q(id__gt=0), name="foo")
]
abstract = True
class Model1(AbstractModel):
pass
class Model2(AbstractModel):
pass
self.assertEqual(
checks.run_checks(app_configs=self.apps.get_app_configs()),
[
Error(
"constraint name 'foo' is not unique among models: "
"check_framework.Model1, check_framework.Model2.",
id="models.E032",
),
],
)
def test_no_collision_abstract_model_interpolation(self):
class AbstractModel(models.Model):
class Meta:
constraints = [
models.CheckConstraint(
condition=models.Q(id__gt=0), name="%(app_label)s_%(class)s_foo"
),
]
abstract = True
class Model1(AbstractModel):
pass
class Model2(AbstractModel):
pass
self.assertEqual(checks.run_checks(app_configs=self.apps.get_app_configs()), [])
@modify_settings(INSTALLED_APPS={"append": "basic"})
@isolate_apps("basic", "check_framework", kwarg_name="apps")
def test_collision_across_apps(self, apps):
constraint = models.CheckConstraint(condition=models.Q(id__gt=0), name="foo")
class Model1(models.Model):
class Meta:
app_label = "basic"
constraints = [constraint]
class Model2(models.Model):
class Meta:
app_label = "check_framework"
constraints = [constraint]
self.assertEqual(
checks.run_checks(app_configs=apps.get_app_configs()),
[
Error(
"constraint name 'foo' is not unique among models: "
"basic.Model1, check_framework.Model2.",
id="models.E032",
),
],
)
@modify_settings(INSTALLED_APPS={"append": "basic"})
@isolate_apps("basic", "check_framework", kwarg_name="apps")
def test_no_collision_across_apps_interpolation(self, apps):
constraint = models.CheckConstraint(
condition=models.Q(id__gt=0), name="%(app_label)s_%(class)s_foo"
)
class Model1(models.Model):
class Meta:
app_label = "basic"
constraints = [constraint]
class Model2(models.Model):
class Meta:
app_label = "check_framework"
constraints = [constraint]
self.assertEqual(checks.run_checks(app_configs=apps.get_app_configs()), [])
| ConstraintNameTests |
python | pytorch__pytorch | test/dynamo/test_functions.py | {
"start": 73320,
"end": 73710
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3]"):
l_x_ = L_x_
sum_1: "f32[]" = l_x_.sum(); l_x_ = None
gt: "b8[]" = sum_1 > 0; sum_1 = None
return (gt,)
""",
)
else:
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
| GraphModule |
python | Textualize__textual | src/textual/widgets/_rich_log.py | {
"start": 1445,
"end": 12192
} | class ____(ScrollView, can_focus=True):
"""A widget for logging Rich renderables and text."""
DEFAULT_CSS = """
RichLog{
background: $surface;
color: $foreground;
overflow-y: scroll;
&:focus {
background-tint: $foreground 5%;
}
}
"""
max_lines: var[int | None] = var[Optional[int]](None)
min_width: var[int] = var(78)
wrap: var[bool] = var(False)
highlight: var[bool] = var(False)
markup: var[bool] = var(False)
auto_scroll: var[bool] = var(True)
def __init__(
self,
*,
max_lines: int | None = None,
min_width: int = 78,
wrap: bool = False,
highlight: bool = False,
markup: bool = False,
auto_scroll: bool = True,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
) -> None:
"""Create a `RichLog` widget.
Args:
max_lines: Maximum number of lines in the log or `None` for no maximum.
min_width: Width to use for calls to `write` with no specified `width`.
wrap: Enable word wrapping (default is off).
highlight: Automatically highlight content. By default, the `ReprHighlighter` is used.
To customize highlighting, set `highlight=True` and then set the `highlighter`
attribute to an instance of `Highlighter`.
markup: Apply Rich console markup.
auto_scroll: Enable automatic scrolling to end.
name: The name of the text log.
id: The ID of the text log in the DOM.
classes: The CSS classes of the text log.
disabled: Whether the text log is disabled or not.
"""
super().__init__(name=name, id=id, classes=classes, disabled=disabled)
self.max_lines = max_lines
"""Maximum number of lines in the log or `None` for no maximum."""
self._start_line: int = 0
self.lines: list[Strip] = []
"""The lines currently visible in the log."""
self._line_cache: LRUCache[tuple[int, int, int, int], Strip]
self._line_cache = LRUCache(1024)
self._deferred_renders: deque[DeferredRender] = deque()
"""Queue of deferred renderables to be rendered."""
self.min_width = min_width
"""Minimum width of renderables."""
self.wrap = wrap
"""Enable word wrapping."""
self.highlight = highlight
"""Automatically highlight content."""
self.markup = markup
"""Apply Rich console markup."""
self.auto_scroll = auto_scroll
"""Automatically scroll to the end on write."""
self.highlighter: Highlighter = ReprHighlighter()
"""Rich Highlighter used to highlight content when highlight is True"""
self._widest_line_width = 0
"""The width of the widest line currently in the log."""
self._size_known = False
"""Flag which is set to True when the size of the RichLog is known,
indicating we can proceed with rendering deferred writes."""
def notify_style_update(self) -> None:
super().notify_style_update()
self._line_cache.clear()
def on_resize(self, event: Resize) -> None:
if event.size.width and not self._size_known:
# This size is known for the first time.
self._size_known = True
deferred_renders = self._deferred_renders
while deferred_renders:
deferred_render = deferred_renders.popleft()
self.write(*deferred_render)
def get_content_width(self, container: Size, viewport: Size) -> int:
if self._size_known:
return self.virtual_size.width
else:
return container.width
def _make_renderable(self, content: RenderableType | object) -> RenderableType:
"""Make content renderable.
Args:
content: Content to render.
Returns:
A Rich renderable.
"""
renderable: RenderableType
if not is_renderable(content):
renderable = Pretty(content)
else:
if isinstance(content, str):
if self.markup:
renderable = Text.from_markup(content)
else:
renderable = Text(content)
if self.highlight:
renderable = self.highlighter(renderable)
else:
renderable = cast(RenderableType, content)
if isinstance(renderable, Text):
renderable.expand_tabs()
return renderable
def write(
self,
content: RenderableType | object,
width: int | None = None,
expand: bool = False,
shrink: bool = True,
scroll_end: bool | None = None,
animate: bool = False,
) -> Self:
"""Write a string or a Rich renderable to the bottom of the log.
Notes:
The rendering of content will be deferred until the size of the `RichLog` is known.
This means if you call `write` in `compose` or `on_mount`, the content will not be
rendered immediately.
Args:
content: Rich renderable (or a string).
width: Width to render, or `None` to use `RichLog.min_width`.
If specified, `expand` and `shrink` will be ignored.
expand: Permit expanding of content to the width of the content region of the RichLog.
If `width` is specified, then `expand` will be ignored.
shrink: Permit shrinking of content to fit within the content region of the RichLog.
If `width` is specified, then `shrink` will be ignored.
scroll_end: Enable automatic scroll to end, or `None` to use `self.auto_scroll`.
animate: Enable animation if the log will scroll.
Returns:
The `RichLog` instance.
"""
if not self._size_known:
# We don't know the size yet, so we'll need to render this later.
# We defer ALL writes until the size is known, to ensure ordering is preserved.
if isinstance(content, Text):
content = content.copy()
self._deferred_renders.append(
DeferredRender(content, width, expand, shrink, scroll_end)
)
return self
renderable = self._make_renderable(content)
auto_scroll = self.auto_scroll if scroll_end is None else scroll_end
console = self.app.console
render_options = console.options
if isinstance(renderable, Text) and not self.wrap:
render_options = render_options.update(overflow="ignore", no_wrap=True)
if width is not None:
# Use the width specified by the caller.
# We ignore `expand` and `shrink` when a width is specified.
# This also overrides `min_width` set on the RichLog.
render_width = width
else:
# Compute the width based on available information.
renderable_width = measure_renderables(
console, render_options, [renderable]
).maximum
render_width = renderable_width
scrollable_content_width = self.scrollable_content_region.width
if expand and renderable_width < scrollable_content_width:
# Expand the renderable to the width of the scrollable content region.
render_width = max(renderable_width, scrollable_content_width)
if shrink and renderable_width > scrollable_content_width:
# Shrink the renderable down to fit within the scrollable content region.
render_width = min(renderable_width, scrollable_content_width)
# The user has not supplied a width, so make sure min_width is respected.
render_width = max(render_width, self.min_width)
render_options = render_options.update_width(render_width)
# Render into (possibly) wrapped lines.
segments = self.app.console.render(renderable, render_options)
lines = list(Segment.split_lines(segments))
if not lines:
self._widest_line_width = max(render_width, self._widest_line_width)
self.lines.append(Strip.blank(render_width))
else:
strips = Strip.from_lines(lines)
for strip in strips:
strip.adjust_cell_length(render_width)
self.lines.extend(strips)
if self.max_lines is not None and len(self.lines) > self.max_lines:
self._start_line += len(self.lines) - self.max_lines
self.refresh()
self.lines = self.lines[-self.max_lines :]
# Compute the width after wrapping and trimming
# TODO - this is wrong because if we trim a long line, the max width
# could decrease, but we don't look at which lines were trimmed here.
self._widest_line_width = max(
self._widest_line_width,
max(sum([segment.cell_length for segment in _line]) for _line in lines),
)
# Update the virtual size - the width may have changed after adding
# the new line(s), and the height will definitely have changed.
self.virtual_size = Size(self._widest_line_width, len(self.lines))
if auto_scroll:
self.scroll_end(animate=animate, immediate=False, x_axis=False)
return self
def clear(self) -> Self:
"""Clear the text log.
Returns:
The `RichLog` instance.
"""
self.lines.clear()
self._line_cache.clear()
self._start_line = 0
self._widest_line_width = 0
self._deferred_renders.clear()
self.virtual_size = Size(0, len(self.lines))
self.refresh()
return self
def render_line(self, y: int) -> Strip:
scroll_x, scroll_y = self.scroll_offset
line = self._render_line(
scroll_y + y, scroll_x, self.scrollable_content_region.width
)
strip = line.apply_style(self.rich_style)
return strip
def _render_line(self, y: int, scroll_x: int, width: int) -> Strip:
if y >= len(self.lines):
return Strip.blank(width, self.rich_style)
key = (y + self._start_line, scroll_x, width, self._widest_line_width)
if key in self._line_cache:
return self._line_cache[key]
line = self.lines[y].crop_extend(scroll_x, scroll_x + width, self.rich_style)
self._line_cache[key] = line
return line
| RichLog |
python | doocs__leetcode | solution/1300-1399/1377.Frog Position After T Seconds/Solution.py | {
"start": 0,
"end": 731
} | class ____:
def frogPosition(
self, n: int, edges: List[List[int]], t: int, target: int
) -> float:
g = defaultdict(list)
for u, v in edges:
g[u].append(v)
g[v].append(u)
q = deque([(1, 1.0)])
vis = [False] * (n + 1)
vis[1] = True
while q and t >= 0:
for _ in range(len(q)):
u, p = q.popleft()
cnt = len(g[u]) - int(u != 1)
if u == target:
return p if cnt * t == 0 else 0
for v in g[u]:
if not vis[v]:
vis[v] = True
q.append((v, p / cnt))
t -= 1
return 0
| Solution |
python | doocs__leetcode | lcof2/剑指 Offer II 067. 最大的异或/Solution2.py | {
"start": 0,
"end": 649
} | class ____:
def __init__(self):
self.children = [None] * 2
def insert(self, x):
node = self
for i in range(30, -1, -1):
v = (x >> i) & 1
if node.children[v] is None:
node.children[v] = Trie()
node = node.children[v]
def search(self, x):
node = self
res = 0
for i in range(30, -1, -1):
v = (x >> i) & 1
if node.children[v ^ 1]:
res = res << 1 | 1
node = node.children[v ^ 1]
else:
res <<= 1
node = node.children[v]
return res
| Trie |
python | Farama-Foundation__Gymnasium | gymnasium/envs/tabular/blackjack.py | {
"start": 16473,
"end": 17769
} | class ____(FunctionalJaxEnv, EzPickle):
"""A Gymnasium Env wrapper for the functional blackjack env."""
metadata = {"render_modes": ["rgb_array"], "render_fps": 50, "jax": True}
def __init__(self, render_mode: str | None = None, **kwargs):
"""Initializes Gym wrapper for blackjack functional env."""
EzPickle.__init__(self, render_mode=render_mode, **kwargs)
env = BlackjackFunctional(**kwargs)
env.transform(jax.jit)
super().__init__(
env,
metadata=self.metadata,
render_mode=render_mode,
)
# Pixel art from Mariia Khmelnytska (https://www.123rf.com/photo_104453049_stock-vector-pixel-art-playing-cards-standart-deck-vector-set.html)
# Jax structure inspired by https://medium.com/@ngoodger_7766/writing-an-rl-environment-in-jax-9f74338898ba
if __name__ == "__main__":
"""
Temporary environment tester function.
"""
env = HumanRendering(BlackJackJaxEnv(render_mode="rgb_array"))
obs, info = env.reset()
print(obs, info)
terminal = False
while not terminal:
action = int(input("Please input an action\n"))
obs, reward, terminal, truncated, info = env.step(action)
print(obs, reward, terminal, truncated, info)
exit()
| BlackJackJaxEnv |
python | numba__numba | numba/cpython/hashing.py | {
"start": 13826,
"end": 13929
} | class ____(Structure):
_fields_ = [
('k0', c_uint64),
('k1', c_uint64),
]
| SIPHASH |
python | astropy__astropy | astropy/coordinates/tests/test_representation_arithmetic.py | {
"start": 36984,
"end": 38681
} | class ____:
def setup_method(self):
s = SphericalRepresentation(
lon=[0.0, 6.0, 21.0] * u.hourangle,
lat=[0.0, -30.0, 85.0] * u.deg,
distance=[1, 2, 3] * u.kpc,
)
self.s = s
self.r = s.represent_as(RadialRepresentation)
self.e = s.unit_vectors()
self.sf = s.scale_factors()
def test_name(self):
assert RadialDifferential.name == "radial"
assert RadialDifferential.name in DIFFERENTIAL_CLASSES
def test_simple_differentials(self):
r, s, e, sf = self.r, self.s, self.e, self.sf
o_distance = RadialDifferential(1.0 * u.mpc)
# Can be applied to RadialRepresentation, though not most useful.
r_distance = r + o_distance
assert_quantity_allclose(
r_distance.distance, r.distance + o_distance.d_distance
)
r_distance2 = o_distance + r
assert_quantity_allclose(
r_distance2.distance, r.distance + o_distance.d_distance
)
# More sense to apply it relative to spherical representation.
o_distancec = o_distance.to_cartesian(base=s)
assert_quantity_allclose(
o_distancec[0].xyz, [1e-6, 0.0, 0.0] * u.kpc, atol=1.0 * u.npc
)
o_recover = RadialDifferential.from_cartesian(o_distancec, base=s)
assert_quantity_allclose(o_recover.d_distance, o_distance.d_distance)
s_distance = s + 1.0 * u.mpc * sf["distance"] * e["distance"]
assert_representation_allclose(o_distancec, s_distance - s, atol=1 * u.npc)
s_distance2 = s + o_distance
assert_representation_allclose(s_distance2, s_distance)
| TestRadialDifferential |
python | kamyu104__LeetCode-Solutions | Python/generate-tag-for-video-caption.py | {
"start": 604,
"end": 877
} | class ____(object):
def generateTag(self, caption):
"""
:type caption: str
:rtype: str
"""
L = 100
return ('#'+"".join(x.lower() if i == 0 else x[0].upper()+x[1:].lower() for i, x in enumerate(caption.split())))[:L]
| Solution2 |
python | prabhupant__python-ds | data_structures/binary_trees/check_if_path_exists.py | {
"start": 80,
"end": 538
} | class ____:
def __init__(self, val):
self.val = val
self.right = None
self.left = None
def check_path(root, arr, n, index):
if root is None:
return n == 0
if root.left == None and root.right == None and root.val == arr[index] and index == n -1:
return True
return (index < n) and (root.val == arr[index]) and (check_path(root.left, arr, n, index + 1) or check_path(root.right, arr, n, index + 1))
| Node |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/test/steps/common.py | {
"start": 3645,
"end": 6827
} | class ____(VersionCheck):
context: ConnectorContext
title = "Connector version increment check"
BYPASS_CHECK_FOR = [
METADATA_FILE_NAME,
"acceptance-test-config.yml",
"README.md",
"bootstrap.md",
".dockerignore",
"unit_tests",
"integration_tests",
"src/test",
"src/test-integration",
"src/test-performance",
"build.gradle",
"erd",
"build_customization.py",
]
@property
def should_run(self) -> bool:
# Skip if connector opts out of version checks
if self.context.metadata and self.context.metadata.get("ab_internal", {}).get("requireVersionIncrementsInPullRequests") is False:
return False
for filename in self.context.modified_files:
relative_path = str(filename).replace(str(self.context.connector.code_directory) + "/", "")
if not any([relative_path.startswith(to_bypass) for to_bypass in self.BYPASS_CHECK_FOR]):
return True
return False
def is_version_not_incremented(self) -> bool:
return self.master_connector_version >= self.current_connector_version
def get_failure_message_for_no_increment(self) -> str:
return (
f"The dockerImageTag in {METADATA_FILE_NAME} was not incremented. "
f"Master version is {self.master_connector_version}, current version is {self.current_connector_version}"
)
def are_both_versions_release_candidates(self) -> bool:
return bool(
self.master_connector_version.prerelease
and self.current_connector_version.prerelease
and "rc" in self.master_connector_version.prerelease
and "rc" in self.current_connector_version.prerelease
)
def have_same_major_minor_patch(self) -> bool:
return (
self.master_connector_version.major == self.current_connector_version.major
and self.master_connector_version.minor == self.current_connector_version.minor
and self.master_connector_version.patch == self.current_connector_version.patch
)
def validate(self) -> StepResult:
if self.is_version_not_incremented():
return self._get_failure_result(
(
f"The dockerImageTag in {METADATA_FILE_NAME} was not incremented. "
f"Master version is {self.master_connector_version}, current version is {self.current_connector_version}"
)
)
if self.are_both_versions_release_candidates():
if not self.have_same_major_minor_patch():
return self._get_failure_result(
(
f"Master and current version are release candidates but they have different major, minor or patch versions. "
f"Release candidates should only differ in the prerelease part. Master version is {self.master_connector_version}, "
f"current version is {self.current_connector_version}"
)
)
return self.success_result
| VersionIncrementCheck |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes5.py | {
"start": 5724,
"end": 5997
} | class ____:
test1: int = 1
test2: int | None = None
test3: int
@property
def test4(self) -> int:
return 3
test5: int
test6: Any
test7: int
# This should generate 4 errors if reportIncompatibleVariableOverride
# is enabled.
| PeerClass2 |
python | getsentry__sentry | tests/sentry/issues/auto_source_code_config/test_process_event.py | {
"start": 23603,
"end": 24803
} | class ____(LanguageSpecificDeriveCodeMappings):
platform = "go"
def test_auto_source_code_config_go_abs_filename(self) -> None:
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["sentry/capybara.go"]},
frames=[self.frame("/Users/JohnDoe/code/sentry/capybara.go", True)],
platform=self.platform,
expected_new_code_mappings=[self.code_mapping("/Users/JohnDoe/code/", "")],
)
def test_auto_source_code_config_go_long_abs_filename(self) -> None:
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["sentry/kangaroo.go"]},
frames=[self.frame("/Users/JohnDoe/code/sentry/kangaroo.go", True)],
platform=self.platform,
expected_new_code_mappings=[self.code_mapping("/Users/JohnDoe/code/", "")],
)
def test_auto_source_code_config_similar_but_incorrect_file(self) -> None:
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["not-sentry/main.go"]},
frames=[self.frame("Users/JohnDoe/src/sentry/main.go", True)],
platform=self.platform,
)
| TestGoDeriveCodeMappings |
python | django__django | django/template/defaulttags.py | {
"start": 13806,
"end": 14332
} | class ____(Node):
def __init__(self, partial_name, partial_mapping):
# Defer lookup in `partial_mapping` and nodelist to runtime.
self.partial_name = partial_name
self.partial_mapping = partial_mapping
def render(self, context):
try:
return self.partial_mapping[self.partial_name].render(context)
except KeyError:
raise TemplateSyntaxError(
f"Partial '{self.partial_name}' is not defined in the current template."
)
| PartialNode |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 57684,
"end": 59259
} | class ____(ConstNode):
type = PyrexTypes.c_bint_type
# The constant value True or False
def __init__(self, pos, value: bool, type=None):
assert value is True or value is False, repr(value)
super().__init__(pos, value=value, constant_result=value)
if type is not None and type is not self.type:
assert type is Builtin.bool_type, type
self.type = type
def calculate_constant_result(self):
self.constant_result = self.value
def compile_time_value(self, denv):
return self.value
def calculate_result_code(self):
if self.type.is_pyobject:
return 'Py_True' if self.value else 'Py_False'
else:
return '1' if self.value else '0'
def coerce_to_pyobject(self, env):
if self.type.is_pyobject:
return self
return BoolNode(self.pos, value=self.value, type=Builtin.bool_type)
def coerce_to_boolean(self, env):
if self.type.is_int:
return self
return BoolNode(self.pos, value=self.value)
def coerce_to(self, dst_type, env):
if dst_type == self.type:
return self
if dst_type is py_object_type and self.type is Builtin.bool_type:
return self
if dst_type.is_pyobject and self.type.is_int:
return BoolNode(self.pos, value=self.value, type=Builtin.bool_type)
if dst_type.is_int and self.type.is_pyobject:
return BoolNode(self.pos, value=self.value)
return ConstNode.coerce_to(self, dst_type, env)
| BoolNode |
python | pytorch__pytorch | test/inductor/test_external_callables.py | {
"start": 1005,
"end": 3240
} | class ____(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._saved_config = config.save_config()
def tearDown(self):
super().tearDown()
config.load_config(self._saved_config)
def test_matmul_cpu(self):
# 2I + 2I == (2I)(2I)
x = torch.eye(128, 128) * 2
opt_fn = torch.compile(
MatMulModule(),
options={"max_autotune": True, "external_matmul": [matmul_cpu]},
)
opt_fn_golden = torch.compile(MatMulModule(), options={"max_autotune": True})
torch.testing.assert_close(
opt_fn(x),
opt_fn_golden(x),
msg=f"torch.compile(..., external_matmul = {matmul_cpu}) failed",
)
def test_matmul_dup(self):
# 2I + 2I == (2I)(2I)
x = torch.eye(128, 128) * 2
# This should only register the first external call
opt_fn = torch.compile(
MatMulModule(),
options={"max_autotune": True, "external_matmul": [matmul_dup, matmul_dup]},
)
opt_fn_golden = torch.compile(MatMulModule(), options={"max_autotune": True})
torch.testing.assert_close(
opt_fn(x),
opt_fn_golden(x),
msg=f"torch.compile(..., external_matmul = {matmul_dup}) failed",
)
@unittest.skipIf(not TEST_CUDA and not TEST_XPU, "CUDA and XPU not found")
@unittest.skipIf(
torch.cuda.is_available() and torch.cuda.get_device_capability() < (7, 0),
"Triton does not support device capability < 7.0",
)
def test_matmul_cuda(self):
device = torch.device(device_type)
x = (torch.eye(128, 128) * 2).to(device=device)
opt_fn = torch.compile(
MatMulModule().to(device),
options={"max_autotune": True, "external_matmul": [matmul_cuda]},
)
opt_fn_golden = torch.compile(
MatMulModule().to(device), options={"max_autotune": True}
)
torch.testing.assert_close(
opt_fn(x),
opt_fn_golden(x),
msg=f"torch.compile(..., external_matmul = {matmul_cuda}) failed",
)
if __name__ == "__main__":
run_tests()
| TestInductorExternalCallable |
python | joblib__joblib | joblib/logger.py | {
"start": 1459,
"end": 2558
} | class ____(object):
"""Base class for logging messages."""
def __init__(self, depth=3, name=None):
"""
Parameters
----------
depth: int, optional
The depth of objects printed.
name: str, optional
The namespace to log to. If None, defaults to joblib.
"""
self.depth = depth
self._name = name if name else "joblib"
def warn(self, msg):
logging.getLogger(self._name).warning("[%s]: %s" % (self, msg))
def info(self, msg):
logging.info("[%s]: %s" % (self, msg))
def debug(self, msg):
# XXX: This conflicts with the debug flag used in children class
logging.getLogger(self._name).debug("[%s]: %s" % (self, msg))
def format(self, obj, indent=0):
"""Return the formatted representation of the object."""
return pformat(obj, indent=indent, depth=self.depth)
###############################################################################
# class `PrintTime`
###############################################################################
| Logger |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/externaltool/package.py | {
"start": 217,
"end": 399
} | class ____(Package):
homepage = "http://somewhere.com"
has_code = False
version("1.0")
version("0.9")
version("0.8.1")
depends_on("externalprereq")
| Externaltool |
python | apache__airflow | airflow-core/tests/unit/models/test_dag_version.py | {
"start": 1136,
"end": 3188
} | class ____:
def setup_method(self):
clear_db_dags()
def teardown_method(self):
clear_db_dags()
@pytest.mark.need_serialized_dag
def test_writing_dag_version(self, dag_maker, session):
with dag_maker("test_writing_dag_version") as dag:
pass
latest_version = DagVersion.get_latest_version(dag.dag_id)
assert latest_version.version_number == 1
assert latest_version.dag_id == dag.dag_id
def test_writing_dag_version_with_changes(self, dag_maker, session):
"""This also tested the get_latest_version method"""
with dag_maker("test1") as dag:
EmptyOperator(task_id="task1")
sync_dag_to_db(dag)
dag_maker.create_dagrun()
# Add extra task to change the dag
with dag_maker("test1") as dag2:
EmptyOperator(task_id="task1")
EmptyOperator(task_id="task2")
sync_dag_to_db(dag2)
latest_version = DagVersion.get_latest_version(dag.dag_id)
assert latest_version.version_number == 2
assert session.scalar(select(func.count()).where(DagVersion.dag_id == dag.dag_id)) == 2
@pytest.mark.need_serialized_dag
def test_get_version(self, dag_maker, session):
"""The two dags have the same version name and number but different dag ids"""
dag1_id = "test1"
with dag_maker(dag1_id):
EmptyOperator(task_id="task1")
with dag_maker("test2"):
EmptyOperator(task_id="task1")
with dag_maker("test3"):
EmptyOperator(task_id="task1")
version = DagVersion.get_version(dag1_id)
assert version.version_number == 1
assert version.dag_id == dag1_id
assert version.version == f"{dag1_id}-1"
@pytest.mark.need_serialized_dag
def test_version_property(self, dag_maker):
with dag_maker("test1") as dag:
pass
latest_version = DagVersion.get_latest_version(dag.dag_id)
assert latest_version.version == f"{dag.dag_id}-1"
| TestDagVersion |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 15905,
"end": 16223
} | class ____(BaseModel):
root: JsonValue
type: Literal["XComSequenceIndexResult"] = "XComSequenceIndexResult"
@classmethod
def from_response(cls, response: XComSequenceIndexResponse) -> XComSequenceIndexResult:
return cls(root=response.root, type="XComSequenceIndexResult")
| XComSequenceIndexResult |
python | nryoung__algorithms | tests/test_sorting.py | {
"start": 2010,
"end": 2271
} | class ____(SortingAlgorithmTestCase):
"""
Tests Insertion sort on a small range from 0-9
"""
def test_insertionsort(self):
self.output = insertion_sort.sort(self.input)
self.assertEqual(self.correct, self.output)
| TestInsertionSort |
python | numba__numba | numba/tests/test_lists.py | {
"start": 22680,
"end": 24915
} | class ____(MemoryLeakMixin, TestCase):
"""
Test reflection of native Numba lists on Python list objects.
"""
def check_reflection(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
samples = [([1., 2., 3., 4.], [0.]),
([1., 2., 3., 4.], [5., 6., 7., 8., 9.]),
]
for dest, src in samples:
expected = list(dest)
got = list(dest)
pyres = pyfunc(expected, src)
with self.assertRefCount(got, src):
cres = cfunc(got, src)
self.assertPreciseEqual(cres, pyres)
self.assertPreciseEqual(expected, got)
self.assertEqual(pyres[0] is expected, cres[0] is got)
del pyres, cres
def test_reflect_simple(self):
self.check_reflection(reflect_simple)
def test_reflect_conditional(self):
self.check_reflection(reflect_conditional)
def test_reflect_exception(self):
"""
When the function exits with an exception, lists should still be
reflected.
"""
pyfunc = reflect_exception
cfunc = jit(nopython=True)(pyfunc)
l = [1, 2, 3]
with self.assertRefCount(l):
with self.assertRaises(ZeroDivisionError):
cfunc(l)
self.assertPreciseEqual(l, [1, 2, 3, 42])
def test_reflect_same_list(self):
"""
When the same list object is reflected twice, behaviour should
be consistent.
"""
pyfunc = reflect_dual
cfunc = jit(nopython=True)(pyfunc)
pylist = [1, 2, 3]
clist = pylist[:]
expected = pyfunc(pylist, pylist)
got = cfunc(clist, clist)
self.assertPreciseEqual(expected, got)
self.assertPreciseEqual(pylist, clist)
self.assertRefCountEqual(pylist, clist)
def test_reflect_clean(self):
"""
When the list wasn't mutated, no reflection should take place.
"""
cfunc = jit(nopython=True)(noop)
# Use a complex, as Python integers can be cached
l = [12.5j]
ids = [id(x) for x in l]
cfunc(l)
self.assertEqual([id(x) for x in l], ids)
| TestListReflection |
python | django__django | tests/model_formsets/models.py | {
"start": 299,
"end": 696
} | class ____(models.Model):
author = models.ForeignKey(Author, models.CASCADE)
title = models.CharField(max_length=100)
class Meta:
unique_together = (("author", "title"),)
ordering = ["id"]
def __str__(self):
return self.title
def clean(self):
# Ensure author is always accessible in clean method
assert self.author.name is not None
| Book |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 163355,
"end": 163434
} | class ____(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
| RecvmsgIntoUDPTest |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/deployment.py | {
"start": 246,
"end": 501
} | class ____(BaseModel):
"""Deployment resource model."""
id: int # Deployment IDs are integers in the GraphQL schema
name: str
type: DeploymentType
class Config:
from_attributes = True # For future ORM compatibility
| Deployment |
python | huggingface__transformers | src/transformers/models/mixtral/modular_mixtral.py | {
"start": 18587,
"end": 18666
} | class ____(MistralForTokenClassification):
pass
| MixtralForTokenClassification |
python | pytest-dev__pytest | src/_pytest/cacheprovider.py | {
"start": 15733,
"end": 23149
} | class ____:
"""Plugin which implements the --nf (run new-first) option."""
def __init__(self, config: Config) -> None:
self.config = config
self.active = config.option.newfirst
assert config.cache is not None
self.cached_nodeids = set(config.cache.get("cache/nodeids", []))
@hookimpl(wrapper=True, tryfirst=True)
def pytest_collection_modifyitems(self, items: list[nodes.Item]) -> Generator[None]:
res = yield
if self.active:
new_items: dict[str, nodes.Item] = {}
other_items: dict[str, nodes.Item] = {}
for item in items:
if item.nodeid not in self.cached_nodeids:
new_items[item.nodeid] = item
else:
other_items[item.nodeid] = item
items[:] = self._get_increasing_order(
new_items.values()
) + self._get_increasing_order(other_items.values())
self.cached_nodeids.update(new_items)
else:
self.cached_nodeids.update(item.nodeid for item in items)
return res
def _get_increasing_order(self, items: Iterable[nodes.Item]) -> list[nodes.Item]:
return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True)
def pytest_sessionfinish(self) -> None:
config = self.config
if config.getoption("cacheshow") or hasattr(config, "workerinput"):
return
if config.getoption("collectonly"):
return
assert config.cache is not None
config.cache.set("cache/nodeids", sorted(self.cached_nodeids))
def pytest_addoption(parser: Parser) -> None:
"""Add command-line options for cache functionality.
:param parser: Parser object to add command-line options to.
"""
group = parser.getgroup("general")
group.addoption(
"--lf",
"--last-failed",
action="store_true",
dest="lf",
help="Rerun only the tests that failed at the last run (or all if none failed)",
)
group.addoption(
"--ff",
"--failed-first",
action="store_true",
dest="failedfirst",
help="Run all tests, but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown.",
)
group.addoption(
"--nf",
"--new-first",
action="store_true",
dest="newfirst",
help="Run tests from new files first, then the rest of the tests "
"sorted by file mtime",
)
group.addoption(
"--cache-show",
action="append",
nargs="?",
dest="cacheshow",
help=(
"Show cache contents, don't perform collection or tests. "
"Optional argument: glob (default: '*')."
),
)
group.addoption(
"--cache-clear",
action="store_true",
dest="cacheclear",
help="Remove all cache contents at start of test run",
)
cache_dir_default = ".pytest_cache"
if "TOX_ENV_DIR" in os.environ:
cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default)
parser.addini("cache_dir", default=cache_dir_default, help="Cache directory path")
group.addoption(
"--lfnf",
"--last-failed-no-failures",
action="store",
dest="last_failed_no_failures",
choices=("all", "none"),
default="all",
help="With ``--lf``, determines whether to execute tests when there "
"are no previously (known) failures or when no "
"cached ``lastfailed`` data was found. "
"``all`` (the default) runs the full test suite again. "
"``none`` just emits a message about no known failures and exits successfully.",
)
def pytest_cmdline_main(config: Config) -> int | ExitCode | None:
if config.option.cacheshow and not config.option.help:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
return None
@hookimpl(tryfirst=True)
def pytest_configure(config: Config) -> None:
"""Configure cache system and register related plugins.
Creates the Cache instance and registers the last-failed (LFPlugin)
and new-first (NFPlugin) plugins with the plugin manager.
:param config: pytest configuration object.
"""
config.cache = Cache.for_config(config, _ispytest=True)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
config.pluginmanager.register(NFPlugin(config), "nfplugin")
@fixture
def cache(request: FixtureRequest) -> Cache:
"""Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be ``/`` separated strings, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
assert request.config.cache is not None
return request.config.cache
def pytest_report_header(config: Config) -> str | None:
"""Display cachedir with --cache-show and if non-default."""
if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache":
assert config.cache is not None
cachedir = config.cache._cachedir
# TODO: evaluate generating upward relative paths
# starting with .., ../.. if sensible
try:
displaypath = cachedir.relative_to(config.rootpath)
except ValueError:
displaypath = cachedir
return f"cachedir: {displaypath}"
return None
def cacheshow(config: Config, session: Session) -> int:
"""Display cache contents when --cache-show is used.
Shows cached values and directories matching the specified glob pattern
(default: '*'). Displays cache location, cached test results, and
any cached directories created by plugins.
:param config: pytest configuration object.
:param session: pytest session object.
:returns: Exit code (0 for success).
"""
from pprint import pformat
assert config.cache is not None
tw = TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.is_dir():
tw.line("cache is empty")
return 0
glob = config.option.cacheshow[0]
if glob is None:
glob = "*"
dummy = object()
basedir = config.cache._cachedir
vdir = basedir / Cache._CACHE_PREFIX_VALUES
tw.sep("-", f"cache values for {glob!r}")
for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()):
key = str(valpath.relative_to(vdir))
val = config.cache.get(key, dummy)
if val is dummy:
tw.line(f"{key} contains unreadable content, will be ignored")
else:
tw.line(f"{key} contains:")
for line in pformat(val).splitlines():
tw.line(" " + line)
ddir = basedir / Cache._CACHE_PREFIX_DIRS
if ddir.is_dir():
contents = sorted(ddir.rglob(glob))
tw.sep("-", f"cache directories for {glob!r}")
for p in contents:
# if p.is_dir():
# print("%s/" % p.relative_to(basedir))
if p.is_file():
key = str(p.relative_to(basedir))
tw.line(f"{key} is a file of length {p.stat().st_size}")
return 0
| NFPlugin |
python | numba__numba | numba/core/types/scalars.py | {
"start": 5072,
"end": 5138
} | class ____(_NPDatetimeBase):
type_name = 'datetime64'
| NPDatetime |
python | pypa__warehouse | tests/unit/admin/views/test_projects.py | {
"start": 38094,
"end": 41803
} | class ____:
def test_archive(self, db_request):
project = ProjectFactory.create(name="foo")
user = UserFactory.create(username="testuser")
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.method = "POST"
db_request.user = user
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.archive_project_view(project, db_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert project.lifecycle_status == LifecycleStatus.ArchivedNoindex
assert db_request.route_path.calls == [
pretend.call("admin.project.detail", project_name=project.name)
]
def test_unarchive_project(self, db_request):
project = ProjectFactory.create(
name="foo", lifecycle_status=LifecycleStatus.Archived
)
user = UserFactory.create(username="testuser")
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.method = "POST"
db_request.user = user
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.unarchive_project_view(project, db_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert db_request.route_path.calls == [
pretend.call("admin.project.detail", project_name=project.name)
]
assert project.lifecycle_status is None
def test_disallowed_archive(self, db_request):
project = ProjectFactory.create(name="foo", lifecycle_status="quarantine-enter")
user = UserFactory.create(username="testuser")
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.method = "POST"
db_request.user = user
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.archive_project_view(project, db_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert db_request.session.flash.calls == [
pretend.call(
f"Cannot archive project with status {project.lifecycle_status}",
queue="error",
)
]
assert db_request.route_path.calls == [
pretend.call("admin.project.detail", project_name="foo")
]
assert project.lifecycle_status == "quarantine-enter"
def test_disallowed_unarchive(self, db_request):
project = ProjectFactory.create(name="foo", lifecycle_status="quarantine-enter")
user = UserFactory.create(username="testuser")
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/the-redirect")
db_request.method = "POST"
db_request.user = user
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
result = views.unarchive_project_view(project, db_request)
assert isinstance(result, HTTPSeeOther)
assert result.headers["Location"] == "/the-redirect"
assert db_request.session.flash.calls == [
pretend.call("Can only unarchive an archived project", queue="error")
]
assert db_request.route_path.calls == [
pretend.call("admin.project.detail", project_name="foo")
]
assert project.lifecycle_status == "quarantine-enter"
| TestProjectArchival |
python | ray-project__ray | python/ray/data/tests/unit/test_datatype.py | {
"start": 20568,
"end": 26672
} | class ____:
"""Test type predicate methods (is_list_type, is_struct_type, etc.)."""
@pytest.mark.parametrize(
"datatype,expected_result",
[
# List types
(DataType.list(DataType.int64()), True),
(DataType.large_list(DataType.string()), True),
(DataType.fixed_size_list(DataType.float32(), 3), True),
# Tensor types (should return False)
(DataType.tensor(shape=(3, 4), dtype=DataType.float32()), False),
(DataType.variable_shaped_tensor(dtype=DataType.float64(), ndim=2), False),
# Non-list types
(DataType.int64(), False),
(DataType.string(), False),
(DataType.struct([("x", DataType.int32())]), False),
],
)
def test_is_list_type(self, datatype, expected_result):
"""Test is_list_type predicate."""
assert datatype.is_list_type() == expected_result
@pytest.mark.parametrize(
"datatype,expected_result",
[
(DataType.tensor(shape=(3, 4), dtype=DataType.float32()), True),
(DataType.variable_shaped_tensor(dtype=DataType.float64(), ndim=2), True),
],
)
def test_is_tensor_type(self, datatype, expected_result):
"""Test is_tensor_type predicate."""
assert datatype.is_tensor_type() == expected_result
@pytest.mark.parametrize(
"datatype,expected_result",
[
(DataType.struct([("x", DataType.int64())]), True),
(
DataType.struct([("a", DataType.string()), ("b", DataType.float32())]),
True,
),
(DataType.list(DataType.int64()), False),
(DataType.int64(), False),
],
)
def test_is_struct_type(self, datatype, expected_result):
"""Test is_struct_type predicate."""
assert datatype.is_struct_type() == expected_result
@pytest.mark.parametrize(
"datatype,expected_result",
[
(DataType.map(DataType.string(), DataType.int64()), True),
(DataType.map(DataType.int32(), DataType.float32()), True),
(DataType.list(DataType.int64()), False),
(DataType.int64(), False),
],
)
def test_is_map_type(self, datatype, expected_result):
"""Test is_map_type predicate."""
assert datatype.is_map_type() == expected_result
@pytest.mark.parametrize(
"datatype,expected_result",
[
# Nested types
(DataType.list(DataType.int64()), True),
(DataType.struct([("x", DataType.int32())]), True),
(DataType.map(DataType.string(), DataType.int64()), True),
# Non-nested types
(DataType.int64(), False),
(DataType.string(), False),
(DataType.float32(), False),
],
)
def test_is_nested_type(self, datatype, expected_result):
"""Test is_nested_type predicate."""
assert datatype.is_nested_type() == expected_result
@pytest.mark.parametrize(
"datatype,expected_result",
[
# Numerical Arrow types
(DataType.int64(), True),
(DataType.int32(), True),
(DataType.float32(), True),
(DataType.float64(), True),
# Numerical NumPy types
(DataType.from_numpy(np.dtype("int32")), True),
(DataType.from_numpy(np.dtype("float64")), True),
# Numerical Python types
(DataType(int), True),
(DataType(float), True),
# Non-numerical types
(DataType.string(), False),
(DataType.binary(), False),
(DataType(str), False),
],
)
def test_is_numerical_type(self, datatype, expected_result):
"""Test is_numerical_type predicate."""
assert datatype.is_numerical_type() == expected_result
@pytest.mark.parametrize(
"datatype,expected_result",
[
# String Arrow types
(DataType.string(), True),
(DataType.from_arrow(pa.large_string()), True),
# String NumPy types
(DataType.from_numpy(np.dtype("U10")), True),
# String Python types
(DataType(str), True),
# Non-string types
(DataType.int64(), False),
(DataType.binary(), False),
],
)
def test_is_string_type(self, datatype, expected_result):
"""Test is_string_type predicate."""
assert datatype.is_string_type() == expected_result
@pytest.mark.parametrize(
"datatype,expected_result",
[
# Binary Arrow types
(DataType.binary(), True),
(DataType.from_arrow(pa.large_binary()), True),
(DataType.from_arrow(pa.binary(10)), True), # fixed_size_binary
# Binary Python types
(DataType(bytes), True),
(DataType(bytearray), True),
# Non-binary types
(DataType.string(), False),
(DataType.int64(), False),
],
)
def test_is_binary_type(self, datatype, expected_result):
"""Test is_binary_type predicate."""
assert datatype.is_binary_type() == expected_result
@pytest.mark.parametrize(
"datatype,expected_result",
[
# Temporal Arrow types
(DataType.temporal("timestamp", unit="s"), True),
(DataType.temporal("date32"), True),
(DataType.temporal("time64", unit="us"), True),
(DataType.temporal("duration", unit="ms"), True),
# Temporal NumPy types
(DataType.from_numpy(np.dtype("datetime64[D]")), True),
(DataType.from_numpy(np.dtype("timedelta64[s]")), True),
# Non-temporal types
(DataType.int64(), False),
(DataType.string(), False),
],
)
def test_is_temporal_type(self, datatype, expected_result):
"""Test is_temporal_type predicate."""
assert datatype.is_temporal_type() == expected_result
| TestTypePredicates |
python | ansible__ansible | lib/ansible/_internal/_templating/_jinja_plugins.py | {
"start": 6246,
"end": 7249
} | class ____:
"""Functions/methods marked `_DirectCall` bypass Jinja Environment checks for `Marker`."""
_marker_attr: t.Final[str] = "_directcall"
@classmethod
def mark[T: t.Callable](cls, src: T) -> T:
setattr(src, cls._marker_attr, True)
return src
@classmethod
def is_marked(cls, value: t.Callable) -> bool:
return callable(value) and getattr(value, cls._marker_attr, False)
@_DirectCall.mark
def _query(plugin_name: str, /, *args, **kwargs) -> t.Any:
"""wrapper for lookup, force wantlist true"""
kwargs['wantlist'] = True
return _invoke_lookup(plugin_name=plugin_name, lookup_terms=list(args), lookup_kwargs=kwargs)
@_DirectCall.mark
def _lookup(plugin_name: str, /, *args, **kwargs) -> t.Any:
# convert the args tuple to a list, since some plugins make a poor assumption that `run.args` is a list
return _invoke_lookup(plugin_name=plugin_name, lookup_terms=list(args), lookup_kwargs=kwargs)
@dataclasses.dataclass
| _DirectCall |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-of-buying-candies-with-discount.py | {
"start": 42,
"end": 267
} | class ____(object):
def minimumCost(self, cost):
"""
:type cost: List[int]
:rtype: int
"""
cost.sort(reverse=True)
return sum(x for i, x in enumerate(cost) if i%3 != 2)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/metrics_test.py | {
"start": 84161,
"end": 103054
} | class ____(test.TestCase):
def setUp(self):
self._test_precision_at_k = functools.partial(
_test_precision_at_k, test_case=self)
self._test_precision_at_top_k = functools.partial(
_test_precision_at_top_k, test_case=self)
self._test_average_precision_at_k = functools.partial(
_test_average_precision_at_k, test_case=self)
@test_util.run_deprecated_v1
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_idx_ex1 = (5, 3, 6, 0, 1)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in range(4):
k = i + 1
self._test_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_precision_at_top_k(
(predictions_idx_ex1[:k],), labels, k=k, expected=precision_ex1[i])
self._test_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_idx_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in range(4):
k = i + 1
self._test_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_precision_at_top_k(
(predictions_idx_ex2[:k],), labels, k=k, expected=precision_ex2[i])
self._test_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
# Both examples, we expect both precision and average precision to be the
# average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [(ex1 + ex2) / 2
for ex1, ex2 in zip(precision_ex1, precision_ex2)]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in range(4):
k = i + 1
predictions_idx = (predictions_idx_ex1[:k], predictions_idx_ex2[:k])
self._test_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
self._test_precision_at_top_k(
predictions_idx, labels, k=k, expected=streaming_precision[i])
self._test_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
# Weighted examples, we expect streaming average precision to be the
# weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in range(4):
k = i + 1
self._test_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
@test_util.run_deprecated_v1
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_idx_ex1 = (5, 3, 6, 0, 1)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in range(4):
k = i + 1
self._test_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_precision_at_top_k(
(predictions_idx_ex1[:k],), labels, k=k, expected=precision_ex1[i])
self._test_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
@test_util.run_deprecated_v1
def test_average_precision_different_num_labels(self):
"""Tests the case where the numbers of labels differ across examples."""
predictions = [[0.4, 0.3, 0.2, 0.1], [0.1, 0.2, 0.3, 0.4]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 1], [0, 0, 0, 1]])
dense_labels = np.array([[2, 3], [3, -1]], dtype=np.int64)
predictions_idx_ex1 = np.array(((0, 1, 2, 3), (3, 2, 1, 0)))
precision_ex1 = ((0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4),
(1.0 / 1, 1.0 / 2, 1.0 / 3, 1.0 / 4))
mean_precision_ex1 = np.mean(precision_ex1, axis=0)
avg_precision_ex1 = (
(0.0 / 1, 0.0 / 2, 1.0 / 3 / 2, (1.0 / 3 + 2.0 / 4) / 2),
(1.0 / 1, 1.0 / 1, 1.0 / 1, 1.0 / 1))
mean_avg_precision_ex1 = np.mean(avg_precision_ex1, axis=0)
for labels in (sparse_labels, dense_labels):
for i in range(4):
k = i + 1
self._test_precision_at_k(
predictions, labels, k, expected=mean_precision_ex1[i])
self._test_precision_at_top_k(
predictions_idx_ex1[:, :k], labels, k=k,
expected=mean_precision_ex1[i])
self._test_average_precision_at_k(
predictions, labels, k, expected=mean_avg_precision_ex1[i])
@test_util.run_deprecated_v1
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
predictions_idx = [[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=NAN, class_id=class_id)
@test_util.run_deprecated_v1
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
predictions_idx = [[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=0.0, class_id=class_id)
@test_util.run_deprecated_v1
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
predictions_idx = [[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=3.0 / 10)
@test_util.run_deprecated_v1
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
predictions_idx = [[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_precision_at_top_k(
predictions_idx, sp_labels, k=5, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_precision_at_top_k(
predictions_idx, sp_labels, k=5, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_precision_at_top_k(
predictions_idx, sp_labels, k=5, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_precision_at_top_k(
predictions_idx, sp_labels, k=5, expected=3.0 / 10)
@test_util.run_deprecated_v1
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
predictions_idx = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
[[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=NAN, class_id=class_id)
@test_util.run_deprecated_v1
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
predictions_idx = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
[[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=0.0, class_id=class_id)
@test_util.run_deprecated_v1
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
predictions_idx = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
[[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=7.0 / 20)
@test_util.run_deprecated_v1
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
predictions_idx = [[[9, 4, 6, 2, 0], [5, 7, 2, 9, 6]],
[[5, 7, 2, 9, 6], [9, 4, 6, 2, 0]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2.0, class_id=2,
weights=[[1], [0]])
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=2.0 / 2.0, class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2.0, class_id=2,
weights=[[0], [1]])
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=2.0 / 2.0, class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1.0, class_id=7,
weights=[[1], [0]])
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=0.0 / 1.0, class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1.0, class_id=7,
weights=[[0], [1]])
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=1.0 / 1.0, class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=7,
weights=[[1, 0], [0, 1]])
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=NAN, class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2.0, class_id=7,
weights=[[0, 1], [1, 0]])
self._test_precision_at_top_k(
predictions_idx, labels, k=5, expected=1.0 / 2.0, class_id=7,
weights=[[0, 1], [1, 0]])
def _test_recall_at_k(predictions,
labels,
k,
expected,
class_id=None,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertEqual(expected, update.eval())
test_case.assertEqual(expected, metric.eval())
def _test_recall_at_top_k(
predictions_idx,
labels,
expected,
k=None,
class_id=None,
weights=None,
test_case=None):
with ops.Graph().as_default() as g, test_case.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.recall_at_top_k(
predictions_idx=constant_op.constant(predictions_idx, dtypes_lib.int32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
test_case.assertRaises(errors_impl.OpError, metric.eval)
test_case.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(test_case, update.eval())
_assert_nan(test_case, metric.eval())
else:
test_case.assertEqual(expected, update.eval())
test_case.assertEqual(expected, metric.eval())
| MultiLabelPrecisionAtKTest |
python | scikit-learn__scikit-learn | sklearn/externals/array_api_compat/common/_linalg.py | {
"start": 875,
"end": 930
} | class ____(NamedTuple):
Q: Array
R: Array
| QRResult |
python | run-llama__llama_index | llama-index-core/llama_index/core/base/response/schema.py | {
"start": 3601,
"end": 5673
} | class ____:
"""
StreamingResponse object.
Returned if streaming=True.
Attributes:
response_gen: The response generator.
"""
response_gen: TokenGen
source_nodes: List[NodeWithScore] = field(default_factory=list)
metadata: Optional[Dict[str, Any]] = None
response_txt: Optional[str] = None
def __str__(self) -> str:
"""Convert to string representation."""
if self.response_txt is None and self.response_gen is not None:
response_txt = ""
for text in self.response_gen:
response_txt += text
self.response_txt = response_txt
return self.response_txt or "None"
def get_response(self) -> Response:
"""Get a standard response object."""
if self.response_txt is None and self.response_gen is not None:
response_txt = ""
for text in self.response_gen:
response_txt += text
self.response_txt = response_txt
return Response(self.response_txt, self.source_nodes, self.metadata)
def print_response_stream(self) -> None:
"""Print the response stream."""
if self.response_txt is None and self.response_gen is not None:
response_txt = ""
for text in self.response_gen:
print(text, end="", flush=True)
response_txt += text
self.response_txt = response_txt
else:
print(self.response_txt)
def get_formatted_sources(self, length: int = 100, trim_text: int = True) -> str:
"""Get formatted sources text."""
texts = []
for source_node in self.source_nodes:
fmt_text_chunk = source_node.node.get_content()
if trim_text:
fmt_text_chunk = truncate_text(fmt_text_chunk, length)
node_id = source_node.node.node_id or "None"
source_text = f"> Source (Node id: {node_id}): {fmt_text_chunk}"
texts.append(source_text)
return "\n\n".join(texts)
@dataclass
| StreamingResponse |
python | encode__django-rest-framework | rest_framework/authentication.py | {
"start": 866,
"end": 1488
} | class ____:
"""
All authentication classes should extend BaseAuthentication.
"""
def authenticate(self, request):
"""
Authenticate the request and return a two-tuple of (user, token).
"""
raise NotImplementedError(".authenticate() must be overridden.")
def authenticate_header(self, request):
"""
Return a string to be used as the value of the `WWW-Authenticate`
header in a `401 Unauthenticated` response, or `None` if the
authentication scheme should return `403 Permission Denied` responses.
"""
pass
| BaseAuthentication |
python | walkccc__LeetCode | solutions/2615. Sum of Distances/2615.py | {
"start": 0,
"end": 584
} | class ____:
def distance(self, nums: list[int]) -> list[int]:
ans = [0] * len(nums)
numToIndices = collections.defaultdict(list)
for i, num in enumerate(nums):
numToIndices[num].append(i)
for indices in numToIndices.values():
n = len(indices)
if n == 1:
continue
sumSoFar = sum(indices)
prevIndex = 0
for i in range(n):
sumSoFar += (i - 1) * (indices[i] - prevIndex)
sumSoFar -= (n - 1 - i) * (indices[i] - prevIndex)
ans[indices[i]] = sumSoFar
prevIndex = indices[i]
return ans
| Solution |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/context.py | {
"start": 1158,
"end": 13020
} | class ____(PipelineContext):
"""The connector context is used to store configuration for a specific connector pipeline run."""
DEFAULT_CONNECTOR_ACCEPTANCE_TEST_IMAGE = "airbyte/connector-acceptance-test:dev"
def __init__(
self,
pipeline_name: str,
connector: ConnectorWithModifiedFiles,
is_local: bool,
git_branch: str,
git_revision: str,
diffed_branch: str,
git_repo_url: str,
report_output_prefix: str,
ci_report_bucket: Optional[str] = None,
ci_gcp_credentials: Optional[Secret] = None,
ci_git_user: Optional[str] = None,
ci_github_access_token: Optional[Secret] = None,
connector_acceptance_test_image: str = DEFAULT_CONNECTOR_ACCEPTANCE_TEST_IMAGE,
gha_workflow_run_url: Optional[str] = None,
dagger_logs_url: Optional[str] = None,
pipeline_start_timestamp: Optional[int] = None,
ci_context: Optional[str] = None,
slack_webhook: Optional[str] = None,
pull_request: Optional[PullRequest.PullRequest] = None,
should_save_report: bool = True,
code_tests_only: bool = False,
use_local_cdk: bool = False,
use_cdk_ref: Optional[str] = None,
use_host_gradle_dist_tar: bool = False,
enable_report_auto_open: bool = True,
docker_hub_username: Optional[Secret] = None,
docker_hub_password: Optional[Secret] = None,
s3_build_cache_access_key_id: Optional[Secret] = None,
s3_build_cache_secret_key: Optional[Secret] = None,
genai_api_key: Optional[Secret] = None,
dbdocs_token: Optional[Secret] = None,
concurrent_cat: Optional[bool] = False,
run_step_options: RunStepOptions = RunStepOptions(),
targeted_platforms: Sequence[Platform] = BUILD_PLATFORMS,
secret_stores: Dict[str, SecretStore] | None = None,
) -> None:
"""Initialize a connector context.
Args:
connector (Connector): The connector under test.
is_local (bool): Whether the context is for a local run or a CI run.
git_branch (str): The current git branch name.
git_revision (str): The current git revision, commit hash.
diffed_branch: str: The branch to compare the current branch against.
git_repo_url: str: The URL of the git repository.
report_output_prefix (str): The S3 key to upload the test report to.
connector_acceptance_test_image (Optional[str], optional): The image to use to run connector acceptance tests. Defaults to DEFAULT_CONNECTOR_ACCEPTANCE_TEST_IMAGE.
gha_workflow_run_url (Optional[str], optional): URL to the github action workflow run. Only valid for CI run. Defaults to None.
dagger_logs_url (Optional[str], optional): URL to the dagger logs. Only valid for CI run. Defaults to None.
pipeline_start_timestamp (Optional[int], optional): Timestamp at which the pipeline started. Defaults to None.
ci_context (Optional[str], optional): Pull requests, workflow dispatch or nightly build. Defaults to None.
slack_webhook (Optional[str], optional): The slack webhook to send messages to. Defaults to None.
pull_request (PullRequest, optional): The pull request object if the pipeline was triggered by a pull request. Defaults to None.
code_tests_only (bool, optional): Whether to ignore non-code tests like QA and metadata checks. Defaults to False.
use_host_gradle_dist_tar (bool, optional): Used when developing java connectors with gradle. Defaults to False.
enable_report_auto_open (bool, optional): Open HTML report in browser window. Defaults to True.
docker_hub_username (Optional[Secret], optional): Docker Hub username to use to read registries. Defaults to None.
docker_hub_password (Optional[Secret], optional): Docker Hub password to use to read registries. Defaults to None.
s3_build_cache_access_key_id (Optional[Secret], optional): Gradle S3 Build Cache credentials. Defaults to None.
s3_build_cache_secret_key (Optional[Secret], optional): Gradle S3 Build Cache credentials. Defaults to None.
concurrent_cat (bool, optional): Whether to run the CAT tests in parallel. Defaults to False.
targeted_platforms (Optional[Iterable[Platform]], optional): The platforms to build the connector image for. Defaults to BUILD_PLATFORMS.
"""
self.pipeline_name = pipeline_name
self.connector = connector
self.connector_acceptance_test_image = connector_acceptance_test_image
self._secrets_dir: Optional[Directory] = None
self._updated_secrets_dir: Optional[Directory] = None
self.cdk_version: Optional[str] = None
self.should_save_report = should_save_report
self.code_tests_only = code_tests_only
self.use_local_cdk = use_local_cdk
self.use_cdk_ref = use_cdk_ref
self.use_host_gradle_dist_tar = use_host_gradle_dist_tar
self.enable_report_auto_open = enable_report_auto_open
self.docker_hub_username = docker_hub_username
self.docker_hub_password = docker_hub_password
self.s3_build_cache_access_key_id = s3_build_cache_access_key_id
self.s3_build_cache_secret_key = s3_build_cache_secret_key
self.genai_api_key = genai_api_key
self.dbdocs_token = dbdocs_token
self.concurrent_cat = concurrent_cat
self.targeted_platforms = targeted_platforms
super().__init__(
pipeline_name=pipeline_name,
is_local=is_local,
git_branch=git_branch,
git_revision=git_revision,
diffed_branch=diffed_branch,
git_repo_url=git_repo_url,
report_output_prefix=report_output_prefix,
gha_workflow_run_url=gha_workflow_run_url,
dagger_logs_url=dagger_logs_url,
pipeline_start_timestamp=pipeline_start_timestamp,
ci_context=ci_context,
slack_webhook=slack_webhook,
pull_request=pull_request,
ci_report_bucket=ci_report_bucket,
ci_gcp_credentials=ci_gcp_credentials,
ci_git_user=ci_git_user,
ci_github_access_token=ci_github_access_token,
run_step_options=run_step_options,
enable_report_auto_open=enable_report_auto_open,
secret_stores=secret_stores,
)
@property
def modified_files(self) -> FrozenSet[NativePath]:
return self.connector.modified_files
@property
def secrets_dir(self) -> Optional[Directory]:
return self._secrets_dir
@secrets_dir.setter
def secrets_dir(self, secrets_dir: Directory) -> None:
self._secrets_dir = secrets_dir
@property
def updated_secrets_dir(self) -> Optional[Directory]:
return self._updated_secrets_dir
@updated_secrets_dir.setter
def updated_secrets_dir(self, updated_secrets_dir: Directory) -> None:
self._updated_secrets_dir = updated_secrets_dir
@property
def connector_acceptance_test_source_dir(self) -> Directory:
return self.get_repo_dir("airbyte-integrations/bases/connector-acceptance-test")
@property
def live_tests_dir(self) -> Directory:
return self.get_repo_dir("airbyte-ci/connectors/live-tests")
@property
def erd_package_dir(self) -> Directory:
return self.get_repo_dir("airbyte-ci/connectors/erd")
@property
def should_save_updated_secrets(self) -> bool:
return self.ci_gcp_credentials is not None and self.updated_secrets_dir is not None
@property
def host_image_export_dir_path(self) -> str:
return "." if self.is_ci else "/tmp"
@property
def metadata_path(self) -> Path:
return self.connector.code_directory / METADATA_FILE_NAME
@property
def metadata(self) -> dict:
return yaml.safe_load(self.metadata_path.read_text())["data"]
@property
def docker_repository(self) -> str:
return self.metadata["dockerRepository"]
@property
def docker_image_tag(self) -> str:
return self.metadata["dockerImageTag"]
@property
def docker_image(self) -> str:
return f"{self.docker_repository}:{self.docker_image_tag}"
@property
def local_secret_store_name(self) -> str:
return f"{self.connector.technical_name}-local"
@property
def local_secret_store(self) -> Optional[LocalDirectorySecretStore]:
connector_secrets_path = self.connector.code_directory / "secrets"
if connector_secrets_path.is_dir():
return LocalDirectorySecretStore(connector_secrets_path)
return None
async def get_connector_dir(self, exclude: Optional[List[str]] = None, include: Optional[List[str]] = None) -> Directory:
"""Get the connector under test source code directory.
Args:
exclude ([List[str], optional): List of files or directories to exclude from the directory. Defaults to None.
include ([List[str], optional): List of files or directories to include in the directory. Defaults to None.
Returns:
Directory: The connector under test source code directory.
"""
vanilla_connector_dir = self.get_repo_dir(str(self.connector.code_directory), exclude=exclude, include=include)
return await vanilla_connector_dir.with_timestamps(1)
async def __aexit__(
self, exception_type: Optional[type[BaseException]], exception_value: Optional[BaseException], traceback: Optional[TracebackType]
) -> bool:
"""Perform teardown operation for the ConnectorContext.
On the context exit the following operations will happen:
- Upload updated connector secrets back to Google Secret Manager
- Write a test report in JSON format locally and to S3 if running in a CI environment
- Update the commit status check on GitHub if running in a CI environment.
It should gracefully handle the execution error that happens and always upload a test report and update commit status check.
Args:
exception_type (Optional[type[BaseException]]): The exception type if an exception was raised in the context execution, None otherwise.
exception_value (Optional[BaseException]): The exception value if an exception was raised in the context execution, None otherwise.
traceback (Optional[TracebackType]): The traceback if an exception was raised in the context execution, None otherwise.
Returns:
bool: Whether the teardown operation ran successfully.
"""
self.stopped_at = datetime.utcnow()
self.state = self.determine_final_state(self.report, exception_value)
if exception_value:
self.logger.error("An error got handled by the ConnectorContext", exc_info=True)
if self.report is None:
self.logger.error("No test report was provided. This is probably due to an upstream error")
self.report = ConnectorReport(self, [])
if self.should_save_updated_secrets:
await secrets.upload(self)
self.report.print()
if self.should_save_report:
await self.report.save()
await asyncify(update_commit_status_check)(**self.github_commit_status)
if self.should_send_slack_message:
# Using a type ignore here because the should_send_slack_message property is checking for non nullity of the slack_webhook
await asyncify(send_message_to_webhook)(self.create_slack_message(), self.get_slack_channels(), self.slack_webhook) # type: ignore
# Supress the exception if any
return True
| ConnectorContext |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.