Columns (name: type, value statistics):
language: stringclasses (1 value)
repo: stringclasses (346 values)
path: stringlengths (6 to 201)
class_span: dict
source: stringlengths (21 to 2.38M)
target: stringlengths (1 to 96)
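Each row masks a class name as ____ inside source and stores the answer in target; class_span gives the character offsets of the class within the original file at path. A minimal sketch of consuming a row for the fill-in-the-name task (plain Python over a dict shaped like the rows below; fill_mask and exact_match are illustrative helpers, not part of the dataset):

def fill_mask(source: str, name: str) -> str:
    # The blank appears once, in the "class ____(...)" header.
    return source.replace("____", name, 1)

def exact_match(row: dict, prediction: str) -> bool:
    # Simplest metric: the predicted class name must equal the target column.
    return prediction == row["target"]

row = {
    "language": "python",
    "repo": "django__django",
    "path": "django/template/smartif.py",
    "class_span": {"start": 4382, "end": 4551},
    "source": "class ____(TokenBase): lbp = 0 ...",
    "target": "EndToken",
}
print(fill_mask(row["source"], row["target"]))  # class EndToken(TokenBase): lbp = 0 ...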
python
huggingface__transformers
src/transformers/models/flex_olmo/modular_flex_olmo.py
{ "start": 12724, "end": 13311 }
class ____(MixtralPreTrainedModel): _can_record_outputs = { "router_logits": OutputRecorder(nn.Linear, layer_name="mlp.gate", index=0), "hidden_states": FlexOlmoDecoderLayer, "attentions": FlexOlmoAttention, } # FlexOlmo uses Mixtral model as its base instead of OlmoE model since Mixtral is more up-to-date with the rest # of the transformers library. For example, it uses the newer mechanisms of recording submodule outputs. # FlexOlmo model is identical to Mixtral model except: # - FlexOlmo does not use sliding window attention.
FlexOlmoPreTrainedModel
python
huggingface__transformers
src/transformers/models/sew_d/modeling_sew_d.py
{ "start": 50354, "end": 56184 }
class ____(SEWDPreTrainedModel): def __init__(self, config: SEWDConfig): super().__init__(config) self.config = config self.feature_extractor = SEWDFeatureEncoder(config) self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.feature_layer_norm_eps) self.project_features = config.conv_dim[-1] != config.hidden_size if self.project_features: self.feature_projection = nn.Linear(config.conv_dim[-1], config.hidden_size) self.feature_dropout = nn.Dropout(config.feat_proj_dropout) if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_()) self.encoder = SEWDEncoder(config) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states def _mask_hidden_states( self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://huggingface.co/papers/1904.08779). """ # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states # generate indices & apply SpecAugment along time axis batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @auto_docstring def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict masked extracted features in *config.proj_codevector_dim* space. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) extract_features = self.layer_norm(extract_features) if self.project_features: extract_features = self.feature_projection(extract_features) hidden_states = self.feature_dropout(extract_features) if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if not return_dict: return (hidden_states,) + encoder_outputs[1:] return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" SEW-D Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC). """ ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->SEWD, wav2vec2->sew_d, WAV2VEC2->SEWD
SEWDModel
python
dagster-io__dagster
python_modules/dagster/dagster_tests/components_tests/unit_tests/test_build_defs_for_component.py
{ "start": 113, "end": 575 }
class ____(dg.Component, dg.Resolvable): asset_key: str def build_defs(self, context: ComponentLoadContext) -> dg.Definitions: return dg.Definitions(assets=[dg.AssetSpec(key=self.asset_key)]) def test_build_defs_for_component_basic(): component = MyComponent.from_attributes_dict(attributes={"asset_key": "asset1"}) defs = dg.build_defs_for_component(component=component) assert defs.assets == [dg.AssetSpec(key="asset1")]
MyComponent
python
great-expectations__great_expectations
tests/integration/fixtures/partition_and_sample_data/partitioner_test_cases_and_fixtures.py
{ "start": 411, "end": 7915 }
class ____: def __init__( self, test_df: pd.DataFrame, test_column_name: Optional[str] = None, test_column_names: Optional[List[str]] = None, column_names_to_convert: Optional[List[str]] = None, ): if ( sum( bool(x) for x in [ test_column_name is not None, test_column_names is not None, ] ) > 1 ): raise ValueError( "No more than one of test_column_name or test_column_names can be specified." ) self._test_column_name = test_column_name self._test_column_names = test_column_names # Convert specified columns (e.g., "pickup_datetime" and "dropoff_datetime") to datetime column type. # noqa: E501 # FIXME CoP convert_string_columns_to_datetime( df=test_df, column_names_to_convert=column_names_to_convert ) self._test_df = test_df @property def test_df(self) -> pd.DataFrame: return self._test_df @property def test_column_name(self) -> Optional[str]: return self._test_column_name @property def test_column_names(self) -> Optional[List[str]]: return self._test_column_names @staticmethod def years_in_taxi_data() -> List[datetime.datetime]: return ( pd.date_range(start="2018-01-01", end="2020-12-31", freq="YS").to_pydatetime().tolist() ) def year_batch_identifier_data(self) -> List[dict]: return [{DatePart.YEAR.value: dt.year} for dt in self.years_in_taxi_data()] @staticmethod def months_in_taxi_data() -> List[datetime.datetime]: return ( pd.date_range(start="2018-01-01", end="2020-12-31", freq="MS").to_pydatetime().tolist() ) def get_unique_sorted_months_in_taxi_data(self) -> List[str]: months: List[datetime.datetime] = sorted(set(self.months_in_taxi_data())) month: datetime.datetime return [month.strftime("%Y-%m-%d") for month in months] def year_month_batch_identifier_data(self) -> List[dict]: return [ {DatePart.YEAR.value: dt.year, DatePart.MONTH.value: dt.month} for dt in self.months_in_taxi_data() ] def month_batch_identifier_data(self) -> List[dict]: return [{DatePart.MONTH.value: dt.month} for dt in self.months_in_taxi_data()] def year_month_day_batch_identifier_data(self) -> List[dict]: # Since taxi data does not contain all days, # we need to introspect the data to build the fixture: year_month_day_batch_identifier_list_unsorted: List[dict] = list( {val[0]: val[1], val[2]: val[3], val[4]: val[5]} for val in { ( DatePart.YEAR.value, dt.year, DatePart.MONTH.value, dt.month, DatePart.DAY.value, dt.day, ) for dt in self.test_df[self.test_column_name] } ) return sorted( year_month_day_batch_identifier_list_unsorted, key=lambda x: ( x[DatePart.YEAR.value], x[DatePart.MONTH.value], x[DatePart.DAY.value], ), ) def get_test_column_values(self) -> List[Optional[Any]]: column_values: List[Optional[Any]] = self.test_df[self.test_column_name].tolist() return column_values def get_test_multi_column_values(self) -> List[dict]: multi_column_values: List[dict] = self.test_df[self.test_column_names].to_dict("records") return multi_column_values def get_unique_sorted_test_column_values( self, reverse: Optional[bool] = False, move_null_to_front: Optional[bool] = False, limit: Optional[int] = None, ) -> List[Optional[Any]]: column_values: List[Optional[Any]] = self.get_test_column_values() column_values = list(set(column_values)) column_values = sorted( column_values, key=lambda element: (element is None, element), reverse=reverse, ) column_value: Any if ( move_null_to_front and any(column_value is None for column_value in column_values) and column_values[0] is not None and column_values[-1] is None ): num_null_values: int = column_values.count(None) column_values = list(filter(None, column_values)) column_values 
= num_null_values * [None] + column_values if limit is None: return column_values return column_values[:limit] def get_unique_sorted_test_multi_column_values( self, reverse: Optional[bool] = False, limit: Optional[int] = None, ) -> List[dict]: multi_column_values: List[dict] = self.get_test_multi_column_values() multi_column_values = sorted( multi_column_values, key=lambda element: sum( map( ord, hashlib.md5( str(tuple(zip(element.keys(), element.values(), strict=False))).encode( "utf-8" ) ).hexdigest(), ) ), reverse=reverse, ) unique_multi_column_values: List[dict] = [] hash_codes: List[str] = [] hash_code: str dictionary_element: dict for dictionary_element in multi_column_values: hash_code = hashlib.md5( str( tuple(zip(dictionary_element.keys(), dictionary_element.values(), strict=False)) ).encode("utf-8") ).hexdigest() if hash_code not in hash_codes: unique_multi_column_values.append(dictionary_element) hash_codes.append(hash_code) if limit is None: return unique_multi_column_values return unique_multi_column_values[:limit] def get_divided_integer_test_column_values(self, divisor: int) -> List[Optional[Any]]: column_values: List[Optional[Any]] = self.get_test_column_values() column_value: Any column_values = [column_value // divisor for column_value in column_values] return list(set(column_values)) def get_mod_integer_test_column_values(self, mod: int) -> List[Optional[Any]]: column_values: List[Optional[Any]] = self.get_test_column_values() column_value: Any column_values = [column_value % mod for column_value in column_values] return list(set(column_values)) def get_hashed_test_column_values(self, hash_digits: int) -> List[Optional[Any]]: """ hashlib.md5(string).hexdigest() hashlib.md5(str(tuple_).encode("utf-8")).hexdigest() [:num_digits] """ column_values: List[Optional[Any]] = self.get_unique_sorted_test_column_values( reverse=False, move_null_to_front=False, limit=None ) column_value: Any column_values = [ hashlib.md5(str(column_value).encode("utf-8")).hexdigest()[-1 * hash_digits :] for column_value in column_values ] return list(sorted(set(column_values))) @dataclass
TaxiTestData
python
apache__thrift
contrib/fb303/py/fb303/FacebookBase.py
{ "start": 908, "end": 2038 }
class ____(FacebookService.Iface): def __init__(self, name): self.name = name self.alive = int(time.time()) self.counters = {} def getName(self): return self.name def getVersion(self): return '' def getStatus(self): return fb_status.ALIVE def getCounters(self): return self.counters def resetCounter(self, key): self.counters[key] = 0 def getCounter(self, key): if key in self.counters: return self.counters[key] return 0 def incrementCounter(self, key): self.counters[key] = self.getCounter(key) + 1 def setOption(self, key, value): pass def getOption(self, key): return "" def getOptions(self): return {} def aliveSince(self): return self.alive def getCpuProfile(self, duration): return "" def getLimitedReflection(self): return thrift.reflection.limited.Service() def reinitialize(self): pass def shutdown(self): pass
FacebookBase
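A short usage sketch of the counter API above (assumes FacebookBase is importable from the fb303 package; fb_status and thrift come from its surrounding imports):

svc = FacebookBase("demo")
svc.incrementCounter("requests")
svc.incrementCounter("requests")
assert svc.getCounter("requests") == 2
assert svc.getCounter("missing") == 0  # absent keys read as 0 without being created
svc.resetCounter("requests")
assert svc.getCounters() == {"requests": 0}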
python
google__jax
jax/_src/interpreters/batching.py
{ "start": 38346, "end": 42326 }
class ____: pass zero_if_mapped = ZeroIfMapped() ### functions for handling custom_vjp @lu.transformation_with_aux2 def batch_custom_jvp_subtrace(f, store, tag, axis_data, in_dims, *in_vals): size = axis_data.size mesh_axis = axis_data.explicit_mesh_axis with core.take_current_trace() as parent_trace: trace = BatchTrace(parent_trace, tag, axis_data) in_tracers = [val if dim is None else SymbolicZero(core.mapped_aval(size, dim, val.aval)) if type(val) is SymbolicZero else BatchTracer(trace, val, dim) for val, dim in zip(in_vals, in_dims * 2)] with core.set_current_trace(trace): out_tracers: list[BatchTracer | SymbolicZero] = f(*in_tracers) out_vals, out_dims = unzip2(map(trace.to_batch_info, out_tracers)) out_primals, out_tangents = split_list(out_vals, [len(out_vals) // 2]) out_primal_bds, out_tangent_bds = split_list(out_dims, [len(out_vals) // 2]) out_dims = map(_merge_bdims, out_primal_bds, out_tangent_bds) out_primals = map(partial(matchaxis, trace.axis_data.name, size, mesh_axis), out_primal_bds, out_dims, out_primals) out_tangents = map(partial(_matchaxis_symzeros, trace.axis_data.name, size, mesh_axis), out_tangent_bds, out_dims, out_tangents) store.store(out_dims) return out_primals + out_tangents def batch_custom_vjp_bwd(bwd: lu.WrappedFun, tag: core.TraceTag, axis_data: AxisData, in_dims: Callable[[], Sequence[int | None]], out_dim_dests: Sequence[int | None]) -> lu.WrappedFun: axis_size = axis_data.size axis_name = axis_data.name mesh_axis = axis_data.explicit_mesh_axis def new_bwd(*args): in_dims_ = in_dims() if callable(in_dims) else in_dims args = [SymbolicZero(core.mapped_aval(axis_size, dim, x.aval)) if type(x) is SymbolicZero else x for x, dim in zip(args, in_dims_)] in_dims_ = [None if type(x) is SymbolicZero else d for x, d in zip(args, in_dims_)] bwd_, out_dims_thunk = batch_subtrace(bwd, tag, axis_data, in_dims_) bwd_ = _match_axes_and_sum(bwd_, axis_size, axis_name, mesh_axis, out_dims_thunk, out_dim_dests) return bwd_.call_wrapped(*args) return lu.wrap_init(new_bwd, debug_info=bwd.debug_info) @lu.transformation2 def _match_axes_and_sum(f, axis_size, axis_name, mesh_axis, out_dims_thunk, out_dim_dests, *in_vals): # this is like _match_axes, but we do reduce-sums as needed out_vals = f(*in_vals) return map(partial(_matchaxis_symzeros, axis_name, axis_size, mesh_axis, sum_match=True), out_dims_thunk(), out_dim_dests, out_vals) def _matchaxis_symzeros(axis_name, sz, mesh_axis, src, dst, x, sum_match=False): # Just like `matchaxis`, but handles symbolic zeros using ad_util.py # TODO(mattjj): dedup with matchaxis if isinstance(x, (Zero, SymbolicZero)): if src == dst: return x elif type(src) == type(dst) == int: aval = core.mapped_aval(sz, src, x.aval) return type(x)(core.unmapped_aval(sz, dst, aval, mesh_axis)) elif src is not_mapped and dst is not not_mapped: return type(x)(core.unmapped_aval(sz, dst, x.aval, mesh_axis)) elif dst is not_mapped and sum_match: return type(x)(core.mapped_aval(sz, src, x.aval)) else: raise ValueError((axis_name, x, src, dst)) else: return matchaxis(axis_name, sz, mesh_axis, src, dst, x, sum_match=sum_match) ### utilities for defining primitives' batching rules BatchingRule = Callable[ ..., tuple[Any, Union[int, None, tuple[Union[int, None], ...]]] ] primitive_batchers : dict[core.Primitive, BatchingRule] = {} # "fancy" primitive batchers just take an extra leading `AxisData` and "trace type" args fancy_primitive_batchers: dict[core.Primitive, Callable] = {} # backwards compat shim. TODO: delete
ZeroIfMapped
python
ansible__ansible
lib/ansible/modules/hostname.py
{ "start": 24484, "end": 24603 }
class ____(Hostname): platform = 'Linux' distribution = 'Parrot' strategy_class = FileStrategy
ParrotHostname
python
PyCQA__pylint
tests/functional/a/abstract/abstract_class_instantiated.py
{ "start": 1220, "end": 1288 }
class ____(Structure): def __len__(self): return 42
Sizable
python
milvus-io__pymilvus
pymilvus/exceptions.py
{ "start": 1696, "end": 1791 }
class ____(MilvusException): """Raised when the server is unavailable."""
MilvusUnavailableException
python
vyperlang__vyper
vyper/ast/nodes.py
{ "start": 30125, "end": 30173 }
class ____(ExprNode): __slots__ = ("id",)
Name
python
scikit-image__scikit-image
tests/skimage/morphology/test_extrema.py
{ "start": 341, "end": 11380 }
class ____: def test_saturated_arithmetic(self): """Adding/subtracting a constant and clipping""" # Test for unsigned integer data = np.array( [[250, 251, 5, 5], [100, 200, 253, 252], [4, 10, 1, 3]], dtype=np.uint8 ) # adding the constant img_constant_added = extrema._add_constant_clip(data, 4) expected = np.array( [[254, 255, 9, 9], [104, 204, 255, 255], [8, 14, 5, 7]], dtype=np.uint8 ) error = diff(img_constant_added, expected) assert error < eps img_constant_subtracted = extrema._subtract_constant_clip(data, 4) expected = np.array( [[246, 247, 1, 1], [96, 196, 249, 248], [0, 6, 0, 0]], dtype=np.uint8 ) error = diff(img_constant_subtracted, expected) assert error < eps # Test for signed integer data = np.array([[32767, 32766], [-32768, -32767]], dtype=np.int16) img_constant_added = extrema._add_constant_clip(data, 1) expected = np.array([[32767, 32767], [-32767, -32766]], dtype=np.int16) error = diff(img_constant_added, expected) assert error < eps img_constant_subtracted = extrema._subtract_constant_clip(data, 1) expected = np.array([[32766, 32765], [-32768, -32768]], dtype=np.int16) error = diff(img_constant_subtracted, expected) assert error < eps def test_h_maxima(self): """h-maxima for various data types""" data = np.array( [ [10, 11, 13, 14, 14, 15, 14, 14, 13, 11], [11, 13, 15, 16, 16, 16, 16, 16, 15, 13], [13, 15, 40, 40, 18, 18, 18, 60, 60, 15], [14, 16, 40, 40, 19, 19, 19, 60, 60, 16], [14, 16, 18, 19, 19, 19, 19, 19, 18, 16], [15, 16, 18, 19, 19, 20, 19, 19, 18, 16], [14, 16, 18, 19, 19, 19, 19, 19, 18, 16], [14, 16, 80, 80, 19, 19, 19, 100, 100, 16], [13, 15, 80, 80, 18, 18, 18, 100, 100, 15], [11, 13, 15, 16, 16, 16, 16, 16, 15, 13], ], dtype=np.uint8, ) expected_result = np.array( [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 1, 1, 0], [0, 0, 1, 1, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ], dtype=np.uint8, ) for dtype in [np.uint8, np.uint64, np.int8, np.int64]: data = data.astype(dtype) out = extrema.h_maxima(data, 40) error = diff(expected_result, out) assert error < eps def test_h_minima(self): """h-minima for various data types""" data = np.array( [ [10, 11, 13, 14, 14, 15, 14, 14, 13, 11], [11, 13, 15, 16, 16, 16, 16, 16, 15, 13], [13, 15, 40, 40, 18, 18, 18, 60, 60, 15], [14, 16, 40, 40, 19, 19, 19, 60, 60, 16], [14, 16, 18, 19, 19, 19, 19, 19, 18, 16], [15, 16, 18, 19, 19, 20, 19, 19, 18, 16], [14, 16, 18, 19, 19, 19, 19, 19, 18, 16], [14, 16, 80, 80, 19, 19, 19, 100, 100, 16], [13, 15, 80, 80, 18, 18, 18, 100, 100, 15], [11, 13, 15, 16, 16, 16, 16, 16, 15, 13], ], dtype=np.uint8, ) data = 100 - data expected_result = np.array( [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 1, 1, 0], [0, 0, 1, 1, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ], dtype=np.uint8, ) for dtype in [np.uint8, np.uint64, np.int8, np.int64]: data = data.astype(dtype) out = extrema.h_minima(data, 40) error = diff(expected_result, out) assert error < eps assert out.dtype == expected_result.dtype def test_extrema_float(self): """specific tests for float type""" data = np.array( [ [0.10, 0.11, 0.13, 0.14, 0.14, 0.15, 0.14, 0.14, 0.13, 0.11], [0.11, 0.13, 0.15, 0.16, 0.16, 0.16, 0.16, 
0.16, 0.15, 0.13], [0.13, 0.15, 0.40, 0.40, 0.18, 0.18, 0.18, 0.60, 0.60, 0.15], [0.14, 0.16, 0.40, 0.40, 0.19, 0.19, 0.19, 0.60, 0.60, 0.16], [0.14, 0.16, 0.18, 0.19, 0.19, 0.19, 0.19, 0.19, 0.18, 0.16], [0.15, 0.182, 0.18, 0.19, 0.204, 0.20, 0.19, 0.19, 0.18, 0.16], [0.14, 0.16, 0.18, 0.19, 0.19, 0.19, 0.19, 0.19, 0.18, 0.16], [0.14, 0.16, 0.80, 0.80, 0.19, 0.19, 0.19, 1.0, 1.0, 0.16], [0.13, 0.15, 0.80, 0.80, 0.18, 0.18, 0.18, 1.0, 1.0, 0.15], [0.11, 0.13, 0.15, 0.16, 0.16, 0.16, 0.16, 0.16, 0.15, 0.13], ], dtype=np.float32, ) inverted_data = 1.0 - data out = extrema.h_maxima(data, 0.003) expected_result = np.array( [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 1, 1, 0], [0, 0, 1, 1, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 1, 1, 0], [0, 0, 1, 1, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ], dtype=np.uint8, ) error = diff(expected_result, out) assert error < eps out = extrema.h_minima(inverted_data, 0.003) error = diff(expected_result, out) assert error < eps def test_h_maxima_float_image(self): """specific tests for h-maxima float image type""" w = 10 x, y = np.mgrid[0:w, 0:w] data = 20 - 0.2 * ((x - w / 2) ** 2 + (y - w / 2) ** 2) data[2:4, 2:4] = 40 data[2:4, 7:9] = 60 data[7:9, 2:4] = 80 data[7:9, 7:9] = 100 data = data.astype(np.float32) expected_result = np.zeros_like(data) expected_result[(data > 19.9)] = 1.0 for h in [1.0e-12, 1.0e-6, 1.0e-3, 1.0e-2, 1.0e-1, 0.1]: out = extrema.h_maxima(data, h) error = diff(expected_result, out) assert error < eps def test_h_maxima_float_h(self): """specific tests for h-maxima float h parameter""" data = np.array( [ [0, 0, 0, 0, 0], [0, 3, 3, 3, 0], [0, 3, 4, 3, 0], [0, 3, 3, 3, 0], [0, 0, 0, 0, 0], ], dtype=np.uint8, ) h_vals = np.linspace(1.0, 2.0, 100) failures = 0 for h in h_vals: if h % 1 != 0: with pytest.warns( UserWarning, match='possible precision loss converting image' ): maxima = extrema.h_maxima(data, h) else: maxima = extrema.h_maxima(data, h) if maxima[2, 2] == 0: failures += 1 assert failures == 0 def test_h_maxima_large_h(self): """test that h-maxima works correctly for large h""" data = np.array( [ [10, 10, 10, 10, 10], [10, 13, 13, 13, 10], [10, 13, 14, 13, 10], [10, 13, 13, 13, 10], [10, 10, 10, 10, 10], ], dtype=np.uint8, ) maxima = extrema.h_maxima(data, 5) assert np.sum(maxima) == 0 data = np.array( [ [10, 10, 10, 10, 10], [10, 13, 13, 13, 10], [10, 13, 14, 13, 10], [10, 13, 13, 13, 10], [10, 10, 10, 10, 10], ], dtype=np.float32, ) maxima = extrema.h_maxima(data, 5.0) assert np.sum(maxima) == 0 def test_h_minima_float_image(self): """specific tests for h-minima float image type""" w = 10 x, y = np.mgrid[0:w, 0:w] data = 180 + 0.2 * ((x - w / 2) ** 2 + (y - w / 2) ** 2) data[2:4, 2:4] = 160 data[2:4, 7:9] = 140 data[7:9, 2:4] = 120 data[7:9, 7:9] = 100 data = data.astype(np.float32) expected_result = np.zeros_like(data) expected_result[(data < 180.1)] = 1.0 for h in [1.0e-12, 1.0e-6, 1.0e-3, 1.0e-2, 1.0e-1, 0.1]: out = extrema.h_minima(data, h) error = diff(expected_result, out) assert error < eps def test_h_minima_float_h(self): """specific tests for h-minima float h parameter""" data = np.array( [ [4, 4, 4, 4, 4], [4, 1, 1, 1, 4], [4, 1, 0, 1, 4], [4, 1, 1, 1, 4], [4, 4, 4, 4, 4], ], dtype=np.uint8, ) h_vals = np.linspace(1.0, 2.0, 100) failures = 0 for h in h_vals: if h % 1 != 0: with pytest.warns( UserWarning, match='possible precision loss converting image' ): minima = 
extrema.h_minima(data, h) else: minima = extrema.h_minima(data, h) if minima[2, 2] == 0: failures += 1 assert failures == 0 def test_h_minima_large_h(self): """test that h-minima works correctly for large h""" data = np.array( [ [14, 14, 14, 14, 14], [14, 11, 11, 11, 14], [14, 11, 10, 11, 14], [14, 11, 11, 11, 14], [14, 14, 14, 14, 14], ], dtype=np.uint8, ) maxima = extrema.h_minima(data, 5) assert np.sum(maxima) == 0 data = np.array( [ [14, 14, 14, 14, 14], [14, 11, 11, 11, 14], [14, 11, 10, 11, 14], [14, 11, 11, 11, 14], [14, 14, 14, 14, 14], ], dtype=np.float32, ) maxima = extrema.h_minima(data, 5.0) assert np.sum(maxima) == 0
TestExtrema
python
pytorch__pytorch
torch/_dynamo/utils.py
{ "start": 34457, "end": 42636 }
class ____: """Similar to weakref.WeakKeyDictionary, but use `is`/`id` rather than `==` to compare equality""" def __init__(self) -> None: self.values: dict[int, Any] = {} self.refs: dict[int, weakref.ReferenceType[Any]] = {} def __getitem__(self, key: Any) -> Any: return self.values[id(key)] def get(self, key: Any, default: Any = None) -> Any: return self.values.get(id(key), default) def __contains__(self, key: Any) -> bool: return id(key) in self.values def __setitem__(self, key: Any, value: Any) -> None: idx = id(key) if idx not in self.refs: self.refs[idx] = weakref.ref(key, lambda ref: self._remove_id(idx)) self.values[idx] = value def _remove_id(self, idx: int) -> None: if idx in self.values: del self.values[idx] if idx in self.refs: del self.refs[idx] def clear(self) -> None: self.refs.clear() self.values.clear() @overload def istype(obj: object, allowed_types: type[T]) -> TypeIs[T]: ... @overload def istype( obj: object, allowed_types: tuple[type[list[T]], type[tuple[T, ...]]] ) -> TypeIs[T]: ... @overload def istype(obj: object, allowed_types: Iterable[type]) -> bool: ... def istype(obj: object, allowed_types: Any) -> bool: """isinstance() without subclasses""" if isinstance(allowed_types, (tuple, list, set)): return type(obj) in allowed_types return type(obj) is allowed_types if sys.version_info >= (3, 12): # Some typing classes moved to C in 3.12, # which no longer have the _Final mixin. # Check for consistency e.g. here: # https://github.com/python/cpython/blob/f2b82b3b3b1f8c7a81e84df35ee921e44517cf32/Lib/typing.py#L32 _builtin_final_typing_classes = ( typing.ParamSpecArgs, typing.ParamSpecKwargs, typing.ParamSpec, typing.TypeVar, typing.TypeVarTuple, typing.TypeAliasType, ) if sys.version_info >= (3, 14): _builtin_final_typing_classes += (typing.Union,) def is_typing(value: Any) -> bool: # _Final catches most of typing classes: # - Any # - Callable # - Union (Python < 3.14) # ... # # NB: we intentionally ignore classes that inherit from Generic, since they # can be used as both TypingVariable as well as UserDefinedClassVariable. if sys.version_info >= (3, 12) and isinstance(value, _builtin_final_typing_classes): return True return ( isinstance(value, typing._Final) # type: ignore[attr-defined] or value is typing.Generic or value is typing.Union ) def is_numpy_int_type(value: Any) -> bool: if not np: return False return istype( value, ( np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, ), ) def is_numpy_float_type(value: Any) -> bool: if not np: return False return istype( value, ( np.float16, np.float32, np.float64, ), ) @overload def is_lru_cache_wrapped_function( value: Callable[..., T], ) -> TypeGuard[functools._lru_cache_wrapper[T]]: ... @overload def is_lru_cache_wrapped_function( value: Any, ) -> TypeGuard[functools._lru_cache_wrapper[Any]]: ... 
def is_lru_cache_wrapped_function( value: Any, ) -> bool: return isinstance(value, functools._lru_cache_wrapper) and is_function( inspect.getattr_static(value, "__wrapped__") ) _FuncTypes: TypeAlias = Union[ types.FunctionType, types.BuiltinFunctionType, types.MethodDescriptorType, types.WrapperDescriptorType, ] def is_function_or_wrapper( value: Any, ) -> TypeIs[Union[_FuncTypes, torch._ops.OpOverloadPacket, torch._ops.OpOverload]]: return is_function(value) or isinstance( value, (torch._ops.OpOverloadPacket, torch._ops.OpOverload) ) def is_function( value: Any, ) -> TypeIs[_FuncTypes]: return isinstance( value, ( types.FunctionType, types.BuiltinFunctionType, types.MethodDescriptorType, types.WrapperDescriptorType, ), ) cmp_name_to_op_mapping = { "__eq__": operator.eq, "__ne__": operator.ne, "__lt__": operator.lt, "__le__": operator.le, "__gt__": operator.gt, "__ge__": operator.ge, } cmp_name_to_op_str_mapping = { "__eq__": "==", "__ne__": "!=", "__lt__": "<", "__le__": "<=", "__gt__": ">", "__ge__": ">=", } def is_wrapper_or_member_descriptor( value: Any, ) -> TypeIs[ Union[ types.GetSetDescriptorType, types.MethodDescriptorType, types.WrapperDescriptorType, types.MemberDescriptorType, types.MethodWrapperType, ] ]: return isinstance( value, ( # set up by PyGetSetDef types.GetSetDescriptorType, # set by PyMethodDef, e.g. list.append types.MethodDescriptorType, # slots - list.__add__ types.WrapperDescriptorType, # set up by PyMemberDef types.MemberDescriptorType, # wrapper over C functions types.MethodWrapperType, ), ) def unwrap_if_wrapper(fn: Any) -> Any: return unwrap_with_attr_name_if_wrapper(fn)[0] def unwrap_with_attr_name_if_wrapper(fn: Any) -> tuple[Any, Optional[str]]: # TODO(anijain2305) - Investigate if we can get rid of this function # unpack @torch._dynamo.optimize()(fn) wrapped function if is_function(fn) and inspect.getattr_static(fn, "_torchdynamo_inline", False): fn = inspect.getattr_static(fn, "_torchdynamo_inline", fn) attr_name = "_torchdynamo_inline" else: attr_name = None return fn, attr_name def is_numpy_ndarray(value: Any) -> TypeGuard[np.ndarray]: # type: ignore[type-arg] if not np: return False return istype(value, np.ndarray) def istensor(obj: Any) -> bool: """Check if obj is a tensor""" tensor_list: tuple[type, ...]
= ( torch.Tensor, torch.nn.Parameter, *config.traceable_tensor_subclasses, ) tensor_list = tensor_list + (torch._subclasses.FakeTensor,) return istype(obj, tensor_list) def is_lazy_module(mod: Any) -> bool: return isinstance(mod, LazyModuleMixin) @functools.lru_cache(4096) def print_once(*args: Any) -> None: print(*args) def make_cell(val: Any = None) -> types.CellType: """Some black magic to create a cell object that usually only exists in a closure""" x = val def f() -> Any: return x assert f.__closure__ is not None and len(f.__closure__) == 1 return f.__closure__[0] def proxy_args_kwargs(args: Any, kwargs: Any) -> tuple[tuple[Any, ...], dict[str, Any]]: try: proxy_args = tuple(arg.as_proxy() for arg in args) proxy_kwargs = {key: arg.as_proxy() for key, arg in kwargs.items()} return proxy_args, proxy_kwargs except NotImplementedError as e: from .exc import unimplemented from .variables.base import typestr unimplemented( gb_type="Failed to convert args/kwargs to proxy", context=f"call_function args: {typestr(*args)} {typestr(*list(kwargs.values()))}", explanation="Missing `as_proxy()` implementation for some arg/kwarg.", hints=[], from_exc=e, ) def to_int_ms(v: Optional[float]) -> Optional[int]: return None if v is None else int(v * 1000) # float64 timestamp has a quarter microsecond precision in 2024, so while # this is suboptimal we shouldn't meaningfully lose precision def to_int_us(v: Optional[float]) -> Optional[int]: return None if v is None else int(v * 1_000_000) # Version field added to every log. Increment to make it easier to distinguish new # vs. old entries when you make a substantive change to how the logs are populated. LOG_FORMAT_VERSION = 3 @dataclasses.dataclass
ExactWeakKeyDictionary
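A small demonstration of the identity semantics the docstring above describes (standalone sketch; Box is a made-up class whose instances all compare equal):

class Box:
    def __eq__(self, other):
        return True   # every Box equals every other Box...
    def __hash__(self):
        return 0

a, b = Box(), Box()
d = ExactWeakKeyDictionary()
d[a] = "first"
assert a in d       # ...but membership here is keyed on id()
assert b not in d   # a plain dict or weakref.WeakKeyDictionary would say True
del a               # drop the last strong reference
assert len(d.values) == 0  # weakref callback evicted the entry (CPython refcounting)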
python
sqlalchemy__sqlalchemy
test/engine/test_deprecations.py
{ "start": 1261, "end": 2652 }
class ____(fixtures.TestBase): """test various things associated with "connectionless" executions.""" def check_usage(self, inspector): with inspector._operation_context() as conn: is_instance_of(conn, Connection) def test_inspector_constructor_engine(self): with testing.expect_deprecated( r"The __init__\(\) method on Inspector is deprecated and will " r"be removed in a future release." ): i1 = reflection.Inspector(testing.db) is_(i1.bind, testing.db) self.check_usage(i1) def test_inspector_constructor_connection(self): with testing.db.connect() as conn: with testing.expect_deprecated( r"The __init__\(\) method on Inspector is deprecated and " r"will be removed in a future release." ): i1 = reflection.Inspector(conn) is_(i1.bind, conn) is_(i1.engine, testing.db) self.check_usage(i1) def test_inspector_from_engine(self): with testing.expect_deprecated( r"The from_engine\(\) method on Inspector is deprecated and will " r"be removed in a future release." ): i1 = reflection.Inspector.from_engine(testing.db) is_(i1.bind, testing.db) self.check_usage(i1)
ConnectionlessDeprecationTest
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_default_format07.py
{ "start": 315, "end": 2247 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("default_format07.xlsx") def test_create_file(self): """Test the creation of a file with user defined default format""" workbook = Workbook( self.got_filename, { "default_format_properties": {"font_name": "Calibri", "font_size": 11}, "default_row_height": 20, "default_column_width": 64, }, ) worksheet = workbook.add_worksheet() worksheet.insert_image("E9", self.image_dir + "red.png", {"x_offset": 32}) # Set user column width and row height to test positioning calculation. worksheet.set_column_pixels(4, 4, 96) worksheet.set_row_pixels(8, 32) # Set column to test column width less than 1 character. worksheet.set_column_pixels(6, 6, 10) workbook.close() self.assertExcelEqual() def test_create_file_with_character_units(self): """Test the creation of a file with user defined default format""" # Same as test_create_file() but using character units instead of pixels. workbook = Workbook( self.got_filename, { "default_format_properties": {"font_name": "Calibri", "font_size": 11}, "default_row_height": 20, "default_column_width": 64, }, ) worksheet = workbook.add_worksheet() worksheet.insert_image("E9", self.image_dir + "red.png", {"x_offset": 32}) # Set user column width and row height to test positioning calculation. worksheet.set_column(4, 4, 13.0) worksheet.set_row(8, 24.0) # Set column to test column width less than 1 character. worksheet.set_column(6, 6, 0.83) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
numpy__numpy
numpy/_core/tests/test_numeric.py
{ "start": 32513, "end": 37720 }
class ____: def assert_raises_fpe(self, fpeerr, flop, x, y): ftype = type(x) try: flop(x, y) assert_(False, f"Type {ftype} did not raise fpe error '{fpeerr}'.") except FloatingPointError as exc: assert_(str(exc).find(fpeerr) >= 0, f"Type {ftype} raised wrong fpe error '{exc}'.") def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): # Check that fpe exception is raised. # # Given a floating operation `flop` and two scalar values, check that # the operation raises the floating point exception specified by # `fpeerr`. Tests all variants with 0-d array scalars as well. self.assert_raises_fpe(fpeerr, flop, sc1, sc2) self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2) self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]) self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]) # Test for all real and complex float types @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") @pytest.mark.parametrize("typecode", np.typecodes["AllFloat"]) def test_floating_exceptions(self, typecode): if 'bsd' in sys.platform and typecode in 'gG': pytest.skip(reason="Fallback impl for (c)longdouble may not raise " "FPE errors as expected on BSD OSes, " "see gh-24876, gh-23379") # Test basic arithmetic function errors with np.errstate(all='raise'): ftype = obj2sctype(typecode) if np.dtype(ftype).kind == 'f': # Get some extreme values for the type fi = np.finfo(ftype) ft_tiny = fi.tiny ft_max = fi.max ft_eps = fi.eps underflow = 'underflow' divbyzero = 'divide by zero' else: # 'c', complex, corresponding real dtype rtype = type(ftype(0).real) fi = np.finfo(rtype) ft_tiny = ftype(fi.tiny) ft_max = ftype(fi.max) ft_eps = ftype(fi.eps) # The complex types raise different exceptions underflow = '' divbyzero = '' overflow = 'overflow' invalid = 'invalid' # The value of tiny for double double is NaN, so we need to # pass the assert if not np.isnan(ft_tiny): self.assert_raises_fpe(underflow, lambda a, b: a / b, ft_tiny, ft_max) self.assert_raises_fpe(underflow, lambda a, b: a * b, ft_tiny, ft_tiny) self.assert_raises_fpe(overflow, lambda a, b: a * b, ft_max, ftype(2)) self.assert_raises_fpe(overflow, lambda a, b: a / b, ft_max, ftype(0.5)) self.assert_raises_fpe(overflow, lambda a, b: a + b, ft_max, ft_max * ft_eps) self.assert_raises_fpe(overflow, lambda a, b: a - b, -ft_max, ft_max * ft_eps) # On AIX, pow() with double does not raise the overflow exception, # it returns inf. Long double is the same as double. if sys.platform != 'aix' or typecode not in 'dDgG': self.assert_raises_fpe(overflow, np.power, ftype(2), ftype(2**fi.nexp)) self.assert_raises_fpe(divbyzero, lambda a, b: a / b, ftype(1), ftype(0)) self.assert_raises_fpe( invalid, lambda a, b: a / b, ftype(np.inf), ftype(np.inf) ) self.assert_raises_fpe(invalid, lambda a, b: a / b, ftype(0), ftype(0)) self.assert_raises_fpe( invalid, lambda a, b: a - b, ftype(np.inf), ftype(np.inf) ) self.assert_raises_fpe( invalid, lambda a, b: a + b, ftype(np.inf), ftype(-np.inf) ) self.assert_raises_fpe(invalid, lambda a, b: a * b, ftype(0), ftype(np.inf)) @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") def test_warnings(self): # test warning code path with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") with np.errstate(all="warn"): np.divide(1, 0.) 
assert_equal(len(w), 1) assert_("divide by zero" in str(w[0].message)) np.array(1e300) * np.array(1e300) assert_equal(len(w), 2) assert_("overflow" in str(w[-1].message)) np.array(np.inf) - np.array(np.inf) assert_equal(len(w), 3) assert_("invalid value" in str(w[-1].message)) np.array(1e-300) * np.array(1e-300) assert_equal(len(w), 4) assert_("underflow" in str(w[-1].message))
TestFloatExceptions
python
coleifer__peewee
peewee.py
{ "start": 205746, "end": 216301 }
class ____(object): def __init__(self, model, database=None, table_name=None, indexes=None, primary_key=None, constraints=None, schema=None, only_save_dirty=False, depends_on=None, options=None, db_table=None, table_function=None, table_settings=None, without_rowid=False, temporary=False, strict_tables=None, legacy_table_names=True, **kwargs): if db_table is not None: __deprecated__('"db_table" has been deprecated in favor of ' '"table_name" for Models.') table_name = db_table self.model = model self.database = database self.fields = {} self.columns = {} self.combined = {} self._sorted_field_list = _SortedFieldList() self.sorted_fields = [] self.sorted_field_names = [] self.defaults = {} self._default_by_name = {} self._default_dict = {} self._default_callables = {} self._default_callable_list = [] self.name = model.__name__.lower() self.table_function = table_function self.legacy_table_names = legacy_table_names if not table_name: table_name = (self.table_function(model) if self.table_function else self.make_table_name()) self.table_name = table_name self._table = None self.indexes = list(indexes) if indexes else [] self.constraints = constraints self._schema = schema self.primary_key = primary_key self.composite_key = self.auto_increment = None self.only_save_dirty = only_save_dirty self.depends_on = depends_on self.table_settings = table_settings self.without_rowid = without_rowid self.strict_tables = strict_tables self.temporary = temporary self.refs = {} self.backrefs = {} self.model_refs = collections.defaultdict(list) self.model_backrefs = collections.defaultdict(list) self.manytomany = {} self.options = options or {} for key, value in kwargs.items(): setattr(self, key, value) self._additional_keys = set(kwargs.keys()) # Allow objects to register hooks that are called if the model is bound # to a different database. For example, BlobField uses a different # Python data-type depending on the db driver / python version. When # the database changes, we need to update any BlobField so they can use # the appropriate data-type. 
self._db_hooks = [] def make_table_name(self): if self.legacy_table_names: return re.sub(r'[^\w]+', '_', self.name) return make_snake_case(self.model.__name__) def model_graph(self, refs=True, backrefs=True, depth_first=True): if not refs and not backrefs: raise ValueError('One of `refs` or `backrefs` must be True.') accum = [(None, self.model, None)] seen = set() queue = collections.deque((self,)) method = queue.pop if depth_first else queue.popleft while queue: curr = method() if curr in seen: continue seen.add(curr) if refs: for fk, model in curr.refs.items(): accum.append((fk, model, False)) queue.append(model._meta) if backrefs: for fk, model in curr.backrefs.items(): accum.append((fk, model, True)) queue.append(model._meta) return accum def add_ref(self, field): rel = field.rel_model self.refs[field] = rel self.model_refs[rel].append(field) rel._meta.backrefs[field] = self.model rel._meta.model_backrefs[self.model].append(field) def remove_ref(self, field): rel = field.rel_model del self.refs[field] self.model_refs[rel].remove(field) del rel._meta.backrefs[field] rel._meta.model_backrefs[self.model].remove(field) def add_manytomany(self, field): self.manytomany[field.name] = field def remove_manytomany(self, field): del self.manytomany[field.name] @property def table(self): if self._table is None: self._table = Table( self.table_name, [field.column_name for field in self.sorted_fields], schema=self.schema, _model=self.model, _database=self.database) return self._table @table.setter def table(self, value): raise AttributeError('Cannot set the "table".') @table.deleter def table(self): self._table = None @property def schema(self): return self._schema @schema.setter def schema(self, value): self._schema = value del self.table @property def entity(self): if self._schema: return Entity(self._schema, self.table_name) else: return Entity(self.table_name) def _update_sorted_fields(self): self.sorted_fields = list(self._sorted_field_list) self.sorted_field_names = [f.name for f in self.sorted_fields] def get_rel_for_model(self, model): if isinstance(model, ModelAlias): model = model.model forwardrefs = self.model_refs.get(model, []) backrefs = self.model_backrefs.get(model, []) return (forwardrefs, backrefs) def add_field(self, field_name, field, set_attribute=True): if field_name in self.fields: self.remove_field(field_name) elif field_name in self.manytomany: self.remove_manytomany(self.manytomany[field_name]) if not isinstance(field, MetaField): del self.table field.bind(self.model, field_name, set_attribute) self.fields[field.name] = field self.columns[field.column_name] = field self.combined[field.name] = field self.combined[field.column_name] = field self._sorted_field_list.insert(field) self._update_sorted_fields() if field.default is not None: # This optimization helps speed up model instance construction. 
self.defaults[field] = field.default if callable_(field.default): self._default_callables[field] = field.default self._default_callable_list.append((field.name, field.default)) else: self._default_dict[field] = field.default self._default_by_name[field.name] = field.default else: field.bind(self.model, field_name, set_attribute) if isinstance(field, ForeignKeyField): self.add_ref(field) elif isinstance(field, ManyToManyField) and field.name: self.add_manytomany(field) def remove_field(self, field_name): if field_name not in self.fields: return del self.table original = self.fields.pop(field_name) del self.columns[original.column_name] del self.combined[field_name] try: del self.combined[original.column_name] except KeyError: pass self._sorted_field_list.remove(original) self._update_sorted_fields() if original.default is not None: del self.defaults[original] if self._default_callables.pop(original, None): for i, (name, _) in enumerate(self._default_callable_list): if name == field_name: self._default_callable_list.pop(i) break else: self._default_dict.pop(original, None) self._default_by_name.pop(original.name, None) if isinstance(original, ForeignKeyField): self.remove_ref(original) def set_primary_key(self, name, field): self.composite_key = isinstance(field, CompositeKey) self.add_field(name, field) self.primary_key = field self.auto_increment = ( field.auto_increment or bool(field.sequence)) def get_primary_keys(self): if self.composite_key: return tuple([self.fields[field_name] for field_name in self.primary_key.field_names]) else: return (self.primary_key,) if self.primary_key is not False else () def get_default_dict(self): dd = self._default_by_name.copy() for field_name, default in self._default_callable_list: dd[field_name] = default() return dd def fields_to_index(self): indexes = [] for f in self.sorted_fields: if f.primary_key: continue if f.index or f.unique: indexes.append(ModelIndex(self.model, (f,), unique=f.unique, using=f.index_type)) for index_obj in self.indexes: if isinstance(index_obj, Node): indexes.append(index_obj) elif isinstance(index_obj, (list, tuple)): index_parts, unique = index_obj fields = [] for part in index_parts: if isinstance(part, basestring): fields.append(self.combined[part]) elif isinstance(part, Node): fields.append(part) else: raise ValueError('Expected either a field name or a ' 'subclass of Node. Got: %s' % part) indexes.append(ModelIndex(self.model, fields, unique=unique)) return indexes def set_database(self, database): self.database = database self.model._schema._database = database del self.table # Apply any hooks that have been registered. If we have an # uninitialized proxy object, we will treat that as `None`. if isinstance(database, Proxy) and database.obj is None: database = None for hook in self._db_hooks: hook(database) def set_table_name(self, table_name): self.table_name = table_name del self.table
Metadata
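This Metadata class is what peewee exposes as Model._meta; a brief check of the derived attributes using the public peewee API (in-memory SQLite, assuming a stock peewee install):

import peewee as pw

db = pw.SqliteDatabase(":memory:")

class Person(pw.Model):
    name = pw.CharField()

    class Meta:
        database = db

assert Person._meta.table_name == "person"    # make_table_name() on the lower-cased model name
assert "name" in Person._meta.fields          # populated through add_field()
assert Person._meta.primary_key.name == "id"  # implicit AutoField primary key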
python
django__django
django/template/smartif.py
{ "start": 4382, "end": 4551 }
class ____(TokenBase): lbp = 0 def nud(self, parser): raise parser.error_class("Unexpected end of expression in if tag.") EndToken = EndToken()
EndToken
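Note the final statement in the record: EndToken = EndToken() rebinds the name from the class to a single shared instance, so the template parser compares tokens against one sentinel object. The same idiom in isolation (a standalone sketch, not Django code):

class _Missing:
    def __repr__(self):
        return "<missing>"

_Missing = _Missing()   # the name now refers to the class's only instance
print(repr(_Missing))   # <missing>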
python
getsentry__sentry
src/sentry/notifications/models/notificationaction.py
{ "start": 2560, "end": 3479 }
class ____(FlexibleIntEnum): """ Explains the contents of target_identifier """ # The target_identifier is a direct reference used by the service (e.g. email address, slack channel id) SPECIFIC = 0 # The target_identifier is an id from the User model in Sentry USER = 1 # The target_identifier is an id from the Team model in Sentry TEAM = 2 # The target_identifier is an id from the SentryApp model in Sentry SENTRY_APP = 3 # There is no target_identifier, but we want to send notifications to the issue owners ISSUE_OWNERS = 4 @classmethod def as_choices(cls) -> tuple[tuple[int, str], ...]: return ( (cls.SPECIFIC.value, "specific"), (cls.USER.value, "user"), (cls.TEAM.value, "team"), (cls.SENTRY_APP.value, "sentry_app"), (cls.ISSUE_OWNERS.value, "issue_owners"), )
ActionTarget
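A quick look at how the enum above is consumed (assuming FlexibleIntEnum preserves ordinary Enum .value semantics, which as_choices() already relies on; the IntegerField line shows one plausible Django usage, not code from this file):

assert ActionTarget.USER.value == 1
assert ActionTarget.as_choices()[1] == (1, "user")
# e.g. in a model definition:
# target_type = models.IntegerField(choices=ActionTarget.as_choices())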
python
crytic__slither
slither/core/expressions/conditional_expression.py
{ "start": 481, "end": 1897 }
class ____(Expression): def __init__( self, if_expression: Union[BinaryOperation, Identifier, Literal], then_expression: Union[ "ConditionalExpression", TypeConversion, Literal, TupleExpression, Identifier ], else_expression: Union[TupleExpression, UnaryOperation, Identifier, Literal], ) -> None: assert isinstance(if_expression, Expression) assert isinstance(then_expression, Expression) assert isinstance(else_expression, Expression) super().__init__() self._if_expression: Expression = if_expression self._then_expression: Expression = then_expression self._else_expression: Expression = else_expression @property def expressions(self) -> List[Expression]: return [self._if_expression, self._then_expression, self._else_expression] @property def if_expression(self) -> Expression: return self._if_expression @property def else_expression(self) -> Expression: return self._else_expression @property def then_expression(self) -> Expression: return self._then_expression def __str__(self) -> str: return ( "if " + str(self._if_expression) + " then " + str(self._then_expression) + " else " + str(self._else_expression) )
ConditionalExpression
python
hynek__structlog
tests/test_stdlib.py
{ "start": 15984, "end": 16992 }
class ____: def test_logger_name_added(self): """ The logger name is added to the event dict. """ name = "sample-name" logger = logging.getLogger(name) event_dict = add_logger_name(logger, None, {}) assert name == event_dict["logger"] def test_logger_name_added_with_record(self, make_log_record): """ The logger name is deduced from the LogRecord if provided. """ name = "sample-name" record = make_log_record(name=name) event_dict = add_logger_name(None, None, {"_record": record}) assert name == event_dict["logger"] def extra_dict() -> dict[str, Any]: """ A dict to be passed in the `extra` parameter of the `logging` module's log methods. """ return { "this": "is", "some": "extra values", "x_int": 4, "x_bool": True, } @pytest.fixture(name="extra_dict") def extra_dict_fixture(): return extra_dict()
TestAddLoggerName
python
langchain-ai__langchain
libs/langchain/tests/integration_tests/cache/fake_embeddings.py
{ "start": 189, "end": 1270 }
class ____(Embeddings): """Fake embeddings functionality for testing.""" @override def embed_documents(self, texts: list[str]) -> list[list[float]]: """Return simple embeddings. Embeddings encode each text as its index. Args: texts: List of text to embed. Returns: List of embeddings. """ return [[1.0] * 9 + [float(i)] for i in range(len(texts))] async def aembed_documents(self, texts: list[str]) -> list[list[float]]: return self.embed_documents(texts) @override def embed_query(self, text: str) -> list[float]: """Return constant query embeddings. Embeddings are identical to embed_documents(texts)[0]. Distance to each text will be that text's index, as it was passed to embed_documents. Args: text: Text to embed. Returns: Embedding. """ return [1.0] * 9 + [0.0] async def aembed_query(self, text: str) -> list[float]: return self.embed_query(text)
FakeEmbeddings
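The fixture's embeddings are deterministic by construction; a short check of the contract stated in the docstrings:

emb = FakeEmbeddings()
docs = emb.embed_documents(["alpha", "beta"])
assert docs[0] == [1.0] * 9 + [0.0]            # index 0 encoded in the last dimension
assert docs[1] == [1.0] * 9 + [1.0]            # index 1
assert emb.embed_query("anything") == docs[0]  # query embedding equals embed_documents(texts)[0]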
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 198113, "end": 198469 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("client_mutation_id", "comment") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") comment = sgqlc.types.Field("DiscussionComment", graphql_name="comment")
AddDiscussionCommentPayload
python
pennersr__django-allauth
allauth/socialaccount/providers/mailchimp/provider.py
{ "start": 819, "end": 1740 }
class ____(OAuth2Provider): """OAuth2Provider subclass for MailChimp v3.""" id = "mailchimp" name = "MailChimp" account_class = MailChimpAccount oauth2_adapter_class = MailChimpOAuth2Adapter def extract_uid(self, data): """Extract uid ('user_id') and ensure it's a str.""" return str(data["user_id"]) def get_default_scope(self): """Ensure scope is null to fit their API.""" return [""] def extract_common_fields(self, data): """Extract fields from a metadata query.""" return dict( dc=data.get("dc"), role=data.get("role"), account_name=data.get("accountname"), user_id=data.get("user_id"), login=data.get("login"), login_url=data.get("login_url"), api_endpoint=data.get("api_endpoint"), ) provider_classes = [MailChimpProvider]
MailChimpProvider
python
rapidsai__cudf
python/cudf/cudf/core/tools/datetimes.py
{ "start": 14961, "end": 15169 }
class ____: def _maybe_as_fast_pandas_offset(self): return pd._libs.tslibs.offsets.YearEnd(month=12) def __eq__(self, other): return self._maybe_as_fast_pandas_offset() == other
YearEnd
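The class above exists so cudf's offset compares equal to the pandas one; pd._libs.tslibs.offsets.YearEnd is the same class pandas re-exports as pd.tseries.offsets.YearEnd, so (assuming YearEnd is defined as shown):

import pandas as pd

assert YearEnd() == pd.tseries.offsets.YearEnd(month=12)
assert not (YearEnd() == pd.tseries.offsets.YearEnd(month=6))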
python
django__django
django/db/models/sql/compiler.py
{ "start": 1241, "end": 71516 }
class ____: # Multiline ordering SQL clause may appear from RawSQL. ordering_parts = _lazy_re_compile( r"^(.*)\s(?:ASC|DESC).*", re.MULTILINE | re.DOTALL, ) def __init__(self, query, connection, using, elide_empty=True): self.query = query self.connection = connection self.using = using # Some queries, e.g. coalesced aggregation, need to be executed even if # they would return an empty result set. self.elide_empty = elide_empty self.quote_cache = {"*": "*"} # The select, klass_info, and annotations are needed by # QuerySet.iterator() these are set as a side-effect of executing the # query. Note that we calculate separately a list of extra select # columns needed for grammatical correctness of the query, but these # columns are not included in self.select. self.select = None self.annotation_col_map = None self.klass_info = None self._meta_ordering = None def __repr__(self): return ( f"<{self.__class__.__qualname__} " f"model={self.query.model.__qualname__} " f"connection={self.connection!r} using={self.using!r}>" ) def setup_query(self, with_col_aliases=False): if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map): self.query.get_initial_alias() self.select, self.klass_info, self.annotation_col_map = self.get_select( with_col_aliases=with_col_aliases, ) self.col_count = len(self.select) def pre_sql_setup(self, with_col_aliases=False): """ Do any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time. """ self.setup_query(with_col_aliases=with_col_aliases) order_by = self.get_order_by() self.where, self.having, self.qualify = self.query.where.split_having_qualify( must_group_by=self.query.group_by is not None ) extra_select = self.get_extra_select(order_by, self.select) self.has_extra_select = bool(extra_select) group_by = self.get_group_by(self.select + extra_select, order_by) return extra_select, order_by, group_by def get_group_by(self, select, order_by): """ Return a list of 2-tuples of form (sql, params). The logic of what exactly the GROUP BY clause contains is hard to describe in other words than "if it passes the test suite, then it is correct". """ # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk') # .annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. 
if self.query.group_by is None: return [] expressions = [] group_by_refs = set() if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. # Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, "as_sql"): expr = self.query.resolve_ref(expr) if isinstance(expr, Ref): if expr.refs not in group_by_refs: group_by_refs.add(expr.refs) expressions.append(expr.source) else: expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. selected_expr_positions = {} for ordinal, (expr, _, alias) in enumerate(select, start=1): if alias: selected_expr_positions[expr] = ordinal # Skip members of the select clause that are already explicitly # grouped against. if alias in group_by_refs: continue expressions.extend(expr.get_group_by_cols()) if not self._meta_ordering: for expr, (sql, params, is_ref) in order_by: # Skip references to the SELECT clause, as all expressions in # the SELECT clause are already part of the GROUP BY. if not is_ref: expressions.extend(expr.get_group_by_cols()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) allows_group_by_select_index = ( self.connection.features.allows_group_by_select_index ) for expr in expressions: try: sql, params = self.compile(expr) except (EmptyResultSet, FullResultSet): continue if ( allows_group_by_select_index and (position := selected_expr_positions.get(expr)) is not None ): sql, params = str(position), () else: sql, params = expr.select_format(self, sql, params) params_hash = make_hashable(params) if (sql, params_hash) not in seen: result.append((sql, params)) seen.add((sql, params_hash)) return result def collapse_group_by(self, expressions, having): # If the database supports group by functional dependence reduction, # then the expressions can be reduced to the set of selected table # primary keys as all other columns are functionally dependent on them. if self.connection.features.allows_group_by_selected_pks: # Filter out all expressions associated with a table's primary key # present in the grouped columns. This is done by identifying all # tables that have their primary key included in the grouped # columns and removing non-primary key columns referring to them. # Unmanaged models are excluded because they could be representing # database views on which the optimization might not be allowed. 
pks = { expr for expr in expressions if ( hasattr(expr, "target") and expr.target.primary_key and self.connection.features.allows_group_by_selected_pks_on_model( expr.target.model ) ) } aliases = {expr.alias for expr in pks} expressions = [ expr for expr in expressions if expr in pks or expr in having or getattr(expr, "alias", None) not in aliases ] return expressions @classmethod def get_select_from_parent(cls, klass_info): for ki in klass_info["related_klass_infos"]: if ki["from_parent"]: ki["select_fields"] = klass_info["select_fields"] + ki["select_fields"] cls.get_select_from_parent(ki) def get_select(self, with_col_aliases=False): """ Return three values: - a list of 3-tuples of (expression, (sql, params), alias) - a klass_info structure, - a dictionary of annotations The (sql, params) is what the expression will produce, and alias is the "AS alias" for the column (possibly None). The klass_info structure contains the following information: - The base model of the query. - Which columns for that model are present in the query (by position of the select clause). - related_klass_infos: [f, klass_info] to descent into The annotations is a dictionary of {'attname': column position} values. """ select = [] klass_info = None annotations = {} assert not (self.query.select and self.query.default_cols) select_mask = self.query.get_select_mask() if self.query.default_cols: cols = self.get_default_columns(select_mask) else: # self.query.select is a special case. These columns never go to # any model. cols = self.query.select selected = [] select_fields = None if self.query.selected is None: selected = [ *( (alias, RawSQL(*args)) for alias, args in self.query.extra_select.items() ), *((None, col) for col in cols), *self.query.annotation_select.items(), ] select_fields = list( range( len(self.query.extra_select), len(self.query.extra_select) + len(cols), ) ) else: select_fields = [] for index, (alias, expression) in enumerate(self.query.selected.items()): # Reference to an annotation. if isinstance(expression, str): expression = self.query.annotations[expression] # Reference to a column. elif isinstance(expression, int): select_fields.append(index) expression = cols[expression] # ColPairs cannot be aliased. if isinstance(expression, ColPairs): alias = None selected.append((alias, expression)) if select_fields: klass_info = {"model": self.query.model, "select_fields": select_fields} for select_idx, (alias, expression) in enumerate(selected): if alias: annotations[alias] = select_idx select.append((expression, alias)) if self.query.select_related: related_klass_infos = self.get_related_selections(select, select_mask) klass_info["related_klass_infos"] = related_klass_infos self.get_select_from_parent(klass_info) ret = [] col_idx = 1 for col, alias in select: try: sql, params = self.compile(col) except EmptyResultSet: empty_result_set_value = getattr( col, "empty_result_set_value", NotImplemented ) if empty_result_set_value is NotImplemented: # Select a predicate that's always False. 
sql, params = "0", () else: sql, params = self.compile(Value(empty_result_set_value)) except FullResultSet: sql, params = self.compile(Value(True)) else: sql, params = col.select_format(self, sql, params) if alias is None and with_col_aliases: alias = f"col{col_idx}" col_idx += 1 ret.append((col, (sql, params), alias)) return ret, klass_info, annotations def _order_by_pairs(self): if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by elif self.query.order_by: ordering = self.query.order_by elif (meta := self.query.get_meta()) and meta.ordering: ordering = meta.ordering self._meta_ordering = ordering else: ordering = [] if self.query.standard_ordering: default_order, _ = ORDER_DIR["ASC"] else: default_order, _ = ORDER_DIR["DESC"] selected_exprs = {} # Avoid computing `selected_exprs` if there is no `ordering` as it's # relatively expensive. if ordering and (select := self.select): for ordinal, (expr, _, alias) in enumerate(select, start=1): pos_expr = PositionRef(ordinal, alias, expr) if alias: selected_exprs[alias] = pos_expr selected_exprs[expr] = pos_expr for field in ordering: if hasattr(field, "resolve_expression"): if isinstance(field, Value): # output_field must be resolved for constants. field = Cast(field, field.output_field) if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field = field.copy() field.reverse_ordering() select_ref = selected_exprs.get(field.expression) if select_ref or ( isinstance(field.expression, F) and (select_ref := selected_exprs.get(field.expression.name)) ): # Emulation of NULLS (FIRST|LAST) cannot be combined with # the usage of ordering by position. if ( field.nulls_first is None and field.nulls_last is None ) or self.connection.features.supports_order_by_nulls_modifier: field = field.copy() field.expression = select_ref # Alias collisions are not possible when dealing with # combined queries so fallback to it if emulation of NULLS # handling is required. elif self.query.combinator: field = field.copy() field.expression = Ref(select_ref.refs, select_ref.source) yield field, select_ref is not None continue if field == "?": # random yield OrderBy(Random()), False continue col, order = get_order_dir(field, default_order) descending = order == "DESC" if select_ref := selected_exprs.get(col): # Reference to expression in SELECT clause yield ( OrderBy( select_ref, descending=descending, ), True, ) continue if expr := self.query.annotations.get(col): ref = col transforms = [] else: ref, *transforms = col.split(LOOKUP_SEP) expr = self.query.annotations.get(ref) if expr: if self.query.combinator and self.select: if transforms: raise NotImplementedError( "Ordering combined queries by transforms is not " "implemented." ) # Don't use the resolved annotation because other # combined queries might define it differently. expr = F(ref) if transforms: for name in transforms: expr = self.query.try_transform(expr, name) if isinstance(expr, Value): # output_field must be resolved for constants. expr = Cast(expr, expr.output_field) yield OrderBy(expr, descending=descending), False continue if "." in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. 
table, col = col.split(".", 1) yield ( OrderBy( RawSQL( "%s.%s" % (self.quote_name_unless_alias(table), col), [] ), descending=descending, ), False, ) continue if self.query.extra and col in self.query.extra: if col in self.query.extra_select: yield ( OrderBy( Ref(col, RawSQL(*self.query.extra[col])), descending=descending, ), True, ) else: yield ( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False, ) else: if self.query.combinator and self.select: # Don't use the first model's field because other # combinated queries might define it differently. yield OrderBy(F(col), descending=descending), False else: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. yield from self.find_ordering_name( field, self.query.get_meta(), default_order=default_order, ) def get_order_by(self): """ Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). """ result = [] seen = set() for expr, is_ref in self._order_by_pairs(): resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None) if not is_ref and self.query.combinator and self.select: src = resolved.expression expr_src = expr.expression for sel_expr, _, col_alias in self.select: if src == sel_expr: # When values() is used the exact alias must be used to # reference annotations. if ( self.query.has_select_fields and col_alias in self.query.annotation_select and not ( isinstance(expr_src, F) and col_alias == expr_src.name ) ): continue resolved.set_source_expressions( [Ref(col_alias if col_alias else src.target.column, src)] ) break else: # Add column used in ORDER BY clause to the selected # columns and to each combined query. order_by_idx = len(self.query.select) + 1 col_alias = f"__orderbycol{order_by_idx}" for q in self.query.combined_queries: # If fields were explicitly selected through values() # combined queries cannot be augmented. if q.has_select_fields: raise DatabaseError( "ORDER BY term does not match any column in " "the result set." ) q.add_annotation(expr_src, col_alias) self.query.add_select_col(resolved, col_alias) resolved.set_source_expressions([Ref(col_alias, src)]) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. without_ordering = self.ordering_parts.search(sql)[1] params_hash = make_hashable(params) if (without_ordering, params_hash) in seen: continue seen.add((without_ordering, params_hash)) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] if self.query.distinct and not self.query.distinct_fields: select_sql = [t[1] for t in select] for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql)[1] if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). 
""" if name in self.quote_cache: return self.quote_cache[name] if ( (name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( self.query.external_aliases.get(name) and name not in self.query.table_map ) ): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node): vendor_impl = getattr(node, "as_" + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection, self.elide_empty) for query in self.query.combined_queries ] if not features.supports_slicing_ordering_in_compound: for compiler in compilers: if compiler.query.is_sliced: raise DatabaseError( "LIMIT/OFFSET not allowed in subqueries of compound statements." ) if compiler.get_order_by(): raise DatabaseError( "ORDER BY not allowed in subqueries of compound statements." ) parts = [] empty_compiler = None for compiler in compilers: try: parts.append(self._get_combinator_part_sql(compiler)) except EmptyResultSet: # Omit the empty queryset with UNION and with DIFFERENCE if the # first queryset is nonempty. if combinator == "union" or (combinator == "difference" and parts): empty_compiler = compiler continue raise if not parts: raise EmptyResultSet elif len(parts) == 1 and combinator == "union" and self.query.is_sliced: # A sliced union cannot be composed of a single component because # in the event the later is also sliced it might result in invalid # SQL due to the usage of multiple LIMIT clauses. Prevent that from # happening by always including an empty resultset query to force # the creation of an union. empty_compiler.elide_empty = False parts.append(self._get_combinator_part_sql(empty_compiler)) combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == "union": combinator_sql += " ALL" braces = "{}" if not self.query.subquery and features.supports_slicing_ordering_in_compound: braces = "({})" sql_parts, args_parts = zip( *((braces.format(sql), args) for sql, args in parts) ) result = [" {} ".format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def _get_combinator_part_sql(self, compiler): features = self.connection.features # If the columns list is limited, then all combined queries # must have the same columns list. Set the selects defined on # the query on all combined queries, if not already set. selected = self.query.selected if selected is not None and compiler.query.selected is None: compiler.query = compiler.query.clone() compiler.query.set_values(selected) part_sql, part_args = compiler.as_sql(with_col_aliases=True) if compiler.query.combinator: # Wrap in a subquery if wrapping in parentheses isn't # supported. if not features.supports_parentheses_in_compound: part_sql = "SELECT * FROM ({})".format(part_sql) # Add parentheses when combining with compound query if not # already added for all compound queries. 
elif ( self.query.subquery or not features.supports_slicing_ordering_in_compound ): part_sql = "({})".format(part_sql) elif self.query.subquery and features.supports_slicing_ordering_in_compound: part_sql = "({})".format(part_sql) return part_sql, part_args def get_qualify_sql(self): where_parts = [] if self.where: where_parts.append(self.where) if self.having: where_parts.append(self.having) inner_query = self.query.clone() inner_query.subquery = True inner_query.where = inner_query.where.__class__(where_parts) # Augment the inner query with any window function references that # might have been masked via values() and alias(). If any masked # aliases are added they'll be masked again to avoid fetching # the data in the `if qual_aliases` branch below. select = { expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0] } select_aliases = set(select.values()) qual_aliases = set() replacements = {} def collect_replacements(expressions): while expressions: expr = expressions.pop() if expr in replacements: continue elif select_alias := select.get(expr): replacements[expr] = select_alias elif isinstance(expr, Lookup): expressions.extend(expr.get_source_expressions()) elif isinstance(expr, Ref): if expr.refs not in select_aliases: expressions.extend(expr.get_source_expressions()) else: num_qual_alias = len(qual_aliases) select_alias = f"qual{num_qual_alias}" qual_aliases.add(select_alias) inner_query.add_annotation(expr, select_alias) replacements[expr] = select_alias collect_replacements(list(self.qualify.leaves())) self.qualify = self.qualify.replace_expressions( {expr: Ref(alias, expr) for expr, alias in replacements.items()} ) order_by = [] for order_by_expr, *_ in self.get_order_by(): collect_replacements(order_by_expr.get_source_expressions()) order_by.append( order_by_expr.replace_expressions( {expr: Ref(alias, expr) for expr, alias in replacements.items()} ) ) inner_query_compiler = inner_query.get_compiler( self.using, connection=self.connection, elide_empty=self.elide_empty ) inner_sql, inner_params = inner_query_compiler.as_sql( # The limits must be applied to the outer query to avoid pruning # results too eagerly. with_limits=False, # Force unique aliasing of selected columns to avoid collisions # and make rhs predicates referencing easier. with_col_aliases=True, ) qualify_sql, qualify_params = self.compile(self.qualify) result = [ "SELECT * FROM (", inner_sql, ")", self.connection.ops.quote_name("qualify"), "WHERE", qualify_sql, ] if qual_aliases: # If some select aliases were unmasked for filtering purposes they # must be masked back. cols = [self.connection.ops.quote_name(alias) for alias in select.values()] result = [ "SELECT", ", ".join(cols), "FROM (", *result, ")", self.connection.ops.quote_name("qualify_mask"), ] params = list(inner_params) + qualify_params # As the SQL spec is unclear on whether or not derived tables # ordering must propagate it has to be explicitly repeated on the # outer-most query to ensure it's preserved. if order_by: ordering_sqls = [] for ordering in order_by: ordering_sql, ordering_params = self.compile(ordering) ordering_sqls.append(ordering_sql) params.extend(ordering_params) result.extend(["ORDER BY", ", ".join(ordering_sqls)]) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. 
""" refcounts_before = self.query.alias_refcount.copy() try: combinator = self.query.combinator extra_select, order_by, group_by = self.pre_sql_setup( with_col_aliases=with_col_aliases or bool(combinator), ) for_update_part = None # Is a LIMIT/OFFSET clause needed? with_limit_offset = with_limits and self.query.is_sliced combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, "supports_select_{}".format(combinator)): raise NotSupportedError( "{} is not supported on this database backend.".format( combinator ) ) result, params = self.get_combinator_sql( combinator, self.query.combinator_all ) elif self.qualify: result, params = self.get_qualify_sql() order_by = None else: distinct_fields, distinct_params = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' # (see docstring of get_from_clause() for details). from_, f_params = self.get_from_clause() try: where, w_params = ( self.compile(self.where) if self.where is not None else ("", []) ) except EmptyResultSet: if self.elide_empty: raise # Use a predicate that's always False. where, w_params = "0 = 1", [] except FullResultSet: where, w_params = "", [] try: having, h_params = ( self.compile(self.having) if self.having is not None else ("", []) ) except FullResultSet: having, h_params = "", [] result = ["SELECT"] params = [] if self.query.distinct: distinct_result, distinct_params = self.connection.ops.distinct_sql( distinct_fields, distinct_params, ) result += distinct_result params += distinct_params out_cols = [] for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = "%s AS %s" % ( s_sql, self.connection.ops.quote_name(alias), ) params.extend(s_params) out_cols.append(s_sql) result += [", ".join(out_cols)] if from_: result += ["FROM", *from_] elif self.connection.features.bare_select_suffix: result += [self.connection.features.bare_select_suffix] params.extend(f_params) if self.query.select_for_update and features.has_select_for_update: if ( self.connection.get_autocommit() # Don't raise an exception when database doesn't # support transactions, as it's a noop. and features.supports_transactions ): raise TransactionManagementError( "select_for_update cannot be used outside of a transaction." ) if ( with_limit_offset and not features.supports_select_for_update_with_limit ): raise NotSupportedError( "LIMIT/OFFSET is not supported with " "select_for_update on this database backend." ) nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked of = self.query.select_for_update_of no_key = self.query.select_for_no_key_update # If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the # backend doesn't support it, raise NotSupportedError to # prevent a possible deadlock. if nowait and not features.has_select_for_update_nowait: raise NotSupportedError( "NOWAIT is not supported on this database backend." ) elif skip_locked and not features.has_select_for_update_skip_locked: raise NotSupportedError( "SKIP LOCKED is not supported on this database backend." ) elif of and not features.has_select_for_update_of: raise NotSupportedError( "FOR UPDATE OF is not supported on this database backend." ) elif no_key and not features.has_select_for_no_key_update: raise NotSupportedError( "FOR NO KEY UPDATE is not supported on this " "database backend." 
) for_update_part = self.connection.ops.for_update_sql( nowait=nowait, skip_locked=skip_locked, of=self.get_select_for_update_of_arguments(), no_key=no_key, ) if for_update_part and features.for_update_after_from: result.append(for_update_part) if where: result.append("WHERE %s" % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError( "annotate() + distinct(fields) is not implemented." ) order_by = order_by or self.connection.ops.force_no_ordering() result.append("GROUP BY %s" % ", ".join(grouping)) if self._meta_ordering: order_by = None if having: if not grouping: result.extend(self.connection.ops.force_group_by()) result.append("HAVING %s" % having) params.extend(h_params) if self.query.explain_info: result.insert( 0, self.connection.ops.explain_query_prefix( self.query.explain_info.format, **self.query.explain_info.options, ), ) if order_by: ordering = [] for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) order_by_sql = "ORDER BY %s" % ", ".join(ordering) if combinator and features.requires_compound_order_by_subquery: result = ["SELECT * FROM (", *result, ")", order_by_sql] else: result.append(order_by_sql) if with_limit_offset: result.append( self.connection.ops.limit_offset_sql( self.query.low_mark, self.query.high_mark ) ) if for_update_part and not features.for_update_after_from: result.append(for_update_part) if self.query.subquery and extra_select: # If the query is used as a subquery, the extra selects would # result in more columns than the left-hand side expression is # expecting. This can happen when a subquery uses a combination # of order_by() and distinct(), forcing the ordering # expressions to be selected as well. Wrap the query in another # subquery to exclude extraneous selects. sub_selects = [] sub_params = [] for index, (select, _, alias) in enumerate(self.select, start=1): if alias: sub_selects.append( "%s.%s" % ( self.connection.ops.quote_name("subquery"), self.connection.ops.quote_name(alias), ) ) else: select_clone = select.relabeled_clone( {select.alias: "subquery"} ) subselect, subparams = select_clone.as_sql( self, self.connection ) sub_selects.append(subselect) sub_params.extend(subparams) return "SELECT %s FROM (%s) subquery" % ( ", ".join(sub_selects), " ".join(result), ), tuple(sub_params + params) return " ".join(result), tuple(params) finally: # Finally do cleanup - get rid of the joins we created above. self.query.reset_refcounts(refcounts_before) def get_default_columns( self, select_mask, start_alias=None, opts=None, from_parent=None ): """ Compute the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Return a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, return a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). """ result = [] if opts is None: if (opts := self.query.get_meta()) is None: return result start_alias = start_alias or self.query.get_initial_alias() # The 'seen_models' is used to optimize checking the needed parent # alias for a given field. This also includes None -> start_alias to # be used by local fields. 
seen_models = {None: start_alias} select_mask_fields = set(composite.unnest(select_mask)) for field in opts.concrete_fields: model = field.model._meta.concrete_model # A proxy model will have a different model and concrete_model. We # will assign None if the field belongs to this model. if model == opts.model: model = None if ( from_parent and model is not None and issubclass( from_parent._meta.concrete_model, model._meta.concrete_model ) ): # Avoid loading data for already loaded parents. # We end up here in the case select_related() resolution # proceeds from parent model to child model. In that case the # parent model data is already present in the SELECT clause, # and we want to avoid reloading the same data again. continue if select_mask and field not in select_mask_fields: continue alias = self.query.join_parent_model(opts, model, start_alias, seen_models) column = field.get_col(alias) result.append(column) return result def get_distinct(self): """ Return a quoted list of fields to use in DISTINCT ON part of the query. This method can alter the tables in the query, and thus it must be called before get_from_clause(). """ result = [] params = [] opts = self.query.get_meta() for name in self.query.distinct_fields: parts = name.split(LOOKUP_SEP) _, targets, alias, joins, path, _, transform_function = self._setup_joins( parts, opts, None ) targets, alias, _ = self.query.trim_joins(targets, joins, path) for target in targets: if name in self.query.annotation_select: result.append(self.connection.ops.quote_name(name)) else: r, p = self.compile(transform_function(target, alias)) result.append(r) params.append(p) return result, params def find_ordering_name( self, name, opts, alias=None, default_order="ASC", already_seen=None ): """ Return the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. """ name, order = get_order_dir(name, default_order) descending = order == "DESC" pieces = name.split(LOOKUP_SEP) ( field, targets, alias, joins, path, opts, transform_function, ) = self._setup_joins(pieces, opts, alias) # If we get to this point and the field is a relation to another model, # append the default ordering for that model unless it is the pk # shortcut or the attribute name of the field that is specified or # there are transforms to process. if ( field.is_relation and opts.ordering and getattr(field, "attname", None) != pieces[-1] and name != "pk" and not getattr(transform_function, "has_transforms", False) ): # Firstly, avoid infinite loops. 
already_seen = already_seen or set() join_tuple = tuple( getattr(self.query.alias_map[j], "join_cols", None) for j in joins ) if join_tuple in already_seen: raise FieldError("Infinite loop caused by ordering.") already_seen.add(join_tuple) results = [] for item in opts.ordering: if hasattr(item, "resolve_expression") and not isinstance( item, OrderBy ): item = item.desc() if descending else item.asc() if isinstance(item, OrderBy): results.append( (item.prefix_references(f"{name}{LOOKUP_SEP}"), False) ) continue results.extend( (expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref) for expr, is_ref in self.find_ordering_name( item, opts, alias, order, already_seen ) ) return results targets, alias, _ = self.query.trim_joins(targets, joins, path) return [ (OrderBy(transform_function(t, alias), descending=descending), False) for t in targets ] def _setup_joins(self, pieces, opts, alias): """ Helper method for get_order_by() and get_distinct(). get_ordering() and get_distinct() must produce same target columns on same input, as the prefixes of get_ordering() and get_distinct() must match. Executing SQL where this is not true is an error. """ alias = alias or self.query.get_initial_alias() field, targets, opts, joins, path, transform_function = self.query.setup_joins( pieces, opts, alias ) alias = joins[-1] return field, targets, alias, joins, path, opts, transform_function def get_from_clause(self): """ Return a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Subclasses, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables that are needed. This means the select columns, ordering, and distinct must be done first. """ result = [] params = [] # Copy alias_map to a tuple in case Join.as_sql() subclasses (objects # in alias_map) alter compiler.query.alias_map. That would otherwise # raise "RuntimeError: dictionary changed size during iteration". for alias, from_clause in tuple(self.query.alias_map.items()): if not self.query.alias_refcount[alias]: continue clause_sql, clause_params = self.compile(from_clause) result.append(clause_sql) params.extend(clause_params) for t in self.query.extra_tables: alias, _ = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # call increments the refcount, so an alias refcount of one means # this is the only reference). if ( alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1 ): result.append(", %s" % self.quote_name_unless_alias(alias)) return result, params def get_related_selections( self, select, select_mask, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None, ): """ Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). """ def _get_field_choices(): direct_choices = (f.name for f in opts.fields if f.is_relation) reverse_choices = ( f.field.related_query_name() for f in opts.related_objects if f.field.unique ) return chain( direct_choices, reverse_choices, self.query._filtered_relations ) related_klass_infos = [] if not restricted and cur_depth > self.query.max_depth: # We've recursed far enough; bail out. 
return related_klass_infos if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() # Setup for the case when only particular related fields should be # included in the related selection. fields_found = set() if requested is None: restricted = isinstance(self.query.select_related, dict) if restricted: requested = self.query.select_related def get_related_klass_infos(klass_info, related_klass_infos): klass_info["related_klass_infos"] = related_klass_infos for f in opts.fields: fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. " "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or "(none)", ) ) else: next = False if not select_related_descend(f, restricted, requested, select_mask): continue related_select_mask = select_mask.get(f) or {} klass_info = { "model": f.remote_field.model, "field": f, "reverse": False, "local_setter": f.set_cached_value, "remote_setter": ( f.remote_field.set_cached_value if f.unique else lambda x, y: None ), "from_parent": False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns( related_select_mask, start_alias=alias, opts=f.remote_field.model._meta ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next_klass_infos = self.get_related_selections( select, related_select_mask, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted, ) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o, o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for related_object, related_field, model in related_fields: if not select_related_descend( related_object, restricted, requested, select_mask, ): continue related_select_mask = select_mask.get(related_object) or {} related_field_name = related_field.related_query_name() fields_found.add(related_field_name) join_info = self.query.setup_joins( [related_field_name], opts, root_alias ) alias = join_info.joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { "model": model, "field": related_field, "reverse": True, "local_setter": related_object.set_cached_value, "remote_setter": related_field.set_cached_value, "from_parent": from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( related_select_mask, start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next = requested.get(related_field_name, {}) next_klass_infos = self.get_related_selections( select, related_select_mask, model._meta, alias, cur_depth + 1, next, restricted, ) get_related_klass_infos(klass_info, next_klass_infos) def local_setter(final_field, obj, from_obj): # Set a reverse fk object when relation is non-empty. 
if from_obj: final_field.remote_field.set_cached_value(from_obj, obj) def local_setter_noop(obj, from_obj): pass def remote_setter(name, obj, from_obj): setattr(from_obj, name, obj) for name in list(requested): # Filtered relations work only on the topmost level. if cur_depth > 1: break if name in self.query._filtered_relations: fields_found.add(name) final_field, _, join_opts, joins, _, _ = self.query.setup_joins( [name], opts, root_alias ) model = join_opts.model alias = joins[-1] from_parent = ( issubclass(model, opts.model) and model is not opts.model ) klass_info = { "model": model, "field": final_field, "reverse": True, "local_setter": ( partial(local_setter, final_field) if len(joins) <= 2 else local_setter_noop ), "remote_setter": partial(remote_setter, name), "from_parent": from_parent, } related_klass_infos.append(klass_info) select_fields = [] field_select_mask = select_mask.get((name, final_field)) or {} columns = self.get_default_columns( field_select_mask, start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next_requested = requested.get(name, {}) next_klass_infos = self.get_related_selections( select, field_select_mask, opts=model._meta, root_alias=alias, cur_depth=cur_depth + 1, requested=next_requested, restricted=restricted, ) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( "Invalid field name(s) given in select_related: %s. " "Choices are: %s" % ( ", ".join(invalid_fields), ", ".join(_get_field_choices()) or "(none)", ) ) return related_klass_infos def get_select_for_update_of_arguments(self): """ Return a quoted list of arguments for the SELECT FOR UPDATE OF part of the query. """ def _get_parent_klass_info(klass_info): concrete_model = klass_info["model"]._meta.concrete_model for parent_model, parent_link in concrete_model._meta.parents.items(): all_parents = parent_model._meta.all_parents yield { "model": parent_model, "field": parent_link, "reverse": False, "select_fields": [ select_index for select_index in klass_info["select_fields"] # Selected columns from a model or its parents. if ( self.select[select_index][0].target.model == parent_model or self.select[select_index][0].target.model in all_parents ) ], } def _get_first_selected_col_from_model(klass_info): """ Find the first selected column from a model. If it doesn't exist, don't lock a model. select_fields is filled recursively, so it also contains fields from the parent models. 
""" concrete_model = klass_info["model"]._meta.concrete_model for select_index in klass_info["select_fields"]: if self.select[select_index][0].target.model == concrete_model: return self.select[select_index][0] def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield "self" else: field = klass_info["field"] if klass_info["reverse"]: field = field.remote_field path = [*parent_path, field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in _get_parent_klass_info(klass_info) ) queue.extend( (path, klass_info) for klass_info in klass_info.get("related_klass_infos", []) ) if not self.klass_info: return [] result = [] invalid_names = [] for name in self.query.select_for_update_of: klass_info = self.klass_info if name == "self": col = _get_first_selected_col_from_model(klass_info) else: for part in name.split(LOOKUP_SEP): klass_infos = ( *klass_info.get("related_klass_infos", []), *_get_parent_klass_info(klass_info), ) for related_klass_info in klass_infos: field = related_klass_info["field"] if related_klass_info["reverse"]: field = field.remote_field if field.name == part: klass_info = related_klass_info break else: klass_info = None break if klass_info is None: invalid_names.append(name) continue col = _get_first_selected_col_from_model(klass_info) if col is not None: if self.connection.features.select_for_update_of_column: result.append(self.compile(col)[0]) else: result.append(self.quote_name_unless_alias(col.alias)) if invalid_names: raise FieldError( "Invalid field name(s) given in select_for_update(of=(...)): %s. " "Only relational fields followed in the query are allowed. " "Choices are: %s." % ( ", ".join(invalid_names), ", ".join(_get_field_choices()), ) ) return result def get_converters(self, expressions): i = 0 converters = {} for expression in expressions: if isinstance(expression, ColPairs): cols = expression.get_source_expressions() cols_converters = self.get_converters(cols) for j, (convs, col) in cols_converters.items(): converters[i + j] = (convs, col) i += len(expression) elif expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) i += 1 else: i += 1 return converters def apply_converters(self, rows, converters): connection = self.connection converters = list(converters.items()) for row in map(list, rows): for pos, (convs, expression) in converters: value = row[pos] for converter in convs: value = converter(value, expression, connection) row[pos] = value yield row def has_composite_fields(self, expressions): # Check for composite fields before calling the relatively costly # composite_fields_to_tuples. 
return any(isinstance(expression, ColPairs) for expression in expressions) def composite_fields_to_tuples(self, rows, expressions): col_pair_slices = [ slice(i, i + len(expression)) for i, expression in enumerate(expressions) if isinstance(expression, ColPairs) ] for row in map(list, rows): for pos in col_pair_slices: row[pos] = (tuple(row[pos]),) yield row def results_iter( self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE, ): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql( MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size ) fields = [s[0] for s in self.select[0 : self.col_count]] converters = self.get_converters(fields) rows = chain.from_iterable(results) if converters: rows = self.apply_converters(rows, converters) if self.has_composite_fields(fields): rows = self.composite_fields_to_tuples(rows, fields) if tuple_expected: rows = map(tuple, rows) return rows def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." """ return bool(self.execute_sql(SINGLE)) def execute_sql( self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE ): """ Run the query against the database and return the result(s). The return value depends on the value of result_type. When result_type is: - MULTI: Retrieves all rows using fetchmany(). Wraps in an iterator for chunked reads when supported. - SINGLE: Retrieves a single row using fetchone(). - ROW_COUNT: Retrieves the number of rows in the result. - CURSOR: Runs the query, and returns the cursor object. It is the caller's responsibility to close the cursor. """ result_type = result_type or NO_RESULTS try: sql, params = self.as_sql() if not sql: raise EmptyResultSet except EmptyResultSet: if result_type == MULTI: return iter([]) else: return if chunked_fetch: cursor = self.connection.chunked_cursor() else: cursor = self.connection.cursor() try: cursor.execute(sql, params) except Exception: # Might fail for server-side cursors (e.g. connection closed) cursor.close() raise if result_type == ROW_COUNT: try: return cursor.rowcount finally: cursor.close() if result_type == CURSOR: # Give the caller the cursor to process and close. return cursor if result_type == SINGLE: try: val = cursor.fetchone() if val: return val[0 : self.col_count] return val finally: # done with the cursor cursor.close() if result_type == NO_RESULTS: cursor.close() return result = cursor_iter( cursor, self.connection.features.empty_fetchmany_value, self.col_count if self.has_extra_select else None, chunk_size, ) if not chunked_fetch or not self.connection.features.can_use_chunked_reads: # If we are using non-chunked reads, we return the same data # structure as normally, but ensure it is all read into memory # before going any further. Use chunked_fetch if requested, # unless the database doesn't support it. return list(result) return result def explain_query(self): result = list(self.execute_sql()) # Some backends return 1 item tuples with strings, and others return # tuples with integers and strings. Flatten them out into strings. format_ = self.query.explain_info.format output_formatter = json.dumps if format_ and format_.lower() == "json" else str for row in result: for value in row: if not isinstance(value, str): yield " ".join([output_formatter(c) for c in value]) else: yield value
SQLCompiler
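The ordering_parts regex at the top of the class above is easy to gloss over; get_order_by() and get_extra_select() use it to strip the ASC/DESC suffix so the same column ordered in different directions deduplicates to a single entry. A minimal standalone sketch of that behavior (the SQL strings here are made up for illustration, and plain re.compile stands in for Django's lazy variant):

import re

ordering_parts = re.compile(r"^(.*)\s(?:ASC|DESC).*", re.MULTILINE | re.DOTALL)

seen = set()
for sql in ['"author"."name" ASC', '"author"."name" DESC', '"author"."age" ASC']:
    # Group 1 captures everything before the direction keyword.
    without_ordering = ordering_parts.search(sql)[1]
    if without_ordering in seen:
        continue  # same column, direction ignored -> treated as a duplicate
    seen.add(without_ordering)

print(sorted(seen))  # ['"author"."age"', '"author"."name"']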
python
TheAlgorithms__Python
greedy_methods/gas_station.py
{ "start": 1100, "end": 2960 }
class ____:
    gas_quantity: int
    cost: int


def get_gas_stations(
    gas_quantities: list[int], costs: list[int]
) -> tuple[GasStation, ...]:
    """
    This function returns a tuple of gas stations.

    Args:
        gas_quantities: Amount of gas available at each station
        costs: The cost of gas required to move from one station to the next

    Returns:
        A tuple of gas stations

    >>> gas_stations = get_gas_stations([1, 2, 3, 4, 5], [3, 4, 5, 1, 2])
    >>> len(gas_stations)
    5
    >>> gas_stations[0]
    GasStation(gas_quantity=1, cost=3)
    >>> gas_stations[-1]
    GasStation(gas_quantity=5, cost=2)
    """
    return tuple(
        GasStation(quantity, cost) for quantity, cost in zip(gas_quantities, costs)
    )


def can_complete_journey(gas_stations: tuple[GasStation, ...]) -> int:
    """
    This function returns the index from which to start the journey
    in order to reach the end.

    Args:
        gas_stations: The tuple of gas stations, each pairing the gas available
            at a station with the cost to move to the next one

    Returns:
        start [int]: start index needed to complete the journey

    Examples:
    >>> can_complete_journey(get_gas_stations([1, 2, 3, 4, 5], [3, 4, 5, 1, 2]))
    3
    >>> can_complete_journey(get_gas_stations([2, 3, 4], [3, 4, 3]))
    -1
    """
    total_gas = sum(gas_station.gas_quantity for gas_station in gas_stations)
    total_cost = sum(gas_station.cost for gas_station in gas_stations)
    if total_gas < total_cost:
        return -1

    start = 0
    net = 0
    for i, gas_station in enumerate(gas_stations):
        net += gas_station.gas_quantity - gas_station.cost
        if net < 0:
            start = i + 1
            net = 0

    return start


if __name__ == "__main__":
    import doctest

    doctest.testmod()
GasStation
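The greedy step in can_complete_journey leans on a non-obvious fact: once total gas covers total cost, resetting the candidate start every time the running tank goes negative lands on a valid start. A brute-force cross-check (standalone, not part of the original module) confirms the greedy answers for the doctest inputs:

def brute_force_start(gas, cost):
    n = len(gas)
    for start in range(n):
        tank = 0
        for step in range(n):
            i = (start + step) % n
            tank += gas[i] - cost[i]
            if tank < 0:
                break  # this start runs dry before completing the loop
        else:
            return start
    return -1


print(brute_force_start([1, 2, 3, 4, 5], [3, 4, 5, 1, 2]))  # 3, matching the doctest
print(brute_force_start([2, 3, 4], [3, 4, 3]))  # -1, journey impossible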
python
tensorflow__tensorflow
tensorflow/python/debug/lib/debug_v2_ops_test.py
{ "start": 1590, "end": 9212 }
class ____(dumping_callback_test_lib.DumpingCallbackTestBase): """Tests for DebugIdentityV2Op: when DebugEventsWriter is initialized. DebugEventsWriter being initialized prior to DebugIdentityV2 ops being invoked for the first time is the typical case (e.g., tfdbg2 running on a local machine with only local devices.) """ def setUp(self): super(DebugIdentityV2OpTest, self).setUp() # Testing using a small circular-buffer size. self.circular_buffer_size = 4 self.tfdbg_run_id = "test_tfdbg_run" self.writer = debug_events_writer.DebugEventsWriter( self.dump_root, self.tfdbg_run_id, self.circular_buffer_size) def tearDown(self): self.writer.Close() super(DebugIdentityV2OpTest, self).tearDown() @test_util.run_in_graph_and_eager_modes def testSingleTensorFullTensorDebugModeWithCircularBufferBehavior(self): @def_function.function def write_debug_trace(x): square = math_ops.square(x) gen_debug_ops.debug_identity_v2( square, tfdbg_context_id="deadbeaf", op_name="Square", output_slot=0, tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR, debug_urls=["file://%s" % self.dump_root]) sqrt = math_ops.sqrt(x) gen_debug_ops.debug_identity_v2( sqrt, tfdbg_context_id="beafdead", op_name="Sqrt", output_slot=0, tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR, debug_urls=["file://%s" % self.dump_root]) return square + sqrt x = np.array([3.0, 4.0]) # Only the graph-execution trace of the last iteration should be written # to self.dump_root. for _ in range(self.circular_buffer_size // 2 + 1): self.assertAllClose( write_debug_trace(x), [9.0 + np.sqrt(3.0), 16.0 + 2.0]) with debug_events_reader.DebugEventsReader(self.dump_root) as reader: # Check that the .metadata DebugEvents data file has been created, even # before FlushExecutionFiles() is called. self.assertGreater(reader.starting_wall_time(), 0) self.assertTrue(reader.tensorflow_version()) self.assertTrue(reader.tfdbg_file_version().startswith("debug.Event")) graph_trace_iter = reader.graph_execution_traces_iterators()[0] # Before FlushExecutionFiles() is called, the .graph_execution_traces file # ought to be empty. with self.assertRaises(StopIteration): next(graph_trace_iter) # Flush the circular buffer. self.writer.FlushExecutionFiles() graph_trace_iter = reader.graph_execution_traces_iterators()[0] # The circular buffer has a size of 4. So only the data from the # last two iterations should have been written to self.dump_root. for _ in range(2): debug_event = next(graph_trace_iter).debug_event self.assertGreater(debug_event.wall_time, 0) trace = debug_event.graph_execution_trace self.assertEqual(trace.tfdbg_context_id, "deadbeaf") self.assertEqual(trace.op_name, "Square") self.assertEqual(trace.output_slot, 0) self.assertEqual(trace.tensor_debug_mode, debug_event_pb2.TensorDebugMode.FULL_TENSOR) tensor_value = tensor_util.MakeNdarray(trace.tensor_proto) self.assertAllClose(tensor_value, [9.0, 16.0]) debug_event = next(graph_trace_iter).debug_event self.assertGreater(debug_event.wall_time, 0) trace = debug_event.graph_execution_trace self.assertEqual(trace.tfdbg_context_id, "beafdead") self.assertEqual(trace.op_name, "Sqrt") self.assertEqual(trace.output_slot, 0) self.assertEqual(trace.tensor_debug_mode, debug_event_pb2.TensorDebugMode.FULL_TENSOR) tensor_value = tensor_util.MakeNdarray(trace.tensor_proto) self.assertAllClose(tensor_value, [np.sqrt(3.0), 2.0]) # Only the graph-execution trace of the last iteration should be written # to self.dump_root. 
with self.assertRaises(StopIteration): next(graph_trace_iter) @test_util.run_in_graph_and_eager_modes def testControlFlow(self): @def_function.function def collatz(x): counter = constant_op.constant(0, dtype=dtypes.int32) while math_ops.greater(x, 1): counter = counter + 1 gen_debug_ops.debug_identity_v2( x, tfdbg_context_id="deadbeaf", op_name="x", output_slot=0, tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR, debug_urls=["file://%s" % self.dump_root]) if math_ops.equal(x % 2, 0): x = math_ops.div(x, 2) else: x = x * 3 + 1 return counter x = constant_op.constant(10, dtype=dtypes.int32) self.evaluate(collatz(x)) self.writer.FlushExecutionFiles() with debug_events_reader.DebugEventsReader(self.dump_root) as reader: graph_trace_iter = reader.graph_execution_traces_iterators()[0] try: x_values = [] timestamp = 0 while True: debug_event = next(graph_trace_iter).debug_event self.assertGreater(debug_event.wall_time, timestamp) timestamp = debug_event.wall_time trace = debug_event.graph_execution_trace self.assertEqual(trace.tfdbg_context_id, "deadbeaf") self.assertEqual(trace.op_name, "x") self.assertEqual(trace.output_slot, 0) self.assertEqual(trace.tensor_debug_mode, debug_event_pb2.TensorDebugMode.FULL_TENSOR) x_values.append(int(tensor_util.MakeNdarray(trace.tensor_proto))) except StopIteration: pass # Due to the circular buffer, only the last 4 iterations of # [10, 5, 16, 8, 4, 2] should have been written. self.assertAllEqual(x_values, [16, 8, 4, 2]) @test_util.run_in_graph_and_eager_modes def testTwoDumpRoots(self): another_dump_root = os.path.join(self.dump_root, "another") another_debug_url = "file://%s" % another_dump_root another_writer = debug_events_writer.DebugEventsWriter( another_dump_root, "test_tfdbg_run") @def_function.function def write_debug_trace(x): # DebugIdentityV2 is a stateful op. It ought to be included by auto # control dependency. square = math_ops.square(x) gen_debug_ops.debug_identity_v2( square, tfdbg_context_id="deadbeaf", tensor_debug_mode=debug_event_pb2.TensorDebugMode.FULL_TENSOR, debug_urls=["file://%s" % self.dump_root, another_debug_url]) return square + 1.0 x = np.array([3.0, 4.0]) self.assertAllClose(write_debug_trace(x), np.array([10.0, 17.0])) self.writer.FlushExecutionFiles() another_writer.FlushExecutionFiles() another_writer.Close() for debug_root in (self.dump_root, another_dump_root): with debug_events_reader.DebugEventsReader(debug_root) as reader: graph_trace_iter = reader.graph_execution_traces_iterators()[0] debug_event = next(graph_trace_iter).debug_event trace = debug_event.graph_execution_trace self.assertEqual(trace.tfdbg_context_id, "deadbeaf") self.assertEqual(trace.op_name, "") self.assertEqual(trace.tensor_debug_mode, debug_event_pb2.TensorDebugMode.FULL_TENSOR) tensor_value = tensor_util.MakeNdarray(trace.tensor_proto) self.assertAllClose(tensor_value, [9.0, 16.0]) with self.assertRaises(StopIteration): next(graph_trace_iter)
DebugIdentityV2OpTest
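The circular-buffer assertions in these tests are easier to follow with a toy model; a deque with a maxlen behaves the same way (plain Python, no TensorFlow required): with a buffer of size 4, only the last four traced Collatz values survive the flush.

from collections import deque

circular_buffer = deque(maxlen=4)
for value in [10, 5, 16, 8, 4, 2]:  # the values traced by collatz(10)
    circular_buffer.append(value)   # the oldest entry is evicted once full

print(list(circular_buffer))  # [16, 8, 4, 2]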
python
apache__airflow
providers/microsoft/azure/tests/unit/microsoft/azure/hooks/test_powerbi.py
{ "start": 2191, "end": 9758 }
class ____: @pytest.mark.asyncio async def test_get_refresh_history(self, powerbi_hook): response_data = {"value": [{"requestId": "1234", "status": "Completed", "serviceExceptionJson": ""}]} with mock.patch.object(KiotaRequestAdapterHook, "run", new_callable=mock.AsyncMock) as mock_run: mock_run.return_value = response_data result = await powerbi_hook.get_refresh_history(DATASET_ID, GROUP_ID) expected = [{"request_id": "1234", "status": "Completed", "error": ""}] assert result == expected @pytest.mark.asyncio async def test_get_refresh_history_airflow_exception(self, powerbi_hook): """Test handling of AirflowException in get_refresh_history.""" with mock.patch.object(KiotaRequestAdapterHook, "run", new_callable=mock.AsyncMock) as mock_run: mock_run.side_effect = AirflowException("Test exception") with pytest.raises(PowerBIDatasetRefreshException, match="Failed to retrieve refresh history"): await powerbi_hook.get_refresh_history(DATASET_ID, GROUP_ID) @pytest.mark.parametrize( ("input_data", "expected_output"), [ ( {"requestId": "1234", "status": "Completed", "serviceExceptionJson": ""}, { PowerBIDatasetRefreshFields.REQUEST_ID.value: "1234", PowerBIDatasetRefreshFields.STATUS.value: "Completed", PowerBIDatasetRefreshFields.ERROR.value: "", }, ), ( {"requestId": "5678", "status": "Unknown", "serviceExceptionJson": "Some error"}, { PowerBIDatasetRefreshFields.REQUEST_ID.value: "5678", PowerBIDatasetRefreshFields.STATUS.value: "In Progress", PowerBIDatasetRefreshFields.ERROR.value: "Some error", }, ), ( {"requestId": None, "status": None, "serviceExceptionJson": None}, { PowerBIDatasetRefreshFields.REQUEST_ID.value: "None", PowerBIDatasetRefreshFields.STATUS.value: "None", PowerBIDatasetRefreshFields.ERROR.value: "None", }, ), ( {}, # Empty input dictionary { PowerBIDatasetRefreshFields.REQUEST_ID.value: "None", PowerBIDatasetRefreshFields.STATUS.value: "None", PowerBIDatasetRefreshFields.ERROR.value: "None", }, ), ], ) def test_raw_to_refresh_details(self, input_data, expected_output): """Test raw_to_refresh_details method.""" result = PowerBIHook.raw_to_refresh_details(input_data) assert result == expected_output @pytest.mark.asyncio async def test_get_refresh_details_by_refresh_id(self, powerbi_hook): # Mock the get_refresh_history method to return a list of refresh histories refresh_histories = FORMATTED_RESPONSE powerbi_hook.get_refresh_history = mock.AsyncMock(return_value=refresh_histories) # Call the function with a valid request ID refresh_id = "5e2d9921-e91b-491f-b7e1-e7d8db49194c" result = await powerbi_hook.get_refresh_details_by_refresh_id( dataset_id=DATASET_ID, group_id=GROUP_ID, refresh_id=refresh_id ) # Assert that the correct refresh details are returned assert result == { PowerBIDatasetRefreshFields.REQUEST_ID.value: "5e2d9921-e91b-491f-b7e1-e7d8db49194c", PowerBIDatasetRefreshFields.STATUS.value: "Completed", PowerBIDatasetRefreshFields.ERROR.value: "None", } # Call the function with an invalid request ID invalid_request_id = "invalid_request_id" with pytest.raises(PowerBIDatasetRefreshException): await powerbi_hook.get_refresh_details_by_refresh_id( dataset_id=DATASET_ID, group_id=GROUP_ID, refresh_id=invalid_request_id ) @pytest.mark.asyncio async def test_get_refresh_details_by_refresh_id_empty_history(self, powerbi_hook): """Test exception when refresh history is empty.""" # Mock the get_refresh_history method to return an empty list powerbi_hook.get_refresh_history = mock.AsyncMock(return_value=[]) # Call the function with a request ID refresh_id = "any_request_id" 
with pytest.raises( PowerBIDatasetRefreshException, match=f"Unable to fetch the details of dataset refresh with Request Id: {refresh_id}", ): await powerbi_hook.get_refresh_details_by_refresh_id( dataset_id=DATASET_ID, group_id=GROUP_ID, refresh_id=refresh_id ) @pytest.mark.asyncio async def test_get_refresh_details_by_refresh_id_not_found(self, powerbi_hook): """Test exception when the refresh ID is not found in the refresh history.""" # Mock the get_refresh_history method to return a list of refresh histories without the specified ID powerbi_hook.get_refresh_history = mock.AsyncMock(return_value=FORMATTED_RESPONSE) # Call the function with an invalid request ID invalid_request_id = "invalid_request_id" with pytest.raises( PowerBIDatasetRefreshException, match=f"Unable to fetch the details of dataset refresh with Request Id: {invalid_request_id}", ): await powerbi_hook.get_refresh_details_by_refresh_id( dataset_id=DATASET_ID, group_id=GROUP_ID, refresh_id=invalid_request_id ) @pytest.mark.asyncio async def test_trigger_dataset_refresh_success(self, powerbi_hook): response_data = {"requestid": "5e2d9921-e91b-491f-b7e1-e7d8db49194c"} with mock.patch.object(KiotaRequestAdapterHook, "run", new_callable=mock.AsyncMock) as mock_run: mock_run.return_value = response_data result = await powerbi_hook.trigger_dataset_refresh(dataset_id=DATASET_ID, group_id=GROUP_ID) assert result == "5e2d9921-e91b-491f-b7e1-e7d8db49194c" @pytest.mark.asyncio async def test_trigger_dataset_refresh_failure(self, powerbi_hook): """Test failure to trigger dataset refresh due to AirflowException.""" with mock.patch.object(KiotaRequestAdapterHook, "run", new_callable=mock.AsyncMock) as mock_run: mock_run.side_effect = AirflowException("Test exception") with pytest.raises(PowerBIDatasetRefreshException, match="Failed to trigger dataset refresh."): await powerbi_hook.trigger_dataset_refresh(dataset_id=DATASET_ID, group_id=GROUP_ID) @pytest.mark.asyncio async def test_cancel_dataset_refresh(self, powerbi_hook): dataset_refresh_id = "5e2d9921-e91b-491f-b7e1-e7d8db49194c" with mock.patch.object(KiotaRequestAdapterHook, "run", new_callable=mock.AsyncMock) as mock_run: await powerbi_hook.cancel_dataset_refresh(DATASET_ID, GROUP_ID, dataset_refresh_id) mock_run.assert_called_once_with( url="myorg/groups/{group_id}/datasets/{dataset_id}/refreshes/{dataset_refresh_id}", response_type=None, path_parameters={ "group_id": GROUP_ID, "dataset_id": DATASET_ID, "dataset_refresh_id": dataset_refresh_id, }, method="DELETE", )
TestPowerBIHook
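All of these tests rely on the same pattern: patch an async method with unittest.mock.AsyncMock, set a return value or side effect, and assert on the awaited result. A self-contained sketch of the pattern (DummyHook and its methods are hypothetical stand-ins, not the real PowerBIHook API):

import asyncio
from unittest import mock


class DummyHook:
    async def run(self):
        raise RuntimeError("should be patched in tests")

    async def trigger(self):
        # Delegates the request and extracts the id, like trigger_dataset_refresh.
        response = await self.run()
        return response["requestid"]


async def main():
    hook = DummyHook()
    with mock.patch.object(DummyHook, "run", new_callable=mock.AsyncMock) as mock_run:
        mock_run.return_value = {"requestid": "1234"}
        assert await hook.trigger() == "1234"
        mock_run.assert_awaited_once()


asyncio.run(main())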
python
pytest-dev__pytest
src/_pytest/logging.py
{ "start": 32727, "end": 34930 }
class ____(logging_StreamHandler): """A logging StreamHandler used by the live logging feature: it will write a newline before the first log message in each test. During live logging we must also explicitly disable stdout/stderr capturing otherwise it will get captured and won't appear in the terminal. """ # Officially stream needs to be a IO[str], but TerminalReporter # isn't. So force it. stream: TerminalReporter = None # type: ignore def __init__( self, terminal_reporter: TerminalReporter, capture_manager: CaptureManager | None, ) -> None: super().__init__(stream=terminal_reporter) # type: ignore[arg-type] self.capture_manager = capture_manager self.reset() self.set_when(None) self._test_outcome_written = False def reset(self) -> None: """Reset the handler; should be called before the start of each test.""" self._first_record_emitted = False def set_when(self, when: str | None) -> None: """Prepare for the given test phase (setup/call/teardown).""" self._when = when self._section_name_shown = False if when == "start": self._test_outcome_written = False def emit(self, record: logging.LogRecord) -> None: ctx_manager = ( self.capture_manager.global_and_fixture_disabled() if self.capture_manager else nullcontext() ) with ctx_manager: if not self._first_record_emitted: self.stream.write("\n") self._first_record_emitted = True elif self._when in ("teardown", "finish"): if not self._test_outcome_written: self._test_outcome_written = True self.stream.write("\n") if not self._section_name_shown and self._when: self.stream.section("live log " + self._when, sep="-", bold=True) self._section_name_shown = True super().emit(record) def handleError(self, record: logging.LogRecord) -> None: # Handled by LogCaptureHandler. pass
_LiveLoggingStreamHandler
python
PyCQA__flake8
src/flake8/formatting/default.py
{ "start": 1436, "end": 1996 }
class ____(SimpleFormatter): """Default formatter for Flake8. This also handles backwards compatibility for people specifying a custom format string. """ error_format = ( "%(bold)s%(path)s%(reset)s" "%(cyan)s:%(reset)s%(row)d%(cyan)s:%(reset)s%(col)d%(cyan)s:%(reset)s " "%(bold)s%(red)s%(code)s%(reset)s %(text)s" ) def after_init(self) -> None: """Check for a custom format string.""" if self.options.format.lower() != "default": self.error_format = self.options.format
Default
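Since `error_format` above is a plain %-style mapping template, its expansion can be shown in isolation. A minimal sketch (the field values are made up; flake8 itself supplies the color escape codes and error fields):

error_format = (
    "%(bold)s%(path)s%(reset)s"
    "%(cyan)s:%(reset)s%(row)d%(cyan)s:%(reset)s%(col)d%(cyan)s:%(reset)s "
    "%(bold)s%(red)s%(code)s%(reset)s %(text)s"
)
params = {
    # Color escape codes; empty strings model the no-color case.
    "bold": "", "reset": "", "cyan": "", "red": "",
    "path": "example.py", "row": 3, "col": 1,
    "code": "E302", "text": "expected 2 blank lines, found 1",
}
print(error_format % params)
# example.py:3:1: E302 expected 2 blank lines, found 1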
python
sqlalchemy__sqlalchemy
test/orm/test_unitofworkv2.py
{ "start": 50747, "end": 53072 }
class ____(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table( "parent", metadata, Column("id", Integer, primary_key=True), Column("data", Integer), ) Table( "child", metadata, Column("id", Integer, ForeignKey("parent.id"), primary_key=True), Column("data", Integer), ) def _fixture(self): parent, child = self.tables.parent, self.tables.child class Parent(BasicEntity): pass class Child(BasicEntity): pass self.mapper_registry.map_imperatively( Parent, parent, properties={ "child": relationship( Child, uselist=False, cascade="all, delete-orphan", backref="parent", ) }, ) self.mapper_registry.map_imperatively(Child, child) return Parent, Child def test_switch_on_update(self): Parent, Child = self._fixture() sess = fixture_session() p1 = Parent(id=1, child=Child()) sess.add(p1) sess.commit() sess.close() p2 = Parent(id=1, child=Child()) p3 = sess.merge(p2) old = attributes.get_history(p3, "child")[2][0] assert old in sess # essentially no SQL should emit here, # because we've replaced the row with another identical one sess.flush() assert p3.child._sa_instance_state.session_id == sess.hash_key assert p3.child in sess p4 = Parent(id=1, child=Child()) p5 = sess.merge(p4) old = attributes.get_history(p5, "child")[2][0] assert old in sess sess.flush() def test_switch_on_delete(self): Parent, Child = self._fixture() sess = fixture_session() p1 = Parent(id=1, data=2, child=None) sess.add(p1) sess.flush() p1.id = 5 sess.delete(p1) eq_(p1.id, 5) sess.flush() eq_( sess.scalar( select(func.count("*")).select_from(self.tables.parent) ), 0, ) sess.close()
RowswitchAccountingTest
python
aimacode__aima-python
deep_learning4e.py
{ "start": 1597, "end": 1870 }
class ____(Activation): def __init__(self, alpha=0.01): self.alpha = alpha def function(self, x): return x if x > 0 else self.alpha * (np.exp(x) - 1) def derivative(self, value): return 1 if value > 0 else self.alpha * np.exp(value)
ELU
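A small usage sketch of the activation above, re-stated outside the repo's `Activation` hierarchy so it runs standalone: f(x) = x for x > 0 and alpha * (e^x - 1) otherwise, with derivative 1 or alpha * e^x.

import numpy as np


class ELU:
    """Standalone copy of the masked class above, minus the base class."""

    def __init__(self, alpha=0.01):
        self.alpha = alpha

    def function(self, x):
        return x if x > 0 else self.alpha * (np.exp(x) - 1)

    def derivative(self, value):
        return 1 if value > 0 else self.alpha * np.exp(value)


elu = ELU(alpha=0.01)
print(elu.function(2.0))     # 2.0 -- identity on positive inputs
print(elu.function(-1.0))    # ~ -0.00632 == 0.01 * (e**-1 - 1)
print(elu.derivative(-1.0))  # ~ 0.00368 == 0.01 * e**-1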
python
dagster-io__dagster
python_modules/dagster/dagster/_core/remote_representation/external_data.py
{ "start": 41276, "end": 56287 }
class ____(NamedTuple): resource_key: str node_handle: NodeHandle def _get_resource_usage_from_node( pipeline: JobDefinition, node: Node, parent_handle: Optional[NodeHandle] = None, ) -> Iterable[NodeHandleResourceUse]: handle = NodeHandle(node.name, parent_handle) if isinstance(node, OpNode): for resource_req in node.get_resource_requirements(pipeline.graph): if isinstance(resource_req, ResourceKeyRequirement): yield NodeHandleResourceUse(resource_req.key, handle) elif isinstance(node, GraphNode): for nested_node in node.definition.nodes: yield from _get_resource_usage_from_node(pipeline, nested_node, handle) def _get_resource_job_usage(job_defs: Sequence[JobDefinition]) -> ResourceJobUsageMap: resource_job_usage_map: dict[str, list[ResourceJobUsageEntry]] = defaultdict(list) for job_def in job_defs: job_name = job_def.name if is_reserved_asset_job_name(job_name): continue resource_usage: list[NodeHandleResourceUse] = [] for solid in job_def.nodes_in_topological_order: resource_usage += [use for use in _get_resource_usage_from_node(job_def, solid)] node_use_by_key: dict[str, list[NodeHandle]] = defaultdict(list) for use in resource_usage: node_use_by_key[use.resource_key].append(use.node_handle) for resource_key in node_use_by_key: resource_job_usage_map[resource_key].append( ResourceJobUsageEntry( job_name=job_def.name, node_handles=node_use_by_key[resource_key] ) ) return resource_job_usage_map def asset_check_node_snaps_from_repo(repo: RepositoryDefinition) -> Sequence[AssetCheckNodeSnap]: job_names_by_check_key: dict[AssetCheckKey, list[str]] = defaultdict(list) for job_def in repo.get_all_jobs(): asset_layer = job_def.asset_layer for check_key in asset_layer.asset_graph.asset_check_keys: job_names_by_check_key[check_key].append(job_def.name) asset_check_node_snaps: list[AssetCheckNodeSnap] = [] for check_key, job_names in job_names_by_check_key.items(): spec = repo.asset_graph.get_check_spec(check_key) automation_condition, automation_condition_snapshot = resolve_automation_condition_args( spec.automation_condition ) asset_check_node_snaps.append( AssetCheckNodeSnap( name=check_key.name, asset_key=check_key.asset_key, description=spec.description, execution_set_identifier=repo.asset_graph.get_execution_set_identifier(check_key), job_names=job_names, blocking=spec.blocking, additional_asset_keys=[dep.asset_key for dep in spec.additional_deps], automation_condition=automation_condition, automation_condition_snapshot=automation_condition_snapshot, ) ) return sorted(asset_check_node_snaps, key=lambda check: (check.asset_key, check.name)) def asset_node_snaps_from_repo(repo: RepositoryDefinition) -> Sequence[AssetNodeSnap]: # First iterate over all job defs to identify a "primary node" for each materializable asset # key. This is the node that will be used to populate the AssetNodeSnap. We need to identify # a primary node because the same asset can be materialized as part of multiple jobs. 
primary_node_pairs_by_asset_key: dict[AssetKey, tuple[NodeOutputHandle, JobDefinition]] = {} job_defs_by_asset_key: dict[AssetKey, list[JobDefinition]] = defaultdict(list) for job_def in repo.get_all_jobs(): asset_layer = job_def.asset_layer for asset_key in asset_layer.external_job_asset_keys: job_defs_by_asset_key[asset_key].append(job_def) for asset_key in asset_layer.selected_asset_keys: job_defs_by_asset_key[asset_key].append(job_def) if asset_key not in primary_node_pairs_by_asset_key: op_handle = asset_layer.get_op_output_handle(asset_key) primary_node_pairs_by_asset_key[asset_key] = (op_handle, job_def) asset_node_snaps: list[AssetNodeSnap] = [] asset_graph = repo.asset_graph for key in sorted(asset_graph.get_all_asset_keys()): asset_node = asset_graph.get(key) # Materializable assets (which are always part of at least one job, due to asset base jobs) # have various fields related to their op/output/jobs etc defined. External assets have null # values for all these fields. if key in primary_node_pairs_by_asset_key: output_handle, job_def = primary_node_pairs_by_asset_key[key] root_node_handle = output_handle.node_handle.root node_def = job_def.graph.get_node(output_handle.node_handle).definition node_handles = job_def.asset_layer.upstream_dep_op_handles(key) # graph_name is only set for assets that are produced by nested ops. graph_name = ( root_node_handle.name if root_node_handle != output_handle.node_handle else None ) op_defs = [ cast("OpDefinition", job_def.graph.get_node(node_handle).definition) for node_handle in node_handles if isinstance(job_def.graph.get_node(node_handle).definition, OpDefinition) ] pools = {op_def.pool for op_def in op_defs if op_def.pool} op_names = sorted([str(handle) for handle in node_handles]) op_name = graph_name or next(iter(op_names), None) or node_def.name compute_kind = node_def.tags.get(COMPUTE_KIND_TAG) node_definition_name = node_def.name # Confusingly, the `name` field sometimes mismatches the `name` field on the # OutputDefinition. We need to fix this. output_name = node_def.output_def_named(output_handle.output_name).name required_top_level_resources = ( sorted(node_def.required_resource_keys) if isinstance(node_def, OpDefinition) else [] ) else: graph_name = None pools = set() op_names = [] op_name = None compute_kind = None node_definition_name = None output_name = None required_top_level_resources = [] # Partition mappings are only exposed on the AssetNodeSnap if at least one asset is # partitioned and the partition mapping is one of the builtin types. 
partition_mappings: dict[AssetKey, Optional[PartitionMapping]] = {} builtin_partition_mapping_types = get_builtin_partition_mapping_types() for pk in asset_node.parent_keys: # directly access the partition mapping to avoid the inference step of # get_partition_mapping, as we want to defer the inference to the global RemoteAssetGraph partition_mapping = repo.asset_graph.get(key).partition_mappings.get(pk) if ( asset_node.partitions_def or repo.asset_graph.get(pk).partitions_def ) and isinstance(partition_mapping, builtin_partition_mapping_types): partition_mappings[pk] = partition_mapping automation_condition, automation_condition_snapshot = resolve_automation_condition_args( asset_node.automation_condition ) asset_node_snaps.append( AssetNodeSnap( asset_key=key, parent_edges=[ AssetParentEdgeSnap( parent_asset_key=pk, partition_mapping=partition_mappings.get(pk) ) for pk in sorted(asset_node.parent_keys) ], child_edges=[ AssetChildEdgeSnap(child_asset_key=k) for k in sorted(asset_node.child_keys) ], execution_type=asset_node.execution_type, compute_kind=compute_kind, pools=pools, op_name=op_name, op_names=op_names, code_version=asset_node.code_version, node_definition_name=node_definition_name, graph_name=graph_name, description=asset_node.description, job_names=sorted([jd.name for jd in job_defs_by_asset_key[key]]), partitions=( PartitionsSnap.from_def(asset_node.partitions_def) if asset_node.partitions_def else None ), output_name=output_name, metadata=asset_node.metadata, tags=asset_node.tags, group_name=asset_node.group_name, legacy_freshness_policy=asset_node.legacy_freshness_policy, freshness_policy=asset_node.freshness_policy, is_source=asset_node.is_external, is_observable=asset_node.is_observable, execution_set_identifier=repo.asset_graph.get_execution_set_identifier(key), required_top_level_resources=required_top_level_resources, auto_materialize_policy=automation_condition.as_auto_materialize_policy() if automation_condition else None, automation_condition_snapshot=automation_condition_snapshot, backfill_policy=asset_node.backfill_policy, auto_observe_interval_minutes=asset_node.auto_observe_interval_minutes, owners=asset_node.owners, ) ) return asset_node_snaps def resource_value_snap_from_raw(v: Any) -> ResourceValueSnap: if isinstance(v, dict) and set(v.keys()) == {"env"}: return ResourceConfigEnvVarSnap(name=v["env"]) return json.dumps(v) def _get_nested_resources_map( resource_datas: Mapping[str, ResourceDefinition], top_level_resources: Mapping[str, ResourceDefinition], ) -> Mapping[str, Mapping[str, NestedResource]]: out_map: Mapping[str, Mapping[str, NestedResource]] = {} for resource_name, resource_def in resource_datas.items(): out_map[resource_name] = _get_nested_resources(resource_def, top_level_resources) return out_map def _find_match(nested_resource, resource_defs) -> Optional[str]: if is_coercible_to_resource(nested_resource): defn = coerce_to_resource(nested_resource) else: return None for k, v in resource_defs.items(): if defn is v: return k return None def _get_nested_resources( resource_def: ResourceDefinition, top_level_resources: Mapping[str, ResourceDefinition], ) -> Mapping[str, NestedResource]: # ConfigurableResources may have "anonymous" nested resources, which are not # explicitly specified as top-level resources if isinstance( resource_def, ( ConfigurableResourceFactoryResourceDefinition, ConfigurableIOManagerFactoryResourceDefinition, ), ): results = {} for k, nested_resource in resource_def.nested_resources.items(): top_level_key = 
_find_match(nested_resource, top_level_resources) if top_level_key: results[k] = NestedResource(NestedResourceType.TOP_LEVEL, top_level_key) else: results[k] = NestedResource( NestedResourceType.ANONYMOUS, nested_resource.__class__.__name__ ) return results else: return { k: NestedResource(NestedResourceType.TOP_LEVEL, k) for k in resource_def.required_resource_keys } PARTITION_SET_SNAP_NAME_SUFFIX: Final = "_partition_set" def partition_set_snap_name_for_job_name(job_name) -> str: return f"{job_name}{PARTITION_SET_SNAP_NAME_SUFFIX}" def job_name_for_partition_set_snap_name(name: str) -> str: job_name_len = len(name) - len(PARTITION_SET_SNAP_NAME_SUFFIX) return name[:job_name_len] def active_presets_from_job_def(job_def: JobDefinition) -> Sequence[PresetSnap]: check.inst_param(job_def, "job_def", JobDefinition) if job_def.run_config is None: return [] else: return [ PresetSnap( name=DEFAULT_PRESET_NAME, run_config=job_def.run_config, op_selection=None, mode=DEFAULT_MODE_NAME, tags={}, ) ] def get_preview_tags(job_def: JobDefinition) -> Mapping[str, str]: return {k: v for k, v in job_def.tags.items() if k in TAGS_INCLUDE_IN_REMOTE_JOB_REF} def resolve_automation_condition_args( automation_condition: Optional[AutomationCondition], ) -> tuple[Optional[AutomationCondition], Optional[AutomationConditionSnapshot]]: if automation_condition is None: return None, None elif automation_condition.is_serializable: # to avoid serializing too much data, only store the full condition if # it is available return automation_condition, None else: # for non-serializable conditions, only include the snapshot return None, automation_condition.get_snapshot() def _extract_fast(serialized_job_data: str): target_key = f'"{_JOB_SNAP_STORAGE_FIELD}": ' target_substr = target_key + get_prefix_for_a_serialized(JobSnap) # look for key: type idx = serialized_job_data.find(target_substr) check.invariant(idx > 0) # slice starting after key: start_idx = idx + len(target_key) # trim outer object } # assumption that pipeline_snapshot is last field under test in test_job_data_snap_layout serialized_job_snap = serialized_job_data[start_idx:-1] check.invariant(serialized_job_snap[0] == "{" and serialized_job_snap[-1] == "}") return serialized_job_snap def _extract_safe(serialized_job_data: str): # Intentionally use json directly instead of serdes to avoid losing information if the current process # is older than the source process. return json.dumps(json.loads(serialized_job_data)[_JOB_SNAP_STORAGE_FIELD]) DISABLE_FAST_EXTRACT_ENV_VAR = "DAGSTER_DISABLE_JOB_SNAP_FAST_EXTRACT" def extract_serialized_job_snap_from_serialized_job_data_snap(serialized_job_data_snap: str): # utility used by DagsterCloudAgent to extract JobSnap out of JobDataSnap # efficiently and safely if not serialized_job_data_snap.startswith(get_prefix_for_a_serialized(JobDataSnap)): raise Exception("Passed in string does not meet expectations for a serialized JobDataSnap") if not os.getenv(DISABLE_FAST_EXTRACT_ENV_VAR): try: return _extract_fast(serialized_job_data_snap) except Exception: pass return _extract_safe(serialized_job_data_snap) @whitelist_for_serdes @record
NodeHandleResourceUse
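The `_extract_fast` helper in the entry above avoids a full `json.loads` round-trip by slicing the serialized sub-object straight out of the outer JSON string. A simplified sketch of that idea (the payload and key are made up; the real code also checks a serdes type prefix before slicing):

serialized = '{"version": 1, "job_snapshot": {"name": "my_job", "ops": []}}'

target_key = '"job_snapshot": '
idx = serialized.find(target_key)
assert idx > 0

# Slice from just after the key to just before the outer closing brace,
# relying on the field being last in the object -- the same layout
# assumption _extract_fast makes (and that _extract_safe does not need).
start = idx + len(target_key)
inner = serialized[start:-1]
assert inner[0] == "{" and inner[-1] == "}"
print(inner)  # {"name": "my_job", "ops": []}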
python
ray-project__ray
python/ray/dashboard/modules/reporter/tests/test_gpu_providers.py
{ "start": 15311, "end": 18806 }
class ____(unittest.TestCase): """Test AmdGpuProvider class.""" def setUp(self): """Set up test fixtures.""" self.provider = AmdGpuProvider() def test_get_provider_name(self): """Test provider name.""" self.assertEqual(self.provider.get_provider_name(), GpuProviderType.AMD) @patch("ray._private.thirdparty.pyamdsmi", create=True) def test_is_available_success(self, mock_pyamdsmi): """Test is_available when AMD GPU is available.""" mock_pyamdsmi.smi_initialize.return_value = None mock_pyamdsmi.smi_shutdown.return_value = None self.assertTrue(self.provider.is_available()) mock_pyamdsmi.smi_initialize.assert_called_once() mock_pyamdsmi.smi_shutdown.assert_called_once() @patch("ray._private.thirdparty.pyamdsmi", create=True) def test_is_available_failure(self, mock_pyamdsmi): """Test is_available when AMD GPU is not available.""" mock_pyamdsmi.smi_initialize.side_effect = Exception("AMD driver not found") self.assertFalse(self.provider.is_available()) @patch("ray._private.thirdparty.pyamdsmi", create=True) def test_initialize_success(self, mock_pyamdsmi): """Test successful initialization.""" mock_pyamdsmi.smi_initialize.return_value = None self.assertTrue(self.provider._initialize()) self.assertTrue(self.provider._initialized) mock_pyamdsmi.smi_initialize.assert_called_once() @patch("ray._private.thirdparty.pyamdsmi", create=True) def test_get_gpu_utilization_success(self, mock_pyamdsmi): """Test successful GPU utilization retrieval.""" mock_process = Mock() mock_process.process_id = 5678 mock_process.vram_usage = 512 * MB # Configure mocks mock_pyamdsmi.smi_initialize.return_value = None mock_pyamdsmi.smi_get_device_count.return_value = 1 mock_pyamdsmi.smi_get_device_id.return_value = "device_0" mock_pyamdsmi.smi_get_device_utilization.return_value = 85 mock_pyamdsmi.smi_get_device_compute_process.return_value = [mock_process] mock_pyamdsmi.smi_get_compute_process_info_by_device.return_value = [ mock_process ] mock_pyamdsmi.smi_get_device_name.return_value = b"AMD Radeon RX 6800 XT" mock_pyamdsmi.smi_get_device_unique_id.return_value = 0x123456789ABCDEF0 mock_pyamdsmi.smi_get_device_memory_used.return_value = 6 * MB * 1024 mock_pyamdsmi.smi_get_device_memory_total.return_value = 16 * MB * 1024 mock_pyamdsmi.smi_shutdown.return_value = None # Set up provider state self.provider._pyamdsmi = mock_pyamdsmi self.provider._initialized = True result = self.provider.get_gpu_utilization() self.assertEqual(len(result), 1) gpu_info = result[0] self.assertEqual(gpu_info["index"], 0) self.assertEqual(gpu_info["name"], "AMD Radeon RX 6800 XT") self.assertEqual(gpu_info["uuid"], hex(0x123456789ABCDEF0)) self.assertEqual(gpu_info["utilization_gpu"], 85) self.assertEqual(gpu_info["memory_used"], 6 * 1024) # 6GB in MB self.assertEqual(gpu_info["memory_total"], 16 * 1024) # 16GB in MB self.assertEqual(len(gpu_info["processes_pids"]), 1) self.assertEqual(gpu_info["processes_pids"][5678]["pid"], 5678) self.assertEqual(gpu_info["processes_pids"][5678]["gpu_memory_usage"], 512)
TestAmdGpuProvider
python
kamyu104__LeetCode-Solutions
Python/maximum-number-of-removal-queries-that-can-be-processed-i.py
{ "start": 38, "end": 895 }
class ____(object): def maximumProcessableQueries(self, nums, queries): """ :type nums: List[int] :type queries: List[int] :rtype: int """ dp = [[float("-inf")]*len(nums) for _ in xrange(len(nums))] dp[0][-1] = 0 for l in reversed(xrange(1, len(nums))): for i in xrange(len(nums)-(l-1)): j = i+(l-1) if i-1 >= 0: dp[i][j] = max(dp[i][j], dp[i-1][j]+(1 if nums[i-1] >= queries[dp[i-1][j]] else 0)) if j+1 < len(nums): dp[i][j] = max(dp[i][j], dp[i][j+1]+(1 if nums[j+1] >= queries[dp[i][j+1]] else 0)) if dp[i][j] == len(queries): return len(queries) return max(dp[i][i]+(1 if nums[i] >= queries[dp[i][i]] else 0) for i in xrange(len(nums)))
Solution
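A Python 3 re-statement of the interval DP above (assumed faithful: only `xrange` -> `range` and some naming), so the recurrence is easy to run. `dp[i][j]` is the number of queries already served when `nums[i..j]` is the untouched middle segment; each transition removes one element from either end and serves the current query only if that element is large enough.

def maximum_processable_queries(nums, queries):
    n = len(nums)
    dp = [[float("-inf")] * n for _ in range(n)]
    dp[0][-1] = 0  # nothing removed yet, zero queries served
    for l in reversed(range(1, n)):
        for i in range(n - (l - 1)):
            j = i + (l - 1)
            if i - 1 >= 0:  # reach (i, j) by removing nums[i-1] from the left
                served = dp[i - 1][j]
                dp[i][j] = max(dp[i][j], served + (1 if nums[i - 1] >= queries[served] else 0))
            if j + 1 < n:   # reach (i, j) by removing nums[j+1] from the right
                served = dp[i][j + 1]
                dp[i][j] = max(dp[i][j], served + (1 if nums[j + 1] >= queries[served] else 0))
            if dp[i][j] == len(queries):
                return len(queries)
    return max(dp[i][i] + (1 if nums[i] >= queries[dp[i][i]] else 0) for i in range(n))


print(maximum_processable_queries([1, 2, 3, 4, 5], [1, 2, 3, 4, 6]))
# 4 -- queries 1, 2, 3, 4 can each be matched; no remaining element reaches 6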
python
django__django
tests/test_runner/test_parallel.py
{ "start": 2546, "end": 7465 }
class ____(SimpleTestCase): def test_was_successful_no_events(self): result = RemoteTestResult() self.assertIs(result.wasSuccessful(), True) def test_was_successful_one_success(self): result = RemoteTestResult() test = None result.startTest(test) try: result.addSuccess(test) finally: result.stopTest(test) self.assertIs(result.wasSuccessful(), True) def test_was_successful_one_expected_failure(self): result = RemoteTestResult() test = None result.startTest(test) try: result.addExpectedFailure(test, _test_error_exc_info()) finally: result.stopTest(test) self.assertIs(result.wasSuccessful(), True) def test_was_successful_one_skip(self): result = RemoteTestResult() test = None result.startTest(test) try: result.addSkip(test, "Skipped") finally: result.stopTest(test) self.assertIs(result.wasSuccessful(), True) @unittest.skipUnless(tblib is not None, "requires tblib to be installed") def test_was_successful_one_error(self): result = RemoteTestResult() test = None result.startTest(test) try: result.addError(test, _test_error_exc_info()) finally: result.stopTest(test) self.assertIs(result.wasSuccessful(), False) @unittest.skipUnless(tblib is not None, "requires tblib to be installed") def test_was_successful_one_failure(self): result = RemoteTestResult() test = None result.startTest(test) try: result.addFailure(test, _test_error_exc_info()) finally: result.stopTest(test) self.assertIs(result.wasSuccessful(), False) @unittest.skipUnless(tblib is not None, "requires tblib to be installed") def test_add_error_before_first_test(self): result = RemoteTestResult() test_id = "test_foo (tests.test_foo.FooTest.test_foo)" test = _ErrorHolder(test_id) # Call addError() without a call to startTest(). result.addError(test, _test_error_exc_info()) (event,) = result.events self.assertEqual(event[0], "addError") self.assertEqual(event[1], -1) self.assertEqual(event[2], test_id) (error_type, _, _) = event[3] self.assertEqual(error_type, ValueError) self.assertIs(result.wasSuccessful(), False) def test_picklable(self): result = RemoteTestResult() loaded_result = pickle.loads(pickle.dumps(result)) self.assertEqual(result.events, loaded_result.events) def test_pickle_errors_detection(self): picklable_error = RuntimeError("This is fine") not_unpicklable_error = ExceptionThatFailsUnpickling("arg") result = RemoteTestResult() result._confirm_picklable(picklable_error) # The exception can be pickled but not unpickled. pickle.dumps(not_unpicklable_error) msg = "__init__() missing 1 required positional argument" with self.assertRaisesMessage(TypeError, msg): result._confirm_picklable(not_unpicklable_error) @unittest.skipUnless(tblib is not None, "requires tblib to be installed") def test_unpicklable_subtest(self): result = RemoteTestResult() subtest_test = SampleFailingSubtest(methodName="pickle_error_test") subtest_test.run(result=result) events = result.events subtest_event = events[1] assertion_error = subtest_event[3] self.assertEqual(str(assertion_error[1]), "expected failure") @unittest.skipUnless(tblib is not None, "requires tblib to be installed") def test_add_failing_subtests(self): """ Failing subtests are added correctly using addSubTest(). """ # Manually run a test with failing subtests to prevent the failures # from affecting the actual test run. 
result = RemoteTestResult() subtest_test = SampleFailingSubtest(methodName="dummy_test") subtest_test.run(result=result) events = result.events self.assertEqual(len(events), 5) self.assertIs(result.wasSuccessful(), False) event = events[1] self.assertEqual(event[0], "addSubTest") self.assertEqual( str(event[2]), "dummy_test (test_runner.test_parallel.SampleFailingSubtest.dummy_test) " "(index=0)", ) self.assertEqual(repr(event[3][1]), "AssertionError('0 != 1')") event = events[2] self.assertEqual(repr(event[3][1]), "AssertionError('2 != 1')") def test_add_duration(self): result = RemoteTestResult() result.addDuration(None, 2.3) self.assertEqual(result.collectedDurations, [("None", 2.3)])
RemoteTestResultTest
python
streamlit__streamlit
lib/tests/streamlit/runtime/context_test.py
{ "start": 1013, "end": 5395 }
class ____(unittest.TestCase): mocked_cookie = Morsel() mocked_cookie.set("cookieName", "cookieValue", "cookieValue") @patch( "streamlit.runtime.context._get_request", MagicMock( return_value=MagicMock(headers=HTTPHeaders({"the-header": "header-value"})) ), ) def test_context_headers(self): """Test that `st.context.headers` returns headers from ScriptRunContext""" assert st.context.headers.to_dict() == {"The-Header": "header-value"} @patch( "streamlit.runtime.context._get_request", MagicMock(return_value=MagicMock(cookies={"cookieName": mocked_cookie})), ) def test_context_cookies(self): """Test that `st.context.cookies` returns cookies from ScriptRunContext""" assert st.context.cookies.to_dict() == {"cookieName": "cookieValue"} @parameterized.expand( [ ("8.8.8.8", "8.8.8.8"), # Regular IP address ("192.168.1.1", "192.168.1.1"), # Private IP address ("127.0.0.1", None), # IPv4 localhost ("::1", None), # IPv6 localhost ] ) @patch("streamlit.runtime.context._get_request") def test_ip_address_values(self, remote_ip, expected_value, mock_get_request): """Test that `st.context.ip_address` handles different IP addresses correctly""" mock_get_request.return_value = MagicMock(remote_ip=remote_ip) assert st.context.ip_address == expected_value @patch( "streamlit.runtime.context.get_script_run_ctx", MagicMock(return_value=None), ) def test_url_none_context(self): """Test that `st.context.url` returns None if context is None""" assert st.context.url is None @patch("streamlit.runtime.context.get_script_run_ctx") def test_url_none_context_info(self, mock_get_script_run_ctx): """Test that `st.context.url` returns None if context_info is None""" # Create a mock context with None context_info mock_ctx = MagicMock() mock_ctx.context_info = None mock_get_script_run_ctx.return_value = mock_ctx assert st.context.url is None @patch("streamlit.runtime.context.get_script_run_ctx") @patch("streamlit.runtime.context.maybe_trim_page_path") @patch("streamlit.runtime.context.maybe_add_page_path") def test_url(self, mock_add_path, mock_trim_path, mock_get_script_run_ctx): """Test that `st.context.url` returns the URL from the context after processing""" # Create a mock context with a URL mock_context_info = MagicMock() mock_context_info.url = "https://example.com/original" mock_ctx = MagicMock() mock_ctx.context_info = mock_context_info mock_get_script_run_ctx.return_value = mock_ctx # Mock the page manager mock_ctx.pages_manager = MagicMock() # Set up the mock return values for the URL processing functions mock_trim_path.return_value = "https://example.com/" mock_add_path.return_value = "https://example.com/added" # Test that the URL is processed by both functions result = st.context.url # Verify the result assert result == "https://example.com/added" # Verify that the functions were called with the correct arguments mock_trim_path.assert_called_once_with( "https://example.com/original", mock_ctx.pages_manager ) mock_add_path.assert_called_once_with( "https://example.com/", mock_ctx.pages_manager ) @parameterized.expand( [ ("coNtent-TYPE", "Content-Type"), ("coNtent-type", "Content-Type"), ("Content-Type", "Content-Type"), ("Content-Type", "Content-Type"), ("Cache-Control", "Cache-Control"), ("Cache-control", "Cache-Control"), ("cache-control", "Cache-Control"), ("cache-CONTROL", "Cache-Control"), ("Access-Control-Max-Age", "Access-Control-Max-Age"), ("Access-control-max-age", "Access-Control-Max-Age"), ("access-control-MAX-age", "Access-Control-Max-Age"), ] ) def test_normalize_header(self, name, expected): """Test 
that `_normalize_header` normalizes header names""" assert _normalize_header(name) == expected
StContextTest
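The parameterized cases above pin down the normalization contract exactly. A minimal sketch of a function with that behavior (an illustration, not streamlit's actual `_normalize_header` implementation): title-case each dash-separated part.

def normalize_header(name: str) -> str:
    """Canonicalize an HTTP header name, e.g. 'cache-CONTROL' -> 'Cache-Control'."""
    return "-".join(part.capitalize() for part in name.split("-"))


assert normalize_header("coNtent-TYPE") == "Content-Type"
assert normalize_header("cache-CONTROL") == "Cache-Control"
assert normalize_header("access-control-MAX-age") == "Access-Control-Max-Age"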
python
numba__llvmlite
llvmlite/binding/orcjit.py
{ "start": 133, "end": 294 }
class ____(ctypes.Structure):
    _fields_ = [
        ("element_kind", c_uint8),
        ("value", c_char_p),
        ("value_len", c_size_t),
    ]
_LinkElement
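A short sketch of how a `ctypes.Structure` with those fields is constructed and read back from Python (the field values are illustrative; llvmlite itself populates these when talking to the native ORC JIT layer):

import ctypes
from ctypes import c_char_p, c_size_t, c_uint8


class LinkElement(ctypes.Structure):
    """Stand-in with the same field layout as the masked structure above."""

    _fields_ = [
        ("element_kind", c_uint8),
        ("value", c_char_p),
        ("value_len", c_size_t),
    ]


elem = LinkElement(element_kind=1, value=b"module.bc", value_len=9)
print(elem.element_kind, elem.value, elem.value_len)  # 1 b'module.bc' 9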
python
huggingface__transformers
tests/models/opt/test_modeling_opt.py
{ "start": 1723, "end": 7241 }
class ____: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, num_labels=3, word_embed_proj_dim=16, type_sequence_label_size=2, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.embed_dim = embed_dim self.num_labels = num_labels self.type_sequence_label_size = type_sequence_label_size self.word_embed_proj_dim = word_embed_proj_dim self.is_encoder_decoder = False def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_opt_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def get_config(self): return OPTConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, is_encoder_decoder=False, word_embed_proj_dim=self.word_embed_proj_dim, ) def get_pipeline_config(self): config = self.get_config() config.max_position_embeddings = 100 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = OPTModel(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, 
random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # test no attention_mask works outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) _, past_key_values = outputs.to_tuple() output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) @require_torch
OPTModelTester
python
lazyprogrammer__machine_learning_examples
rl3/a2c/atari_wrappers.py
{ "start": 7030, "end": 8284 }
class ____: def __init__(self, frames): """This object ensures that common frames between the observations are only stored once. It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay buffers. This object should only be converted to numpy array before being passed to the model. You'd not believe how complex the previous solution was.""" self._frames = frames def __array__(self, dtype=None): out = np.concatenate(self._frames, axis=2) if dtype is not None: out = out.astype(dtype) return out def make_atari(env_id): env = gym.make(env_id) assert 'NoFrameskip' in env.spec.id env = NoopResetEnv(env, noop_max=30) env = MaxAndSkipEnv(env, skip=4) return env def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False): """Configure environment for DeepMind-style Atari. """ if episode_life: env = EpisodicLifeEnv(env) if 'FIRE' in env.unwrapped.get_action_meanings(): env = FireResetEnv(env) env = WarpFrame(env) if clip_rewards: env = ClipRewardEnv(env) if frame_stack: env = FrameStack(env, 4) return env
LazyFrames
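A compact sketch of the `LazyFrames` idea in isolation (the 84x84 shapes follow the usual Atari preprocessing convention, assumed here for illustration): references are kept per frame and concatenation is deferred until a consumer coerces the object to an array.

import numpy as np


class LazyFrames:
    """Standalone copy of the class above."""

    def __init__(self, frames):
        self._frames = frames  # references only; no copy at construction

    def __array__(self, dtype=None):
        out = np.concatenate(self._frames, axis=2)
        if dtype is not None:
            out = out.astype(dtype)
        return out


frames = [np.full((84, 84, 1), i, dtype=np.uint8) for i in range(4)]
lazy = LazyFrames(frames)  # cheap: stores the list of frame references
obs = np.asarray(lazy)     # __array__ fires here: frames stacked along axis 2
print(obs.shape)           # (84, 84, 4)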
python
pennersr__django-allauth
allauth/headless/mfa/views.py
{ "start": 7041, "end": 7613 }
class ____(AuthenticationStageAPIView): input_class = { "POST": AuthenticateWebAuthnInput, } stage_class = AuthenticateStage def get(self, request, *args, **kwargs): request_options = webauthn_auth.begin_authentication(self.stage.login.user) return response.WebAuthnRequestOptionsResponse(request, request_options) def get_input_kwargs(self): return {"user": self.stage.login.user} def post(self, request, *args, **kwargs): self.input.save() return self.respond_next_stage()
AuthenticateWebAuthnView
python
dagster-io__dagster
python_modules/dagster/dagster/_core/storage/schedules/sqlite/sqlite_schedule_storage.py
{ "start": 1016, "end": 4632 }
class ____(SqlScheduleStorage, ConfigurableClass): """Local SQLite backed schedule storage.""" def __init__(self, conn_string: str, inst_data: Optional[ConfigurableClassData] = None): check.str_param(conn_string, "conn_string") self._conn_string = conn_string self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData) super().__init__() @property def inst_data(self) -> Optional[ConfigurableClassData]: return self._inst_data @classmethod def config_type(cls) -> UserConfigSchema: return {"base_dir": StringSource} @classmethod def from_config_value( cls, inst_data: Optional[ConfigurableClassData], config_value ) -> "SqliteScheduleStorage": return SqliteScheduleStorage.from_local(inst_data=inst_data, **config_value) @classmethod def from_local( cls, base_dir: str, inst_data: Optional[ConfigurableClassData] = None ) -> "SqliteScheduleStorage": check.str_param(base_dir, "base_dir") mkdir_p(base_dir) conn_string = create_db_conn_string(base_dir, "schedules") engine = create_engine(conn_string, poolclass=NullPool) alembic_config = get_alembic_config(__file__) should_migrate_data = False with engine.connect() as connection: db_revision, head_revision = check_alembic_revision(alembic_config, connection) if not (db_revision and head_revision): table_names = db.inspect(engine).get_table_names() if "job_ticks" in table_names: # The ticks table exists but the alembic version table does not. This means that the SQLite db was # initialized with SQLAlchemy 2.0 before https://github.com/dagster-io/dagster/pull/25740 was merged. # We should pin the alembic revision to the last known stamped revision before we unpinned SQLAlchemy 2.0 # This should be safe because we have guarded all known migrations since then. rev_to_stamp = LAST_KNOWN_STAMPED_SQLITE_ALEMBIC_REVISION else: should_migrate_data = True rev_to_stamp = "head" ScheduleStorageSqlMetadata.create_all(engine) connection.execute(db.text("PRAGMA journal_mode=WAL;")) stamp_alembic_rev(alembic_config, connection, rev=rev_to_stamp) safe_commit(connection) schedule_storage = cls(conn_string, inst_data) if should_migrate_data: schedule_storage.migrate() schedule_storage.optimize() return schedule_storage @contextmanager def connect(self) -> Iterator[Connection]: engine = create_engine(self._conn_string, poolclass=NullPool) with engine.connect() as conn: with conn.begin(): yield conn @property def supports_batch_queries(self) -> bool: if not super().supports_batch_queries: return False return super().supports_batch_queries and parse(get_sqlite_version()) >= parse( MINIMUM_SQLITE_BATCH_VERSION ) def upgrade(self) -> None: alembic_config = get_alembic_config(__file__) with self.connect() as conn: run_alembic_upgrade(alembic_config, conn) def alembic_version(self) -> AlembicVersion: alembic_config = get_alembic_config(__file__) with self.connect() as conn: return check_alembic_revision(alembic_config, conn)
SqliteScheduleStorage
python
pypa__pip
src/pip/_internal/resolution/resolvelib/reporter.py
{ "start": 306, "end": 2838 }
class ____(BaseReporter[Requirement, Candidate, str]): def __init__(self, constraints: Mapping[str, Constraint] | None = None) -> None: self.reject_count_by_package: defaultdict[str, int] = defaultdict(int) self._constraints = constraints or {} self._messages_at_reject_count = { 1: ( "pip is looking at multiple versions of {package_name} to " "determine which version is compatible with other " "requirements. This could take a while." ), 8: ( "pip is still looking at multiple versions of {package_name} to " "determine which version is compatible with other " "requirements. This could take a while." ), 13: ( "This is taking longer than usual. You might need to provide " "the dependency resolver with stricter constraints to reduce " "runtime. See https://pip.pypa.io/warnings/backtracking for " "guidance. If you want to abort this run, press Ctrl + C." ), } def rejecting_candidate(self, criterion: Any, candidate: Candidate) -> None: """Report a candidate being rejected. Logs both the rejection count message (if applicable) and details about the requirements and constraints that caused the rejection. """ self.reject_count_by_package[candidate.name] += 1 count = self.reject_count_by_package[candidate.name] if count in self._messages_at_reject_count: message = self._messages_at_reject_count[count] logger.info("INFO: %s", message.format(package_name=candidate.name)) msg = "Will try a different candidate, due to conflict:" for req_info in criterion.information: req, parent = req_info.requirement, req_info.parent msg += "\n " if parent: msg += f"{parent.name} {parent.version} depends on " else: msg += "The user requested " msg += req.format_for_error() # Add any relevant constraints if self._constraints: name = candidate.name constraint = self._constraints.get(name) if constraint and constraint.specifier: constraint_text = f"{name}{constraint.specifier}" msg += f"\n The user requested (constraint) {constraint_text}" logger.debug(msg)
PipReporter
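The reporter's escalating warnings come down to a per-package counter with milestone thresholds. A minimal sketch of that pattern (message text abbreviated, not pip's actual wording):

from collections import defaultdict

MESSAGES_AT_COUNT = {
    1: "looking at multiple versions of {name}...",
    8: "still looking at multiple versions of {name}...",
    13: "this is taking longer than usual...",
}
reject_count_by_package: defaultdict[str, int] = defaultdict(int)


def rejecting_candidate(name: str) -> None:
    reject_count_by_package[name] += 1
    # Emit a message only the first time the count reaches a milestone.
    message = MESSAGES_AT_COUNT.get(reject_count_by_package[name])
    if message:
        print("INFO:", message.format(name=name))


for _ in range(13):
    rejecting_candidate("example-package")  # logs at rejections 1, 8 and 13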
python
allegroai__clearml
clearml/storage/helper.py
{ "start": 77429, "end": 130814 }
class ____(object): """Storage helper. Used by the entire system to download/upload files. Supports both local and remote files (currently local files, network-mapped files, HTTP/S and Amazon S3) """ _temp_download_suffix = ".partially" _quotable_uri_schemes = set(_HttpDriver.schemes) @classmethod def _get_logger(cls) -> logging.Logger: return get_logger("storage") @attrs class _PathSubstitutionRule(object): registered_prefix = attrib(type=str) local_prefix = attrib(type=str) replace_windows_sep = attrib(type=bool) replace_linux_sep = attrib(type=bool) path_substitution_config = "storage.path_substitution" @classmethod def load_list_from_config( cls, ) -> List["_StorageHelper._PathSubstitutionRule"]: rules_list = [] for index, sub_config in enumerate(config.get(cls.path_substitution_config, list())): rule = cls( registered_prefix=sub_config.get("registered_prefix", None), local_prefix=sub_config.get("local_prefix", None), replace_windows_sep=sub_config.get("replace_windows_sep", False), replace_linux_sep=sub_config.get("replace_linux_sep", False), ) if any(prefix is None for prefix in (rule.registered_prefix, rule.local_prefix)): cls._get_logger().warning( "Illegal substitution rule configuration '{}[{}]': {}".format( cls.path_substitution_config, index, asdict(rule), ) ) continue if all((rule.replace_windows_sep, rule.replace_linux_sep)): cls._get_logger().warning( "Only one of replace_windows_sep and replace_linux_sep flags may be set." "'{}[{}]': {}".format( cls.path_substitution_config, index, asdict(rule), ) ) continue rules_list.append(rule) return rules_list class _UploadData(object): @property def src_path(self) -> str: return self._src_path @property def dest_path(self) -> str: return self._dest_path @property def canonized_dest_path(self) -> str: return self._canonized_dest_path @property def extra(self) -> dict: return self._extra @property def callback(self) -> Any: return self._callback @property def retries(self) -> int: return self._retries @property def return_canonized(self) -> bool: return self._return_canonized def __init__( self, src_path: str, dest_path: str, canonized_dest_path: str, extra: dict, callback: Any, retries: int, return_canonized: bool, ) -> None: self._src_path = src_path self._dest_path = dest_path self._canonized_dest_path = canonized_dest_path self._extra = extra self._callback = callback self._retries = retries self._return_canonized = return_canonized def __str__(self) -> str: return "src=%s" % self.src_path _helpers = {} # cache of helper instances # global terminate event for async upload threads # _terminate = threading.Event() _async_upload_threads = set() _upload_pool = None _upload_pool_pid = None # collect all bucket credentials that aren't empty (ignore entries with an empty key or secret) _s3_configurations = deferred_config("aws.s3", {}, transform=S3BucketConfigurations.from_config) _gs_configurations = deferred_config("google.storage", {}, transform=GSBucketConfigurations.from_config) _azure_configurations = deferred_config("azure.storage", {}, transform=AzureContainerConfigurations.from_config) _path_substitutions = deferred_config(transform=_PathSubstitutionRule.load_list_from_config) @property def log(self) -> logging.Logger: return self._log @property def scheme(self) -> str: return self._scheme @property def secure(self) -> bool: return self._secure @property def base_url(self) -> str: return self._base_url @classmethod def get(cls, url: str, logger: Optional[logging.Logger] = None, **kwargs: Any) -> Optional["_StorageHelper"]: """ 
Get a storage helper instance for the given URL :return: A _StorageHelper instance. """ # Handle URL substitution etc before locating the correct storage driver url = cls._canonize_url(url) # Get the credentials we should use for this url base_url = cls._resolve_base_url(url) instance_key = "%s_%s" % (base_url, threading.current_thread().ident or 0) # noinspection PyBroadException try: configs = kwargs.get("configs") if configs: instance_key += "_{}".format(configs.cache_name) except Exception: pass force_create = kwargs.pop("__force_create", False) if (instance_key in cls._helpers) and (not force_create) and base_url != "file://": return cls._helpers[instance_key] # Don't canonize URL since we already did it try: instance = cls(base_url=base_url, url=url, logger=logger, canonize_url=False, **kwargs) except (StorageError, UsageError) as ex: cls._get_logger().error(str(ex)) return None except Exception as ex: cls._get_logger().error("Failed creating storage object {} Reason: {}".format(base_url or url, ex)) return None cls._helpers[instance_key] = instance return instance @classmethod def get_local_copy(cls, remote_url: str, skip_zero_size_check: bool = False) -> str: """ Download a file from remote URL to a local storage, and return path to local copy, :param remote_url: Remote URL. Example: https://example.com/file.jpg s3://bucket/folder/file.mp4 etc. :param skip_zero_size_check: If True, no error will be raised for files with zero bytes size. :return: Path to local copy of the downloaded file. None if error occurred. """ helper = cls.get(remote_url) if not helper: return None # create temp file with the requested file name file_name = "." + remote_url.split("/")[-1].split(os.path.sep)[-1] _, local_path = mkstemp(suffix=file_name) return helper.download_to_file(remote_url, local_path, skip_zero_size_check=skip_zero_size_check) def __init__( self, base_url: str, url: str, key: Optional[str] = None, secret: Optional[str] = None, region: Optional[str] = None, verbose: bool = False, logger: Optional[logging.Logger] = None, retries: int = 5, token: Optional[str] = None, profile: Optional[str] = None, **kwargs: Any, ) -> None: level = config.get("storage.log.level", None) if level: try: self._get_logger().setLevel(level) except (TypeError, ValueError): self._get_logger().error("invalid storage log level in configuration: %s" % level) self._log = logger or self._get_logger() self._verbose = verbose self._retries = retries self._extra = {} self._base_url = base_url self._secure = True self._driver = None self._container = None self._conf = None if kwargs.get("canonize_url", True): url = self._canonize_url(url) parsed = urlparse(url) self._scheme = parsed.scheme if self._scheme == _AzureBlobServiceStorageDriver.scheme: self._conf = copy(self._azure_configurations.get_config_by_uri(url)) if self._conf is None: raise StorageError("Missing Azure Blob Storage configuration for {}".format(url)) if not self._conf.account_name or not self._conf.account_key: raise StorageError("Missing account name or key for Azure Blob Storage access for {}".format(base_url)) self._driver = _AzureBlobServiceStorageDriver() self._container = self._driver.get_container(config=self._conf, account_url=parsed.netloc) elif self._scheme == _Boto3Driver.scheme: self._conf = copy(self._s3_configurations.get_config_by_uri(url)) self._secure = self._conf.secure final_region = region if region else self._conf.region if not final_region: final_region = None self._conf.update( key=key or self._conf.key, secret=secret or 
self._conf.secret, multipart=self._conf.multipart, region=final_region, use_credentials_chain=self._conf.use_credentials_chain, token=token or self._conf.token, profile=profile or self._conf.profile, secure=self._secure, extra_args=self._conf.extra_args, ) if not self._conf.use_credentials_chain: if not self._conf.key or not self._conf.secret: raise ValueError("Missing key and secret for S3 storage access (%s)" % base_url) self._driver = _Boto3Driver() self._container = self._driver.get_container( container_name=self._base_url, retries=retries, config=self._conf ) elif self._scheme == _GoogleCloudStorageDriver.scheme: self._conf = copy(self._gs_configurations.get_config_by_uri(url)) self._driver = _GoogleCloudStorageDriver() self._container = self._driver.get_container(container_name=self._base_url, config=self._conf) elif self._scheme in _HttpDriver.schemes: self._driver = _HttpDriver(retries=retries) self._container = self._driver.get_container(container_name=self._base_url) else: # elif self._scheme == 'file': # if this is not a known scheme assume local file # url2pathname is specifically intended to operate on (urlparse result).path # and returns a cross-platform compatible result new_url = normalize_local_path(url[len("file://") :] if url.startswith("file://") else url) self._driver = _FileStorageDriver(new_url) # noinspection PyBroadException try: self._container = self._driver.get_container("") except Exception: self._container = None @classmethod def terminate_uploads(cls, force: bool = True, timeout: float = 2.0) -> None: if force: # since async uploaders are daemon threads, we can just return and let them close by themselves return # signal all threads to terminate and give them a chance for 'timeout' seconds (total, not per-thread) # cls._terminate.set() remaining_timeout = timeout for thread in cls._async_upload_threads: t = time() # noinspection PyBroadException try: thread.join(timeout=remaining_timeout) except Exception: pass remaining_timeout -= time() - t @classmethod def get_aws_storage_uri_from_config(cls, bucket_config: BucketConfig) -> str: uri = ( "s3://{}/{}".format(bucket_config.host, bucket_config.bucket) if bucket_config.host else "s3://{}".format(bucket_config.bucket) ) if bucket_config.subdir: uri += "/" + bucket_config.subdir return uri @classmethod def get_gcp_storage_uri_from_config(cls, bucket_config: BucketConfig) -> str: return ( "gs://{}/{}".format(bucket_config.bucket, bucket_config.subdir) if bucket_config.subdir else "gs://{}".format(bucket_config.bucket) ) @classmethod def get_azure_storage_uri_from_config(cls, bucket_config: BucketConfig) -> str: return "azure://{}.blob.core.windows.net/{}".format(bucket_config.account_name, bucket_config.container_name) @classmethod def get_configuration(cls, bucket_config: BucketConfig) -> S3BucketConfig: return cls.get_aws_configuration(bucket_config) @classmethod def get_aws_configuration(cls, bucket_config: BucketConfig) -> S3BucketConfig: return cls._s3_configurations.get_config_by_bucket(bucket_config.bucket, bucket_config.host) @classmethod def get_gcp_configuration(cls, bucket_config: BucketConfig) -> GSBucketConfigurations: return cls._gs_configurations.get_config_by_uri( cls.get_gcp_storage_uri_from_config(bucket_config), create_if_not_found=False, ) @classmethod def get_azure_configuration(cls, bucket_config: AzureContainerConfig) -> AzureContainerConfig: return cls._azure_configurations.get_config(bucket_config.account_name, bucket_config.container_name) @classmethod def add_configuration( cls, 
bucket_config: BucketConfig, log: Optional[logging.Logger] = None, _test_config: bool = True, ) -> None: return cls.add_aws_configuration(bucket_config, log=log, _test_config=_test_config) @classmethod def add_aws_configuration( cls, bucket_config: BucketConfig, log: Optional[logging.Logger] = None, _test_config: bool = True, ) -> None: # Try to use existing configuration if we have no key and secret use_existing = not bucket_config.is_valid() # Get existing config anyway (we'll either try to use it or alert we're replacing it existing = cls.get_aws_configuration(bucket_config) configs = cls._s3_configurations uri = cls.get_aws_storage_uri_from_config(bucket_config) if not use_existing: # Test bucket config, fails if unsuccessful if _test_config: _Boto3Driver._test_bucket_config(bucket_config, log) # noqa if existing: if log: log.warning("Overriding existing configuration for '{}'".format(uri)) configs.remove_config(existing) configs.add_config(bucket_config) else: # Try to use existing configuration good_config = False if existing: if log: log.info("Using existing credentials for '{}'".format(uri)) good_config = _Boto3Driver._test_bucket_config(existing, log, raise_on_error=False) # noqa if not good_config: # Try to use global key/secret configs.update_config_with_defaults(bucket_config) if log: log.info("Using global credentials for '{}'".format(uri)) if _test_config: _Boto3Driver._test_bucket_config(bucket_config, log) # noqa configs.add_config(bucket_config) @classmethod def add_gcp_configuration(cls, bucket_config: BucketConfig, log: Optional[logging.Logger] = None) -> None: use_existing = not bucket_config.is_valid() existing = cls.get_gcp_configuration(bucket_config) configs = cls._gs_configurations uri = cls.get_gcp_storage_uri_from_config(bucket_config) if not use_existing: if existing: if log: log.warning("Overriding existing configuration for '{}'".format(uri)) configs.remove_config(existing) configs.add_config(bucket_config) else: good_config = False if existing: if log: log.info("Using existing config for '{}'".format(uri)) good_config = _GoogleCloudStorageDriver.test_upload(None, bucket_config) if not good_config: configs.update_config_with_defaults(bucket_config) if log: log.info("Using global credentials for '{}'".format(uri)) configs.add_config(bucket_config) @classmethod def add_azure_configuration(cls, bucket_config: BucketConfig, log: Optional[logging.Logger] = None) -> None: use_existing = not bucket_config.is_valid() existing = cls.get_azure_configuration(bucket_config) configs = cls._azure_configurations uri = cls.get_azure_storage_uri_from_config(bucket_config) if not use_existing: if existing: if log: log.warning("Overriding existing configuration for '{}'".format(uri)) configs.remove_config(existing) configs.add_config(bucket_config) else: good_config = False if existing: if log: log.info("Using existing config for '{}'".format(uri)) good_config = _AzureBlobServiceStorageDriver.test_upload(None, bucket_config) if not good_config: configs.update_config_with_defaults(bucket_config) if log: log.info("Using global credentials for '{}'".format(uri)) configs.add_config(bucket_config) @classmethod def add_path_substitution( cls, registered_prefix: str, local_prefix: str, replace_windows_sep: bool = False, replace_linux_sep: bool = False, ) -> None: """ Add a path substitution rule for storage paths. Useful for case where the data was registered under some path, and that path was later renamed. 
This may happen with local storage paths where each machine has different mounts or network drive
        configurations

        :param registered_prefix: The prefix to search for and replace. This is the prefix of the path the data is
            registered under. This should be the exact url prefix, case sensitive, as the data is registered.
        :param local_prefix: The prefix to replace 'registered_prefix' with. This is the prefix of the path the data
            is actually saved under. This should be the exact url prefix, case sensitive, as the data is saved under.
        :param replace_windows_sep: If set to True, and the prefix matches, the rest of the url has all of the
            windows path separators (backslash '\') replaced with the native os path separator.
        :param replace_linux_sep: If set to True, and the prefix matches, the rest of the url has all of the
            linux/unix path separators (slash '/') replaced with the native os path separator.
        """
        if not registered_prefix or not local_prefix:
            raise UsageError("Path substitution prefixes must be non empty strings")
        if replace_windows_sep and replace_linux_sep:
            raise UsageError("Only one of replace_windows_sep and replace_linux_sep may be set.")
        rule = cls._PathSubstitutionRule(
            registered_prefix=registered_prefix,
            local_prefix=local_prefix,
            replace_windows_sep=replace_windows_sep,
            replace_linux_sep=replace_linux_sep,
        )
        cls._path_substitutions.append(rule)

    @classmethod
    def clear_path_substitutions(cls) -> None:
        """
        Removes all path substitution rules, including ones from the configuration file.
        """
        cls._path_substitutions = list()

    def get_object_size_bytes(self, remote_url: str, silence_errors: bool = False) -> Optional[int]:
        """
        Get size of the remote file in bytes.

        :param str remote_url: The url where the file is stored. E.g. 's3://bucket/some_file.txt', 'file://local/file'
        :param bool silence_errors: Silence errors that might occur when fetching the size of the file. Default: False

        :return: The size of the file in bytes. None if the file could not be found or an error occurred.
        """
        obj = self.get_object(remote_url, silence_errors=silence_errors)
        return self._get_object_size_bytes(obj, silence_errors)

    def _get_object_size_bytes(self, obj: Any, silence_errors: bool = False) -> Optional[int]:
        """
        Auxiliary function for `get_object_size_bytes`. Get size of the remote object in bytes.

        :param object obj: The remote object
        :param bool silence_errors: Silence errors that might occur when fetching the size of the file. Default: False

        :return: The size of the object in bytes. None if an error occurred.
""" if not obj: return None size = None try: if isinstance(self._driver, _HttpDriver) and obj: obj = self._driver._get_download_object(obj) # noqa size = int(obj.headers.get("Content-Length", 0)) elif hasattr(obj, "size"): size = obj.size # Google storage has the option to reload the object to get the size if size is None and hasattr(obj, "reload"): # noinspection PyBroadException try: # To catch google.api_core exceptions obj.reload() size = obj.size except Exception as e: if not silence_errors: self.log.warning( "Failed obtaining object size on reload: {}('{}')".format(e.__class__.__name__, str(e)) ) elif hasattr(obj, "content_length"): # noinspection PyBroadException try: # To catch botocore exceptions size = obj.content_length # noqa except Exception as e: if not silence_errors: self.log.warning( "Failed obtaining content_length while getting object size: {}('{}')".format( e.__class__.__name__, str(e) ) ) except Exception as e: if not silence_errors: self.log.warning("Failed getting object size: {}('{}')".format(e.__class__.__name__, str(e))) return size def get_object_metadata(self, obj: Any) -> dict: """ Get the metadata of the remote object. The metadata is a dict containing the following keys: `name`, `size`. :param object obj: The remote object :return: A dict containing the metadata of the remote object """ name_fields = ("name", "url", "key", "blob_name") metadata = { "size": self._get_object_size_bytes(obj), "name": next(filter(None, (getattr(obj, f, None) for f in name_fields)), None), } return metadata def verify_upload( self, folder_uri: str = "", raise_on_error: bool = True, log_on_error: bool = True, ) -> str: """ Verify that this helper can upload files to a folder. An upload is possible iff: 1. the destination folder is under the base uri of the url used to create the helper 2. the helper has credentials to write to the destination folder :param folder_uri: The destination folder to test. Must be an absolute url that begins with the base uri of the url used to create the helper. :param raise_on_error: Raise an exception if an upload is not possible :param log_on_error: Log an error if an upload is not possible :return: True, if, and only if, an upload to folder_uri is possible. 
""" folder_uri = self._canonize_url(folder_uri) folder_uri = self.conform_url(folder_uri, self._base_url) test_path = self._normalize_object_name(folder_uri) if self._scheme == _Boto3Driver.scheme: _Boto3Driver._test_bucket_config( self._conf, self._log, test_path=test_path, raise_on_error=raise_on_error, log_on_error=log_on_error, ) elif self._scheme == _GoogleCloudStorageDriver.scheme: self._driver.test_upload(test_path, self._conf) elif self._scheme == "file": # Check path exists Path(test_path).mkdir(parents=True, exist_ok=True) # check path permissions Path(test_path).touch(exist_ok=True) return folder_uri def upload_from_stream( self, stream: Any, dest_path: str, extra: dict = None, retries: int = 1, return_canonized: bool = True, ) -> str: canonized_dest_path = self._canonize_url(dest_path) object_name = self._normalize_object_name(canonized_dest_path) extra = extra.copy() if extra else {} extra.update(self._extra) last_ex = None cb = UploadProgressReport.from_stream(stream, object_name, self._verbose, self._log) for i in range(max(1, int(retries))): try: self._driver.upload_object_via_stream( iterator=stream, container=self._container, object_name=object_name, callback=cb, extra=extra, ) last_ex = None break except Exception as ex: last_ex = ex # seek to beginning if possible # noinspection PyBroadException try: stream.seek(0) except Exception: pass if cb: cb.close(report_completed=not bool(last_ex)) if last_ex: raise last_ex result_dest_path = canonized_dest_path if return_canonized else dest_path if self.scheme in _StorageHelper._quotable_uri_schemes: # TODO: fix-driver-schema # quote link result_dest_path = quote_url(result_dest_path, _StorageHelper._quotable_uri_schemes) return result_dest_path def upload( self, src_path: str, dest_path: Optional[str] = None, extra: Optional[dict] = None, async_enable: bool = False, cb: Optional[Callable] = None, retries: int = 3, return_canonized: bool = True, ) -> Union[AsyncResult, str]: if not dest_path: dest_path = os.path.basename(src_path) canonized_dest_path = self._canonize_url(dest_path) dest_path = dest_path.replace("\\", "/") canonized_dest_path = canonized_dest_path.replace("\\", "/") result_path = canonized_dest_path if return_canonized else dest_path if cb and self.scheme in _StorageHelper._quotable_uri_schemes: # TODO: fix-driver-schema # store original callback a_cb = cb # quote link def callback(result: bool) -> str: return a_cb(quote_url(result_path, _StorageHelper._quotable_uri_schemes) if result else result) # replace callback with wrapper cb = callback if async_enable: data = self._UploadData( src_path=src_path, dest_path=dest_path, canonized_dest_path=canonized_dest_path, extra=extra, callback=cb, retries=retries, return_canonized=return_canonized, ) _StorageHelper._initialize_upload_pool() return _StorageHelper._upload_pool.apply_async(self._do_async_upload, args=(data,)) else: res = self._do_upload( src_path=src_path, dest_path=dest_path, canonized_dest_path=canonized_dest_path, extra=extra, cb=cb, verbose=False, retries=retries, return_canonized=return_canonized, ) if res: result_path = quote_url(result_path, _StorageHelper._quotable_uri_schemes) return result_path def list(self, prefix: Optional[str] = None, with_metadata: bool = False) -> List[Union[str, Dict[str, Any]]]: """ List entries in the helper base path. Return a list of names inside this helper base path or a list of dictionaries containing the objects' metadata. The base path is determined at creation time and is specific for each storage medium. 
        For Google Storage and S3 it is the bucket of the path. For local files it is the root directory.

        This operation is not supported for http and https protocols.

        :param prefix: If None, return the list as described above. If not None, it must be a string:
            the path of a subdirectory under the base path. The returned list will include only objects
            under that subdirectory.
        :param with_metadata: Instead of returning just the names of the objects, return a list of dictionaries
            containing the name and metadata of the remote file. Thus, each dictionary will contain the
            following keys: `name`, `size`.

        :return: The paths of all the objects in the storage base path under prefix
            or a list of dictionaries containing the objects' metadata.
            Listed relative to the base path.
        """
        if prefix:
            prefix = self._canonize_url(prefix)
            if prefix.startswith(self._base_url):
                prefix = prefix[len(self._base_url) :]
                if self._base_url != "file://":
                    prefix = prefix.lstrip("/")
            if self._base_url == "file://":
                prefix = prefix.rstrip("/")
                if prefix.startswith(str(self._driver.base_path)):
                    prefix = prefix[len(str(self._driver.base_path)) :]
            res = self._driver.list_container_objects(self._container, ex_prefix=prefix)
            result = [obj.name if not with_metadata else self.get_object_metadata(obj) for obj in res]

            if self._base_url == "file://":
                if not with_metadata:
                    result = [Path(f).as_posix() for f in result]
                else:
                    for metadata_entry in result:
                        metadata_entry["name"] = Path(metadata_entry["name"]).as_posix()
            return result
        else:
            return [
                obj.name if not with_metadata else self.get_object_metadata(obj)
                for obj in self._driver.list_container_objects(self._container)
            ]

    def download_to_file(
        self,
        remote_path: str,
        local_path: str,
        overwrite_existing: bool = False,
        delete_on_failure: bool = True,
        verbose: Optional[bool] = None,
        skip_zero_size_check: bool = False,
        silence_errors: bool = False,
        direct_access: bool = True,
    ) -> Optional[str]:
        def next_chunk(astream: Union[bytes, Iterable]) -> Tuple[Optional[bytes], Optional[Iterable]]:
            if isinstance(astream, (bytes, bytearray)):
                chunk = astream
                astream = None
            elif astream:
                try:
                    chunk = next(astream)
                except StopIteration:
                    chunk = None
            else:
                chunk = None
            return chunk, astream

        remote_path = self._canonize_url(remote_path)
        verbose = self._verbose if verbose is None else verbose

        tmp_remote_path = remote_path
        # noinspection PyBroadException
        try:
            tmp_remote_path = normalize_local_path(tmp_remote_path)
            if tmp_remote_path.exists():
                remote_path = "file://{}".format(str(tmp_remote_path))
        except Exception:
            pass

        # Check if driver type supports direct access:
        direct_access_path = self.get_driver_direct_access(remote_path)
        if direct_access_path and direct_access:
            return direct_access_path

        temp_local_path = None
        cb = None
        try:
            if verbose:
                self._log.info("Start downloading from {}".format(remote_path))
            # check for 0 sized files as well - we want to override empty files that were created
            # via mkstemp or similar functions
            if not overwrite_existing and Path(local_path).is_file() and Path(local_path).stat().st_size != 0:
                self._log.debug(
                    "File {} already exists, no need to download, thread id = {}".format(
                        local_path,
                        threading.current_thread().ident,
                    ),
                )
                return local_path
            if remote_path.startswith("file://"):
                Path(local_path).parent.mkdir(parents=True, exist_ok=True)
                # use remote_path, because direct_access_path might be None, because of access_rules
                # len("file://") == 7
                shutil.copyfile(remote_path[7:], local_path)
                return local_path
            # we download into temp_local_path so that if we accidentally stop in the middle,
            # we won't think we have the entire file
            temp_local_path = "{}_{}{}".format(local_path, time(), self._temp_download_suffix)
            obj = self.get_object(remote_path, silence_errors=silence_errors)
            if not obj:
                return None

            # object size in MB (-1 if unknown)
            total_size_mb = -1
            dl_total_mb = 0.0
            download_reported = False
            # chunk size is ignored and always 5 MB
            chunk_size_mb = 5

            # make sure we have the destination folder
            # noinspection PyBroadException
            Path(temp_local_path).parent.mkdir(parents=True, exist_ok=True)

            total_size_bytes = self.get_object_size_bytes(remote_path, silence_errors=silence_errors)
            if total_size_bytes is not None:
                total_size_mb = float(total_size_bytes) / (1024 * 1024)

            # if driver supports download with callback, use it (it might be faster)
            if hasattr(self._driver, "download_object"):
                # callback: if verbose, we already reported the download start, no need to do that again
                cb = DownloadProgressReport(
                    total_size_mb,
                    verbose,
                    remote_path,
                    self._log,
                    report_start=True if verbose else None,
                )
                self._driver.download_object(obj, temp_local_path, callback=cb)
                download_reported = bool(cb.last_reported)
                dl_total_mb = cb.current_status_mb
            else:
                stream = self._driver.download_object_as_stream(obj, chunk_size_mb * 1024 * 1024)
                if stream is None:
                    raise ValueError("Could not download %s" % remote_path)
                with open(temp_local_path, "wb") as fd:
                    data, stream = next_chunk(stream)
                    while data:
                        fd.write(data)
                        data, stream = next_chunk(stream)

            if not skip_zero_size_check and Path(temp_local_path).stat().st_size <= 0:
                raise Exception("downloaded a 0-sized file")

            # on Windows we need to remove the target file before renaming;
            # on posix, os.rename overwrites the target so no removal is needed
            if os.name != "posix":
                # noinspection PyBroadException
                try:
                    os.remove(local_path)
                except Exception:
                    pass

            # rename temp file to local_file
            # noinspection PyBroadException
            try:
                os.rename(temp_local_path, local_path)
            except Exception:
                # noinspection PyBroadException
                try:
                    os.unlink(temp_local_path)
                except Exception:
                    pass
                # file was downloaded by a parallel process, check we have the final output and delete the partial copy
                path_local_path = Path(local_path)
                if not path_local_path.is_file() or (not skip_zero_size_check and path_local_path.stat().st_size <= 0):
                    raise Exception("Failed renaming partial file: downloaded file is missing or 0-sized")

            # close the progress callback and report a summary if verbose or already reported
            if cb:
                cb.close(
                    report_completed=True,
                    report_summary=verbose or download_reported,
                    report_prefix="Downloaded",
                    report_suffix="from {} , saved to {}".format(remote_path, local_path),
                )
            elif verbose or download_reported:
                self._log.info(
                    "Downloaded {:.2f} MB successfully from {} , saved to {}".format(
                        dl_total_mb, remote_path, local_path
                    )
                )
            return local_path
        except DownloadError:
            if cb:
                cb.close()
            raise
        except Exception as e:
            if cb:
                cb.close()
            self._log.error("Could not download {} , err: {} ".format(remote_path, e))
            if delete_on_failure and temp_local_path:
                # noinspection PyBroadException
                try:
                    os.remove(temp_local_path)
                except Exception:
                    pass
            return None

    def download_as_stream(
        self, remote_path: str, chunk_size: Optional[int] = None
    ) -> Optional[Generator[bytes, None, None]]:
        remote_path = self._canonize_url(remote_path)
        try:
            obj = self.get_object(remote_path)
            return self._driver.download_object_as_stream(
                obj, chunk_size=chunk_size, verbose=self._verbose, log=self.log
            )
        except DownloadError:
            raise
        except Exception as e:
            self._log.error("Could not download file : %s, err:%s " % (remote_path, str(e)))
            return None

    def download_as_nparray(self,
remote_path: str, chunk_size: Optional[int] = None) -> Optional[numpy.ndarray]: try: stream = self.download_as_stream(remote_path, chunk_size) if stream is None: return # TODO: ugly py3 hack, please remove ASAP if not isinstance(stream, GeneratorType): import numpy as np return np.frombuffer(stream, dtype=np.uint8) else: import numpy as np return np.asarray(bytearray(b"".join(stream)), dtype=np.uint8) except Exception as e: self._log.error("Could not download file : %s, err:%s " % (remote_path, str(e))) def delete(self, path: str, silent: bool = False) -> bool: path = self._canonize_url(path) return self._driver.delete_object(self.get_object(path), silent=silent) def check_write_permissions(self, dest_path: Optional[str] = None) -> bool: # create a temporary file, then delete it base_url = dest_path or self._base_url dest_path = base_url + "/.clearml.{}.test".format(str(uuid.uuid4())) # do not check http/s connection permissions if dest_path.startswith("http"): return True try: self.upload_from_stream(stream=BytesIO(b"clearml"), dest_path=dest_path) except Exception: raise ValueError("Insufficient permissions (write failed) for {}".format(base_url)) try: self.delete(path=dest_path) except Exception: raise ValueError("Insufficient permissions (delete failed) for {}".format(base_url)) return True @classmethod def download_from_url( cls, remote_url: str, local_path: str, overwrite_existing: bool = False, skip_zero_size_check: bool = False, ) -> str: """ Download a file from remote URL to a local storage :param remote_url: Remote URL. Example: https://example.com/image.jpg or s3://bucket/folder/file.mp4 etc. :param local_path: target location for downloaded file. Example: /tmp/image.jpg :param overwrite_existing: If True, and local_path exists, it will overwrite it, otherwise print warning :param skip_zero_size_check: If True, no error will be raised for files with zero bytes size. :return: local_path if download was successful. """ helper = cls.get(remote_url) if not helper: return None return helper.download_to_file( remote_url, local_path, overwrite_existing=overwrite_existing, skip_zero_size_check=skip_zero_size_check, ) def get_driver_direct_access(self, path: str) -> Optional[str]: """ Check if the helper's driver has a direct access to the file :param str path: file path to check access to :return: Return the string representation of the file as path if have access to it, else None """ path = self._canonize_url(path) return self._driver.get_direct_access(path) @classmethod def _canonize_url(cls, url: str) -> str: return cls._apply_url_substitutions(url) @classmethod def _apply_url_substitutions(cls, url: str) -> str: def replace_separator(_url: str, where: int, sep: str) -> str: return _url[:where] + _url[where:].replace(sep, os.sep) for index, rule in enumerate(cls._path_substitutions): if url.startswith(rule.registered_prefix): url = url.replace( rule.registered_prefix, rule.local_prefix, 1, # count. 
str.replace() does not support keyword arguments ) if rule.replace_windows_sep: url = replace_separator(url, len(rule.local_prefix), "\\") if rule.replace_linux_sep: url = replace_separator(url, len(rule.local_prefix), "/") break return url @classmethod def _resolve_base_url(cls, base_url: str) -> str: parsed = urlparse(base_url) if parsed.scheme == _Boto3Driver.scheme: conf = cls._s3_configurations.get_config_by_uri(base_url) bucket = conf.bucket if not bucket: parts = Path(parsed.path.strip("/")).parts if parts: bucket = parts[0] return "/".join(x for x in ("s3:/", conf.host, bucket) if x) elif parsed.scheme == _AzureBlobServiceStorageDriver.scheme: conf = cls._azure_configurations.get_config_by_uri(base_url) if not conf: raise StorageError("Can't find azure configuration for {}".format(base_url)) return str(furl(base_url).set(path=conf.container_name)) elif parsed.scheme == _GoogleCloudStorageDriver.scheme: conf = cls._gs_configurations.get_config_by_uri(base_url) return str(furl(scheme=parsed.scheme, netloc=conf.bucket)) elif parsed.scheme in _HttpDriver.schemes: for files_server in _Driver.get_file_server_hosts(): if base_url.startswith(files_server): return files_server return parsed.scheme + "://" else: # if parsed.scheme == 'file': # if we do not know what it is, we assume file return "file://" @classmethod def conform_url(cls, folder_uri: str, base_url: str = None) -> str: if not folder_uri: return folder_uri _base_url = cls._resolve_base_url(folder_uri) if not base_url else base_url if not folder_uri.startswith(_base_url): prev_folder_uri = folder_uri if _base_url == "file://": folder_uri = str(Path(folder_uri).absolute()) if folder_uri.startswith("/"): folder_uri = _base_url + folder_uri elif platform.system() == "Windows": folder_uri = "".join((_base_url, folder_uri)) else: folder_uri = "/".join((_base_url, folder_uri)) cls._get_logger().debug( "Upload destination {} amended to {} for registration purposes".format(prev_folder_uri, folder_uri) ) else: raise ValueError("folder_uri: {} does not start with base url: {}".format(folder_uri, _base_url)) return folder_uri def _absolute_object_name(self, path: str) -> str: """Returns absolute remote path, including any prefix that is handled by the container""" if not path.startswith(self.base_url): return self.base_url.rstrip("/") + "///" + path.lstrip("/") return path def _normalize_object_name(self, path: str) -> str: """Normalize remote path. 
Remove any prefix that is already handled by the container""" if path.startswith(self.base_url): path = path[len(self.base_url) :] if path.startswith("/") and os.name == "nt": path = path[1:] if self.scheme in ( _Boto3Driver.scheme, _GoogleCloudStorageDriver.scheme, _AzureBlobServiceStorageDriver.scheme, ): path = path.lstrip("/") return path def _do_async_upload(self, data: _UploadData) -> str: assert isinstance(data, self._UploadData) return self._do_upload( data.src_path, data.dest_path, data.canonized_dest_path, extra=data.extra, cb=data.callback, verbose=True, retries=data.retries, return_canonized=data.return_canonized, ) def _upload_from_file(self, local_path: str, dest_path: str, extra: Optional[dict] = None) -> Any: if not hasattr(self._driver, "upload_object"): with open(local_path, "rb") as stream: res = self.upload_from_stream(stream=stream, dest_path=dest_path, extra=extra) else: object_name = self._normalize_object_name(dest_path) extra = extra.copy() if extra else {} extra.update(self._extra) cb = UploadProgressReport.from_file(local_path, self._verbose, self._log) res = self._driver.upload_object( file_path=local_path, container=self._container, object_name=object_name, callback=cb, extra=extra, ) if cb: cb.close() return res def _do_upload( self, src_path: str, dest_path: str, canonized_dest_path: str, extra: Optional[dict] = None, cb: Optional[Callable] = None, verbose: bool = False, retries: int = 1, return_canonized: bool = False, ) -> str: object_name = self._normalize_object_name(canonized_dest_path) if cb: try: cb(None) except Exception as e: self._log.error("Calling upload callback when starting upload: %s" % str(e)) if verbose: msg = "Starting upload: {} => {}{}".format( src_path, (self._container.name if self._container.name.endswith("/") else self._container.name + "/") if self._container and self._container.name else "", object_name, ) if object_name.startswith("file://") or object_name.startswith("/"): self._log.debug(msg) else: self._log.info(msg) last_ex = None for i in range(max(1, int(retries))): try: if not self._upload_from_file(local_path=src_path, dest_path=canonized_dest_path, extra=extra): # retry if failed last_ex = ValueError("Upload failed") continue last_ex = None break except Exception as e: last_ex = e if last_ex: self._log.error("Exception encountered while uploading %s" % str(last_ex)) if cb: try: cb(False) except Exception as e: self._log.warning("Exception on upload callback: %s" % str(e)) raise last_ex if verbose: self._log.debug("Finished upload: %s => %s" % (src_path, object_name)) if cb: try: cb(canonized_dest_path if return_canonized else dest_path) except Exception as e: self._log.warning("Exception on upload callback: %s" % str(e)) return canonized_dest_path if return_canonized else dest_path def get_object(self, path: str, silence_errors: bool = False) -> Any: """ Gets the remote object stored at path. The data held by the object differs depending on where it is stored. 
        :param str path: the path where the remote object is stored
        :param bool silence_errors: Silence errors that might occur
            when fetching the remote object

        :return: The remote object
        """
        path = self._canonize_url(path)
        object_name = self._normalize_object_name(path)
        try:
            return self._driver.get_object(
                container_name=self._container.name if self._container else "",
                object_name=object_name,
            )
        except ConnectionError:
            raise DownloadError
        except Exception as e:
            if not silence_errors:
                self.log.warning("Storage helper problem for {}: {}".format(str(object_name), str(e)))
            return None

    @staticmethod
    def _initialize_upload_pool() -> None:
        if not _StorageHelper._upload_pool or _StorageHelper._upload_pool_pid != os.getpid():
            _StorageHelper._upload_pool_pid = os.getpid()
            _StorageHelper._upload_pool = ThreadPool(processes=1)

    @staticmethod
    def close_async_threads() -> None:
        if _StorageHelper._upload_pool:
            pool = _StorageHelper._upload_pool
            _StorageHelper._upload_pool = None
            # noinspection PyBroadException
            try:
                pool.terminate()
                pool.join()
            except Exception:
                pass

    def exists_file(self, remote_url: str) -> bool:
        remote_url = self._canonize_url(remote_url)
        object_name = self._normalize_object_name(remote_url)
        return self._driver.exists_file(
            container_name=self._container.name if self._container else "",
            object_name=object_name,
        )

    @classmethod
    def sanitize_url(cls, remote_url: str) -> str:
        base_url = cls._resolve_base_url(remote_url)
        if base_url != "file://" or remote_url.startswith("file://"):
            return remote_url
        absolute_path = os.path.abspath(remote_url)
        return base_url + absolute_path


CLEARML_SECONDARY_CACHE_DIR = EnvEntry("CLEARML_SECONDARY_CACHE_DIR", type=str)

# configuration constants
_DISK_STRATEGY_SECTION = "disk_space_file_size_strategy"
_CONFIG_MISSING = object()


# Simplified urlsplit tailored for ClearML usage
def fast_urlsplit(remote_path, *_, **__):
    """Return (scheme, netloc, path, query, fragment) without pulling in urllib."""
    scheme, sep, rest = remote_path.partition("://")
    if not sep:
        # no scheme: the whole input is the path and there is no netloc
        scheme, netloc, rest = "", "", remote_path
    else:
        # a netloc only exists after an explicit scheme; keep the leading '/'
        # on the path so the result matches urllib.parse.urlsplit
        netloc, slash, rest = rest.partition("/")
        rest = slash + rest
    path_section, sep, fragment = rest.partition("#")
    if not sep:
        fragment = ""
    path, sep, query = path_section.partition("?")
    if not sep:
        query = ""
    return scheme, netloc, path, query, fragment
_StorageHelper
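A few hedged spot checks for the fast_urlsplit helper at the end of the source above; the urls are illustrative, not taken from the source, and the tuple order mirrors urllib.parse.urlsplit.

# scheme + netloc + path + query + fragment, split without urllib
assert fast_urlsplit("s3://bucket/key/obj.bin?v=1#part") == ("s3", "bucket", "/key/obj.bin", "v=1", "part")
# with a scheme but no '/', everything after '://' is the netloc
assert fast_urlsplit("s3://bucket") == ("s3", "bucket", "", "", "")
# without a scheme there is no netloc; the whole input is the path
assert fast_urlsplit("/local/data/file.txt") == ("", "", "/local/data/file.txt", "", "")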
python
kubernetes-client__python
kubernetes/client/models/v1_pod_readiness_gate.py
{ "start": 383, "end": 3891 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'condition_type': 'str' } attribute_map = { 'condition_type': 'conditionType' } def __init__(self, condition_type=None, local_vars_configuration=None): # noqa: E501 """V1PodReadinessGate - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._condition_type = None self.discriminator = None self.condition_type = condition_type @property def condition_type(self): """Gets the condition_type of this V1PodReadinessGate. # noqa: E501 ConditionType refers to a condition in the pod's condition list with matching type. # noqa: E501 :return: The condition_type of this V1PodReadinessGate. # noqa: E501 :rtype: str """ return self._condition_type @condition_type.setter def condition_type(self, condition_type): """Sets the condition_type of this V1PodReadinessGate. ConditionType refers to a condition in the pod's condition list with matching type. # noqa: E501 :param condition_type: The condition_type of this V1PodReadinessGate. # noqa: E501 :type: str """ if self.local_vars_configuration.client_side_validation and condition_type is None: # noqa: E501 raise ValueError("Invalid value for `condition_type`, must not be `None`") # noqa: E501 self._condition_type = condition_type def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1PodReadinessGate): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1PodReadinessGate): return True return self.to_dict() != other.to_dict()
V1PodReadinessGate
python
Lightning-AI__lightning
src/lightning/fabric/plugins/precision/utils.py
{ "start": 1500, "end": 2553 }
class ____: """A context manager to monkeypatch classes.""" def __init__(self, mapping: Mapping[str, type]) -> None: self._mapping = mapping self._originals = {} self._modules = {} for class_string in mapping: module_name, class_name = class_string.rsplit(".", 1) module = __import__(module_name, fromlist=[class_name]) self._modules[class_string] = module self._originals[class_string] = getattr(module, class_name) def __enter__(self) -> None: for class_string, replacement in self._mapping.items(): _, class_name = class_string.rsplit(".", 1) setattr(self._modules[class_string], class_name, replacement) def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: for class_string, replacement in self._mapping.items(): _, class_name = class_string.rsplit(".", 1) setattr(self._modules[class_string], class_name, self._originals[class_string])
_ClassReplacementContextManager
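A minimal usage sketch of the monkeypatch context manager above: keys are dotted "module.ClassName" strings, values are the replacement classes. The target class (collections.OrderedDict) is only an example.

import collections

with _ClassReplacementContextManager({"collections.OrderedDict": dict}):
    # inside the block the attribute is swapped for the replacement
    assert collections.OrderedDict is dict
# on exit the original class is restored
assert collections.OrderedDict is not dict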
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/dag_run.py
{ "start": 5291, "end": 6524 }
class ____(StrictBaseModel): """List DAG Runs body for batch endpoint.""" order_by: str | None = None page_offset: NonNegativeInt = 0 page_limit: NonNegativeInt = 100 dag_ids: list[str] | None = None states: list[DagRunState | None] | None = None run_after_gte: AwareDatetime | None = None run_after_gt: AwareDatetime | None = None run_after_lte: AwareDatetime | None = None run_after_lt: AwareDatetime | None = None logical_date_gte: AwareDatetime | None = None logical_date_gt: AwareDatetime | None = None logical_date_lte: AwareDatetime | None = None logical_date_lt: AwareDatetime | None = None start_date_gte: AwareDatetime | None = None start_date_gt: AwareDatetime | None = None start_date_lte: AwareDatetime | None = None start_date_lt: AwareDatetime | None = None end_date_gte: AwareDatetime | None = None end_date_gt: AwareDatetime | None = None end_date_lte: AwareDatetime | None = None end_date_lt: AwareDatetime | None = None duration_gte: float | None = None duration_gt: float | None = None duration_lte: float | None = None duration_lt: float | None = None conf_contains: str | None = None
DAGRunsBatchBody
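A hedged example of building the batch filter body above; field values are illustrative, and DagRunState is assumed to be importable from Airflow as in the model definition.

from datetime import datetime, timezone

body = DAGRunsBatchBody(
    dag_ids=["etl_daily", "etl_hourly"],
    states=[DagRunState.FAILED],
    # AwareDatetime fields require timezone-aware values
    start_date_gte=datetime(2024, 1, 1, tzinfo=timezone.utc),
    page_limit=50,
)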
python
python-openxml__python-docx
src/docx/image/tiff.py
{ "start": 4270, "end": 5369 }
class ____: """Image File Directory for a TIFF image, having mapping (dict) semantics allowing "tag" values to be retrieved by tag code.""" def __init__(self, entries): super(_IfdEntries, self).__init__() self._entries = entries def __contains__(self, key): """Provides ``in`` operator, e.g. ``tag in ifd_entries``""" return self._entries.__contains__(key) def __getitem__(self, key): """Provides indexed access, e.g. ``tag_value = ifd_entries[tag_code]``""" return self._entries.__getitem__(key) @classmethod def from_stream(cls, stream, offset): """Return a new |_IfdEntries| instance parsed from `stream` starting at `offset`.""" ifd_parser = _IfdParser(stream, offset) entries = {e.tag: e.value for e in ifd_parser.iter_entries()} return cls(entries) def get(self, tag_code, default=None): """Return value of IFD entry having tag matching `tag_code`, or `default` if no matching tag found.""" return self._entries.get(tag_code, default)
_IfdEntries
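A small illustration of the mapping semantics described in the docstring above, using the standard TIFF tag codes for ImageWidth (0x0100) and ImageLength (0x0101); the pixel values are made up.

entries = _IfdEntries({0x0100: 640, 0x0101: 480})
assert 0x0100 in entries          # __contains__ provides the `in` operator
assert entries[0x0101] == 480     # __getitem__ provides indexed access
assert entries.get(0x0200, None) is None  # missing tag falls back to the default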
python
getsentry__sentry
src/sentry/preprod/api/bases/preprod_artifact_endpoint.py
{ "start": 485, "end": 666 }
class ____(APIException): status_code = status.HTTP_404_NOT_FOUND default_detail = "The requested head preprod artifact does not exist"
HeadPreprodArtifactResourceDoesNotExist
python
django__django
django/contrib/auth/models.py
{ "start": 4596, "end": 10936 }
class ____(BaseUserManager): use_in_migrations = True def _create_user_object(self, username, email, password, **extra_fields): if not username: raise ValueError("The given username must be set") email = self.normalize_email(email) # Lookup the real model class from the global app registry so this # manager method can be used in migrations. This is fine because # managers are by definition working on the real model. GlobalUserModel = apps.get_model( self.model._meta.app_label, self.model._meta.object_name ) username = GlobalUserModel.normalize_username(username) user = self.model(username=username, email=email, **extra_fields) user.password = make_password(password) return user def _create_user(self, username, email, password, **extra_fields): """ Create and save a user with the given username, email, and password. """ user = self._create_user_object(username, email, password, **extra_fields) user.save(using=self._db) return user async def _acreate_user(self, username, email, password, **extra_fields): """See _create_user()""" user = self._create_user_object(username, email, password, **extra_fields) await user.asave(using=self._db) return user def create_user(self, username, email=None, password=None, **extra_fields): extra_fields.setdefault("is_staff", False) extra_fields.setdefault("is_superuser", False) return self._create_user(username, email, password, **extra_fields) create_user.alters_data = True async def acreate_user(self, username, email=None, password=None, **extra_fields): extra_fields.setdefault("is_staff", False) extra_fields.setdefault("is_superuser", False) return await self._acreate_user(username, email, password, **extra_fields) acreate_user.alters_data = True def create_superuser(self, username, email=None, password=None, **extra_fields): extra_fields.setdefault("is_staff", True) extra_fields.setdefault("is_superuser", True) if extra_fields.get("is_staff") is not True: raise ValueError("Superuser must have is_staff=True.") if extra_fields.get("is_superuser") is not True: raise ValueError("Superuser must have is_superuser=True.") return self._create_user(username, email, password, **extra_fields) create_superuser.alters_data = True async def acreate_superuser( self, username, email=None, password=None, **extra_fields ): extra_fields.setdefault("is_staff", True) extra_fields.setdefault("is_superuser", True) if extra_fields.get("is_staff") is not True: raise ValueError("Superuser must have is_staff=True.") if extra_fields.get("is_superuser") is not True: raise ValueError("Superuser must have is_superuser=True.") return await self._acreate_user(username, email, password, **extra_fields) acreate_superuser.alters_data = True def with_perm( self, perm, is_active=True, include_superusers=True, backend=None, obj=None ): if backend is None: backends = auth.get_backends() if len(backends) == 1: backend = backends[0] else: raise ValueError( "You have multiple authentication backends configured and " "therefore must provide the `backend` argument." ) elif not isinstance(backend, str): raise TypeError( "backend must be a dotted import path string (got %r)." % backend ) else: backend = auth.load_backend(backend) if hasattr(backend, "with_perm"): return backend.with_perm( perm, is_active=is_active, include_superusers=include_superusers, obj=obj, ) return self.none() # A few helper functions for common logic between User and AnonymousUser. 
def _user_get_permissions(user, obj, from_name): permissions = set() name = "get_%s_permissions" % from_name for backend in auth.get_backends(): if hasattr(backend, name): permissions.update(getattr(backend, name)(user, obj)) return permissions async def _auser_get_permissions(user, obj, from_name): permissions = set() name = "aget_%s_permissions" % from_name for backend in auth.get_backends(): if hasattr(backend, name): permissions.update(await getattr(backend, name)(user, obj)) return permissions def _user_has_perm(user, perm, obj): """ A backend can raise `PermissionDenied` to short-circuit permission checks. """ for backend in auth.get_backends(): if not hasattr(backend, "has_perm"): continue try: if backend.has_perm(user, perm, obj): return True except PermissionDenied: return False return False async def _auser_has_perm(user, perm, obj): """See _user_has_perm()""" for backend in auth.get_backends(): if not hasattr(backend, "ahas_perm"): continue try: if await backend.ahas_perm(user, perm, obj): return True except PermissionDenied: return False return False def _user_has_module_perms(user, app_label): """ A backend can raise `PermissionDenied` to short-circuit permission checks. """ for backend in auth.get_backends(): if not hasattr(backend, "has_module_perms"): continue try: if backend.has_module_perms(user, app_label): return True except PermissionDenied: return False return False async def _auser_has_module_perms(user, app_label): """See _user_has_module_perms()""" for backend in auth.get_backends(): if not hasattr(backend, "ahas_module_perms"): continue try: if await backend.ahas_module_perms(user, app_label): return True except PermissionDenied: return False return False
UserManager
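A hedged sketch of the superuser guardrails in the manager above; it assumes a configured Django project where `User` is the model this manager is attached to (e.g. django.contrib.auth.models.User).

# defaults force is_staff/is_superuser to True
User.objects.create_superuser("admin", "admin@example.com", "s3cret")

# explicitly passing a conflicting flag is rejected
try:
    User.objects.create_superuser("limited", is_staff=False)
except ValueError as err:
    assert "is_staff=True" in str(err)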
python
optuna__optuna
optuna/storages/journal/_storage.py
{ "start": 1602, "end": 16481 }
class ____(BaseStorage): """Storage class for Journal storage backend. Note that library users can instantiate this class, but the attributes provided by this class are not supposed to be directly accessed by them. Journal storage writes a record of every operation to the database as it is executed and at the same time, keeps a latest snapshot of the database in-memory. If the database crashes for any reason, the storage can re-establish the contents in memory by replaying the operations stored from the beginning. Journal storage has several benefits over the conventional value logging storages. 1. The number of IOs can be reduced because of larger granularity of logs. 2. Journal storage has simpler backend API than value logging storage. 3. Journal storage keeps a snapshot in-memory so no need to add more cache. Example: .. code:: import optuna def objective(trial): ... storage = optuna.storages.JournalStorage( optuna.storages.journal.JournalFileBackend("./optuna_journal_storage.log") ) study = optuna.create_study(storage=storage) study.optimize(objective) In a Windows environment, an error message "A required privilege is not held by the client" may appear. In this case, you can solve the problem with creating storage by specifying :class:`~optuna.storages.journal.JournalFileOpenLock` as follows. .. code:: file_path = "./optuna_journal_storage.log" lock_obj = optuna.storages.journal.JournalFileOpenLock(file_path) storage = optuna.storages.JournalStorage( optuna.storages.journal.JournalFileBackend(file_path, lock_obj=lock_obj), ) """ def __init__(self, log_storage: BaseJournalBackend) -> None: self._worker_id_prefix = str(uuid.uuid4()) + "-" self._backend = log_storage self._thread_lock = threading.Lock() self._replay_result = JournalStorageReplayResult(self._worker_id_prefix) with self._thread_lock: if isinstance(self._backend, BaseJournalSnapshot): snapshot = self._backend.load_snapshot() if snapshot is not None: self.restore_replay_result(snapshot) self._sync_with_backend() def __getstate__(self) -> dict[Any, Any]: state = self.__dict__.copy() del state["_worker_id_prefix"] del state["_replay_result"] del state["_thread_lock"] return state def __setstate__(self, state: dict[Any, Any]) -> None: self.__dict__.update(state) self._worker_id_prefix = str(uuid.uuid4()) + "-" self._replay_result = JournalStorageReplayResult(self._worker_id_prefix) self._thread_lock = threading.Lock() def restore_replay_result(self, snapshot: bytes) -> None: try: r: JournalStorageReplayResult | None = pickle.loads(snapshot) except (pickle.UnpicklingError, KeyError): _logger.warning("Failed to restore `JournalStorageReplayResult`.") return if r is None: return if not isinstance(r, JournalStorageReplayResult): _logger.warning("The restored object is not `JournalStorageReplayResult`.") return r._worker_id_prefix = self._worker_id_prefix r._worker_id_to_owned_trial_id = {} r._last_created_trial_id_by_this_process = -1 self._replay_result = r def _write_log(self, op_code: int, extra_fields: dict[str, Any]) -> None: worker_id = self._replay_result.worker_id self._backend.append_logs([{"op_code": op_code, "worker_id": worker_id, **extra_fields}]) def _sync_with_backend(self) -> None: logs = self._backend.read_logs(self._replay_result.log_number_read) self._replay_result.apply_logs(logs) def create_new_study( self, directions: Sequence[StudyDirection], study_name: str | None = None ) -> int: study_name = study_name or DEFAULT_STUDY_NAME_PREFIX + str(uuid.uuid4()) with self._thread_lock: self._write_log( 
JournalOperation.CREATE_STUDY, {"study_name": study_name, "directions": directions} ) self._sync_with_backend() for frozen_study in self._replay_result.get_all_studies(): if frozen_study.study_name != study_name: continue _logger.info("A new study created in Journal with name: {}".format(study_name)) study_id = frozen_study._study_id # Dump snapshot here. if ( isinstance(self._backend, BaseJournalSnapshot) and study_id != 0 and study_id % SNAPSHOT_INTERVAL == 0 ): self._backend.save_snapshot(pickle.dumps(self._replay_result)) return study_id assert False, "Should not reach." def delete_study(self, study_id: int) -> None: with self._thread_lock: self._write_log(JournalOperation.DELETE_STUDY, {"study_id": study_id}) self._sync_with_backend() def set_study_user_attr(self, study_id: int, key: str, value: Any) -> None: log: dict[str, Any] = {"study_id": study_id, "user_attr": {key: value}} with self._thread_lock: self._write_log(JournalOperation.SET_STUDY_USER_ATTR, log) self._sync_with_backend() def set_study_system_attr(self, study_id: int, key: str, value: JSONSerializable) -> None: log: dict[str, Any] = {"study_id": study_id, "system_attr": {key: value}} with self._thread_lock: self._write_log(JournalOperation.SET_STUDY_SYSTEM_ATTR, log) self._sync_with_backend() def get_study_id_from_name(self, study_name: str) -> int: with self._thread_lock: self._sync_with_backend() for study in self._replay_result.get_all_studies(): if study.study_name == study_name: return study._study_id raise KeyError(NOT_FOUND_MSG) def get_study_name_from_id(self, study_id: int) -> str: with self._thread_lock: self._sync_with_backend() return self._replay_result.get_study(study_id).study_name def get_study_directions(self, study_id: int) -> list[StudyDirection]: with self._thread_lock: self._sync_with_backend() return self._replay_result.get_study(study_id).directions def get_study_user_attrs(self, study_id: int) -> dict[str, Any]: with self._thread_lock: self._sync_with_backend() return self._replay_result.get_study(study_id).user_attrs def get_study_system_attrs(self, study_id: int) -> dict[str, Any]: with self._thread_lock: self._sync_with_backend() return self._replay_result.get_study(study_id).system_attrs def get_all_studies(self) -> list[FrozenStudy]: with self._thread_lock: self._sync_with_backend() return copy.deepcopy(self._replay_result.get_all_studies()) # Basic trial manipulation def create_new_trial(self, study_id: int, template_trial: FrozenTrial | None = None) -> int: log: dict[str, Any] = { "study_id": study_id, "datetime_start": datetime.datetime.now().isoformat(timespec="microseconds"), } if template_trial: log["state"] = template_trial.state if template_trial.values is not None and len(template_trial.values) > 1: log["value"] = None log["values"] = template_trial.values else: log["value"] = template_trial.value log["values"] = None if template_trial.datetime_start: log["datetime_start"] = template_trial.datetime_start.isoformat( timespec="microseconds" ) else: log["datetime_start"] = None if template_trial.datetime_complete: log["datetime_complete"] = template_trial.datetime_complete.isoformat( timespec="microseconds" ) log["distributions"] = { k: distribution_to_json(dist) for k, dist in template_trial.distributions.items() } log["params"] = { k: template_trial.distributions[k].to_internal_repr(param) for k, param in template_trial.params.items() } log["user_attrs"] = template_trial.user_attrs log["system_attrs"] = template_trial.system_attrs log["intermediate_values"] = 
template_trial.intermediate_values with self._thread_lock: self._write_log(JournalOperation.CREATE_TRIAL, log) self._sync_with_backend() trial_id = self._replay_result._last_created_trial_id_by_this_process # Dump snapshot here. if ( isinstance(self._backend, BaseJournalSnapshot) and trial_id != 0 and trial_id % SNAPSHOT_INTERVAL == 0 ): self._backend.save_snapshot(pickle.dumps(self._replay_result)) return trial_id def set_trial_param( self, trial_id: int, param_name: str, param_value_internal: float, distribution: BaseDistribution, ) -> None: log: dict[str, Any] = { "trial_id": trial_id, "param_name": param_name, "param_value_internal": param_value_internal, "distribution": distribution_to_json(distribution), } with self._thread_lock: self._write_log(JournalOperation.SET_TRIAL_PARAM, log) self._sync_with_backend() def get_trial_id_from_study_id_trial_number(self, study_id: int, trial_number: int) -> int: with self._thread_lock: self._sync_with_backend() if len(self._replay_result._study_id_to_trial_ids[study_id]) <= trial_number: raise KeyError( "No trial with trial number {} exists in study with study_id {}.".format( trial_number, study_id ) ) return self._replay_result._study_id_to_trial_ids[study_id][trial_number] def set_trial_state_values( self, trial_id: int, state: TrialState, values: Sequence[float] | None = None ) -> bool: log: dict[str, Any] = { "trial_id": trial_id, "state": state, "values": values, } if state == TrialState.RUNNING: log["datetime_start"] = datetime.datetime.now().isoformat(timespec="microseconds") elif state.is_finished(): log["datetime_complete"] = datetime.datetime.now().isoformat(timespec="microseconds") with self._thread_lock: if state == TrialState.RUNNING: # NOTE(nabenabe): This sync is not necessary because the last # set_trial_state_values call by the same thread always syncs before the true pop, # but I keep it here to avoid the confusion. Anyways, this section isn't triggered # that often because this section is only for enqueue_trial. self._sync_with_backend() # NOTE(nabenabe): This section is triggered only when we are using `enqueue_trial` # and `GrpcProxyStorage` in distributed optimization setups and solves the issue # https://github.com/optuna/optuna/issues/6084. # When using gRPC, the current thread may already have popped the trial with # trial_id for another process, potentially leading to a false positive in the # return statement of trial_id == _replay_result.owned_trial_id. To eliminate false # positives, we verify whether another process is already evaluating the trial with # trial_id. If True, it means this query does not update the trial state. existing_trial = self._replay_result._trials.get(trial_id) assert existing_trial is not None, ( "Please report your bug on GitHub if this line fails your script." ) if existing_trial.state.is_finished(): raise UpdateFinishedTrialError( UNUPDATABLE_MSG.format(trial_number=existing_trial.number) ) if existing_trial.state != TrialState.WAITING: # This line is equivalent to `existing_trial.state == TrialState.RUNNING`. 
return False self._write_log(JournalOperation.SET_TRIAL_STATE_VALUES, log) self._sync_with_backend() return state != TrialState.RUNNING or trial_id == self._replay_result.owned_trial_id def set_trial_intermediate_value( self, trial_id: int, step: int, intermediate_value: float ) -> None: log: dict[str, Any] = { "trial_id": trial_id, "step": step, "intermediate_value": intermediate_value, } with self._thread_lock: self._write_log(JournalOperation.SET_TRIAL_INTERMEDIATE_VALUE, log) self._sync_with_backend() def set_trial_user_attr(self, trial_id: int, key: str, value: Any) -> None: log: dict[str, Any] = { "trial_id": trial_id, "user_attr": {key: value}, } with self._thread_lock: self._write_log(JournalOperation.SET_TRIAL_USER_ATTR, log) self._sync_with_backend() def set_trial_system_attr(self, trial_id: int, key: str, value: JSONSerializable) -> None: log: dict[str, Any] = { "trial_id": trial_id, "system_attr": {key: value}, } with self._thread_lock: self._write_log(JournalOperation.SET_TRIAL_SYSTEM_ATTR, log) self._sync_with_backend() def get_trial(self, trial_id: int) -> FrozenTrial: with self._thread_lock: self._sync_with_backend() return self._replay_result.get_trial(trial_id) def get_all_trials( self, study_id: int, deepcopy: bool = True, states: Container[TrialState] | None = None, ) -> list[FrozenTrial]: with self._thread_lock: self._sync_with_backend() frozen_trials = self._replay_result.get_all_trials(study_id, states) if deepcopy: return copy.deepcopy(frozen_trials) return frozen_trials
JournalStorage
python
kamyu104__LeetCode-Solutions
Python/total-waviness-of-numbers-in-range-ii.py
{ "start": 98, "end": 1453 }
class ____(object): def totalWaviness(self, num1, num2): """ :type num1: int :type num2: int :rtype: int """ def count(x): def dp(i, prev, prev2, zero, tight): if i == len(s): return 1, 0 key = (i, prev, prev2, zero, tight) if key not in lookup: cnt = w = 0 mx = int(s[i]) if tight else 9 for d in xrange(mx+1): new_tight = tight and (d == int(s[i])) new_zero = zero and (d == 0) new_prev2 = prev new_prev = d if not new_zero else -1 new_cnt, nw = dp(i+1, new_prev, new_prev2, new_zero, new_tight) cnt += new_cnt if not zero and prev2 != -1 and (prev2 < prev and prev > d or prev2 > prev and prev < d): w += new_cnt w += nw lookup[key] = (cnt, w) return lookup[key] s = str(x) lookup = {} return dp(0, -1, -1, True, True)[1] return count(num2)-count(num1-1) # Time: O(logn * 11 * 11 * 2 * 2 * 10) # Space: O(logn * 11 * 11 * 2 * 2) # memoization by list
Solution
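A hedged note on the digit DP above: a digit contributes to "waviness" when it is a strict local extremum of its neighbors, count(x) sums waviness over 0..x, and the inclusive range answer is count(num2) - count(num1 - 1). Under Python 2 (the solution uses xrange), two spot checks:

sol = Solution()
assert sol.totalWaviness(121, 121) == 1  # 1 < 2 > 1: one peak at '2'
assert sol.totalWaviness(120, 121) == 2  # 120 and 121 each have one extremum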
python
huggingface__transformers
src/transformers/models/vilt/modeling_vilt.py
{ "start": 44036, "end": 50102 }
class ____(ViltPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.vilt = ViltModel(config) # Classifier head num_images = config.num_images self.classifier = nn.Sequential( nn.Linear(config.hidden_size * num_images, config.hidden_size * num_images), nn.LayerNorm(config.hidden_size * num_images), nn.GELU(), nn.Linear(config.hidden_size * num_images, config.num_labels), ) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, pixel_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, image_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[ViltForImagesAndTextClassificationOutput, tuple[torch.FloatTensor]]: r""" image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*): Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `pixel_values` into patch embeddings. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Binary classification labels. Examples: ```python >>> from transformers import ViltProcessor, ViltForImagesAndTextClassification >>> import requests >>> from PIL import Image >>> image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw) >>> image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_1.jpg", stream=True).raw) >>> text = "The left image contains twice the number of dogs as the right image." >>> processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2") >>> model = ViltForImagesAndTextClassification.from_pretrained("dandelin/vilt-b32-finetuned-nlvr2") >>> # prepare inputs >>> encoding = processor([image1, image2], text, return_tensors="pt") >>> # forward pass >>> outputs = model(input_ids=encoding.input_ids, pixel_values=encoding.pixel_values.unsqueeze(0)) >>> logits = outputs.logits >>> idx = logits.argmax(-1).item() >>> print("Predicted answer:", model.config.id2label[idx]) Predicted answer: True ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is not None and pixel_values.ndim == 4: # add dummy num_images dimension pixel_values = pixel_values.unsqueeze(1) if image_embeds is not None and image_embeds.ndim == 3: # add dummy num_images dimension image_embeds = image_embeds.unsqueeze(1) num_images = pixel_values.shape[1] if pixel_values is not None else None if num_images is None: num_images = image_embeds.shape[1] if image_embeds is not None else None if num_images != self.config.num_images: raise ValueError( "Make sure to match the number of images in the model with the number of images in the input." 
) pooler_outputs = [] hidden_states = [] if output_hidden_states else None attentions = [] if output_attentions else None for i in range(num_images): # forward every image through the model outputs = self.vilt( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, pixel_values=pixel_values[:, i, :, :, :] if pixel_values is not None else None, pixel_mask=pixel_mask[:, i, :, :] if pixel_mask is not None else None, inputs_embeds=inputs_embeds, image_embeds=image_embeds[:, i, :, :] if image_embeds is not None else None, image_token_type_idx=i + 1, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooler_output = outputs.pooler_output if return_dict else outputs[1] pooler_outputs.append(pooler_output) if output_hidden_states: hidden_states.append(outputs.hidden_states) if output_attentions: attentions.append(outputs.attentions) pooled_output = torch.cat(pooler_outputs, dim=-1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # move labels to correct device to enable PP labels = labels.to(logits.device) loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits, hidden_states, attentions) return ((loss,) + output) if loss is not None else output return ViltForImagesAndTextClassificationOutput( loss=loss, logits=logits, hidden_states=hidden_states, attentions=attentions, ) @auto_docstring
ViltForImagesAndTextClassification
python
numpy__numpy
numpy/lib/tests/test_array_utils.py
{ "start": 94, "end": 1118 }
class ____: def test_byte_bounds(self): # pointer difference matches size * itemsize # due to contiguity a = np.arange(12).reshape(3, 4) low, high = array_utils.byte_bounds(a) assert_equal(high - low, a.size * a.itemsize) def test_unusual_order_positive_stride(self): a = np.arange(12).reshape(3, 4) b = a.T low, high = array_utils.byte_bounds(b) assert_equal(high - low, b.size * b.itemsize) def test_unusual_order_negative_stride(self): a = np.arange(12).reshape(3, 4) b = a.T[::-1] low, high = array_utils.byte_bounds(b) assert_equal(high - low, b.size * b.itemsize) def test_strided(self): a = np.arange(12) b = a[::2] low, high = array_utils.byte_bounds(b) # the largest pointer address is lost (even numbers only in the # stride), and compensate addresses for striding by 2 assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize)
TestByteBounds
python
pandas-dev__pandas
asv_bench/benchmarks/io/json.py
{ "start": 846, "end": 2105 }
class ____(BaseIO): fname = "__test_lines__.json" params = ["int", "datetime"] param_names = ["index"] def setup(self, index): N = 100000 indexes = { "int": np.arange(N), "datetime": date_range("20000101", periods=N, freq="h"), } df = DataFrame( np.random.randn(N, 5), columns=[f"float_{i}" for i in range(5)], index=indexes[index], ) df.to_json(self.fname, orient="records", lines=True) def time_read_json_lines(self, index): read_json(self.fname, orient="records", lines=True) def time_read_json_lines_concat(self, index): concat(read_json(self.fname, orient="records", lines=True, chunksize=25000)) def time_read_json_lines_nrows(self, index): read_json(self.fname, orient="records", lines=True, nrows=25000) def peakmem_read_json_lines(self, index): read_json(self.fname, orient="records", lines=True) def peakmem_read_json_lines_concat(self, index): concat(read_json(self.fname, orient="records", lines=True, chunksize=25000)) def peakmem_read_json_lines_nrows(self, index): read_json(self.fname, orient="records", lines=True, nrows=15000)
ReadJSONLines
python
wandb__wandb
wandb/docker/names.py
{ "start": 37, "end": 1311 }
class ____(Exception): """The given string is not a valid repository name.""" def resolve_repository_name(repo_name: str) -> tuple[str, str]: if "://" in repo_name: raise InvalidRepositoryError( f"Repository name cannot contain a scheme ({repo_name})" ) index_name, remote_name = split_repo_name(repo_name) if index_name[0] == "-" or index_name[-1] == "-": raise InvalidRepositoryError( f"Invalid index name ({index_name}). Cannot begin or end with a hyphen." ) return resolve_index_name(index_name), remote_name def resolve_index_name(index_name: str) -> str: index_name = convert_to_hostname(index_name) if index_name == "index.docker.io": index_name = "docker.io" return index_name def split_repo_name(repo_name: str) -> tuple[str, str]: parts = repo_name.split("/", 1) if len(parts) == 1 or ( "." not in parts[0] and ":" not in parts[0] and parts[0] != "localhost" ): # This is a docker index repo (ex: username/foobar or ubuntu) return "docker.io", repo_name return parts[0], parts[1] def convert_to_hostname(url: str) -> str: return url.replace("http://", "").replace("https://", "").split("/", 1)[0]
InvalidRepositoryError
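Hedged examples of the registry-vs-repository split implemented above; the names are illustrative. A bare name falls through to the default docker.io index, while a first segment containing '.' or ':' (or equal to "localhost") is treated as a registry host.

assert split_repo_name("ubuntu") == ("docker.io", "ubuntu")
assert split_repo_name("quay.io/org/app") == ("quay.io", "org/app")
assert resolve_repository_name("localhost:5000/team/img") == ("localhost:5000", "team/img")
assert resolve_index_name("https://index.docker.io/v1/") == "docker.io"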
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_sampled_from.py
{ "start": 6030, "end": 6319 }
class ____(enum.Enum): a: "int" def test_suggests_elements_instead_of_annotations(): with pytest.raises(InvalidArgument, match=r"Cannot sample.*annotations.*dataclass"): check_can_generate_examples(st.sampled_from(AnnotationsInsteadOfElements))
AnnotationsInsteadOfElements
python
ray-project__ray
python/ray/llm/tests/serve/cpu/deployments/test_prefix_aware_request_router.py
{ "start": 2351, "end": 2440 }
class ____: def __init__(self, prompt: str): self.prompt = prompt
PromptRequest
python
tensorflow__tensorflow
tensorflow/core/function/polymorphism/function_cache.py
{ "start": 1121, "end": 3692 }
class ____: """A container for managing functions.""" __slots__ = ["_primary", "_dispatch_dict", "_garbage_collectors"] def __init__(self): # Maps (FunctionContext, FunctionType) to a function. self._primary = collections.OrderedDict() # Maps FunctionContext to a TypeDispatchTable containing FunctionTypes of # that particular context. self._dispatch_dict = {} def lookup(self, function_type: function_type_lib.FunctionType, context: Optional[FunctionContext] = None) -> Optional[Any]: """Looks up a function based on the context and type.""" context = context or FunctionContext() if context in self._dispatch_dict: dispatch_type = self._dispatch_dict[context].dispatch(function_type) if dispatch_type: return self._primary[(context, dispatch_type)] return None def delete(self, function_type: function_type_lib.FunctionType, context: Optional[FunctionContext] = None, ) -> bool: """Deletes a function given the context and type.""" context = context or FunctionContext() if (context, function_type) not in self._primary: return False del self._primary[(context, function_type)] self._dispatch_dict[context].delete(function_type) return True def add(self, fn: Any, context: Optional[FunctionContext] = None) -> None: """Adds a new function using its function_type. Args: fn: The function to be added to the cache. context: A FunctionContext representing the current context. """ context = context or FunctionContext() self._primary[(context, fn.function_type)] = fn if context not in self._dispatch_dict: self._dispatch_dict[context] = type_dispatch.TypeDispatchTable() self._dispatch_dict[context].add_target(fn.function_type) def generalize( self, context: FunctionContext, function_type: function_type_lib.FunctionType ) -> function_type_lib.FunctionType: """Try to generalize a FunctionType within a FunctionContext.""" if context in self._dispatch_dict: return self._dispatch_dict[context].try_generalizing_function_type( function_type) else: return function_type # TODO(b/205971333): Remove this function. def clear(self): """Removes all functions from the cache.""" self._primary.clear() self._dispatch_dict.clear() def values(self): """Returns a list of all functions held by this cache.""" return list(self._primary.values()) def __len__(self): return len(self._primary)
FunctionCache
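A self-contained toy of the core bookkeeping above, the (context, type) -> function mapping; it omits the TypeDispatchTable subtyping logic, and `ToyCache` is a made-up name, not the TensorFlow class:

import collections


class ToyCache:
    def __init__(self):
        # Mirrors _primary: an insertion-ordered map keyed by (context, type).
        self._primary = collections.OrderedDict()

    def add(self, context, function_type, fn):
        self._primary[(context, function_type)] = fn

    def lookup(self, context, function_type):
        return self._primary.get((context, function_type))


cache = ToyCache()
cache.add("eager", "f(x: int)", lambda x: x + 1)
assert cache.lookup("eager", "f(x: int)")(41) == 42
assert cache.lookup("graph", "f(x: int)") is None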
python
pydata__xarray
xarray/backends/zarr.py
{ "start": 3398, "end": 5929 }
class ____:
    """Handle custom logic to safely encode and decode fill values in Zarr.

    Possibly redundant with logic in xarray/coding/variables.py but needs to be
    isolated from NetCDF-specific logic.
    """

    @classmethod
    def encode(cls, value: int | float | str | bytes, dtype: np.dtype[Any]) -> Any:
        if dtype.kind in "S":
            # byte string, this implies that 'value' must also be `bytes` dtype.
            assert isinstance(value, bytes)
            return base64.standard_b64encode(value).decode()
        elif dtype.kind in "b":
            # boolean
            return bool(value)
        elif dtype.kind in "iu":
            # todo: do we want to check for decimals?
            return int(value)
        elif dtype.kind in "f":
            return base64.standard_b64encode(struct.pack("<d", float(value))).decode()
        elif dtype.kind in "U":
            return str(value)
        else:
            raise ValueError(f"Failed to encode fill_value. Unsupported dtype {dtype}")

    @classmethod
    def decode(cls, value: int | float | str | bytes, dtype: str | np.dtype[Any]):
        if dtype == "string":
            # zarr V3 string type
            return str(value)
        elif dtype == "bytes":
            # zarr V3 bytes type
            assert isinstance(value, str | bytes)
            return base64.standard_b64decode(value)
        np_dtype = np.dtype(dtype)
        if np_dtype.kind in "f":
            assert isinstance(value, str | bytes)
            return struct.unpack("<d", base64.standard_b64decode(value))[0]
        elif np_dtype.kind in "b":
            return bool(value)
        elif np_dtype.kind in "iu":
            return int(value)
        else:
            raise ValueError(f"Failed to decode fill_value. Unsupported dtype {dtype}")


def encode_zarr_attr_value(value):
    """
    Encode an attribute value as something that can be serialized as json.

    Many xarray datasets / variables have numpy arrays and values. This
    function handles encoding / decoding of such items.

    ndarray -> list
    scalar array -> scalar
    other -> other (no change)
    """
    if isinstance(value, np.ndarray):
        encoded = value.tolist()
    elif isinstance(value, np.generic):
        encoded = value.item()
    else:
        encoded = value
    return encoded


def has_zarr_async_index() -> bool:
    try:
        import zarr

        return hasattr(zarr.AsyncArray, "oindex")
    except (ImportError, AttributeError):
        return False
FillValueCoder
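The float branch above is easy to check in isolation; a runnable round trip using only the standard library:

import base64
import struct

value = 1.5
encoded = base64.standard_b64encode(struct.pack("<d", value)).decode()
decoded = struct.unpack("<d", base64.standard_b64decode(encoded))[0]
# Doubles survive the pack -> base64 -> unpack trip exactly.
assert decoded == value
print(encoded)  # e.g. 'AAAAAAAA+D8='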
python
kamyu104__LeetCode-Solutions
Python/minimum-operations-to-equalize-binary-string.py
{ "start": 1711, "end": 2212 }
class ____(object):
    def minOperations(self, s, k):
        """
        :type s: str
        :type k: int
        :rtype: int
        """
        zero = s.count('0')
        for i in range(len(s)+1):  # `xrange` in the original Python 2 source
            if (i*k-zero)&1:
                continue
            if i&1:
                if zero <= i*k <= zero*i+(len(s)-zero)*(i-1):
                    return i
            else:
                if zero <= i*k <= zero*(i-1)+(len(s)-zero)*i:
                    return i
        return -1
Solution3
python
psf__black
src/blib2to3/pytree.py
{ "start": 30322, "end": 32416 }
class ____(BasePattern):
    def __init__(self, content: BasePattern | None = None) -> None:
        """
        Initializer.

        The argument is either a pattern or None. If it is None, this only
        matches an empty sequence (effectively '$' in regex lingo). If it is
        not None, this matches whenever the argument pattern doesn't have any
        matches.
        """
        if content is not None:
            assert isinstance(content, BasePattern), repr(content)
        self.content = content

    def match(self, node, results=None) -> bool:
        # We never match a node in its entirety
        return False

    def match_seq(self, nodes, results=None) -> bool:
        # We only match an empty sequence of nodes in its entirety
        return len(nodes) == 0

    def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
        if self.content is None:
            # Return a match if there is an empty sequence
            if len(nodes) == 0:
                yield 0, {}
        else:
            # Return a match if the argument pattern has no matches
            for c, r in self.content.generate_matches(nodes):
                return
            yield 0, {}


def generate_matches(
    patterns: list[BasePattern], nodes: list[NL]
) -> Iterator[tuple[int, _Results]]:
    """
    Generator yielding matches for a sequence of patterns and nodes.

    Args:
        patterns: a sequence of patterns
        nodes: a sequence of nodes

    Yields:
        (count, results) tuples where:
        count: the entire sequence of patterns matches nodes[:count];
        results: dict containing named submatches.
    """
    if not patterns:
        yield 0, {}
    else:
        p, rest = patterns[0], patterns[1:]
        for c0, r0 in p.generate_matches(nodes):
            if not rest:
                yield c0, r0
            else:
                for c1, r1 in generate_matches(rest, nodes[c0:]):
                    r = {}
                    r.update(r0)
                    r.update(r1)
                    yield c0 + c1, r
NegatedPattern
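The recursive combinator at the end is easier to see with a toy pattern class standing in for blib2to3's real ones; a simplified, runnable re-sketch (`LiteralPattern` is hypothetical):

class LiteralPattern:
    def __init__(self, value):
        self.value = value

    def generate_matches(self, nodes):
        # Match exactly one leading node equal to our literal value.
        if nodes and nodes[0] == self.value:
            yield 1, {self.value: nodes[0]}


def generate_matches(patterns, nodes):
    if not patterns:
        yield 0, {}
        return
    head, rest = patterns[0], patterns[1:]
    for c0, r0 in head.generate_matches(nodes):
        for c1, r1 in generate_matches(rest, nodes[c0:]):
            # Consume c0 nodes with the head pattern, then recurse on the rest.
            yield c0 + c1, {**r0, **r1}


pats = [LiteralPattern("a"), LiteralPattern("b")]
print(list(generate_matches(pats, ["a", "b", "c"])))  # [(2, {'a': 'a', 'b': 'b'})]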
python
pandas-dev__pandas
pandas/core/internals/concat.py
{ "start": 8983, "end": 15152 }
class ____:
    def __init__(self, block: Block) -> None:
        self.block = block

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self.block!r})"

    def _is_valid_na_for(self, dtype: DtypeObj) -> bool:
        """
        Check that we are all-NA of a type/dtype that is compatible with this dtype.
        Augments `self.is_na` with an additional check of the type of NA values.
        """
        if not self.is_na:
            return False

        blk = self.block
        if blk.dtype.kind == "V":
            return True

        if blk.dtype == object:
            values = blk.values
            return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))

        na_value = blk.fill_value
        if na_value is NaT and blk.dtype != dtype:
            # e.g. we are dt64 and other is td64
            # fill_values match but we should not cast blk.values to dtype
            # TODO: this will need updating if we ever have non-nano dt64/td64
            return False

        if na_value is NA and needs_i8_conversion(dtype):
            # FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat
            # e.g. blk.dtype == "Int64" and dtype is td64, we don't want
            # to consider these as matching
            return False

        # TODO: better to use can_hold_element?
        return is_valid_na_for_dtype(na_value, dtype)

    @cache_readonly
    def is_na(self) -> bool:
        blk = self.block
        if blk.dtype.kind == "V":
            return True
        return False

    def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
        values: ArrayLike

        if upcasted_na is None and self.block.dtype.kind != "V":
            # No upcasting is necessary
            return self.block.values
        else:
            fill_value = upcasted_na

            if self._is_valid_na_for(empty_dtype):
                # note: always holds when self.block.dtype.kind == "V"
                blk_dtype = self.block.dtype

                if blk_dtype == np.dtype("object"):
                    # we want to avoid filling with np.nan if we are
                    # using None; we already know that we are all
                    # nulls
                    values = cast(np.ndarray, self.block.values)
                    if values.size and values[0, 0] is None:
                        fill_value = None

                return make_na_array(empty_dtype, self.block.shape, fill_value)

            return self.block.values


def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike:
    """
    Concatenate values from several join units along axis=1.
    """
    empty_dtype = _get_empty_dtype(join_units)

    has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)
    upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks)

    to_concat = [
        ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)
        for ju in join_units
    ]

    if any(is_1d_only_ea_dtype(t.dtype) for t in to_concat):
        # TODO(EA2D): special case not needed if all EAs used HybridBlocks
        # error: No overload variant of "__getitem__" of "ExtensionArray" matches
        # argument type "Tuple[int, slice]"
        to_concat = [
            t
            if is_1d_only_ea_dtype(t.dtype)
            else t[0, :]  # type: ignore[call-overload]
            for t in to_concat
        ]
        concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True)
        concat_values = ensure_block_shape(concat_values, 2)
    else:
        concat_values = concat_compat(to_concat, axis=1)

    return concat_values


def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):
    """
    Find the NA value to go with this dtype.
    """
    if isinstance(dtype, ExtensionDtype):
        return dtype.na_value
    elif dtype.kind in "mM":
        return dtype.type("NaT")
    elif dtype.kind in "fc":
        return dtype.type("NaN")
    elif dtype.kind == "b":
        # different from missing.na_value_for_dtype
        return None
    elif dtype.kind in "iu":
        if not has_none_blocks:
            # different from missing.na_value_for_dtype
            return None
        return np.nan
    elif dtype.kind == "O":
        return np.nan
    raise NotImplementedError


def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj:
    """
    Return dtype and N/A values to use when concatenating specified units.

    Returned N/A value may be None which means there was no casting involved.

    Returns
    -------
    dtype
    """
    if lib.dtypes_all_equal([ju.block.dtype for ju in join_units]):
        empty_dtype = join_units[0].block.dtype
        return empty_dtype

    has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)

    dtypes = [unit.block.dtype for unit in join_units if not unit.is_na]

    dtype = find_common_type(dtypes)
    if has_none_blocks:
        dtype = ensure_dtype_can_hold_na(dtype)

    return dtype


def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool:
    """
    Check if the join units consist of blocks of uniform type that can
    be concatenated using Block.concat_same_type instead of the generic
    _concatenate_join_units (which uses `concat_compat`).
    """
    first = join_units[0].block
    if first.dtype.kind == "V":
        return False
    return (
        # exclude cases where a) ju.block is None or b) we have e.g. Int64+int64
        all(type(ju.block) is type(first) for ju in join_units)
        and
        # e.g. DatetimeLikeBlock can be dt64 or td64, but these are not uniform
        all(
            ju.block.dtype == first.dtype
            # GH#42092 we only want the dtype_equal check for non-numeric blocks
            # (for now, may change but that would need a deprecation)
            or ju.block.dtype.kind in "iub"
            for ju in join_units
        )
        and
        # no blocks that would get missing values (can lead to type upcasts)
        # unless we're an extension dtype.
        all(not ju.is_na or ju.block.is_extension for ju in join_units)
    )
JoinUnit
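The dtype-to-NA convention in `_dtype_to_na_value` can be checked directly with numpy alone:

import numpy as np

# kind "mM" -> dtype.type("NaT"); kind "f" -> dtype.type("NaN")
print(np.dtype("M8[ns]").type("NaT"))  # NaT (datetime64)
print(np.dtype("m8[ns]").type("NaT"))  # NaT (timedelta64)
print(np.dtype("f8").type("NaN"))      # nan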
python
keras-team__keras
keras/src/legacy/saving/json_utils_test.py
{ "start": 1343, "end": 3317 }
class ____(testing.TestCase):
    def test_encode_decode_tensor_shape(self):
        metadata = {
            "key1": tf.TensorShape(None),
            "key2": [tf.TensorShape([None]), tf.TensorShape([3, None, 5])],
        }
        string = json_utils.Encoder().encode(metadata)
        loaded = json_utils.decode(string)

        self.assertEqual(set(loaded.keys()), {"key1", "key2"})
        self.assertEqual(loaded["key1"].rank, None)
        self.assertAllEqual(loaded["key2"][0].as_list(), [None])
        self.assertAllEqual(loaded["key2"][1].as_list(), [3, None, 5])

    def test_encode_decode_type_spec(self):
        spec = tf.TensorSpec((1, 5), tf.float32)
        string = json_utils.Encoder().encode(spec)
        loaded = json_utils.decode(string)
        self.assertEqual(spec, loaded)

        invalid_type_spec = {
            "class_name": "TypeSpec",
            "type_spec": "Invalid Type",
            "serialized": None,
        }
        string = json_utils.Encoder().encode(invalid_type_spec)
        with self.assertRaisesRegex(
            ValueError, "No TypeSpec has been registered"
        ):
            loaded = json_utils.decode(string)

    def test_encode_decode_ragged_tensor(self):
        x = tf.ragged.constant([[1.0, 2.0], [3.0]])
        string = json_utils.Encoder().encode(x)
        loaded = json_utils.decode(string)
        self.assertAllClose(loaded.values, x.values)

    def test_encode_decode_extension_type_tensor(self):
        class MaskedTensor(tf.experimental.ExtensionType):
            __name__ = "MaskedTensor"
            values: tf.Tensor
            mask: tf.Tensor

        x = MaskedTensor(
            values=[[1, 2, 3], [4, 5, 6]],
            mask=[[True, True, False], [True, False, True]],
        )
        string = json_utils.Encoder().encode(x)
        loaded = json_utils.decode(string)
        self.assertAllClose(loaded.values, x.values)
        self.assertAllClose(loaded.mask, x.mask)
JsonUtilsTestTF
python
arrow-py__arrow
tests/test_locales.py
{ "start": 23309, "end": 24814 }
class ____:
    def test_format_timeframe(self):
        assert self.locale._format_timeframe("now", 0) == "issa"
        assert self.locale._format_timeframe("second", 1) == "sekonda"
        assert self.locale._format_timeframe("seconds", 30) == "30 sekondi"
        assert self.locale._format_timeframe("minute", 1) == "minuta"
        assert self.locale._format_timeframe("minutes", 4) == "4 minuti"
        assert self.locale._format_timeframe("hour", 1) == "siegħa"
        assert self.locale._format_timeframe("hours", 2) == "2 sagħtejn"
        assert self.locale._format_timeframe("hours", 4) == "4 sigħat"
        assert self.locale._format_timeframe("day", 1) == "jum"
        assert self.locale._format_timeframe("days", 2) == "2 jumejn"
        assert self.locale._format_timeframe("days", 5) == "5 ijiem"
        assert self.locale._format_timeframe("month", 1) == "xahar"
        assert self.locale._format_timeframe("months", 2) == "2 xahrejn"
        assert self.locale._format_timeframe("months", 7) == "7 xhur"
        assert self.locale._format_timeframe("year", 1) == "sena"
        assert self.locale._format_timeframe("years", 2) == "2 sentejn"
        assert self.locale._format_timeframe("years", 8) == "8 snin"

    def test_weekday(self):
        dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
        assert self.locale.day_name(dt.isoweekday()) == "Is-Sibt"
        assert self.locale.day_abbreviation(dt.isoweekday()) == "S"


@pytest.mark.usefixtures("lang_locale")
TestMalteseLocale
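A usage sketch of the locale those tests exercise, assuming arrow's public locale registry resolves "mt" to the Maltese locale (`_format_timeframe` is private, used here only to mirror the test):

from arrow import locales

mt = locales.get_locale("mt")
print(mt._format_timeframe("hours", 4))  # 4 sigħat
print(mt.day_name(6))                    # Is-Sibt (ISO weekday 6 = Saturday)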
python
scipy__scipy
scipy/stats/_continuous_distns.py
{ "start": 313983, "end": 318758 }
class ____(rv_continuous):
    r"""A trapezoidal continuous random variable.

    %(before_notes)s

    Notes
    -----
    The trapezoidal distribution can be represented with an up-sloping line
    from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
    and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``.  This
    defines the trapezoid base from ``loc`` to ``(loc+scale)`` and the flat
    top from ``c`` to ``d`` proportional to the position along the base
    with ``0 <= c <= d <= 1``.  When ``c=d``, this is equivalent to `triang`
    with the same values for `loc`, `scale` and `c`.
    The method of [1]_ is used for computing moments.

    `trapezoid` takes :math:`c` and :math:`d` as shape parameters.

    %(after_notes)s

    The standard form is in the range [0, 1] with c the mode.
    The location parameter shifts the start to `loc`.
    The scale parameter changes the width from 1 to `scale`.

    %(example)s

    References
    ----------
    .. [1] Kacker, R.N. and Lawrence, J.F. (2007). Trapezoidal and triangular
       distributions for Type B evaluation of standard uncertainty.
       Metrologia 44, 117-127. :doi:`10.1088/0026-1394/44/2/003`

    """

    def _argcheck(self, c, d):
        return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)

    def _shape_info(self):
        ic = _ShapeInfo("c", False, (0, 1.0), (True, True))
        id = _ShapeInfo("d", False, (0, 1.0), (True, True))
        return [ic, id]

    def _pdf(self, x, c, d):
        u = 2 / (d-c+1)

        return _lazyselect([x < c,
                            (c <= x) & (x <= d),
                            x > d],
                           [lambda x, c, d, u: u * x / c,
                            lambda x, c, d, u: u,
                            lambda x, c, d, u: u * (1-x) / (1-d)],
                           (x, c, d, u))

    def _cdf(self, x, c, d):
        return _lazyselect([x < c,
                            (c <= x) & (x <= d),
                            x > d],
                           [lambda x, c, d: x**2 / c / (d-c+1),
                            lambda x, c, d: (c + 2 * (x-c)) / (d-c+1),
                            lambda x, c, d: 1-((1-x) ** 2 / (d-c+1) / (1-d))],
                           (x, c, d))

    def _ppf(self, q, c, d):
        qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
        condlist = [q < qc, q <= qd, q > qd]
        choicelist = [np.sqrt(q * c * (1 + d - c)),
                      0.5 * q * (1 + d - c) + 0.5 * c,
                      1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
        return np.select(condlist, choicelist)

    def _munp(self, n, c, d):
        # Using the parameterization from Kacker, 2007, with
        # a=bottom left, c=top left, d=top right, b=bottom right, then
        #     E[X^n] = h/(n+1)/(n+2) [(b^{n+2}-d^{n+2})/(b-d)
        #                             - ((c^{n+2} - a^{n+2})/(c-a)]
        # with h = 2/((b-a) - (d-c)). The corresponding parameterization
        # in scipy, has a'=loc, c'=loc+c*scale, d'=loc+d*scale, b'=loc+scale,
        # which for standard form reduces to a'=0, b'=1, c'=c, d'=d.
        # Substituting into E[X^n] gives the bd' term as (1 - d^{n+2})/(1 - d)
        # and the ac' term as c^{n-1} for the standard form. The bd' term has
        # numerical difficulties near d=1, so replace (1 - d^{n+2})/(1-d)
        # with expm1((n+2)*log(d))/(d-1).
        # Testing with n=18 for c=(1e-30,1-eps) shows that this is stable.
        # We still require an explicit test for d=1 to prevent divide by zero,
        # and now a test for d=0 to prevent log(0).
        ab_term = c**(n+1)
        dc_term = _lazyselect(
            [d == 0.0, (0.0 < d) & (d < 1.0), d == 1.0],
            [lambda d: 1.0,
             lambda d: np.expm1((n+2) * np.log(d)) / (d-1.0),
             lambda d: n+2],
            [d])
        val = 2.0 / (1.0+d-c) * (dc_term - ab_term) / ((n+1) * (n+2))
        return val

    def _entropy(self, c, d):
        # Using the parameterization from Wikipedia (van Dorp, 2003)
        # with a=bottom left, c=top left, d=top right, b=bottom right
        # gives a'=loc, b'=loc+c*scale, c'=loc+d*scale, d'=loc+scale,
        # which for loc=0, scale=1 is a'=0, b'=c, c'=d, d'=1.
        # Substituting into the entropy formula from Wikipedia gives
        # the following result.
        return 0.5 * (1.0-d+c) / (1.0+d-c) + np.log(0.5 * (1.0+d-c))

    def _fitstart(self, data, args=None):
        # Arbitrary, but c=d=1 fails due to being on edge of bounds
        if args is None:
            args = (0.33, 0.66)
        return super()._fitstart(data, args=args)


trapezoid = trapezoid_gen(a=0.0, b=1.0, name="trapezoid")
trapezoid_gen
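A quick usage sketch of the frozen distribution via scipy's public API; the flat-top value follows the pdf formula above:

from scipy import stats

dist = stats.trapezoid(c=0.2, d=0.8, loc=0.0, scale=1.0)
print(dist.pdf(0.5))  # flat-top density u = 2 / (d - c + 1) = 1.25
print(dist.cdf(1.0))  # 1.0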
python
getsentry__sentry
tests/sentry/web/frontend/test_auth_login.py
{ "start": 23612, "end": 33556 }
class ____(TestCase): @cached_property def path(self) -> str: return reverse("sentry-login") def setUp(self) -> None: super().setUp() def disable_registration(self): return self.options({"auth.allow-registration": False}) def test_renders_correct_template_existent_org(self) -> None: with self.disable_registration(): resp = self.client.get( self.path, HTTP_HOST=f"{self.organization.slug}.testserver", follow=True, ) assert resp.status_code == 200 assert resp.redirect_chain == [("http://baz.testserver/auth/login/baz/", 302)] self.assertTemplateUsed("sentry/organization-login.html") def test_renders_correct_template_existent_org_preserve_querystring(self) -> None: with self.disable_registration(): resp = self.client.get( f"{self.path}?one=two", HTTP_HOST=f"{self.organization.slug}.testserver", follow=True, ) assert resp.status_code == 200 assert resp.redirect_chain == [("http://baz.testserver/auth/login/baz/?one=two", 302)] self.assertTemplateUsed("sentry/organization-login.html") def test_renders_correct_template_nonexistent_org(self) -> None: with self.disable_registration(): resp = self.client.get( self.path, HTTP_HOST="does-not-exist.testserver", ) assert resp.status_code == 200 self.assertTemplateUsed("sentry/login.html") def test_login_valid_credentials(self) -> None: # load it once for test cookie with self.disable_registration(): self.client.get(self.path) resp = self.client.post( self.path, {"username": self.user.username, "password": "admin", "op": "login"}, HTTP_HOST="albertos-apples.testserver", follow=True, ) assert resp.status_code == 200 assert resp.redirect_chain == [ ("http://albertos-apples.testserver/auth/login/", 302), ("http://testserver/organizations/new/", 302), ] self.assertTemplateUsed("sentry/login.html") def test_login_valid_credentials_with_org(self) -> None: with self.disable_registration(): self.create_organization(name="albertos-apples", owner=self.user) # load it once for test cookie self.client.get(self.path) resp = self.client.post( self.path, {"username": self.user.username, "password": "admin", "op": "login"}, HTTP_HOST="albertos-apples.testserver", follow=True, ) assert resp.status_code == 200 assert resp.redirect_chain == [ ("http://albertos-apples.testserver/auth/login/", 302), ("http://albertos-apples.testserver/issues/", 302), ] def test_login_valid_credentials_invalid_customer_domain(self) -> None: with self.feature("system:multi-region"), self.disable_registration(): self.create_organization(name="albertos-apples", owner=self.user) # load it once for test cookie self.client.get(self.path) resp = self.client.post( self.path, {"username": self.user.username, "password": "admin", "op": "login"}, HTTP_POST="invalid.testserver", follow=True, ) assert resp.status_code == 200 assert resp.redirect_chain == [ ("http://albertos-apples.testserver/auth/login/", 302), ("http://albertos-apples.testserver/issues/", 302), ] def test_login_valid_credentials_non_staff(self) -> None: with self.disable_registration(): org = self.create_organization(name="albertos-apples") non_staff_user = self.create_user(is_staff=False) self.create_member(organization=org, user=non_staff_user) # load it once for test cookie self.client.get(self.path) resp = self.client.post( self.path, {"username": non_staff_user.username, "password": "admin", "op": "login"}, HTTP_HOST="albertos-apples.testserver", follow=True, ) assert resp.status_code == 200 assert resp.redirect_chain == [ ("http://albertos-apples.testserver/auth/login/", 302), ("http://albertos-apples.testserver/issues/", 302), ] def 
test_login_valid_credentials_not_a_member(self) -> None: user = self.create_user() self.create_organization(name="albertos-apples") self.create_member(organization=self.organization, user=user) with self.disable_registration(): # load it once for test cookie self.client.get(self.path) resp = self.client.post( self.path, {"username": user.username, "password": "admin", "op": "login"}, HTTP_HOST="albertos-apples.testserver", follow=True, ) assert resp.status_code == 200 assert resp.redirect_chain == [ (f"http://albertos-apples.testserver{reverse('sentry-login')}", 302), ( f"http://albertos-apples.testserver{reverse('sentry-auth-organization', args=['albertos-apples'])}", 302, ), ] def test_login_valid_credentials_orgless(self) -> None: user = self.create_user() self.create_organization(name="albertos-apples") with self.disable_registration(): # load it once for test cookie self.client.get(self.path) resp = self.client.post( self.path, {"username": user.username, "password": "admin", "op": "login"}, HTTP_HOST="albertos-apples.testserver", follow=True, ) assert resp.status_code == 200 assert resp.redirect_chain == [ ("http://albertos-apples.testserver/auth/login/", 302), ("http://albertos-apples.testserver/auth/login/albertos-apples/", 302), ] def test_login_valid_credentials_org_does_not_exist(self) -> None: user = self.create_user() with self.disable_registration(): # load it once for test cookie self.client.get(self.path) resp = self.client.post( self.path, {"username": user.username, "password": "admin", "op": "login"}, HTTP_HOST="albertos-apples.testserver", follow=True, ) assert resp.status_code == 200 assert resp.redirect_chain == [ ("http://albertos-apples.testserver/auth/login/", 302), ("http://testserver/organizations/new/", 302), ] def test_login_redirects_to_sso_org_does_not_exist(self) -> None: # load it once for test cookie with self.disable_registration(): user = self.create_user() self.client.get(self.path) user = self.create_user() resp = self.client.post( self.path, { "username": user.username, "password": "admin", "op": "sso", "organization": "foobar", }, HTTP_HOST="albertos-apples.testserver", follow=True, ) assert resp.status_code == 200 assert resp.redirect_chain == [("/auth/login/", 302)] # Redirects to default login def test_login_redirects_to_sso_provider_does_not_exist(self) -> None: # load it once for test cookie with self.disable_registration(): user = self.create_user() self.create_organization(name="albertos-apples") self.client.get(self.path) user = self.create_user() resp = self.client.post( self.path, { "username": user.username, "password": "admin", "op": "sso", "organization": "albertos-apples", }, HTTP_HOST="albertos-apples.testserver", follow=True, ) assert resp.status_code == 200 assert resp.redirect_chain == [ ("/auth/login/", 302), ("http://albertos-apples.testserver/auth/login/albertos-apples/", 302), ] # Redirects to default login def test_login_redirects_to_sso_provider(self) -> None: # load it once for test cookie with self.disable_registration(): user = self.create_user() custom_organization = self.create_organization(name="albertos-apples") AuthProvider.objects.create(organization_id=custom_organization.id, provider="dummy") self.client.get(self.path) user = self.create_user() resp = self.client.post( self.path, { "username": user.username, "password": "admin", "op": "sso", "organization": "albertos-apples", }, HTTP_HOST="albertos-apples.testserver", follow=True, ) assert resp.status_code == 200 assert resp.redirect_chain == 
[("/auth/login/albertos-apples/", 302)]
AuthLoginCustomerDomainTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 678759, "end": 681631 }
class ____(sgqlc.types.Type): """A repository issue template.""" __schema__ = github_schema __field_names__ = ("about", "assignees", "body", "filename", "labels", "name", "title") about = sgqlc.types.Field(String, graphql_name="about") """The template purpose.""" assignees = sgqlc.types.Field( sgqlc.types.non_null("UserConnection"), graphql_name="assignees", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """The suggested assignees. Arguments: * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. """ body = sgqlc.types.Field(String, graphql_name="body") """The suggested issue body.""" filename = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="filename") """The template filename.""" labels = sgqlc.types.Field( "LabelConnection", graphql_name="labels", args=sgqlc.types.ArgDict( ( ("order_by", sgqlc.types.Arg(LabelOrder, graphql_name="orderBy", default={"field": "CREATED_AT", "direction": "ASC"})), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ("before", sgqlc.types.Arg(String, graphql_name="before", default=None)), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) """The suggested issue labels Arguments: * `order_by` (`LabelOrder`): Ordering options for labels returned from the connection. (default: `{field: CREATED_AT, direction: ASC}`) * `after` (`String`): Returns the elements in the list that come after the specified cursor. * `before` (`String`): Returns the elements in the list that come before the specified cursor. * `first` (`Int`): Returns the first _n_ elements from the list. * `last` (`Int`): Returns the last _n_ elements from the list. """ name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name") """The template name.""" title = sgqlc.types.Field(String, graphql_name="title") """The suggested issue title."""
IssueTemplate
python
readthedocs__readthedocs.org
readthedocs/rtd_tests/tests/test_project_views.py
{ "start": 27371, "end": 29640 }
class ____(TestCase):
    def setUp(self):
        self.user = get(User)
        self.project = get(Project, slug="test", users=[self.user])
        self.version = get(Version, slug="1.0", project=self.project)
        self.webhook = get(WebHook, project=self.project)
        self.client.force_login(self.user)

    def test_list(self):
        resp = self.client.get(
            reverse("projects_webhooks", args=[self.project.slug]),
        )
        self.assertEqual(resp.status_code, 200)
        queryset = resp.context["object_list"]
        self.assertEqual(queryset.count(), 1)
        self.assertEqual(queryset.first(), self.webhook)

    def test_create(self):
        self.assertEqual(self.project.webhook_notifications.all().count(), 1)
        resp = self.client.post(
            reverse("projects_webhooks_create", args=[self.project.slug]),
            data={
                "url": "http://www.example.com/",
                "payload": "{}",
                "events": [WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED).id],
            },
        )
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(self.project.webhook_notifications.all().count(), 2)

    def test_update(self):
        self.assertEqual(self.project.webhook_notifications.all().count(), 1)
        self.client.post(
            reverse(
                "projects_webhooks_edit", args=[self.project.slug, self.webhook.pk]
            ),
            data={
                "url": "http://www.example.com/new",
                "payload": "{}",
                "events": [WebHookEvent.objects.get(name=WebHookEvent.BUILD_FAILED).id],
            },
        )
        self.webhook.refresh_from_db()
        self.assertEqual(self.webhook.url, "http://www.example.com/new")
        self.assertEqual(self.project.webhook_notifications.all().count(), 1)

    def test_delete(self):
        self.assertEqual(self.project.webhook_notifications.all().count(), 1)
        self.client.post(
            reverse(
                "projects_webhooks_delete", args=[self.project.slug, self.webhook.pk]
            ),
        )
        self.assertEqual(self.project.webhook_notifications.all().count(), 0)


@override_settings(RTD_ALLOW_ORGANIZATIONS=True)
TestWebhooksViews
python
scikit-learn__scikit-learn
sklearn/utils/_param_validation.py
{ "start": 19937, "end": 20615 }
class ____(_Constraint):
    """Helper constraint for the verbose parameter.

    Convenience class for
    [Interval(Integral, 0, None, closed="left"), bool, numpy.bool_]
    """

    def __init__(self):
        super().__init__()
        self._constraints = [
            Interval(Integral, 0, None, closed="left"),
            _InstancesOf(bool),
            _InstancesOf(np.bool_),
        ]

    def is_satisfied_by(self, val):
        return any(c.is_satisfied_by(val) for c in self._constraints)

    def __str__(self):
        return (
            f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
            f" {self._constraints[-1]}"
        )
_VerboseHelper
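A self-contained sketch of the same composition pattern with made-up class names; the real helper additionally bounds integers at >= 0 via Interval, which this omits:

class IsInstanceOf:
    def __init__(self, tp):
        self.tp = tp

    def is_satisfied_by(self, val):
        return isinstance(val, self.tp)


class AnyOf:
    """Satisfied when any sub-constraint accepts the value."""

    def __init__(self, *constraints):
        self._constraints = constraints

    def is_satisfied_by(self, val):
        return any(c.is_satisfied_by(val) for c in self._constraints)


verbose_like = AnyOf(IsInstanceOf(bool), IsInstanceOf(int))
assert verbose_like.is_satisfied_by(True)
assert verbose_like.is_satisfied_by(3)
assert not verbose_like.is_satisfied_by("yes")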
python
django__django
tests/generic_views/test_base.py
{ "start": 1489, "end": 10966 }
class ____(LoggingAssertionMixin, SimpleTestCase): rf = RequestFactory() def _assert_simple(self, response): self.assertEqual(response.status_code, 200) self.assertEqual(response.content, b"This is a simple view") def test_no_init_kwargs(self): """ A view can't be accidentally instantiated before deployment """ msg = "This method is available only on the class, not on instances." with self.assertRaisesMessage(AttributeError, msg): SimpleView(key="value").as_view() def test_no_init_args(self): """ A view can't be accidentally instantiated before deployment """ msg = "as_view() takes 1 positional argument but 2 were given" with self.assertRaisesMessage(TypeError, msg): SimpleView.as_view("value") def test_pathological_http_method(self): """ The edge case of an HTTP request that spoofs an existing method name is caught. """ self.assertEqual( SimpleView.as_view()( self.rf.get("/", REQUEST_METHOD="DISPATCH") ).status_code, 405, ) def test_get_only(self): """ Test a view which only allows GET doesn't allow other methods. """ self._assert_simple(SimpleView.as_view()(self.rf.get("/"))) self.assertEqual(SimpleView.as_view()(self.rf.post("/")).status_code, 405) self.assertEqual( SimpleView.as_view()(self.rf.get("/", REQUEST_METHOD="FAKE")).status_code, 405, ) def test_get_and_head(self): """ Test a view which supplies a GET method also responds correctly to HEAD. """ self._assert_simple(SimpleView.as_view()(self.rf.get("/"))) response = SimpleView.as_view()(self.rf.head("/")) self.assertEqual(response.status_code, 200) def test_setup_get_and_head(self): view_instance = SimpleView() self.assertFalse(hasattr(view_instance, "head")) view_instance.setup(self.rf.get("/")) self.assertTrue(hasattr(view_instance, "head")) self.assertEqual(view_instance.head, view_instance.get) def test_head_no_get(self): """ Test a view which supplies no GET method responds to HEAD with HTTP 405. """ response = PostOnlyView.as_view()(self.rf.head("/")) self.assertEqual(response.status_code, 405) def test_get_and_post(self): """ Test a view which only allows both GET and POST. """ self._assert_simple(SimplePostView.as_view()(self.rf.get("/"))) self._assert_simple(SimplePostView.as_view()(self.rf.post("/"))) self.assertEqual( SimplePostView.as_view()( self.rf.get("/", REQUEST_METHOD="FAKE") ).status_code, 405, ) def test_invalid_keyword_argument(self): """ View arguments must be predefined on the class and can't be named like an HTTP method. """ msg = ( "The method name %s is not accepted as a keyword argument to " "SimpleView()." ) # Check each of the allowed method names for method in SimpleView.http_method_names: with self.assertRaisesMessage(TypeError, msg % method): SimpleView.as_view(**{method: "value"}) # Check the case view argument is ok if predefined on the class... CustomizableView.as_view(parameter="value") # ...but raises errors otherwise. msg = ( "CustomizableView() received an invalid keyword 'foobar'. " "as_view only accepts arguments that are already attributes of " "the class." ) with self.assertRaisesMessage(TypeError, msg): CustomizableView.as_view(foobar="value") def test_calling_more_than_once(self): """ Test a view can only be called once. """ request = self.rf.get("/") view = InstanceView.as_view() self.assertNotEqual(view(request), view(request)) def test_class_attributes(self): """ The callable returned from as_view() has proper special attributes. 
""" cls = SimpleView view = cls.as_view() self.assertEqual(view.__doc__, cls.__doc__) self.assertEqual(view.__name__, "view") self.assertEqual(view.__module__, cls.__module__) self.assertEqual(view.__qualname__, f"{cls.as_view.__qualname__}.<locals>.view") self.assertEqual(view.__annotations__, cls.dispatch.__annotations__) self.assertFalse(hasattr(view, "__wrapped__")) def test_dispatch_decoration(self): """ Attributes set by decorators on the dispatch method are also present on the closure. """ self.assertTrue(DecoratedDispatchView.as_view().is_decorated) def test_options(self): """ Views respond to HTTP OPTIONS requests with an Allow header appropriate for the methods implemented by the view class. """ request = self.rf.options("/") view = SimpleView.as_view() response = view(request) self.assertEqual(200, response.status_code) self.assertTrue(response.headers["Allow"]) def test_options_for_get_view(self): """ A view implementing GET allows GET and HEAD. """ request = self.rf.options("/") view = SimpleView.as_view() response = view(request) self._assert_allows(response, "GET", "HEAD") def test_options_for_get_and_post_view(self): """ A view implementing GET and POST allows GET, HEAD, and POST. """ request = self.rf.options("/") view = SimplePostView.as_view() response = view(request) self._assert_allows(response, "GET", "HEAD", "POST") def test_options_for_post_view(self): """ A view implementing POST allows POST. """ request = self.rf.options("/") view = PostOnlyView.as_view() response = view(request) self._assert_allows(response, "POST") def _assert_allows(self, response, *expected_methods): "Assert allowed HTTP methods reported in the Allow response header" response_allows = set(response.headers["Allow"].split(", ")) self.assertEqual(set(expected_methods + ("OPTIONS",)), response_allows) def test_args_kwargs_request_on_self(self): """ Test a view only has args, kwargs & request once `as_view` has been called. """ bare_view = InstanceView() view = InstanceView.as_view()(self.rf.get("/")) for attribute in ("args", "kwargs", "request"): self.assertNotIn(attribute, dir(bare_view)) self.assertIn(attribute, dir(view)) def test_overridden_setup(self): class SetAttributeMixin: def setup(self, request, *args, **kwargs): self.attr = True super().setup(request, *args, **kwargs) class CheckSetupView(SetAttributeMixin, SimpleView): def dispatch(self, request, *args, **kwargs): assert hasattr(self, "attr") return super().dispatch(request, *args, **kwargs) response = CheckSetupView.as_view()(self.rf.get("/")) self.assertEqual(response.status_code, 200) def test_not_calling_parent_setup_error(self): class TestView(View): def setup(self, request, *args, **kwargs): pass # Not calling super().setup() msg = ( "TestView instance has no 'request' attribute. Did you override " "setup() and forget to call super()?" ) with self.assertRaisesMessage(AttributeError, msg): TestView.as_view()(self.rf.get("/")) def test_setup_adds_args_kwargs_request(self): request = self.rf.get("/") args = ("arg 1", "arg 2") kwargs = {"kwarg_1": 1, "kwarg_2": "year"} view = View() view.setup(request, *args, **kwargs) self.assertEqual(request, view.request) self.assertEqual(args, view.args) self.assertEqual(kwargs, view.kwargs) def test_direct_instantiation(self): """ It should be possible to use the view by directly instantiating it without going through .as_view() (#21564). 
""" view = PostOnlyView() response = view.dispatch(self.rf.head("/")) self.assertEqual(response.status_code, 405) def test_method_not_allowed_response_logged(self): for path, escaped in [ ("/foo/", "/foo/"), (r"/%1B[1;31mNOW IN RED!!!1B[0m/", r"/\x1b[1;31mNOW IN RED!!!1B[0m/"), ]: with self.subTest(path=path): request = self.rf.get(path, REQUEST_METHOD="BOGUS") with self.assertLogs("django.request", "WARNING") as handler: response = SimpleView.as_view()(request) self.assertLogRecord( handler, f"Method Not Allowed (BOGUS): {escaped}", logging.WARNING, 405, request, ) self.assertEqual(response.status_code, 405) @override_settings(ROOT_URLCONF="generic_views.urls")
ViewTest
python
streamlit__streamlit
lib/streamlit/testing/v1/element_tree.py
{ "start": 54111, "end": 54615 }
class ____(Block):
    """A representation of a column within ``st.columns``."""

    type: str = field(repr=False)
    proto: BlockProto.Column = field(repr=False)
    weight: float
    gap: str

    def __init__(
        self,
        proto: BlockProto.Column,
        root: ElementTree,
    ) -> None:
        self.children = {}
        self.proto = proto
        self.root = root
        self.type = "column"
        self.weight = proto.weight
        self.gap = proto.gap


@dataclass(repr=False)
Column
python
facebook__pyre-check
source/command/test/integration/fake_repository/commit_006_T30944862/a.py
{ "start": 331, "end": 402 }
class ____(Base):
    def foo(self, x: str) -> None:
        pass
Derived
python
getsentry__sentry
src/sentry/preprod/api/endpoints/size_analysis/project_preprod_size_analysis_download.py
{ "start": 916, "end": 6069 }
class ____(PreprodArtifactEndpoint): owner = ApiOwner.EMERGE_TOOLS publish_status = { "GET": ApiPublishStatus.EXPERIMENTAL, } def get( self, request: Request, project: Project, head_artifact_id: str, head_artifact: PreprodArtifact, ) -> HttpResponseBase: """ Download size analysis results for a preprod artifact ```````````````````````````````````````````````````` Download the size analysis results for a preprod artifact. :pparam string organization_id_or_slug: the id or slug of the organization the artifact belongs to. :pparam string project_id_or_slug: the id or slug of the project to retrieve the artifact from. :pparam string head_artifact_id: the ID of the preprod artifact to download size analysis for. :auth: required """ analytics.record( PreprodArtifactApiSizeAnalysisDownloadEvent( organization_id=project.organization_id, project_id=project.id, user_id=request.user.id, artifact_id=head_artifact_id, ) ) if not settings.IS_DEV and not features.has( "organizations:preprod-frontend-routes", project.organization, actor=request.user ): return Response({"error": "Feature not enabled"}, status=403) try: size_metrics_qs = head_artifact.get_size_metrics() size_metrics_count = size_metrics_qs.count() if size_metrics_count == 0: return Response( {"error": "Size analysis results not available for this artifact"}, status=404, ) elif size_metrics_count > 1: return Response( {"error": "Multiple size analysis results found for this artifact"}, status=409, ) size_metrics = size_metrics_qs.first() if size_metrics is None: logger.info( "preprod.size_analysis.download.no_size_metrics", extra={"artifact_id": head_artifact_id}, ) return Response( {"error": "Size analysis not found"}, status=404, ) # Handle different analysis states match size_metrics.state: case ( PreprodArtifactSizeMetrics.SizeAnalysisState.PENDING | PreprodArtifactSizeMetrics.SizeAnalysisState.PROCESSING ): return Response( { "state": ( "pending" if size_metrics.state == PreprodArtifactSizeMetrics.SizeAnalysisState.PENDING else "processing" ), "message": "Size analysis is still processing", }, status=200, ) case PreprodArtifactSizeMetrics.SizeAnalysisState.FAILED: return Response( { "state": "failed", "error_code": size_metrics.error_code, "error_message": size_metrics.error_message or "Size analysis failed", }, status=422, ) case PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED: if size_metrics.analysis_file_id is None: logger.error( "preprod.size_analysis.download.completed_without_file", extra={ "artifact_id": head_artifact_id, "size_metrics_id": size_metrics.id, }, ) return Response( {"error": "Size analysis completed but results are unavailable"}, status=500, ) return get_size_analysis_file_response(size_metrics) case _: logger.error( "preprod.size_analysis.download.unknown_state", extra={ "artifact_id": head_artifact_id, "size_metrics_id": size_metrics.id, "state": size_metrics.state, }, ) return Response( {"error": "Size analysis in unexpected state"}, status=500, ) except SizeAnalysisError as e: return get_size_analysis_error_response(e)
ProjectPreprodArtifactSizeAnalysisDownloadEndpoint
python
Netflix__metaflow
metaflow/_vendor/click/_winconsole.py
{ "start": 2119, "end": 3076 }
class ____(ctypes.Structure):
    _fields_ = [
        ("buf", c_void_p),
        ("obj", py_object),
        ("len", c_ssize_t),
        ("itemsize", c_ssize_t),
        ("readonly", c_int),
        ("ndim", c_int),
        ("format", c_char_p),
        ("shape", c_ssize_p),
        ("strides", c_ssize_p),
        ("suboffsets", c_ssize_p),
        ("internal", c_void_p),
    ]

    if PY2:
        _fields_.insert(-1, ("smalltable", c_ssize_t * 2))


# On PyPy we cannot get buffers so our ability to operate here is
# severely limited.
if pythonapi is None:
    get_buffer = None
else:

    def get_buffer(obj, writable=False):
        buf = Py_buffer()
        flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
        PyObject_GetBuffer(py_object(obj), byref(buf), flags)

        try:
            buffer_type = c_char * buf.len
            return buffer_type.from_address(buf.buf)
        finally:
            PyBuffer_Release(byref(buf))
Py_buffer
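On modern CPython a writable view over a Python buffer can be had without the manual Py_buffer dance; a runnable alternative using only ctypes:

import ctypes

data = bytearray(b"hello")
view = (ctypes.c_char * len(data)).from_buffer(data)  # zero-copy, writable
view[0] = b"H"
print(data)  # bytearray(b'Hello')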
python
doocs__leetcode
solution/3300-3399/3316.Find Maximum Removals From Source String/Solution.py
{ "start": 0, "end": 512 }
class ____:
    def maxRemovals(self, source: str, pattern: str, targetIndices: List[int]) -> int:
        m, n = len(source), len(pattern)
        f = [[-inf] * (n + 1) for _ in range(m + 1)]
        f[0][0] = 0
        s = set(targetIndices)
        for i, c in enumerate(source, 1):
            for j in range(n + 1):
                f[i][j] = f[i - 1][j] + int((i - 1) in s)
                if j and c == pattern[j - 1]:
                    f[i][j] = max(f[i][j], f[i - 1][j - 1])
        return f[m][n]
Solution
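To run the snippet standalone it needs `from math import inf` and `from typing import List` in scope before the class is defined; with those in place, a quick check against what I believe is the problem's first sample:

from math import inf
from typing import List

# ... Solution class definition from above ...

print(Solution().maxRemovals("abbaa", "aba", [0, 1, 2]))  # expected: 1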
python
huggingface__transformers
src/transformers/generation/stopping_criteria.py
{ "start": 25731, "end": 26732 }
class ____(StoppingCriteria):
    """
    This class can be used to stop generation whenever the "end-of-sequence" token is generated. By default, it uses
    the `model.generation_config.eos_token_id`.

    Args:
        eos_token_id (`Union[int, list[int], torch.Tensor]`):
            The id(s) of the *end-of-sequence* token.
    """

    def __init__(self, eos_token_id: int | list[int] | torch.Tensor):
        if not isinstance(eos_token_id, torch.Tensor):
            if isinstance(eos_token_id, int):
                eos_token_id = [eos_token_id]
            eos_token_id = torch.tensor(eos_token_id)
        self.eos_token_id = eos_token_id

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
        self.eos_token_id = self.eos_token_id.to(input_ids.device)
        is_done = isin_mps_friendly(input_ids[:, -1], self.eos_token_id)
        return is_done
EosTokenCriteria
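The core check in isolation, substituting plain `torch.isin` for the MPS-friendly wrapper the class routes through:

import torch

eos_token_id = torch.tensor([2])
input_ids = torch.tensor([[5, 7, 2],
                          [5, 7, 9]])
# True where a sequence's last token is one of the EOS ids.
print(torch.isin(input_ids[:, -1], eos_token_id))  # tensor([ True, False])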
python
pdm-project__pdm
src/pdm/models/backends.py
{ "start": 2853, "end": 4607 }
class ____(BuildBackend):
    def expand_line(self, line: str, expand_env: bool = True) -> str:
        return line.format(
            env=EnvContext(expand=expand_env),
            root=PathContext(self.root),
            home=PathContext(Path.home()),
        )

    def relative_path_to_url(self, path: str) -> str:
        if os.path.isabs(path):
            return Path(path).as_uri()
        return f"{{root:uri}}/{urllib.parse.quote(path)}"

    @classmethod
    def build_system(cls) -> BuildSystem:
        return {
            "requires": ["hatchling"],
            "build-backend": "hatchling.build",
        }


_BACKENDS: dict[str, type[BuildBackend]] = {
    "pdm-backend": PDMBackend,
    "setuptools": SetuptoolsBackend,
    "flit-core": FlitBackend,
    "hatchling": HatchBackend,
}
# Fallback to the first backend
DEFAULT_BACKEND = next(iter(_BACKENDS.values()))


def get_backend(name: str) -> type[BuildBackend]:
    """Get the build backend class by name"""
    return _BACKENDS[name]


def get_backend_by_spec(spec: dict) -> type[BuildBackend]:
    """Get the build backend class by specification.

    The parameter passed in is the 'build-system' section in pyproject.toml.
    """
    if "build-backend" not in spec:
        return DEFAULT_BACKEND
    for backend_cls in _BACKENDS.values():
        if backend_cls.build_system()["build-backend"] == spec["build-backend"]:
            return backend_cls
    return DEFAULT_BACKEND


def get_relative_path(url: str) -> str | None:
    if url.startswith("file:///${PROJECT_ROOT}"):
        return urllib.parse.unquote(url[len("file:///${PROJECT_ROOT}/") :])
    if url.startswith("{root:uri}"):
        return urllib.parse.unquote(url[len("{root:uri}/") :])
    return None
HatchBackend
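A usage sketch of `get_relative_path`, assuming the function from the cell above is in scope:

print(get_relative_path("{root:uri}/sub%20dir/pkg"))     # sub dir/pkg
print(get_relative_path("file:///${PROJECT_ROOT}/pkg"))  # pkg
print(get_relative_path("https://example.com/pkg"))      # None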
python
tensorflow__tensorflow
tensorflow/python/checkpoint/sharding/sharding_policies_test.py
{ "start": 1855, "end": 31775 }
class ____(test.TestCase): def _get_shardable_tensors_by_task(self, root): serialized_tensors, _, _, _ = ( checkpoint.TrackableSaver(graph_view.ObjectGraphView(root)) ._gather_serialized_tensors(None)) shardable_tensors_by_task = {} for obj, tensor_dict in serialized_tensors.items(): # Divide tensor_dict by device. for checkpoint_key, tensor_slice_dict in tensor_dict.items(): if not isinstance(tensor_slice_dict, dict): # Make sure that maybe_tensor is structured as {slice_spec -> tensor}. tensor_slice_dict = {"": tensor_slice_dict} for slice_spec, tensor_save_spec in tensor_slice_dict.items(): if not isinstance(tensor_save_spec, saveable_object.SaveSpec): tensor_save_spec = saveable_object.SaveSpec( tensor=tensor_save_spec, slice_spec=slice_spec, name=checkpoint_key, dtype=tensor_save_spec.dtype, device=tensor_save_spec.device) save_spec_tensor = tensor_save_spec.tensor device = (device_lib.DeviceSpec.from_string(tensor_save_spec.device) if isinstance(tensor_save_spec.device, str) else tensor_save_spec.device) task = device_lib.DeviceSpec.from_string( saveable_object_util.set_cpu0(device.to_string())) shardable_tensors_by_task.setdefault(task, []).append( sharding_util.ShardableTensor( _tensor_save_spec=tensor_save_spec, tensor=save_spec_tensor, dtype=tensor_save_spec.dtype, device=device, name=tensor_save_spec.name, shape=save_spec_tensor.shape, slice_spec=slice_spec, checkpoint_key=checkpoint_key, trackable=obj)) return shardable_tensors_by_task.values() def test_ShardByTaskPolicy(self): servers = [server_lib.Server.create_local_server() for _ in range(3)] cluster_spec = server_lib.ClusterSpec({ "worker": [s.target[len("grpc://"):] for s in servers]}) remote.connect_to_cluster(cluster_spec) root = module.Module() with ops.device("/job:worker/task:0/cpu:0"): v0 = resource_variable_ops.ResourceVariable(0.0, name="v0") with ops.device("/job:worker/task:1/cpu:0"): v1 = resource_variable_ops.ResourceVariable(1.0, name="v1") with ops.device("/job:worker/task:2/cpu:0"): v2 = resource_variable_ops.ResourceVariable(2.0, name="v2") root.v0 = v0 root.v1 = v1 root.v2 = v2 shardable_tensors = self._get_shardable_tensors_by_task(root) callback = sharding_policies.ShardByTaskPolicy() shards = [] for tensors in shardable_tensors: shards.extend(callback(tensors)) self.assertAllEqual( [set(shard.keys()) for shard in shards], [ {"v0/.ATTRIBUTES/VARIABLE_VALUE"}, {"v1/.ATTRIBUTES/VARIABLE_VALUE"}, {"v2/.ATTRIBUTES/VARIABLE_VALUE"}, {"_CHECKPOINTABLE_OBJECT_GRAPH"} ]) self.assertEqual( self.evaluate(shards[0]["v0/.ATTRIBUTES/VARIABLE_VALUE"][""]), v0.numpy()) self.assertEqual( self.evaluate(shards[1]["v1/.ATTRIBUTES/VARIABLE_VALUE"][""]), v1.numpy()) self.assertEqual( self.evaluate(shards[2]["v2/.ATTRIBUTES/VARIABLE_VALUE"][""]), v2.numpy()) def test_CheckpointOption_ShardByTaskPolicy(self): servers = [server_lib.Server.create_local_server() for _ in range(3)] cluster_spec = server_lib.ClusterSpec({ "worker": [s.target[len("grpc://"):] for s in servers]}) remote.connect_to_cluster(cluster_spec) root = module.Module() with ops.device("/job:worker/task:0/cpu:0"): v0 = resource_variable_ops.ResourceVariable(0.0, name="v0") self.evaluate(v0.initializer) with ops.device("/job:worker/task:1/cpu:0"): v1 = resource_variable_ops.ResourceVariable(1.0, name="v1") self.evaluate(v1.initializer) with ops.device("/job:worker/task:2/cpu:0"): v2 = resource_variable_ops.ResourceVariable(2.0, name="v2") self.evaluate(v2.initializer) root.v0 = v0 root.v1 = v1 root.v2 = v2 tmp_dir = self.create_tempdir("ckpt") ckpt = 
checkpoint.Checkpoint(root) save_path = ckpt.save( tmp_dir, options=checkpoint_options.CheckpointOptions( experimental_sharding_callback=( sharding_policies.ShardByTaskPolicy()))) self.assertLen(gfile.Glob(save_path + ".data*"), 4) ckpt.restore(save_path) @test_util.run_in_graph_and_eager_modes def test_MaxShardSizePolicy_1D(self): root = module.Module() with ops.device("cpu:0"): v0 = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0, 3.0], name="v0", dtype=dtypes.float32) v1 = resource_variable_ops.ResourceVariable([[4], [5], [6], [7]], name="v1", dtype=dtypes.int32) self.evaluate(v0.initializer) self.evaluate(v1.initializer) root.v0 = v0 root.v1 = v1 v0_name = "v0/.ATTRIBUTES/VARIABLE_VALUE" v1_name = "v1/.ATTRIBUTES/VARIABLE_VALUE" class V0SaveSliceInfo(variables.Variable.SaveSliceInfo): def __init__(self, var_offset, var_shape): super().__init__( full_name=v0_name, full_shape=tensor_shape.TensorShape(dims=[4]), var_offset=var_offset, var_shape=var_shape) class V1SaveSliceInfo(variables.Variable.SaveSliceInfo): def __init__(self, var_offset, var_shape): super().__init__( full_name=v1_name, full_shape=tensor_shape.TensorShape(dims=[4, 1]), var_offset=var_offset, var_shape=var_shape) shardable_tensors = self._get_shardable_tensors_by_task(root) # Test sharding the v0 & v1 tensors with different max shard sizes. # max_shard_size: 4 bytes # Each element of v0/v1 is a 32 bit/4 byte value, so each variable should be # split into 4 shards. callback = sharding_policies.MaxShardSizePolicy(max_shard_size=4) shards = [] for tensors in shardable_tensors: shards.extend(callback(tensors)) self.assertEqual( [set(shard.keys()) for shard in shards], [ {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE", "_CHECKPOINTABLE_OBJECT_GRAPH",} ]) # V0 slice_spec = V0SaveSliceInfo(var_offset=[0], var_shape=[1]).spec self.assertEqual(self.evaluate(shards[0][v0_name][slice_spec]), 0.0) slice_spec = V0SaveSliceInfo(var_offset=[1], var_shape=[1]).spec self.assertEqual(self.evaluate(shards[1][v0_name][slice_spec]), 1.0) slice_spec = V0SaveSliceInfo(var_offset=[2], var_shape=[1]).spec self.assertEqual(self.evaluate(shards[2][v0_name][slice_spec]), 2.0) slice_spec = V0SaveSliceInfo(var_offset=[3], var_shape=[1]).spec self.assertEqual(self.evaluate(shards[3][v0_name][slice_spec]), 3.0) # V1 slice_spec = V1SaveSliceInfo(var_offset=[0, 0], var_shape=[1, 1]).spec self.assertEqual(self.evaluate(shards[4][v1_name][slice_spec]), [4]) slice_spec = V1SaveSliceInfo(var_offset=[1, 0], var_shape=[1, 1]).spec self.assertEqual(self.evaluate(shards[5][v1_name][slice_spec]), [5]) slice_spec = V1SaveSliceInfo(var_offset=[2, 0], var_shape=[1, 1]).spec self.assertEqual(self.evaluate(shards[6][v1_name][slice_spec]), [6]) slice_spec = V1SaveSliceInfo(var_offset=[3, 0], var_shape=[1, 1]).spec self.assertEqual(self.evaluate(shards[7][v1_name][slice_spec]), [7]) # max_shard_size: 8 bytes # v0/v1 haven't changed, so they should now be split into 2 shards each. 
callback = sharding_policies.MaxShardSizePolicy(max_shard_size=8) shards = [] for tensors in shardable_tensors: shards.extend(callback(tensors)) self.assertEqual( [set(shard.keys()) for shard in shards], [ {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE", "_CHECKPOINTABLE_OBJECT_GRAPH",} ]) # V0 slice_spec = V0SaveSliceInfo(var_offset=[0], var_shape=[2]).spec self.assertAllEqual( self.evaluate(shards[0][v0_name][slice_spec]), [0.0, 1.0]) slice_spec = V0SaveSliceInfo(var_offset=[2], var_shape=[2]).spec self.assertAllEqual( self.evaluate(shards[1][v0_name][slice_spec]), [2.0, 3.0]) # V1 slice_spec = V1SaveSliceInfo(var_offset=[0, 0], var_shape=[2, 1]).spec self.assertAllEqual( self.evaluate(shards[2][v1_name][slice_spec]), [[4], [5]]) slice_spec = V1SaveSliceInfo(var_offset=[2, 0], var_shape=[2, 1]).spec self.assertAllEqual( self.evaluate(shards[3][v1_name][slice_spec]), [[6], [7]]) # max_shard_size: 10 bytes # 10 bytes is an uneven boundary for 4 byte elements. v0/v1 should be split # into 2 shards each. callback = sharding_policies.MaxShardSizePolicy(max_shard_size=10) shards = [] for tensors in shardable_tensors: shards.extend(callback(tensors)) self.assertEqual( [set(shard.keys()) for shard in shards], [ {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE", "_CHECKPOINTABLE_OBJECT_GRAPH",} ]) # V0 slice_spec = V0SaveSliceInfo(var_offset=[0], var_shape=[2]).spec self.assertAllEqual( self.evaluate(shards[0][v0_name][slice_spec]), [0.0, 1.0]) slice_spec = V0SaveSliceInfo(var_offset=[2], var_shape=[2]).spec self.assertAllEqual( self.evaluate(shards[1][v0_name][slice_spec]), [2.0, 3.0]) # V1 slice_spec = V1SaveSliceInfo(var_offset=[0, 0], var_shape=[2, 1]).spec self.assertAllEqual( self.evaluate(shards[2][v1_name][slice_spec]), [[4], [5]]) slice_spec = V1SaveSliceInfo(var_offset=[2, 0], var_shape=[2, 1]).spec self.assertAllEqual( self.evaluate(shards[3][v1_name][slice_spec]), [[6], [7]]) # max_shard_size: 16 bytes # 16 bytes the exact size of each variable, so they should get 1 shard each. callback = sharding_policies.MaxShardSizePolicy(max_shard_size=16) shards = [] for tensors in shardable_tensors: shards.extend(callback(tensors)) self.assertEqual( [set(shard.keys()) for shard in shards], [ {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE", "_CHECKPOINTABLE_OBJECT_GRAPH",} ]) # V0 slice_spec = V0SaveSliceInfo(var_offset=[0], var_shape=[4]).spec self.assertAllEqual( self.evaluate(shards[0][v0_name][slice_spec]), [0.0, 1.0, 2.0, 3.0]) # V1 slice_spec = V1SaveSliceInfo(var_offset=[0, 0], var_shape=[4, 1]).spec self.assertAllEqual( self.evaluate(shards[1][v1_name][slice_spec]), [[4], [5], [6], [7]]) # max_shard_size: 18 bytes # 18 bytes slightly larger than the size of each variable, but not large # enough to fit another 4 byte element, so they should get 1 shard each. 
callback = sharding_policies.MaxShardSizePolicy(max_shard_size=18) shards = [] for tensors in shardable_tensors: shards.extend(callback(tensors)) self.assertEqual( [set(shard.keys()) for shard in shards], [ {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE", "_CHECKPOINTABLE_OBJECT_GRAPH",} ]) # V0 slice_spec = V0SaveSliceInfo(var_offset=[0], var_shape=[4]).spec self.assertAllEqual( self.evaluate(shards[0][v0_name][slice_spec]), [0.0, 1.0, 2.0, 3.0]) # V1 slice_spec = V1SaveSliceInfo(var_offset=[0, 0], var_shape=[4, 1]).spec self.assertAllEqual( self.evaluate(shards[1][v1_name][slice_spec]), [[4], [5], [6], [7]]) @test_util.run_in_graph_and_eager_modes def test_MaxShardSizePolicy_2D(self): root = module.Module() with ops.device("cpu:0"): v0 = resource_variable_ops.ResourceVariable([[0, 1], [2, 3], [4, 5]], name="v0") v1 = resource_variable_ops.ResourceVariable([[[6.0], [7.0]], [[8.0], [9.0]], [[10.0], [11.0]]], name="v1") self.evaluate(v0.initializer) self.evaluate(v1.initializer) root.v0 = v0 root.v1 = v1 v0_name = "v0/.ATTRIBUTES/VARIABLE_VALUE" v1_name = "v1/.ATTRIBUTES/VARIABLE_VALUE" class V0SaveSliceInfo(variables.Variable.SaveSliceInfo): def __init__(self, var_offset, var_shape): super().__init__( full_name=v0_name, full_shape=tensor_shape.TensorShape(dims=[3, 2]), var_offset=var_offset, var_shape=var_shape) class V1SaveSliceInfo(variables.Variable.SaveSliceInfo): def __init__(self, var_offset, var_shape): super().__init__( full_name=v1_name, full_shape=tensor_shape.TensorShape(dims=[3, 2, 1]), var_offset=var_offset, var_shape=var_shape) shardable_tensors = self._get_shardable_tensors_by_task(root) # Test sharding the v0 & v1 tensors with different max shard sizes. # max_shard_size: 8 bytes # Each element of v0/v1 is a 32 bit/4 byte value, so each variable should be # split into 3 shards. callback = sharding_policies.MaxShardSizePolicy(max_shard_size=8) shards = [] for tensors in shardable_tensors: shards.extend(callback(tensors)) self.assertEqual( [set(shard.keys()) for shard in shards], [ {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE", "_CHECKPOINTABLE_OBJECT_GRAPH",} ]) # V0 slice_spec = V0SaveSliceInfo(var_offset=[0, 0], var_shape=[1, 2]).spec self.assertAllEqual( self.evaluate(shards[0][v0_name][slice_spec]), [[0, 1]]) slice_spec = V0SaveSliceInfo(var_offset=[1, 0], var_shape=[1, 2]).spec self.assertAllEqual( self.evaluate(shards[1][v0_name][slice_spec]), [[2, 3]]) slice_spec = V0SaveSliceInfo(var_offset=[2, 0], var_shape=[1, 2]).spec self.assertAllEqual( self.evaluate(shards[2][v0_name][slice_spec]), [[4, 5]]) # V1 slice_spec = V1SaveSliceInfo(var_offset=[0, 0, 0], var_shape=[1, 2, 1]).spec self.assertAllEqual( self.evaluate(shards[3][v1_name][slice_spec]), [[[6.0], [7.0]]]) slice_spec = V1SaveSliceInfo(var_offset=[1, 0, 0], var_shape=[1, 2, 1]).spec self.assertAllEqual( self.evaluate(shards[4][v1_name][slice_spec]), [[[8.0], [9.0]]]) slice_spec = V1SaveSliceInfo(var_offset=[2, 0, 0], var_shape=[1, 2, 1]).spec self.assertAllEqual( self.evaluate(shards[5][v1_name][slice_spec]), [[[10.0], [11.0]]]) # max_shard_size: 10 bytes # 10 bytes is an uneven boundary for 4 byte elements. v0/v1 should be split # into 3 shards each. 
callback = sharding_policies.MaxShardSizePolicy(max_shard_size=10) shards = [] for tensors in shardable_tensors: shards.extend(callback(tensors)) self.assertEqual( [set(shard.keys()) for shard in shards], [ {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE", "_CHECKPOINTABLE_OBJECT_GRAPH",} ]) # V0 slice_spec = V0SaveSliceInfo(var_offset=[0, 0], var_shape=[1, 2]).spec self.assertAllEqual( self.evaluate(shards[0][v0_name][slice_spec]), [[0, 1]]) slice_spec = V0SaveSliceInfo(var_offset=[1, 0], var_shape=[1, 2]).spec self.assertAllEqual( self.evaluate(shards[1][v0_name][slice_spec]), [[2, 3]]) slice_spec = V0SaveSliceInfo(var_offset=[2, 0], var_shape=[1, 2]).spec self.assertAllEqual( self.evaluate(shards[2][v0_name][slice_spec]), [[4, 5]]) # V1 slice_spec = V1SaveSliceInfo(var_offset=[0, 0, 0], var_shape=[1, 2, 1]).spec self.assertAllEqual( self.evaluate(shards[3][v1_name][slice_spec]), [[[6.0], [7.0]]]) slice_spec = V1SaveSliceInfo(var_offset=[1, 0, 0], var_shape=[1, 2, 1]).spec self.assertAllEqual( self.evaluate(shards[4][v1_name][slice_spec]), [[[8.0], [9.0]]]) slice_spec = V1SaveSliceInfo(var_offset=[2, 0, 0], var_shape=[1, 2, 1]).spec self.assertAllEqual( self.evaluate(shards[5][v1_name][slice_spec]), [[[10.0], [11.0]]]) # max_shard_size: 12 bytes # 12 bytes is enough to fit 3 elements per variable in each shard. # v0/v1 should be split into 2 shards each. callback = sharding_policies.MaxShardSizePolicy(max_shard_size=12) shards = [] for tensors in shardable_tensors: shards.extend(callback(tensors)) self.assertEqual( [set(shard.keys()) for shard in shards], [ {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v0/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE",}, {"v1/.ATTRIBUTES/VARIABLE_VALUE", "_CHECKPOINTABLE_OBJECT_GRAPH",} ]) # V0 slice_spec = V0SaveSliceInfo(var_offset=[0, 0], var_shape=[3, 1]).spec self.assertAllEqual( self.evaluate(shards[0][v0_name][slice_spec]), [[0], [2], [4]]) slice_spec = V0SaveSliceInfo(var_offset=[0, 1], var_shape=[3, 1]).spec self.assertAllEqual( self.evaluate(shards[1][v0_name][slice_spec]), [[1], [3], [5]]) # V1 slice_spec = V1SaveSliceInfo(var_offset=[0, 0, 0], var_shape=[3, 1, 1]).spec self.assertAllEqual( self.evaluate(shards[2][v1_name][slice_spec]), [[[6.0]], [[8.0]], [[10.0]]]) slice_spec = V1SaveSliceInfo(var_offset=[0, 1, 0], var_shape=[3, 1, 1]).spec self.assertAllEqual( self.evaluate(shards[3][v1_name][slice_spec]), [[[7.0]], [[9.0]], [[11.0]]]) # max_shard_size: 16 bytes # Each variable should be split into 1.5 shards. The middle shard will # contain elements from both variables. 
    callback = sharding_policies.MaxShardSizePolicy(max_shard_size=16)
    shards = []
    for tensors in shardable_tensors:
      shards.extend(callback(tensors))
    self.assertEqual(
        [set(shard.keys()) for shard in shards],
        [
            {"v0/.ATTRIBUTES/VARIABLE_VALUE",},
            {"v0/.ATTRIBUTES/VARIABLE_VALUE",
             "v1/.ATTRIBUTES/VARIABLE_VALUE"},
            {"v1/.ATTRIBUTES/VARIABLE_VALUE",
             "_CHECKPOINTABLE_OBJECT_GRAPH",}
        ])

    # V0
    slice_spec = V0SaveSliceInfo(var_offset=[0, 0], var_shape=[2, 2]).spec
    self.assertAllEqual(
        self.evaluate(shards[0][v0_name][slice_spec]), [[0, 1], [2, 3]])
    slice_spec = V0SaveSliceInfo(var_offset=[2, 0], var_shape=[1, 2]).spec
    self.assertAllEqual(
        self.evaluate(shards[1][v0_name][slice_spec]), [[4, 5]])

    # V1
    slice_spec = V1SaveSliceInfo(var_offset=[0, 0, 0], var_shape=[1, 2, 1]).spec
    self.assertAllEqual(
        self.evaluate(shards[1][v1_name][slice_spec]), [[[6.0], [7.0]]])
    slice_spec = V1SaveSliceInfo(var_offset=[1, 0, 0], var_shape=[2, 2, 1]).spec
    self.assertAllEqual(
        self.evaluate(shards[2][v1_name][slice_spec]),
        [[[8.0], [9.0]], [[10.0], [11.0]]])

  @test_util.run_in_graph_and_eager_modes
  def test_MaxShardSizePolicy_Strings(self):
    v_strings = [
        "".join(random.choices(string.ascii_uppercase + string.digits, k=10))
        for _ in range(4)]

    root = module.Module()
    with ops.device("cpu:0"):
      v0 = resource_variable_ops.ResourceVariable(v_strings, name="v0",
                                                  dtype=dtypes.string)
      self.evaluate(v0.initializer)
    root.v0 = v0
    v0_name = "v0/.ATTRIBUTES/VARIABLE_VALUE"

    class V0SaveSliceInfo(variables.Variable.SaveSliceInfo):
      def __init__(self, var_offset, var_shape):
        super().__init__(
            full_name=v0_name,
            full_shape=tensor_shape.TensorShape(dims=[4]),
            var_offset=var_offset,
            var_shape=var_shape)

    shardable_tensors = self._get_shardable_tensors_by_task(root)

    # Test sharding the v0 tensor with different max shard sizes.

    # max_shard_size: 10 bytes
    # Each string in v0 is 10 bytes, so there should be 1 string per shard.
    callback = sharding_policies.MaxShardSizePolicy(max_shard_size=10)
    shards = []
    for tensors in shardable_tensors:
      shards.extend(callback(tensors))
    self.assertEqual(
        [set(shard.keys()) for shard in shards],
        [
            {"v0/.ATTRIBUTES/VARIABLE_VALUE",},
            {"v0/.ATTRIBUTES/VARIABLE_VALUE",},
            {"v0/.ATTRIBUTES/VARIABLE_VALUE",},
            {"v0/.ATTRIBUTES/VARIABLE_VALUE",
             "_CHECKPOINTABLE_OBJECT_GRAPH",}
        ])

    slice_spec = V0SaveSliceInfo(var_offset=[0], var_shape=[1]).spec
    self.assertAllEqual(
        self.evaluate(shards[0][v0_name][slice_spec]), [v_strings[0]])
    slice_spec = V0SaveSliceInfo(var_offset=[1], var_shape=[1]).spec
    self.assertAllEqual(
        self.evaluate(shards[1][v0_name][slice_spec]), [v_strings[1]])
    slice_spec = V0SaveSliceInfo(var_offset=[2], var_shape=[1]).spec
    self.assertAllEqual(
        self.evaluate(shards[2][v0_name][slice_spec]), [v_strings[2]])
    slice_spec = V0SaveSliceInfo(var_offset=[3], var_shape=[1]).spec
    self.assertAllEqual(
        self.evaluate(shards[3][v0_name][slice_spec]), [v_strings[3]])

  @test_util.run_in_graph_and_eager_modes
  def test_MaxShardSizePolicy_LargeScalar(self):
    v_string = "".join(random.choices(
        string.ascii_uppercase + string.digits, k=10)).encode("utf-8")

    root = module.Module()
    with ops.device("cpu:0"):
      v0 = resource_variable_ops.ResourceVariable(
          v_string, name="v0", dtype=dtypes.string)
      self.evaluate(v0.initializer)
    root.v0 = v0
    v0_name = "v0/.ATTRIBUTES/VARIABLE_VALUE"

    shardable_tensors = self._get_shardable_tensors_by_task(root)

    # max_shard_size: 8 bytes
    callback = sharding_policies.MaxShardSizePolicy(max_shard_size=8)
    shards = []
    for tensors in shardable_tensors:
      shards.extend(callback(tensors))
    self.assertEqual(
        [set(shard.keys()) for shard in shards],
        [
            {"_CHECKPOINTABLE_OBJECT_GRAPH",},
            {"v0/.ATTRIBUTES/VARIABLE_VALUE",}
        ])

    tensor_val = (self.evaluate(shards[1][v0_name][""])
                  if ops.context.executing_eagerly()
                  else shards[1][v0_name][""])
    self.assertEqual(tensor_val, v_string)

  @test_util.run_in_graph_and_eager_modes
  def test_CheckpointOption_MaxShardSizePolicy(self):
    root = module.Module()
    with ops.device("cpu:0"):
      v0 = resource_variable_ops.ResourceVariable([[0, 1], [2, 3], [4, 5]],
                                                  name="v0")
      v1 = resource_variable_ops.ResourceVariable([[[6.0], [7.0]],
                                                   [[8.0], [9.0]],
                                                   [[10.0], [11.0]]],
                                                  name="v1")
      v2 = resource_variable_ops.ResourceVariable("test_string", name="v2")
      self.evaluate(v0.initializer)
      self.evaluate(v1.initializer)
      self.evaluate(v2.initializer)
    root.v0 = v0
    root.v1 = v1
    root.v2 = v2

    tmp_dir = self.create_tempdir("ckpt")
    ckpt = checkpoint.Checkpoint(root)
    save_path = ckpt.save(
        tmp_dir, options=checkpoint_options.CheckpointOptions(
            experimental_sharding_callback=(
                sharding_policies.MaxShardSizePolicy(max_shard_size=10))))
    # 8 files = 3 shards for v0, 3 for v1, 1 for v2, and 1 for the object graph
    self.assertLen(gfile.Glob(save_path + ".data*"), 8)
    ckpt.restore(save_path)

  @test_util.run_in_graph_and_eager_modes
  def test_MaxShardSizePolicy_PreSlicedTensor(self):
    root = module.Module()
    sliced_v0_name = "sliced_v0/.ATTRIBUTES/VARIABLE_VALUE"

    class V0SaveSliceInfo(variables.Variable.SaveSliceInfo):
      def __init__(self, var_offset, var_shape):
        super().__init__(
            full_name=sliced_v0_name,
            full_shape=tensor_shape.TensorShape(dims=[2, 5]),
            var_offset=var_offset,
            var_shape=var_shape)

    v0_slice_spec = V0SaveSliceInfo(var_offset=[0, 1], var_shape=[2, 3])

    class ResourceVariableWithSliceSpec(
        resource_variable_ops.ResourceVariable):
      def _serialize_to_tensors(self):
        ckpt_key, tensor = list(super()._serialize_to_tensors().items())[0]
        return {ckpt_key: {v0_slice_spec.spec: tensor}}
    with ops.device("cpu:0"):
      # full_v0 = [[0.0, 1.0, 2.0, 3.0, 4.0],
      #            [5.0, 6.0, 7.0, 8.0, 9.0]]
      sliced_v0 = ResourceVariableWithSliceSpec([[1.0, 2.0, 3.0],
                                                 [6.0, 7.0, 8.0]],
                                                name="sliced_v0",
                                                dtype=dtypes.float32)
      sliced_v0._set_save_slice_info(v0_slice_spec)
      self.evaluate(sliced_v0.initializer)
    root.sliced_v0 = sliced_v0

    shardable_tensors = self._get_shardable_tensors_by_task(root)

    # Test sharding the pre-sliced v0 tensor with different max shard sizes.

    # max_shard_size: 8 bytes
    # Each element of v0 is a 32 bit/4 byte value, so v0 should be split into 3
    # shards containing 2 elements each.
    callback = sharding_policies.MaxShardSizePolicy(max_shard_size=8)
    shards = []
    for tensors in shardable_tensors:
      shards.extend(callback(tensors))
    self.assertEqual(
        [set(shard.keys()) for shard in shards],
        [
            {"sliced_v0/.ATTRIBUTES/VARIABLE_VALUE",},
            {"sliced_v0/.ATTRIBUTES/VARIABLE_VALUE",},
            {"sliced_v0/.ATTRIBUTES/VARIABLE_VALUE",
             "_CHECKPOINTABLE_OBJECT_GRAPH",},
        ])

    slice_spec = V0SaveSliceInfo(var_offset=[0, 1], var_shape=[2, 1]).spec
    self.assertAllEqual(
        self.evaluate(shards[0][sliced_v0_name][slice_spec]), [[1.0], [6.0]])
    slice_spec = V0SaveSliceInfo(var_offset=[0, 2], var_shape=[1, 2]).spec
    self.assertAllEqual(
        self.evaluate(shards[1][sliced_v0_name][slice_spec]), [[2.0, 3.0]])
    slice_spec = V0SaveSliceInfo(var_offset=[1, 2], var_shape=[1, 2]).spec
    self.assertAllEqual(
        self.evaluate(shards[2][sliced_v0_name][slice_spec]), [[7.0, 8.0]])

    # max_shard_size: 12 bytes
    # Each element of v0 is a 32 bit/4 byte value, so v0 should be split into 2
    # shards containing 3 elements each.
    callback = sharding_policies.MaxShardSizePolicy(max_shard_size=12)
    shards = []
    for tensors in shardable_tensors:
      shards.extend(callback(tensors))
    self.assertEqual(
        [set(shard.keys()) for shard in shards],
        [
            {"sliced_v0/.ATTRIBUTES/VARIABLE_VALUE",},
            {"sliced_v0/.ATTRIBUTES/VARIABLE_VALUE",
             "_CHECKPOINTABLE_OBJECT_GRAPH",},
        ])

    slice_spec = V0SaveSliceInfo(var_offset=[0, 1], var_shape=[1, 3]).spec
    self.assertAllEqual(
        self.evaluate(shards[0][sliced_v0_name][slice_spec]),
        [[1.0, 2.0, 3.0]])
    slice_spec = V0SaveSliceInfo(var_offset=[1, 1], var_shape=[1, 3]).spec
    self.assertAllEqual(
        self.evaluate(shards[1][sliced_v0_name][slice_spec]),
        [[6.0, 7.0, 8.0]])

  def test_MaxShardSizePolicy_TFFunction(self):
    v_string = "".join(random.choices(
        string.ascii_uppercase + string.digits, k=10)).encode("utf-8")

    root = module.Module()
    with ops.device("cpu:0"):
      v0 = resource_variable_ops.ResourceVariable(
          v_string, name="v0", dtype=dtypes.string)
      self.evaluate(v0.initializer)
    root.v0 = v0

    shardable_tensors = self._get_shardable_tensors_by_task(root)

    @def_function.function
    def wrapped_policy(shardable_tensors):
      callback = sharding_policies.MaxShardSizePolicy(max_shard_size=4)
      shards = []
      for tensors in shardable_tensors:
        shards.extend(callback(tensors))
      return shards

    # TODO(b/326287351): Get string tensor size in tf.function.
    # This test case should be changed when the bug is fixed/warning removed.
    with self.assertLogs(level="WARNING") as log_output:
      log_level = logging.get_verbosity()
      logging.set_verbosity(logging.WARNING)
      try:
        wrapped_policy(shardable_tensors)
      finally:
        logging.set_verbosity(log_level)
    output = log_output[0][0].message
    self.assertTrue(
        re.search("sharding policy is being executed in a tf.function",
                  output))


if __name__ == "__main__":
  ops.enable_eager_execution()
  test.main()
ShardingPoliciesTest
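
# The policy exercised above can also be wired into an ordinary save, as the
# test_CheckpointOption_MaxShardSizePolicy case shows. A minimal standalone
# sketch using the same internal TF modules the test imports; the save path
# and variable contents below are illustrative, not from the source.
import tensorflow as tf
from tensorflow.python.checkpoint import checkpoint_options
from tensorflow.python.checkpoint.sharding import sharding_policies

root = tf.Module()
root.v = tf.Variable([0.0, 1.0, 2.0, 3.0])  # float32: 4 elements * 4 bytes

ckpt = tf.train.Checkpoint(root)
# Cap each shard at 8 bytes of tensor data; the 16-byte variable above is
# then written across multiple .data shard files.
save_path = ckpt.save(
    "/tmp/sharded_ckpt",
    options=checkpoint_options.CheckpointOptions(
        experimental_sharding_callback=sharding_policies.MaxShardSizePolicy(
            max_shard_size=8)))
ckpt.restore(save_path)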
python
ansible__ansible
lib/ansible/errors/__init__.py
{ "start": 633, "end": 1105 }
class ____(enum.IntEnum): SUCCESS = 0 # used by TQM, must be bit-flag safe GENERIC_ERROR = 1 # used by TQM, must be bit-flag safe HOST_FAILED = 2 # TQM-sourced, must be bit-flag safe HOST_UNREACHABLE = 4 # TQM-sourced, must be bit-flag safe PARSER_ERROR = 4 # FIXME: CLI-sourced, conflicts with HOST_UNREACHABLE INVALID_CLI_OPTION = 5 UNICODE_ERROR = 6 # obsolete, no longer used KEYBOARD_INTERRUPT = 99 UNKNOWN_ERROR = 250
ExitCode
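
# The FIXME on PARSER_ERROR flags a real enum pitfall: with enum.IntEnum, a
# second member with an already-used value becomes an alias rather than a
# distinct member. A self-contained demonstration (the Demo class is
# illustrative, not from ansible):
import enum

class Demo(enum.IntEnum):
    HOST_UNREACHABLE = 4
    PARSER_ERROR = 4  # duplicate value -> alias of HOST_UNREACHABLE

assert Demo.PARSER_ERROR is Demo.HOST_UNREACHABLE
assert Demo(4).name == "HOST_UNREACHABLE"  # lookup resolves to the first member
assert [m.name for m in Demo] == ["HOST_UNREACHABLE"]  # aliases are skipped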
python
conda__conda
conda/models/enums.py
{ "start": 892, "end": 1211 }
class ____(Enum): freebsd = "freebsd" linux = "linux" win = "win32" openbsd = "openbsd5" osx = "darwin" zos = "zos" emscripten = "emscripten" wasi = "wasi" @classmethod def from_sys(cls): return cls(sys.platform) def __json__(self): return self.value
Platform
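
# from_sys() is a plain by-value Enum lookup, so it only succeeds on platforms
# that have a member. A short usage sketch, assuming the Platform enum above
# is in scope and the host is Linux:
import sys

plat = Platform.from_sys()         # equivalent to Platform(sys.platform)
print(plat.name, plat.__json__())  # -> "linux linux" when sys.platform == "linux"
# A platform string without a member raises ValueError:
#   Platform("haiku")  ->  ValueError: 'haiku' is not a valid Platform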
python
dask__dask
dask/backends.py
{ "start": 445, "end": 1458 }
class ____: """Base Collection-Backend Entrypoint Class Most methods in this class correspond to collection-creation for a specific library backend. Once a collection is created, the existing data will be used to dispatch compute operations within individual tasks. The backend is responsible for ensuring that these data-directed dispatch functions are registered when ``__init__`` is called. """ @classmethod def to_backend_dispatch(cls): """Return a dispatch function to move data to this backend""" raise NotImplementedError @staticmethod def to_backend(data): """Create a new collection with this backend""" raise NotImplementedError @lru_cache(maxsize=1) def detect_entrypoints(entry_point_name): return { ep.name: ep for ep in importlib_metadata.entry_points(group=entry_point_name) } BackendEntrypointType = TypeVar( "BackendEntrypointType", bound="DaskBackendEntrypoint", )
DaskBackendEntrypoint
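
# Since to_backend_dispatch() and to_backend() both raise NotImplementedError,
# a concrete backend must override them. A minimal hypothetical subclass
# sketch (EchoBackendEntrypoint and its trivial bodies are invented for
# illustration; a real backend would also register its data-directed dispatch
# functions in __init__):
class EchoBackendEntrypoint(DaskBackendEntrypoint):
    @classmethod
    def to_backend_dispatch(cls):
        # A real backend returns a dispatch that converts constituent
        # partition data to its library's containers; identity stands in here.
        return lambda data: data

    @staticmethod
    def to_backend(data):
        # A real backend returns a new collection backed by its library.
        return data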
python
getsentry__sentry
src/sentry/users/models/authenticator.py
{ "start": 1469, "end": 5057 }
class ____(BaseManager["Authenticator"]):
    def all_interfaces_for_user(
        self, user: User, return_missing: bool = False, ignore_backup: bool = False
    ) -> list[OtpMixin | AuthenticatorInterface]:
        """Returns a correctly sorted list of all interfaces the user
        has enabled. If `return_missing` is set to `True` then all
        interfaces are returned even if not enabled.
        """
        # Collect interfaces user is enrolled in
        ifaces = [
            x.interface
            for x in Authenticator.objects.filter(
                user_id=user.id,
                type__in=[a.type for a in available_authenticators(ignore_backup=ignore_backup)],
            )
        ]

        if return_missing:
            # Collect additional interfaces that the user
            # is not enrolled in
            rvm = dict(AUTHENTICATOR_INTERFACES)
            for iface in ifaces:
                rvm.pop(iface.interface_id, None)
            for iface_cls in rvm.values():
                if iface_cls.is_available:
                    ifaces.append(iface_cls())

        return sorted(ifaces, key=lambda interface: (interface.type == 0, interface.type))

    def auto_add_recovery_codes(
        self, user: User, force: bool = False
    ) -> RecoveryCodeInterface | None:
        """This automatically adds the recovery code backup interface in
        case no backup interface is currently set for the user. Returns
        the interface that was added.
        """
        from sentry.auth.authenticators.recovery_code import RecoveryCodeInterface

        has_authenticators = False

        # If we're not forcing, check for a backup interface already setup
        # or if it's missing, we'll need to set it.
        if not force:
            for authenticator in Authenticator.objects.filter(
                user_id=user.id, type__in=[a.type for a in available_authenticators()]
            ):
                iface = authenticator.interface
                if iface.is_backup_interface:
                    return None
                has_authenticators = True

        if has_authenticators or force:
            interface = RecoveryCodeInterface()
            interface.enroll(user)
            return interface
        return None

    def get_interface(self, user: User, interface_id: str) -> OtpMixin | AuthenticatorInterface:
        """Looks up an interface by interface ID for a user. If the
        interface is not available but configured, an
        `Authenticator.DoesNotExist` will be raised, just as if the
        authenticator was not configured at all.
        """
        interface = AUTHENTICATOR_INTERFACES.get(interface_id)
        if interface is None or not interface.is_available:
            raise LookupError("No such interface %r" % interface_id)
        try:
            return Authenticator.objects.get(user_id=user.id, type=interface.type).interface
        except Authenticator.DoesNotExist:
            return interface.generate(EnrollmentStatus.NEW)

    def bulk_users_have_2fa(self, user_ids: list[int]) -> dict[int, bool]:
        """Checks if a list of user ids have 2FA configured.
        Returns a dict of {<id>: <has_2fa>}
        """
        authenticators = set(
            Authenticator.objects.filter(
                user__in=user_ids,
                type__in=[a.type for a in available_authenticators(ignore_backup=True)],
            )
            .distinct()
            .values_list("user_id", flat=True)
        )
        return {id: id in authenticators for id in user_ids}
AuthenticatorManager
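
# The sort key in all_interfaces_for_user() is worth unpacking:
# (interface.type == 0, interface.type) orders numerically by type but pushes
# type 0 to the end, because False sorts before True. Type 0 appears to be
# the recovery-code backup interface, judging by the backup handling above;
# treat that reading as an assumption. A standalone illustration:
from types import SimpleNamespace

ifaces = [SimpleNamespace(type=t) for t in (0, 2, 1)]
ordered = sorted(ifaces, key=lambda i: (i.type == 0, i.type))
assert [i.type for i in ordered] == [1, 2, 0]  # type 0 sorts last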
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 502200, "end": 502411 }
class ____(VegaLiteSchema): """LabelOverlap schema wrapper.""" _schema = {"$ref": "#/definitions/LabelOverlap"} def __init__(self, *args, **kwds): super().__init__(*args, **kwds)
LabelOverlap
python
PrefectHQ__prefect
src/prefect/server/utilities/database.py
{ "start": 12789, "end": 25039 }
class ____(functions.GenericFunction[float]):
    """Platform-independent calculation of the number of seconds between two timestamps or from 'now'"""

    type: Type[sa.REAL[float]] = sa.REAL
    inherit_cache: bool = True

    def __init__(
        self,
        dt1: _SQLExpressionOrLiteral[datetime.datetime],
        dt2: Optional[_SQLExpressionOrLiteral[datetime.datetime]] = None,
        **kwargs: Any,
    ) -> None:
        args = (sa.type_coerce(dt1, Timestamp()),)
        if dt2 is not None:
            args = (*args, sa.type_coerce(dt2, Timestamp()))
        super().__init__(*args, **kwargs)


# timestamp and interval arithmetic implementations for PostgreSQL


@compiles(date_add, "postgresql")
@compiles(interval_add, "postgresql")
@compiles(date_diff, "postgresql")
def datetime_or_interval_add_postgresql(
    element: Union[date_add, interval_add, date_diff],
    compiler: SQLCompiler,
    **kwargs: Any,
) -> str:
    operation = operator.sub if isinstance(element, date_diff) else operator.add
    return compiler.process(operation(*element.clauses), **kwargs)


@compiles(date_diff_seconds, "postgresql")
def date_diff_seconds_postgresql(
    element: date_diff_seconds, compiler: SQLCompiler, **kwargs: Any
) -> str:
    # either 1 or 2 timestamps; if 1, subtract from 'now'
    dts: list[sa.ColumnElement[datetime.datetime]] = list(element.clauses)
    if len(dts) == 1:
        dts = [sa.func.now(), *dts]
    as_utc = (sa.func.timezone("UTC", dt) for dt in dts)
    return compiler.process(sa.func.extract("epoch", operator.sub(*as_utc)), **kwargs)


# SQLite implementations for the Timestamp and Interval arithmetic functions.
#
# The following concepts are at play here:
#
# - By default, SQLAlchemy stores Timestamp values formatted as ISO8601 strings
#   (with a space between the date and the time parts), with microsecond
#   precision.
# - SQLAlchemy stores Interval values as a Timestamp, offset from the UNIX
#   epoch.
# - SQLite processes timestamp values with _at most_ millisecond precision, and
#   only if you use the `julianday()` function or the 'subsec' modifier for
#   the `unixepoch()` function (the latter requires SQLite 3.42.0, released
#   2023-05-16)
#
# In order for arithmetic to work well, you need to convert timestamps to
# fractional [Julian day numbers][JDN], and intervals to a real number
# by subtracting the UNIX epoch from their Julian day number representation.
#
# Once the result has been computed, the result needs to be converted back
# to an ISO8601 formatted string including any milliseconds. For an
# interval result, that means adding the UNIX epoch offset to it first.
#
# [JDN]: https://en.wikipedia.org/wiki/Julian_day


# SQLite strftime() format to output ISO8601 date and time with milliseconds
# This format must be parseable by the `datetime.fromisoformat()` function,
# or if the SQLite implementation for Timestamp below is configured with a
# regex, then it must target that regex.
#
# SQLite only provides millisecond precision, but past versions of SQLAlchemy
# defaulted to parsing with a regex that would treat fractional as a value in
# microseconds. To ensure maximum compatibility, the current format should
# continue to format the fractional seconds as microseconds, so 6 digits.
SQLITE_DATETIME_FORMAT = sa.literal("%Y-%m-%d %H:%M:%f000", literal_execute=True)
"""The SQLite timestamp output format as a SQL literal string constant"""

SQLITE_EPOCH_JULIANDAYNUMBER = sa.literal(2440587.5, literal_execute=True)
"""The UNIX epoch, 1970-01-01T00:00:00.000000Z, expressed as a fractional Julian day number"""

SECONDS_PER_DAY = sa.literal(24 * 60 * 60.0, literal_execute=True)
"""The number of seconds in a day as a SQL literal, to convert fractional Julian days to seconds"""

_sqlite_now_constant = sa.literal("now", literal_execute=True)
"""The 'now' string constant, passed to SQLite datetime functions"""

_sqlite_strftime = partial(sa.func.strftime, SQLITE_DATETIME_FORMAT)
"""Format SQLite timestamp to a SQLAlchemy-compatible string"""


def _sqlite_strfinterval(
    offset: sa.ColumnElement[float],
) -> sa.ColumnElement[datetime.datetime]:
    """Format interval offset to a SQLAlchemy-compatible string"""
    return _sqlite_strftime(SQLITE_EPOCH_JULIANDAYNUMBER + offset)


def _sqlite_interval_offset(
    interval: _SQLExpressionOrLiteral[datetime.timedelta],
) -> sa.ColumnElement[float]:
    """Convert interval value to a fractional Julian day number REAL offset from UNIX epoch"""
    return sa.func.julianday(interval) - SQLITE_EPOCH_JULIANDAYNUMBER


@compiles(functions.now, "sqlite")
def current_timestamp_sqlite(
    element: functions.now, compiler: SQLCompiler, **kwargs: Any
) -> str:
    """Generates the current timestamp for SQLite"""
    return compiler.process(_sqlite_strftime(_sqlite_now_constant), **kwargs)


@compiles(date_add, "sqlite")
def date_add_sqlite(element: date_add, compiler: SQLCompiler, **kwargs: Any) -> str:
    dt, interval = element.clauses
    jdn, offset = sa.func.julianday(dt), _sqlite_interval_offset(interval)
    # dt + interval, as fractional Julian day number values
    return compiler.process(_sqlite_strftime(jdn + offset), **kwargs)


@compiles(interval_add, "sqlite")
def interval_add_sqlite(
    element: interval_add, compiler: SQLCompiler, **kwargs: Any
) -> str:
    offsets = map(_sqlite_interval_offset, element.clauses)
    # interval + interval, as fractional Julian day number values
    return compiler.process(_sqlite_strfinterval(operator.add(*offsets)), **kwargs)


@compiles(date_diff, "sqlite")
def date_diff_sqlite(element: date_diff, compiler: SQLCompiler, **kwargs: Any) -> str:
    jdns = map(sa.func.julianday, element.clauses)
    # timestamp - timestamp, as fractional Julian day number values
    return compiler.process(_sqlite_strfinterval(operator.sub(*jdns)), **kwargs)


@compiles(date_diff_seconds, "sqlite")
def date_diff_seconds_sqlite(
    element: date_diff_seconds, compiler: SQLCompiler, **kwargs: Any
) -> str:
    # either 1 or 2 timestamps; if 1, subtract from 'now'
    dts: list[sa.ColumnElement[Any]] = list(element.clauses)
    if len(dts) == 1:
        dts = [_sqlite_now_constant, *dts]
    as_jdn = (sa.func.julianday(dt) for dt in dts)
    # timestamp - timestamp, as a fractional Julian day number, times the
    # number of seconds in a day
    return compiler.process(operator.sub(*as_jdn) * SECONDS_PER_DAY, **kwargs)


# PostgreSQL JSON(B) Comparator operators ported to SQLite


def _is_literal(elem: Any) -> bool:
    """Element is not a SQLAlchemy SQL construct"""
    # Copied from sqlalchemy.sql.coercions._is_literal
    return not (
        isinstance(elem, (sa.Visitable, schema.SchemaEventTarget))
        or hasattr(elem, "__clause_element__")
    )


def _postgresql_array_to_json_array(
    elem: sa.ColumnElement[Any],
) -> sa.ColumnElement[Any]:
    """Replace any postgresql array() literals with a json_array() function call

    Because an _empty_ array leads to a PostgreSQL error, array() is
often coupled with a cast(); this function replaces arrays with or without such a cast. This allows us to map the postgres JSONB.has_any / JSONB.has_all operand to SQLite. Returns the updated expression. """ def _replacer(element: Any, **kwargs: Any) -> Optional[Any]: # either array(...), or cast(array(...), ...) if isinstance(element, sa.Cast): element = element.clause if isinstance(element, postgresql.array): return sa.func.json_array(*element.clauses) return None opts: dict[str, Any] = {} return replacement_traverse(elem, opts, _replacer) def _json_each(elem: sa.ColumnElement[Any]) -> sa.TableValuedAlias: """SQLite json_each() table-valued construct Configures a SQLAlchemy table-valued object with the minimum column definitions and correct configuration. """ return sa.func.json_each(elem).table_valued("key", "value", joins_implicitly=True) # sqlite JSON operator implementations. def _sqlite_json_astext( element: sa.BinaryExpression[Any], ) -> sa.BinaryExpression[Any]: """Map postgres JSON.astext / JSONB.astext (`->>`) to sqlite json_extract() Without the `as_string()` call, SQLAlchemy outputs json_quote(json_extract(...)) """ return element.left[element.right].as_string() def _sqlite_json_contains( element: sa.BinaryExpression[bool], ) -> sa.ColumnElement[bool]: """Map JSONB.contains() and JSONB.has_all() to a SQLite expression""" # left can be a JSON value as a (Python) literal, or a SQL expression for a JSON value # right can be a SQLA postgresql.array() literal or a SQL expression for a # JSON array (for .has_all()) or it can be a JSON value as a (Python) # literal or a SQL expression for a JSON object (for .contains()) left, right = element.left, element.right # if either top-level operand is literal, convert to a JSON bindparam if _is_literal(left): left = sa.bindparam("haystack", left, expanding=True, type_=JSON) if _is_literal(right): right = sa.bindparam("needles", right, expanding=True, type_=JSON) else: # convert the array() literal used in JSONB.has_all() to a JSON array. right = _postgresql_array_to_json_array(right) jleft, jright = _json_each(left), _json_each(right) # compute equality by counting the number of distinct matches between the # left items and the right items (e.g. the number of rows resulting from a # join) and seeing if it exceeds the number of distinct keys in the right # operand. # # note that using distinct emulates postgres behavior to disregard duplicates distinct_matches = ( sa.select(sa.func.count(sa.distinct(jleft.c.value))) .join(jright, onclause=jleft.c.value == jright.c.value) .scalar_subquery() ) distinct_keys = sa.select( sa.func.count(sa.distinct(jright.c.value)) ).scalar_subquery() return distinct_matches >= distinct_keys def _sqlite_json_has_any(element: sa.BinaryExpression[bool]) -> sa.ColumnElement[bool]: """Map JSONB.has_any() to a SQLite expression""" # left can be a JSON value as a (Python) literal, or a SQL expression for a JSON value # right can be a SQLA postgresql.array() literal or a SQL expression for a JSON array left, right = element.left, element.right # convert the array() literal used in JSONB.has_all() to a JSON array. 
    right = _postgresql_array_to_json_array(right)

    jleft, jright = _json_each(left), _json_each(right)

    # deal with "json array ?| [value, ...]" vs "json object ?| [key, ...]" tests
    # if left is a JSON object, match keys, else match values; the latter works
    # for arrays and all JSON scalar types
    json_object = sa.literal("object", literal_execute=True)
    left_elem = sa.case(
        (sa.func.json_type(element.left) == json_object, jleft.c.key),
        else_=jleft.c.value,
    )
    return sa.exists().where(left_elem == jright.c.value)


# Map of SQLA postgresql JSON/JSONB operators and a function to rewrite
# a BinaryExpression with such an operator to their SQLite equivalent.
_sqlite_json_operator_map: dict[
    OperatorType, Callable[[sa.BinaryExpression[Any]], sa.ColumnElement[Any]]
] = {
    ASTEXT: _sqlite_json_astext,
    CONTAINS: _sqlite_json_contains,
    HAS_ALL: _sqlite_json_contains,  # "has all" is equivalent to "contains"
    HAS_ANY: _sqlite_json_has_any,
}


@compiles(sa.BinaryExpression, "sqlite")
def sqlite_json_operators(
    element: sa.BinaryExpression[Any],
    compiler: SQLCompiler,
    override_operator: Optional[OperatorType] = None,
    **kwargs: Any,
) -> str:
    """Intercept the PostgreSQL-only JSON / JSONB operators and translate them to SQLite"""
    operator = override_operator or element.operator
    if (handler := _sqlite_json_operator_map.get(operator)) is not None:
        return compiler.process(handler(element), **kwargs)
    # ignore reason: SQLA compilation hooks are not as well covered with type annotations
    return compiler.visit_binary(element, override_operator=operator, **kwargs)  # pyright: ignore[reportUnknownMemberType,reportUnknownVariableType]
date_diff_seconds
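
# The Julian-day bookkeeping described in the SQLite comments can be checked
# with plain arithmetic, using the same constants the module defines
# (2440587.5 for the UNIX epoch, 86400 seconds per day). A pure-Python sanity
# sketch, not part of the module:
EPOCH_JDN = 2440587.5            # 1970-01-01T00:00:00Z as a Julian day number
SECONDS_PER_DAY = 24 * 60 * 60.0

jdn_a = EPOCH_JDN + 1.0          # 1970-01-02T00:00:00Z
jdn_b = EPOCH_JDN + 1.5          # 1970-01-02T12:00:00Z
assert (jdn_b - jdn_a) * SECONDS_PER_DAY == 43200.0  # what date_diff_seconds yields

# Intervals are stored as timestamps offset from the epoch, so an interval's
# REAL offset is its julianday() minus EPOCH_JDN, and formatting it back
# (_sqlite_strfinterval) re-adds EPOCH_JDN first:
interval_offset = jdn_b - jdn_a  # 0.5 day
assert EPOCH_JDN + interval_offset == 2440588.0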
python
allegroai__clearml
clearml/backend_api/services/v2_23/projects.py
{ "start": 86757, "end": 89575 }
class ____(Response): """ Response of projects.get_hyper_parameters endpoint. :param parameters: A list of parameter sections and names :type parameters: Sequence[dict] :param remaining: Remaining results :type remaining: int :param total: Total number of results :type total: int """ _service = "projects" _action = "get_hyper_parameters" _version = "2.23" _schema = { "definitions": {}, "properties": { "parameters": { "description": "A list of parameter sections and names", "items": {"type": "object"}, "type": ["array", "null"], }, "remaining": { "description": "Remaining results", "type": ["integer", "null"], }, "total": { "description": "Total number of results", "type": ["integer", "null"], }, }, "type": "object", } def __init__( self, parameters: Optional[List[dict]] = None, remaining: Optional[int] = None, total: Optional[int] = None, **kwargs: Any ) -> None: super(GetHyperParametersResponse, self).__init__(**kwargs) self.parameters = parameters self.remaining = remaining self.total = total @schema_property("parameters") def parameters(self) -> Optional[List[dict]]: return self._property_parameters @parameters.setter def parameters(self, value: Optional[List[dict]]) -> None: if value is None: self._property_parameters = None return self.assert_isinstance(value, "parameters", (list, tuple)) self.assert_isinstance(value, "parameters", (dict,), is_array=True) self._property_parameters = value @schema_property("remaining") def remaining(self) -> Optional[int]: return self._property_remaining @remaining.setter def remaining(self, value: Optional[int]) -> None: if value is None: self._property_remaining = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "remaining", six.integer_types) self._property_remaining = value @schema_property("total") def total(self) -> Optional[int]: return self._property_total @total.setter def total(self, value: Optional[int]) -> None: if value is None: self._property_total = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "total", six.integer_types) self._property_total = value
GetHyperParametersResponse
python
doocs__leetcode
solution/1500-1599/1534.Count Good Triplets/Solution.py
{ "start": 0, "end": 460 }
class ____: def countGoodTriplets(self, arr: List[int], a: int, b: int, c: int) -> int: ans, n = 0, len(arr) for i in range(n): for j in range(i + 1, n): for k in range(j + 1, n): ans += ( abs(arr[i] - arr[j]) <= a and abs(arr[j] - arr[k]) <= b and abs(arr[i] - arr[k]) <= c ) return ans
Solution
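
# The triple loop makes this O(n^3) in len(arr), with the chained comparisons
# summed as 0/1 booleans. Usage against the published examples for
# LeetCode 1534:
# Example 1: the good triplets are (3,0,1), (3,0,1), (3,1,1), and (0,1,1).
assert Solution().countGoodTriplets([3, 0, 1, 1, 9, 7], a=7, b=2, c=3) == 4
# Example 2: no triplet satisfies |arr[i] - arr[j]| <= 0.
assert Solution().countGoodTriplets([1, 1, 2, 2, 3], a=0, b=0, c=1) == 0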
python
python-excel__xlwt
xlwt/Style.py
{ "start": 209, "end": 588 }
class ____(object): def __init__(self): self.num_format_str = 'General' self.font = Formatting.Font() self.alignment = Formatting.Alignment() self.borders = Formatting.Borders() self.pattern = Formatting.Pattern() self.protection = Formatting.Protection() default_style = XFStyle()
XFStyle
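
# XFStyle is a mutable bag of formatting defaults, typically tweaked and
# passed to write(). A short usage sketch with the standard xlwt API (the
# file name and format string are illustrative):
import datetime
import xlwt

style = xlwt.XFStyle()
style.num_format_str = 'DD-MM-YYYY'   # override the 'General' default

book = xlwt.Workbook()
sheet = book.add_sheet('demo')
sheet.write(0, 0, datetime.date(1945, 5, 8), style)  # rendered with the format
sheet.write(0, 1, 'plain')            # falls back to default_style
book.save('demo.xls')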
python
spyder-ide__spyder
spyder/widgets/collectionseditor.py
{ "start": 82831, "end": 89796 }
class ____(CustomSortFilterProxy):
    """
    Custom column filter based on regex and model data.

    Reimplements 'filterAcceptsRow' to follow NamespaceBrowser model.
    Reimplements 'set_filter' to allow sorting while filtering
    """

    def get_key(self, index):
        """Return current key from source model."""
        source_index = self.mapToSource(index)
        return self.sourceModel().get_key(source_index)

    def get_index_from_key(self, key):
        """Return index using key from source model."""
        source_index = self.sourceModel().get_index_from_key(key)
        return self.mapFromSource(source_index)

    def get_value(self, index):
        """Return current value from source model."""
        source_index = self.mapToSource(index)
        return self.sourceModel().get_value(source_index)

    def set_value(self, index, value):
        """Set value in source model."""
        try:
            source_index = self.mapToSource(index)
            self.sourceModel().set_value(source_index, value)
        except AttributeError:
            # Read-only models don't have set_value method
            pass

    def set_filter(self, text):
        """Set regular expression for filter."""
        self.pattern = get_search_regex(text)
        self.invalidateFilter()

    def filterAcceptsRow(self, row_num, parent):
        """
        Qt override.

        Reimplemented from base class to allow the use of custom filtering
        using two columns (name and type).
        """
        model = self.sourceModel()
        name = str(model.row_key(row_num))
        variable_type = str(model.row_type(row_num))
        r_name = re.search(self.pattern, name)
        r_type = re.search(self.pattern, variable_type)

        if r_name is None and r_type is None:
            return False
        else:
            return True

    def lessThan(self, left, right):
        """
        Implements ordering in a natural way, as a human would sort.
        This function enables sorting of the main variable editor table,
        which does not rely on 'self.sort()'.
        """
        leftData = self.sourceModel().data(left)
        rightData = self.sourceModel().data(right)

        try:
            if isinstance(leftData, str) and isinstance(rightData, str):
                return natsort(leftData) < natsort(rightData)
            else:
                return leftData < rightData
        except TypeError:
            # This is needed so all the elements that cannot be compared such
            # as dataframes and numpy arrays are grouped together in the
            # variable explorer.
            # For more info see spyder-ide/spyder#14527
            return True


# =============================================================================
# Tests
# =============================================================================
def get_test_data():
    """Create test data."""
    image = PIL.Image.fromarray(255 * np.random.rand(100, 100))
    testdict = {'d': 1, 'a': np.random.rand(10, 10), 'b': [1, 2]}
    testdate = datetime.date(1945, 5, 8)
    test_timedelta = datetime.timedelta(days=-1, minutes=42, seconds=13)

    try:
        import pandas as pd
    except (ModuleNotFoundError, ImportError):
        test_df = None
        test_timestamp = test_pd_td = test_dtindex = test_series = None
    else:
        test_timestamp = pd.Timestamp("1945-05-08T23:01:00.12345")
        test_pd_td = pd.Timedelta(days=2193, hours=12)
        test_dtindex = pd.date_range(start="1939-09-01T",
                                     end="1939-10-06",
                                     freq="12h")
        test_series = pd.Series({"series_name": [0, 1, 2, 3, 4, 5]})
        test_df = pd.DataFrame({"string_col": ["a", "b", "c", "d"],
                                "int_col": [0, 1, 2, 3],
                                "float_col": [1.1, 2.2, 3.3, 4.4],
                                "bool_col": [True, False, False, True]})

    class Foobar(object):
        def __init__(self):
            self.text = "toto"
            self.testdict = testdict
            self.testdate = testdate

    foobar = Foobar()
    return {'object': foobar,
            'module': np,
            'bytes': b'kjkj kj k j j kj k jkj',
            'str': 'éù',
            'list': [1, 3, [sorted, 5, 6], 'kjkj', None],
            'set': {1, 2, 1, 3, None, 'A', 'B', 'C', True, False},
            'tuple': ([1, testdate, testdict, test_timedelta], 'kjkj', None),
            'dict': testdict,
            'float': 1.2233,
            'int': 223,
            'bool': True,
            'array': np.random.rand(10, 10).astype(np.int64),
            'masked_array': np.ma.array([[1, 0], [1, 0]],
                                        mask=[[True, False], [False, False]]),
            '1D-array': np.linspace(-10, 10).astype(np.float16),
            '3D-array': np.random.randint(2, size=(5, 5, 5)).astype(np.bool_),
            'empty_array': np.array([]),
            'image': image,
            'date': testdate,
            'datetime': datetime.datetime(1945, 5, 8, 23, 1, 0, int(1.5e5)),
            'timedelta': test_timedelta,
            'complex': 2 + 1j,
            'complex64': np.complex64(2 + 1j),
            'complex128': np.complex128(9j),
            'int8_scalar': np.int8(8),
            'int16_scalar': np.int16(16),
            'int32_scalar': np.int32(32),
            'int64_scalar': np.int64(64),
            'float16_scalar': np.float16(16),
            'float32_scalar': np.float32(32),
            'float64_scalar': np.float64(64),
            'bool__scalar': np.bool_(8),
            'timestamp': test_timestamp,
            'timedelta_pd': test_pd_td,
            'datetimeindex': test_dtindex,
            'series': test_series,
            'dataframe': test_df,
            'None': None,
            'unsupported1': np.arccos,
            'unsupported2': np.asarray,
            # Test for spyder-ide/spyder#3518.
            'big_struct_array': np.zeros(1000, dtype=[('ID', 'f8'),
                                                      ('param1', 'f8', 5000)]),
            }


def editor_test():
    """Test Collections editor."""
    dialog = CollectionsEditor()
    dialog.setup(get_test_data())
    dialog.exec_()


def remote_editor_test():
    """Test remote collections editor."""
    from spyder.config.manager import CONF
    from spyder_kernels.utils.nsview import (make_remote_view,
                                             REMOTE_SETTINGS)

    settings = {}
    for name in REMOTE_SETTINGS:
        settings[name] = CONF.get('variable_explorer', name)

    remote = make_remote_view(get_test_data(), settings)
    dialog = CollectionsEditor()
    dialog.setup(remote, remote=True)
    dialog.exec_()


if __name__ == "__main__":
    from spyder.utils.qthelpers import qapplication
    app = qapplication()  # analysis:ignore
    editor_test()
    remote_editor_test()
CollectionsCustomSortFilterProxy
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_page_view01.py
{ "start": 315, "end": 975 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("page_view01.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with print options.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.set_page_view() # Options to match automatic page setup. worksheet.set_paper(9) worksheet.vertical_dpi = 200 worksheet.write("A1", "Foo") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_kubernetes_engine.py
{ "start": 23493, "end": 26691 }
class ____: def setup_method(self): self.operator = GKEStartKueueInsideClusterOperator( project_id=TEST_PROJECT_ID, location=TEST_LOCATION, cluster_name=GKE_CLUSTER_NAME, task_id=TEST_TASK_ID, kueue_version=K8S_KUEUE_VERSION, impersonation_chain=TEST_IMPERSONATION_CHAIN, use_internal_ip=False, ) def test_template_fields(self): expected_template_fields = set(GKEOperatorMixin.template_fields) | set( KubernetesInstallKueueOperator.template_fields ) assert set(GKEStartKueueInsideClusterOperator.template_fields) == expected_template_fields def test_enable_tcp_keepalive(self): assert self.operator.enable_tcp_keepalive @mock.patch(GKE_OPERATORS_PATH.format("super")) @mock.patch(GKE_OPERATORS_PATH.format("KubernetesEngineClusterLink")) @mock.patch(GKE_OPERATORS_PATH.format("GKEHook")) def test_execute(self, mock_hook, mock_link, mock_super): mock_get_cluster = mock_hook.return_value.get_cluster mock_cluster = mock_get_cluster.return_value mock_check_cluster_autoscaling_ability = mock_hook.return_value.check_cluster_autoscaling_ability mock_check_cluster_autoscaling_ability.return_value = True mock_context = mock.MagicMock() self.operator.execute(context=mock_context) mock_get_cluster.assert_called_once_with( name=GKE_CLUSTER_NAME, project_id=TEST_PROJECT_ID, ) mock_link.persist.assert_called_once_with( context=mock_context, cluster=mock_cluster, ) mock_check_cluster_autoscaling_ability.assert_called_once_with(cluster=mock_cluster) mock_super.assert_called_once() mock_super.return_value.execute.assert_called_once_with(mock_context) @mock.patch(GKE_OPERATORS_PATH.format("GKEStartKueueInsideClusterOperator.log")) @mock.patch(GKE_OPERATORS_PATH.format("super")) @mock.patch(GKE_OPERATORS_PATH.format("KubernetesEngineClusterLink")) @mock.patch(GKE_OPERATORS_PATH.format("GKEHook")) def test_execute_not_scalable(self, mock_hook, mock_link, mock_super, mock_log): mock_get_cluster = mock_hook.return_value.get_cluster mock_cluster = mock_get_cluster.return_value mock_check_cluster_autoscaling_ability = mock_hook.return_value.check_cluster_autoscaling_ability mock_check_cluster_autoscaling_ability.return_value = False mock_context = mock.MagicMock() self.operator.execute(context=mock_context) mock_get_cluster.assert_called_once_with( name=GKE_CLUSTER_NAME, project_id=TEST_PROJECT_ID, ) mock_link.persist.assert_called_once_with( context=mock_context, cluster=mock_cluster, ) mock_check_cluster_autoscaling_ability.assert_called_once_with(cluster=mock_cluster) mock_super.assert_not_called() mock_log.info.assert_called_once_with( "Cluster doesn't have ability to autoscale, will not install Kueue inside. Aborting" )
TestGKEStartKueueInsideClusterOperator
python
pytorch__pytorch
torch/_dynamo/variables/misc.py
{ "start": 24202, "end": 24957 }
class ____(UnknownVariable): """ Used to insert a dummy variable in the stack to do the graph break at CALL_FUNCTION. """ def __init__(self, msg=None, **kwargs): super().__init__(**kwargs) self.msg = msg def call_function( self, tx: "InstructionTranslator", args: "list[VariableTracker]", kwargs: "dict[str, VariableTracker]", ) -> "VariableTracker": unimplemented( gb_type="Unsupported function call (delayed)", context=f"source: {self.source}", explanation="Dynamo determined that a graph break should occur " f"when calling `{self.source.name()}`. Reason: {self.msg}", hints=[], )
DelayGraphBreakVariable
python
keras-team__keras
keras/src/layers/preprocessing/image_preprocessing/aug_mix_test.py
{ "start": 164, "end": 2098 }
class ____(testing.TestCase): @pytest.mark.requires_trainable_backend def test_layer(self): self.run_layer_test( layers.AugMix, init_kwargs={ "value_range": (0, 255), "num_chains": 2, "chain_depth": 2, "factor": 1, "alpha": 1.0, "all_ops": True, "interpolation": "nearest", "seed": 43, "data_format": "channels_last", }, input_shape=(8, 3, 4, 3), supports_masking=False, expected_output_shape=(8, 3, 4, 3), ) def test_aug_mix_inference(self): seed = 3481 layer = layers.AugMix() np.random.seed(seed) inputs = np.random.randint(0, 255, size=(224, 224, 3)) output = layer(inputs, training=False) self.assertAllClose(inputs, output) def test_random_augment_randomness(self): data_format = backend.config.image_data_format() if data_format == "channels_last": input_data = np.random.random((2, 8, 8, 3)) else: input_data = np.random.random((2, 3, 8, 8)) layer = layers.AugMix( num_chains=11, all_ops=True, data_format=data_format ) augmented_image = layer(input_data) self.assertNotAllClose( backend.convert_to_numpy(augmented_image), input_data ) def test_tf_data_compatibility(self): data_format = backend.config.image_data_format() if data_format == "channels_last": input_data = np.random.random((2, 8, 8, 3)) else: input_data = np.random.random((2, 3, 8, 8)) layer = layers.AugMix(data_format=data_format) ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer) for output in ds.take(1): output.numpy()
RandAugmentTest
python
huggingface__transformers
src/transformers/models/clipseg/modeling_clipseg.py
{ "start": 9710, "end": 12115 }
class ____(nn.Module): def __init__(self, config: CLIPSegTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] max_position_embedding = self.position_embedding.weight.shape[0] if seq_length > max_position_embedding: raise ValueError( f"Sequence length must be less than max_position_embeddings (got `sequence length`: " f"{seq_length} and max_position_embeddings: {max_position_embedding}" ) if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = self.token_embedding(input_ids) position_embeddings = self.position_embedding(position_ids) embeddings = inputs_embeds + position_embeddings return embeddings # Copied from transformers.models.siglip.modeling_siglip.eager_attention_forward def eager_attention_forward( module: nn.Module, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attention_mask: Optional[torch.Tensor], scaling: float, dropout: float = 0.0, **kwargs, ): attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype) attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training) attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2).contiguous() return attn_output, attn_weights
CLIPSegTextEmbeddings
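
# With dropout at 0 and no mask, eager_attention_forward reduces to plain
# scaled dot-product attention, returning the output transposed to
# (batch, seq, heads, head_dim). A numeric sanity sketch; shapes follow the
# (batch, num_heads, seq_len, head_dim) convention its callers use, and the
# bare nn.Module only supplies .training for the dropout call:
import torch
from torch import nn

b, h, s, d = 1, 2, 4, 8
q, k, v = (torch.randn(b, h, s, d) for _ in range(3))

out, weights = eager_attention_forward(
    nn.Module(), q, k, v, attention_mask=None, scaling=d ** -0.5)

ref = torch.softmax((q @ k.transpose(-1, -2)) * d ** -0.5, dim=-1) @ v
torch.testing.assert_close(out, ref.transpose(1, 2).contiguous())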
python
allegroai__clearml
clearml/backend_api/services/v2_23/models.py
{ "start": 107414, "end": 108675 }
class ____(Request): """ Convert public models to private :param ids: Ids of the models to convert. Only the models originated by the company can be converted :type ids: Sequence[str] """ _service = "models" _action = "make_private" _version = "2.23" _schema = { "definitions": {}, "properties": { "ids": { "description": "Ids of the models to convert. Only the models originated by the company can be converted", "items": {"type": "string"}, "type": ["array", "null"], } }, "type": "object", } def __init__(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None: super(MakePrivateRequest, self).__init__(**kwargs) self.ids = ids @schema_property("ids") def ids(self) -> Optional[List[str]]: return self._property_ids @ids.setter def ids(self, value: Optional[List[str]]) -> None: if value is None: self._property_ids = None return self.assert_isinstance(value, "ids", (list, tuple)) self.assert_isinstance(value, "ids", six.string_types, is_array=True) self._property_ids = value
MakePrivateRequest
python
django__django
tests/serializers/test_json.py
{ "start": 513, "end": 9069 }
class ____(SerializersTestBase, TestCase): serializer_name = "json" pkless_str = """[ { "pk": null, "model": "serializers.category", "fields": {"name": "Reference"} }, { "model": "serializers.category", "fields": {"name": "Non-fiction"} }]""" mapping_ordering_str = """[ { "model": "serializers.article", "pk": %(article_pk)s, "fields": { "author": %(author_pk)s, "headline": "Poker has no place on ESPN", "pub_date": "2006-06-16T11:00:00", "categories": [ %(first_category_pk)s, %(second_category_pk)s ], "meta_data": [], "topics": [] } } ] """ @staticmethod def _validate_output(serial_str): try: json.loads(serial_str) except Exception: return False else: return True @staticmethod def _get_pk_values(serial_str): serial_list = json.loads(serial_str) return [obj_dict["pk"] for obj_dict in serial_list] @staticmethod def _get_field_values(serial_str, field_name): serial_list = json.loads(serial_str) return [ obj_dict["fields"][field_name] for obj_dict in serial_list if field_name in obj_dict["fields"] ] def test_indentation_whitespace(self): s = serializers.json.Serializer() json_data = s.serialize([Score(score=5.0), Score(score=6.0)], indent=2) for line in json_data.splitlines(): if re.search(r".+,\s*$", line): self.assertEqual(line, line.rstrip()) @isolate_apps("serializers") def test_custom_encoder(self): class ScoreDecimal(models.Model): score = models.DecimalField() class CustomJSONEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, decimal.Decimal): return str(o) return super().default(o) s = serializers.json.Serializer() json_data = s.serialize( [ScoreDecimal(score=decimal.Decimal(1.0))], cls=CustomJSONEncoder ) self.assertIn('"fields": {"score": "1"}', json_data) def test_json_deserializer_exception(self): with self.assertRaises(DeserializationError): for obj in serializers.deserialize("json", """[{"pk":1}"""): pass def test_helpful_error_message_invalid_pk(self): """ If there is an invalid primary key, the error message should contain the model associated with it. """ test_string = """[{ "pk": "badpk", "model": "serializers.player", "fields": { "name": "Bob", "rank": 1, "team": "Team" } }]""" with self.assertRaisesMessage( DeserializationError, "(serializers.player:pk=badpk)" ): list(serializers.deserialize("json", test_string)) def test_helpful_error_message_invalid_field(self): """ If there is an invalid field value, the error message should contain the model associated with it. """ test_string = """[{ "pk": "1", "model": "serializers.player", "fields": { "name": "Bob", "rank": "invalidint", "team": "Team" } }]""" expected = "(serializers.player:pk=1) field_value was 'invalidint'" with self.assertRaisesMessage(DeserializationError, expected): list(serializers.deserialize("json", test_string)) def test_helpful_error_message_for_foreign_keys(self): """ Invalid foreign keys with a natural key should throw a helpful error message, such as what the failing key is. """ test_string = """[{ "pk": 1, "model": "serializers.category", "fields": { "name": "Unknown foreign key", "meta_data": [ "doesnotexist", "metadata" ] } }]""" key = ["doesnotexist", "metadata"] expected = "(serializers.category:pk=1) field_value was '%r'" % key with self.assertRaisesMessage(DeserializationError, expected): list(serializers.deserialize("json", test_string)) def test_helpful_error_message_for_many2many_non_natural(self): """ Invalid many-to-many keys should throw a helpful error message. 
""" test_string = """[{ "pk": 1, "model": "serializers.article", "fields": { "author": 1, "headline": "Unknown many to many", "pub_date": "2014-09-15T10:35:00", "categories": [1, "doesnotexist"] } }, { "pk": 1, "model": "serializers.author", "fields": { "name": "Agnes" } }, { "pk": 1, "model": "serializers.category", "fields": { "name": "Reference" } }]""" expected = "(serializers.article:pk=1) field_value was 'doesnotexist'" with self.assertRaisesMessage(DeserializationError, expected): list(serializers.deserialize("json", test_string)) def test_helpful_error_message_for_many2many_natural1(self): """ Invalid many-to-many keys should throw a helpful error message. This tests the code path where one of a list of natural keys is invalid. """ test_string = """[{ "pk": 1, "model": "serializers.categorymetadata", "fields": { "kind": "author", "name": "meta1", "value": "Agnes" } }, { "pk": 1, "model": "serializers.article", "fields": { "author": 1, "headline": "Unknown many to many", "pub_date": "2014-09-15T10:35:00", "meta_data": [ ["author", "meta1"], ["doesnotexist", "meta1"], ["author", "meta1"] ] } }, { "pk": 1, "model": "serializers.author", "fields": { "name": "Agnes" } }]""" key = ["doesnotexist", "meta1"] expected = "(serializers.article:pk=1) field_value was '%r'" % key with self.assertRaisesMessage(DeserializationError, expected): for obj in serializers.deserialize("json", test_string): obj.save() def test_helpful_error_message_for_many2many_natural2(self): """ Invalid many-to-many keys should throw a helpful error message. This tests the code path where a natural many-to-many key has only a single value. """ test_string = """[{ "pk": 1, "model": "serializers.article", "fields": { "author": 1, "headline": "Unknown many to many", "pub_date": "2014-09-15T10:35:00", "meta_data": [1, "doesnotexist"] } }, { "pk": 1, "model": "serializers.categorymetadata", "fields": { "kind": "author", "name": "meta1", "value": "Agnes" } }, { "pk": 1, "model": "serializers.author", "fields": { "name": "Agnes" } }]""" expected = "(serializers.article:pk=1) field_value was 'doesnotexist'" with self.assertRaisesMessage(DeserializationError, expected): for obj in serializers.deserialize("json", test_string, ignore=False): obj.save() def test_helpful_error_message_for_many2many_not_iterable(self): """ Not iterable many-to-many field value throws a helpful error message. """ test_string = """[{ "pk": 1, "model": "serializers.m2mdata", "fields": {"data": null} }]""" expected = "(serializers.m2mdata:pk=1) field_value was 'None'" with self.assertRaisesMessage(DeserializationError, expected): next(serializers.deserialize("json", test_string, ignore=False))
JsonSerializerTestCase
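
# For contrast with the failure paths above, the happy path is a plain round
# trip through the same interface. A minimal sketch (Author is the test-app
# model these fixtures reference; running it needs the usual Django test
# database setup):
from django.core import serializers

data = serializers.serialize("json", Author.objects.all(), indent=2)
for deserialized in serializers.deserialize("json", data):
    deserialized.save()  # a bad pk or field value raises DeserializationError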
python
django__django
tests/introspection/models.py
{ "start": 2222, "end": 2646 }
class ____(models.Model): up_votes = models.PositiveIntegerField() voting_number = models.PositiveIntegerField(unique=True) class Meta: required_db_features = { "supports_table_check_constraints", } constraints = [ models.CheckConstraint( name="up_votes_gte_0_check", condition=models.Q(up_votes__gte=0) ), ]
CheckConstraintModel
python
celery__celery
celery/exceptions.py
{ "start": 6022, "end": 6372 }
class ____(TaskPredicate): """A task can raise this if it wants to reject/re-queue the message.""" def __init__(self, reason=None, requeue=False): self.reason = reason self.requeue = requeue super().__init__(reason, requeue) def __repr__(self): return f'reject requeue={self.requeue}: {self.reason}'
Reject
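
# Typical use, following the pattern in the Celery docs: raise Reject from
# inside a task body to dead-letter or requeue the current message. Late
# acknowledgement is required so the broker still owns the message; the task
# and helper names below are illustrative:
from celery import shared_task
from celery.exceptions import Reject

@shared_task(bind=True, acks_late=True)
def render(self, blob):
    try:
        return do_render(blob)  # hypothetical work function
    except MemoryError as exc:
        # Don't requeue: redelivery would just hit the same error.
        raise Reject(exc, requeue=False)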
python
PyCQA__pylint
tests/functional/r/regression/regression_properties_in_class_context.py
{ "start": 291, "end": 480 }
class ____(Parent): pass assert 'foo' in Child.values # false-positive: unsupported-membership-test for value in Child.values: # false-positive: not-an-iterable print(value)
Child
python
OmkarPathak__pygorithm
tests/test_sorting.py
{ "start": 2461, "end": 2650 }
class ____(unittest.TestCase, TestSortingAlgorithm): inplace = True alph_support = True @staticmethod def sort(arr): return insertion_sort.sort(arr)
TestInsertionSort