Dataset columns:
  language     stringclasses   1 value
  repo         stringclasses   346 values
  path         stringlengths   6 - 201
  class_span   dict
  source       stringlengths   21 - 2.38M
  target       stringlengths   1 - 96
python
huggingface__transformers
src/transformers/models/lightglue/modeling_lightglue.py
{ "start": 22226, "end": 43279 }
class ____(LightGluePreTrainedModel): """ LightGlue is a model matching keypoints in images by leveraging detections from a keypoint detector such as SuperPoint. It is based on the SuperGlue architecture and is designed to be lightweight and efficient. It consists of : 1. Keypoint Encoder 2. A Graph Neural Network with self and cross attention layers 3. Matching Assignment layers The correspondence ids use -1 to indicate non-matching points. Philipp Lindenberger, Paul-Edouard Sarlin and Marc Pollefeys. LightGlue: Local Feature Matching at Light Speed. In ICCV 2023. https://huggingface.co/papers/2306.13643 """ def __init__(self, config: LightGlueConfig): super().__init__(config) self.keypoint_detector = AutoModelForKeypointDetection.from_config( config.keypoint_detector_config, trust_remote_code=config.trust_remote_code ) self.keypoint_detector_descriptor_dim = config.keypoint_detector_config.descriptor_decoder_dim self.descriptor_dim = config.descriptor_dim self.num_layers = config.num_hidden_layers self.filter_threshold = config.filter_threshold self.depth_confidence = config.depth_confidence self.width_confidence = config.width_confidence if self.descriptor_dim != self.keypoint_detector_descriptor_dim: self.input_projection = nn.Linear(self.keypoint_detector_descriptor_dim, self.descriptor_dim, bias=True) else: self.input_projection = nn.Identity() self.positional_encoder = LightGluePositionalEncoder(config) self.transformer_layers = nn.ModuleList( [LightGlueTransformerLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)] ) self.match_assignment_layers = nn.ModuleList( [LightGlueMatchAssignmentLayer(config) for _ in range(config.num_hidden_layers)] ) self.token_confidence = nn.ModuleList( [LightGlueTokenConfidenceLayer(config) for _ in range(config.num_hidden_layers - 1)] ) self.post_init() def _get_confidence_threshold(self, layer_index: int) -> float: """scaled confidence threshold for a given layer""" threshold = 0.8 + 0.1 * np.exp(-4.0 * layer_index / self.num_layers) return np.clip(threshold, 0, 1) def _keypoint_processing( self, descriptors: torch.Tensor, keypoints: torch.Tensor, output_hidden_states: Optional[bool] = False ) -> tuple[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]: descriptors = descriptors.detach().contiguous() projected_descriptors = self.input_projection(descriptors) keypoint_encoding_output = self.positional_encoder(keypoints, output_hidden_states=output_hidden_states) return projected_descriptors, keypoint_encoding_output def _get_early_stopped_image_pairs( self, keypoint_confidences: torch.Tensor, layer_index: int, mask: torch.Tensor, num_points: torch.Tensor ) -> torch.Tensor: """evaluate whether we should stop inference based on the confidence of the keypoints""" batch_size, _ = mask.shape if layer_index < self.num_layers - 1: # If the current layer is not the last layer, we compute the confidence of the keypoints and check # if we should stop the forward pass through the transformer layers for each pair of images. keypoint_confidences = keypoint_confidences.masked_fill(mask == 0, 1) keypoint_confidences = keypoint_confidences.reshape(batch_size // 2, -1) threshold = self._get_confidence_threshold(layer_index) ratio_confident = 1.0 - (keypoint_confidences < threshold).float().sum(dim=1) / num_points early_stopped_pairs = ratio_confident > self.depth_confidence else: # If the current layer is the last layer, we stop the forward pass through the transformer layers for # all pairs of images. 
early_stopped_pairs = torch.ones(batch_size, dtype=torch.bool) return early_stopped_pairs def _get_keypoint_matching(self, descriptors, mask, layer_index, early_stops=None): if early_stops is not None: descriptors = descriptors[early_stops] mask = mask[early_stops] scores = self.match_assignment_layers[layer_index](descriptors, mask) matches, matching_scores = get_matches_from_scores(scores, self.filter_threshold) return matches, matching_scores def _get_pruning_mask(self, confidences: torch.Tensor, scores: torch.Tensor, layer_index: int) -> torch.Tensor: """mask points which should be removed""" keep = scores > (1 - self.width_confidence) if confidences is not None: # Low-confidence points are never pruned. keep |= confidences <= self._get_confidence_threshold(layer_index) return keep def _do_layer_keypoint_pruning( self, descriptors: torch.Tensor, keypoints: torch.Tensor, mask: torch.Tensor, indices: torch.Tensor, prune_output: torch.Tensor, keypoint_confidences: torch.Tensor, layer_index: int, ): """ For a given layer, prune keypoints based on the confidence of the keypoints and the matchability of the descriptors. """ batch_size, _, _ = descriptors.shape descriptors_matchability = self.match_assignment_layers[layer_index].get_matchability(descriptors) pruned_keypoints_mask = self._get_pruning_mask(keypoint_confidences, descriptors_matchability, layer_index) pruned_keypoints_mask = pruned_keypoints_mask.masked_fill(mask == 0, torch.tensor(False)) # For each image, we extract the pruned indices and the corresponding descriptors and keypoints. pruned_descriptors, pruned_keypoints_0, pruned_keypoints_1, pruned_mask, pruned_indices = ( [t[mask] for t, mask in zip(tensor, pruned_keypoints_mask)] for tensor in [descriptors, keypoints[0], keypoints[1], pruned_keypoints_mask, indices] ) for i in range(batch_size): prune_output[i, pruned_indices[i]] += 1 # Pad the pruned descriptors, keypoints, indices and mask to have the same shape across the batch. 
pruned_descriptors, pruned_keypoints_0, pruned_keypoints_1, pruned_mask = ( pad_sequence(pruned_tensor, batch_first=True) for pruned_tensor in [pruned_descriptors, pruned_keypoints_0, pruned_keypoints_1, pruned_mask] ) pruned_keypoints = (pruned_keypoints_0, pruned_keypoints_1) pruned_indices = pad_sequence(pruned_indices, batch_first=True, padding_value=-1) return pruned_descriptors, pruned_keypoints, pruned_indices, pruned_mask, prune_output def _concat_early_stopped_outputs( self, early_stops_indices, final_pruned_keypoints_indices, final_pruned_keypoints_iterations, matches, matching_scores, ): early_stops_indices = torch.stack(early_stops_indices) # Rearrange tensors to have the same order as the input batch ids = torch.arange(early_stops_indices.shape[0]) order_indices = early_stops_indices[ids] early_stops_indices = early_stops_indices[order_indices] matches, final_pruned_keypoints_indices = ( pad_sequence(tensor, batch_first=True, padding_value=-1) for tensor in [matches, final_pruned_keypoints_indices] ) matching_scores, final_pruned_keypoints_iterations = ( pad_sequence(tensor, batch_first=True, padding_value=0) for tensor in [matching_scores, final_pruned_keypoints_iterations] ) matches, matching_scores, final_pruned_keypoints_indices, final_pruned_keypoints_iterations = ( tensor[early_stops_indices] for tensor in [ matches, matching_scores, final_pruned_keypoints_indices, final_pruned_keypoints_iterations, ] ) return final_pruned_keypoints_indices, final_pruned_keypoints_iterations, matches, matching_scores def _do_final_keypoint_pruning( self, indices: torch.Tensor, matches: torch.Tensor, matching_scores: torch.Tensor, num_keypoints: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: # (batch_size, num_keypoints) -> (batch_size // 2, 2, num_keypoints) -> 2 * (batch_size // 2, num_keypoints) to # have tensors from batch_size, _ = indices.shape indices, matches, matching_scores = ( tensor.reshape(batch_size // 2, 2, -1) for tensor in [indices, matches, matching_scores] ) indices0 = indices[:, 0] indices1 = indices[:, 1] matches0 = matches[:, 0] matches1 = matches[:, 1] matching_scores0 = matching_scores[:, 0] matching_scores1 = matching_scores[:, 1] # Prepare final matches and matching scores _matches = torch.full((batch_size // 2, 2, num_keypoints), -1, device=indices.device, dtype=matches.dtype) _matching_scores = torch.zeros( (batch_size // 2, 2, num_keypoints), device=indices.device, dtype=matching_scores.dtype ) # Fill the matches and matching scores for each image pair for i in range(batch_size // 2): _matches[i, 0, indices0[i]] = torch.where( matches0[i] == -1, -1, indices1[i].gather(0, matches0[i].clamp(min=0)) ) _matches[i, 1, indices1[i]] = torch.where( matches1[i] == -1, -1, indices0[i].gather(0, matches1[i].clamp(min=0)) ) _matching_scores[i, 0, indices0[i]] = matching_scores0[i] _matching_scores[i, 1, indices1[i]] = matching_scores1[i] return _matches, _matching_scores def _match_image_pair( self, keypoints: torch.Tensor, descriptors: torch.Tensor, height: int, width: int, mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, tuple, tuple]: all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None if keypoints.shape[2] == 0: # no keypoints shape = keypoints.shape[:-1] return ( keypoints.new_full(shape, -1, dtype=torch.int), keypoints.new_zeros(shape), keypoints.new_zeros(shape), 
all_hidden_states, all_attentions, ) device = keypoints.device batch_size, _, initial_num_keypoints, _ = keypoints.shape num_points_per_pair = torch.sum(mask.reshape(batch_size, -1), dim=1) # (batch_size, 2, num_keypoints, 2) -> (batch_size * 2, num_keypoints, 2) keypoints = keypoints.reshape(batch_size * 2, initial_num_keypoints, 2) mask = mask.reshape(batch_size * 2, initial_num_keypoints) if mask is not None else None descriptors = descriptors.reshape(batch_size * 2, initial_num_keypoints, self.keypoint_detector_descriptor_dim) image_indices = torch.arange(batch_size * 2, device=device) # Keypoint normalization keypoints = normalize_keypoints(keypoints, height, width) descriptors, keypoint_encoding_output = self._keypoint_processing( descriptors, keypoints, output_hidden_states=output_hidden_states ) keypoints = keypoint_encoding_output[0] # Early stop consists of stopping the forward pass through the transformer layers when the confidence of the # keypoints is above a certain threshold. do_early_stop = self.depth_confidence > 0 # Keypoint pruning consists of removing keypoints from the input of the transformer layers when the confidence of # the keypoints is below a certain threshold. do_keypoint_pruning = self.width_confidence > 0 early_stops_indices = [] matches = [] matching_scores = [] final_pruned_keypoints_indices = [] final_pruned_keypoints_iterations = [] pruned_keypoints_indices = torch.arange(0, initial_num_keypoints, device=device).expand(batch_size * 2, -1) pruned_keypoints_iterations = torch.ones_like(pruned_keypoints_indices) for layer_index in range(self.num_layers): input_shape = descriptors.size() if mask is not None: extended_attention_mask = self.get_extended_attention_mask(mask, input_shape) else: extended_attention_mask = torch.ones((batch_size, input_shape[-2]), device=keypoints.device) layer_output = self.transformer_layers[layer_index]( descriptors, keypoints, attention_mask=extended_attention_mask, output_hidden_states=output_hidden_states, output_attentions=output_attentions, ) descriptors, hidden_states, attention = layer_output if output_hidden_states: all_hidden_states = all_hidden_states + hidden_states if output_attentions: all_attentions = all_attentions + attention if do_early_stop: if layer_index < self.num_layers - 1: # Get the confidence of the keypoints for the current layer keypoint_confidences = self.token_confidence[layer_index](descriptors) # Determine which pairs of images should be early stopped based on the confidence of the keypoints for # the current layer. early_stopped_pairs = self._get_early_stopped_image_pairs( keypoint_confidences, layer_index, mask, num_points=num_points_per_pair ) else: # Early stopping always occurs at the last layer early_stopped_pairs = torch.ones(batch_size, dtype=torch.bool) if torch.any(early_stopped_pairs): # If a pair of images is considered early stopped, we compute the matches for the remaining # keypoints and stop the forward pass through the transformer layers for this pair of images. 
early_stops = early_stopped_pairs.repeat_interleave(2) early_stopped_image_indices = image_indices[early_stops] early_stopped_matches, early_stopped_matching_scores = self._get_keypoint_matching( descriptors, mask, layer_index, early_stops=early_stops ) early_stops_indices.extend(list(early_stopped_image_indices)) matches.extend(list(early_stopped_matches)) matching_scores.extend(list(early_stopped_matching_scores)) if do_keypoint_pruning: final_pruned_keypoints_indices.extend(list(pruned_keypoints_indices[early_stops])) final_pruned_keypoints_iterations.extend(list(pruned_keypoints_iterations[early_stops])) # Remove image pairs that have been early stopped from the forward pass num_points_per_pair = num_points_per_pair[~early_stopped_pairs] descriptors, keypoints_0, keypoint_1, mask, image_indices = tuple( tensor[~early_stops] for tensor in [descriptors, keypoints[0], keypoints[1], mask, image_indices] ) keypoints = (keypoints_0, keypoint_1) if do_keypoint_pruning: pruned_keypoints_indices, pruned_keypoints_iterations, keypoint_confidences = tuple( tensor[~early_stops] for tensor in [ pruned_keypoints_indices, pruned_keypoints_iterations, keypoint_confidences, ] ) # If all pairs of images are early stopped, we stop the forward pass through the transformer # layers for all pairs of images. if torch.all(early_stopped_pairs): break if do_keypoint_pruning: # Prune keypoints from the input of the transformer layers for the next iterations if the confidence of # the keypoints is below a certain threshold. descriptors, keypoints, pruned_keypoints_indices, mask, pruned_keypoints_iterations = ( self._do_layer_keypoint_pruning( descriptors, keypoints, mask, pruned_keypoints_indices, pruned_keypoints_iterations, keypoint_confidences, layer_index, ) ) if do_early_stop and do_keypoint_pruning: # Concatenate early stopped outputs together and perform final keypoint pruning final_pruned_keypoints_indices, final_pruned_keypoints_iterations, matches, matching_scores = ( self._concat_early_stopped_outputs( early_stops_indices, final_pruned_keypoints_indices, final_pruned_keypoints_iterations, matches, matching_scores, ) ) matches, matching_scores = self._do_final_keypoint_pruning( final_pruned_keypoints_indices, matches, matching_scores, initial_num_keypoints, ) else: matches, matching_scores = self._get_keypoint_matching(descriptors, mask, self.num_layers - 1) final_pruned_keypoints_iterations = torch.ones_like(matching_scores) * self.num_layers final_pruned_keypoints_iterations = final_pruned_keypoints_iterations.reshape( batch_size, 2, initial_num_keypoints ) return ( matches, matching_scores, final_pruned_keypoints_iterations, all_hidden_states, all_attentions, ) @can_return_tuple @auto_docstring def forward( self, pixel_values: torch.FloatTensor, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> Union[tuple, "LightGlueKeypointMatchingOutput"]: loss = None if labels is not None: raise ValueError("LightGlue is not trainable, no labels should be provided.") output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if pixel_values.ndim != 5 or pixel_values.size(1) != 2: raise ValueError("Input must be a 5D tensor of shape (batch_size, 2, num_channels, height, width)") batch_size, _, channels, height, width = pixel_values.shape pixel_values = 
pixel_values.reshape(batch_size * 2, channels, height, width) keypoint_detections = self.keypoint_detector(pixel_values) keypoints, _, descriptors, mask = keypoint_detections[:4] keypoints = keypoints.reshape(batch_size, 2, -1, 2).to(pixel_values) descriptors = descriptors.reshape(batch_size, 2, -1, self.keypoint_detector_descriptor_dim).to(pixel_values) mask = mask.reshape(batch_size, 2, -1) absolute_keypoints = keypoints.clone() absolute_keypoints[:, :, :, 0] = absolute_keypoints[:, :, :, 0] * width absolute_keypoints[:, :, :, 1] = absolute_keypoints[:, :, :, 1] * height matches, matching_scores, prune, hidden_states, attentions = self._match_image_pair( absolute_keypoints, descriptors, height, width, mask=mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) return LightGlueKeypointMatchingOutput( loss=loss, matches=matches, matching_scores=matching_scores, keypoints=keypoints, prune=prune, mask=mask, hidden_states=hidden_states, attentions=attentions, ) __all__ = ["LightGluePreTrainedModel", "LightGlueForKeypointMatching"]
LightGlueForKeypointMatching
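Below is a minimal numpy sketch of the adaptive-depth rule the class above implements in _get_confidence_threshold and _get_early_stopped_image_pairs; the layer count, keypoint confidences and depth_confidence value are illustrative stand-ins, not the model's configured ones.

import numpy as np

def confidence_threshold(layer_index, num_layers):
    # Threshold decays from roughly 0.9 toward 0.8 as layers get deeper,
    # so later layers may stop on less confident keypoints.
    return np.clip(0.8 + 0.1 * np.exp(-4.0 * layer_index / num_layers), 0, 1)

def should_stop(keypoint_confidences, layer_index, num_layers, depth_confidence=0.95):
    # Stop the transformer stack once a large enough fraction of keypoints
    # is already above the layer's confidence threshold.
    threshold = confidence_threshold(layer_index, num_layers)
    ratio_confident = 1.0 - np.mean(keypoint_confidences < threshold)
    return ratio_confident > depth_confidence

fake_confidences = np.random.rand(512)  # stand-in for per-keypoint confidences
print(should_stop(fake_confidences, layer_index=3, num_layers=9))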
python
sympy__sympy
sympy/tensor/array/ndim_array.py
{ "start": 2441, "end": 18878 }
class ____(Printable): """N-dimensional array. Examples ======== Create an N-dim array of zeros: >>> from sympy import MutableDenseNDimArray >>> a = MutableDenseNDimArray.zeros(2, 3, 4) >>> a [[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] Create an N-dim array from a list; >>> a = MutableDenseNDimArray([[2, 3], [4, 5]]) >>> a [[2, 3], [4, 5]] >>> b = MutableDenseNDimArray([[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]]) >>> b [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]] Create an N-dim array from a flat list with dimension shape: >>> a = MutableDenseNDimArray([1, 2, 3, 4, 5, 6], (2, 3)) >>> a [[1, 2, 3], [4, 5, 6]] Create an N-dim array from a matrix: >>> from sympy import Matrix >>> a = Matrix([[1,2],[3,4]]) >>> a Matrix([ [1, 2], [3, 4]]) >>> b = MutableDenseNDimArray(a) >>> b [[1, 2], [3, 4]] Arithmetic operations on N-dim arrays >>> a = MutableDenseNDimArray([1, 1, 1, 1], (2, 2)) >>> b = MutableDenseNDimArray([4, 4, 4, 4], (2, 2)) >>> c = a + b >>> c [[5, 5], [5, 5]] >>> a - b [[-3, -3], [-3, -3]] """ _diff_wrt = True is_scalar = False def __new__(cls, iterable, shape=None, **kwargs): from sympy.tensor.array import ImmutableDenseNDimArray return ImmutableDenseNDimArray(iterable, shape, **kwargs) def __getitem__(self, index): raise NotImplementedError("A subclass of NDimArray should implement __getitem__") def _parse_index(self, index): if isinstance(index, (SYMPY_INTS, Integer)): if index >= self._loop_size: raise ValueError("Only a tuple index is accepted") return index if self._loop_size == 0: raise ValueError("Index not valid with an empty array") if len(index) != self._rank: raise ValueError('Wrong number of array axes') real_index = 0 # check if input index can exist in current indexing for i in range(self._rank): if (index[i] >= self.shape[i]) or (index[i] < -self.shape[i]): raise ValueError('Index ' + str(index) + ' out of border') if index[i] < 0: real_index += 1 real_index = real_index*self.shape[i] + index[i] return real_index def _get_tuple_index(self, integer_index): index = [] for sh in reversed(self.shape): index.append(integer_index % sh) integer_index //= sh index.reverse() return tuple(index) def _check_symbolic_index(self, index): # Check if any index is symbolic: tuple_index = (index if isinstance(index, tuple) else (index,)) if any((isinstance(i, Expr) and (not i.is_number)) for i in tuple_index): for i, nth_dim in zip(tuple_index, self.shape): if ((i < 0) == True) or ((i >= nth_dim) == True): raise ValueError("index out of range") from sympy.tensor import Indexed return Indexed(self, *tuple_index) return None def _setter_iterable_check(self, value): from sympy.matrices.matrixbase import MatrixBase if isinstance(value, (Iterable, MatrixBase, NDimArray)): raise NotImplementedError @classmethod def _scan_iterable_shape(cls, iterable): def f(pointer): if not isinstance(pointer, Iterable): return [pointer], () if len(pointer) == 0: return [], (0,) result = [] elems, shapes = zip(*[f(i) for i in pointer]) if len(set(shapes)) != 1: raise ValueError("could not determine shape unambiguously") for i in elems: result.extend(i) return result, (len(shapes),)+shapes[0] return f(iterable) @classmethod def _handle_ndarray_creation_inputs(cls, iterable=None, shape=None, **kwargs): from sympy.matrices.matrixbase import MatrixBase from sympy.tensor.array import SparseNDimArray if shape is None: if iterable is None: shape = () iterable = () # Construction of a sparse array from a sparse array elif isinstance(iterable, 
SparseNDimArray): return iterable._shape, iterable._sparse_array # Construct N-dim array from another N-dim array: elif isinstance(iterable, NDimArray): shape = iterable.shape # Construct N-dim array from an iterable (numpy arrays included): elif isinstance(iterable, Iterable): iterable, shape = cls._scan_iterable_shape(iterable) # Construct N-dim array from a Matrix: elif isinstance(iterable, MatrixBase): shape = iterable.shape else: shape = () iterable = (iterable,) if isinstance(iterable, (Dict, dict)) and shape is not None: new_dict = iterable.copy() for k in new_dict: if isinstance(k, (tuple, Tuple)): new_key = 0 for i, idx in enumerate(k): new_key = new_key * shape[i] + idx iterable[new_key] = iterable[k] del iterable[k] if isinstance(shape, (SYMPY_INTS, Integer)): shape = (shape,) if not all(isinstance(dim, (SYMPY_INTS, Integer)) for dim in shape): raise TypeError("Shape should contain integers only.") return tuple(shape), iterable def __len__(self): """Overload common function len(). Returns number of elements in array. Examples ======== >>> from sympy import MutableDenseNDimArray >>> a = MutableDenseNDimArray.zeros(3, 3) >>> a [[0, 0, 0], [0, 0, 0], [0, 0, 0]] >>> len(a) 9 """ return self._loop_size @property def shape(self): """ Returns array shape (dimension). Examples ======== >>> from sympy import MutableDenseNDimArray >>> a = MutableDenseNDimArray.zeros(3, 3) >>> a.shape (3, 3) """ return self._shape def rank(self): """ Returns rank of array. Examples ======== >>> from sympy import MutableDenseNDimArray >>> a = MutableDenseNDimArray.zeros(3,4,5,6,3) >>> a.rank() 5 """ return self._rank def diff(self, *args, **kwargs): """ Calculate the derivative of each element in the array. Examples ======== >>> from sympy import ImmutableDenseNDimArray >>> from sympy.abc import x, y >>> M = ImmutableDenseNDimArray([[x, y], [1, x*y]]) >>> M.diff(x) [[1, 0], [0, y]] """ from sympy.tensor.array.array_derivatives import ArrayDerivative kwargs.setdefault('evaluate', True) return ArrayDerivative(self.as_immutable(), *args, **kwargs) def _eval_derivative(self, base): # Types are (base: scalar, self: array) return self.applyfunc(lambda x: base.diff(x)) def _eval_derivative_n_times(self, s, n): return Basic._eval_derivative_n_times(self, s, n) def applyfunc(self, f): """Apply a function to each element of the N-dim array. 
Examples ======== >>> from sympy import ImmutableDenseNDimArray >>> m = ImmutableDenseNDimArray([i*2+j for i in range(2) for j in range(2)], (2, 2)) >>> m [[0, 1], [2, 3]] >>> m.applyfunc(lambda i: 2*i) [[0, 2], [4, 6]] """ from sympy.tensor.array import SparseNDimArray from sympy.tensor.array.arrayop import Flatten if isinstance(self, SparseNDimArray) and f(S.Zero) == 0: return type(self)({k: f(v) for k, v in self._sparse_array.items() if f(v) != 0}, self.shape) return type(self)(map(f, Flatten(self)), self.shape) def _sympystr(self, printer): def f(sh, shape_left, i, j): if len(shape_left) == 1: return "["+", ".join([printer._print(self[self._get_tuple_index(e)]) for e in range(i, j)])+"]" sh //= shape_left[0] return "[" + ", ".join([f(sh, shape_left[1:], i+e*sh, i+(e+1)*sh) for e in range(shape_left[0])]) + "]" # + "\n"*len(shape_left) if self.rank() == 0: return printer._print(self[()]) if 0 in self.shape: return f"{self.__class__.__name__}([], {self.shape})" return f(self._loop_size, self.shape, 0, self._loop_size) def tolist(self): """ Converting MutableDenseNDimArray to one-dim list Examples ======== >>> from sympy import MutableDenseNDimArray >>> a = MutableDenseNDimArray([1, 2, 3, 4], (2, 2)) >>> a [[1, 2], [3, 4]] >>> b = a.tolist() >>> b [[1, 2], [3, 4]] """ def f(sh, shape_left, i, j): if len(shape_left) == 1: return [self[self._get_tuple_index(e)] for e in range(i, j)] result = [] sh //= shape_left[0] for e in range(shape_left[0]): result.append(f(sh, shape_left[1:], i+e*sh, i+(e+1)*sh)) return result return f(self._loop_size, self.shape, 0, self._loop_size) def __add__(self, other): from sympy.tensor.array.arrayop import Flatten if not isinstance(other, NDimArray): return NotImplemented if self.shape != other.shape: raise ValueError("array shape mismatch") result_list = [i+j for i,j in zip(Flatten(self), Flatten(other))] return type(self)(result_list, self.shape) def __sub__(self, other): from sympy.tensor.array.arrayop import Flatten if not isinstance(other, NDimArray): return NotImplemented if self.shape != other.shape: raise ValueError("array shape mismatch") result_list = [i-j for i,j in zip(Flatten(self), Flatten(other))] return type(self)(result_list, self.shape) def __mul__(self, other): from sympy.matrices.matrixbase import MatrixBase from sympy.tensor.array import SparseNDimArray from sympy.tensor.array.arrayop import Flatten if isinstance(other, (Iterable, NDimArray, MatrixBase)): raise ValueError("scalar expected, use tensorproduct(...) for tensorial product") other = sympify(other) if isinstance(self, SparseNDimArray): if other.is_zero: return type(self)({}, self.shape) return type(self)({k: other*v for (k, v) in self._sparse_array.items()}, self.shape) result_list = [i*other for i in Flatten(self)] return type(self)(result_list, self.shape) def __rmul__(self, other): from sympy.matrices.matrixbase import MatrixBase from sympy.tensor.array import SparseNDimArray from sympy.tensor.array.arrayop import Flatten if isinstance(other, (Iterable, NDimArray, MatrixBase)): raise ValueError("scalar expected, use tensorproduct(...) 
for tensorial product") other = sympify(other) if isinstance(self, SparseNDimArray): if other.is_zero: return type(self)({}, self.shape) return type(self)({k: other*v for (k, v) in self._sparse_array.items()}, self.shape) result_list = [other*i for i in Flatten(self)] return type(self)(result_list, self.shape) def __truediv__(self, other): from sympy.matrices.matrixbase import MatrixBase from sympy.tensor.array import SparseNDimArray from sympy.tensor.array.arrayop import Flatten if isinstance(other, (Iterable, NDimArray, MatrixBase)): raise ValueError("scalar expected") other = sympify(other) if isinstance(self, SparseNDimArray) and other != S.Zero: return type(self)({k: v/other for (k, v) in self._sparse_array.items()}, self.shape) result_list = [i/other for i in Flatten(self)] return type(self)(result_list, self.shape) def __rtruediv__(self, other): raise NotImplementedError('unsupported operation on NDimArray') def __neg__(self): from sympy.tensor.array import SparseNDimArray from sympy.tensor.array.arrayop import Flatten if isinstance(self, SparseNDimArray): return type(self)({k: -v for (k, v) in self._sparse_array.items()}, self.shape) result_list = [-i for i in Flatten(self)] return type(self)(result_list, self.shape) def __iter__(self): def iterator(): if self._shape: for i in range(self._shape[0]): yield self[i] else: yield self[()] return iterator() def __eq__(self, other): """ NDimArray instances can be compared to each other. Instances equal if they have same shape and data. Examples ======== >>> from sympy import MutableDenseNDimArray >>> a = MutableDenseNDimArray.zeros(2, 3) >>> b = MutableDenseNDimArray.zeros(2, 3) >>> a == b True >>> c = a.reshape(3, 2) >>> c == b False >>> a[0,0] = 1 >>> b[0,0] = 2 >>> a == b False """ from sympy.tensor.array import SparseNDimArray if not isinstance(other, NDimArray): return False if not self.shape == other.shape: return False if isinstance(self, SparseNDimArray) and isinstance(other, SparseNDimArray): return dict(self._sparse_array) == dict(other._sparse_array) return list(self) == list(other) def __ne__(self, other): return not self == other def _eval_transpose(self): if self.rank() != 2: raise ValueError("array rank not 2") from .arrayop import permutedims return permutedims(self, (1, 0)) def transpose(self): return self._eval_transpose() def _eval_conjugate(self): from sympy.tensor.array.arrayop import Flatten return self.func([i.conjugate() for i in Flatten(self)], self.shape) def conjugate(self): return self._eval_conjugate() def _eval_adjoint(self): return self.transpose().conjugate() def adjoint(self): return self._eval_adjoint() def _slice_expand(self, s, dim): if not isinstance(s, slice): return (s,) start, stop, step = s.indices(dim) return [start + i*step for i in range((stop-start)//step)] def _get_slice_data_for_array_access(self, index): sl_factors = [self._slice_expand(i, dim) for (i, dim) in zip(index, self.shape)] eindices = itertools.product(*sl_factors) return sl_factors, eindices def _get_slice_data_for_array_assignment(self, index, value): if not isinstance(value, NDimArray): value = type(self)(value) sl_factors, eindices = self._get_slice_data_for_array_access(index) slice_offsets = [min(i) if isinstance(i, list) else None for i in sl_factors] # TODO: add checks for dimensions for `value`? 
return value, eindices, slice_offsets @classmethod def _check_special_bounds(cls, flat_list, shape): if shape == () and len(flat_list) != 1: raise ValueError("arrays without shape need one scalar value") if shape == (0,) and len(flat_list) > 0: raise ValueError("if array shape is (0,) there cannot be elements") def _check_index_for_getitem(self, index): if isinstance(index, (SYMPY_INTS, Integer, slice)): index = (index,) if len(index) < self.rank(): index = tuple(index) + \ tuple(slice(None) for i in range(len(index), self.rank())) if len(index) > self.rank(): raise ValueError('Dimension of index greater than rank of array') return index
NDimArray
python
kamyu104__LeetCode-Solutions
Python/minimum-number-of-seconds-to-make-mountain-height-zero.py
{ "start": 135, "end": 1105 }
class ____(object):
    def minNumberOfSeconds(self, mountainHeight, workerTimes):
        """
        :type mountainHeight: int
        :type workerTimes: List[int]
        :rtype: int
        """
        def binary_search(left, right, check):
            while left <= right:
                mid = left + (right-left)//2
                if check(mid):
                    right = mid-1
                else:
                    left = mid+1
            return left

        def check(target):
            # t * (1 + 2 + 3 + ... + x) <= target
            # t * (x+1)*x/2 <= target
            # x^2+x-2*target/t <= 0
            # x <= (-1+(1+8*target/t)**0.5)/2
            return sum(int((-1+(1+8*target/t)**0.5)/2) for t in workerTimes) >= mountainHeight

        mn = min(workerTimes)
        left, right = mn, mn*(mountainHeight+1)*mountainHeight//2
        return binary_search(left, right, check)


# Time: O(hlogn)
# Space: O(n)
import heapq


# heap
Solution
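A quick usage sketch of the sample above, assuming it is instantiated under its target name Solution; the outputs follow from tracing the closed-form check (a worker with time t can remove x units in t*x*(x+1)/2 seconds).

sol = Solution()
print(sol.minNumberOfSeconds(4, [2, 1, 1]))      # 3: the workers remove 1, 1 and 2 units
print(sol.minNumberOfSeconds(10, [3, 2, 2, 4]))  # 12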
python
doocs__leetcode
solution/2000-2099/2079.Watering Plants/Solution.py
{ "start": 0, "end": 345 }
class ____:
    def wateringPlants(self, plants: List[int], capacity: int) -> int:
        ans, water = 0, capacity
        for i, p in enumerate(plants):
            if water >= p:
                water -= p
                ans += 1
            else:
                water = capacity - p
                ans += i * 2 + 1
        return ans
Solution
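A short usage sketch for the sample above (again assuming its target name Solution); refilling from position i costs i steps back to the river plus i + 1 steps forward, which is where the i * 2 + 1 term comes from.

sol = Solution()
print(sol.wateringPlants([2, 2, 3, 3], capacity=5))        # 14
print(sol.wateringPlants([1, 1, 1, 4, 2, 3], capacity=4))  # 30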
python
walkccc__LeetCode
solutions/3386. Button with Longest Push Time/3386.py
{ "start": 0, "end": 370 }
class ____:
    def buttonWithLongestTime(self, events: list[list[int]]) -> int:
        ans = 0
        maxTimeTaken = 0
        prevTime = 0

        for index, time in events:
            timeTaken = time - prevTime
            if timeTaken > maxTimeTaken or timeTaken == maxTimeTaken and index < ans:
                maxTimeTaken = timeTaken
                ans = index
            prevTime = time

        return ans
Solution
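Usage sketch for the sample above (assuming its target name Solution); ties on push time are broken in favour of the smaller button index.

sol = Solution()
print(sol.buttonWithLongestTime([[1, 2], [2, 5], [3, 9], [1, 15]]))  # 1: the longest single press lasts 6
print(sol.buttonWithLongestTime([[10, 5], [1, 7]]))                  # 10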
python
pytorch__pytorch
test/test_jit.py
{ "start": 15141, "end": 17448 }
class ____(JitTestCase): """ This runs tests that requires setting some global states like torch._C._set_graph_executor_optimize and restore the values afterward, i.e. test_profiler. This is to address the flaky issue in https://github.com/pytorch/pytorch/issues/91483 in which test_profiler was flaky and failed in the middle without the chance to restore torch._C._set_graph_executor_optimize to its original value. This causes issues for all future tests running after. Using a separate test class here, so that there is no need to run setup and teardown for all tests in TestJit. """ def setUp(self): super().setUp() self.graph_executor_optimize_opt = torch._C._get_graph_executor_optimize() def tearDown(self): super().tearDown() # Resetting torch._C._set_graph_executor_optimize( self.graph_executor_optimize_opt ) def test_profiler(self): torch._C._set_graph_executor_optimize(False) def other_fn(x): return x * 2 x = torch.rand(3, 4) traced_other_fn = torch.jit.trace(other_fn, x) def fn(x): y = traced_other_fn(x) fut = torch.jit._fork(traced_other_fn, x) y = torch.jit._wait(fut) return y traced_fn = torch.jit.trace(fn, x) with torch.autograd.profiler.profile() as prof: traced_fn(x) # expecting to see other_fn TS function call # with cpu time >= mul cpu time and # a forked other_fn mul_events = defaultdict(int) other_fn_events = defaultdict(int) for e in prof.function_events: if e.name == "aten::mul": self.assertTrue(e.thread not in mul_events) mul_events[e.thread] = e.time_range.elapsed_us() elif e.name == "other_fn": self.assertTrue(e.thread not in other_fn_events) other_fn_events[e.thread] = e.time_range.elapsed_us() self.assertTrue(len(mul_events) == 2) self.assertTrue(len(other_fn_events) == 2) for thread, mul_time in mul_events.items(): self.assertTrue(thread in other_fn_events) self.assertTrue(other_fn_events[thread] >= mul_time)
TestJitProfiler
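A condensed sketch of the save-and-restore pattern the class above relies on, reusing the same torch internals it calls; it assumes torch is importable and does not try to reproduce the full profiler test.

import unittest
import torch

class GraphExecutorStateTest(unittest.TestCase):
    def setUp(self):
        # Remember the process-wide flag before the test changes it.
        self._saved_opt = torch._C._get_graph_executor_optimize()

    def tearDown(self):
        # Restore it even if the test body failed partway through.
        torch._C._set_graph_executor_optimize(self._saved_opt)

    def test_runs_without_optimization(self):
        torch._C._set_graph_executor_optimize(False)
        self.assertFalse(torch._C._get_graph_executor_optimize())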
python
openai__openai-python
src/openai/types/webhooks/batch_failed_webhook_event.py
{ "start": 323, "end": 751 }
class ____(BaseModel):
    id: str
    """The unique ID of the event."""

    created_at: int
    """The Unix timestamp (in seconds) of when the batch API request failed."""

    data: Data
    """Event data payload."""

    type: Literal["batch.failed"]
    """The type of the event. Always `batch.failed`."""

    object: Optional[Literal["event"]] = None
    """The object of the event. Always `event`."""
BatchFailedWebhookEvent
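A self-contained pydantic sketch that mirrors the event shape above; the Data payload and its single id field are assumptions for illustration, not the SDK's actual definitions.

from typing import Literal, Optional
from pydantic import BaseModel

class Data(BaseModel):
    id: str  # assumption: the ID of the failed batch

class BatchFailedEvent(BaseModel):
    id: str
    created_at: int
    data: Data
    type: Literal["batch.failed"]
    object: Optional[Literal["event"]] = None

event = BatchFailedEvent(
    id="evt_123",
    created_at=1700000000,
    data=Data(id="batch_abc"),
    type="batch.failed",
)
print(event.type)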
python
django__django
django/db/migrations/operations/models.py
{ "start": 15306, "end": 19665 }
class ____(ModelOperation): """Rename a model.""" category = OperationCategory.ALTERATION def __init__(self, old_name, new_name): self.old_name = old_name self.new_name = new_name super().__init__(old_name) @cached_property def old_name_lower(self): return self.old_name.lower() @cached_property def new_name_lower(self): return self.new_name.lower() def deconstruct(self): kwargs = { "old_name": self.old_name, "new_name": self.new_name, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.rename_model(app_label, self.old_name, self.new_name) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.new_name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.old_name) # Move the main table schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Alter the fields pointing to us for related_object in old_model._meta.related_objects: if related_object.related_model == old_model: model = new_model related_key = (app_label, self.new_name_lower) else: related_key = ( related_object.related_model._meta.app_label, related_object.related_model._meta.model_name, ) model = to_state.apps.get_model(*related_key) to_field = to_state.apps.get_model(*related_key)._meta.get_field( related_object.field.name ) schema_editor.alter_field( model, related_object.field, to_field, ) # Rename M2M fields whose name is based on this model's name. fields = zip( old_model._meta.local_many_to_many, new_model._meta.local_many_to_many ) for old_field, new_field in fields: # Skip self-referential fields as these are renamed above. if ( new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created ): continue # Rename columns and the M2M table. schema_editor._alter_many_to_many( new_model, old_field, new_field, strict=False, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.new_name_lower, self.old_name_lower = ( self.old_name_lower, self.new_name_lower, ) self.new_name, self.old_name = self.old_name, self.new_name self.database_forwards(app_label, schema_editor, from_state, to_state) self.new_name_lower, self.old_name_lower = ( self.old_name_lower, self.new_name_lower, ) self.new_name, self.old_name = self.old_name, self.new_name def references_model(self, name, app_label): return ( name.lower() == self.old_name_lower or name.lower() == self.new_name_lower ) def describe(self): return "Rename model %s to %s" % (self.old_name, self.new_name) @property def migration_name_fragment(self): return "rename_%s_%s" % (self.old_name_lower, self.new_name_lower) def reduce(self, operation, app_label): if ( isinstance(operation, RenameModel) and self.new_name_lower == operation.old_name_lower ): return [replace(self, new_name=operation.new_name)] # Skip `ModelOperation.reduce` as we want to run `references_model` # against self.new_name. return super(ModelOperation, self).reduce( operation, app_label ) or not operation.references_model(self.new_name, app_label)
RenameModel
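For context, this is how the operation above typically appears inside a migration file; the app and model names are placeholders.

from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [("shop", "0002_previous")]

    operations = [
        migrations.RenameModel(old_name="Item", new_name="Product"),
    ]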
python
matplotlib__matplotlib
lib/matplotlib/_type1font.py
{ "start": 2095, "end": 2261 }
class ____(_Token):
    kind = 'name'

    def is_slash_name(self):
        return self.raw.startswith('/')

    def value(self):
        return self.raw[1:]
_NameToken
python
django__django
tests/known_related_objects/tests.py
{ "start": 141, "end": 8039 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.t1 = Tournament.objects.create(name="Tourney 1") cls.t2 = Tournament.objects.create(name="Tourney 2") cls.o1 = Organiser.objects.create(name="Organiser 1") cls.p1 = Pool.objects.create( name="T1 Pool 1", tournament=cls.t1, organiser=cls.o1 ) cls.p2 = Pool.objects.create( name="T1 Pool 2", tournament=cls.t1, organiser=cls.o1 ) cls.p3 = Pool.objects.create( name="T2 Pool 1", tournament=cls.t2, organiser=cls.o1 ) cls.p4 = Pool.objects.create( name="T2 Pool 2", tournament=cls.t2, organiser=cls.o1 ) cls.ps1 = PoolStyle.objects.create(name="T1 Pool 2 Style", pool=cls.p2) cls.ps2 = PoolStyle.objects.create(name="T2 Pool 1 Style", pool=cls.p3) cls.ps3 = PoolStyle.objects.create( name="T1 Pool 1/3 Style", pool=cls.p1, another_pool=cls.p3 ) def test_foreign_key(self): with self.assertNumQueries(2): tournament = Tournament.objects.get(pk=self.t1.pk) pool = tournament.pool_set.all()[0] self.assertIs(tournament, pool.tournament) def test_foreign_key_prefetch_related(self): with self.assertNumQueries(2): tournament = Tournament.objects.prefetch_related("pool_set").get( pk=self.t1.pk ) pool = tournament.pool_set.all()[0] self.assertIs(tournament, pool.tournament) def test_foreign_key_multiple_prefetch(self): with self.assertNumQueries(2): tournaments = list( Tournament.objects.prefetch_related("pool_set").order_by("pk") ) pool1 = tournaments[0].pool_set.all()[0] self.assertIs(tournaments[0], pool1.tournament) pool2 = tournaments[1].pool_set.all()[0] self.assertIs(tournaments[1], pool2.tournament) def test_queryset_or(self): tournament_1 = self.t1 tournament_2 = self.t2 with self.assertNumQueries(1): pools = tournament_1.pool_set.all() | tournament_2.pool_set.all() related_objects = {pool.tournament for pool in pools} self.assertEqual(related_objects, {tournament_1, tournament_2}) def test_queryset_or_different_cached_items(self): tournament = self.t1 organiser = self.o1 with self.assertNumQueries(1): pools = tournament.pool_set.all() | organiser.pool_set.all() first = pools.filter(pk=self.p1.pk)[0] self.assertIs(first.tournament, tournament) self.assertIs(first.organiser, organiser) def test_queryset_or_only_one_with_precache(self): tournament_1 = self.t1 tournament_2 = self.t2 # 2 queries here as pool 3 has tournament 2, which is not cached with self.assertNumQueries(2): pools = tournament_1.pool_set.all() | Pool.objects.filter(pk=self.p3.pk) related_objects = {pool.tournament for pool in pools} self.assertEqual(related_objects, {tournament_1, tournament_2}) # and the other direction with self.assertNumQueries(2): pools = Pool.objects.filter(pk=self.p3.pk) | tournament_1.pool_set.all() related_objects = {pool.tournament for pool in pools} self.assertEqual(related_objects, {tournament_1, tournament_2}) def test_queryset_and(self): tournament = self.t1 organiser = self.o1 with self.assertNumQueries(1): pools = tournament.pool_set.all() & organiser.pool_set.all() first = pools.filter(pk=self.p1.pk)[0] self.assertIs(first.tournament, tournament) self.assertIs(first.organiser, organiser) def test_one_to_one(self): with self.assertNumQueries(2): style = PoolStyle.objects.get(pk=self.ps1.pk) pool = style.pool self.assertIs(style, pool.poolstyle) def test_one_to_one_select_related(self): with self.assertNumQueries(1): style = PoolStyle.objects.select_related("pool").get(pk=self.ps1.pk) pool = style.pool self.assertIs(style, pool.poolstyle) def test_one_to_one_multi_select_related(self): with self.assertNumQueries(1): poolstyles = 
list(PoolStyle.objects.select_related("pool").order_by("pk")) self.assertIs(poolstyles[0], poolstyles[0].pool.poolstyle) self.assertIs(poolstyles[1], poolstyles[1].pool.poolstyle) def test_one_to_one_prefetch_related(self): with self.assertNumQueries(2): style = PoolStyle.objects.prefetch_related("pool").get(pk=self.ps1.pk) pool = style.pool self.assertIs(style, pool.poolstyle) def test_one_to_one_multi_prefetch_related(self): with self.assertNumQueries(2): poolstyles = list(PoolStyle.objects.prefetch_related("pool").order_by("pk")) self.assertIs(poolstyles[0], poolstyles[0].pool.poolstyle) self.assertIs(poolstyles[1], poolstyles[1].pool.poolstyle) def test_reverse_one_to_one(self): with self.assertNumQueries(2): pool = Pool.objects.get(pk=self.p2.pk) style = pool.poolstyle self.assertIs(pool, style.pool) def test_reverse_one_to_one_select_related(self): with self.assertNumQueries(1): pool = Pool.objects.select_related("poolstyle").get(pk=self.p2.pk) style = pool.poolstyle self.assertIs(pool, style.pool) def test_reverse_one_to_one_prefetch_related(self): with self.assertNumQueries(2): pool = Pool.objects.prefetch_related("poolstyle").get(pk=self.p2.pk) style = pool.poolstyle self.assertIs(pool, style.pool) def test_reverse_one_to_one_multi_select_related(self): with self.assertNumQueries(1): pools = list(Pool.objects.select_related("poolstyle").order_by("pk")) self.assertIs(pools[1], pools[1].poolstyle.pool) self.assertIs(pools[2], pools[2].poolstyle.pool) def test_reverse_one_to_one_multi_prefetch_related(self): with self.assertNumQueries(2): pools = list(Pool.objects.prefetch_related("poolstyle").order_by("pk")) self.assertIs(pools[1], pools[1].poolstyle.pool) self.assertIs(pools[2], pools[2].poolstyle.pool) def test_reverse_fk_select_related_multiple(self): with self.assertNumQueries(1): ps = list( PoolStyle.objects.annotate( pool_1=FilteredRelation("pool"), pool_2=FilteredRelation("another_pool"), ) .select_related("pool_1", "pool_2") .order_by("-pk") ) self.assertIs(ps[0], ps[0].pool_1.poolstyle) self.assertIs(ps[0], ps[0].pool_2.another_style) def test_multilevel_reverse_fk_cyclic_select_related(self): with self.assertNumQueries(3): p = list( PoolStyle.objects.annotate( tournament_pool=FilteredRelation("pool__tournament__pool"), ).select_related("tournament_pool", "tournament_pool__tournament") ) self.assertEqual(p[0].tournament_pool.tournament, p[0].pool.tournament) def test_multilevel_reverse_fk_select_related(self): with self.assertNumQueries(2): p = list( Tournament.objects.filter(id=self.t2.id) .annotate( style=FilteredRelation("pool__another_style"), ) .select_related("style") ) self.assertEqual(p[0].style.another_pool, self.p3)
ExistingRelatedInstancesTests
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/batch_prediction_job.py
{ "start": 21573, "end": 24961 }
class ____(GoogleCloudBaseOperator): """ Gets a BatchPredictionJob. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param batch_prediction_job: Required. The name of the BatchPredictionJob resource. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. :param gcp_conn_id: The connection ID to use connecting to Google Cloud. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). """ template_fields = ("region", "project_id", "impersonation_chain") operator_extra_links = (VertexAIBatchPredictionJobLink(),) def __init__( self, *, region: str, project_id: str, batch_prediction_job: str, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.region = region self.project_id = project_id self.batch_prediction_job = batch_prediction_job self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain @property def extra_links_params(self) -> dict[str, Any]: return { "region": self.region, "project_id": self.project_id, } def execute(self, context: Context): hook = BatchPredictionJobHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, ) try: self.log.info("Get batch prediction job: %s", self.batch_prediction_job) result = hook.get_batch_prediction_job( project_id=self.project_id, region=self.region, batch_prediction_job=self.batch_prediction_job, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) self.log.info("Batch prediction job was gotten.") VertexAIBatchPredictionJobLink.persist( context=context, batch_prediction_job_id=self.batch_prediction_job, ) return BatchPredictionJob.to_dict(result) except NotFound: self.log.info("The Batch prediction job %s does not exist.", self.batch_prediction_job)
GetBatchPredictionJobOperator
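A hedged sketch of wiring the operator above into an Airflow 2.x DAG; the project, region and job ID values are placeholders, and the import path is inferred from the file path shown in this row.

import pendulum
from airflow import DAG
from airflow.providers.google.cloud.operators.vertex_ai.batch_prediction_job import (
    GetBatchPredictionJobOperator,
)

with DAG(
    dag_id="vertex_ai_get_batch_prediction_job",
    start_date=pendulum.datetime(2024, 1, 1, tz="UTC"),
    schedule=None,
) as dag:
    get_job = GetBatchPredictionJobOperator(
        task_id="get_batch_prediction_job",
        project_id="my-gcp-project",
        region="us-central1",
        batch_prediction_job="1234567890",
    )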
python
aio-libs__aiohttp
aiohttp/web_exceptions.py
{ "start": 9615, "end": 9683 }
class ____(HTTPClientError):
    status_code = 429
HTTPTooManyRequests
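A minimal aiohttp sketch that raises the exception defined above from a handler; the per-client counter is a toy stand-in for a real rate limiter.

from aiohttp import web

_hits = {}

async def handler(request):
    count = _hits.get(request.remote, 0) + 1
    _hits[request.remote] = count
    if count > 100:
        # 429 response with a hint on when to retry
        raise web.HTTPTooManyRequests(headers={"Retry-After": "60"})
    return web.Response(text="ok")

app = web.Application()
app.router.add_get("/", handler)
# web.run_app(app)  # uncomment to actually serve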
python
cython__cython
tests/run/pep526_variable_annotations.py
{ "start": 2688, "end": 2802 }
class ____(Generic[T]):
    def __init__(self, content):
        self.content: T = content


box = Box(content=5)
Box
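A small typing-only sketch of how such a generic container is parameterised; nothing here is Cython-specific.

from typing import Generic, TypeVar

T = TypeVar("T")

class Box(Generic[T]):
    def __init__(self, content: T) -> None:
        self.content: T = content

int_box: Box[int] = Box(content=5)
str_box: Box[str] = Box(content="hello")
print(int_box.content, str_box.content)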
python
pypa__warehouse
tests/unit/accounts/test_views.py
{ "start": 143688, "end": 143913 }
class ____:
    def test_profile_public_email_returns_user(self):
        user = pretend.stub()
        request = pretend.stub()
        assert views.profile_public_email(user, request) == {"user": user}
TestProfilePublicEmail
python
huggingface__transformers
tests/quantization/bnb/test_mixed_int8.py
{ "start": 37753, "end": 39068 }
class ____(MixedInt8Test): model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0" EXPECTED_RELATIVE_DIFFERENCE = 1.7869331026479096 EXPECTED_OUTPUTS = set() # Expected on Intel XPU EXPECTED_OUTPUTS.add("Hello my name is John Smith and I am a software engineer. I") # Expected on NVIDIA T4 EXPECTED_OUTPUTS.add("Hello my name is John and I am a software engineer. I have") def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "Jiqing/TinyLlama-1.1B-Chat-v1.0-bnb-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(torch_device), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) @require_bitsandbytes @require_accelerate @require_torch @slow @apply_skip_if_not_implemented
MixedInt8LlamaTest
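A hedged sketch of the 8-bit loading path this test exercises; it assumes a CUDA or XPU device with the bitsandbytes and accelerate packages installed, and reuses the checkpoint name from the test.

from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
quant_config = BitsAndBytesConfig(load_in_8bit=True)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quant_config)

inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))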
python
jazzband__django-model-utils
tests/models.py
{ "start": 7034, "end": 7273 }
class ____(models.Model):
    fk = models.ForeignKey('Tracked', on_delete=models.CASCADE)

    tracker = FieldTracker()
    custom_tracker = FieldTracker(fields=['fk_id'])
    custom_tracker_without_id = FieldTracker(fields=['fk'])
TrackedFK
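A standalone sketch of the FieldTracker API the model above wires up; the Article model and app label are invented for illustration, and the commented queries assume a configured database.

from django.db import models
from model_utils import FieldTracker

class Article(models.Model):
    title = models.CharField(max_length=100)
    tracker = FieldTracker()

    class Meta:
        app_label = "demo"  # assumption: an installed app to attach the model to

# Typical usage (requires a configured database):
# a = Article.objects.create(title="draft")
# a.title = "final"
# a.tracker.has_changed("title")   # True
# a.tracker.previous("title")      # "draft"
# a.save()
# a.tracker.has_changed("title")   # False again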
python
PyCQA__pylint
tests/functional/i/inherit_non_class.py
{ "start": 1521, "end": 1700 }
class ____(memoryview):  # [inherit-non-class]
    pass


# Subscription of parent class that implements __class_getitem__
# and returns cls should be allowed.
NotInheritableMemoryView
python
tensorflow__tensorflow
tensorflow/python/compiler/tensorrt/test/trt_mode_test.py
{ "start": 1085, "end": 3458 }
class ____(trt_test.TfTrtIntegrationTestBase): """Test squeeze on batch dim and some unary operations in TF-TRT.""" def GraphFn(self, x1): q = math_ops.abs(x1) q = q + 1.0 q = q * 3.0 q = array_ops.squeeze(q, 0) q = math_ops.abs(q) q = q + 5.0 return array_ops.identity(q, name="output_0") def ShouldRunTest(self, run_params): # Squeeze op produces dynamic shaped values. Therefore, we don't run the # test with static engine to avoid native segment execution. return (run_params.dynamic_engine and run_params.is_v2 and not run_params.use_calibration, "test v2 dynamic engine and " "non-calibration") def GetParams(self): """The input has 1 as a first dimension, which is removed by the squeeze. op in the graph. In explicit batch mode, TensorRT can convert the whole graph. In this mode it is possible to manipulate the batch dimension using the squeeze op. In implicit batch mode TensorRT cannot convert the whole graph. We are not allowed to manipulate (squeeze) the first dimension in implicit batch mode. Therefore the graph will be converted using multiple segments. """ return self.BuildParams(self.GraphFn, dtypes.float32, [[1, 12, 5]], [[12, 5]]) def GetMaxBatchSize(self, run_params): if run_params.dynamic_engine: return None # The first dimension of the input is squeezed and the batch size for the # rest OPs is 12. return 12 @classmethod def setUpClass(cls): if cls is TrtModeTestBase: raise SkipTest("TrtModeTestBase defines base class for other test.") super(TrtModeTestBase, cls).setUpClass() def ExpectedEnginesToBuild(self, run_params): """Check that the expected engine is built. Args: run_params: the run parameters. Returns: the expected engines to build. The squeeze op is not converted by TensorRT in implicit batch mode. Because of this we have two TRTEngineOp in the graphs: one for the subgraph before 'squeeze(q,0)', and another one for the rest of the ops after the 'squeeze(q,0)'. In explicit batch mode the whole graph is converted using a single engine. """ if run_params.dynamic_shape: return ["TRTEngineOp_000"] else: return ["TRTEngineOp_000", "TRTEngineOp_001"]
TrtModeTestBase
python
sympy__sympy
sympy/sets/powerset.py
{ "start": 257, "end": 2913 }
class ____(Set): r"""A symbolic object representing a power set. Parameters ========== arg : Set The set to take power of. evaluate : bool The flag to control evaluation. If the evaluation is disabled for finite sets, it can take advantage of using subset test as a membership test. Notes ===== Power set `\mathcal{P}(S)` is defined as a set containing all the subsets of `S`. If the set `S` is a finite set, its power set would have `2^{\left| S \right|}` elements, where `\left| S \right|` denotes the cardinality of `S`. Examples ======== >>> from sympy import PowerSet, S, FiniteSet A power set of a finite set: >>> PowerSet(FiniteSet(1, 2, 3)) PowerSet({1, 2, 3}) A power set of an empty set: >>> PowerSet(S.EmptySet) PowerSet(EmptySet) >>> PowerSet(PowerSet(S.EmptySet)) PowerSet(PowerSet(EmptySet)) A power set of an infinite set: >>> PowerSet(S.Reals) PowerSet(Reals) Evaluating the power set of a finite set to its explicit form: >>> PowerSet(FiniteSet(1, 2, 3)).rewrite(FiniteSet) FiniteSet(EmptySet, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}) References ========== .. [1] https://en.wikipedia.org/wiki/Power_set .. [2] https://en.wikipedia.org/wiki/Axiom_of_power_set """ def __new__(cls, arg, evaluate=None): if evaluate is None: evaluate=global_parameters.evaluate arg = _sympify(arg) if not isinstance(arg, Set): raise ValueError('{} must be a set.'.format(arg)) return super().__new__(cls, arg) @property def arg(self): return self.args[0] def _eval_rewrite_as_FiniteSet(self, *args, **kwargs): arg = self.arg if arg.is_FiniteSet: return arg.powerset() return None @_sympifyit('other', NotImplemented) def _contains(self, other): if not isinstance(other, Set): return None return fuzzy_bool(self.arg.is_superset(other)) def _eval_is_subset(self, other): if isinstance(other, PowerSet): return self.arg.is_subset(other.arg) def __len__(self): return 2 ** len(self.arg) def __iter__(self): found = [S.EmptySet] yield S.EmptySet for x in self.arg: temp = [] x = FiniteSet(x) for y in found: new = x + y yield new temp.append(new) found.extend(temp) @property def kind(self): return SetKind(self.arg.kind)
PowerSet
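A quick check of the membership rule described in the docstring above: a set belongs to PowerSet(S) exactly when it is a subset of S.

from sympy import FiniteSet, PowerSet, S

P = PowerSet(FiniteSet(1, 2, 3))
print(P.contains(FiniteSet(1, 2)))  # True
print(P.contains(FiniteSet(4)))     # False
print(S.EmptySet in P)              # True: the empty set is a subset of every set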
python
django__django
tests/get_or_create/models.py
{ "start": 1336, "end": 1678 }
class ____(models.Model):
    name = models.CharField(max_length=100)
    authors = models.ManyToManyField(Author, related_name="books")
    publisher = models.ForeignKey(
        Publisher,
        models.CASCADE,
        related_name="books",
        db_column="publisher_id_column",
    )
    updated = models.DateTimeField(auto_now=True)
Book
python
coleifer__peewee
tests/schema.py
{ "start": 963, "end": 1224 }
class ____(TestModel):
    fk = ForeignKeyField('self', null=True, constraint_name='tmc_fk')
    k = TextField()
    v = IntegerField(constraints=[Check('v in (1, 2)')])

    class Meta:
        constraints = [Check('k != \'kx\'', name='chk_k')]
TMNamedConstraints
python
oauthlib__oauthlib
tests/oauth1/rfc5849/test_client.py
{ "start": 305, "end": 1318 }
class ____(TestCase): def test_client_no_realm(self): client = Client("client-key") uri, header, body = client.sign("http://example-uri") self.assertTrue( header["Authorization"].startswith('OAuth oauth_nonce=')) def test_client_realm_sign_with_default_realm(self): client = Client("client-key", realm="moo-realm") self.assertEqual(client.realm, "moo-realm") uri, header, body = client.sign("http://example-uri") self.assertTrue( header["Authorization"].startswith('OAuth realm="moo-realm",')) def test_client_realm_sign_with_additional_realm(self): client = Client("client-key", realm="moo-realm") uri, header, body = client.sign("http://example-uri", realm="baa-realm") self.assertTrue( header["Authorization"].startswith('OAuth realm="baa-realm",')) # make sure sign() does not override the default realm self.assertEqual(client.realm, "moo-realm")
ClientRealmTests
python
django__django
tests/admin_views/models.py
{ "start": 9474, "end": 9556 }
class ____(models.Model):
    child = models.ForeignKey(PKChild, models.CASCADE)
Toy
python
gevent__gevent
src/gevent/greenlet.py
{ "start": 3541, "end": 3857 }
class ____(SpawnedLink):
    """A wrapper around link that calls it in another greenlet only if source failed.

    Can be called only from main loop.
    """
    __slots__ = []

    def __call__(self, source):
        if not source.successful():
            return SpawnedLink.__call__(self, source)
FailureSpawnedLink
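A minimal gevent sketch of the failure-only callback path this class backs (it is the default link type behind Greenlet.link_exception); the failing worker is a toy example.

import gevent

def boom():
    raise RuntimeError("worker failed")

def on_failure(glet):
    # Runs in its own greenlet, and only because `boom` raised.
    print("failed with:", glet.exception)

g = gevent.spawn(boom)
g.link_exception(on_failure)
gevent.joinall([g])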
python
anthropics__anthropic-sdk-python
src/anthropic/lib/vertex/_client.py
{ "start": 1104, "end": 3025 }
class ____(BaseClient[_HttpxClientT, _DefaultStreamT]): @typed_cached_property def region(self) -> str: raise RuntimeError("region not set") @typed_cached_property def project_id(self) -> str | None: project_id = os.environ.get("ANTHROPIC_VERTEX_PROJECT_ID") if project_id: return project_id return None @override def _make_status_error( self, err_msg: str, *, body: object, response: httpx.Response, ) -> APIStatusError: if response.status_code == 400: return _exceptions.BadRequestError(err_msg, response=response, body=body) if response.status_code == 401: return _exceptions.AuthenticationError(err_msg, response=response, body=body) if response.status_code == 403: return _exceptions.PermissionDeniedError(err_msg, response=response, body=body) if response.status_code == 404: return _exceptions.NotFoundError(err_msg, response=response, body=body) if response.status_code == 409: return _exceptions.ConflictError(err_msg, response=response, body=body) if response.status_code == 422: return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body) if response.status_code == 429: return _exceptions.RateLimitError(err_msg, response=response, body=body) if response.status_code == 503: return _exceptions.ServiceUnavailableError(err_msg, response=response, body=body) if response.status_code == 504: return _exceptions.DeadlineExceededError(err_msg, response=response, body=body) if response.status_code >= 500: return _exceptions.InternalServerError(err_msg, response=response, body=body) return APIStatusError(err_msg, response=response, body=body)
BaseVertexClient
python
lazyprogrammer__machine_learning_examples
hmm_class/hmmc_concat.py
{ "start": 769, "end": 8830 }
class ____: def __init__(self, M, K): self.M = M # number of hidden states self.K = K # number of Gaussians def fit(self, X, max_iter=30, eps=1e0): # train the HMM model using the Baum-Welch algorithm # a specific instance of the expectation-maximization algorithm N = len(X) D = X[0].shape[1] # assume each x is organized (T, D) self.pi = np.ones(self.M) / self.M # initial state distribution self.A = random_normalized(self.M, self.M) # state transition matrix self.R = np.ones((self.M, self.K)) / self.K # mixture proportions print("initial A:", self.A) print("initial R:", self.R) self.mu = np.zeros((self.M, self.K, D)) for i in range(self.M): for k in range(self.K): random_idx = np.random.choice(N) x = X[random_idx] random_time_idx = np.random.choice(len(x)) self.mu[i,k] = x[random_time_idx] self.sigma = np.zeros((self.M, self.K, D, D)) for j in range(self.M): for k in range(self.K): self.sigma[j,k] = np.eye(D) costs = [] for it in range(max_iter): if it % 1 == 0: print("it:", it) alphas = [] betas = [] gammas = [] Bs = [] # components = [] P = np.zeros(N) for n in range(N): x = X[n] T = len(x) # calculate B so we can lookup when updating alpha and beta B = np.zeros((self.M, T)) component = np.zeros((self.M, self.K, T)) # we'll need these later for j in range(self.M): for t in range(T): for k in range(self.K): p = self.R[j,k] * mvn.pdf(x[t], self.mu[j,k], self.sigma[j,k]) component[j,k,t] = p B[j,t] += p Bs.append(B) alpha = np.zeros((T, self.M)) alpha[0] = self.pi*B[:,0] for t in range(1, T): alpha[t] = alpha[t-1].dot(self.A) * B[:,t] P[n] = alpha[-1].sum() assert(P[n] <= 1) alphas.append(alpha) beta = np.zeros((T, self.M)) beta[-1] = 1 for t in range(T - 2, -1, -1): beta[t] = self.A.dot(B[:,t+1] * beta[t+1]) betas.append(beta) # update for Gaussians gamma = np.zeros((T, self.M, self.K)) for t in range(T): alphabeta = (alphas[n][t,:] * betas[n][t,:]).sum() for j in range(self.M): factor = alphas[n][t,j] * betas[n][t,j] / alphabeta # mixture_j = component[j,:,t].sum() for k in range(self.K): gamma[t,j,k] = factor * component[j,k,t] / B[j,t] gammas.append(gamma) cost = np.log(P).sum() costs.append(cost) # now re-estimate pi, A, R, mu, sigma self.pi = np.sum((alphas[n][0] * betas[n][0])/P[n] for n in range(N)) / N a_den = np.zeros((self.M, 1)) a_num = 0 r_num = np.zeros((self.M, self.K)) r_den = np.zeros(self.M) mu_num = np.zeros((self.M, self.K, D)) sigma_num = np.zeros((self.M, self.K, D, D)) for n in range(N): x = X[n] T = len(x) B = Bs[n] gamma = gammas[n] # denominator for A a_den += (alphas[n][:-1] * betas[n][:-1]).sum(axis=0, keepdims=True).T / P[n] # numerator for A a_num_n = np.zeros((self.M, self.M)) for i in range(self.M): for j in range(self.M): for t in range(T-1): a_num_n[i,j] += alphas[n][t,i] * self.A[i,j] * B[j,t+1] * betas[n][t+1,j] a_num += a_num_n / P[n] # update mixture components r_num_n = np.zeros((self.M, self.K)) r_den_n = np.zeros(self.M) for j in range(self.M): for k in range(self.K): for t in range(T): r_num_n[j,k] += gamma[t,j,k] r_den_n[j] += gamma[t,j,k] r_num += r_num_n / P[n] r_den += r_den_n / P[n] mu_num_n = np.zeros((self.M, self.K, D)) sigma_num_n = np.zeros((self.M, self.K, D, D)) for j in range(self.M): for k in range(self.K): for t in range(T): # update means mu_num_n[j,k] += gamma[t,j,k] * x[t] # update covariances sigma_num_n[j,k] += gamma[t,j,k] * np.outer(x[t] - self.mu[j,k], x[t] - self.mu[j,k]) mu_num += mu_num_n / P[n] sigma_num += sigma_num_n / P[n] self.A = a_num / a_den assert(np.all(self.A <= 1)) # update R, mu, sigma for j in range(self.M): 
for k in range(self.K): self.R[j,k] = r_num[j,k] / r_den[j] self.mu[j,k] = mu_num[j,k] / r_num[j,k] self.sigma[j,k] = sigma_num[j,k] / r_num[j,k] print("A:", self.A) print("mu:", self.mu) print("sigma:", self.sigma) print("R:", self.R) print("pi:", self.pi) plt.plot(costs) plt.show() def likelihood(self, x): # returns log P(x | model) # using the forward part of the forward-backward algorithm T = len(x) alpha = np.zeros((T, self.M)) B = np.zeros((self.M, T)) for j in range(self.M): for t in range(T): for k in range(self.K): p = self.R[j,k] * mvn.pdf(x[t], self.mu[j,k], self.sigma[j,k]) B[j,t] += p alpha[0] = self.pi*B[:,0] for t in range(1, T): alpha[t] = alpha[t-1].dot(self.A) * B[:,t] return alpha[-1].sum() def likelihood_multi(self, X): return np.array([self.likelihood(x) for x in X]) def log_likelihood_multi(self, X): return np.log(self.likelihood_multi(X)) def set(self, pi, A, R, mu, sigma): self.pi = pi self.A = A self.R = R self.mu = mu self.sigma = sigma M, K = R.shape self.M = M self.K = K def real_signal(): spf = wave.open('helloworld.wav', 'r') #Extract Raw Audio from Wav File # If you right-click on the file and go to "Get Info", you can see: # sampling rate = 16000 Hz # bits per sample = 16 # The first is quantization in time # The second is quantization in amplitude # We also do this for images! # 2^16 = 65536 is how many different sound levels we have signal = spf.readframes(-1) signal = np.fromstring(signal, 'Int16') T = len(signal) hmm = HMM(10) hmm.fit(signal.reshape(1, T)) def fake_signal(init=simple_init): signals = get_signals(N=10, T=10, init=init) # for signal in signals: # for d in xrange(signal.shape[1]): # plt.plot(signal[:,d]) # plt.show() hmm = HMM(2, 2) hmm.fit(signals) L = hmm.log_likelihood_multi(signals).sum() print("LL for fitted params:", L) # test in actual params _, _, _, pi, A, R, mu, sigma = init() hmm.set(pi, A, R, mu, sigma) L = hmm.log_likelihood_multi(signals).sum() print("LL for actual params:", L) if __name__ == '__main__': # real_signal() # will break fake_signal(init=simple_init) # fake_signal(init=big_init) # will break
HMM
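The `likelihood` method above is the forward pass of the forward-backward algorithm: alpha starts as pi * B[:, 0], is propagated with alpha.dot(A) * B[:, t], and P(x) is the final sum. Below is a minimal standalone sketch of the same recursion for a discrete-observation HMM, so the emission probabilities can be read from a table; the matrices and the observation sequence are made-up toy values, not anything from hmmc_concat.py.

import numpy as np

pi = np.array([0.6, 0.4])                # initial state distribution
A = np.array([[0.7, 0.3],
              [0.2, 0.8]])               # A[i, j] = P(next state j | state i)
B_table = np.array([[0.9, 0.1],
                    [0.3, 0.7]])         # B_table[i, o] = P(observation o | state i)

def forward_likelihood(obs):
    # same recursion as HMM.likelihood, with B looked up from a table
    alpha = pi * B_table[:, obs[0]]
    for o in obs[1:]:
        alpha = alpha.dot(A) * B_table[:, o]
    return alpha.sum()                   # P(obs | model)

print(forward_likelihood([0, 1, 0]))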
python
HypothesisWorks__hypothesis
hypothesis-python/src/hypothesis/internal/conjecture/data.py
{ "start": 13233, "end": 13567 }
class ____(SpanProperty): def __init__(self, spans: "Spans") -> None: super().__init__(spans) self.result = IntList.of_length(len(self.spans)) def start_span(self, i: int, label_index: int) -> None: self.result[i] = label_index def finish(self) -> IntList: return self.result
_label_indices
python
doocs__leetcode
solution/0400-0499/0494.Target Sum/Solution2.py
{ "start": 0, "end": 363 }
class ____: def findTargetSumWays(self, nums: List[int], target: int) -> int: s = sum(nums) if s < target or (s - target) % 2: return 0 n = (s - target) // 2 f = [0] * (n + 1) f[0] = 1 for x in nums: for j in range(n, x - 1, -1): f[j] += f[j - x] return f[n]
Solution
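The early return and `n = (s - target) // 2` encode the usual subset-sum rewrite: if P is the set of numbers given '+' signs and N the set given '-' signs, then sum(P) - sum(N) = target and sum(P) + sum(N) = s, so sum(N) = (s - target) / 2 must be a non-negative integer, and the DP counts subsets summing to that value. A hand-checked call (assuming the snippet's `List` import from `typing` is in scope):

# nums = [1, 1, 1, 1, 1], target = 3:
# s = 5, s - target = 2 is even, n = 1,
# and there are 5 single-element subsets summing to 1, so the answer is 5.
print(Solution().findTargetSumWays([1, 1, 1, 1, 1], 3))  # 5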
python
spack__spack
lib/spack/spack/vendor/ruamel/yaml/cyaml.py
{ "start": 3871, "end": 5281 }
class ____(CEmitter, SafeRepresenter, Resolver): # type: ignore def __init__( self, stream, default_style=None, default_flow_style=None, canonical=None, indent=None, width=None, allow_unicode=None, line_break=None, encoding=None, explicit_start=None, explicit_end=None, version=None, tags=None, block_seq_indent=None, top_level_colon_align=None, prefix_colon=None, ): # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA self._emitter = self._serializer = self._representer = self CEmitter.__init__( self, stream, canonical=canonical, indent=indent, width=width, encoding=encoding, allow_unicode=allow_unicode, line_break=line_break, explicit_start=explicit_start, explicit_end=explicit_end, version=version, tags=tags, ) self._emitter = self._serializer = self._representer = self SafeRepresenter.__init__( self, default_style=default_style, default_flow_style=default_flow_style ) Resolver.__init__(self)
CSafeDumper
python
coleifer__peewee
tests/sqlite.py
{ "start": 78296, "end": 78460 }
class ____(LSMTable): key = TextField(primary_key=True) value = TextField() class Meta: database = database filename = 'test_lsm.ldb'
KVS
python
spyder-ide__spyder
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
{ "start": 67901, "end": 67944 }
class ____: shape = (0, 0)
EmptyDataFrame
python
google__jax
tests/mosaic/gpu_test.py
{ "start": 8506, "end": 10878 }
class ____(TestCase): def test_copy_basic(self): def kernel(ctx, src, dst, _): copy(src, dst) x = jnp.arange(2 * 3 * 5).reshape(2, 5, 3) y = mgpu.as_gpu_kernel(kernel, (1, 1, 1), (128, 1, 1), x, x, ())(x) np.testing.assert_array_equal(y, x) def test_copy_swizzle(self): def kernel(ctx, src, dst, _): copy(src, dst, swizzle=128) x = jnp.arange(8 * 32, dtype=jnp.float32).reshape(8, 32) y = mgpu.as_gpu_kernel(kernel, (1, 1, 1), (128, 1, 1), x, x, ())(x) expected = np.zeros_like(y) for i in range(8): for j in range(8): js = j ^ i expected[i, (j * 4):(j * 4) + 4] = x[i, (js * 4):(js * 4) + 4] np.testing.assert_array_equal(y, expected) def test_copy_swizzle_noop(self): # Two swizzles cancel out def kernel(ctx, src, dst, smem): copy(src, smem, swizzle=128) copy(smem, dst, swizzle=128) x = jnp.arange(8 * 32, dtype=jnp.float32).reshape(8, 32) y = mgpu.as_gpu_kernel(kernel, (1, 1, 1), (128, 1, 1), x, x, x)(x) np.testing.assert_array_equal(y, x) def test_iota_tensor(self): m = n = 64 def kernel(ctx, dst, _): f32 = ir.F32Type.get() index = ir.IndexType.get() registers = iota_tensor(m, n, jnp.float32).registers assert registers.size == 16, registers.size for i, vec_reg in enumerate(registers.flat): for j in range(2): reg = vector.extract( source=vec_reg, dynamic_position=[], static_position=ir.DenseI64ArrayAttr.get([j]), ) memref.store( reg, dst, [gpu.thread_id(gpu.Dimension.x), c(2 * i + j, index)] ) out_shape = jax.ShapeDtypeStruct((128, 32), jnp.float32) regs = mgpu.as_gpu_kernel( kernel, (1, 1, 1), (128, 1, 1), (), out_shape, () )() thread_ids = np.arange(128) warp_ids = thread_ids // 32 lane_ids = thread_ids % 32 thread_rows = warp_ids * 16 + lane_ids // 4 thread_start_cols = (lane_ids % 4) * 2 thread_cols = thread_start_cols[:, None] + (np.arange(n // 8)[None] * 8) regs = regs.reshape(128, 8, 2, 2) for row_half in range(2): for col_half in range(2): np.testing.assert_array_equal( regs[..., row_half, col_half], (thread_rows[:, None] + row_half * 8) * n + thread_cols + col_half )
TestUtilTest
python
getsentry__sentry
src/sentry/models/dashboard_widget.py
{ "start": 6677, "end": 7238 }
class ____(DefaultFieldsModel): __relocation_scope__ = RelocationScope.Organization dashboard_widget_query = FlexibleForeignKey( "sentry.DashboardWidgetQuery", on_delete=models.CASCADE ) field = models.TextField() # The dashboard that the field is linked to dashboard = FlexibleForeignKey("sentry.Dashboard", on_delete=models.CASCADE) class Meta: app_label = "sentry" db_table = "sentry_dashboardfieldlink" unique_together = (("dashboard_widget_query", "field"),) @region_silo_model
DashboardFieldLink
python
spack__spack
lib/spack/spack/version/version_types.py
{ "start": 762, "end": 5493 }
class ____: """Internal representation of the string (non-integer) components of Spack versions. Versions comprise string and integer components (see ``SEGMENT_REGEX`` above). This represents a string component, which is either some component consisting only of alphabetical characters, *or* a special "infinity version" like ``main``, ``develop``, ``master``, etc. For speed, Spack versions are designed to map to Python tuples, so that we can use Python's fast lexicographic tuple comparison on them. ``VersionStrComponent`` is designed to work as a component in these version tuples, and as such must compare directly with ``int`` or other ``VersionStrComponent`` objects. """ __slots__ = ["data"] data: Union[int, str] def __init__(self, data: Union[int, str]): # int for infinity index, str for literal. self.data = data @staticmethod def from_string(string: str) -> "VersionStrComponent": value: Union[int, str] = string if len(string) >= iv_min_len: try: value = infinity_versions.index(string) except ValueError: pass return VersionStrComponent(value) def __hash__(self) -> int: return hash(self.data) def __str__(self) -> str: return ( ("infinity" if self.data >= len(infinity_versions) else infinity_versions[self.data]) if isinstance(self.data, int) else self.data ) def __repr__(self) -> str: return f'VersionStrComponent("{self}")' def __eq__(self, other: object) -> bool: return isinstance(other, VersionStrComponent) and self.data == other.data # ignore typing for certain parts of these methods b/c a) they are performance-critical, and # b) mypy isn't smart enough to figure out that if l_inf and r_inf are the same, comparing # self.data and other.data is type safe. def __lt__(self, other: object) -> bool: l_inf = isinstance(self.data, int) if isinstance(other, int): return not l_inf r_inf = isinstance(other.data, int) # type: ignore return (not l_inf and r_inf) if l_inf ^ r_inf else self.data < other.data # type: ignore def __gt__(self, other: object) -> bool: l_inf = isinstance(self.data, int) if isinstance(other, int): return l_inf r_inf = isinstance(other.data, int) # type: ignore return (l_inf and not r_inf) if l_inf ^ r_inf else self.data > other.data # type: ignore def __le__(self, other: object) -> bool: return self < other or self == other def __ge__(self, other: object) -> bool: return self > other or self == other # Tuple types that make up the internal representation of StandardVersion. # We use Tuples so that Python can quickly compare versions. #: Version components are integers for numeric parts, VersionStrComponents for string parts. VersionComponentTuple = Tuple[Union[int, VersionStrComponent], ...] #: A Prerelease identifier is a constant for alpha/beta/rc/final and one optional number. #: Most versions will have this set to ``(FINAL,)``. Prereleases will have some other #: initial constant followed by a number, e.g. ``(RC, 1)``. PrereleaseTuple = Tuple[int, ...] #: Actual version tuple, including the split version number itself and the prerelease, #: all represented as tuples. VersionTuple = Tuple[VersionComponentTuple, PrereleaseTuple] #: Separators from a parsed version. SeparatorTuple = Tuple[str, ...] 
def parse_string_components(string: str) -> Tuple[VersionTuple, SeparatorTuple]: """Parse a string into a ``VersionTuple`` and ``SeparatorTuple``.""" string = string.strip() if string and not VALID_VERSION.match(string): raise ValueError("Bad characters in version string: %s" % string) segments = SEGMENT_REGEX.findall(string) separators: Tuple[str] = tuple(m[2] for m in segments) prerelease: Tuple[int, ...] # <version>(alpha|beta|rc)<number> if len(segments) >= 3 and segments[-2][1] in STRING_TO_PRERELEASE and segments[-1][0]: prerelease = (STRING_TO_PRERELEASE[segments[-2][1]], int(segments[-1][0])) segments = segments[:-2] # <version>(alpha|beta|rc) elif len(segments) >= 2 and segments[-1][1] in STRING_TO_PRERELEASE: prerelease = (STRING_TO_PRERELEASE[segments[-1][1]],) segments = segments[:-1] # <version> else: prerelease = (FINAL,) release: VersionComponentTuple = tuple( int(m[0]) if m[0] else VersionStrComponent.from_string(m[1]) for m in segments ) return (release, prerelease), separators
VersionStrComponent
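The docstring's design point, mapping versions onto tuples so that Python's lexicographic tuple comparison does the ordering, can be illustrated without any Spack machinery. The toy splitter below handles only dotted numeric versions (Spack's real SEGMENT_REGEX also handles string components and prereleases), but it shows why tuples order correctly where plain strings do not.

def as_tuple(v: str):
    # toy parser: dotted numeric versions only
    return tuple(int(part) for part in v.split("."))

assert as_tuple("1.2.10") > as_tuple("1.2.9")   # integer parts compare numerically
assert "1.2.10" < "1.2.9"                       # plain string comparison gets this wrong
assert as_tuple("2.0") < as_tuple("10.0")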
python
getsentry__sentry
tests/sentry/seer/assisted_query/test_issues_tools.py
{ "start": 445, "end": 5247 }
class ____(APITestCase, SnubaTestCase, OccurrenceTestMixin): databases = {"default", "control"} def setUp(self): super().setUp() self.min_ago = before_now(minutes=1) def test_get_issue_filter_keys_success(self): """Test that get_issue_filter_keys returns tags and feature flags""" # Create an error event with custom tags self.store_event( data={ "event_id": "a" * 32, "tags": {"fruit": "apple", "color": "red"}, "timestamp": self.min_ago.isoformat(), }, project_id=self.project.id, ) # Create an issue platform event self.process_occurrence( event_id="b" * 32, project_id=self.project.id, event_data={ "title": "some problem", "platform": "python", "tags": {"issue_tag": "value"}, "timestamp": self.min_ago.isoformat(), "received": self.min_ago.isoformat(), }, ) # Create an event with feature flags self.store_event( data={ "contexts": { "flags": { "values": [ {"flag": "feature_a", "result": True}, {"flag": "feature_b", "result": False}, ] } }, "timestamp": self.min_ago.isoformat(), }, project_id=self.project.id, ) result = get_issue_filter_keys( org_id=self.organization.id, project_ids=[self.project.id], ) assert result is not None assert "tags" in result assert "feature_flags" in result # Check tags (merged event_tags and issue_tags) tags = result["tags"] assert isinstance(tags, list) assert len(tags) > 0 # Verify structure of tags for tag in tags: assert "key" in tag assert "name" in tag assert "totalValues" in tag # Check that our custom tags are present tag_keys = {tag["key"] for tag in tags} assert "fruit" in tag_keys assert "color" in tag_keys # Check feature flags feature_flags = result["feature_flags"] assert isinstance(feature_flags, list) if len(feature_flags) > 0: for flag in feature_flags: assert "key" in flag assert "name" in flag assert "totalValues" in flag # Verify our flags are present flag_keys = {flag["key"] for flag in feature_flags} assert "feature_a" in flag_keys assert "feature_b" in flag_keys def test_get_issue_filter_keys_nonexistent_organization(self): """Test that nonexistent organization returns None""" result = get_issue_filter_keys( org_id=99999, project_ids=[self.project.id], ) assert result is None def test_get_issue_filter_keys_empty_projects(self): """Test with empty project list""" result = get_issue_filter_keys( org_id=self.organization.id, project_ids=[], ) assert result is not None assert "tags" in result assert "feature_flags" in result # Should return empty or minimal results assert isinstance(result["tags"], list) assert isinstance(result["feature_flags"], list) def test_get_issue_filter_keys_multiple_projects(self): """Test with multiple projects""" project2 = self.create_project(organization=self.organization) # Create events in both projects self.store_event( data={ "event_id": "a" * 32, "tags": {"project1_tag": "value1"}, "timestamp": self.min_ago.isoformat(), }, project_id=self.project.id, ) self.store_event( data={ "event_id": "b" * 32, "tags": {"project2_tag": "value2"}, "timestamp": self.min_ago.isoformat(), }, project_id=project2.id, ) result = get_issue_filter_keys( org_id=self.organization.id, project_ids=[self.project.id, project2.id], ) assert result is not None assert "tags" in result tags = result["tags"] tag_keys = {tag["key"] for tag in tags} # Both project tags should be present assert "project1_tag" in tag_keys assert "project2_tag" in tag_keys @pytest.mark.django_db(databases=["default", "control"])
TestGetIssueFilterKeys
python
django__django
django/db/models/fields/json.py
{ "start": 6395, "end": 7015 }
class ____(FieldGetDbPrepValueMixin, PostgresOperatorLookup): lookup_name = "contained_by" postgres_operator = "<@" def as_sql(self, compiler, connection): if not connection.features.supports_json_field_contains: raise NotSupportedError( "contained_by lookup is not supported on this database backend." ) lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params = tuple(rhs_params) + tuple(lhs_params) return "JSON_CONTAINS(%s, %s)" % (rhs, lhs), params
ContainedBy
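At the ORM level this lookup asks whether the stored JSON value is a subset of the value given in the query; the SQL swaps lhs and rhs for exactly that reason, mirroring PostgreSQL's <@ operator. A hedged usage sketch, with a hypothetical Dog model holding a JSONField named data:

# Matches rows whose data is contained by the given document,
# e.g. {"breed": "collie"}, {"owner": "Bob"}, or both keys together.
# Backends without JSON containment support raise NotSupportedError instead.
Dog.objects.filter(data__contained_by={"breed": "collie", "owner": "Bob"})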
python
pypa__pip
src/pip/_internal/resolution/resolvelib/candidates.py
{ "start": 3784, "end": 9178 }
class ____(Candidate): """A candidate backed by an ``InstallRequirement``. This represents a package request with the target not being already in the environment, and needs to be fetched and installed. The backing ``InstallRequirement`` is responsible for most of the leg work; this class exposes appropriate information to the resolver. :param link: The link passed to the ``InstallRequirement``. The backing ``InstallRequirement`` will use this link to fetch the distribution. :param source_link: The link this candidate "originates" from. This is different from ``link`` when the link is found in the wheel cache. ``link`` would point to the wheel cache, while this points to the found remote link (e.g. from pypi.org). """ dist: BaseDistribution is_installed = False def __init__( self, link: Link, source_link: Link, ireq: InstallRequirement, factory: Factory, name: NormalizedName | None = None, version: Version | None = None, ) -> None: self._link = link self._source_link = source_link self._factory = factory self._ireq = ireq self._name = name self._version = version self.dist = self._prepare() self._hash: int | None = None def __str__(self) -> str: return f"{self.name} {self.version}" def __repr__(self) -> str: return f"{self.__class__.__name__}({str(self._link)!r})" def __hash__(self) -> int: if self._hash is not None: return self._hash self._hash = hash((self.__class__, self._link)) return self._hash def __eq__(self, other: Any) -> bool: if isinstance(other, self.__class__): return links_equivalent(self._link, other._link) return False @property def source_link(self) -> Link | None: return self._source_link @property def project_name(self) -> NormalizedName: """The normalised name of the project the candidate refers to""" if self._name is None: self._name = self.dist.canonical_name return self._name @property def name(self) -> str: return self.project_name @property def version(self) -> Version: if self._version is None: self._version = self.dist.version return self._version def format_for_error(self) -> str: return ( f"{self.name} {self.version} " f"(from {self._link.file_path if self._link.is_file else self._link})" ) def _prepare_distribution(self) -> BaseDistribution: raise NotImplementedError("Override in subclass") def _check_metadata_consistency(self, dist: BaseDistribution) -> None: """Check for consistency of project name and version of dist.""" if self._name is not None and self._name != dist.canonical_name: raise MetadataInconsistent( self._ireq, "name", self._name, dist.canonical_name, ) if self._version is not None and self._version != dist.version: raise MetadataInconsistent( self._ireq, "version", str(self._version), str(dist.version), ) # check dependencies are valid # TODO performance: this means we iterate the dependencies at least twice, # we may want to cache parsed Requires-Dist try: list(dist.iter_dependencies(list(dist.iter_provided_extras()))) except InvalidRequirement as e: raise MetadataInvalid(self._ireq, str(e)) def _prepare(self) -> BaseDistribution: try: dist = self._prepare_distribution() except HashError as e: # Provide HashError the underlying ireq that caused it. This # provides context for the resulting error message to show the # offending line to the user. 
e.req = self._ireq raise except InstallationSubprocessError as exc: if isinstance(self._ireq.comes_from, InstallRequirement): request_chain = self._ireq.comes_from.from_path() else: request_chain = self._ireq.comes_from if request_chain is None: request_chain = "directly requested" raise FailedToPrepareCandidate( package_name=self._ireq.name or str(self._link), requirement_chain=request_chain, failed_step=exc.command_description, ) self._check_metadata_consistency(dist) return dist def iter_dependencies(self, with_requires: bool) -> Iterable[Requirement | None]: # Emit the Requires-Python requirement first to fail fast on # unsupported candidates and avoid pointless downloads/preparation. yield self._factory.make_requires_python_requirement(self.dist.requires_python) requires = self.dist.iter_dependencies() if with_requires else () for r in requires: yield from self._factory.make_requirements_from_spec(str(r), self._ireq) def get_install_requirement(self) -> InstallRequirement | None: return self._ireq
_InstallRequirementBackedCandidate
python
simplejson__simplejson
simplejson/tests/__init__.py
{ "start": 343, "end": 2647 }
class ____(unittest.TestCase): def runTest(self): if hasattr(sys, "pypy_translation_info"): "PyPy doesn't need speedups! :)" elif getattr(getattr(sys, "implementation", None), "name", None) == "graalpy": "GraalPy doesn't need speedups! :)" elif hasattr(self, "skipTest"): self.skipTest("_speedups.so is missing!") def additional_tests(suite=None, project_dir=None): import simplejson import simplejson.encoder import simplejson.decoder if suite is None: suite = unittest.TestSuite() try: import doctest except ImportError: if sys.version_info < (2, 7): # doctests in 2.6 depends on cStringIO return suite raise for mod in (simplejson, simplejson.encoder, simplejson.decoder): suite.addTest(doctest.DocTestSuite(mod)) if project_dir is not None: suite.addTest( doctest.DocFileSuite( os.path.join(project_dir, "index.rst"), module_relative=False ) ) return suite def all_tests_suite(project_dir=None): def get_suite(): suite_names = [ "simplejson.tests.%s" % (os.path.splitext(f)[0],) for f in os.listdir(os.path.dirname(__file__)) if f.startswith("test_") and f.endswith(".py") ] return additional_tests( suite=unittest.TestLoader().loadTestsFromNames(suite_names), project_dir=project_dir, ) suite = get_suite() import simplejson if simplejson._import_c_make_encoder() is None: suite.addTest(TestMissingSpeedups()) else: suite = unittest.TestSuite( [ suite, NoExtensionTestSuite([get_suite()]), ] ) return suite def main(project_dir=None): runner = unittest.TextTestRunner(verbosity=1 + sys.argv.count("-v")) suite = all_tests_suite(project_dir=project_dir) raise SystemExit(not runner.run(suite).wasSuccessful()) if __name__ == "__main__": import os import sys project_dir = os.path.dirname( os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ) sys.path.insert(0, project_dir) main(project_dir=project_dir)
TestMissingSpeedups
python
dateutil__dateutil
src/dateutil/tz/_factories.py
{ "start": 609, "end": 1654 }
class ____(_TzFactory): def __init__(cls, *args, **kwargs): cls.__instances = weakref.WeakValueDictionary() cls.__strong_cache = OrderedDict() cls.__strong_cache_size = 8 cls._cache_lock = _thread.allocate_lock() def __call__(cls, name, offset): if isinstance(offset, timedelta): key = (name, offset.total_seconds()) else: key = (name, offset) instance = cls.__instances.get(key, None) if instance is None: instance = cls.__instances.setdefault(key, cls.instance(name, offset)) # This lock may not be necessary in Python 3. See GH issue #901 with cls._cache_lock: cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) # Remove an item if the strong cache is overpopulated if len(cls.__strong_cache) > cls.__strong_cache_size: cls.__strong_cache.popitem(last=False) return instance
_TzOffsetFactory
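The factory combines two caches: a WeakValueDictionary holding every live instance (so unused tzinfo objects can be garbage collected) and a small OrderedDict used as a strong LRU cache (so the most recently requested instances stay alive even when nothing else references them). The same pattern in isolation, a sketch rather than dateutil code, with a generic make(key) constructor whose results must be weak-referenceable:

import weakref
from collections import OrderedDict

class CachingFactory:
    """Weak cache of all live instances plus a strong LRU cache of the newest few."""

    def __init__(self, make, strong_size=8):
        self._make = make
        self._weak = weakref.WeakValueDictionary()
        self._strong = OrderedDict()
        self._strong_size = strong_size

    def __call__(self, key):
        obj = self._weak.get(key)
        if obj is None:
            # setdefault avoids duplicating work if another caller created it meanwhile
            obj = self._weak.setdefault(key, self._make(key))
        # move (or insert) the key at the most-recently-used end
        self._strong[key] = self._strong.pop(key, obj)
        if len(self._strong) > self._strong_size:
            self._strong.popitem(last=False)   # drop the least recently used entry
        return obj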
python
great-expectations__great_expectations
tests/core/test__docs_decorators.py
{ "start": 19902, "end": 20236 }
class ____: """Docstring summary. Longer description. Args: some_arg: some_arg description. other_arg: other_arg description. """ def __init__(self, some_arg, other_arg) -> None: self.some_arg = some_arg self.other_arg = other_arg
_ClassFullDocstringDeprecatedAndNewAtArgumentLevel
python
walkccc__LeetCode
solutions/2087. Minimum Cost Homecoming of a Robot in a Grid/2087.py
{ "start": 0, "end": 377 }
class ____: def minCost( self, startPos: list[int], homePos: list[int], rowCosts: list[int], colCosts: list[int], ) -> int: ans = 0 i, j = startPos x, y = homePos while i != x: i += 1 if i < x else -1 ans += rowCosts[i] while j != y: j += 1 if j < y else -1 ans += colCosts[j] return ans
Solution
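The two while-loops simply walk one axis at a time and pay the cost of every row and column entered; the starting cell itself is never charged. A hand-checked call (values chosen for illustration):

# startPos = [1, 0], homePos = [2, 3], rowCosts = [5, 4, 3], colCosts = [8, 2, 6, 7]
# rows entered: 2        -> rowCosts[2] = 3
# cols entered: 1, 2, 3  -> 2 + 6 + 7 = 15
print(Solution().minCost([1, 0], [2, 3], [5, 4, 3], [8, 2, 6, 7]))  # 18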
python
kamyu104__LeetCode-Solutions
Python/sort-linked-list-already-sorted-using-absolute-values.py
{ "start": 182, "end": 622 }
class ____(object): def sortLinkedList(self, head): """ :type head: Optional[ListNode] :rtype: Optional[ListNode] """ tail, curr, head.next = head, head.next, None while curr: if curr.val > 0: curr.next, tail.next, tail, curr = None, curr, curr, curr.next else: curr.next, head, curr = head, curr, curr.next return head
Solution
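Because the input is sorted by absolute value, the non-negative nodes are already in ascending order and the negative nodes appear in descending order of value, so keeping positives after `tail` and pushing each negative to the front yields a fully sorted list. A small hedged driver; the ListNode definition and the helpers below are assumptions for illustration, not part of the solution file:

class ListNode(object):                       # assumed minimal node definition
    def __init__(self, val=0, next=None):
        self.val, self.next = val, next

def build(vals):
    head = None
    for v in reversed(vals):
        head = ListNode(v, head)
    return head

def to_list(node):
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out

# input sorted by absolute values: 0 -> 2 -> -5 -> 5 -> 10 -> -10
print(to_list(Solution().sortLinkedList(build([0, 2, -5, 5, 10, -10]))))
# [-10, -5, 0, 2, 5, 10]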
python
pypa__warehouse
tests/unit/email/test_init.py
{ "start": 35530, "end": 38243 }
class ____: @pytest.mark.parametrize("verified", [True, False]) def test_token_leak_email( self, pyramid_request, pyramid_config, monkeypatch, verified ): stub_user = pretend.stub( id=3, username="username", name="", email="email@example.com", primary_email=pretend.stub(email="email@example.com", verified=verified), ) pyramid_request.user = None pyramid_request.db = pretend.stub( query=lambda a: pretend.stub( filter=lambda *a: pretend.stub(one=lambda: stub_user) ), ) subject_renderer = pyramid_config.testing_add_renderer( "email/token-compromised-leak/subject.txt" ) subject_renderer.string_response = "Email Subject" body_renderer = pyramid_config.testing_add_renderer( "email/token-compromised-leak/body.txt" ) body_renderer.string_response = "Email Body" html_renderer = pyramid_config.testing_add_renderer( "email/token-compromised-leak/body.html" ) html_renderer.string_response = "Email HTML Body" send_email = pretend.stub( delay=pretend.call_recorder(lambda *args, **kwargs: None) ) pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email) monkeypatch.setattr(email, "send_email", send_email) result = email.send_token_compromised_email_leak( pyramid_request, stub_user, public_url="http://example.com", origin="github" ) assert result == { "username": "username", "public_url": "http://example.com", "origin": "github", } assert pyramid_request.task.calls == [pretend.call(send_email)] assert send_email.delay.calls == [ pretend.call( f"{stub_user.username} <{stub_user.email}>", { "sender": None, "subject": "Email Subject", "body_text": "Email Body", "body_html": ( "<html>\n<head></head>\n" "<body><p>Email HTML Body</p></body>\n</html>\n" ), }, { "tag": "account:email:sent", "user_id": 3, "additional": { "from_": None, "to": "email@example.com", "subject": "Email Subject", "redact_ip": False, }, }, ) ]
TestTokenLeakEmail
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/translator.py
{ "start": 2802, "end": 3456 }
class ____: """Represents an Airbyte destination, based on data as returned from the API.""" id: str type: str database: Optional[str] schema: Optional[str] @classmethod def from_destination_details( cls, destination_details: Mapping[str, Any], ) -> "AirbyteDestination": return cls( id=destination_details["destinationId"], type=destination_details["destinationType"], database=destination_details["configuration"].get("database"), schema=destination_details["configuration"].get("schema"), ) @whitelist_for_serdes @record
AirbyteDestination
python
PyCQA__pylint
doc/data/messages/i/invalid-overridden-method/bad.py
{ "start": 0, "end": 74 }
class ____: async def bore(self, insect): insect.eat(self)
Fruit
python
sanic-org__sanic
setup.py
{ "start": 163, "end": 4425 }
class ____(TestCommand): """ Provide a Test runner to be used from setup.py to run unit tests """ user_options = [("pytest-args=", "a", "Arguments to pass to pytest")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = "" def run_tests(self): import shlex import pytest errno = pytest.main(shlex.split(self.pytest_args)) sys.exit(errno) def open_local(paths, mode="r", encoding="utf8"): path = os.path.join(os.path.abspath(os.path.dirname(__file__)), *paths) return codecs.open(path, mode, encoding) def str_to_bool(val: str) -> bool: val = val.lower() if val in { "y", "yes", "yep", "yup", "t", "true", "on", "enable", "enabled", "1", }: return True elif val in {"n", "no", "f", "false", "off", "disable", "disabled", "0"}: return False else: raise ValueError(f"Invalid truth value {val}") with open_local(["sanic", "__version__.py"], encoding="latin1") as fp: try: version = re.findall( r"^__version__ = \"([^']+)\"\r?$", fp.read(), re.M )[0] except IndexError: raise RuntimeError("Unable to determine version.") with open_local(["README.rst"]) as rm: long_description = rm.read() setup_kwargs = { "name": "sanic", "version": version, "url": "http://github.com/sanic-org/sanic/", "license": "MIT", "author": "Sanic Community", "author_email": "admhpkns@gmail.com", "description": ( "A web server and web framework that's written to go fast. " "Build fast. Run fast." ), "long_description": long_description, "packages": find_packages(exclude=("tests", "tests.*")), "package_data": {"sanic": ["py.typed", "pages/styles/*"]}, "platforms": "any", "python_requires": ">=3.8", "classifiers": [ "Development Status :: 4 - Beta", "Environment :: Web Environment", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ], "entry_points": {"console_scripts": ["sanic = sanic.__main__:main"]}, } env_dependency = ( '; sys_platform != "win32" ' 'and implementation_name == "cpython"' ) ujson = "ujson>=1.35" + env_dependency uvloop = "uvloop>=0.15.0" + env_dependency types_ujson = "types-ujson" + env_dependency requirements = [ "sanic-routing>=23.12.0", "httptools>=0.0.10", uvloop, ujson, "aiofiles>=0.6.0", "websockets>=10.0", "multidict>=5.0,<7.0", "html5tagger>=1.2.1", "tracerite>=1.0.0", "typing-extensions>=4.4.0", "setuptools>=70.1.0", ] tests_require = [ "sanic-testing>=23.6.0", "pytest>=8.2.2", "coverage", "beautifulsoup4", "pytest-sanic", "pytest-benchmark", "chardet==3.*", "ruff", "bandit", "mypy", "docutils", "pygments", "uvicorn", "slotscheck>=0.8.0,<1", types_ujson, ] docs_require = [ "sphinx>=2.1.2", "sphinx_rtd_theme>=0.4.3", "docutils", "pygments", "m2r2", "enum-tools[sphinx]", "mistune<2.0.0", "autodocsumm>=0.2.11", ] dev_require = tests_require + [ "cryptography", "tox", "towncrier", ] all_require = list(set(dev_require + docs_require)) if str_to_bool(os.environ.get("SANIC_NO_UJSON", "no")): print("Installing without uJSON") requirements.remove(ujson) tests_require.remove(types_ujson) # 'nt' means windows OS if str_to_bool(os.environ.get("SANIC_NO_UVLOOP", "no")): print("Installing without uvLoop") requirements.remove(uvloop) extras_require = { "test": tests_require, "dev": dev_require, "docs": docs_require, "all": all_require, "ext": ["sanic-ext"], "http3": ["aioquic"], } setup_kwargs["install_requires"] = requirements setup_kwargs["tests_require"] = tests_require 
setup_kwargs["extras_require"] = extras_require setup_kwargs["cmdclass"] = {"test": PyTest} setup(**setup_kwargs)
PyTest
python
conda__conda
conda/base/constants.py
{ "start": 5181, "end": 5332 }
class ____(Enum): clobber = "clobber" warn = "warn" prevent = "prevent" def __str__(self) -> str: return self.value
PathConflict
python
pallets__flask
tests/test_helpers.py
{ "start": 599, "end": 2793 }
class ____: def test_send_file(self, app, req_ctx): rv = flask.send_file("static/index.html") assert rv.direct_passthrough assert rv.mimetype == "text/html" with app.open_resource("static/index.html") as f: rv.direct_passthrough = False assert rv.data == f.read() rv.close() def test_static_file(self, app, req_ctx): # Default max_age is None. # Test with static file handler. rv = app.send_static_file("index.html") assert rv.cache_control.max_age is None rv.close() # Test with direct use of send_file. rv = flask.send_file("static/index.html") assert rv.cache_control.max_age is None rv.close() app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 3600 # Test with static file handler. rv = app.send_static_file("index.html") assert rv.cache_control.max_age == 3600 rv.close() # Test with direct use of send_file. rv = flask.send_file("static/index.html") assert rv.cache_control.max_age == 3600 rv.close() # Test with pathlib.Path. rv = app.send_static_file(FakePath("index.html")) assert rv.cache_control.max_age == 3600 rv.close() class StaticFileApp(flask.Flask): def get_send_file_max_age(self, filename): return 10 app = StaticFileApp(__name__) with app.test_request_context(): # Test with static file handler. rv = app.send_static_file("index.html") assert rv.cache_control.max_age == 10 rv.close() # Test with direct use of send_file. rv = flask.send_file("static/index.html") assert rv.cache_control.max_age == 10 rv.close() def test_send_from_directory(self, app, req_ctx): app.root_path = os.path.join( os.path.dirname(__file__), "test_apps", "subdomaintestmodule" ) rv = flask.send_from_directory("static", "hello.txt") rv.direct_passthrough = False assert rv.data.strip() == b"Hello Subdomain" rv.close()
TestSendfile
python
sqlalchemy__sqlalchemy
test/sql/test_operators.py
{ "start": 28118, "end": 28837 }
class ____( _CustomComparatorTests, fixtures.TestBase ): def _add_override_factory(self): class MyIntegerOne(TypeDecorator): impl = Integer cache_ok = True class comparator_factory(TypeDecorator.Comparator): def __init__(self, expr): super().__init__(expr) def __add__(self, other): return self.expr.op("goofy")(other) def __and__(self, other): return self.expr.op("goofy_and")(other) class MyIntegerTwo(TypeDecorator): impl = MyIntegerOne cache_ok = True return MyIntegerTwo
TypeDecoratorTypeDecoratorComparatorTest
python
joke2k__faker
faker/providers/lorem/nl_BE/__init__.py
{ "start": 68, "end": 17952 }
class ____(LoremProvider): """Implement lorem provider for ``nl_BE`` locale. Source: https://nl.wiktionary.org/wiki/WikiWoordenboek:Lijst_met_1000_basiswoorden """ word_list = ( "aan", "aanbod", "aanraken", "aanval", "aap", "aardappel", "aarde", "aardig", "acht", "achter", "actief", "activiteit", "ademen", "af", "afgelopen", "afhangen", "afmaken", "afname", "afspraak", "afval", "al", "algemeen", "alleen", "alles", "als", "alsjeblieft", "altijd", "ander", "andere", "anders", "angst", "antwoord", "antwoorden", "appel", "arm", "auto", "avond", "avondeten", "baan", "baby", "bad", "bal", "bang", "bank", "basis", "bed", "bedekken", "bedreiging", "bedreven", "been", "beer", "beest", "beetje", "begin", "begrijpen", "begrip", "behalve", "beide", "beker", "bel", "belangrijk", "bellen", "belofte", "beneden", "benzine", "berg", "beroemd", "beroep", "bescherm", "beslissen", "best", "betalen", "beter", "bevatten", "bewegen", "bewolkt", "bezoek", "bibliotheek", "bieden", "bij", "bijna", "bijten", "bijvoorbeeld", "bijzonder", "binnen", "binnenkort", "blad", "blauw", "blazen", "blij", "blijven", "bloed", "bloem", "bodem", "boek", "boerderij", "boete", "boom", "boon", "boord", "boos", "bord", "borstelen", "bos", "bot", "bouwen", "boven", "branden", "brandstof", "breed", "breken", "brengen", "brief", "broer", "broek", "brood", "brug", "bruikbaar", "bruiloft", "bruin", "bui", "buiten", "bureau", "buren", "bus", "buurman", "buurvrouw", "cadeau", "chocolade", "cirkel", "comfortabel", "compleet", "computer", "conditie", "controle", "cool", "correct", "daar", "daarom", "dag", "dak", "dan", "dansen", "dapper", "dat", "de", "deel", "deken", "deksel", "delen", "derde", "deze", "dichtbij", "dienen", "diep", "dier", "dik", "ding", "dit", "dochter", "doen", "dom", "donker", "dood", "door", "doorzichtig", "doos", "dorp", "draad", "draaien", "dragen", "drie", "drijven", "drinken", "drogen", "dromen", "droog", "druk", "dubbel", "dun", "dus", "duur", "duwen", "echt", "een", "één", "eend", "eenheid", "eenzaam", "eerste", "eeuw", "effect", "ei", "eigen", "eiland", "einde", "eis", "elektrisch", "elk", "en", "enkele", "enthousiast", "erg", "eten", "even", "examen", "extreem", "falen", "familie", "feest", "feit", "fel", "fijn", "film", "fit", "fles", "foto", "fout", "fris", "fruit", "gaan", "gat", "gebeuren", "gebeurtenis", "gebied", "geboorte", "geboren", "gebruik", "gebruikelijk", "gebruiken", "gedrag", "gedragen", "geel", "geen", "gehoorzamen", "geit", "geld", "geliefde", "gelijk", "geloof", "geluid", "geluk", "gemak", "gemakkelijk", "gemeen", "genieten", "genoeg", "genot", "gerecht", "gereedschap", "geschikt", "gespannen", "geur", "gevaar", "gevaarlijk", "gevangenis", "geven", "gevolg", "gewicht", "gewoon", "gezicht", "gezond", "gif", "gisteren", "glad", "glas", "glimlach", "god", "goed", "goedkoop", "goud", "graf", "grap", "grappig", "gras", "grens", "grijs", "groeien", "groen", "groente", "groep", "grof", "grond", "groot", "grootmoeder", "grootvader", "haan", "haar", "haast", "hal", "halen", "half", "hallo", "hamer", "hand", "hard", "hart", "haten", "hebben", "heel", "heet", "helder", "helft", "help", "hem", "hemel", "hen", "herfst", "herinneren", "hert", "het", "heuvel", "hier", "hij", "hobby", "hoe", "hoed", "hoek", "hoeveel", "hoeveelheid", "hoewel", "hond", "honderd", "honger", "hoofd", "hoog", "hoogte", "hoop", "horen", "hotel", "houden", "huilen", "huis", "hun", "huren", "hut", "huur", "idee", "ieder", "iedereen", "iemand", "iets", "ijs", "ijzer", "ik", "in", "instrument", "ja", "jaar", "jagen", "jas", "jij", 
"jong", "jongen", "jouw", "jullie", "kaars", "kaart", "kaas", "kamer", "kans", "kant", "kantoor", "kap", "kast", "kasteel", "kat", "kennen", "kennis", "keuken", "keus", "kiezen", "kijken", "kind", "kip", "kist", "klaar", "klas", "klasse", "kleden", "klein", "kleren", "kleur", "klimmen", "klok", "kloppen", "klopt", "knie", "knippen", "koers", "koffer", "koffie", "kok", "koken", "kom", "komen", "koning", "koningin", "koorts", "kop", "kopen", "kort", "kost", "kosten", "koud", "kraam", "kracht", "krant", "krijgen", "kruis", "kuil", "kunnen", "kunst", "laag", "laat", "laatst", "lach", "lachen", "ladder", "laken", "lamp", "land", "lang", "langs", "langzaam", "laten", "leeftijd", "leeg", "leerling", "leeuw", "leger", "leiden", "lenen", "lengte", "lepel", "leren", "les", "leuk", "leven", "lezen", "lichaam", "licht", "liefde", "liegen", "liggen", "lijk", "lijken", "liniaal", "links", "lip", "list", "lomp", "lood", "lopen", "los", "lot", "lucht", "lui", "luisteren", "lunch", "maag", "maal", "maaltijd", "maan", "maand", "maar", "maat", "machine", "maken", "makkelijk", "mama", "man", "mand", "manier", "map", "markeren", "markt", "me", "medicijn", "meel", "meer", "meerdere", "meest", "meisje", "melk", "meneer", "mengsel", "mensen", "mes", "met", "meubel", "mevrouw", "middel", "midden", "mij", "mijn", "miljoen", "min", "minder", "minuut", "mis", "missen", "mits", "model", "modern", "moeder", "moeilijk", "moeten", "mogelijk", "mogen", "moment", "mond", "mooi", "moord", "moorden", "morgen", "munt", "muziek", "na", "naald", "naam", "naar", "naast", "nacht", "nat", "natuur", "natuurlijk", "nee", "neer", "negen", "nek", "nemen", "net", "netjes", "neus", "niet", "niets", "nieuw", "nieuws", "nobel", "noch", "nodig", "noemen", "nog", "nood", "nooit", "noord", "noot", "normaal", "nu", "nul", "nummer", "object", "oceaan", "ochtend", "oefening", "of", "offer", "olie", "olifant", "om", "oma", "onder", "onderwerp", "onderzoek", "oneven", "ongeluk", "ons", "ontsnappen", "ontbijt", "ontdekken", "ontmoeten", "ontvangen", "ontwikkelen", "onze", "oog", "ooit", "ook", "oom", "oor", "oorlog", "oorzaak", "oost", "op", "opa", "opeens", "open", "openlijk", "opleiding", "opnemen", "oranje", "orde", "oud", "ouder", "over", "overal", "overeenkomen", "overleden", "overvallen", "paar", "paard", "pad", "pagina", "pan", "papa", "papier", "park", "partner", "pas", "passeren", "pen", "peper", "per", "perfect", "periode", "persoon", "piano", "pijn", "pistool", "plaat", "plaatje", "plaats", "plafond", "plank", "plant", "plastic", "plat", "plattegrond", "plein", "plus", "poes", "politie", "poort", "populair", "positie", "postzegel", "potlood", "praten", "presenteren", "prijs", "prins", "prinses", "privé", "proberen", "probleem", "product", "provincie", "publiek", "punt", "raak", "raam", "radio", "raken", "rapport", "recht", "rechtdoor", "rechts", "rechtvaardig", "redden", "reeds", "regen", "reiken", "reizen", "rekenmachine", "rennen", "repareren", "rest", "restaurant", "resultaat", "richting", "rijk", "rijst", "rijzen", "ring", "rok", "rond", "rood", "rook", "rots", "roze", "rubber", "ruiken", "ruimte", "samen", "sap", "schaap", "schaar", "schaduw", "scheiden", "scherp", "schetsen", "schieten", "schijnen", "schip", "school", "schoon", "schouder", "schreeuw", "schreeuwen", "schrijven", "schudden", "seconde", "sex", "signaal", "simpel", "sinds", "slaapkamer", "slapen", "slecht", "sleutel", "slim", "slot", "sluiten", "smaak", "smal", "sneeuw", "snel", "snelheid", "snijden", "soep", "sok", "soms", "soort", "sorry", "speciaal", "spel", 
"spelen", "sport", "spreken", "springen", "staal", "stad", "stap", "start", "station", "steen", "stelen", "stem", "stempel", "ster", "sterk", "steun", "stil", "stilte", "stoel", "stof", "stoffig", "stom", "stop", "storm", "straat", "straffen", "structuur", "student", "studie", "stuk", "succes", "suiker", "taal", "taart", "tafel", "tak", "tamelijk", "tand", "tante", "tas", "taxi", "te", "team", "teen", "tegen", "teken", "tekenen", "telefoon", "televisie", "tellen", "tennis", "terug", "terugkomst", "terwijl", "test", "tevreden", "thee", "thuis", "tien", "tijd", "titel", "toekomst", "toen", "toename", "totaal", "traan", "tram", "trein", "trekken", "trouwen", "trui", "tuin", "tussen", "tweede", "u", "uit", "uitleggen", "uitnodigen", "uitvinden", "uitzoeken", "uur", "vaak", "vaarwel", "vader", "vak", "vakantie", "vallen", "vals", "van", "vandaag", "vangen", "vanmorgen", "vannacht", "varken", "vast", "vechten", "veel", "veer", "veilig", "ver", "veranderen", "verandering", "verder", "verdienen", "verdrietig", "verenigen", "verf", "vergelijkbaar", "vergelijken", "vergelijking", "vergeten", "vergeven", "vergissen", "verhaal", "verhoging", "verjaardag", "verkeerd", "verkopen", "verlaten", "verleden", "verliezen", "vernietigen", "veroveren", "verrassen", "vers", "verschil", "verschrikkelijk", "verspreiden", "verstand", "verstoppen", "versturen", "vertellen", "vertrekken", "vertrouwen", "verwachten", "verwijderen", "verzamelen", "verzameling", "vet", "vier", "vierkant", "vies", "vijand", "vijf", "vijver", "vinden", "vinger", "vis", "vlag", "vlees", "vlieg", "vliegtuig", "vloer", "voeden", "voedsel", "voelen", "voet", "voetbal", "vogel", "vol", "volgende", "volgorde", "voor", "voorbeeld", "voorkomen", "voorzichtig", "voorzien", "vork", "vorm", "vos", "vouwen", "vraag", "vragen", "vrede", "vreemd", "vreemde", "vriend", "vriendelijk", "vriezen", "vrij", "vrijheid", "vroeg", "vroeger", "vrouw", "vullen", "vuur", "waar", "waarom", "waarschijnlijk", "wachten", "wakker", "wanneer", "want", "wapen", "warm", "wassen", "wat", "water", "we", "week", "weer", "weg", "welke", "welkom", "wens", "wereld", "werelddeel", "werk", "west", "wetenschap", "wie", "wiel", "wij", "wijn", "wijs", "wild", "willen", "wind", "winkel", "winnen", "winter", "wissen", "wit", "wolf", "wolk", "wonder", "woord", "woud", "wreed", "zaak", "zacht", "zak", "zand", "zee", "zeep", "zeer", "zeggen", "zeil", "zeker", "zelfde", "zes", "zetten", "zeven", "ziek", "ziekenhuis", "ziel", "zien", "zij", "zijn", "zilver", "zingen", "zinken", "zitten", "zo", "zoals", "zoeken", "zoet", "zomer", "zon", "zonder", "zonnig", "zoon", "zorg", "zorgen", "zou", "zout", "zuid", "zulke", "zullen", "zus", "zwaar", "zwak", "zwembad", "zwemmen", ) parts_of_speech: Dict[str, tuple] = {}
Provider
python
getsentry__sentry-python
sentry_sdk/debug.py
{ "start": 194, "end": 1019 }
class ____(logging.Filter): def filter(self, record): # type: (LogRecord) -> bool if _client_init_debug.get(False): return True return get_client().options["debug"] def init_debug_support(): # type: () -> None if not logger.handlers: configure_logger() def configure_logger(): # type: () -> None _handler = logging.StreamHandler(sys.stderr) _handler.setFormatter(logging.Formatter(" [sentry] %(levelname)s: %(message)s")) logger.addHandler(_handler) logger.setLevel(logging.DEBUG) logger.addFilter(_DebugFilter()) def configure_debug_hub(): # type: () -> None warnings.warn( "configure_debug_hub is deprecated. Please remove calls to it, as it is a no-op.", DeprecationWarning, stacklevel=2, )
_DebugFilter
python
walkccc__LeetCode
solutions/1626. Best Team With No Conflicts/1626.py
{ "start": 87, "end": 890 }
class ____: def bestTeamScore(self, scores: list[int], ages: list[int]) -> int: n = len(scores) players = [Player(age, score) for age, score in zip(ages, scores)] # dp[i] := the maximum score of choosing the players[0..i] with the # players[i] being selected dp = [0] * n # Sort by age descending, then by score descending players.sort(key=lambda x: (-x.age, -x.score)) for i in range(n): # For each player, choose it first dp[i] = players[i].score # players[j].age >= players[i].age since we sort in descending order. # So, we only have to check that players[j].score >= players[i].score. for j in range(i): if players[j].score >= players[i].score: dp[i] = max(dp[i], dp[j] + players[i].score) return max(dp)
Solution
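After sorting descending by age and then by score, any earlier player is at least as old, so two picks conflict only when the later (younger-or-equal) player out-scores the earlier one; that turns the problem into a longest-increasing-subsequence-style DP over scores. A hand-checked call, assuming the Player record the snippet references (defined elsewhere in the file) is available:

# scores = [4, 5, 6, 5], ages = [2, 1, 2, 1]
# sorted players: (2, 6), (2, 4), (1, 5), (1, 5); best team is 5 + 6 + 5 = 16
print(Solution().bestTeamScore([4, 5, 6, 5], [2, 1, 2, 1]))  # 16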
python
huggingface__transformers
src/transformers/models/evolla/modeling_evolla.py
{ "start": 45892, "end": 49042 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: EvollaConfig, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True self.q_proj = nn.Linear( config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias ) self.k_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.v_proj = nn.Linear( config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias ) self.o_proj = nn.Linear( config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias ) self.rotary_fn = apply_rotary_pos_emb def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor, torch.Tensor]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights
EvollaAttention
python
pytorch__pytorch
benchmarks/operator_benchmark/pt/qlayernorm_test.py
{ "start": 325, "end": 1389 }
class ____(op_bench.TorchBenchmarkBase): def init(self, dims, dtype): X = (torch.rand(*dims) - 0.5) * 256 scale = 1.0 zero_point = 0 self.qX = torch.quantize_per_tensor( X, scale=scale, zero_point=zero_point, dtype=dtype ) self.inputs = { "qX": self.qX, "weight": torch.rand(*self.qX.size()[1:], dtype=torch.float), "bias": torch.rand(*self.qX.size()[1:], dtype=torch.float), "eps": 1e-5, "Y_scale": 0.1, "Y_zero_point": 0, } def forward(self, qX, weight, bias, eps: float, Y_scale: float, Y_zero_point: int): return torch.ops.quantized.layer_norm( qX, qX.size()[1:], weight=weight, bias=bias, eps=eps, output_scale=Y_scale, output_zero_point=Y_zero_point, ) op_bench.generate_pt_test(layernorm_configs_short, QLayerNormBenchmark) if __name__ == "__main__": op_bench.benchmark_runner.main()
QLayerNormBenchmark
python
google__jax
tests/pallas/ops_test.py
{ "start": 7870, "end": 9113 }
class ____(jtu.JaxTestCase): INTERPRET = False def setUp(self): if not self.INTERPRET: if jtu.device_under_test() == "cpu": self.skipTest("Only interpret mode supported on CPU") if (jtu.test_device_matches(["cuda"]) and not jtu.is_cuda_compute_capability_at_least("8.0")): self.skipTest("Only works on GPUs with capability >= sm80") if (jtu.test_device_matches(["cuda"]) and use_mosaic_gpu and not jtu.is_cuda_compute_capability_at_least("9.0")): self.skipTest("Mosaic GPU requires capability >= sm90") super().setUp() @classmethod def pallas_call(cls, *args, **kwargs): if jtu.test_device_matches(["cuda"]) and use_mosaic_gpu: assert plgpu_mgpu is not None compiler_params = plgpu_mgpu.CompilerParams( lowering_semantics=plgpu_mgpu.LoweringSemantics.Warpgroup ) kwargs["compiler_params"] = compiler_params return pl.pallas_call(*args, interpret=cls.INTERPRET, **kwargs) def skip_if_mosaic_gpu(self): if jtu.test_device_matches(["gpu"]) and use_mosaic_gpu: self.skipTest("TODO: Mosaic GPU does not support this yet") @jtu.thread_unsafe_test_class(condition=not jtu.hypothesis_is_thread_safe())
PallasBaseTest
python
bokeh__bokeh
src/bokeh/colors/groups.py
{ "start": 3718, "end": 4470 }
class ____(ColorGroup): ''' CSS "Cyan" Color Group as defined by https://www.w3schools.com/colors/colors_groups.asp .. bokeh-color:: mediumaquamarine .. bokeh-color:: aqua .. bokeh-color:: cyan .. bokeh-color:: lightcyan .. bokeh-color:: paleturquoise .. bokeh-color:: aquamarine .. bokeh-color:: turquoise .. bokeh-color:: mediumturquoise .. bokeh-color:: darkturquoise .. bokeh-color:: lightseagreen .. bokeh-color:: cadetblue .. bokeh-color:: darkcyan .. bokeh-color:: teal ''' _colors = ('MediumAquamarine', 'Aqua', 'Cyan', 'LightCyan', 'PaleTurquoise', 'Aquamarine', 'Turquoise', 'MediumTurquoise', 'DarkTurquoise', 'LightSeaGreen', 'CadetBlue', 'DarkCyan', 'Teal')
cyan
python
getsentry__sentry
tests/sentry/db/test_deletion.py
{ "start": 1921, "end": 3266 }
class ____(TestCase): def test_iteration(self) -> None: target_project = self.project expected_group_ids = {self.create_group().id for i in range(2)} other_project = self.create_project() self.create_group(other_project) self.create_group(other_project) iterator = BulkDeleteQuery( model=Group, project_id=target_project.id, dtfield="last_seen", order_by="last_seen", days=0, ).iterator(1) results: set[int] = set() for chunk in iterator: results.update(chunk) assert results == expected_group_ids def test_iteration_descending(self) -> None: target_project = self.project expected_group_ids = {self.create_group().id for i in range(2)} other_project = self.create_project() self.create_group(other_project) self.create_group(other_project) iterator = BulkDeleteQuery( model=Group, project_id=target_project.id, dtfield="last_seen", order_by="-last_seen", days=0, ).iterator(chunk_size=1) results: set[int] = set() for chunk in iterator: results.update(chunk) assert results == expected_group_ids
BulkDeleteQueryIteratorTestCase
python
aimacode__aima-python
planning.py
{ "start": 74310, "end": 80590 }
class ____(HLA): """ Define Actions for the real-world (that may be refined further), under angelic semantics """ def __init__(self, action, precond, effect, duration=0, consume=None, use=None): super().__init__(action, precond, effect, duration, consume, use) def convert(self, clauses): """ Converts strings into Exprs An HLA with angelic semantics can achieve the effects of simple HLA's (add / remove a variable) and furthermore can have following effects on the variables: Possibly add variable ( $+ ) Possibly remove variable ( $- ) Possibly add or remove a variable ( $$ ) Overrides HLA.convert function """ lib = {'~': 'Not', '$+': 'PosYes', '$-': 'PosNot', '$$': 'PosYesNot'} if isinstance(clauses, Expr): clauses = conjuncts(clauses) for i in range(len(clauses)): for ch in lib.keys(): if clauses[i].op == ch: clauses[i] = expr(lib[ch] + str(clauses[i].args[0])) elif isinstance(clauses, str): for ch in lib.keys(): clauses = clauses.replace(ch, lib[ch]) if len(clauses) > 0: clauses = expr(clauses) try: clauses = conjuncts(clauses) except AttributeError: pass return clauses def angelic_action(self): """ Converts a high level action (HLA) with angelic semantics into all of its corresponding high level actions (HLA). An HLA with angelic semantics can achieve the effects of simple HLA's (add / remove a variable) and furthermore can have following effects for each variable: Possibly add variable ( $+: 'PosYes' ) --> corresponds to two HLAs: HLA_1: add variable HLA_2: leave variable unchanged Possibly remove variable ( $-: 'PosNot' ) --> corresponds to two HLAs: HLA_1: remove variable HLA_2: leave variable unchanged Possibly add / remove a variable ( $$: 'PosYesNot' ) --> corresponds to three HLAs: HLA_1: add variable HLA_2: remove variable HLA_3: leave variable unchanged example: the angelic action with effects possibly add A and possibly add or remove B corresponds to the following 6 effects of HLAs: '$+A & $$B': HLA_1: 'A & B' (add A and add B) HLA_2: 'A & ~B' (add A and remove B) HLA_3: 'A' (add A) HLA_4: 'B' (add B) HLA_5: '~B' (remove B) HLA_6: ' ' (no effect) """ effects = [[]] for clause in self.effect: (n, w) = AngelicHLA.compute_parameters(clause) effects = effects * n # create n copies of effects it = range(1) if len(effects) != 0: # split effects into n sublists (separate n copies created in compute_parameters) it = range(len(effects) // n) for i in it: if effects[i]: if clause.args: effects[i] = expr(str(effects[i]) + '&' + str( Expr(clause.op[w:], clause.args[0]))) # make changes in the ith part of effects if n == 3: effects[i + len(effects) // 3] = expr( str(effects[i + len(effects) // 3]) + '&' + str(Expr(clause.op[6:], clause.args[0]))) else: effects[i] = expr( str(effects[i]) + '&' + str(expr(clause.op[w:]))) # make changes in the ith part of effects if n == 3: effects[i + len(effects) // 3] = expr( str(effects[i + len(effects) // 3]) + '&' + str(expr(clause.op[6:]))) else: if clause.args: effects[i] = Expr(clause.op[w:], clause.args[0]) # make changes in the ith part of effects if n == 3: effects[i + len(effects) // 3] = Expr(clause.op[6:], clause.args[0]) else: effects[i] = expr(clause.op[w:]) # make changes in the ith part of effects if n == 3: effects[i + len(effects) // 3] = expr(clause.op[6:]) return [HLA(Expr(self.name, self.args), self.precond, effects[i]) for i in range(len(effects))] def compute_parameters(clause): """ computes n,w n = number of HLA effects that the angelic HLA corresponds to w = length of representation of angelic HLA effect n = 1, if effect is add n = 
1, if effect is remove n = 2, if effect is possibly add n = 2, if effect is possibly remove n = 3, if effect is possibly add or remove """ if clause.op[:9] == 'PosYesNot': # possibly add/remove variable: three possible effects for the variable n = 3 w = 9 elif clause.op[:6] == 'PosYes': # possibly add variable: two possible effects for the variable n = 2 w = 6 elif clause.op[:6] == 'PosNot': # possibly remove variable: two possible effects for the variable n = 2 w = 3 # We want to keep 'Not' from 'PosNot' when adding action else: # variable or ~variable n = 1 w = 0 return n, w
AngelicHLA
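A minimal usage sketch for the class above, assuming it is aima-python's planning.AngelicHLA; the import path and the example action/effect strings are assumptions for illustration, not part of the extract.

from planning import AngelicHLA  # assumed import path (aima-python keeps planning.py at the repo root)

# "Possibly add At(SFO), possibly remove At(Home)" expands into 2 * 2 = 4 concrete HLAs.
go = AngelicHLA('Go(Home, SFO)', precond='At(Home)', effect='$+At(SFO) & $-At(Home)')
for hla in go.angelic_action():
    print(hla.name, hla.args, hla.effect)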
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 99635, "end": 99747 }
class ____(BaseModel, extra="forbid"): recommend: "RecommendInput" = Field(..., description="")
RecommendQuery
python
Netflix__metaflow
metaflow/plugins/airflow/sensors/s3_sensor.py
{ "start": 131, "end": 3275 }
class ____(AirflowSensorDecorator):
    """
    The `@airflow_s3_key_sensor` decorator attaches an Airflow [S3KeySensor](https://airflow.apache.org/docs/apache-airflow-providers-amazon/stable/_api/airflow/providers/amazon/aws/sensors/s3/index.html#airflow.providers.amazon.aws.sensors.s3.S3KeySensor) before the start step of the flow. This decorator only works when a flow is scheduled on Airflow and is compiled using `airflow create`. More than one `@airflow_s3_key_sensor` can be added as flow decorators. Adding more than one decorator will ensure that the `start` step starts only after all sensors finish.

    Parameters
    ----------
    timeout : int
        Time, in seconds, before the task times out and fails. (Default: 3600)
    poke_interval : int
        Time in seconds that the job should wait in between each try. (Default: 60)
    mode : str
        How the sensor operates. Options are: { poke | reschedule }. (Default: "poke")
    exponential_backoff : bool
        Allow progressively longer waits between pokes by using an exponential backoff algorithm. (Default: True)
    pool : str
        The slot pool this task should run in; slot pools are a way to limit concurrency for certain tasks. (Default: None)
    soft_fail : bool
        Set to true to mark the task as SKIPPED on failure. (Default: False)
    name : str
        Name of the sensor on Airflow
    description : str
        Description of the sensor in the Airflow UI
    bucket_key : Union[str, List[str]]
        The key(s) being waited on. Supports a full s3:// style url or a relative path from the root level.
        When it is specified as a full s3:// url, please leave `bucket_name` as None
    bucket_name : str
        Name of the S3 bucket. Only needed when bucket_key is not provided as a full s3:// url.
        When specified, all the keys passed to bucket_key refer to this bucket. (Default: None)
    wildcard_match : bool
        Whether the bucket_key should be interpreted as a Unix wildcard pattern. (Default: False)
    aws_conn_id : str
        A reference to the S3 connection on Airflow. (Default: None)
    verify : bool
        Whether or not to verify SSL certificates for the S3 connection. (Default: None)
    """

    name = "airflow_s3_key_sensor"
    operator_type = SensorNames.S3_SENSOR

    # Arg specification can be found here:
    # https://airflow.apache.org/docs/apache-airflow-providers-amazon/stable/_api/airflow/providers/amazon/aws/sensors/s3/index.html#airflow.providers.amazon.aws.sensors.s3.S3KeySensor
    defaults = dict(
        **AirflowSensorDecorator.defaults,
        bucket_key=None,  # Required
        bucket_name=None,
        wildcard_match=False,
        aws_conn_id=None,
        verify=None,
        # `verify (Optional[Union[str, bool]])` Whether or not to verify SSL certificates for the S3 connection.
        # `verify` is an Airflow variable.
    )

    def validate(self, flow):
        if self.attributes["bucket_key"] is None:
            raise AirflowException(
                "`bucket_key` for `@%s` cannot be empty." % (self.name)
            )
        super().validate(flow)
S3KeySensorDecorator
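A hedged sketch of how the decorator above is typically attached to a Metaflow flow; the top-level import of airflow_s3_key_sensor, the bucket key, and the flow name are assumptions for illustration.

from metaflow import FlowSpec, step, airflow_s3_key_sensor  # assumes the decorator is exported at the package top level

@airflow_s3_key_sensor(bucket_key="s3://my-bucket/raw/events.csv", timeout=3600, poke_interval=60)
class WaitForRawDataFlow(FlowSpec):
    @step
    def start(self):
        # Runs only after the S3KeySensor attached on Airflow confirms the key exists.
        self.next(self.end)

    @step
    def end(self):
        pass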
python
coleifer__peewee
tests/pwiz_integration.py
{ "start": 742, "end": 853 }
class ____(TestModel): name = CharField(unique=True) parent = ForeignKeyField('self', null=True)
Category
python
streamlit__streamlit
lib/streamlit/runtime/caching/cache_data_api.py
{ "start": 4392, "end": 10295 }
class ____(CacheStatsProvider): """Manages all DataCache instances.""" def __init__(self) -> None: self._caches_lock = threading.Lock() self._function_caches: dict[str, DataCache[Any]] = {} def get_cache( self, key: str, persist: CachePersistType, max_entries: int | None, ttl: int | float | timedelta | str | None, display_name: str, ) -> DataCache[Any]: """Return the mem cache for the given key. If it doesn't exist, create a new one with the given params. """ ttl_seconds = time_to_seconds(ttl, coerce_none_to_inf=False) # Get the existing cache, if it exists, and validate that its params # haven't changed. with self._caches_lock: cache = self._function_caches.get(key) if ( cache is not None and cache.ttl_seconds == ttl_seconds and cache.max_entries == max_entries and cache.persist == persist ): return cache # Close the existing cache's storage, if it exists. if cache is not None: _LOGGER.debug( "Closing existing DataCache storage " "(key=%s, persist=%s, max_entries=%s, ttl=%s) " "before creating new one with different params", key, persist, max_entries, ttl, ) cache.storage.close() # Create a new cache object and put it in our dict _LOGGER.debug( "Creating new DataCache (key=%s, persist=%s, max_entries=%s, ttl=%s)", key, persist, max_entries, ttl, ) cache_context = self.create_cache_storage_context( function_key=key, function_name=display_name, ttl_seconds=ttl_seconds, max_entries=max_entries, persist=persist, ) cache_storage_manager = self.get_storage_manager() storage = cache_storage_manager.create(cache_context) cache = DataCache( key=key, storage=storage, persist=persist, max_entries=max_entries, ttl_seconds=ttl_seconds, display_name=display_name, ) self._function_caches[key] = cache return cache def clear_all(self) -> None: """Clear all in-memory and on-disk caches.""" with self._caches_lock: try: # try to remove in optimal way if such ability provided by # storage manager clear_all method; # if not implemented, fallback to remove all # available storages one by one self.get_storage_manager().clear_all() except NotImplementedError: for data_cache in self._function_caches.values(): data_cache.clear() data_cache.storage.close() self._function_caches = {} def get_stats(self) -> list[CacheStat]: with self._caches_lock: # Shallow-clone our caches. We don't want to hold the global # lock during stats-gathering. function_caches = self._function_caches.copy() stats: list[CacheStat] = [] for cache in function_caches.values(): stats.extend(cache.get_stats()) return group_stats(stats) def validate_cache_params( self, function_name: str, persist: CachePersistType, max_entries: int | None, ttl: int | float | timedelta | str | None, ) -> None: """Validate that the cache params are valid for given storage. Raises ------ InvalidCacheStorageContext Raised if the cache storage manager is not able to work with provided CacheStorageContext. 
""" ttl_seconds = time_to_seconds(ttl, coerce_none_to_inf=False) cache_context = self.create_cache_storage_context( function_key="DUMMY_KEY", function_name=function_name, ttl_seconds=ttl_seconds, max_entries=max_entries, persist=persist, ) try: self.get_storage_manager().check_context(cache_context) except InvalidCacheStorageContextError: _LOGGER.exception( "Cache params for function %s are incompatible with current " "cache storage manager.", function_name, ) raise def create_cache_storage_context( self, function_key: str, function_name: str, persist: CachePersistType, ttl_seconds: float | None, max_entries: int | None, ) -> CacheStorageContext: return CacheStorageContext( function_key=function_key, function_display_name=function_name, ttl_seconds=ttl_seconds, max_entries=max_entries, persist=persist, ) def get_storage_manager(self) -> CacheStorageManager: if runtime.exists(): return runtime.get_instance().cache_storage_manager # When running in "raw mode", we can't access the CacheStorageManager, # so we're falling back to InMemoryCache. _LOGGER.warning("No runtime found, using MemoryCacheStorageManager") return MemoryCacheStorageManager() # Singleton DataCaches instance _data_caches = DataCaches() def get_data_cache_stats_provider() -> CacheStatsProvider: """Return the StatsProvider for all @st.cache_data functions.""" return _data_caches
DataCaches
python
python__mypy
mypy/test/testdeps.py
{ "start": 785, "end": 3236 }
class ____(DataSuite): files = find_test_files(pattern="deps*.test") def run_case(self, testcase: DataDrivenTestCase) -> None: src = "\n".join(testcase.input) dump_all = "# __dump_all__" in src options = parse_options(src, testcase, incremental_step=1) if options.python_version > sys.version_info: pytest.skip("Test case requires a newer Python version") options.use_builtins_fixtures = True options.show_traceback = True options.cache_dir = os.devnull options.export_types = True options.preserve_asts = True options.allow_empty_bodies = True messages, files, type_map = self.build(src, options) a = messages if files is None or type_map is None: if not a: a = ["Unknown compile error (likely syntax error in test case or fixture)"] else: deps: defaultdict[str, set[str]] = defaultdict(set) for module, file in files.items(): if (module in dumped_modules or dump_all) and (module in testcase.test_modules): new_deps = get_dependencies(file, type_map, options.python_version, options) for source in new_deps: deps[source].update(new_deps[source]) type_state.add_all_protocol_deps(deps) for source, targets in sorted(deps.items()): if source.startswith(("<enum", "<typing", "<mypy", "<_typeshed.")): # Remove noise. continue line = f"{source} -> {', '.join(sorted(targets))}" # Clean up output a bit line = line.replace("__main__", "m") a.append(line) assert_string_arrays_equal( testcase.output, a, f"Invalid output ({testcase.file}, line {testcase.line})" ) def build( self, source: str, options: Options ) -> tuple[list[str], dict[str, MypyFile] | None, dict[Expression, Type] | None]: try: result = build.build( sources=[BuildSource("main", None, source)], options=options, alt_lib_path=test_temp_dir, ) except CompileError as e: # TODO: Should perhaps not return None here. return e.messages, None, None return result.errors, result.files, result.types
GetDependenciesSuite
python
zarr-developers__zarr-python
src/zarr/codecs/numcodecs/_codecs.py
{ "start": 10465, "end": 11202 }
class ____(_NumcodecsArrayArrayCodec, codec_name="astype"): def resolve_metadata(self, chunk_spec: ArraySpec) -> ArraySpec: dtype = parse_dtype(np.dtype(self.codec_config["encode_dtype"]), zarr_format=3) # type: ignore[arg-type] return replace(chunk_spec, dtype=dtype) def evolve_from_array_spec(self, array_spec: ArraySpec) -> AsType: if self.codec_config.get("decode_dtype") is None: # TODO: remove these coverage exemptions the correct way, i.e. with tests dtype = array_spec.dtype.to_native_dtype() # pragma: no cover return AsType(**{**self.codec_config, "decode_dtype": str(dtype)}) # pragma: no cover return self # bytes-to-bytes checksum codecs
AsType
python
falconry__falcon
tests/test_recipes.py
{ "start": 3297, "end": 4150 }
class ____: def test_raw_path(self, asgi, app_kind, util): recipe = util.load_module( 'raw_url_path', parent_dir='examples/recipes', suffix=app_kind ) url1 = '/cache/http%3A%2F%2Ffalconframework.org' result1 = falcon.testing.simulate_get(recipe.app, url1) assert result1.status_code == 200 assert result1.json == {'url': 'http://falconframework.org'} scope1 = falcon.testing.create_scope(url1) assert scope1['raw_path'] == url1.encode() url2 = '/cache/http%3A%2F%2Ffalconframework.org/status' result2 = falcon.testing.simulate_get(recipe.app, url2) assert result2.status_code == 200 assert result2.json == {'cached': True} scope2 = falcon.testing.create_scope(url2) assert scope2['raw_path'] == url2.encode()
TestRawURLPath
python
rapidsai__cudf
python/dask_cudf/dask_cudf/_legacy/io/parquet.py
{ "start": 833, "end": 16975 }
class ____(ArrowDatasetEngine): @classmethod def _create_dd_meta(cls, dataset_info, **kwargs): # Start with pandas-version of meta meta_pd = super()._create_dd_meta(dataset_info, **kwargs) # Convert to cudf # (drop unsupported timezone information) for k, v in meta_pd.dtypes.items(): if isinstance(v, pd.DatetimeTZDtype) and v.tz is not None: meta_pd[k] = meta_pd[k].dt.tz_localize(None) meta_cudf = cudf.from_pandas(meta_pd) # Re-set "object" dtypes to align with pa schema kwargs = dataset_info.get("kwargs", {}) set_object_dtypes_from_pa_schema( meta_cudf, kwargs.get("schema", None), ) return meta_cudf @classmethod def multi_support(cls): # Assert that this class is CudfEngine # and that multi-part reading is supported return cls == CudfEngine @classmethod def _read_paths( cls, paths, fs, columns=None, row_groups=None, filters=None, partitions=None, partitioning=None, partition_keys=None, open_file_options=None, dataset_kwargs=None, **kwargs, ): # Simplify row_groups if all None if row_groups == [None for path in paths]: row_groups = None # Make sure we read in the columns needed for row-wise # filtering after IO. This means that one or more columns # will be dropped almost immediately after IO. However, # we do NEED these columns for accurate filtering. filters = _normalize_filters(filters) projected_columns = None if columns and filters: projected_columns = [c for c in columns if c is not None] columns = sorted( set(v[0] for v in itertools.chain.from_iterable(filters)) | set(projected_columns) ) dataset_kwargs = dataset_kwargs or {} if partitions: dataset_kwargs["partitioning"] = partitioning or "hive" # Use cudf to read in data try: df = cudf.read_parquet( paths, engine="cudf", columns=columns, row_groups=row_groups if row_groups else None, dataset_kwargs=dataset_kwargs, categorical_partitions=False, filesystem=fs, **kwargs, ) except RuntimeError as err: # TODO: Remove try/except after null-schema issue is resolved # (See: https://github.com/rapidsai/cudf/issues/12702) if len(paths) > 1: df = cudf.concat( [ cudf.read_parquet( path, engine="cudf", columns=columns, row_groups=row_groups[i] if row_groups else None, dataset_kwargs=dataset_kwargs, categorical_partitions=False, filesystem=fs, **kwargs, ) for i, path in enumerate(paths) ] ) else: raise err # Apply filters (if any are defined) df = _apply_post_filters(df, filters) if projected_columns: # Elements of `projected_columns` may now be in the index. 
# We must filter these names from our projection projected_columns = [ col for col in projected_columns if col in df._column_names ] df = df[projected_columns] if partitions and partition_keys is None: # Use `HivePartitioning` by default ds = pa_ds.dataset( paths, filesystem=fs, **dataset_kwargs, ) frag = next(ds.get_fragments()) if frag: # Extract hive-partition keys, and make sure they # are ordered the same as they are in `partitions` raw_keys = pa_ds._get_partition_keys(frag.partition_expression) partition_keys = [ (hive_part.name, raw_keys[hive_part.name]) for hive_part in partitions ] if partition_keys: if partitions is None: raise ValueError("Must pass partition sets") for i, (name, index2) in enumerate(partition_keys): if len(partitions[i].keys): # Build a categorical column from `codes` directly # (since the category is often a larger dtype) codes = as_column( partitions[i].keys.get_loc(index2), length=len(df), ) df[name] = codes._with_type_metadata( cudf.CategoricalDtype( categories=partitions[i].keys, ordered=False ) ) elif name not in df.columns: # Add non-categorical partition column df[name] = as_column(index2, length=len(df)) return df @classmethod def read_partition( cls, fs, pieces, columns, index, categories=(), partitions=(), filters=None, partitioning=None, schema=None, open_file_options=None, **kwargs, ): if columns is not None: columns = [c for c in columns] if isinstance(index, list): columns += index dataset_kwargs = kwargs.get("dataset", {}) partitioning = partitioning or dataset_kwargs.get("partitioning", None) if isinstance(partitioning, dict): partitioning = pa_ds.partitioning(**partitioning) # Check if we are actually selecting any columns read_columns = columns if schema and columns: ignored = set(schema.names) - set(columns) if not ignored: read_columns = None if not isinstance(pieces, list): pieces = [pieces] # Extract supported kwargs from `kwargs` read_kwargs = kwargs.get("read", {}) read_kwargs.update(open_file_options or {}) check_file_size = read_kwargs.pop("check_file_size", None) # Wrap reading logic in a `try` block so that we can # inform the user that the `read_parquet` partition # size is too large for the available memory try: # Assume multi-piece read paths = [] rgs = [] last_partition_keys = None dfs = [] for i, piece in enumerate(pieces): (path, row_group, partition_keys) = piece row_group = None if row_group == [None] else row_group # File-size check to help "protect" users from change # to up-stream `split_row_groups` default. We only # check the file size if this partition corresponds # to a full file, and `check_file_size` is defined if check_file_size and len(pieces) == 1 and row_group is None: file_size = fs.size(path) if file_size > check_file_size: warnings.warn( f"A large parquet file ({file_size}B) is being " f"used to create a DataFrame partition in " f"read_parquet. This may cause out of memory " f"exceptions in operations downstream. See the " f"notes on split_row_groups in the read_parquet " f"documentation. Setting split_row_groups " f"explicitly will silence this warning." 
) if i > 0 and partition_keys != last_partition_keys: dfs.append( cls._read_paths( paths, fs, columns=read_columns, row_groups=rgs if rgs else None, filters=filters, partitions=partitions, partitioning=partitioning, partition_keys=last_partition_keys, dataset_kwargs=dataset_kwargs, **read_kwargs, ) ) paths = [] rgs = [] last_partition_keys = None paths.append(path) rgs.append( [row_group] if not isinstance(row_group, list) and row_group is not None else row_group ) last_partition_keys = partition_keys dfs.append( cls._read_paths( paths, fs, columns=read_columns, row_groups=rgs if rgs else None, filters=filters, partitions=partitions, partitioning=partitioning, partition_keys=last_partition_keys, dataset_kwargs=dataset_kwargs, **read_kwargs, ) ) df = cudf.concat(dfs) if len(dfs) > 1 else dfs[0] # Re-set "object" dtypes align with pa schema set_object_dtypes_from_pa_schema(df, schema) if index and (index[0] in df.columns): df = df.set_index(index[0]) elif index is False and df.index.names != [None]: # If index=False, we shouldn't have a named index df.reset_index(inplace=True) except MemoryError as err: raise MemoryError( "Parquet data was larger than the available GPU memory!\n\n" "See the notes on split_row_groups in the read_parquet " "documentation.\n\n" "Original Error: " + str(err) ) raise err return df @staticmethod def write_partition( df, path, fs, filename, partition_on, return_metadata, fmd=None, compression="snappy", index_cols=None, **kwargs, ): preserve_index = False if len(index_cols) and set(index_cols).issubset(set(df.columns)): df.set_index(index_cols, drop=True, inplace=True) preserve_index = True if partition_on: md = write_to_dataset( df=df, root_path=path, compression=compression, filename=filename, partition_cols=partition_on, fs=fs, preserve_index=preserve_index, return_metadata=return_metadata, statistics=kwargs.get("statistics", "ROWGROUP"), int96_timestamps=kwargs.get("int96_timestamps", False), row_group_size_bytes=kwargs.get("row_group_size_bytes", None), row_group_size_rows=kwargs.get("row_group_size_rows", None), max_page_size_bytes=kwargs.get("max_page_size_bytes", None), max_page_size_rows=kwargs.get("max_page_size_rows", None), storage_options=kwargs.get("storage_options", None), ) else: with ( contextlib.nullcontext() if ioutils._is_local_filesystem(fs) else fs.open(fs.sep.join([path, filename]), mode="wb") ) as out_file: if out_file is None: out_file = fs.sep.join([path, filename]) elif not isinstance(out_file, IOBase): out_file = BufferedWriter(out_file) md = df.to_parquet( path=out_file, engine=kwargs.get("engine", "cudf"), index=kwargs.get("index", None), partition_cols=kwargs.get("partition_cols", None), partition_file_name=kwargs.get( "partition_file_name", None ), partition_offsets=kwargs.get("partition_offsets", None), statistics=kwargs.get("statistics", "ROWGROUP"), int96_timestamps=kwargs.get("int96_timestamps", False), row_group_size_bytes=kwargs.get( "row_group_size_bytes", None ), row_group_size_rows=kwargs.get( "row_group_size_rows", None ), storage_options=kwargs.get("storage_options", None), metadata_file_path=filename if return_metadata else None, ) # Return the schema needed to write the metadata if return_metadata: return [{"meta": md}] else: return [] @staticmethod def write_metadata(parts, fmd, fs, path, append=False, **kwargs): if parts: # Aggregate metadata and write to _metadata file metadata_path = fs.sep.join([path, "_metadata"]) _meta = [] if append and fmd is not None: # Convert to bytes: 
<https://github.com/rapidsai/cudf/issues/17177> if isinstance(fmd, pq.FileMetaData): with BytesIO() as myio: fmd.write_metadata_file(myio) myio.seek(0) fmd = np.frombuffer(myio.read(), dtype="uint8") _meta = [fmd] _meta.extend([parts[i][0]["meta"] for i in range(len(parts))]) _meta = ( cudf.io.merge_parquet_filemetadata(_meta) if len(_meta) > 1 else _meta[0] ) with fs.open(metadata_path, "wb") as fil: fil.write(memoryview(_meta)) @classmethod def collect_file_metadata(cls, path, fs, file_path): with fs.open(path, "rb") as f: meta = pq.ParquetFile(f).metadata if file_path: meta.set_file_path(file_path) with BytesIO() as myio: meta.write_metadata_file(myio) myio.seek(0) meta = np.frombuffer(myio.read(), dtype="uint8") return meta @classmethod def aggregate_metadata(cls, meta_list, fs, out_path): meta = ( cudf.io.merge_parquet_filemetadata(meta_list) if len(meta_list) > 1 else meta_list[0] ) if out_path: metadata_path = fs.sep.join([out_path, "_metadata"]) with fs.open(metadata_path, "wb") as fil: fil.write(memoryview(meta)) return None else: return meta def set_object_dtypes_from_pa_schema(df, schema): # Simple utility to modify cudf DataFrame # "object" dtypes to agree with a specific # pyarrow schema. if schema: for col_name, col in df._data.items(): if col_name in schema.names: typ = cudf_dtype_from_pa_type(schema.field(col_name).type) if not isinstance( typ, (cudf.ListDtype, cudf.StructDtype) ) and isinstance(col, cudf.core.column.StringColumn): df._data[col_name] = col.astype(typ) to_parquet = dd.to_parquet if create_metadata_file_dd is None: create_metadata_file = create_metadata_file_dd else: create_metadata_file = partial(create_metadata_file_dd, engine=CudfEngine)
CudfEngine
python
kennethreitz__tablib
src/tablib/packages/dbfpy/fields.py
{ "start": 11090, "end": 11916 }
class ____(DbfFieldDef): """Definition of the date field.""" typeCode = "D" defaultValue = utils.classproperty(lambda cls: datetime.date.today()) # "yyyymmdd" gives us 8 characters length = 8 def decodeValue(self, value): """Return a ``datetime.date`` instance decoded from ``value``.""" if value.strip(): return utils.getDate(value) else: return None def encodeValue(self, value): """Return a string-encoded value. ``value`` argument should be a value suitable for the `utils.getDate` call. Return: Return value is a string in format "yyyymmdd". """ if value: return utils.getDate(value).strftime("%Y%m%d") else: return " " * self.length
DbfDateFieldDef
python
huggingface__transformers
src/transformers/models/univnet/modeling_univnet.py
{ "start": 3581, "end": 8548 }
class ____(nn.Module): """ Implementation of the kernel predictor network which supplies the kernel and bias for the location variable convolutional layers (LVCs) in each UnivNet LVCBlock. Based on the KernelPredictor implementation in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L7). Parameters: config: (`UnivNetConfig`): Config for the `UnivNetModel` model. conv_kernel_size (`int`, *optional*, defaults to 3): The kernel size for the location variable convolutional layer kernels (convolutional weight tensor). conv_layers (`int`, *optional*, defaults to 4): The number of location variable convolutional layers to output kernels and biases for. """ def __init__( self, config: UnivNetConfig, conv_kernel_size: int = 3, conv_layers: int = 4, ): super().__init__() self.conv_in_channels = config.model_hidden_channels self.conv_out_channels = 2 * config.model_hidden_channels self.conv_kernel_size = conv_kernel_size self.conv_layers = conv_layers self.kernel_channels = ( self.conv_in_channels * self.conv_out_channels * self.conv_kernel_size * self.conv_layers ) self.bias_channels = self.conv_out_channels * self.conv_layers self.resnet_in_channels = config.num_mel_bins self.resnet_hidden_channels = config.kernel_predictor_hidden_channels self.resnet_kernel_size = config.kernel_predictor_conv_size self.num_blocks = config.kernel_predictor_num_blocks self.leaky_relu_slope = config.leaky_relu_slope padding = (self.resnet_kernel_size - 1) // 2 self.input_conv = nn.Conv1d(self.resnet_in_channels, self.resnet_hidden_channels, 5, padding=2, bias=True) self.resblocks = nn.ModuleList([UnivNetKernelPredictorResidualBlock(config) for _ in range(self.num_blocks)]) self.kernel_conv = nn.Conv1d( self.resnet_hidden_channels, self.kernel_channels, self.resnet_kernel_size, padding=padding, bias=True ) self.bias_conv = nn.Conv1d( self.resnet_hidden_channels, self.bias_channels, self.resnet_kernel_size, padding=padding, bias=True ) def forward(self, spectrogram: torch.FloatTensor): """ Maps a conditioning log-mel spectrogram to a tensor of convolutional kernels and biases, for use in location variable convolutional layers. Note that the input spectrogram should have shape (batch_size, input_channels, seq_length). Args: spectrogram (`torch.FloatTensor` of shape `(batch_size, input_channels, seq_length)`): Tensor containing the log-mel spectrograms. Returns: tuple[`torch.FloatTensor, `torch.FloatTensor`]: tuple of tensors where the first element is the tensor of location variable convolution kernels of shape `(batch_size, self.conv_layers, self.conv_in_channels, self.conv_out_channels, self.conv_kernel_size, seq_length)` and the second element is the tensor of location variable convolution biases of shape `(batch_size, self.conv_layers. self.conv_out_channels, seq_length)`. 
""" batch_size, _, seq_length = spectrogram.shape hidden_states = self.input_conv(spectrogram) hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope) for resblock in self.resblocks: hidden_states = resblock(hidden_states) kernel_hidden_states = self.kernel_conv(hidden_states) bias_hidden_states = self.bias_conv(hidden_states) # Reshape kernels and biases to appropriate shape kernels = kernel_hidden_states.view( batch_size, self.conv_layers, self.conv_in_channels, self.conv_out_channels, self.conv_kernel_size, seq_length, ).contiguous() biases = bias_hidden_states.view( batch_size, self.conv_layers, self.conv_out_channels, seq_length, ).contiguous() return kernels, biases def apply_weight_norm(self): weight_norm = nn.utils.weight_norm if hasattr(nn.utils.parametrizations, "weight_norm"): weight_norm = nn.utils.parametrizations.weight_norm weight_norm(self.input_conv) for layer in self.resblocks: layer.apply_weight_norm() weight_norm(self.kernel_conv) weight_norm(self.bias_conv) def remove_weight_norm(self): nn.utils.remove_weight_norm(self.input_conv) for layer in self.resblocks: layer.remove_weight_norm() nn.utils.remove_weight_norm(self.kernel_conv) nn.utils.remove_weight_norm(self.bias_conv)
UnivNetKernelPredictor
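A shape-check sketch for the predictor above, assuming it is run inside transformers' UnivNet modeling module (so its residual-block dependency resolves); UnivNetConfig is the real config class, while the batch size and frame count are arbitrary illustration values.

import torch
from transformers import UnivNetConfig

config = UnivNetConfig()
predictor = UnivNetKernelPredictor(config)  # class defined above, with defaults conv_kernel_size=3, conv_layers=4

spectrogram = torch.randn(2, config.num_mel_bins, 50)  # (batch, mel bins, frames)
kernels, biases = predictor(spectrogram)
# kernels: (2, conv_layers, model_hidden_channels, 2 * model_hidden_channels, conv_kernel_size, 50)
# biases:  (2, conv_layers, 2 * model_hidden_channels, 50)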
python
vyperlang__vyper
vyper/semantics/analysis/base.py
{ "start": 648, "end": 767 }
class ____(StringEnum): EXTERNAL = enum.auto() INTERNAL = enum.auto() DEPLOY = enum.auto()
FunctionVisibility
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/backfill.py
{ "start": 3494, "end": 3662 }
class ____(graphene.ObjectType): backfill_id = graphene.NonNull(graphene.String) class Meta: name = "CancelBackfillSuccess"
GrapheneCancelBackfillSuccess
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/application/dummy.py
{ "start": 340, "end": 1619 }
class ____(Application[None]): """ When no :class:`.Application` is running, :func:`.get_app` will run an instance of this :class:`.DummyApplication` instead. """ def __init__(self) -> None: super().__init__(output=DummyOutput(), input=DummyInput()) def run( self, pre_run: Callable[[], None] | None = None, set_exception_handler: bool = True, handle_sigint: bool = True, in_thread: bool = False, inputhook: InputHook | None = None, ) -> None: raise NotImplementedError("A DummyApplication is not supposed to run.") async def run_async( self, pre_run: Callable[[], None] | None = None, set_exception_handler: bool = True, handle_sigint: bool = True, slow_callback_duration: float = 0.5, ) -> None: raise NotImplementedError("A DummyApplication is not supposed to run.") async def run_system_command( self, command: str, wait_for_enter: bool = True, display_before_text: AnyFormattedText = "", wait_text: str = "", ) -> None: raise NotImplementedError def suspend_to_background(self, suspend_group: bool = True) -> None: raise NotImplementedError
DummyApplication
python
pypa__warehouse
tests/unit/admin/views/test_organizations.py
{ "start": 33822, "end": 38873 }
class ____: def test_delete_role(self, db_request, monkeypatch): organization = OrganizationFactory.create(name="pypi") user = UserFactory.create(username="testuser") role = OrganizationRoleFactory.create( organization=organization, user=user, role_name=OrganizationRoleType.Member ) # Mock record_event record_event = pretend.call_recorder(lambda **kwargs: None) monkeypatch.setattr(organization, "record_event", record_event) db_request.matchdict = { "organization_id": str(organization.id), "role_id": str(role.id), } db_request.route_path = pretend.call_recorder( lambda *a, **kw: "/admin/organizations/" ) db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.POST = {"username": user.username} result = views.delete_organization_role(db_request) assert isinstance(result, HTTPSeeOther) assert result.location == "/admin/organizations/" assert db_request.session.flash.calls == [ pretend.call( f"Removed '{user.username}' as 'Member' from '{organization.name}'", queue="success", ) ] assert db_request.db.query(OrganizationRole).count() == 0 # Check event was recorded assert record_event.calls == [ pretend.call( request=db_request, tag="admin:organization:role:remove", additional={ "action": f"remove Member {user.username}", "user_id": str(user.id), "role_name": "Member", }, ) ] def test_delete_role_not_found(self, db_request): organization = OrganizationFactory.create(name="pypi") db_request.matchdict = { "organization_id": str(organization.id), "role_id": "00000000-0000-0000-0000-000000000000", } db_request.route_path = pretend.call_recorder( lambda *a, **kw: "/admin/organizations/" ) db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) result = views.delete_organization_role(db_request) assert isinstance(result, HTTPSeeOther) assert db_request.session.flash.calls == [ pretend.call("This role no longer exists", queue="error") ] def test_delete_role_wrong_confirmation(self, db_request): organization = OrganizationFactory.create(name="pypi") user = UserFactory.create(username="testuser") role = OrganizationRoleFactory.create( organization=organization, user=user, role_name=OrganizationRoleType.Member ) db_request.matchdict = { "organization_id": str(organization.id), "role_id": str(role.id), } db_request.route_path = pretend.call_recorder( lambda *a, **kw: "/admin/organizations/" ) db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.POST = {"username": "wronguser"} result = views.delete_organization_role(db_request) assert isinstance(result, HTTPSeeOther) assert db_request.session.flash.calls == [ pretend.call("Confirm the request", queue="error") ] # Role should still exist assert db_request.db.query(OrganizationRole).count() == 1 def test_delete_role_no_confirmation(self, db_request): organization = OrganizationFactory.create(name="pypi") user = UserFactory.create(username="testuser") role = OrganizationRoleFactory.create( organization=organization, user=user, role_name=OrganizationRoleType.Member ) db_request.matchdict = { "organization_id": str(organization.id), "role_id": str(role.id), } db_request.route_path = pretend.call_recorder( lambda *a, **kw: "/admin/organizations/" ) db_request.session = pretend.stub( flash=pretend.call_recorder(lambda *a, **kw: None) ) db_request.POST = {} result = views.delete_organization_role(db_request) assert isinstance(result, HTTPSeeOther) assert db_request.session.flash.calls == [ pretend.call("Confirm the request", queue="error") ] # 
Role should still exist assert db_request.db.query(OrganizationRole).count() == 1 def test_delete_role_organization_not_found(self, db_request): db_request.matchdict = { "organization_id": "00000000-0000-0000-0000-000000000000", "role_id": "00000000-0000-0000-0000-000000000000", } with pytest.raises(HTTPNotFound): views.delete_organization_role(db_request)
TestDeleteOrganizationRole
python
matplotlib__matplotlib
galleries/examples/user_interfaces/mathtext_wx_sgskip.py
{ "start": 3859, "end": 4110 }
class ____(wx.App): def OnInit(self): frame = CanvasFrame(None, "wxPython mathtext demo app") self.SetTopWindow(frame) frame.Show(True) return True if __name__ == "__main__": app = MyApp() app.MainLoop()
MyApp
python
tensorflow__tensorflow
third_party/xla/xla/backends/cpu/codegen/fusion_emitter_test.py
{ "start": 7614, "end": 9265 }
class ____(parameterized.TestCase): def test_exp_nan_dce(self): dtype = np.dtype(np.float64) hlo = """ HloModule test_module fusion_computation { %param = f64[3] parameter(0) %multiplier = f64[3] constant({1, 0, nan}) %mult = f64[3] multiply(%param, %multiplier) ROOT %exp = f64[3] exponential(%mult) } ENTRY main { %param = f64[3] parameter(0) ROOT %wrapped_fusion = f64[3] fusion(%param), kind=kLoop, calls=%fusion_computation } """ hlo_module, buffer_assignment = utilities.parse_hlo_module(hlo) jit_compiler = testlib_cpu.JitCompiler(hlo_module.get_config()) mlir_context = testlib_cpu.MLIRContext() kernel_definition = testlib_cpu.emit_fusion_kernel( mlir_context, hlo_module.get_root_instruction(), buffer_assignment, False, ) kernel_runner = testlib_cpu.KernelRunner.create( kernel_definition, jit_compiler ) operand_shape = (3,) param = base_utilities.create_literal_from_np( np.ndarray(operand_shape, dtype) ) np_param = np.asarray(param) np_param[0] = 1 np_param[1] = 0 np_param[2] = np.nan result = base_utilities.create_literal_from_np( np.zeros(operand_shape, dtype) ) kernel_runner.call([param, result]) np_result = np.asarray(result) self.assertAlmostEqual(np_result[0], np.exp(1)) self.assertAlmostEqual(np_result[1], np.exp(0)) self.assertTrue(np.isnan(np_result[2])) if __name__ == "__main__": absltest.main()
FusionEmitterTest
python
jina-ai__jina
tests/integration/docarray_v2/test_issues.py
{ "start": 2913, "end": 2966 }
class ____(BaseDoc): text: str = "test"
SimpleInput
python
doocs__leetcode
solution/1400-1499/1485.Clone Binary Tree With Random Pointer/Solution.py
{ "start": 220, "end": 714 }
class ____: def copyRandomBinaryTree(self, root: 'Optional[Node]') -> 'Optional[NodeCopy]': def dfs(root): if root is None: return None if root in mp: return mp[root] copy = NodeCopy(root.val) mp[root] = copy copy.left = dfs(root.left) copy.right = dfs(root.right) copy.random = dfs(root.random) return copy mp = {} return dfs(root)
Solution
python
doocs__leetcode
solution/0100-0199/0125.Valid Palindrome/Solution.py
{ "start": 0, "end": 378 }
class ____: def isPalindrome(self, s: str) -> bool: i, j = 0, len(s) - 1 while i < j: if not s[i].isalnum(): i += 1 elif not s[j].isalnum(): j -= 1 elif s[i].lower() != s[j].lower(): return False else: i, j = i + 1, j - 1 return True
Solution
python
scipy__scipy
scipy/signal/tests/test_wavelets.py
{ "start": 136, "end": 2145 }
class ____: def test_ricker(self): w = wavelets._ricker(1.0, 1) expected = 2 / (np.sqrt(3 * 1.0) * (np.pi ** 0.25)) assert_array_equal(w, expected) lengths = [5, 11, 15, 51, 101] for length in lengths: w = wavelets._ricker(length, 1.0) assert len(w) == length max_loc = np.argmax(w) assert max_loc == (length // 2) points = 100 w = wavelets._ricker(points, 2.0) half_vec = np.arange(0, points // 2) # Wavelet should be symmetric assert_array_almost_equal(w[half_vec], w[-(half_vec + 1)]) # Check zeros aas = [5, 10, 15, 20, 30] points = 99 for a in aas: w = wavelets._ricker(points, a) vec = np.arange(0, points) - (points - 1.0) / 2 exp_zero1 = np.argmin(np.abs(vec - a)) exp_zero2 = np.argmin(np.abs(vec + a)) assert_array_almost_equal(w[exp_zero1], 0) assert_array_almost_equal(w[exp_zero2], 0) def test_cwt(self): widths = [1.0] def delta_wavelet(s, t): return np.array([1]) len_data = 100 test_data = np.sin(np.pi * np.arange(0, len_data) / 10.0) # Test delta function input gives same data as output cwt_dat = wavelets._cwt(test_data, delta_wavelet, widths) assert cwt_dat.shape == (len(widths), len_data) assert_array_almost_equal(test_data, cwt_dat.flatten()) # Check proper shape on output widths = [1, 3, 4, 5, 10] cwt_dat = wavelets._cwt(test_data, wavelets._ricker, widths) assert cwt_dat.shape == (len(widths), len_data) widths = [len_data * 10] # Note: this wavelet isn't defined quite right, but is fine for this test def flat_wavelet(l, w): return np.full(w, 1 / w) cwt_dat = wavelets._cwt(test_data, flat_wavelet, widths) assert_array_almost_equal(cwt_dat, np.mean(test_data))
TestWavelets
python
readthedocs__readthedocs.org
readthedocs/projects/migrations/0133_addons_load_when_embedded.py
{ "start": 150, "end": 724 }
class ____(migrations.Migration): safe = Safe.before_deploy() dependencies = [ ("projects", "0132_addons_linkpreviews_fields"), ] operations = [ migrations.AddField( model_name="addonsconfig", name="options_load_when_embedded", field=models.BooleanField(default=False, null=True), ), migrations.AddField( model_name="historicaladdonsconfig", name="options_load_when_embedded", field=models.BooleanField(default=False, null=True), ), ]
Migration
python
pytorch__pytorch
torch/_export/serde/schema.py
{ "start": 13604, "end": 13870 }
class ____: config: Annotated[dict[str, PayloadMeta], 10] # # The structure is used to serialize instances of AOTInductorModel to pass # them from the publishing pipeline to the predictor. # # All new fields should be marked as optional. # @dataclass
PayloadConfig
python
kamyu104__LeetCode-Solutions
Python/reduce-array-size-to-the-half.py
{ "start": 58, "end": 727 }
class ____(object): def minSetSize(self, arr): """ :type arr: List[int] :rtype: int """ counting_sort = [0]*len(arr) count = collections.Counter(arr) for c in count.itervalues(): counting_sort[c-1] += 1 result, total = 0, 0 for c in reversed(xrange(len(arr))): if not counting_sort[c]: continue count = min(counting_sort[c], ((len(arr)+1)//2 - total - 1)//(c+1) + 1) result += count total += count*(c+1) if total >= (len(arr)+1)//2: break return result
Solution
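A quick check of the counting-sort approach above; note the solution targets Python 2 (xrange, itervalues). The array is a standard example for this problem, with expected answer 2.

# Removing the value sets {3, 5} drops 7 of 10 elements, which is at least half.
print(Solution().minSetSize([3, 3, 3, 3, 5, 5, 5, 2, 2, 7]))  # 2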
python
getsentry__sentry
tests/sentry/seer/autofix/test_issue_summary.py
{ "start": 46660, "end": 49527 }
class ____(APITestCase, SnubaTestCase): def setUp(self) -> None: super().setUp() self.group = self.create_group() event_data = load_data("python") self.event = self.store_event(data=event_data, project_id=self.project.id) self.user = self.create_user() @patch("sentry.seer.autofix.issue_summary._trigger_autofix_task") @patch("sentry.seer.autofix.issue_summary._generate_fixability_score") @patch("sentry.seer.autofix.issue_summary.get_autofix_state") @patch("sentry.seer.autofix.issue_summary.quotas.backend.has_available_reserved_budget") def test_alert_skips_automation_below_threshold( self, mock_budget, mock_state, mock_fixability, mock_trigger ): """Alert automation should skip when event count < 10 with triage-signals-v0""" self.project.update_option("sentry:autofix_automation_tuning", "always") mock_budget.return_value = True mock_state.return_value = None mock_fixability.return_value = SummarizeIssueResponse( group_id=str(self.group.id), headline="Test", scores=SummarizeIssueScores(fixability_score=0.70), ) # Set event count to 5 self.group.times_seen = 5 self.group.times_seen_pending = 0 run_automation(self.group, self.user, self.event, SeerAutomationSource.ALERT) # Should not trigger automation mock_trigger.delay.assert_not_called() @patch( "sentry.seer.autofix.issue_summary.is_seer_autotriggered_autofix_rate_limited", return_value=False, ) @patch("sentry.seer.autofix.issue_summary._trigger_autofix_task") @patch("sentry.seer.autofix.issue_summary._generate_fixability_score") @patch("sentry.seer.autofix.issue_summary.get_autofix_state") @patch("sentry.seer.autofix.issue_summary.quotas.backend.has_available_reserved_budget") def test_alert_runs_automation_above_threshold( self, mock_budget, mock_state, mock_fixability, mock_trigger, mock_rate_limit ): """Alert automation should run when event count >= 10 with triage-signals-v0""" self.project.update_option("sentry:autofix_automation_tuning", "always") mock_budget.return_value = True mock_state.return_value = None mock_fixability.return_value = SummarizeIssueResponse( group_id=str(self.group.id), headline="Test", scores=SummarizeIssueScores(fixability_score=0.70), ) # Set event count to 10 self.group.times_seen = 10 self.group.times_seen_pending = 0 run_automation(self.group, self.user, self.event, SeerAutomationSource.ALERT) # Should trigger automation mock_trigger.delay.assert_called_once() @with_feature("organizations:gen-ai-features")
TestRunAutomationAlertEventCount
python
kamyu104__LeetCode-Solutions
Python/majority-element.py
{ "start": 832, "end": 1065 }
class ____(object): def majorityElement(self, nums): """ :type nums: List[int] :rtype: int """ return sorted(collections.Counter(nums).items(), key=lambda a: a[1], reverse=True)[0][0]
Solution3
python
sympy__sympy
sympy/polys/numberfields/exceptions.py
{ "start": 1204, "end": 1401 }
class ____(Exception): r""" Represents cases in which an algebraic structure was expected to have a certain property, or be of a certain type, but was not. """ pass
StructureError
python
coleifer__peewee
tests/reflection.py
{ "start": 19174, "end": 19314 }
class ____(TestModel): content = TextField() timestamp = DateTimeField(default=datetime.datetime.now) status = IntegerField()
Note
python
chroma-core__chroma
chromadb/test/property/strategies.py
{ "start": 6706, "end": 7715 }
class ____(types.EmbeddingFunction[Documents]): def __init__(self, dim: int, dtype: npt.DTypeLike) -> None: self.dim = dim self.dtype = dtype def __call__(self, input: types.Documents) -> types.Embeddings: # Hash the texts and convert to hex strings hashed_texts = [ list(hashlib.sha256(text.encode("utf-8")).hexdigest()) for text in input ] # Pad with repetition, or truncate the hex strings to the desired dimension padded_texts = [ text * (self.dim // len(text)) + text[: self.dim % len(text)] for text in hashed_texts ] # Convert the hex strings to dtype embeddings: types.Embeddings = [ np.array([int(char, 16) / 15.0 for char in text], dtype=self.dtype) for text in padded_texts ] return embeddings def __repr__(self) -> str: return f"hashing_embedding_function(dim={self.dim}, dtype={self.dtype})"
hashing_embedding_function
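A small usage sketch for the test helper above; it assumes only numpy and the class definition itself.

import numpy as np

ef = hashing_embedding_function(dim=8, dtype=np.float32)
vectors = ef(["hello", "world"])
# Each vector has 8 float32 entries in [0, 1], derived from the SHA-256 hex digest,
# so the same text always maps to the same embedding.
assert len(vectors) == 2 and vectors[0].shape == (8,)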
python
apache__airflow
airflow-core/src/airflow/api_fastapi/core_api/datamodels/xcom.py
{ "start": 2856, "end": 2992 }
class ____(StrictBaseModel): """Payload serializer for updating an XCom entry.""" value: Any map_index: int = -1
XComUpdateBody
python
django__django
tests/model_forms/models.py
{ "start": 9074, "end": 9459 }
class ____(models.Model): title = models.CharField(max_length=50, unique_for_date="posted", blank=True) slug = models.CharField(max_length=50, unique_for_year="posted", blank=True) subtitle = models.CharField(max_length=50, unique_for_month="posted", blank=True) posted = models.DateTimeField(editable=False) def __str__(self): return self.title
DateTimePost
python
kamyu104__LeetCode-Solutions
Python/maximum-weight-in-two-bags.py
{ "start": 740, "end": 1242 }
class ____(object): def maxWeight(self, weights, w1, w2): """ :type weights: List[int] :type w1: int :type w2: int :rtype: int """ dp = [[False]*(w2+1) for _ in xrange(w1+1)] dp[0][0] = True for w in weights: dp = [[dp[i][j] or (i-w >= 0 and dp[i-w][j]) or (j-w >= 0 and dp[i][j-w]) for j in xrange(w2+1)] for i in xrange(w1+1)] return max(i+j for i in xrange(w1+1) for j in xrange(w2+1) if dp[i][j])
Solution2
python
sympy__sympy
sympy/stats/joint_rv.py
{ "start": 12655, "end": 13131 }
class ____(RandomSymbol): """ Representation of random symbols with joint probability distributions to allow indexing. """ def __getitem__(self, key): if isinstance(self.pspace, JointPSpace): if (self.pspace.component_count <= key) == True: raise ValueError("Index keys for %s can only be up to %s." % (self.name, self.pspace.component_count - 1)) return Indexed(self, key)
JointRandomSymbol
python
django__django
tests/constraints/models.py
{ "start": 2841, "end": 3296 }
class ____(models.Model): name = models.CharField(max_length=255) color = models.CharField(max_length=32, null=True) class Meta: required_db_features = {"supports_partial_indexes"} constraints = [ models.UniqueConstraint( fields=["name"], name="name_without_color_uniq", condition=models.Q(color__isnull=True), ), ]
UniqueConstraintConditionProduct
python
pennersr__django-allauth
allauth/socialaccount/providers/disqus/provider.py
{ "start": 326, "end": 559 }
class ____(ProviderAccount): def get_profile_url(self): return self.account.extra_data.get("profileUrl") def get_avatar_url(self): return self.account.extra_data.get("avatar", {}).get("permalink")
DisqusAccount
python
ansible__ansible
test/integration/targets/cache-plugins/cache_plugins/dummy_cache.py
{ "start": 271, "end": 1483 }
class ____(BaseCacheModule): def __init__(self, *args, **kwargs): if not os.environ.get('DUMMY_CACHE_SKIP_SUPER'): super().__init__(*args, **kwargs) self._storage_dir = pathlib.Path(os.environ.get('OUTPUT_DIR')) / 'cache-storage' self._storage_dir.mkdir(parents=True, exist_ok=True) def _get_key_path(self, key: str) -> pathlib.Path: return self._storage_dir / key def get(self, key: t.Any) -> object: try: return json.loads(self._get_key_path(key).read_text()) except FileNotFoundError: raise KeyError(key) from None def set(self, key: str, value: object) -> None: self._get_key_path(key).write_text(json.dumps(value)) def keys(self) -> list[str]: path: pathlib.Path return [path.name for path in self._storage_dir.iterdir()] def contains(self, key: object) -> bool: try: self.get(key) except KeyError: return False return True def delete(self, key: str) -> None: self._get_key_path(key).unlink(missing_ok=True) def flush(self) -> None: for key in self.keys(): self.delete(key)
CacheModule
python
ZoranPandovski__al-go-rithms
dp/Minimum ASCII Delete Sum for Two Strings/minDelSum.py
{ "start": 337, "end": 2304 }
class ____: def minimumDeleteSum(self, s1: str, s2: str) -> int: ''' Problem restated: minimizing the ASCII sum of deleted characters is equivalent to maximizing the ASCII sum of a common subsequence (LCS). Call that maximum 'x'; the answer is the total ASCII sum of the characters of s1 and s2 minus 2 * x. ''' # Redefined LCS to return the highest ASCII sum of a common subsequence def lcs(X , Y): # find the length of the strings m = len(X) n = len(Y) # declaring the array for storing the dp values L = [[None]*(n+1) for i in range(m+1)] """Following steps build L[m+1][n+1] in bottom up fashion Note: L[i][j] contains the maximum ASCII sum of an LCS of X[0..i-1] and Y[0..j-1]""" for i in range(m+1): for j in range(n+1): # Base case if i == 0 or j == 0 : L[i][j] = 0 # If the two characters match, either extending the LCS with # this character gives the highest sum, or a previously # computed subproblem already holds it; keep the maximum elif X[i-1] == Y[j-1]: L[i][j] = max(L[i-1][j-1]+ord(X[i-1]), L[i-1][j] , L[i][j-1]) # Else get the max without the character else: L[i][j] = max(L[i-1][j] , L[i][j-1]) return L[m][n] # Find the sum of all the ASCII values of 's1' chars r1 = 0 for i in s1: r1 += ord(i) # Find the sum of all the ASCII values of 's2' chars r2 = 0 for i in s2: r2 += ord(i) # Return the answer return r1 + r2 - 2 * lcs(s1, s2)
Solution
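A quick check of the approach described in the docstring above (total ASCII of both strings minus twice the maximum-ASCII-sum common subsequence), using the two standard examples for this problem.

s = Solution()
print(s.minimumDeleteSum("sea", "eat"))      # 231: delete 's' (115) from "sea" and 't' (116) from "eat"
print(s.minimumDeleteSum("delete", "leet"))  # 403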
python
facebookresearch__faiss
tests/test_index.py
{ "start": 21531, "end": 23699 }
class ____(unittest.TestCase): def do_test(self, index_key): d = 32 index = faiss.index_factory(d, index_key) index.train(faiss.randn((100, d), 123)) # reference reconstruction index.add(faiss.randn((100, d), 345)) index.add(faiss.randn((100, d), 678)) ref_recons = index.reconstruct_n(0, 200) # with lookup index.reset() rs = np.random.RandomState(123) ids = rs.choice(10000, size=200, replace=False).astype(np.int64) index.add_with_ids(faiss.randn((100, d), 345), ids[:100]) index.set_direct_map_type(faiss.DirectMap.Hashtable) index.add_with_ids(faiss.randn((100, d), 678), ids[100:]) # compare for i in range(0, 200, 13): recons = index.reconstruct(int(ids[i])) self.assertTrue(np.all(recons == ref_recons[i])) # test I/O buf = faiss.serialize_index(index) index2 = faiss.deserialize_index(buf) # compare for i in range(0, 200, 13): recons = index2.reconstruct(int(ids[i])) self.assertTrue(np.all(recons == ref_recons[i])) # remove toremove = np.ascontiguousarray(ids[0:200:3]) sel = faiss.IDSelectorArray(50, faiss.swig_ptr(toremove[:50])) # test both ways of removing elements nremove = index2.remove_ids(sel) nremove += index2.remove_ids(toremove[50:]) self.assertEqual(nremove, len(toremove)) for i in range(0, 200, 13): if i % 3 == 0: self.assertRaises( RuntimeError, index2.reconstruct, int(ids[i]) ) else: recons = index2.reconstruct(int(ids[i])) self.assertTrue(np.all(recons == ref_recons[i])) # index error should raise self.assertRaises( RuntimeError, index.reconstruct, 20000 ) def test_IVFFlat(self): self.do_test("IVF5,Flat") def test_IVFSQ(self): self.do_test("IVF5,SQfp16") def test_IVFPQ(self): self.do_test("IVF5,PQ4x4np")
TestReconsHash
python
ansible__ansible
test/lib/ansible_test/_internal/bootstrap.py
{ "start": 2085, "end": 2534 }
class ____(Bootstrap): """Bootstrap remote instances.""" platform: str platform_version: str def get_variables(self) -> dict[str, t.Union[str, list[str]]]: """The variables to template in the bootstrapping script.""" variables = super().get_variables() variables.update( platform=self.platform, platform_version=self.platform_version, ) return variables
BootstrapRemote
python
dagster-io__dagster
python_modules/libraries/dagster-databricks/dagster_databricks/pipes.py
{ "start": 19807, "end": 23171 }
class ____(PipesChunkedLogReader): """Reader that reads a log file from DBFS. Args: interval (float): interval in seconds between attempts to download a log chunk remote_log_name (Literal["stdout", "stderr"]): The name of the log file to read. target_stream (TextIO): The stream to which to forward log chunks that have been read. client (WorkspaceClient): A databricks `WorkspaceClient` object. debug_info (Optional[str]): An optional message containing debug information about the log reader. """ def __init__( self, *, interval: float = 10, remote_log_name: Literal["stdout", "stderr"], target_stream: TextIO, client: WorkspaceClient, debug_info: Optional[str] = None, ): super().__init__(interval=interval, target_stream=target_stream) self.dbfs_client = files.DbfsAPI(client.api_client) self.remote_log_name = remote_log_name self.log_position = 0 self.log_modification_time = None self.log_path = None self._debug_info = debug_info @property def debug_info(self) -> Optional[str]: return self._debug_info def target_is_readable(self, params: PipesParams) -> bool: return self._get_log_path(params) is not None def download_log_chunk(self, params: PipesParams) -> Optional[str]: log_path = self._get_log_path(params) if log_path is None: return None else: try: status = self.dbfs_client.get_status(log_path) # No need to download again if it hasn't changed if status.modification_time == self.log_modification_time: return None read_response = self.dbfs_client.read(log_path) assert read_response.data content = base64.b64decode(read_response.data).decode("utf-8") chunk = content[self.log_position :] self.log_position = len(content) return chunk except OSError: return None @property def name(self) -> str: return f"PipesDbfsLogReader({self.remote_log_name})" # The directory containing logs will not exist until either 5 minutes have elapsed or the # job has finished. def _get_log_path(self, params: PipesParams) -> Optional[str]: if self.log_path is None: cluster_driver_log_root = ( params["extras"].get("cluster_driver_log_root") if "extras" in params else None ) if cluster_driver_log_root is None: return None try: child_dirs = list(self.dbfs_client.list(cluster_driver_log_root)) except OSError: child_dirs = [] # log root doesn't exist yet match = next( ( child_dir for child_dir in child_dirs if child_dir.path and child_dir.path.endswith(self.remote_log_name) ), None, ) if match: self.log_path = f"dbfs:{match.path}" return self.log_path # #################################### # ##### Databricks Serverless # ####################################
PipesDbfsLogReader
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI045.py
{ "start": 772, "end": 844 }
class ____: def __iter__(self) -> Iterable: ...
IterableReturn
python
jazzband__django-model-utils
model_utils/managers.py
{ "start": 14732, "end": 18183 }
class ____(models.QuerySet[Any]): def join(self, qs: QuerySet[Any] | None = None) -> QuerySet[Any]: ''' Join one queryset together with another using a temporary table. If no queryset is used, it will use the current queryset and join that to itself. `Join` either uses the current queryset and effectively does a self-join to create a new limited queryset OR it uses a queryset given by the user. The model of a given queryset needs to contain a valid foreign key to the current queryset to perform a join. A new queryset is then created. ''' to_field = 'id' if qs: fks = [ fk for fk in qs.model._meta.fields if getattr(fk, 'related_model', None) == self.model ] fk = fks[0] if fks else None model_set = f'{self.model.__name__.lower()}_set' key = fk or getattr(qs.model, model_set, None) if not key: raise ValueError('QuerySet is not related to current model') try: fk_column = key.column except AttributeError: fk_column = 'id' to_field = key.field.column qs = qs.only(fk_column) # if we give a qs we need to keep the model qs to not lose anything new_qs = self else: fk_column = 'id' qs = self.only(fk_column) new_qs = self.model._default_manager.all() TABLE_NAME = 'temp_stuff' query, params = qs.query.sql_with_params() sql = ''' DROP TABLE IF EXISTS {table_name}; DROP INDEX IF EXISTS {table_name}_id; CREATE TEMPORARY TABLE {table_name} AS {query}; CREATE INDEX {table_name}_{fk_column} ON {table_name} ({fk_column}); '''.format(table_name=TABLE_NAME, fk_column=fk_column, query=str(query)) with connection.cursor() as cursor: cursor.execute(sql, params) class TempModel(models.Model): temp_key = models.ForeignKey( self.model, on_delete=models.DO_NOTHING, db_column=fk_column, to_field=to_field ) class Meta: managed = False db_table = TABLE_NAME conn = Join( table_name=TempModel._meta.db_table, parent_alias=new_qs.query.get_initial_alias(), table_alias=None, join_type='INNER JOIN', join_field=self.model.tempmodel_set.rel, nullable=False ) new_qs.query.join(conn, reuse=None) return new_qs if not TYPE_CHECKING: # Hide deprecated API during type checking, to encourage switch to # 'JoinQueryset.as_manager()', which is supported by the mypy plugin # of django-stubs. class JoinManagerMixin: """ Manager that adds a method join. This method allows you to join two querysets together. """ def get_queryset(self): warnings.warn( "JoinManager and JoinManagerMixin are deprecated. " "Please use 'JoinQueryset.as_manager()' instead.", DeprecationWarning ) return self._queryset_class(model=self.model, using=self._db) class JoinManager(JoinManagerMixin): pass
JoinQueryset
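A hedged sketch of the temp-table join described above; the Author/Book models are assumptions for illustration and would need to live in an installed Django app before the queryset call can run.

from django.db import models
from model_utils.managers import JoinQueryset

class Author(models.Model):
    name = models.CharField(max_length=100)
    objects = JoinQueryset.as_manager()

class Book(models.Model):
    title = models.CharField(max_length=100)
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    objects = JoinQueryset.as_manager()

# Book has a foreign key to Author, so a Book queryset can be joined onto Author via a temporary table:
# recent_authors = Author.objects.join(Book.objects.filter(pk__gte=1000))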
python
django-extensions__django-extensions
tests/collisions/models.py
{ "start": 403, "end": 552 }
class ____(models.Model): # conflict with django.contrib.auth name = models.CharField(max_length=10) priority = models.IntegerField()
Group