language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
pydantic__pydantic
tests/mypy/modules/plugin_strict_fields.py
{ "start": 402, "end": 556 }
class ____(Model): b: int = Field(strict=False) c: int = Field(strict=True) # expected error: c ModelOverride1(a='1', b='2', c='3')
ModelOverride1
python
kamyu104__LeetCode-Solutions
Python/process-tasks-using-servers.py
{ "start": 52, "end": 769 }
class ____(object): def assignTasks(self, servers, tasks): """ :type servers: List[int] :type tasks: List[int] :rtype: List[int] """ idle = [(servers[i], i) for i in xrange(len(servers))] working = [] heapq.heapify(idle) result = [] t = 0 for i in xrange(len(tasks)): t = max(t, i) if idle else working[0][0] while working and working[0][0] <= t: _, w, idx = heapq.heappop(working) heapq.heappush(idle, (w, idx)) w, idx = heapq.heappop(idle) heapq.heappush(working, (t+tasks[i], w, idx)) result.append(idx) return result
Solution
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_high_low_lines01.py
{ "start": 315, "end": 1554 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_high_low_lines01.xlsx") def test_create_file(self): """Test the creation of an XlsxWriter file with high-low lines.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "line"}) chart.axis_ids = [49018368, 49019904] data = [ [1, 2, 3, 4, 5], [2, 4, 6, 8, 10], [3, 6, 9, 12, 15], ] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) worksheet.write_column("C1", data[2]) chart.set_high_low_lines() chart.add_series( { "categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$B$1:$B$5", } ) chart.add_series( { "categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$C$1:$C$5", } ) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
sqlalchemy__sqlalchemy
test/sql/test_operators.py
{ "start": 90304, "end": 93689 }
class ____(fixtures.TestBase, testing.AssertsCompiledSQL): __dialect__ = "default" table1 = table("mytable", column("myid", Integer)) @testing.combinations( ("add", operator.add, "+"), ("mul", operator.mul, "*"), ("sub", operator.sub, "-"), ("mod", operator.mod, "%"), id_="iaa", ) def test_math_op(self, py_op, sql_op): for lhs, rhs, res in ( (5, self.table1.c.myid, ":myid_1 %s mytable.myid"), (5, literal(5), ":param_1 %s :param_2"), (self.table1.c.myid, "b", "mytable.myid %s :myid_1"), (self.table1.c.myid, literal(2.7), "mytable.myid %s :param_1"), ( self.table1.c.myid, self.table1.c.myid, "mytable.myid %s mytable.myid", ), (literal(5), 8, ":param_1 %s :param_2"), (literal(6), self.table1.c.myid, ":param_1 %s mytable.myid"), (literal(7), literal(5.5), ":param_1 %s :param_2"), ): self.assert_compile(py_op(lhs, rhs), res % sql_op) def test_truediv_op_integer(self): self.assert_compile( 5 / literal(5), ":param_1 / CAST(:param_2 AS NUMERIC)" ) def test_floordiv_op_integer(self): self.assert_compile(5 // literal(5), ":param_1 / :param_2") def test_floordiv_op_numeric(self): self.assert_compile(5.10 // literal(5.5), "FLOOR(:param_1 / :param_2)") @testing.combinations( ("format", "mytable.myid %% %s"), ("qmark", "mytable.myid % ?"), ("named", "mytable.myid % :myid_1"), ("pyformat", "mytable.myid %% %(myid_1)s"), ) def test_custom_op_percent_escaping(self, paramstyle, expected): expr = self.table1.c.myid.op("%")(5) self.assert_compile( expr, expected, dialect=default.DefaultDialect(paramstyle=paramstyle), ) @testing.combinations( (operator.add,), (operator.mul,), (operator.sub,), (operator.floordiv), ) def test_integer_integer_coercion_to_integer(self, op): expr = op(column("bar", Integer()), column("foo", Integer())) assert isinstance(expr.type, Integer) @testing.combinations( (operator.add,), (operator.mul,), (operator.sub,), (operator.truediv,), ) def test_integer_numeric_coercion_to_numeric(self, op): expr = op(column("bar", Integer()), column("foo", Numeric(10, 2))) 
assert isinstance(expr.type, Numeric) expr = op(column("foo", Numeric(10, 2)), column("bar", Integer())) assert isinstance(expr.type, Numeric) def test_integer_truediv(self): expr = column("bar", Integer()) / column("foo", Integer) assert isinstance(expr.type, Numeric) def test_integer_floordiv(self): expr = column("bar", Integer()) // column("foo", Integer) assert isinstance(expr.type, Integer) def test_power_operator(self): expr = column("bar", Integer()) ** column("foo", Integer) self.assert_compile(expr, "pow(bar, foo)") expr = column("bar", Integer()) ** 42 self.assert_compile(expr, "pow(bar, :pow_1)", {"pow_1": 42}) expr = 99 ** column("bar", Integer()) self.assert_compile(expr, "pow(:pow_1, bar)", {"pow_1": 42})
MathOperatorTest
python
django__django
tests/m2m_and_m2o/models.py
{ "start": 581, "end": 683 }
class ____(models.Model): others = models.ManyToManyField("StringReferenceModel")
StringReferenceModel
python
tensorflow__tensorflow
tensorflow/python/ops/nn_ops.py
{ "start": 12254, "end": 28012 }
class ____: """Helper class for _non_atrous_convolution. Note that this class assumes that shapes of input and filter passed to `__call__` are compatible with `input_shape` and filter_shape passed to the constructor. Args: input_shape: static input shape, i.e. input.shape. filter_shape: static filter shape, i.e. filter.shape. padding: see _non_atrous_convolution. data_format: see _non_atrous_convolution. strides: see _non_atrous_convolution. name: see _non_atrous_convolution. num_batch_dims: (Optional.) The number of batch dimensions in the input; if not provided, the default of `1` is used. """ def __init__( self, input_shape, filter_shape, padding, data_format=None, strides=None, name=None, num_batch_dims=1): # filter shape is always rank num_spatial_dims + 2 # and num_spatial_dims == input_shape.ndims - num_batch_dims - 1 if input_shape.ndims is not None: filter_shape = filter_shape.with_rank( input_shape.ndims - num_batch_dims + 1) self.padding = padding self.name = name # input shape is == num_spatial_dims + num_batch_dims + 1 # and filter_shape is always rank num_spatial_dims + 2 if filter_shape.ndims is not None: input_shape = input_shape.with_rank( filter_shape.ndims + num_batch_dims - 1) if input_shape.ndims is None: raise ValueError( "Rank of convolution must be known. " f"Received: input_shape={input_shape} of rank {input_shape.rank}") if input_shape.ndims < 3 or input_shape.ndims - num_batch_dims + 1 > 5: raise ValueError( "`input_shape.rank - num_batch_dims + 1` must be at least 3 and at " f"most 5. Received: input_shape.rank={input_shape.rank} and " f"num_batch_dims={num_batch_dims}") conv_dims = input_shape.ndims - num_batch_dims - 1 if strides is None: strides = [1] * conv_dims elif len(strides) != conv_dims: raise ValueError( f"`len(strides)` should be {conv_dims}. 
" f"Received: strides={strides} of length {len(strides)}") if conv_dims == 1: # conv1d uses the 2-d data format names if data_format is None: data_format = "NWC" elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}: raise ValueError("`data_format` must be 'NWC' or 'NCW'. " f"Received: data_format={data_format}") self.strides = strides[0] self.data_format = data_format self.conv_op = self._conv1d elif conv_dims == 2: if data_format is None or data_format == "NHWC": data_format = "NHWC" strides = [1] + list(strides) + [1] elif data_format == "NCHW": strides = [1, 1] + list(strides) else: raise ValueError("`data_format` must be 'NHWC' or 'NCHW'. " f"Received: data_format={data_format}") self.strides = strides self.data_format = data_format self.conv_op = conv2d elif conv_dims == 3: if data_format is None or data_format == "NDHWC": strides = [1] + list(strides) + [1] elif data_format == "NCDHW": strides = [1, 1] + list(strides) else: raise ValueError("`data_format` must be 'NDHWC' or 'NCDHW'. " f"Received: data_format={data_format}") self.strides = strides self.data_format = data_format self.conv_op = _conv3d_expanded_batch # Note that we need this adapter since argument names for conv1d don't match # those for gen_nn_ops.conv2d and gen_nn_ops.conv3d. # pylint: disable=redefined-builtin def _conv1d(self, input, filter, strides, padding, data_format, name): return conv1d( value=input, filters=filter, stride=strides, padding=padding, data_format=data_format, name=name) # pylint: enable=redefined-builtin def __call__(self, inp, filter): # pylint: disable=redefined-builtin return self.conv_op( input=inp, filter=filter, strides=self.strides, padding=self.padding, data_format=self.data_format, name=self.name) def squeeze_batch_dims(inp, op, inner_rank, name=None): """Returns `unsqueeze_batch(op(squeeze_batch(inp)))`. 
Where `squeeze_batch` reshapes `inp` to shape `[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]` and `unsqueeze_batch` does the reverse reshape but on the output. Args: inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape` is length `inner_rank`. op: A callable that takes a single input tensor and returns a single. output tensor. inner_rank: A python integer. name: A string. Returns: `unsqueeze_batch_op(squeeze_batch(inp))`. """ with ops.name_scope(name, "squeeze_batch_dims", [inp]): inp = ops.convert_to_tensor(inp, name="input") shape = inp.shape inner_shape = shape[-inner_rank:] if not inner_shape.is_fully_defined(): inner_shape = array_ops.shape(inp)[-inner_rank:] batch_shape = shape[:-inner_rank] if not batch_shape.is_fully_defined(): batch_shape = array_ops.shape(inp)[:-inner_rank] if isinstance(inner_shape, tensor_shape.TensorShape): inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list()) else: inp_reshaped = array_ops.reshape( inp, array_ops.concat(([-1], inner_shape), axis=-1)) out_reshaped = op(inp_reshaped) out_inner_shape = out_reshaped.shape[-inner_rank:] if not out_inner_shape.is_fully_defined(): out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:] out = array_ops.reshape( out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1)) out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:]) return out @tf_export("nn.dilation2d", v1=[]) @dispatch.add_dispatch_support def dilation2d_v2( input, # pylint: disable=redefined-builtin filters, # pylint: disable=redefined-builtin strides, padding, data_format, dilations, name=None): """Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors. The `input` tensor has shape `[batch, in_height, in_width, depth]` and the `filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each input channel is processed independently of the others with its own structuring function. 
The `output` tensor has shape `[batch, out_height, out_width, depth]`. The spatial dimensions of the output tensor depend on the `padding` algorithm. We currently only support the default "NHWC" `data_format`. In detail, the grayscale morphological 2-D dilation is the max-sum correlation (for consistency with `conv2d`, we use unmirrored filters): output[b, y, x, c] = max_{dy, dx} input[b, strides[1] * y + rates[1] * dy, strides[2] * x + rates[2] * dx, c] + filters[dy, dx, c] Max-pooling is a special case when the filter has size equal to the pooling kernel size and contains all zeros. Note on duality: The dilation of `input` by the `filters` is equal to the negation of the erosion of `-input` by the reflected `filters`. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. 4-D with shape `[batch, in_height, in_width, depth]`. filters: A `Tensor`. Must have the same type as `input`. 3-D with shape `[filter_height, filter_width, depth]`. strides: A list of `ints` that has length `>= 4`. The stride of the sliding window for each dimension of the input tensor. Must be: `[1, stride_height, stride_width, 1]`. padding: A `string` from: `"SAME", "VALID"`. The type of padding algorithm to use. See [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2) for more information. data_format: A `string`, only `"NHWC"` is currently supported. dilations: A list of `ints` that has length `>= 4`. The input stride for atrous morphological dilation. Must be: `[1, rate_height, rate_width, 1]`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ if data_format != "NHWC": raise ValueError("`data_format` values other than 'NHWC' are not " f"supported. 
Received: data_format={data_format}") return gen_nn_ops.dilation2d(input=input, filter=filters, strides=strides, rates=dilations, padding=padding, name=name) @tf_export(v1=["nn.dilation2d"]) @dispatch.add_dispatch_support def dilation2d_v1( # pylint: disable=missing-docstring input, # pylint: disable=redefined-builtin filter=None, # pylint: disable=redefined-builtin strides=None, rates=None, padding=None, name=None, filters=None, dilations=None): filter = deprecated_argument_lookup("filters", filters, "filter", filter) rates = deprecated_argument_lookup("dilations", dilations, "rates", rates) return gen_nn_ops.dilation2d(input, filter, strides, rates, padding, name) dilation2d_v1.__doc__ = gen_nn_ops.dilation2d.__doc__ @tf_export("nn.with_space_to_batch") @dispatch.add_dispatch_support def with_space_to_batch( input, # pylint: disable=redefined-builtin dilation_rate, padding, op, filter_shape=None, spatial_dims=None, data_format=None): """Performs `op` on the space-to-batch representation of `input`. This has the effect of transforming sliding window operations into the corresponding "atrous" operation in which the input is sampled at the specified `dilation_rate`. 
In the special case that `dilation_rate` is uniformly 1, this simply returns: op(input, num_spatial_dims, padding) Otherwise, it returns: batch_to_space_nd( op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings), num_spatial_dims, "VALID") adjusted_dilation_rate, adjusted_crops), where: adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)], adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2] defined as follows: We first define two int64 tensors `paddings` and `crops` of shape `[num_spatial_dims, 2]` based on the value of `padding` and the spatial dimensions of the `input`: If `padding = "VALID"`, then: paddings, crops = required_space_to_batch_paddings( input_shape[spatial_dims], dilation_rate) If `padding = "SAME"`, then: dilated_filter_shape = filter_shape + (filter_shape - 1) * (dilation_rate - 1) paddings, crops = required_space_to_batch_paddings( input_shape[spatial_dims], dilation_rate, [(dilated_filter_shape - 1) // 2, dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2]) Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial dimensions are contiguous starting at the second dimension, but the specified `spatial_dims` may not be, we must adjust `dilation_rate`, `paddings` and `crops` in order to be usable with these operations. For a given dimension, if the block size is 1, and both the starting and ending padding and crop amounts are 0, then space_to_batch_nd effectively leaves that dimension alone, which is what is needed for dimensions not part of `spatial_dims`. Furthermore, `space_to_batch_nd` and `batch_to_space_nd` handle this case efficiently for any number of leading and trailing dimensions. 
For 0 <= i < len(spatial_dims), we assign: adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i] adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :] adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :] All unassigned values of `adjusted_dilation_rate` default to 1, while all unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0. Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID" padding is equivalent to specifying `padding = "SAME"` with a filter_shape of `[1]*N`. Advanced usage. Note the following optimization: A sequence of `with_space_to_batch` operations with identical (not uniformly 1) `dilation_rate` parameters and "VALID" padding net = with_space_to_batch(net, dilation_rate, "VALID", op_1) ... net = with_space_to_batch(net, dilation_rate, "VALID", op_k) can be combined into a single `with_space_to_batch` operation as follows: def combined_op(converted_input, num_spatial_dims, _): result = op_1(converted_input, num_spatial_dims, "VALID") ... result = op_k(result, num_spatial_dims, "VALID") net = with_space_to_batch(net, dilation_rate, "VALID", combined_op) This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and `batch_to_space_nd`. Similarly, a sequence of `with_space_to_batch` operations with identical (not uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter dimensions net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1) ... net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k) can be combined into a single `with_space_to_batch` operation as follows: def combined_op(converted_input, num_spatial_dims, _): result = op_1(converted_input, num_spatial_dims, "SAME") ... result = op_k(result, num_spatial_dims, "SAME") net = with_space_to_batch(net, dilation_rate, "VALID", combined_op) Args: input: Tensor of rank > max(spatial_dims). dilation_rate: int32 Tensor of *known* shape [num_spatial_dims]. 
padding: str constant equal to "VALID" or "SAME" op: Function that maps (input, num_spatial_dims, padding) -> output filter_shape: If padding = "SAME", specifies the shape of the convolution kernel/pooling window as an integer Tensor of shape [>=num_spatial_dims]. If padding = "VALID", filter_shape is ignored and need not be specified. spatial_dims: Monotonically increasing sequence of `num_spatial_dims` integers (which are >= 1) specifying the spatial dimensions of `input` and output. Defaults to: `range(1, num_spatial_dims+1)`. data_format: A string or None. Specifies whether the channel dimension of the `input` and output is the last dimension (default, or if `data_format` does not start with "NC"), or the second dimension (if `data_format` starts with "NC"). For N=1, the valid values are "NWC" (default) and "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For N=3, the valid values are "NDHWC" (default) and "NCDHW". Returns: The output Tensor as described above, dimensions will vary based on the op provided. Raises: ValueError: if `padding` is invalid or the arguments are incompatible. ValueError: if `spatial_dims` are invalid. """ input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin input_shape = input.shape def build_op(num_spatial_dims, padding): return lambda inp, _: op(inp, num_spatial_dims, padding) new_op = _WithSpaceToBatch( input_shape, dilation_rate, padding, build_op, filter_shape=filter_shape, spatial_dims=spatial_dims, data_format=data_format) return new_op(input, None)
_NonAtrousConvolution
python
pytorch__pytorch
test/quantization/fx/test_quantize_fx.py
{ "start": 275505, "end": 380569 }
class ____(QuantizationTestCase): def setUp(self): super().setUp() self.custom_qconfig = torch.ao.quantization.QConfig( activation=torch.ao.quantization.observer.HistogramObserver.with_args( qscheme=torch.per_tensor_symmetric, dtype=torch.qint8 ), weight=torch.ao.quantization.default_per_channel_weight_observer ) self.common_quant_patterns = { torch.nn.ConvTranspose1d: DefaultNodeQuantizeHandler, torch.nn.ConvTranspose2d: DefaultNodeQuantizeHandler, torch.nn.ELU: DefaultNodeQuantizeHandler, torch.nn.LeakyReLU: DefaultNodeQuantizeHandler, torch.nn.Hardswish: DefaultNodeQuantizeHandler, torch.nn.InstanceNorm1d: DefaultNodeQuantizeHandler, torch.nn.InstanceNorm2d: DefaultNodeQuantizeHandler, torch.nn.InstanceNorm3d: DefaultNodeQuantizeHandler, torch.nn.LayerNorm: DefaultNodeQuantizeHandler, torch.nn.SiLU: DefaultNodeQuantizeHandler, torch.nn.Mish: DefaultNodeQuantizeHandler, torch.nn.GELU: DefaultNodeQuantizeHandler, torch.nn.Softmax: DefaultNodeQuantizeHandler, torch.nn.functional.elu: DefaultNodeQuantizeHandler, torch.nn.functional.hardswish: DefaultNodeQuantizeHandler, torch.nn.functional.instance_norm: DefaultNodeQuantizeHandler, torch.nn.functional.layer_norm: DefaultNodeQuantizeHandler, torch.nn.functional.leaky_relu: DefaultNodeQuantizeHandler, torch.nn.functional.silu: DefaultNodeQuantizeHandler, torch.nn.functional.mish: DefaultNodeQuantizeHandler, torch.nn.functional.gelu: DefaultNodeQuantizeHandler, torch.nn.functional.softmax: DefaultNodeQuantizeHandler, torch.sum: DefaultNodeQuantizeHandler } """Unit tests for individual ops """ @skipIfNoFBGEMM def test_linear_module(self): with override_quantized_engine('fbgemm'): class LinearModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(30, 4).float() def forward(self, x): return self.linear(x) class LinearReLUModel(torch.nn.Module): def __init__(self, f_relu=False): super().__init__() self.linear = torch.nn.Linear(30, 4).float() if f_relu: self.relu = F.relu else: 
self.relu = torch.nn.ReLU() def forward(self, x): x = self.linear(x) x = self.relu(x) return x class LinearBnModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(4, 4).float() self.bn = torch.nn.BatchNorm1d(4) def forward(self, x): x = self.linear(x) x = self.bn(x) return x # Test linear data = (torch.rand((1, 30), dtype=torch.float),) for quant_type in self.all_quant_types: model = LinearModel() quantized_module = nnqd.Linear if quant_type == QuantType.DYNAMIC else nnq.Linear quantized_node = ns.call_module(quantized_module) result_dict = self.checkGraphModeFxOp(model, data, quant_type, quantized_node) if quant_type in self.static_quant_types: self.assertEqual(result_dict["quantized_output"], result_dict["quantized_reference_output"]) # TODO: enable test for dynamic quant # Test linear-relu for f_relu, quant_type in itertools.product([True, False], [QuantType.STATIC, QuantType.QAT]): model = LinearReLUModel(f_relu) quantized_node = ns.call_module(nniq.LinearReLU) result_dict = self.checkGraphModeFxOp(model, data, quant_type, quantized_node) self.assertEqual(result_dict["quantized_output"], result_dict["quantized_reference_output"]) # Test linear-bn data = (torch.rand((4, 4), dtype=torch.float),) for quant_type in self.static_quant_types: model = LinearBnModel() quantized_node = ns.call_module(nnq.Linear) result_dict = self.checkGraphModeFxOp(model, data, quant_type, quantized_node) self.assertEqual(result_dict["quantized_output"], result_dict["quantized_reference_output"]) @skipIfNoFBGEMM def test_functional_linear(self): with override_quantized_engine('fbgemm'): class FuncLinear(torch.nn.Module): def __init__(self, use_bias, has_relu, f_relu): super().__init__() self.w = torch.randn(4, 30) self.b = torch.randn(4) self.use_bias = use_bias if has_relu: if f_relu: self.relu_or_id = F.relu else: self.relu_or_id = torch.nn.ReLU() else: self.relu_or_id = torch.nn.Identity() def forward(self, x): if self.use_bias: x = 
F.linear(x, self.w, self.b) else: x = F.linear(x, self.w) x = self.relu_or_id(x) return x data = (torch.rand((1, 30), dtype=torch.float),) quant_type_to_qlinear_fun = { QuantType.DYNAMIC: ns.call_function(torch.ops.quantized.linear_dynamic), QuantType.STATIC: ns.call_function(torch.ops.quantized.linear), QuantType.QAT: ns.call_function(torch.ops.quantized.linear), } quant_type_to_qlinear_relu_fun = { # we don't have linear_relu_dynamic QuantType.DYNAMIC: ns.call_function(torch.ops.quantized.linear_relu_dynamic), QuantType.STATIC: ns.call_function(torch.ops.quantized.linear_relu), QuantType.QAT: ns.call_function(torch.ops.quantized.linear_relu), } options = itertools.product( self.all_quant_types, (True, False), # use_bias (True, False), # has_relu (True, False), # functional relu ) for quant_type, use_bias, has_relu, f_relu in options: # when has_relu is False, we are using an nn.Identity and # we will insert observer/fake_quant for the output of nn.Identity since # it is a copy node, that's why we have extra observer/fake_quant # when has_relu is False quant_type_to_prepare_expected_node_occurrence = { QuantType.DYNAMIC: { ns.call_module(torch.ao.quantization.PlaceholderObserver): 1, ns.call_module(torch.ao.quantization.MinMaxObserver): 1, }, # There should be 3 observers: after input, weight and activation. # one more observer for torch.nn.Identity when there is no relu QuantType.STATIC: { ns.call_module(torch.ao.quantization.HistogramObserver): 2 if has_relu else 3, ns.call_module(torch.ao.quantization.PerChannelMinMaxObserver): 1, }, # There should be 3 observers: after input, weight and activation. 
QuantType.QAT: { ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 3 if has_relu else 4, }, } model = FuncLinear(use_bias, has_relu, f_relu) if has_relu: qlinear_fun = quant_type_to_qlinear_relu_fun[quant_type] else: qlinear_fun = quant_type_to_qlinear_fun[quant_type] if quant_type != QuantType.DYNAMIC: num_dequantize = 1 else: # we will have an extra quantize_per_tensor_dynamic + dequantize for # nn.Identity right now, but it will be fixed after we use # backend_config to configure the default pt backend num_dequantize = int(not has_relu) convert_node_occurrence = { ns.call_function(torch.quantize_per_tensor): 1 if quant_type != QuantType.DYNAMIC else 0, qlinear_fun: 1, ns.call_method("dequantize"): num_dequantize if quant_type != QuantType.DYNAMIC else 0, } prepare_expected_node_occurrence = \ quant_type_to_prepare_expected_node_occurrence[quant_type] result_dict = self.checkGraphModeFxOp( model, data, quant_type, qlinear_fun, prepare_expected_node_occurrence=prepare_expected_node_occurrence, expected_node_occurrence=convert_node_occurrence) if quant_type != QuantType.DYNAMIC: self.assertEqual(result_dict["quantized_output"], result_dict["quantized_reference_output"]) # Ensure packed weights in lowered models are folded self.assertIn("_packed_weight_0", result_dict["quantized"].state_dict().keys()) @skipIfNoFBGEMM def test_linear_dynamic_fp16(self): with override_quantized_engine('fbgemm'): class FuncLinear(torch.nn.Module): def __init__(self, use_bias, has_relu, f_relu): super().__init__() self.w = torch.randn(4, 30) self.b = torch.randn(4) self.use_bias = use_bias if has_relu: if f_relu: self.relu = F.relu else: self.relu = torch.nn.ReLU() else: self.relu = torch.nn.Identity() def forward(self, x): if self.use_bias: x = F.linear(x, self.w, self.b) else: x = F.linear(x, self.w) x = self.relu(x) return x data = (torch.rand((1, 30), dtype=torch.float),) options = itertools.product( (True, False), # use_bias (True, False), # has_relu (True, 
False), # functional relu (True, False), # is_reference ) for use_bias, has_relu, f_relu, is_reference in options: model = FuncLinear(use_bias, has_relu, f_relu) if is_reference: qlinear_fun = ns.call_function(torch.nn.functional.linear) else: if has_relu: qlinear_fun = ns.call_function(torch.ops.quantized.linear_relu_dynamic_fp16) else: qlinear_fun = ns.call_function(torch.ops.quantized.linear_dynamic_fp16) prepare_node_occurrence = { # activation and weight ns.call_module(torch.ao.quantization.PlaceholderObserver): 2 } convert_node_occurrence = { qlinear_fun: 1, # weight ns.call_method("to"): 1 if is_reference else 0 } self.checkGraphModeFxOp( model, data, QuantType.DYNAMIC, qlinear_fun, is_reference=is_reference, custom_qconfig_dict={"": float16_dynamic_qconfig}, prepare_expected_node_occurrence=prepare_node_occurrence, expected_node_occurrence=convert_node_occurrence) def test_linear_static_fp16(self): class FuncLinear(torch.nn.Module): def __init__(self, use_bias, has_relu, f_relu): super().__init__() self.w = torch.randn(4, 30) self.b = torch.randn(4) self.use_bias = use_bias if has_relu: if f_relu: self.relu = F.relu else: self.relu = torch.nn.ReLU() else: self.relu = torch.nn.Identity() def forward(self, x): if self.use_bias: x = F.linear(x, self.w, self.b) else: x = F.linear(x, self.w) x = self.relu(x) return x data = (torch.rand((1, 30), dtype=torch.float),) options = itertools.product( (True, False), # use_bias (True, False), # has_relu (True, False), # functional relu (True, False), # is_reference ) backend_config = get_test_only_legacy_native_backend_config() for use_bias, has_relu, f_relu, is_reference in options: model = FuncLinear(use_bias, has_relu, f_relu) linear_fun = ns.call_function(torch.nn.functional.linear) # when has_relu is False, we are using an nn.Identity and # we will insert observer/fake_quant for the output of nn.Identity since # it is a copy node, that's why we have extra observer/fake_quant # when has_relu is False 
prepare_node_occurrence = { # activation, weight, bias and output ns.call_module(torch.ao.quantization.PlaceholderObserver): 3 + int(use_bias) + int(not has_relu), } # We have extra to and dequantize when is_reference is True # and has_relu is False since when has_relu is False, we # have an nn.Identity in the model, which is a CopyNode # and we would add extra quant - dequant for CopyNode in # reference patterns convert_node_occurrence = { # we don't support static fp16 ops, so the linear function # is unfused linear_fun: 1, # activation, weight, bias and output ns.call_method("to"): 3 + int(use_bias) + int(not has_relu and is_reference), ns.call_method("dequantize"): 3 + int(use_bias) + int(not has_relu and is_reference) } self.checkGraphModeFxOp( model, data, QuantType.DYNAMIC, linear_fun, is_reference=is_reference, custom_qconfig_dict={"": float16_static_qconfig}, prepare_expected_node_occurrence=prepare_node_occurrence, expected_node_occurrence=convert_node_occurrence, backend_config=backend_config) @skipIfNoFBGEMM def test_conv_module(self): conv_module = {1 : torch.nn.Conv1d, 2 : torch.nn.Conv2d, 3 : torch.nn.Conv3d} class ConvWrapper(torch.nn.Module): def __init__(self, dim): super().__init__() self.conv = conv_module[dim](3, 3, 3).float() def forward(self, x): return self.conv(x) options = itertools.product([1, 2, 3], self.static_quant_types) quantized_nodes = { # dim 1: ns.call_module(nnq.Conv1d), 2: ns.call_module(nnq.Conv2d), 3: ns.call_module(nnq.Conv3d), } for dim, quant_type in options: self.checkGraphModeFxOp( ConvWrapper(dim), self.img_data_dict[dim], quant_type, quantized_nodes[dim]) @skipIfNoFBGEMM def test_functional_conv(self): with override_quantized_engine('fbgemm'): """ Test for function conv and functional conv + relu """ convs = { 1: torch.nn.functional.conv1d, 2: torch.nn.functional.conv2d, 3: torch.nn.functional.conv3d, } class FuncConv(torch.nn.Module): def __init__(self, dim, use_bias, has_relu, f_relu): super().__init__() self.dim = 
dim self.w = torch.randn(tuple([3] * (dim + 2))) self.b = torch.randn(3) if use_bias else None self.stride = tuple([1] * dim) self.padding = tuple([0] * dim) self.dilation = tuple([1] * dim) self.groups = 1 self.use_bias = use_bias if has_relu: if f_relu: self.relu = F.relu else: self.relu = torch.nn.ReLU() else: self.relu = torch.nn.Identity() def forward(self, x): x = convs[self.dim](x, self.w, self.b, self.stride, self.padding, self.dilation, self.groups) x = self.relu(x) return x quant_type_to_qconv_fun = { QuantType.STATIC: { 1: ns.call_function(torch.ops.quantized.conv1d), 2: ns.call_function(torch.ops.quantized.conv2d), 3: ns.call_function(torch.ops.quantized.conv3d) }, QuantType.QAT: { 1: ns.call_function(torch.ops.quantized.conv1d), 2: ns.call_function(torch.ops.quantized.conv2d), 3: ns.call_function(torch.ops.quantized.conv3d) }, } quant_type_to_qconv_relu_fun = { QuantType.STATIC: { 1: ns.call_function(torch.ops.quantized.conv1d_relu), 2: ns.call_function(torch.ops.quantized.conv2d_relu), 3: ns.call_function(torch.ops.quantized.conv3d_relu) }, QuantType.QAT: { 1: ns.call_function(torch.ops.quantized.conv1d_relu), 2: ns.call_function(torch.ops.quantized.conv2d_relu), 3: ns.call_function(torch.ops.quantized.conv3d_relu) }, } options = itertools.product( [1, 2, 3], # dims self.static_quant_types, (True, False), # use_bias (True, False), # has_relu (True, False), # functional relu ) for dim, quant_type, use_bias, has_relu, f_relu in options: # when has_relu is False, we are using an nn.Identity and # we will insert observer/fake_quant for the output of nn.Identity since # it is a copy node, that's why we have extra observer/fake_quant # when has_relu is False quant_type_to_prepare_expected_node_occurrence = { QuantType.DYNAMIC: {}, # There should be 3 observers: after input, weight and activation. 
QuantType.STATIC: { ns.call_module(torch.ao.quantization.HistogramObserver): 2 if has_relu else 3, ns.call_module(torch.ao.quantization.PerChannelMinMaxObserver): 1, }, # There should be 3 observers: after input, weight and activation. QuantType.QAT: { ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 3 if has_relu else 4, }, } data_dims = [2, 3] + [4] * dim data = (torch.randn(tuple(data_dims), dtype=torch.float),) model = FuncConv(dim, use_bias, has_relu, f_relu) if has_relu: qconv_fun = quant_type_to_qconv_relu_fun[quant_type][dim] else: qconv_fun = quant_type_to_qconv_fun[quant_type][dim] convert_node_occurrence = { ns.call_function(torch.quantize_per_tensor): 1, qconv_fun: 1, ns.call_method("dequantize"): 1 } prepare_expected_node_occurrence = \ quant_type_to_prepare_expected_node_occurrence[quant_type] result_dict = self.checkGraphModeFxOp( model, data, quant_type, qconv_fun, prepare_expected_node_occurrence=prepare_expected_node_occurrence, expected_node_occurrence=convert_node_occurrence) if quant_type != QuantType.DYNAMIC: self.assertEqual(result_dict["quantized_output"], result_dict["quantized_reference_output"]) # Ensure packed weights in lowered models are folded self.assertIn("_packed_weight_0", result_dict["quantized"].state_dict().keys()) @skipIfNoFBGEMM def test_quantized_conv_relu(self): """tests for conv1d_relu/conv2d_relu/conv3d_relu""" conv_module = {1 : torch.nn.Conv1d, 2 : torch.nn.Conv2d, 3 : torch.nn.Conv3d} class ConvNdRelu(torch.nn.Module): def __init__(self, dim, inplace): super().__init__() self.conv = conv_module[dim](3, 3, 3).float() self.relu = torch.nn.ReLU(inplace) def forward(self, x): return self.relu(self.conv(x)) class ConvNdFunctionalRelu(torch.nn.Module): def __init__(self, dim): super().__init__() self.conv = conv_module[dim](3, 3, 3).float() def forward(self, x): return F.relu(self.conv(x)) class ConvNdInplaceFunctionalRelu(torch.nn.Module): def __init__(self, dim): super().__init__() self.conv = 
conv_module[dim](3, 3, 3).float() def forward(self, x): return F.relu(self.conv(x), True) options = itertools.product([1, 2, 3], self.static_quant_types) quantized_nodes = { # dim 1: ns.call_module(nniq.ConvReLU1d), 2: ns.call_module(nniq.ConvReLU2d), 3: ns.call_module(nniq.ConvReLU3d), } for dim, quant_type in options: for m in [ConvNdRelu(dim, True), ConvNdRelu(dim, False), ConvNdFunctionalRelu(dim), ConvNdInplaceFunctionalRelu(dim)]: self.checkGraphModeFxOp( m, self.img_data_dict[dim], quant_type, quantized_nodes[dim]) def _test_binary_op_int8_impl(self, binary_op, ibinary_op, quantized_op): data = (torch.randn(1, 1, 1, 1, dtype=torch.float), torch.randn(1, 1, 1, 1, dtype=torch.float)) options = itertools.product([True, False], [True, False], [True, False]) quant_type = QuantType.STATIC # testing for default int8 static quant for is_inplace, is_scalar, is_reference in options: if is_reference: node_list = [ ns.call_method("dequantize"), ns.call_function(binary_op), ns.call_function(torch.quantize_per_tensor) ] quantized_node = None else: node_list = None quantized_node = ns.call_function(quantized_op) self.checkGraphModeFxOp( BinaryOp(binary_op, ibinary_op, is_inplace, is_scalar), data, quant_type, quantized_node, expected_node_list=node_list, is_reference=is_reference) # This tests the binary op should be quantized even when it is not feed with a # quantized input self.checkGraphModeFxOp( BinaryOpNonQuantizedInput(binary_op, ibinary_op, is_inplace, is_scalar), data, quant_type, quantized_node, expected_node_list=node_list, is_reference=is_reference) def _test_binary_op_float16_impl(self, binary_op, ibinary_op): data = (torch.randn(1, 1, 1, 1, dtype=torch.float), torch.randn(1, 1, 1, 1, dtype=torch.float)) quant_type = QuantType.STATIC # testing for fp16 static quant # we are producing fp16 patterns options = itertools.product([True, False], [True, False]) custom_qconfig_dict = { "object_type": [(binary_op, float16_static_qconfig)] } backend_config = 
get_test_only_legacy_native_backend_config() for is_inplace, is_scalar in options: node_occurrence = { # output_conv1, output_add1, output_add2 for scalar # output_conv1, output_conv2, output_add1, output_add2 for non-scalar ns.call_method("to"): 3 if is_scalar else 4 } self.checkGraphModeFxOp( BinaryOp(binary_op, ibinary_op, is_inplace, is_scalar), data, quant_type, expected_node_occurrence=node_occurrence, custom_qconfig_dict=custom_qconfig_dict, backend_config=backend_config) node_occurrence = { # input_add, output_add for scalar # input_add1, input_add2, output_add for non-scalar ns.call_method("to"): 2 if is_scalar else 3 } self.checkGraphModeFxOp( BinaryOpNonQuantizedInput(binary_op, ibinary_op, is_inplace, is_scalar), data, quant_type, expected_node_occurrence=node_occurrence, custom_qconfig_dict=custom_qconfig_dict, backend_config=backend_config) def _test_binary_op_relu_int8_impl(self, binary_op, ibinary_op, quantized_op): data = (torch.rand((1, 1, 1, 1), dtype=torch.float), torch.rand((1, 1, 1, 1), dtype=torch.float)) quant_type = QuantType.STATIC quantized_node = ns.call_function(quantized_op) options = itertools.product( [True, False], [nn.ReLU, F.relu, torch.relu], [True, False]) for is_inplace_op, relu_callable, is_scalar in options: model = BinaryOpRelu( binary_op, ibinary_op, is_inplace_op, relu_callable, is_scalar) self.checkGraphModeFxOp( model, data, quant_type, quantized_node) def _test_binary_op_relu_float16_impl(self, binary_op, ibinary_op): data = (torch.rand((1, 1, 1, 1), dtype=torch.float), torch.rand((1, 1, 1, 1), dtype=torch.float)) quant_type = QuantType.STATIC options = itertools.product( [True, False], [nn.ReLU, F.relu, torch.relu], [True, False]) custom_qconfig_dict = { "": float16_static_qconfig, "object_type": [(torch.nn.Conv2d, None)] } backend_config = get_test_only_legacy_native_backend_config() for is_inplace_op, is_functional_relu, is_scalar in options: node_occurrence = { ns.call_method("to"): 3 if is_scalar else 4 } model = 
BinaryOpRelu( binary_op, ibinary_op, is_inplace_op, is_functional_relu, is_scalar) self.checkGraphModeFxOp( model, data, quant_type, custom_qconfig_dict=custom_qconfig_dict, expected_node_occurrence=node_occurrence, backend_config=backend_config) @skipIfNoFBGEMM def test_add(self): self._test_binary_op_int8_impl( operator.add, operator.iadd, torch.ops.quantized.add) self._test_binary_op_float16_impl( operator.add, operator.iadd) @unittest.skip("This is no longer needed right now, can enable later with new api") def test_sub(self): self._test_binary_op_float16_impl(operator.sub, operator.isub) self._test_binary_op_float16_impl(torch.sub, None) @unittest.skip("This is no longer needed right now, can enable later with new api") def test_div(self): self._test_binary_op_float16_impl(operator.truediv, operator.itruediv) self._test_binary_op_float16_impl(torch.div, None) @skipIfNoFBGEMM def test_mul(self): self._test_binary_op_int8_impl( operator.mul, operator.imul, torch.ops.quantized.mul) self._test_binary_op_float16_impl(operator.mul, operator.imul) @unittest.skip("This is no longer needed right now, can enable later with new api") def test_sum(self): class Sum(torch.nn.Module): def forward(self, x): x = torch.sum(x, [1], keepdim=True) x = torch.sum(x, [1]) return x data = torch.randn(1, 2, 3, 4, dtype=torch.float) quant_type = QuantType.STATIC # testing for fp16 static quant # we are producing fp16 patterns custom_qconfig_dict = { "object_type": [(torch.sum, float16_static_qconfig)] } node_occurrence = { # input_sum1, output_sum1, output_sum2 ns.call_method("to"): 3 } self.checkGraphModeFxOp( Sum(), data, quant_type, expected_node_occurrence=node_occurrence, custom_qconfig_dict=custom_qconfig_dict) @unittest.skip("This is no longer needed right now, can enable later with new api") def test_bmm(self): class BMMMethod(torch.nn.Module): def forward(self, x, y): return x.bmm(y) data = (torch.randn(1, 1, 1, dtype=torch.float), torch.randn(1, 1, 1, dtype=torch.float)) 
quant_type = QuantType.STATIC # testing for fp16 static quant # we are producing fp16 patterns custom_qconfig_dict = { "object_type": [(torch.bmm, float16_static_qconfig), ("bmm", float16_static_qconfig)] } node_occurrence = { # input_bmm1, input_bmm2, output_bmm ns.call_method("to"): 3 } self.checkGraphModeFxOp( BinaryOpNonQuantizedInput(torch.bmm, None, False, False), data, quant_type, expected_node_occurrence=node_occurrence, custom_qconfig_dict=custom_qconfig_dict) # TODO: support call_method("bmm") # we can transform call_method("bmm") to call_function(torch.bmm) # self.checkGraphModeFxOp( # BMMMethod(), data, quant_type, # expected_node_occurrence=node_occurrence, # custom_qconfig_dict=custom_qconfig_dict, # print_debug_info=True) @skipIfNoFBGEMM def test_add_relu(self): self._test_binary_op_relu_int8_impl( operator.add, operator.iadd, torch.ops.quantized.add_relu) self._test_binary_op_relu_float16_impl( operator.add, operator.iadd) @skipIfNoFBGEMM def test_add_relu_multiple_uses_of_relu(self): class Sub(torch.nn.Module): def __init__(self) -> None: super().__init__() self.relu = torch.nn.ReLU(inplace=True) class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.sub = Sub() def forward(self, x, y): x = x + y x = self.sub.relu(x) x = x + y x = self.sub.relu(x) return x m = M().eval() example_inputs = (torch.randn(3), torch.randn(3)) m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs) m = convert_fx(m) node_occurrence = { ns.call_function(torch.quantize_per_tensor): 2, ns.call_function(torch.ops.quantized.add_relu): 2, ns.call_method("dequantize"): 1, } self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence) # check the model is scriptable m = torch.jit.script(m) # check the model is runnable m(*example_inputs) @skipIfNoFBGEMM def test_mul_relu(self): self._test_binary_op_relu_int8_impl( operator.mul, operator.imul, torch.ops.quantized.mul_relu) self._test_binary_op_relu_float16_impl( operator.mul, 
operator.imul) # TODO(future PR): make more generic def _test_quantized_add_mul_qat(self, model, example_inputs, expected_node_occurrence): qconfig_dict = {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')} mp = prepare_qat_fx(model, qconfig_dict, example_inputs=example_inputs) self.checkGraphModuleNodes( mp, expected_node_occurrence=expected_node_occurrence) @skipIfNoFBGEMM def test_quantized_add_qat(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) def forward(self, x): x = torch.add(x, 1.0) x = self.conv1(x) x = torch.add(x, 1.0) x = torch.relu(x) x = self.conv2(x) return x m = M() example_inputs = (torch.randn(1, 1, 1, 1),) expected_node_occurrence = { ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 5, } self._test_quantized_add_mul_qat(m, example_inputs, expected_node_occurrence) @skipIfNoFBGEMM def test_quantized_mul_qat(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(1, 1, 1) self.conv2 = torch.nn.Conv2d(1, 1, 1) def forward(self, x): x = torch.mul(x, 1.0) x = self.conv1(x) x = torch.mul(x, 1.0) x = torch.relu(x) x = self.conv2(x) return x m = M() example_inputs = (torch.randn(1, 1, 1, 1),) expected_node_occurrence = { ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 5, } self._test_quantized_add_mul_qat(m, example_inputs, expected_node_occurrence) def test_int8_input_no_unnecessary_fq(self): """ If the inputs to the graph are quantized and the only node does not need an activation observer, verifies that the activation observer is not inserted. 
""" class M(nn.Module): def __init__(self, scalar): super().__init__() self.scalar = scalar self.add_func = torch.ao.nn.quantized.FloatFunctional() def forward(self, x): return self.add_func.add_scalar(x, self.scalar) m = M(0.5) mp = torch.ao.quantization.quantize_fx.prepare_qat_fx( m, {'': torch.ao.quantization.get_default_qat_qconfig('fbgemm')}, example_inputs=(torch.randn(1),), prepare_custom_config={"input_quantized_idxs": [0]}) expected_node_occurrence = { ns.call_module(torch.ao.quantization.FusedMovingAvgObsFakeQuantize): 1, } self.checkGraphModuleNodes( mp, expected_node_occurrence=expected_node_occurrence) @skipIfNoFBGEMM def test_cat(self): """ quantization of the output of cat will depend on the input of cat. we only quantize the output of cat when its inputs are quantized. """ class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(2, 2, 2).float() self.conv2 = torch.nn.Conv2d(2, 2, 2).float() def forward(self, x, y): x = self.conv1(x) y = self.conv2(y) return torch.cat([x, y], 1) example_inputs = (torch.randn(1, 2, 5, 5, dtype=torch.float), torch.randn(1, 2, 5, 5, dtype=torch.float)) quantized_node = ns.call_function(torch.cat) options = itertools.product(self.static_quant_types, [True, False]) for quant_type, is_reference in options: if is_reference: converted_node_list = [ ns.call_method("dequantize"), ns.call_function(torch.cat), ns.call_function(torch.quantize_per_tensor) ] converted_node_occurrence = { # inputs and outputs of the two conv, and output of cat ns.call_method("dequantize"): 5, ns.call_function(torch.cat): 1, # inputs and outputs of the two conv, and output of cat ns.call_function(torch.quantize_per_tensor): 5, } else: converted_node_list = None converted_node_occurrence = { # output of cat ns.call_method("dequantize"): 1, ns.call_function(torch.cat): 1, # for two inputs ns.call_function(torch.quantize_per_tensor): 2, } self.checkGraphModeFxOp( M(), example_inputs, quant_type, 
quantized_node, expected_node_list=converted_node_list, expected_node_occurrence=converted_node_occurrence, is_reference=is_reference) # check cat is using the same observer for input and output m = M().eval() m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs) # two inputs and one output of torch.cat are using same observer, so we have # 2 observers that's replicated all_observers = len(dict(m.named_modules(remove_duplicate=False))) distinct_observers = len(dict(m.named_modules())) self.assertEqual(all_observers, distinct_observers + 2) # make sure the converted model runs m = convert_fx(m) m(*example_inputs) @skipIfNoFBGEMM def test_qbatch_norm(self): bn_module = { # TODO: quantized batchnorm 1d module is missing # 1 : torch.nn.BatchNorm1d, 2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d, } class M(torch.nn.Module): def __init__(self, dim): super().__init__() self.bn = bn_module[dim](3).to(torch.float) def forward(self, x): return self.bn(x) options = itertools.product(self.static_quant_types, [2, 3], [True, False]) quantized_nodes = { False: { # 1: ns.call_module(nnq.BatchNorm1d), 2: ns.call_module(nnq.BatchNorm2d), 3: ns.call_module(nnq.BatchNorm3d), }, True: { # 1: ns.call_module(nn.BatchNorm1d), 2: ns.call_module(nn.BatchNorm2d), 3: ns.call_module(nn.BatchNorm3d), } } for quant_type, dim, is_reference in options: self.checkGraphModeFxOp( M(dim), self.img_data_dict[dim], quant_type, quantized_nodes[is_reference][dim], is_reference=is_reference) @skipIfNoFBGEMM def test_qbatch_norm_relu(self): bn_module = {2 : torch.nn.BatchNorm2d, 3 : torch.nn.BatchNorm3d} class BNRelu(torch.nn.Module): def __init__(self, dim, inplace): super().__init__() self.bn = bn_module[dim](3).to(torch.float) self.relu = torch.nn.ReLU(inplace=inplace) def forward(self, x): return self.relu(self.bn(x)) class BNFuncRelu(torch.nn.Module): def __init__(self, dim): super().__init__() self.bn = bn_module[dim](3).to(torch.float) def forward(self, x): return 
F.relu(self.bn(x), False) class BNFuncInplaceRelu(torch.nn.Module): def __init__(self, dim): super().__init__() self.bn = bn_module[dim](3).to(torch.float) def forward(self, x): return F.relu(self.bn(x), True) options = itertools.product(self.static_quant_types, [2, 3], [True, False]) quantized_nodes = { True: { 2: ns.call_module(nni.BNReLU2d), 3: ns.call_module(nni.BNReLU3d), }, False: { 2: ns.call_module(nniq.BNReLU2d), 3: ns.call_module(nniq.BNReLU3d), } } for quant_type, dim, is_reference in options: for instance in [BNRelu(dim, True), BNRelu(dim, False), BNFuncRelu(dim), BNFuncInplaceRelu(dim)]: self.checkGraphModeFxOp( instance, self.img_data_dict[dim], quant_type, quantized_nodes[is_reference][dim], is_reference=is_reference) def _test_activation_impl( self, float_module, float_op, quantized_module, quantized_op): ''' Test for activation op(with inplace options), float_op can be torch op or functional op ''' class M(torch.nn.Module): def __init__(self, is_module, inplace): super().__init__() self.is_module = is_module self.inplace = inplace if self.is_module: self.op = float_module(self.inplace) else: self.op = float_op def forward(self, input): if self.is_module: return self.op(input) else: return self.op(input, self.inplace) options = itertools.product([True, False], [True, False], self.static_quant_types, [True, False]) quantized_nodes = { # is_module True: { # is_reference True: ns.call_module(float_module), False: ns.call_module(quantized_module), }, False: { True: ns.call_function(float_op), False: ns.call_function(quantized_op), } } for is_module, is_inplace, quant_type, is_reference in options: self.checkGraphModeFxOp( M(is_module, is_inplace), self.img_data_2d, quant_type, quantized_nodes[is_module][is_reference], is_reference=is_reference) def test_hardswish(self): self._test_activation_impl(nn.Hardswish, F.hardswish, nnq.Hardswish, torch.ops.quantized.hardswish) def test_elu(self): self._test_activation_impl(nn.ELU, F.elu, nnq.ELU, 
torch.ops.quantized.elu) def test_leaky_relu(self): self._test_activation_impl(nn.LeakyReLU, F.leaky_relu, nnq.LeakyReLU, torch.ops.quantized.leaky_relu) def test_prelu(self): class M(torch.nn.Module): def __init__(self, num_param: int): super().__init__() self.op = torch.nn.PReLU(num_parameters=num_param) def forward(self, input): return self.op(input) X = [[torch.randn(4, 4, 4, 4, dtype=torch.float)]] options = itertools.product([1, 4], self.static_quant_types, [True, False]) quantized_nodes = { # is_reference True: ns.call_module(torch.nn.PReLU), False: ns.call_module(torch.ao.nn.quantized.PReLU), } for num_parameter, quant_type, is_reference in options: self.checkGraphModeFxOp( M(num_parameter), X, quant_type, quantized_nodes[is_reference], is_reference=is_reference) def _test_norm_impl( self, float_module, float_op, op_args, data, quantized_module, quantized_op, skip_op_arg_for_functional=False): ''' Test for normalization op, float_op can be torch op or functional op, op_args is a list of positional argument for the module/op ''' class M(torch.nn.Module): def __init__(self, is_module): super().__init__() self.is_module = is_module if self.is_module: self.op = float_module(*op_args) else: self.op = float_op def forward(self, input): if self.is_module: return self.op(input) else: args = [input] if not skip_op_arg_for_functional: args += op_args return self.op(*args) options = itertools.product([True, False], self.static_quant_types) quantized_nodes = { # is_module True: ns.call_module(quantized_module), False: ns.call_function(quantized_op), } for is_module, quant_type in options: self.checkGraphModeFxOp( M(is_module), data, quant_type, quantized_nodes[is_module]) def _test_norm_float16_impl( self, float_module, float_op, op_args, data, skip_op_arg_for_functional=False): ''' Test for normalization op, float_op can be torch op or functional op, op_args is a list of positional argument for the module/op ''' class M(torch.nn.Module): def __init__(self, is_module): 
super().__init__() self.is_module = is_module if self.is_module: self.op = float_module(*op_args) else: self.op = float_op def forward(self, input): if self.is_module: return self.op(input) else: args = [input] if not skip_op_arg_for_functional: args += op_args return self.op(*args) options = itertools.product([True, False], self.static_quant_types) qconfig_dict = { "object_type": [ (float_module, float16_static_qconfig), (float_op, float16_static_qconfig) ] } node_occurrence = { ns.call_method("to"): 2 } for is_module, quant_type in options: self.checkGraphModeFxOp( M(is_module), data, quant_type, custom_qconfig_dict=qconfig_dict, expected_node_occurrence=node_occurrence) def test_layer_norm(self): data = (torch.rand((1, 2, 5, 5), dtype=torch.float),) self._test_norm_impl( nn.LayerNorm, F.layer_norm, [[2, 5, 5]], data, nnq.LayerNorm, torch.ops.quantized.layer_norm) def test_instance_norm(self): data_1d = (torch.rand((1, 4, 5), dtype=torch.float),) data_2d = (torch.rand((1, 4, 5, 1), dtype=torch.float),) data_3d = (torch.rand((1, 4, 5, 1, 1), dtype=torch.float),) data_dict = {1 : data_1d, 2 : data_2d, 3 : data_3d} instance_norm_modules = {1 : nn.InstanceNorm1d, 2 : nn.InstanceNorm2d, 3 : nn.InstanceNorm3d} quantized_instance_norm_modules = { 1 : nnq.InstanceNorm1d, 2 : nnq.InstanceNorm2d, 3 : nnq.InstanceNorm3d } for dim in [1, 2, 3]: data = data_dict[dim] module = instance_norm_modules[dim] quantized_module = quantized_instance_norm_modules[dim] self._test_norm_impl( module, F.instance_norm, [4], data, quantized_module, torch.ops.quantized.instance_norm, skip_op_arg_for_functional=True) def test_norm_weight_bias(self): class Linear(torch.nn.Module): def __init__(self) -> None: super().__init__() self.w = torch.ones(5, 5) self.b = torch.zeros(5) def forward(self, x): return torch.nn.functional.linear(x, self.w, self.b) class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.mods1 = Linear() self.scale = torch.randn(5, 5) self.bias = 
torch.randn(5, 5) def forward(self, x): x1 = self.mods1(x) y = F.layer_norm(x1, [5, 5], weight=self.scale, bias=self.bias) return y model = M() expected_occurrence = { ns.call_function(torch.quantize_per_tensor): 1, ns.call_function(torch.ops.quantized.linear): 1, ns.call_function(torch.ops.quantized.layer_norm): 1, ns.call_method("dequantize"): 1, } self.checkGraphModeFxOp( model, (torch.rand(5, 5),), QuantType.STATIC, expected_node_occurrence=expected_occurrence, custom_qconfig_dict=get_default_qconfig_mapping().to_dict() ) def _test_default_node_quant_handler_ops( self, module, functional, qconfig, is_reference=True, node_list=None, additional_quant_pattern_dict=None ): class M(torch.nn.Module): def __init__(self, mod, func): super().__init__() self.module = mod() self.functional = func def forward(self, x): x = self.module(x) x = self.functional(x) return x if node_list is None: node_list = [] if additional_quant_pattern_dict is None: additional_quant_pattern_dict = {} data = torch.randn((2, 2, 2, 2)) quant_type = QuantType.STATIC prepare_custom_qconfig_dict = {"additional_quant_pattern": additional_quant_pattern_dict} qconfig_dict = {"": qconfig} m = M(module, functional).eval() m_prep = prepare_fx(m, qconfig_dict, prepare_custom_qconfig_dict) m_prep(data) convert_fn = convert_to_reference_fx if is_reference else convert_fx m_quant = convert_fn(m_prep, is_reference=is_reference) m_quant(data) self.checkGraphModuleNodes(m_quant, expected_node_list=node_list) @unittest.skip("TODO: reenable with backend_config api") def test_gelu_normal(self): module = torch.nn.GELU functional = torch.nn.functional.gelu qconfig = torch.ao.quantization.get_default_qconfig("fbgemm") is_reference = False node_list = [ ns.call_module(module), ns.call_function(functional), ] self._test_default_node_quant_handler_ops( module, functional, qconfig, is_reference, node_list) @unittest.skip("TODO: reenable with backend_config api") def test_softmax_normal(self): module = torch.nn.Softmax 
functional = torch.nn.functional.softmax qconfig = torch.ao.quantization.get_default_qconfig("fbgemm") is_reference = False node_list = [ ns.call_module(torch.ao.nn.quantized.Softmax), ns.call_function(functional), ] self._test_default_node_quant_handler_ops( module, functional, qconfig, is_reference, node_list) @unittest.skip("This is no longer needed right now, can enable later with new api") def test_gelu_reference(self): module = torch.nn.GELU functional = torch.nn.functional.gelu qconfig = torch.ao.quantization.get_default_qconfig("fbgemm") is_reference = True node_list = [ ns.call_function(torch.quantize_per_tensor), ns.call_method("dequantize"), ns.call_module(module), ns.call_function(torch.quantize_per_tensor), ns.call_method('dequantize'), ns.call_function(functional), ns.call_function(torch.quantize_per_tensor), ns.call_method('dequantize') ] # TODO: change these to use backend_config additional_patterns = {torch.nn.GELU: DefaultNodeQuantizeHandler, torch.nn.functional.gelu: DefaultNodeQuantizeHandler} self._test_default_node_quant_handler_ops( module, functional, qconfig, is_reference, node_list, additional_patterns) self._test_default_node_quant_handler_ops(module, functional, self.custom_qconfig, is_reference, node_list, additional_quant_pattern_dict=self.common_quant_patterns) @unittest.skip("This is no longer needed right now, can enable later with new api") def test_softmax_reference(self): module = torch.nn.Softmax functional = torch.nn.functional.softmax qconfig = torch.ao.quantization.get_default_qconfig("fbgemm") is_reference = True node_list = [ ns.call_function(torch.quantize_per_tensor), ns.call_method("dequantize"), ns.call_module(module), ns.call_function(torch.quantize_per_tensor), ns.call_method('dequantize'), ns.call_function(functional), ns.call_function(torch.quantize_per_tensor), ns.call_method('dequantize') ] additional_patterns = {torch.nn.Softmax: DefaultNodeQuantizeHandler, torch.nn.functional.softmax: DefaultNodeQuantizeHandler} 
self._test_default_node_quant_handler_ops( module, functional, qconfig, is_reference, node_list, additional_patterns) self._test_default_node_quant_handler_ops(module, functional, self.custom_qconfig, is_reference, node_list, additional_quant_pattern_dict=self.common_quant_patterns) @unittest.skip("This is no longer needed right now, can enable later with new api") def test_silu_reference(self): module = torch.nn.SiLU functional = torch.nn.functional.silu qconfig = float16_static_qconfig is_reference = True node_list = [ ns.call_method("to"), ns.call_method("dequantize"), ns.call_module(module), ns.call_method("to"), ns.call_method('dequantize'), ns.call_function(functional), ns.call_method("to"), ns.call_method('dequantize') ] self._test_default_node_quant_handler_ops( module, functional, qconfig, is_reference, node_list) node_list = [ ns.call_function(torch.quantize_per_tensor), ns.call_method("dequantize"), ns.call_module(module), ns.call_function(torch.quantize_per_tensor), ns.call_method("dequantize"), ns.call_function(functional), ns.call_function(torch.quantize_per_tensor), ns.call_method("dequantize") ] self._test_default_node_quant_handler_ops(module, functional, self.custom_qconfig, is_reference, node_list, additional_quant_pattern_dict=self.common_quant_patterns) @unittest.skip("This is no longer needed right now, can enable later with new api") def test_mish_reference(self): module = torch.nn.Mish functional = torch.nn.functional.mish qconfig = float16_static_qconfig is_reference = True node_list = [ ns.call_method("to"), ns.call_method("dequantize"), ns.call_module(module), ns.call_method("to"), ns.call_method('dequantize'), ns.call_function(functional), ns.call_method("to"), ns.call_method('dequantize') ] self._test_default_node_quant_handler_ops( module, functional, qconfig, is_reference, node_list) node_list = [ ns.call_function(torch.quantize_per_tensor), ns.call_method("dequantize"), ns.call_module(module), 
            ns.call_function(torch.quantize_per_tensor),
            ns.call_method("dequantize"),
            ns.call_function(functional),
            ns.call_function(torch.quantize_per_tensor),
            ns.call_method("dequantize")
        ]
        self._test_default_node_quant_handler_ops(module, functional, self.custom_qconfig, is_reference,
                                                  node_list,
                                                  additional_quant_pattern_dict=self.common_quant_patterns)

    def test_bmm_int_reference(self):
        """ int8 is not supported for bmm so we won't produce reference
            pattern for it
        """
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.bmm = torch.bmm

            def forward(self, x, y):
                out = self.bmm(x, y)
                return out

        data_x = torch.randn((2, 2, 2,))
        data_y = torch.randn((2, 2, 2,))
        example_inputs = (data_x, data_y)
        qconfig_dict = {"": torch.ao.quantization.get_default_qconfig("fbgemm")}
        is_reference = True
        # since bmm has no int8 support, the converted graph is expected to
        # keep the original torch.bmm call instead of a q/dq reference pattern
        node_list = [
            ns.call_function(torch.bmm),
        ]
        m = M().eval()
        m_prep = prepare_fx(m, qconfig_dict, example_inputs=example_inputs)
        m_prep(*example_inputs)
        # is_reference is fixed to True above, so convert_to_reference_fx is used
        convert_fn = convert_to_reference_fx if is_reference else convert_fx
        m_quant = convert_fn(m_prep)
        # run the converted model to make sure it is executable
        m_quant(*example_inputs)
        self.checkGraphModuleNodes(m_quant, expected_node_list=node_list)

    @skipIfNoFBGEMM
    def test_clamp(self):
        """Checks clamp-style ops (ReLU6, hardtanh, torch.clamp, Tensor.clamp),
        in module, functional, and inplace form: the converted graph should
        contain quantize_per_tensor -> quantized Conv2d -> ... -> dequantize
        in that order (checked for every static quant type).
        """
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.conv = torch.nn.Conv2d(2, 2, 2).float()
                self.relu6 = torch.nn.ReLU6()
                self.relu6_ = torch.nn.ReLU6(True)
                self.hardtanh = torch.nn.Hardtanh()
                self.hardtanh_ = torch.nn.Hardtanh(inplace=True)

            def forward(self, x):
                x = self.conv(x)
                x = self.relu6(x)
                self.relu6_(x)
                x = F.relu6(x)
                x = torch.clamp(x, -3, 3)
                x = x.clamp(-2.5, 2.5)
                # x = x.clamp_(-2, 2)  # Enable when quantized `clamp_` is ready
                x = self.hardtanh(x)
                self.hardtanh_(x)
                x = F.hardtanh(x)
                return x

        data = (torch.rand((1, 2, 5, 5), dtype=torch.float),)
        # list of node that should occur in order
        node_list = [
            ns.call_function(torch.quantize_per_tensor),
            ns.call_module(nnq.Conv2d),
            ns.call_method('dequantize')
        ]
        for quant_type in self.static_quant_types:
self.checkGraphModeFxOp( M(), data, quant_type, expected_node_list=node_list) def test_fixed_qparams_ops_fp16(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.sigmoid = torch.nn.Sigmoid() self.tanh = torch.nn.Tanh() def forward(self, x): x = self.sigmoid(x) x = torch.sigmoid(x) x = x.sigmoid() x = self.tanh(x) x = torch.tanh(x) x = x.tanh() return x data = (torch.randn((2, 2, 2, 2), dtype=torch.float),) quant_type = QuantType.STATIC # TODO: use get_default_qconfig_mapping once it handles fp16 qconfig_mapping = QConfigMapping().set_global(float16_static_qconfig) backend_config = get_test_only_legacy_native_backend_config() node_occurrence = { ns.call_method("to"): 7 } self.checkGraphModeFxOp( M(), data, quant_type, custom_qconfig_dict=qconfig_mapping, expected_node_occurrence=node_occurrence, backend_config=backend_config) def test_fixed_qparams_ops_qint8(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.sigmoid = torch.nn.Sigmoid() self.tanh = torch.nn.Tanh() def forward(self, x): x = self.sigmoid(x) x = torch.sigmoid(x) x = x.sigmoid() x = self.tanh(x) x = torch.tanh(x) x = x.tanh() return x data = (torch.randn((2, 2, 2, 2), dtype=torch.float),) quant_type = QuantType.STATIC qconfig = torch.ao.quantization.QConfig( activation=HistogramObserver.with_args(qscheme=torch.per_tensor_symmetric, dtype=torch.quint8), weight=default_weight_observer) qconfig_mapping = get_default_qconfig_mapping().set_global(qconfig) node_occurrence = { ns.call_function(torch.quantize_per_tensor): 7, ns.call_method("dequantize"): 7 } self.checkGraphModeFxOp( M(), data, quant_type, custom_qconfig_dict=qconfig_mapping, expected_node_occurrence=node_occurrence, is_reference=True) def test_fixed_qparams_ops_wrong_qconfig(self): """ Test that wrong qconfigs for fixed qparams ops results in the ops not being quantized. 
""" class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.sigmoid = torch.nn.Sigmoid() self.tanh = torch.nn.Tanh() def forward(self, x): x = self.sigmoid(x) x = torch.sigmoid(x) x = x.sigmoid() x = self.tanh(x) x = torch.tanh(x) x = x.tanh() return x data = (torch.randn((2, 2, 2, 2), dtype=torch.float),) qconfig_mapping = QConfigMapping().set_global(default_qconfig) m = M().eval() node_occurrence = { ns.call_function(torch.quantize_per_tensor): 0, ns.call_method("dequantize"): 0, } self.checkGraphModeFxOp( m, data, QuantType.STATIC, custom_qconfig_dict=qconfig_mapping, expected_node_occurrence=node_occurrence, is_reference=True) self.assertTrue(isinstance(m.sigmoid, torch.nn.Sigmoid)) self.assertTrue(isinstance(m.tanh, torch.nn.Tanh)) @skipIfNoFBGEMM def test_general_shape_ops(self): """ A test that checks dequantize will be swapped for all supported general shape ops like aten::flatten without actually checking for execution of these ops """ class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.maxpool1d = torch.nn.MaxPool1d(kernel_size=3) self.maxpool2d = torch.nn.MaxPool2d(kernel_size=3) self.maxpool3d = torch.nn.MaxPool3d(kernel_size=3) self.dropout = torch.nn.Dropout() self.conv1 = torch.nn.Conv2d(3, 3, 3) self.conv2 = torch.nn.Conv2d(3, 3, 3) self.relu = torch.nn.ReLU() def forward(self, x): x = self.conv1(x) # add_scalar x = x + 3 # mul_scalar x = x * 3 # add_scalar_out x += 3 # mul_scalar_out x *= 3 # add_scalar_relu x = x + 3 x = F.relu(x) # add_scalar_relu_out x += 3 x = F.relu(x) # mul_scalar_relu x = x * 3 x = F.relu(x) # mul_scalar_relu_out x *= 3 x = F.relu(x) x = self.maxpool1d(x) x = self.maxpool2d(x) x = self.maxpool3d(x) x = torch.flatten(x) x = x.reshape([-1]) x = x.resize_(1, 1, x) x = x.view(-1) # prim::ListConstruct xs = [x, x] # prim::ListUnpack x, y = xs # prim::TupleConstruct xs = (x, x) # prim::TupleUnpack x, y = xs x = x.transpose(1, 2) x = x.contiguous() # chunk is not supported since 
observer only supports # observing single Tensor currently x, y = torch.chunk(x, 2) x = F.dropout(x) x = self.dropout(x) x = x.permute(0, 2, 3, 1) x = x.repeat_interleave(3, 1) x = torch.repeat_interleave(x, 3, 1) x = self.relu(x) x = F.relu(x) x = F.relu(x, inplace=True) x = x.relu() x.relu_() x = x.squeeze(0) x.squeeze_(0) x = torch.squeeze(x, 0) x = x.unsqueeze(0) x.unsqueeze_(0) x = torch.unsqueeze(x, 0) x = x.detach() x.detach_() x = x.repeat(4, 2) y = [] y.append(x) z = torch.stack(y, 0) z = [z, z] x, _ = z x = self.conv2(x) return x example_inputs = (torch.rand(1, 3, 10, 10),) # This model is not executable since we just put all ops # in the same forward m = M().eval() qconfig_dict = {'': default_qconfig} prepared = prepare_fx(m, qconfig_dict, example_inputs=example_inputs) # not runnable quantized = convert_fx(prepared) # This checks that the dequantize from the output of first conv # is being propagated to the end, so that we don't insert extra # observers and also successfully fused two quantized::conv2d # patterns # one quantize_per_tensor for input # check exact counts of quantize and dequantize count_check = { # input of conv and two outputs of getitem ns.call_function(torch.quantize_per_tensor) : 2, # output of the model and two outputs of getitem ns.call_method('dequantize') : 2 } order_check = [ ns.call_function(torch.quantize_per_tensor), ns.call_module(nnq.Conv2d), ns.call_module(nnq.Conv2d), ns.call_method('dequantize'), ] self.checkGraphModuleNodes( quantized, expected_node_occurrence=count_check, expected_node_list=order_check) # Checking the is_reference output m = M().eval() qconfig_dict = {'': default_qconfig} prepared = prepare_fx(m, qconfig_dict, example_inputs=example_inputs) # not runnable quantized = convert_to_reference_fx(prepared) @skipIfNoFBGEMM def test_ave_pool_with_custom_cfg(self): """ A test that checks correct patterns are produced for avg_pool2d with customized config """ class M(torch.nn.Module): def __init__(self) -> None: 
super().__init__() self.avg_pool2d = torch.nn.AvgPool2d(3) def forward(self, x): x = self.avg_pool2d(x) return x # This model is not executable since we just put all ops # in the same forward m = M().eval() # nothing to fuse so skipping the fuse step qconfig_dict = {'': default_qconfig} example_inputs = (torch.randn(1, 3, 3, 3),) prepared = prepare_fx( m, qconfig_dict, example_inputs=example_inputs, prepare_custom_config={"input_quantized_idxs": [0]}) # not runnable quantized = convert_fx(prepared) # This checks that the dequantize from the output of first conv # is being propagated to the end, so that we don't insert extra # observers # check exact counts of quantize and dequantize count_check = { ns.call_method('dequantize') : 1 } order_check = [ ns.call_module(nn.AvgPool2d), ns.call_method('dequantize'), ] self.checkGraphModuleNodes( quantized, expected_node_occurrence=count_check, expected_node_list=order_check) @skipIfNoFBGEMM def test_general_value_ops(self): """ A test that checks correct patterns are produced for all supported general value ops like aten::avg_pool2d \ without actually checking for execution of these ops """ class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) self.avg_pool1d = torch.nn.AvgPool1d(3) self.avg_pool2d = torch.nn.AvgPool2d(3) self.avg_pool3d = torch.nn.AvgPool3d(3) self.adaptive_avg_pool1d = torch.nn.AdaptiveAvgPool1d(1) self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1)) self.adaptive_avg_pool3d = torch.nn.AdaptiveAvgPool3d((1, 1, 1)) def forward(self, x): x = self.conv(x) x = self.avg_pool1d(x) x = self.avg_pool2d(x) x = self.avg_pool3d(x) x = self.adaptive_avg_pool1d(x) x = self.adaptive_avg_pool2d(x) x = self.adaptive_avg_pool3d(x) x = F.avg_pool1d(x, 3) x = F.avg_pool2d(x, 3) x = F.avg_pool3d(x, 3) x = F.adaptive_avg_pool1d(x, (1)) x = F.adaptive_avg_pool2d(x, (1, 1)) x = F.adaptive_avg_pool3d(x, (1, 1, 1)) x = torch.mean(x) x = torch.mean(x, [2, 3], False) 
x = x.mean() x = x.mean([2, 3], True) x = F.interpolate(x, 4, mode='nearest') x = F.interpolate(x, 4, mode='linear') x = self.conv(x) return x # This model is not executable since we just put all ops # in the same forward m = M().eval() # nothing to fuse so skipping the fuse step qconfig_dict = {'': default_qconfig} example_inputs = (torch.randn(1, 3, 3, 3),) prepared = prepare_fx(m, qconfig_dict, example_inputs=example_inputs) # not runnable quantized = convert_fx(prepared) # This checks that the dequantize from the output of first conv # is being propagated to the end, so that we don't insert extra # observers # check exact counts of quantize and dequantize count_check = { ns.call_function(torch.quantize_per_tensor) : 1, ns.call_method('dequantize') : 1 } order_check = [ ns.call_function(torch.quantize_per_tensor), ns.call_module(nnq.Conv2d), ns.call_module(nnq.Conv2d), ns.call_method('dequantize'), ] self.checkGraphModuleNodes( quantized, expected_node_occurrence=count_check, expected_node_list=order_check) def test_copy_node_fp32_input(self): """ CopyNode works for both fp32 and int8 inputs, this is a test to make sure that a CopyNode can be successfully quantized in both cases """ class M(torch.nn.Module): def forward(self, x): x = x.relu() return x m = M().eval() m = prepare_fx(m, {"": default_reuse_input_qconfig}, example_inputs=(torch.randn(1),)) m = convert_fx(m) # make sure it runs m(torch.rand(1)) def test_getitem(self): """ Make sure we only insert observer for getitem if the following node is matched or needs to be quantized """ class M(torch.nn.Module): def forward(self, xs): x = xs[0] return x m = M().eval() example_inputs = (torch.rand(1, 2),) qconfig_mapping = get_default_qconfig_mapping() m = prepare_fx(m, qconfig_mapping, example_inputs=example_inputs) self.checkGraphModuleNodes(m, expected_node_occurrence={ ns.call_module(torch.ao.quantization.MinMaxObserver): 0 }) m = convert_fx(m) m(*example_inputs) class M2(torch.nn.Module): def forward(self, 
xs): x = xs[0] x = torch.sigmoid(x) return x m2 = M2().eval() example_inputs = ([torch.rand(1, 2)],) qconfig_mapping = get_default_qconfig_mapping() m2 = prepare_fx(m2, qconfig_mapping, example_inputs=example_inputs) self.checkGraphModuleNodes(m2, expected_node_occurrence={ ns.call_module(torch.ao.quantization.FixedQParamsObserver): 2 }) m2 = convert_fx(m2) self.checkGraphModuleNodes(m2, expected_node_list=[ ns.call_function(torch.quantize_per_tensor), ns.call_method("dequantize") ]) m2(*example_inputs) # testing prepare recognizes non-Tensor input for getitem class M3(torch.nn.Module): def forward(self, x): s = x.shape n, c = s[:2] x = torch.sigmoid(x) return x m3 = M3().eval() example_inputs = (torch.rand(1, 2, 3, 4),) qconfig_mapping = get_default_qconfig_mapping() m3 = prepare_fx(m3, qconfig_mapping, example_inputs=example_inputs) self.checkGraphModuleNodes(m3, expected_node_occurrence={ ns.call_module(torch.ao.quantization.FixedQParamsObserver): 2 }) m3 = convert_fx(m3) self.checkGraphModuleNodes(m3, expected_node_list=[ ns.call_function(torch.quantize_per_tensor), ns.call_method("dequantize") ]) m3(*example_inputs) @skipIfNoFBGEMM def test_fixed_qparams_ops(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) self.sigmoid = torch.nn.Sigmoid() self.hardsigmoid = torch.nn.Hardsigmoid() self.tanh = torch.nn.Tanh() self.softmax = torch.nn.Softmax(dim=0) def forward(self, x): x = self.conv(x) # F.sigmoid is deprecated x = self.sigmoid(x) x = torch.sigmoid(x) x = x.sigmoid() x = self.hardsigmoid(x) x = F.hardsigmoid(x) x = F.hardsigmoid(x, inplace=True) x = self.tanh(x) # F.tanh is deprecated x = torch.tanh(x) x = x.tanh() # TODO(future PR): handle F.softmax x = self.softmax(x) return x for eval_mode in [True, False]: # This model is not executable since we just put all ops # in the same forward m = M() if eval_mode: m.eval() qconfig_mapping = get_default_qconfig_mapping() prepare = prepare_fx fq_count 
= 10 else: m.train() qconfig_mapping = get_default_qat_qconfig_mapping() prepare = prepare_qat_fx fq_count = 10 # nothing to fuse so skipping the fuse step m_copy = copy.deepcopy(m) example_inputs = (torch.rand(3, 3, 3, 3),) prepared = prepare(m, qconfig_mapping, example_inputs=example_inputs) prepared_copy = copy.deepcopy(prepared) # check that prepare does not change model result if eval_mode: self.assertEqual(m_copy(*example_inputs), prepared_copy(*example_inputs)) # check the correct number of activation_post_process is inserted expected_activation_post_process = FixedQParamsObserver if eval_mode else FixedQParamsFakeQuantize count_check = { ns.call_module(expected_activation_post_process) : fq_count, } self.checkGraphModuleNodes( prepared, expected_node_occurrence=count_check) # not runnable quantized = convert_fx(prepared) quantized_reference = convert_to_reference_fx(prepared_copy) # This checks that the dequantize from the output of first conv # is being propagated to the end, so that we don't insert extra # observers # check exact counts of quantize and dequantize count_check = { ns.call_function(torch.quantize_per_tensor) : 1, ns.call_method('dequantize') : 1 } order_check = [ ns.call_function(torch.quantize_per_tensor), ns.call_module(nnq.Conv2d), ns.call_module(nn.Sigmoid), ns.call_module(nnq.Softmax), ns.call_method('dequantize'), ] self.checkGraphModuleNodes( quantized, expected_node_occurrence=count_check, expected_node_list=order_check) reference_count_check = { ns.call_function(torch.quantize_per_tensor) : 12, ns.call_method('dequantize') : 12 } reference_order_check = [ ns.call_function(torch.quantize_per_tensor), ns.call_method('dequantize'), ns.call_module(nnqr.Conv2d), ns.call_function(torch.quantize_per_tensor), ns.call_method('dequantize'), ns.call_module(nn.Sigmoid), ns.call_function(torch.quantize_per_tensor), ns.call_method('dequantize'), ns.call_module(nn.Softmax), ns.call_function(torch.quantize_per_tensor), ns.call_method('dequantize'), 
] self.checkGraphModuleNodes( quantized_reference, expected_node_occurrence=reference_count_check, expected_node_list=reference_order_check) # Verify that softmax scale and zero_point are correct self.assertTrue(quantized.softmax.scale - (1.0 / 256) <= 1e-8) self.assertTrue(quantized.softmax.zero_point == 0) def test_float_functional(self): class TorchAdd(nn.Module): """Wrapper around torch.add so that all ops can be found at build""" def __init__(self) -> None: super().__init__() self.add_func = nnq.FloatFunctional() def forward(self, x, y): return self.add_func.add(x, y) class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.ff1 = TorchAdd() self.ff2 = nnq.FloatFunctional() self.ff3 = nnq.FloatFunctional() self.ff4 = nnq.FloatFunctional() self.ff5 = nnq.FloatFunctional() self.ff6 = nnq.FloatFunctional() def forward(self, x): x = self.ff1(x, x) x = self.ff2.add_scalar(x, 3) x = self.ff3.mul(x, x) x = self.ff4.mul_scalar(x, 3) x = self.ff5.add_relu(x, x) x = self.ff6.cat([x]) return x example_inputs = (torch.rand(3, 3),) # Note: QAT test succeeded by chance, to make it actually work # we need to fix eager mode FloatFunctional by removing # activation_post_process in add_scalar and mul_scalar for quant_type in self.static_quant_types: m = M() ref_m = torch.ao.quantization.QuantWrapper(M()) is_qat = quant_type == QuantType.QAT if is_qat: m.train() ref_m.train() qconfig = default_qat_qconfig expected_act_post_process = torch.ao.quantization.FakeQuantize else: m.eval() ref_m.eval() qconfig = default_qconfig expected_act_post_process = torch.ao.quantization.MinMaxObserver prepare_fx_function = prepare_qat_fx if is_qat else prepare_fx qconfig_dict = {"": qconfig} m = prepare_fx_function(m, qconfig_dict, example_inputs=example_inputs) node_occurrence = { ns.call_module(expected_act_post_process): 7, ns.call_module(torch.ao.nn.quantized.FloatFunctional): 0 } self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence) m(*example_inputs) 
node_list = [ ns.call_function(torch.quantize_per_tensor), ns.call_function(torch.ops.quantized.add), ns.call_function(torch.ops.quantized.add), ns.call_function(torch.ops.quantized.mul), ns.call_function(torch.ops.quantized.mul), ns.call_function(torch.ops.quantized.add_relu), ns.call_function(torch.cat), ns.call_method('dequantize') ] m = convert_fx(m) self.checkGraphModuleNodes(m, expected_node_list=node_list) # make sure numerics match with eager mode ref_m.qconfig = qconfig prepare_function = prepare_qat if is_qat else prepare ref_m = prepare_function(ref_m) ref_m(*example_inputs) ref_m = convert(ref_m) # FX Graph Mode and Eager Mode now diverages in numerics of add_scalar and mul_scalar # self.assertEqual(m(data), ref_m(data)) def test_embedding(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) def forward(self, indices): return self.emb(indices) for qconfig_type in [float_qparams_weight_only_qconfig, float_qparams_weight_only_qconfig_4bit]: model = M().eval() indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8, 3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3]) example_inputs = (indices,) quantized_node = ns.call_module(nnq.Embedding) # check dynamic quant self.checkGraphModeFxOp( model, example_inputs, QuantType.DYNAMIC, quantized_node, custom_qconfig_dict={"": qconfig_type} ) model = M().eval() configs = [ (qconfig_type, ns.call_module(nnq.Embedding)), (None, ns.call_module(nn.Embedding)), (default_qconfig, ns.call_module(nn.Embedding)), ] # check static quantization for qconfig, node in configs: qconfig_dict = {"": qconfig} m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs) self.checkGraphModuleNodes(m, expected_node_occurrence={ ns.call_module(torch.ao.quantization.MinMaxObserver): 0 }) m = convert_fx(m) self.checkGraphModuleNodes(m, expected_node=node) # make sure it runs m(*example_inputs) def test_embedding_bag(self): class 
M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, include_last_offset=True) def forward(self, indices, offsets): return self.emb(indices, offsets) indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8, 3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3]) offsets = torch.tensor([0, 19, 20, 28, 28, 32]) quantized_node = ns.call_module(nnq.EmbeddingBag) example_inputs = (indices, offsets) for dtype in [torch.quint8, torch.quint4x2]: model = M().eval() float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0) float_qparams_qconfig = QConfig(activation=default_placeholder_observer, weight=float_qparams_observer) self.checkGraphModeFxOp( model, example_inputs, QuantType.DYNAMIC, quantized_node, custom_qconfig_dict={"": float_qparams_qconfig} ) # check it works in None and static qconfig for qconfig in [None, default_qconfig]: qconfig_dict = {"": qconfig} m = M().eval() m = prepare_fx(model, qconfig_dict, example_inputs=example_inputs) self.checkGraphModuleNodes(m, expected_node_occurrence={ ns.call_module(torch.ao.quantization.MinMaxObserver): 0 }) m = convert_fx(m) self.checkGraphModuleNodes(m, expected_node=ns.call_module(nn.EmbeddingBag)) # make sure it runs m(*example_inputs) def _test_rnn_impl(self, qconfigs, M, module_type_strs, module_types, sample_input): options = itertools.product(qconfigs, module_type_strs) for qconfig, module_type_str in options: model_eager = M(module_type_str).eval() model_graph = copy.deepcopy(model_eager) if torch.backends.quantized.engine == 'qnnpack' and \ qconfig is float16_dynamic_qconfig: continue # fp16 dynamic quant is not supported for qnnpack eager_qconfig_dict = dict.fromkeys(module_types, qconfig) model_eager = quantize_dynamic(model_eager, qconfig_spec=eager_qconfig_dict) graph_qconfig_dict = { "object_type": [ (x, qconfig) for x in module_types ] } 
model_graph = prepare_fx(model_graph, graph_qconfig_dict, example_inputs=(sample_input,)) model_graph = convert_fx(model_graph) self.assertEqual(model_eager(sample_input), model_graph(sample_input)) self.checkScriptable(model_graph, [[sample_input]], True) @override_qengines def test_rnn_cell(self): if torch.backends.quantized.engine not in ('fbgemm', 'qnnpack'): return qconfigs = [per_channel_dynamic_qconfig, default_dynamic_qconfig, float16_dynamic_qconfig] module_type_strs = ['LSTMCell', 'GRUCell', 'RNNTanh', 'RNNReLU'] module_types = [torch.nn.LSTMCell, torch.nn.GRUCell, torch.nn.RNNCell] sample_input = torch.tensor([[100, -155], [-155, 100], [100, -155]], dtype=torch.float) self._test_rnn_impl(qconfigs, RNNCellDynamicModel, module_type_strs, module_types, sample_input) @override_qengines def test_rnn(self): if torch.backends.quantized.engine not in ('fbgemm', 'qnnpack'): return qconfigs = [per_channel_dynamic_qconfig, default_dynamic_qconfig, float16_dynamic_qconfig] module_type_strs = ['LSTM', 'GRU'] module_types = [torch.nn.LSTM, torch.nn.GRU] niter = 10 sample_input = torch.tensor([[100, -155], [-155, 100], [100, -155]], dtype=torch.float).unsqueeze(0).repeat(niter, 1, 1) self._test_rnn_impl(qconfigs, RNNDynamicModel, module_type_strs, module_types, sample_input) def _test_conv_transpose_impl( self, float_cls: Callable, q_cls: Callable, data: torch.Tensor): with override_quantized_engine('qnnpack'): # Create fp32 versions of FX and Eager models m1 = torch.nn.Sequential(float_cls(1, 1, 1)) m2 = torch.nn.Sequential(float_cls(1, 1, 1)) m2.load_state_dict(m1.state_dict()) m2 = torch.ao.quantization.QuantWrapper(m2) # FX graph result_dict = self.checkGraphModeFxOp( m1, (data,), QuantType.STATIC, expected_node_occurrence={ ns.call_module(q_cls): 1, }) q_result1 = result_dict["quantized_output"] # Eager m2.qconfig = get_default_qconfig(torch.backends.quantized.engine) m2.eval() m2p = torch.ao.quantization.prepare(m2) m2p(data) m2q = 
torch.ao.quantization.convert(m2p) q_result2 = m2q(data) # verify results match self.assertEqual(q_result1, q_result2) @unittest.skipUnless('qnnpack' in supported_qengines, "This Pytorch Build has not been built with or does not support QNNPACK") def test_conv_transpose_1d(self): self._test_conv_transpose_impl( torch.nn.ConvTranspose1d, nnq.ConvTranspose1d, torch.randn(4, 1, 4)) @unittest.skipUnless('qnnpack' in supported_qengines, "This Pytorch Build has not been built with or does not support QNNPACK") def test_conv_transpose_2d(self): self._test_conv_transpose_impl( torch.nn.ConvTranspose2d, nnq.ConvTranspose2d, torch.randn(4, 1, 4, 4)) def test_reshape_fp16(self): class M(torch.nn.Module): def __init__(self, w, b): super().__init__() self.w = w self.b = b def forward(self, x): x = torch.nn.functional.linear(x, self.w) x = x.reshape(-1, 4) x = torch.nn.functional.linear(x, self.w) return x w = torch.randn(4, 4) b = torch.randn(4) m = M(w, b).eval() qconfig_dict = { # reshape will be quantized to fp16 as requested by this qconfig "": float16_static_qconfig, "object_type": [ (torch.nn.functional.linear, default_qconfig) ] } backend_config = get_test_only_legacy_native_backend_config() example_inputs = (torch.randn(1, 4),) m = prepare_fx( m, qconfig_dict, example_inputs=example_inputs, backend_config=backend_config) expected_occurrence = { # input and weight of first and second linear, output of first and second linear ns.call_module(torch.ao.quantization.MinMaxObserver): 6, # we insert placeholder observer for both input and output of reshape ns.call_module(torch.ao.quantization.PlaceholderObserver): 2 } self.checkGraphModuleNodes( m, expected_node_occurrence=expected_occurrence ) m = convert_fx(m, backend_config=backend_config) expected_occurrence = { ns.call_function(torch.quantize_per_tensor): 2, # dequantize after first linear, before reshape and before output ns.call_method("dequantize"): 3, # before reshape, to(fp16) ns.call_method("to"): 1, 
ns.call_function(torch.ops.quantized.linear): 2 } self.checkGraphModuleNodes( m, expected_node_occurrence=expected_occurrence ) # make sure it runs m(torch.randn(2, 4)) def test_multiple_qconfigs_for_single_value(self): """ Test multiple qconfigs for a single value""" class M(torch.nn.Module): def __init__(self, w, b): super().__init__() self.w = w self.b = b def forward(self, x): x = torch.nn.functional.linear(x, self.w) x = torch.sigmoid(x) return x w = torch.randn(4, 4) b = torch.randn(4) m = M(w, b).eval() # TODO: use get_default_qconfig_mapping once it handles fp16 qconfig_mapping = QConfigMapping() \ .set_global(float16_static_qconfig) \ .set_object_type(torch.nn.functional.linear, default_qconfig) example_inputs = (torch.randn(1, 4),) backend_config = get_test_only_legacy_native_backend_config() m = prepare_fx( m, qconfig_mapping, example_inputs=example_inputs, backend_config=backend_config) expected_occurrence = { # input and weight of linear, output of linear ns.call_module(torch.ao.quantization.MinMaxObserver): 3, # input and output of sigmoid ns.call_module(torch.ao.quantization.PlaceholderObserver): 2, } self.checkGraphModuleNodes( m, expected_node_occurrence=expected_occurrence ) # make sure it runs m = convert_fx(m) expected_occurrence = { ns.call_function(torch.quantize_per_tensor): 1, ns.call_method("dequantize"): 3, ns.call_method("to"): 2 } self.checkGraphModuleNodes( m, expected_node_occurrence=expected_occurrence ) def test_boolean_tensor(self): """ Make sure we don't insert observer for boolean Tensors """ class M(torch.nn.Module): def forward(self, x, mask): mask = mask.unsqueeze(0) mask = mask.unsqueeze(1) x = x.masked_fill(mask, 1) return x m = M().eval() example_inputs = (torch.rand(1, 2, 3, 4), torch.rand(3, 4).bool()) m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs) expected_occurrence = { ns.call_module(torch.ao.quantization.MinMaxObserver): 0 } self.checkGraphModuleNodes( m, 
expected_node_occurrence=expected_occurrence) m = convert_fx(m) m(*example_inputs) def test_chunk(self): class M(torch.nn.Module): def forward(self, x): x, y = torch.chunk(x, 2) x = x + y return x m = M().eval() example_inputs = (torch.rand(2, 2, 2, 2),) m = prepare_fx(m, {"": default_qconfig}, example_inputs=example_inputs) m(*example_inputs) m = convert_fx(m) m(*example_inputs) # make sure everything runs def test_ref_pattern_multi_use(self): class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(5, 5) self.linear1 = torch.nn.Linear(5, 5) def forward(self, x): y = self.linear(x) z = self.linear1(x) a = torch.mul(z, 5) b = torch.add(z, 5) return (y, a, b) m = M().eval() qconfig_dict = { "": None, "object_type": [ (torch.nn.Linear, get_default_qconfig("fbgemm")), (torch.nn.ReLU, get_default_qconfig("fbgemm")), ], } example_inputs = (torch.randn(1, 5),) m = prepare_fx(m, qconfig_dict, example_inputs=example_inputs) m = convert_fx(m) expected_occurrence = { ns.call_function(torch.quantize_per_tensor): 1, ns.call_module(nnq.Linear): 2, ns.call_method("dequantize"): 2, ns.call_function(torch.add): 1, ns.call_function(torch.mul): 1, } self.checkGraphModuleNodes( m, expected_node_occurrence=expected_occurrence) def test_qmatmul(self): class M(torch.nn.Module): def forward(self, x, y): z = torch.matmul(x, y) return z m = M().eval() example_inputs = (torch.randn(2, 2), torch.randn(2, 2)) qconfig_dict = get_default_qconfig_mapping("fbgemm") mp = prepare_fx(m, qconfig_dict, example_inputs=example_inputs) mp(*example_inputs) mq = convert_fx(mp) expected_occurrence = { ns.call_function(torch.matmul): 0, ns.call_function(torch.ops.quantized.matmul): 1, } self.checkGraphModuleNodes( mq, expected_node_occurrence=expected_occurrence) # verify no crash res = mq(*example_inputs) def test_pixel_shuffle(self): class MyBias(nn.Module): def __init__(self) -> None: super().__init__() self.bias = nn.Parameter(torch.randn(8)) class 
MyModel(nn.Module): def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(8, 8, 1, bias=False) self.bias = MyBias() def forward(self, x): x = self.conv(x) x = nn.functional.pixel_shuffle(x, 2) x = x.view(-1, 8, 2, 2) bias = self.bias.bias return x + bias backend_config = get_qnnpack_backend_config() qconfig_mapping = get_default_qconfig_mapping("qnnpack") model = MyModel() m = prepare_fx( model, qconfig_mapping=qconfig_mapping, example_inputs=(torch.randn(1, 8, 3, 3),), backend_config=backend_config ) m = convert_fx(m) expected_occurrence = { ns.call_function(torch.quantize_per_tensor): 2, ns.call_method("dequantize"): 1, } self.checkGraphModuleNodes(m, expected_node_occurrence=expected_occurrence) def test_pixel_shuffle_module(self) -> None: class MyBias(nn.Module): def __init__(self) -> None: super().__init__() self.bias = nn.Parameter(torch.randn(8)) class MyModel(nn.Module): def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(8, 8, 1, bias=False) self.ps = nn.PixelShuffle(upscale_factor=2) self.bias = MyBias() def forward(self, x): x = self.conv(x) x = self.ps(x) x = x.view(-1, 8, 2, 2) bias = self.bias.bias return x + bias backend_config = get_qnnpack_backend_config() qconfig_mapping = get_default_qconfig_mapping("qnnpack") model = MyModel() m = prepare_fx( model, qconfig_mapping=qconfig_mapping, example_inputs=(torch.randn(1, 8, 3, 3),), backend_config=backend_config ) m = convert_fx(m) expected_occurrence = { ns.call_function(torch.quantize_per_tensor): 2, ns.call_method("dequantize"): 1, ns.call_module(nn.PixelShuffle): 1, } self.checkGraphModuleNodes(m, expected_node_occurrence=expected_occurrence) def test_pixel_unshuffle(self): class MyBias(nn.Module): def __init__(self) -> None: super().__init__() self.bias = nn.Parameter(torch.randn(64)) class MyModel(nn.Module): def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(8, 8, 1, bias=False) self.bias = MyBias() def forward(self, x): x = self.conv(x) x = 
nn.functional.pixel_unshuffle(x, 2) bias = self.bias.bias return x + bias for backend in ["fbgemm", "qnnpack"]: if backend == "fbgemm": backend_config = get_fbgemm_backend_config() else: backend_config = get_qnnpack_backend_config() qconfig_mapping = get_default_qconfig_mapping(backend) model = MyModel() m = prepare_fx( model, qconfig_mapping=qconfig_mapping, example_inputs=(torch.randn(1, 8, 6, 6),), backend_config=backend_config ) m = convert_fx(m) expected_occurrence = { ns.call_function(torch.quantize_per_tensor): 2, ns.call_method("dequantize"): 1, } self.checkGraphModuleNodes(m, expected_node_occurrence=expected_occurrence) def test_pixel_unshuffle_module(self) -> None: class MyBias(nn.Module): def __init__(self) -> None: super().__init__() self.bias = nn.Parameter(torch.randn(64)) class MyModel(nn.Module): def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(8, 8, 1, bias=False) self.unshuffle = nn.PixelUnshuffle(downscale_factor=2) self.bias = MyBias() def forward(self, x): x = self.conv(x) x = self.unshuffle(x) bias = self.bias.bias return x + bias for backend in ["fbgemm", "qnnpack"]: if backend == "fbgemm": backend_config = get_fbgemm_backend_config() else: backend_config = get_qnnpack_backend_config() qconfig_mapping = get_default_qconfig_mapping(backend) model = MyModel() m = prepare_fx( model, qconfig_mapping=qconfig_mapping, example_inputs=(torch.randn(1, 8, 6, 6),), backend_config=backend_config ) m = convert_fx(m) expected_occurrence = { ns.call_function(torch.quantize_per_tensor): 2, ns.call_method("dequantize"): 1, ns.call_module(nn.PixelUnshuffle): 1, } self.checkGraphModuleNodes(m, expected_node_occurrence=expected_occurrence) def test_narrow(self): class MyBias(nn.Module): def __init__(self) -> None: super().__init__() self.bias = nn.Parameter(torch.randn(4)) class MyModel(nn.Module): def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(8, 8, 1, bias=False) self.bias = MyBias() def forward(self, x): x = 
self.conv(x) x = torch.narrow(x, 1, 0, 4) bias = self.bias.bias return x + bias for backend in ["fbgemm", "qnnpack"]: if backend == "fbgemm": backend_config = get_fbgemm_backend_config() else: backend_config = get_qnnpack_backend_config() qconfig_mapping = get_default_qconfig_mapping(backend) model = MyModel() m = prepare_fx( model, qconfig_mapping=qconfig_mapping, example_inputs=(torch.randn(1, 8, 3, 3),), backend_config=backend_config ) m = convert_fx(m) expected_occurrence = { ns.call_function(torch.quantize_per_tensor): 2, ns.call_method("dequantize"): 1, } self.checkGraphModuleNodes(m, expected_node_occurrence=expected_occurrence)
TestQuantizeFxOps
python
spyder-ide__spyder
spyder/plugins/editor/api/run.py
{ "start": 1027, "end": 1522 }
class ____(TypedDict): """Schema emitted by the editor for the `Selection` run context.""" # File path to the file that contains the selection to execute. path: str # Actual selection text to execute. selection: str # Encoding of the text. encoding: str # Selection start and end in (line, column) format line_col_bounds: Tuple[Tuple[int, int], Tuple[int, int]] # Selection start and end in characters character_bounds: Tuple[int, int]
SelectionRun
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/matchExhaustion1.py
{ "start": 910, "end": 1173 }
class ____(Enum): red = 0 green = 1 blue = 2 def func6(subj: Color): # This should generate an error if reportMatchNotExhaustive is enabled. match subj: case Color.red: pass case Color.green: pass
Color
python
pytorch__pytorch
torch/_inductor/template_heuristics/triton.py
{ "start": 76263, "end": 76625 }
class ____(MMTemplateConfigMixin, CUDAConfigHeuristic): """Standard MM template heuristic for CUDA""" @register_template_heuristic( mm_template.uid, "cuda", register=torch.version.hip is None, op_name="addmm" ) @register_template_heuristic( bmm_template.uid, "cuda", register=torch.version.hip is None, op_name="baddbmm" )
CUDAMMTemplateConfigHeuristic
python
astropy__astropy
astropy/coordinates/attributes.py
{ "start": 7497, "end": 10190 }
class ____(Attribute): """ A frame attribute that is a CartesianRepresentation with specified units. Parameters ---------- default : object Default value for the attribute if not provided secondary_attribute : str Name of a secondary instance attribute which supplies the value if ``default is None`` and no value was supplied during initialization. unit : unit-like or None Name of a unit that the input will be converted into. If None, no unit-checking or conversion is performed doc : str Description of the frame attribute for help and documentation """ def __init__(self, default=None, secondary_attribute="", unit=None, **kwargs): super().__init__(default, secondary_attribute, **kwargs) self.unit = unit def convert_input(self, value): """ Checks that the input is a CartesianRepresentation with the correct unit, or the special value ``[0, 0, 0]``. Parameters ---------- value : object Input value to be converted. Returns ------- out : object The correctly-typed object. converted : boolean A boolean which indicates if conversion was actually performed. Raises ------ ValueError If the input is not valid for this attribute. """ if ( isinstance(value, list) and len(value) == 3 and all(v == 0 for v in value) and self.unit is not None ): return CartesianRepresentation(np.zeros(3) * self.unit), True else: # is it a CartesianRepresentation with correct unit? try: cartesian = value.to_cartesian() except AttributeError: converted = False else: converted = cartesian is not value value = cartesian if hasattr(value, "xyz") and value.xyz.unit == self.unit: return value, converted converted = True # if it's a CartesianRepresentation, get the xyz Quantity value = getattr(value, "xyz", value) if not hasattr(value, "unit"): raise TypeError( f"tried to set a {self.__class__.__name__} with something that does" " not have a unit." ) value = value.to(self.unit) # now try and make a CartesianRepresentation. cartrep = CartesianRepresentation(value, copy=False) return cartrep, converted
CartesianRepresentationAttribute
python
networkx__networkx
networkx/algorithms/shortest_paths/tests/test_generic.py
{ "start": 17070, "end": 19930 }
class ____: def test_cycle_graph(self): ans = nx.average_shortest_path_length(nx.cycle_graph(7)) assert ans == pytest.approx(2, abs=1e-7) def test_path_graph(self): ans = nx.average_shortest_path_length(nx.path_graph(5)) assert ans == pytest.approx(2, abs=1e-7) def test_weighted(self): G = nx.Graph() nx.add_cycle(G, range(7), weight=2) ans = nx.average_shortest_path_length(G, weight="weight") assert ans == pytest.approx(4, abs=1e-7) G = nx.Graph() nx.add_path(G, range(5), weight=2) ans = nx.average_shortest_path_length(G, weight="weight") assert ans == pytest.approx(4, abs=1e-7) def test_specified_methods(self): G = nx.Graph() nx.add_cycle(G, range(7), weight=2) ans = nx.average_shortest_path_length(G, weight="weight", method="dijkstra") assert ans == pytest.approx(4, abs=1e-7) ans = nx.average_shortest_path_length(G, weight="weight", method="bellman-ford") assert ans == pytest.approx(4, abs=1e-7) ans = nx.average_shortest_path_length( G, weight="weight", method="floyd-warshall" ) assert ans == pytest.approx(4, abs=1e-7) G = nx.Graph() nx.add_path(G, range(5), weight=2) ans = nx.average_shortest_path_length(G, weight="weight", method="dijkstra") assert ans == pytest.approx(4, abs=1e-7) ans = nx.average_shortest_path_length(G, weight="weight", method="bellman-ford") assert ans == pytest.approx(4, abs=1e-7) ans = nx.average_shortest_path_length( G, weight="weight", method="floyd-warshall" ) assert ans == pytest.approx(4, abs=1e-7) def test_directed_not_strongly_connected(self): G = nx.DiGraph([(0, 1)]) with pytest.raises(nx.NetworkXError, match="Graph is not strongly connected"): nx.average_shortest_path_length(G) def test_undirected_not_connected(self): g = nx.Graph() g.add_nodes_from(range(3)) g.add_edge(0, 1) pytest.raises(nx.NetworkXError, nx.average_shortest_path_length, g) def test_trivial_graph(self): """Tests that the trivial graph has average path length zero, since there is exactly one path of length zero in the trivial graph. 
For more information, see issue #1960. """ G = nx.trivial_graph() assert nx.average_shortest_path_length(G) == 0 def test_null_graph(self): with pytest.raises(nx.NetworkXPointlessConcept): nx.average_shortest_path_length(nx.null_graph()) def test_bad_method(self): with pytest.raises(ValueError): G = nx.path_graph(2) nx.average_shortest_path_length(G, weight="weight", method="SPAM")
TestAverageShortestPathLength
python
ethereum__web3.py
web3/exceptions.py
{ "start": 7073, "end": 7495 }
class ____(Web3Exception): """ Used to signal between asyncio contexts that a task that is being awaited is not currently running. """ def __init__(self, task: "asyncio.Task[Any]", message: str | None = None) -> None: self.task = task if message is None: message = f"Task {task} is not running." self.message = message super().__init__(message)
TaskNotRunning
python
facebookresearch__faiss
tests/test_local_search_quantizer.py
{ "start": 17482, "end": 19385 }
class ____(unittest.TestCase): def test_accuracy1(self): """check that the error is in the same ballpark as LSQ.""" recall1 = self.eval_index_accuracy("PLSQ4x3x5_Nqint8") recall2 = self.eval_index_accuracy("LSQ12x5_Nqint8") self.assertGreaterEqual(recall1, recall2) # 622 vs 551 def test_accuracy2(self): """when nsplits = 1, PLSQ should be almost the same as LSQ""" recall1 = self.eval_index_accuracy("PLSQ1x3x5_Nqint8") recall2 = self.eval_index_accuracy("LSQ3x5_Nqint8") diff = abs(recall1 - recall2) # 273 vs 275 in OSX self.assertGreaterEqual(5, diff) def eval_index_accuracy(self, index_key): ds = datasets.SyntheticDataset(32, 1000, 1000, 100) index = faiss.index_factory(ds.d, index_key) index.train(ds.get_train()) index.add(ds.get_database()) D, I = index.search(ds.get_queries(), 10) inter = faiss.eval_intersection(I, ds.get_groundtruth(10)) # do a little I/O test index2 = faiss.deserialize_index(faiss.serialize_index(index)) D2, I2 = index2.search(ds.get_queries(), 10) np.testing.assert_array_equal(I2, I) np.testing.assert_array_equal(D2, D) return inter def test_factory(self): AQ = faiss.AdditiveQuantizer ns, Msub, nbits = 2, 4, 8 index = faiss.index_factory(64, f"PLSQ{ns}x{Msub}x{nbits}_Nqint8") assert isinstance(index, faiss.IndexProductLocalSearchQuantizer) self.assertEqual(index.plsq.nsplits, ns) self.assertEqual(index.plsq.subquantizer(0).M, Msub) self.assertEqual(index.plsq.subquantizer(0).nbits.at(0), nbits) self.assertEqual(index.plsq.search_type, AQ.ST_norm_qint8) code_size = (ns * Msub * nbits + 7) // 8 + 1 self.assertEqual(index.plsq.code_size, code_size)
TestIndexProductLocalSearchQuantizer
python
getsentry__sentry
src/sentry/ratelimits/cardinality.py
{ "start": 764, "end": 2650 }
class ____(CardinalityLimiter): def __init__( self, cluster: str = "default", num_shards: int = 3, num_physical_shards: int = 3, metric_tags: Mapping[str, str] | None = None, ) -> None: """ :param cluster: Name of the redis cluster to use, to be configured with the `redis.clusters` Sentry option (like any other redis cluster in Sentry). :param cluster_num_shards: The number of logical shards to have. This controls the average set size in Redis. :param cluster_num_physical_shards: The number of actual shards to store. Controls how many keys of type "unordered set" there are in Redis. The ratio `cluster_num_physical_shards / cluster_num_shards` is a sampling rate, the lower it is, the less precise accounting will be. """ is_redis_cluster, client, _ = redis.get_dynamic_cluster_from_options( "", {"cluster": cluster} ) if is_redis_cluster: assert isinstance(client, RedisCluster) else: assert isinstance(client, BlasterClient) self.impl = RedisCardinalityLimiterImpl( client, num_shards=num_shards, num_physical_shards=num_physical_shards, metrics_backend=RedisToolsMetricsBackend(metrics.backend, tags=metric_tags), ) super().__init__() def check_within_quotas( self, requests: Sequence[RequestedQuota], timestamp: Timestamp | None = None ) -> tuple[Timestamp, Sequence[GrantedQuota]]: return self.impl.check_within_quotas(requests, timestamp) def use_quotas( self, grants: Sequence[GrantedQuota], timestamp: Timestamp, ) -> None: return self.impl.use_quotas(grants, timestamp)
RedisCardinalityLimiter
python
django__django
tests/admin_views/tests.py
{ "start": 345921, "end": 357145 }
class ____(TestCase): admin_site = site @classmethod def setUpTestData(cls): cls.superuser = User.objects.create_superuser( username="super", password="secret", email="super@example.com" ) cls.joepublicuser = User.objects.create_user( username="joepublic", password="secret" ) def setUp(self): self.client.force_login(self.superuser) def assertURLEqual(self, url1, url2, msg_prefix=""): """ Assert that two URLs are equal despite the ordering of their querystring. Refs #22360. """ parsed_url1 = urlsplit(url1) path1 = parsed_url1.path parsed_qs1 = dict(parse_qsl(parsed_url1.query)) parsed_url2 = urlsplit(url2) path2 = parsed_url2.path parsed_qs2 = dict(parse_qsl(parsed_url2.query)) for parsed_qs in [parsed_qs1, parsed_qs2]: if "_changelist_filters" in parsed_qs: changelist_filters = parsed_qs["_changelist_filters"] parsed_filters = dict(parse_qsl(changelist_filters)) parsed_qs["_changelist_filters"] = parsed_filters self.assertEqual(path1, path2) self.assertEqual(parsed_qs1, parsed_qs2) def test_assert_url_equal(self): # Test equality. change_user_url = reverse( "admin:auth_user_change", args=(self.joepublicuser.pk,) ) self.assertURLEqual( "http://testserver{}?_changelist_filters=" "is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url), "http://testserver{}?_changelist_filters=" "is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url), ) # Test inequality. with self.assertRaises(AssertionError): self.assertURLEqual( "http://testserver{}?_changelist_filters=" "is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url), "http://testserver{}?_changelist_filters=" "is_staff__exact%3D1%26is_superuser__exact%3D1".format(change_user_url), ) # Ignore scheme and host. self.assertURLEqual( "http://testserver{}?_changelist_filters=" "is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url), "{}?_changelist_filters=" "is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url), ) # Ignore ordering of querystring. 
self.assertURLEqual( "{}?is_staff__exact=0&is_superuser__exact=0".format( reverse("admin:auth_user_changelist") ), "{}?is_superuser__exact=0&is_staff__exact=0".format( reverse("admin:auth_user_changelist") ), ) # Ignore ordering of _changelist_filters. self.assertURLEqual( "{}?_changelist_filters=" "is_staff__exact%3D0%26is_superuser__exact%3D0".format(change_user_url), "{}?_changelist_filters=" "is_superuser__exact%3D0%26is_staff__exact%3D0".format(change_user_url), ) def get_changelist_filters(self): return { "is_superuser__exact": 0, "is_staff__exact": 0, } def get_changelist_filters_querystring(self): return urlencode(self.get_changelist_filters()) def get_preserved_filters_querystring(self): return urlencode( {"_changelist_filters": self.get_changelist_filters_querystring()} ) def get_sample_user_id(self): return self.joepublicuser.pk def get_changelist_url(self): return "%s?%s" % ( reverse("admin:auth_user_changelist", current_app=self.admin_site.name), self.get_changelist_filters_querystring(), ) def get_add_url(self, add_preserved_filters=True): url = reverse("admin:auth_user_add", current_app=self.admin_site.name) if add_preserved_filters: url = "%s?%s" % (url, self.get_preserved_filters_querystring()) return url def get_change_url(self, user_id=None, add_preserved_filters=True): if user_id is None: user_id = self.get_sample_user_id() url = reverse( "admin:auth_user_change", args=(user_id,), current_app=self.admin_site.name ) if add_preserved_filters: url = "%s?%s" % (url, self.get_preserved_filters_querystring()) return url def get_history_url(self, user_id=None): if user_id is None: user_id = self.get_sample_user_id() return "%s?%s" % ( reverse( "admin:auth_user_history", args=(user_id,), current_app=self.admin_site.name, ), self.get_preserved_filters_querystring(), ) def get_delete_url(self, user_id=None): if user_id is None: user_id = self.get_sample_user_id() return "%s?%s" % ( reverse( "admin:auth_user_delete", args=(user_id,), 
current_app=self.admin_site.name, ), self.get_preserved_filters_querystring(), ) def test_changelist_view(self): response = self.client.get(self.get_changelist_url()) self.assertEqual(response.status_code, 200) # Check the `change_view` link has the correct querystring. detail_link = re.search( '<a href="(.*?)">{}</a>'.format(self.joepublicuser.username), response.text, ) self.assertURLEqual(detail_link[1], self.get_change_url()) def test_change_view(self): # Get the `change_view`. response = self.client.get(self.get_change_url()) self.assertEqual(response.status_code, 200) # Check the form action. form_action = re.search( '<form action="(.*?)" method="post" id="user_form" novalidate>', response.text, ) self.assertURLEqual( form_action[1], "?%s" % self.get_preserved_filters_querystring() ) # Check the history link. history_link = re.search( '<a href="(.*?)" class="historylink">History</a>', response.text, ) self.assertURLEqual(history_link[1], self.get_history_url()) # Check the delete link. delete_link = re.search( '<a role="button" href="(.*?)" class="deletelink">Delete</a>', response.text ) self.assertURLEqual(delete_link[1], self.get_delete_url()) # Test redirect on "Save". post_data = { "username": "joepublic", "last_login_0": "2007-05-30", "last_login_1": "13:20:10", "date_joined_0": "2007-05-30", "date_joined_1": "13:20:10", } post_data["_save"] = 1 response = self.client.post(self.get_change_url(), data=post_data) self.assertRedirects(response, self.get_changelist_url()) post_data.pop("_save") # Test redirect on "Save and continue". post_data["_continue"] = 1 response = self.client.post(self.get_change_url(), data=post_data) self.assertRedirects(response, self.get_change_url()) post_data.pop("_continue") # Test redirect on "Save and add new". 
post_data["_addanother"] = 1 response = self.client.post(self.get_change_url(), data=post_data) self.assertRedirects(response, self.get_add_url()) post_data.pop("_addanother") def test_change_view_close_link(self): viewuser = User.objects.create_user( username="view", password="secret", is_staff=True ) viewuser.user_permissions.add( get_perm(User, get_permission_codename("view", User._meta)) ) self.client.force_login(viewuser) response = self.client.get(self.get_change_url()) close_link = re.search( '<a role="button" href="(.*?)" class="closelink">Close</a>', response.text ) close_link = close_link[1].replace("&amp;", "&") self.assertURLEqual(close_link, self.get_changelist_url()) def test_change_view_without_preserved_filters(self): response = self.client.get(self.get_change_url(add_preserved_filters=False)) # The action attribute is omitted. self.assertContains(response, '<form method="post" id="user_form" novalidate>') def test_add_view(self): # Get the `add_view`. response = self.client.get(self.get_add_url()) self.assertEqual(response.status_code, 200) # Check the form action. form_action = re.search( '<form action="(.*?)" method="post" id="user_form" novalidate>', response.text, ) self.assertURLEqual( form_action[1], "?%s" % self.get_preserved_filters_querystring() ) post_data = { "username": "dummy", "password1": "test", "password2": "test", } # Test redirect on "Save". post_data["_save"] = 1 response = self.client.post(self.get_add_url(), data=post_data) self.assertRedirects( response, self.get_change_url(User.objects.get(username="dummy").pk) ) post_data.pop("_save") # Test redirect on "Save and continue". post_data["username"] = "dummy2" post_data["_continue"] = 1 response = self.client.post(self.get_add_url(), data=post_data) self.assertRedirects( response, self.get_change_url(User.objects.get(username="dummy2").pk) ) post_data.pop("_continue") # Test redirect on "Save and add new". 
post_data["username"] = "dummy3" post_data["_addanother"] = 1 response = self.client.post(self.get_add_url(), data=post_data) self.assertRedirects(response, self.get_add_url()) post_data.pop("_addanother") def test_add_view_without_preserved_filters(self): response = self.client.get(self.get_add_url(add_preserved_filters=False)) # The action attribute is omitted. self.assertContains(response, '<form method="post" id="user_form" novalidate>') def test_delete_view(self): # Test redirect on "Delete". response = self.client.post(self.get_delete_url(), {"post": "yes"}) self.assertRedirects(response, self.get_changelist_url()) def test_url_prefix(self): context = { "preserved_filters": self.get_preserved_filters_querystring(), "opts": User._meta, } prefixes = ("", "/prefix/", "/後台/") for prefix in prefixes: with self.subTest(prefix=prefix), override_script_prefix(prefix): url = reverse( "admin:auth_user_changelist", current_app=self.admin_site.name ) self.assertURLEqual( self.get_changelist_url(), add_preserved_filters(context, url), )
AdminKeepChangeListFiltersTests
python
kamyu104__LeetCode-Solutions
Python/rearranging-fruits.py
{ "start": 110, "end": 1838 }
class ____(object): def minCost(self, basket1, basket2): """ :type basket1: List[int] :type basket2: List[int] :rtype: int """ def nth_element(nums, n, left=0, compare=lambda a, b: a < b): def tri_partition(nums, left, right, target, compare): mid = left while mid <= right: if nums[mid] == target: mid += 1 elif compare(nums[mid], target): nums[left], nums[mid] = nums[mid], nums[left] left += 1 mid += 1 else: nums[mid], nums[right] = nums[right], nums[mid] right -= 1 return left, right right = len(nums)-1 while left <= right: pivot_idx = random.randint(left, right) pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare) if pivot_left <= n <= pivot_right: return elif pivot_left > n: right = pivot_left-1 else: # pivot_right < n. left = pivot_right+1 cnt = collections.Counter() for x in basket1: cnt[x] += 1 for x in basket2: cnt[x] -= 1 mn = min(cnt.iterkeys()) swaps = [] for k, v in cnt.iteritems(): if v%2: return -1 swaps.extend(k for _ in xrange(abs(v)//2)) nth_element(swaps, len(swaps)//2) return sum(min(swaps[i], mn*2) for i in xrange(len(swaps)//2))
Solution
python
astropy__astropy
astropy/visualization/wcsaxes/coordinates_map.py
{ "start": 289, "end": 7643 }
class ____: """ A container for coordinate helpers that represents a coordinate system. This object can be used to access coordinate helpers by index (like a list) or by name (like a dictionary). Parameters ---------- axes : :class:`~astropy.visualization.wcsaxes.WCSAxes` The axes the coordinate map belongs to. transform : `~matplotlib.transforms.Transform`, optional The transform for the data. coord_meta : dict, optional A dictionary providing additional metadata. This should include the keys ``type``, ``wrap``, and ``unit``. Each of these should be a list with as many items as the dimension of the coordinate system. The ``type`` entries should be one of ``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should give, for the longitude, the angle at which the coordinate wraps (and `None` otherwise), and the ``unit`` should give the unit of the coordinates as :class:`~astropy.units.Unit` instances. This can optionally also include a ``format_unit`` entry giving the units to use for the tick labels (if not specified, this defaults to ``unit``). frame_class : type, optional The class for the frame, which should be a subclass of :class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a :class:`~astropy.visualization.wcsaxes.frame.RectangularFrame` previous_frame_path : `~matplotlib.path.Path`, optional When changing the WCS of the axes, the frame instance will change but we might want to keep reusing the same underlying matplotlib `~matplotlib.path.Path` - in that case, this can be passed to this keyword argument. 
""" def __init__( self, axes, transform=None, coord_meta=None, frame_class=RectangularFrame, previous_frame_path=None, ): self._axes = axes self._transform = transform self.frame = frame_class(axes, self._transform, path=previous_frame_path) # Set up coordinates self._coords = [] self._aliases = {} visible_count = 0 for index in range(len(coord_meta["type"])): # Extract coordinate metadata coord_type = coord_meta["type"][index] coord_wrap = coord_meta["wrap"][index] coord_unit = coord_meta["unit"][index] name = coord_meta["name"][index] visible = True if "visible" in coord_meta: visible = coord_meta["visible"][index] format_unit = None if "format_unit" in coord_meta: format_unit = coord_meta["format_unit"][index] default_label = name[0] if isinstance(name, (tuple, list)) else name if "default_axis_label" in coord_meta: default_label = coord_meta["default_axis_label"][index] coord_index = None if visible: visible_count += 1 coord_index = visible_count - 1 self._coords.append( CoordinateHelper( parent_axes=axes, parent_map=self, transform=self._transform, coord_index=coord_index, coord_type=coord_type, coord_wrap=coord_wrap, coord_unit=coord_unit, format_unit=format_unit, frame=self.frame, default_label=default_label, ) ) # Set up aliases for coordinates if isinstance(name, tuple): for nm in name: nm = nm.lower() # Do not replace an alias already in the map if we have # more than one alias for this axis. if nm not in self._aliases: self._aliases[nm] = index else: self._aliases[name.lower()] = index def __getitem__(self, item): if isinstance(item, str): return self._coords[self._aliases[item.lower()]] else: return self._coords[item] def __contains__(self, item): if isinstance(item, str): return item.lower() in self._aliases else: return 0 <= item < len(self._coords) def set_visible(self, visibility): raise NotImplementedError() def __iter__(self): yield from self._coords def grid(self, draw_grid=True, grid_type=None, **kwargs): """ Plot gridlines for both coordinates. 
Standard matplotlib appearance options (color, alpha, etc.) can be passed as keyword arguments. Parameters ---------- draw_grid : bool Whether to show the gridlines grid_type : { 'lines' | 'contours' } Whether to plot the contours by determining the grid lines in world coordinates and then plotting them in world coordinates (``'lines'``) or by determining the world coordinates at many positions in the image and then drawing contours (``'contours'``). The first is recommended for 2-d images, while for 3-d (or higher dimensional) cubes, the ``'contours'`` option is recommended. By default, 'lines' is used if the transform has an inverse, otherwise 'contours' is used. """ for coord in self: coord.grid(draw_grid=draw_grid, grid_type=grid_type, **kwargs) def get_coord_range(self): xmin, xmax = self._axes.get_xlim() if isinstance(self.frame, RectangularFrame1D): extent = [xmin, xmax] else: ymin, ymax = self._axes.get_ylim() extent = [xmin, xmax, ymin, ymax] return find_coordinate_range( self._transform, extent, [coord.coord_type for coord in self if coord.coord_index is not None], [coord.coord_unit for coord in self if coord.coord_index is not None], [coord.coord_wrap for coord in self if coord.coord_index is not None], ) def _as_table(self): # Import Table here to avoid importing the astropy.table package # every time astropy.visualization.wcsaxes is imported. 
from astropy.table import Table rows = [] for icoord, coord in enumerate(self._coords): aliases = [key for key, value in self._aliases.items() if value == icoord] row = OrderedDict( [ ("index", icoord), ("aliases", " ".join(aliases)), ("type", coord.coord_type), ("unit", coord.coord_unit), ("wrap", coord.coord_wrap), ("format_unit", coord.get_format_unit()), ("visible", "no" if coord.coord_index is None else "yes"), ] ) rows.append(row) return Table(rows=rows) def __repr__(self): s = f"<CoordinatesMap with {len(self._coords)} world coordinates:\n\n" table = indent(str(self._as_table()), " ") return s + table + "\n\n>"
CoordinatesMap
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_outline01.py
{ "start": 315, "end": 2757 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("outline01.xlsx") self.ignore_files = [ "xl/calcChain.xml", "[Content_Types].xml", "xl/_rels/workbook.xml.rels", ] def test_create_file(self): """ Test the creation of a outlines in a XlsxWriter file. These tests are based on the outline programs in the examples directory. """ workbook = Workbook(self.got_filename) worksheet1 = workbook.add_worksheet("Outlined Rows") bold = workbook.add_format({"bold": 1}) worksheet1.set_row(1, None, None, {"level": 2}) worksheet1.set_row(2, None, None, {"level": 2}) worksheet1.set_row(3, None, None, {"level": 2}) worksheet1.set_row(4, None, None, {"level": 2}) worksheet1.set_row(5, None, None, {"level": 1}) worksheet1.set_row(6, None, None, {"level": 2}) worksheet1.set_row(7, None, None, {"level": 2}) worksheet1.set_row(8, None, None, {"level": 2}) worksheet1.set_row(9, None, None, {"level": 2}) worksheet1.set_row(10, None, None, {"level": 1}) worksheet1.set_column("A:A", 20) worksheet1.write("A1", "Region", bold) worksheet1.write("A2", "North") worksheet1.write("A3", "North") worksheet1.write("A4", "North") worksheet1.write("A5", "North") worksheet1.write("A6", "North Total", bold) worksheet1.write("B1", "Sales", bold) worksheet1.write("B2", 1000) worksheet1.write("B3", 1200) worksheet1.write("B4", 900) worksheet1.write("B5", 1200) worksheet1.write("B6", "=SUBTOTAL(9,B2:B5)", bold, 4300) worksheet1.write("A7", "South") worksheet1.write("A8", "South") worksheet1.write("A9", "South") worksheet1.write("A10", "South") worksheet1.write("A11", "South Total", bold) worksheet1.write("B7", 400) worksheet1.write("B8", 600) worksheet1.write("B9", 500) worksheet1.write("B10", 600) worksheet1.write("B11", "=SUBTOTAL(9,B7:B10)", bold, 2100) worksheet1.write("A12", "Grand Total", bold) worksheet1.write("B12", "=SUBTOTAL(9,B2:B10)", bold, 6400) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
streamlit__streamlit
lib/tests/streamlit/data_mocks/snowpark_mocks.py
{ "start": 736, "end": 1544 }
class ____: """This is dummy DataFrame class, which imitates nowflake.snowpark.dataframe.DataFrame class for testing purposes. We use this to make sure that our code does a special handling if it detects a Snowpark Dataframe. This allows testing of the functionality without having the library installed, but it won't capture changes in the API of the library. This requires integration tests. """ __module__ = "snowflake.snowpark.dataframe" def __init__(self, data: pd.DataFrame): self._data: pd.DataFrame = data def to_pandas(self) -> pd.DataFrame: return self._data def limit(self, n: int) -> DataFrame: """Returns the top n element of a mock version of Snowpark Dataframe""" return DataFrame(self._data.head(n))
DataFrame
python
tensorflow__tensorflow
tensorflow/python/data/kernel_tests/concatenate_test.py
{ "start": 7022, "end": 8291 }
class ____(checkpoint_test_base.CheckpointTestBase, parameterized.TestCase): def _build_concatenate_dataset(self, var_array, options=None): input_components = (np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(np.array([[12], [13], [14], [15]]), 4)) to_concatenate_components = (np.tile( np.array([[5], [6], [7], [8], [9]]), 20), var_array) dataset = dataset_ops.Dataset.from_tensor_slices( input_components).concatenate( dataset_ops.Dataset.from_tensor_slices(to_concatenate_components)) if options: dataset = dataset.with_options(options) return dataset @combinations.generate( combinations.times( test_base.default_test_combinations(), checkpoint_test_base.default_test_combinations(), combinations.combine(symbolic_checkpoint=[False, True]))) def test(self, verify_fn, symbolic_checkpoint): num_outputs = 9 array = np.tile(np.array([[16], [17], [18], [19], [20]]), 15) options = options_lib.Options() options.experimental_symbolic_checkpoint = symbolic_checkpoint verify_fn(self, lambda: self._build_concatenate_dataset(array, options), num_outputs)
ConcatenateCheckpointTest
python
run-llama__llama_index
llama-index-core/llama_index/core/extractors/metadata_extractors.py
{ "start": 1943, "end": 6299 }
class ____(BaseExtractor): """ Title extractor. Useful for long documents. Extracts `document_title` metadata field. Args: llm (Optional[LLM]): LLM nodes (int): number of nodes from front to use for title extraction node_template (str): template for node-level title clues extraction combine_template (str): template for combining node-level clues into a document-level title """ is_text_node_only: bool = False # can work for mixture of text and non-text nodes llm: SerializeAsAny[LLM] = Field(description="The LLM to use for generation.") nodes: int = Field( default=5, description="The number of nodes to extract titles from.", gt=0, ) node_template: str = Field( default=DEFAULT_TITLE_NODE_TEMPLATE, description="The prompt template to extract titles with.", ) combine_template: str = Field( default=DEFAULT_TITLE_COMBINE_TEMPLATE, description="The prompt template to merge titles with.", ) def __init__( self, llm: Optional[LLM] = None, # TODO: llm_predictor arg is deprecated llm_predictor: Optional[LLM] = None, nodes: int = 5, node_template: str = DEFAULT_TITLE_NODE_TEMPLATE, combine_template: str = DEFAULT_TITLE_COMBINE_TEMPLATE, num_workers: int = DEFAULT_NUM_WORKERS, **kwargs: Any, ) -> None: """Init params.""" if nodes < 1: raise ValueError("num_nodes must be >= 1") super().__init__( llm=llm or llm_predictor or Settings.llm, nodes=nodes, node_template=node_template, combine_template=combine_template, num_workers=num_workers, **kwargs, ) @classmethod def class_name(cls) -> str: return "TitleExtractor" async def aextract(self, nodes: Sequence[BaseNode]) -> List[Dict]: nodes_by_doc_id = self.separate_nodes_by_ref_id(nodes) titles_by_doc_id = await self.extract_titles(nodes_by_doc_id) return [{"document_title": titles_by_doc_id[node.ref_doc_id]} for node in nodes] def filter_nodes(self, nodes: Sequence[BaseNode]) -> List[BaseNode]: filtered_nodes: List[BaseNode] = [] for node in nodes: if self.is_text_node_only and not isinstance(node, TextNode): continue 
filtered_nodes.append(node) return filtered_nodes def separate_nodes_by_ref_id(self, nodes: Sequence[BaseNode]) -> Dict: separated_items: Dict[Optional[str], List[BaseNode]] = {} for node in nodes: key = node.ref_doc_id if key not in separated_items: separated_items[key] = [] if len(separated_items[key]) < self.nodes: separated_items[key].append(node) return separated_items async def extract_titles(self, nodes_by_doc_id: Dict) -> Dict: jobs = [] final_dict = {} async def get_titles_by_doc(nodes: List[BaseNode], key: str) -> Dict: titles_by_doc_id = {} title_candidates = await self.get_title_candidates(nodes) combined_titles = ", ".join(title_candidates) titles_by_doc_id[key] = await self.llm.apredict( PromptTemplate(template=self.combine_template), context_str=combined_titles, ) return titles_by_doc_id for key, nodes in nodes_by_doc_id.items(): jobs.append(get_titles_by_doc(nodes, key)) list_dict_titles: List[Dict] = await run_jobs( jobs=jobs, show_progress=self.show_progress, ) for d in list_dict_titles: for key, value in d.items(): final_dict.update({key: value}) return final_dict async def get_title_candidates(self, nodes: List[BaseNode]) -> List[str]: return [ await self.llm.apredict( PromptTemplate(template=self.node_template), context_str=cast(TextNode, node).text, ) for node in nodes ] DEFAULT_KEYWORD_EXTRACT_TEMPLATE = """\ {context_str}. Give {keywords} unique keywords for this \ document. Format as comma separated. Keywords: """
TitleExtractor
python
facebook__pyre-check
tools/generate_taint_models/model.py
{ "start": 10068, "end": 11628 }
class ____(RawCallableModel): definition: query.Define def __init__( self, definition: query.Define, parameter_annotation: Optional[ParameterAnnotation] = None, returns: Optional[str] = None, parameter_type_whitelist: Optional[Iterable[str]] = None, parameter_name_whitelist: Optional[Set[str]] = None, annotations: Optional[AnnotationSpecification] = None, whitelist: Optional[WhitelistSpecification] = None, ) -> None: self.definition = definition super().__init__( parameter_annotation=parameter_annotation, returns=returns, parameter_type_whitelist=parameter_type_whitelist, parameter_name_whitelist=parameter_name_whitelist, annotations=annotations, whitelist=whitelist, ) def _generate_parameters(self) -> List[Parameter]: parameters: List[Parameter] = [] for parameter in self.definition.parameters: if "**" in parameter.name: kind = Parameter.Kind.KWARG elif "*" in parameter.name: kind = Parameter.Kind.VARARG else: kind = Parameter.Kind.ARG parameters.append( Parameter( name=parameter.name, annotation=parameter.annotation, kind=kind ) ) return parameters def _get_fully_qualified_callable_name(self) -> Optional[str]: return self.definition.name
PyreFunctionDefinitionModel
python
django__django
tests/urlpatterns/converters.py
{ "start": 234, "end": 842 }
class ____: _dynamic_to_python = None _dynamic_to_url = None @property def regex(self): return r"[0-9a-zA-Z]+" @regex.setter def regex(self): raise Exception("You can't modify the regular expression.") def to_python(self, value): return type(self)._dynamic_to_python(value) def to_url(self, value): return type(self)._dynamic_to_url(value) @classmethod def register_to_python(cls, value): cls._dynamic_to_python = value @classmethod def register_to_url(cls, value): cls._dynamic_to_url = value
DynamicConverter
python
python-openxml__python-docx
src/docx/enum/text.py
{ "start": 3926, "end": 5370 }
class ____(BaseXmlEnum): """Specifies a line spacing format to be applied to a paragraph. Example:: from docx.enum.text import WD_LINE_SPACING paragraph = document.add_paragraph() paragraph.line_spacing_rule = WD_LINE_SPACING.EXACTLY MS API name: `WdLineSpacing` URL: http://msdn.microsoft.com/en-us/library/office/ff844910.aspx """ SINGLE = (0, "UNMAPPED", "Single spaced (default).") """Single spaced (default).""" ONE_POINT_FIVE = (1, "UNMAPPED", "Space-and-a-half line spacing.") """Space-and-a-half line spacing.""" DOUBLE = (2, "UNMAPPED", "Double spaced.") """Double spaced.""" AT_LEAST = ( 3, "atLeast", "Minimum line spacing is specified amount. Amount is specified separately.", ) """Minimum line spacing is specified amount. Amount is specified separately.""" EXACTLY = ( 4, "exact", "Line spacing is exactly specified amount. Amount is specified separately.", ) """Line spacing is exactly specified amount. Amount is specified separately.""" MULTIPLE = ( 5, "auto", "Line spacing is specified as multiple of line heights. Changing font size" " will change line spacing proportionately.", ) """Line spacing is specified as multiple of line heights. Changing font size will change the line spacing proportionately."""
WD_LINE_SPACING
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_defined_name01.py
{ "start": 315, "end": 1859 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("defined_name01.xlsx") self.ignore_files = [ "xl/printerSettings/printerSettings1.bin", "xl/worksheets/_rels/sheet1.xml.rels", ] self.ignore_elements = { "[Content_Types].xml": ['<Default Extension="bin"'], "xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"], } def test_create_file(self): """Test the creation of a simple XlsxWriter file with defined names.""" workbook = Workbook(self.got_filename) worksheet1 = workbook.add_worksheet() worksheet2 = workbook.add_worksheet() worksheet3 = workbook.add_worksheet("Sheet 3") worksheet1.print_area("A1:E6") worksheet1.autofilter("F1:G1") worksheet1.write("G1", "Filter") worksheet1.write("F1", "Auto") worksheet1.fit_to_pages(2, 2) workbook.define_name("'Sheet 3'!Bar", "='Sheet 3'!$A$1") workbook.define_name("Abc", "=Sheet1!$A$1") workbook.define_name("Baz", "=0.98") workbook.define_name("Sheet1!Bar", "=Sheet1!$A$1") workbook.define_name("Sheet2!Bar", "=Sheet2!$A$1") workbook.define_name("Sheet2!aaa", "=Sheet2!$A$1") workbook.define_name("_Egg", "=Sheet1!$A$1") workbook.define_name("_Fog", "=Sheet1!$A$1") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
pypa__hatch
src/hatch/env/collectors/plugin/interface.py
{ "start": 37, "end": 2013 }
class ____: """ Example usage: ```python tab="plugin.py" from hatch.env.collectors.plugin.interface import EnvironmentCollectorInterface class SpecialEnvironmentCollector(EnvironmentCollectorInterface): PLUGIN_NAME = 'special' ... ``` ```python tab="hooks.py" from hatchling.plugin import hookimpl from .plugin import SpecialEnvironmentCollector @hookimpl def hatch_register_environment_collector(): return SpecialEnvironmentCollector ``` """ PLUGIN_NAME = "" """The name used for selection.""" def __init__(self, root, config): self.__root = root self.__config = config @property def root(self): """ The root of the project tree as a path-like object. """ return self.__root @property def config(self) -> dict: """ ```toml config-example [tool.hatch.env.collectors.<PLUGIN_NAME>] ``` """ return self.__config def get_initial_config(self) -> dict[str, dict]: # noqa: PLR6301 """ Returns configuration for environments keyed by the environment or matrix name. """ return {} def finalize_config(self, config: dict[str, dict]): """ Finalizes configuration for environments keyed by the environment or matrix name. This will override any user-defined settings and any collectors that ran before this call. This is called before matrices are turned into concrete environments. """ def finalize_environments(self, config: dict[str, dict]): """ Finalizes configuration for environments keyed by the environment name. This will override any user-defined settings and any collectors that ran before this call. This is called after matrices are turned into concrete environments. """
EnvironmentCollectorInterface
python
pytorch__pytorch
torch/testing/_internal/common_quantization.py
{ "start": 95571, "end": 96515 }
class ____(torch.nn.Module): def __init__(self) -> None: super().__init__() self.mycat = nnq.FloatFunctional() self.myadd = nnq.FloatFunctional() self.myadd_relu = nnq.FloatFunctional() self.mymatmul = nnq.FloatFunctional() # Tracing doesn't work yet for c10 ops with scalar inputs # https://github.com/pytorch/pytorch/issues/27097 # self.my_scalar_add = nnq.FloatFunctional() # self.my_scalar_mul = nnq.FloatFunctional() def forward(self, x): y = self.mycat.cat([x, x, x]) z = self.myadd.add(y, y) w = self.myadd_relu.add_relu(z, z) u = self.mymatmul.matmul(w, w.T) # Tracing doesn't work yet for c10 ops with scalar inputs # https://github.com/pytorch/pytorch/issues/27097 # w = self.my_scalar_add.add_scalar(w, -0.5) # w = self.my_scalar_mul.mul_scalar(w, 0.5) return u
ModelWithFunctionals
python
jazzband__django-waffle
waffle/models.py
{ "start": 17790, "end": 18118 }
class ____(AbstractBaseSample): """A sample of users. A sample is true some percentage of the time, but is not connected to users or requests. """ class Meta(AbstractBaseSample.Meta): swappable = 'WAFFLE_SAMPLE_MODEL' verbose_name = _('Sample') verbose_name_plural = _('Samples')
Sample
python
huggingface__transformers
src/transformers/models/sam3/modeling_sam3.py
{ "start": 79307, "end": 81284 }
class ____(nn.Module): """ Feature Pyramid Network (FPN) decoder that generates pixel-level features. Inspired by MaskFormer's pixel decoder. """ def __init__(self, config: Sam3MaskDecoderConfig): super().__init__() self.config = config hidden_size = config.hidden_size num_upsampling_stages = config.num_upsampling_stages # Create conv layers and norms for FPN self.conv_layers = nn.ModuleList( [ nn.Conv2d(hidden_size, hidden_size, kernel_size=3, stride=1, padding=1) for _ in range(num_upsampling_stages) ] ) self.norms = nn.ModuleList([nn.GroupNorm(8, hidden_size) for _ in range(num_upsampling_stages)]) self.out_channels = hidden_size def forward(self, backbone_features: list[torch.Tensor]) -> torch.Tensor: """ Args: backbone_features: List of backbone features [batch_size, hidden_size, H_i, W_i] from low to high resolution (assumes already projected to hidden_size) Returns: Pixel embeddings [batch_size, hidden_size, H, W] at the finest resolution """ # Start from the coarsest feature (last in list) prev_fpn = backbone_features[-1] # Iterate through features from coarse to fine (excluding the last which we started with) for layer_idx, backbone_feat in enumerate(reversed(backbone_features[:-1])): # Upsample previous FPN output to match current backbone feature size prev_fpn = F.interpolate(prev_fpn, size=backbone_feat.shape[-2:], mode="nearest") # Add skip connection prev_fpn = prev_fpn + backbone_feat # Apply conv and norm prev_fpn = self.conv_layers[layer_idx](prev_fpn) prev_fpn = self.norms[layer_idx](prev_fpn) prev_fpn = F.relu(prev_fpn) return prev_fpn
Sam3PixelDecoder
python
spack__spack
var/spack/test_repos/spack_repo/edges_test/packages/openblas/package.py
{ "start": 217, "end": 756 }
class ____(Package): """This package provides two virtuals together, so if one is chosen the other must be used too if needed. """ homepage = "http://www.openblas.net" url = "http://github.com/xianyi/OpenBLAS/archive/v0.2.15.tar.gz" version("0.2.16", md5="b1190f3d3471685f17cfd1ec1d252ac9") version("0.2.15", md5="b1190f3d3471685f17cfd1ec1d252ac9") version("0.2.14", md5="b1190f3d3471685f17cfd1ec1d252ac9") version("0.2.13", md5="b1190f3d3471685f17cfd1ec1d252ac9") provides("blas", "lapack")
Openblas
python
joke2k__faker
faker/providers/person/ta_IN/__init__.py
{ "start": 44, "end": 20391 }
class ____(PersonProvider): formats_female = ( "{{first_name_female}}", "{{first_name_female}}", "{{first_name_female}}", "{{first_name_female}} {{last_name}}", "{{first_name_female}} {{last_name}}", ) formats_male = ( "{{first_name_male}}", "{{first_name_male}}", "{{first_name_male}}", "{{first_name_male}} {{last_name}}", "{{first_name_male}} {{last_name}}", ) formats = formats_male + formats_female # Source: http://tamilcube.com/babynames/tamil-baby-names.aspx first_names_male = ( "அகண்டலன்", "அகத்தியன்", "அகரன்", "அகரமுதல்வன்", "அகற்கண்ணன்", "அகற்குறி", "அகலன்", "அகலறிவன்", "அகலார்சடையன்", "அகல்", "அகழ்மேனி", "அகவன்", "அகாத்", "அகிரா", "அகிலங்கடந்தான்", "அகிலன்", "அகில்", "அகுல்,", "அகோரா", "அக்தர்", "அக்னி", "அக்னிகுமாரன்", "அக்மல்,", "அக்ரூர்,", "அக்ரோதனன்", "அங்கணன்", "அங்கதன்", "அச்சுதானந்தன்", "அஜய்", "ஆகர்ணா,", "ஆகாஷ்", "ஆகேந்திரா", "ஆக்னேயா", "ஆசைத்தம்பி", "ஆஞ்சனேயா", "ஆடலரசன்", "ஆடலரசு", "ஆட்டனத்தி", "ஆண்டர்ஸன்", "ஆண்ட்ரு", "ஆதர்ஷ்", "ஆதர்ஷ்,ஆதேஷ்", "ஆதவன்", "ஆதி", "ஆதிகுணா", "ஆதிகேசவன்", "ஆதிசங்கரா", "ஆதிசேஷா", "ஆதிதேவா", "ஆதித்யவர்த்தன்", "ஆதித்யா", "ஆதிநாதன்", "ஆதிநாராயணா", "ஆதிமூர்த்தி", "ஆத்மஜா,", "ஆனந்த", "ஆனந்தகிரி", "ஆனந்தசாகரன்", "ஆனந்ததேவன்", "இக்பால்", "இக்ஷூ,", "இசக்கிமுத்து", "இசைக்கலை", "இசைக்கலைவாணன்", "இசைக்கோ", "இசைச்செல்வன்", "இசைச்செல்வம்", "இசைத்தமிழன்", "இசைத்தம்பி", "இசைமணி", "இசைமாமணி", "இசைமுதல்வன்", "இசையரசன்", "இசையரசு", "இசையறிவன்", "இசையழகன்", "இசையாளன்", "இசையேந்தல்", "இசைவளன்", "இசைவளவன்", "இசைவாணன்", "இசைவேந்தன்", "இடைக்காடன்", "இடைக்காடர்", "இந்தரஜித்", "இந்திகாப்", "இந்திரகுமார்", "இந்திரநீல்", "இந்திவார்", "உஜாகர்", "உஜேஷ்", "உட்கர்ஷ்", "உதயகுமார்;", "உதயச்சல்", "உதயன்", "உதயபரிதி", "உதயமூர்த்தி", "உதயவன்", "உதயவானன்", "உதயா", "உதய்", "உதர்", "உதாங்கன்", "உதித்", "உதியஞ்சேரல்", "உதியன்", "உதீப்", "உத்கர்ஷா", "உத்சவ்", "உத்தம்", "உத்தர்", "உத்தவ்", "உத்தியா", "உத்பல்", "உன்னத்", "உன்மேஷ்", "உபதேஷ்", "உபமன்யூ", "உபேந்திரா", "ஊர்ஜித்", "எகாம்பரம்", "எட்டப்பன்", "எழினி", "எழிற்கண்", "எழிற்கதிர்", "எழிற்குமரன்", "எழிற்கோ", 
"எழிற்கோமகன்", "எழிற்பாவியன்", "எழிலகன்", "எழிலன்", "எழிலன்பன்", "எழிலரசன்", "எழிலழகன்", "எழிலாம்பல்", "எழிலேந்தி", "எழில்", "எழில்குமரன்", "எழில்மணி", "எழில்மதி", "எழில்மன்னன்", "எழில்மலை", "எழில்முகிலன்", "ஏகசந்திரா", "ஏகசிந்த்", "ஏகராஜ்", "ஏகலைவன்", "ஏகா", "ஏகாங்கா", "ஏகாம்பரம்", "ஏக்ராம்", "ஏந்தல்", "ஏழிசை", "ஏழிசைக்கதிர்", "ஏழிசைக்கனல்", "ஏழிசைக்கனி", "ஏழிசைக்கலை", "ஏழிசைக்குமரன்", "ஏழிசைக்குரிசில்", "ஐயனாரப்பன்", "ஐயன்", "ஐயப்பன்", "ஐயம்பெருமான்", "ஐயா", "ஐஸக்", "ஒட்டக்கூத்தன்", "ஒட்டக்கூத்தர்", "ஒளி", "ஒளிஒவியம்", "ஒளிமதி", "ஒளியன்", "ஒளியழகன்", "ஒளியவன்", "ஒளிர்நிலவன்", "ஒளிவேந்தன்", "ஒள்ளறிவன்", "கங்கைகொண்டான்", "கசரா", "கடம்பன்", "கடம்பா", "கடர்", "கடற்கோ", "கடலரசன்", "கடலிறை", "கடல்வேந்தன்", "கணியன்", "கணேஷ்", "கணைக்கால்", "கண்ணதாசன்", "கண்ணன்", "கண்ணப்பன்", "கண்ணாயிரம்", "கண்ணுக்கினியன்", "கண்ணையன்", "கண்மணி", "கண்மதியன்", "கண்மயா", "கதிரழகன்", "கதிரவன்", "கதிரொளி", "கதிரேசன்", "கதிரோன்", "கதிர்", "சகுந்தன்", "சக்கரவர்த்தி", "சக்திவேல", "சங்கன்", "சங்கிலி", "சசி", "சசிகாந்த்", "சசிகுமார்", "சசிதரன்", "சச்சிதாநந்தம்", "சஜீத்", "சஞ்சய்", "சஞ்ஜோக்", "சடகோபன்", "சதிஷ்வரன்", "சதீஷ்", "சத்தியலிங்கம்", "சத்யநாராயணன்", "சத்யமூர்த்தி", "சத்யராஐ;", "சத்யவாணன்", "சத்யவிரதன்", "சத்யா", "சத்யேந்திரா", "சத்ருகணன்", "சந்தனு", "சந்தானம்", "சந்திர", "தக்ஷேஷ்", "தங்கக்கதிர்", "தங்கச்சுடர்", "தங்கதுரை", "தங்கத்தமிழன்", "தங்கத்தமிழ்", "தங்கபாண்டியன்", "தங்கமகன்", "தங்கமணி", "தங்கமதி", "தங்கமுத்து", "தங்கம்", "தங்கராஐ;", "தங்கவேலன்", "தங்கவேலு", "தங்கவேல்", "தங்கேஷ்வரன்", "தசரதன்", "தஞ்சைவாணன்", "தணி;சேரன்", "தணிகேவேலன்", "தணிகைச்செல்வன்", "தணிகைத்தம்பி", "தணிகைநம்பி", "தணிகைமணி", "தணிகைமுருகன்", "தணிகைமுருகு", "தணிகையரசு", "தணிகைவேலன்", "தணிகைவேல்", "நக்கீரத்தமிழன்", "நக்கீரன்", "நக்கீரர்", "நச்சினார்க்கினியன்", "நச்சினார்க்கினியர்", "நடராஜன்", "நடவரசன்", "நடேஷ்", "நட்புச்செல்வன்", "நந்தன்", "நந்தா", "நன்னன்", "நன்னாடன்", "நன்னாயகம்", "நன்னி", "நன்னூலன்", "நன்னெறியன்", "நன்மணி", "நன்மதி", "நன்மாறன்", "நன்மொழியன்", "நம்பி", "நம்பிகுட்டுவன்", "நம்பியருள்", "நம்பியூரான்", 
"நம்பிள்ளை", "நம்பெருமான்", "நம்பெருமாள்", "நம்மாழ்வார்", "பகத்", "பகலவன்", "பகவந்த்", "பகீரதன்", "பகுகுனன்", "பகுதானன்", "பகுபலி", "பகுபாலன்", "பகுபுத்ரன்", "பகுப்ரியன்", "பகுமான்யன்", "பகுமித்ரன்", "பக்தவச்சலம்", "பசந்த்", "பசவராஜ்", "பசுபதி", "பச்சையப்பன்", "பஜன்", "பஜரங்கன்", "பதிரன்", "பதுமனார்", "பத்மநாபன்", "பத்ரநிதி", "பத்ராகன்", "பத்ராயணர்", "பத்ரி", "பத்ரிநாதன்", "பத்ரிநாராயணன்", "பத்ருஹரி", "பந்துல்", "மகிணன்", "மகிழரசன்", "மகிழரசு", "மகிழ்கோ", "மகிழ்ச்சிக்கு", "மகிழ்நன்", "மகிழ்ந்தன்", "மணவழகன்", "மணவாளன்", "மணி", "மணிகண்டன்", "மணிக்கதிர்", "மணிக்கொடி", "மணிக்கோ", "மணிக்கோவன்", "மணிச்சுடர்", "மணிநிலவன்", "மணிப்பவளன்", "மணிமன்றவாணன்", "மணிமலை", "மணிமார்பன்", "மணிமாறன்", "மணிமுடி", "மணிமுத்து", "மணிமொழியன்", "மணியன்", "மணியன்செல்வம்", "மணியரசன்", "மணிரத்ணம்", "மணிவண்ணன்", "யஷ்வந்த்", "யாழரசன்", "யாழ்பாடி", "யாழ்ப்பாணன்", "யாழ்வாணன்", "யூகேந்திரன்", "யூகேஷ்", "யூசுப்", "யூவராஐன்", "யூவராஜ்", "யேவான்", "யோகலிங்கம்", "யோகாநந்தன்", "யோகேஷ்", "ரஃபி", "ரகு", "ரகுபதி", "ரகுராம்", "ரகுவரன்", "ரங்கசாமி", "ரஜினி", "ரத்தினம்", "ரமணன்", "ரமணி", "ரமேஷ்", "ரமேஷ்கண்ணா", "ரவி", "ரவின்", "ரஷஷுத்", "ராகவன்", "ராகவ்", "ராஜ", "ராஜகுரு", "ராஜகோபால்", "ராஜசேகர்", "ராஜதுரை", "ராஜப்பிரியன்", "ராஜவேலு", "ராஜா", "ராஜீவ்", "ராஜேஷ்", "ராஜ்குமார்", "ராபர்ட்", "ராமசாமி", "வசந்த்", "வசந்த்குமார்", "வடிவேற்கரசன்", "வடிவேலன்", "வடிவேல்", "வடிவேல்முருகன்", "வணங்காமுடி", "வண்டார்குழலி", "வண்ணநிலவன்", "வண்ணன்", "வரதராஐன்", "வரதராஜ்", "வருண்குமாH", "வருனேஷ்", "வல்லரசு", "வல்லவன்", "வளவன்", "வள்ளல்", "வள்ளிமணாளன்", "வள்ளுவன்", "வள்ளுவர்", "வழுதி", "வஷிஷ்டர்", "வாகீசன்", "வாசு", "வாசுதேவன்", "வாஞ்சினாதன்", "வாணன்", "வானத்து", "வானமாமலை", "ஷகுந்த்", "ஷசி", "ஷத்ருஞ்ஜய்", "ஷபீர்", "ஷம்பு", "ஷரண்", "ஷலின்", "ஷஷாங்க்", "ஷஸ்வத்", "ஷா", "ஷானவாஸ்", "ஷிங்", "ஷியாம்", "ஷிஷிர்", "ஷைலேந்திரா", "ஷைலேஷ்", "ஷைல்", "ஷோபன்", "ஷ்னேகல்", "ஷ்யாமல்", "ஷ்யாம்", "ஷ்ராவண்", "ஷ்வேதங்க்", "ஸ்டீபன்", "ஸ்ரீகாந்த்", "ஸ்ரீசிவநாராயணன்", "ஸ்ரீதர்", "ஸ்ரீநிவாசன்", "ஸ்ரீநிவாஸ்", "ஸ்ரீபிரசாத்", "ஸ்ரீராம்", "ஸ்வப்நில்", 
"ஹம்ரிஷ்", "ஹரி", "ஹரிகரண்", "ஹரிதாஸ்", "ஹரிஷ்", "ஹரிஹரன்", "ஹவினாஷன்", "ஹஷ்விந்ரன்", "ஹாருண்", ) first_names_female = ( "அகத்தழகி", "அகமணி", "அகமதி", "அகலிகை", "அகல்யா", "அகல்விழி", "அகவழகு", "அகவொளி", "அகானா", "அகிலா", "அகிலாண்டம்", "அகிலேஷ்வரி", "அகில்", "அக்னிகா", "அக்னிமுகி", "அக்னேயி", "அக்ஷயா", "அக்ஷரா", "அக்ஷா", "அக்ஷிதா", "அங்கம்மாள்", "அங்கயர்க்கண்ணி", "அங்கவை", "அங்கால", "அங்கையர்க்கரசி", "அசிரா", "அச்சலா", "அஜந்தா", "ஆகமா", "ஆசிரா", "ஆசைச்செல்வி", "ஆஞ்சல்", "ஆடற்கொடி", "ஆடற்கோமகள்", "ஆடற்செல்வி", "ஆடலரசி", "ஆடலழகி", "ஆடல்", "ஆட்டநத்தி", "ஆண்டாள்", "ஆதர்ஷா", "ஆதி", "ஆதிசக்தி", "ஆதித்தமிழ்", "ஆதித்தா", "ஆதித்தி", "ஆதித்யபிரபா", "ஆதிமகள்", "ஆதிமறை", "ஆதிமொழி", "ஆதியரசி", "இக்ஷிதா", "இசை", "இசைக்கதிர்", "இசைக்கொடி", "இசைக்கோமகள்", "இசைச்செல்வம்", "இசைச்செல்வி", "இசைத்தேவி", "இசைநேயம்", "இசைமகள்", "இசைமறை", "இசைமுரசு", "இசைமொழி", "இசையமுதம்", "இசையமுது", "இசையரசி", "இசையொளி", "இசைவாணி", "இதயா", "இந்திரஜா", "இந்திரா", "இந்திராக்ஷி", "இந்திராணி", "இந்து", "இந்துகலா", "இந்துகா", "இந்துஜா", "இந்துமதி", "இந்துமுகி", "இந்துவதனி", "உச்சிதா", "உஜிலா", "உண்மை", "உண்மையொளி", "உண்மைவிளம்பி", "உதயசந்திரிகா", "உதயா", "உதயாதி", "உத்தமி", "உத்பலா", "உன்னதி", "உன்மைமொழி", "உபாஸனா", "உமயாள்", "உமா", "உமாமகேஷ்வரி", "உமை", "உமையம்மை", "உமையரசி", "உமையாள்", "உயிரோவியம்", "உலக", "உலகநங்கை", "உலகநேயம்", "உலகமணி", "உலகமதி", "உலகம்மை", "உலகிறை", "உலகொளி", "உலகோவியம்", "ஊர்மிளா", "ஊர்வசி", "எமலி", "எமல்டா", "எமில்டா", "எயினி", "எரிதழல்", "எரியீட்டி", "எல்லி", "எழிசை", "எழினி", "எழிற்கதிர்", "எழிற்குமரி", "எழிற்குவளை", "எழிற்கோமகள்", "எழிற்செல்வம்", "எழிற்செல்வி", "எழிலம்மை", "எழிலரசி", "எழிலழகி", "எழிலி", "எழிலிசை", "எழிலேந்தி", "எழிலோவியம்", "எழில்", "எழில்நிலவு", "எழில்மகள்", "எழில்மங்கை", "ஏகாபரனா", "ஏந்திசை", "ஏந்திழை", "ஏனாக்ஷி", "ஏறுநடை", "ஏலா", "ஏழிசை", "ஏழிசைக்கதிர்", "ஏழிசைக்கனல்", "ஏழிசைக்கனி", "ஏழிசைக்கலை", "ஏழிசைக்குமரி", "ஏழிசைக்கொடி", "ஏழிசைக்கோமகள்", "ஏழிசைச்சுடர்", "ஏழிசைச்செல்வம்", "ஏழிசைச்செல்வி", "ஏழிசைதேவி", "ஏழிசைத்தென்றல்", "ஏழிசைநாயகி", "ஏழிசைநேயம்", 
"ஏழிசைப்பாமகள்", "ஏழிசைப்பாவை", "ஏழிசைப்புதல்வி", "ஏழிசைப்பொழில்", "ஏழிசைமணி", "ஏழிசைமதி", "ஏழிசைமுரசு", "ஐக்கியா", "ஐராவதி", "ஐஸ்வர்யா", "ஒளவை", "ஒளிசுடர", "ஒளிமுகம்", "ஒளிவாணி", "கஐலட்சுமி", "கங்கா", "கங்கை", "கஜோல்", "கஜ்ரி", "கடற்கோமகள்", "கடலரசி", "கடலிறை", "கணையாழி", "கண்ணகி", "கண்ணம்மா", "கண்ணிமை", "கண்மணி", "கண்மதி", "கண்மலர்", "கதிரழகி", "கதிர்", "கதிர்க்குமரி", "கதிர்ச்செல்வி", "கதிர்மாமணி", "கத்ரினா", "கனகவள்ளி", "கனகா", "கனல்", "கனல்மொழி", "கனிகா", "கனிமதி", "கனிமொழி", "கனியமுது", "கனிரா", "சஃபா", "சஃபியா", "சகீனா", "சகுண்", "சக்தி", "சங்கமித்ரா", "சங்கமித்ரை", "சங்கரி", "சங்கவி", "சங்கவை", "சங்காரம்", "சங்கீதா", "சங்கு", "சங்குக்கொடி", "சங்குப்பூ", "சங்குப்பூவழகி", "சங்குமணி", "சங்குமதி", "சங்குமாலை", "சங்கெழில்", "சங்கொலி", "சசிகலா", "சசிரேகா", "சச்சி", "சஜனி", "சஞ்சு", "சதிகா", "சத்தியவாணி", "சந்தனம்", "சந்தானலட்சுமி", "தங்கக்கதிர்", "தங்கச்சுடர்", "தங்கத்தமிழ்", "தங்கபாப்பா", "தங்கபுஷ்பம்", "தங்கமகள்", "தங்கமணி", "தங்கமதி", "தங்கம்", "தங்கம்மா", "தங்கம்மாள்", "தடாகை", "தணிகைச்செல்வி", "தண்ணிலவு", "தண்ணொளி", "தண்மதி", "தத்தை", "தனக்கோட்டி", "தனபாக்கியம்", "தனலட்சுமி", "தனஸ்ரீ", "தனித்தமிழ்", "தனுப்பிரியா", "தனுஷா", "தனுஷ்கா", "தனுஷ்ரி", "தன்சி", "தன்மானம்", "தன்வி", "தமயந்தி", "நங்கை", "நடவரசி", "நதியா", "நந்திகா", "நந்திதா", "நந்தினி", "நன்முத்து", "நன்மொழி", "நப்பசலையார்", "நயன்தாரா", "நர்மதா", "நறுமலர்", "நறுமுகை", "நற்றிணை", "நல்ல", "நல்லிசை", "நளாயினி", "நளினி", "நவிதா", "நவீனா", "நவ்யா", "நாகதேவி", "நாகமணி", "நாகமதி", "நாகம்மாள்", "நாகம்மை", "நாகவல்லி", "நாச்சி", "நாச்சியார்", "நாதவேணி", "பகவதி", "பகவத்", "பச்சையம்மாள்", "பஞ்சாமிர்தம்", "பதுமை", "பத்மபிரியா", "பத்மா", "பத்மினி", "பனிமலர்", "பன்னீர்", "பன்னீர்செல்வி", "பபிதா", "பரணி", "பரமேஷ்வரி", "பரிமளம்", "பரிமளா", "பல்லவி", "பழகுத்தமிழ்", "பவதா", "பவதாரணி", "பவளக்கொடி", "பவளமலை", "பவளமல்லி", "பவளம்", "பவழமொழி", "பவானி", "பவித்ரா", "பாக்கியலக்ஷ்மி", "பாக்யஸ்ரீ", "மகஷேவரி", "மகிழினி", "மகிழ்", "மகிழ்வதனி", "மங்களம்", "மங்களா", "மங்கை", "மங்கையர்க்கரசி", "மஞ்சனா", "மஞ்சரி", "மஞ்சள்", "மஞ்சு", 
"மணவழகி", "மணி", "மணிகா", "மணிக்கதிர்", "மணிக்கொடி", "மணிச்சுடர்", "மணிப்பவளம்", "மணிமகள்", "மணிமலர்", "மணிமாலா", "மணிமுகில்", "மணிமேகலை", "மணிமொழி", "மணியம்மை", "மணியரசி", "மணியெழில்", "மணியொளி", "யசோதா", "யமுனா", "யஷ்வினி", "யாமினி", "யாளினி", "யாழரசி", "யாழிசை", "யாழினி", "யாழின்", "யாழைப்போல்", "யாழ்நங்கை", "யாழ்மொழி", "யூதிகா", "யூவரானி", "யேகம்மை", "யோகமலர்", "யோகராணி", "யோகலட்சுமி", "யோகவல்லி", "யோஸ்னிதா", "யோஹிதா", "ரகசியா", "ரக்ஷனா", "ரக்ஷிகா", "ரக்ஷிதா", "ரக்ஷினி", "ரங்கநாயகி", "ரஞ்சனா", "ரஞ்சிதம்", "ரஞ்சிதா", "ரஞ்சினி", "ரட்சகா", "ரதவனி", "ரதி", "ரனித்தா", "ரமணி", "ரம்ஜான்", "ரம்யா", "ராகினி", "ராசாத்தி", "ராஜகுமாரி", "ராஜலட்சுமி", "ராஜி", "ராஜேஷ்வரி", "ராணி", "ராதா", "ராதிகா", "ரித்திகா", "ரீஜா", "ரீட்டா", "ரீனா", "வகேஷ்வரி", "வசந்தசேனா", "வசந்தா", "வசந்தி", "வசனா", "வசுதா", "வசுதாரிணி", "வசுமதி", "வஞ்சி", "வஞ்சிக்கொடி", "வஞ்சிப்பாமகள்", "வஞ்சிமகள்", "வடிவரசி", "வடிவழகி", "வடிவு", "வடிவுக்கரசி", "வண்டமிழ்", "வண்டார்குழலி", "வண்ணக்கதிர்", "வண்ணமதி", "வதனா", "வதனி", "வத்ஸலா", "வனிதா", "வமகேஷி", "வருணி", "வருனிதா", "வர்ணவதி", "வர்ஷா", "வர்ஷினி", "ஷக்தி", "ஷண்சிலாதேவி", "ஷதா", "ஷதாக்ஷி", "ஷந்தோஷி", "ஷந்ஸா", "ஷபரி", "ஷப்னம்", "ஷமா", "ஷரணி", "ஷரினி", "ஷர்மிதா", "ஷர்மிளா", "ஷர்மிஸ்தா", "ஷர்வானி", "ஷஷி", "ஷாந்தலா", "ஷாந்தி", "ஷானன்", "ஷாமினி", "ஷாரன்", "ஷாலிகா", "ஷாலினி", "ஷாலு", "ஷாஷினி,", "ஷாஹ்னா", "ஷிஃபாலி", "ஷிகா", "ஷிச்சி", "ஷிபானி", "ஸகஸ்ரா", "ஸங்கரி", "ஸத்யா", "ஸத்வரி", "ஸன்யுக்தா", "ஸபீனா", "ஸயூரி", "ஸரயூ", "ஸரளா", "ஸரஸ்வதி", "ஸரிகா", "ஸஹிரா", "ஸுபத்திரை", "ஸுப்ரியா", "ஸுப்ரீத்", "ஸுமா", "ஸுரபி", "ஸெடெஃபானியா", "ஸெடெபானி", "ஸௌரா", "ஸ்கந்தா", "ஸ்திரிரத்னா", "ஸ்துதி", "ஸ்னேஹல்", "ஸ்ப்ரிஹா", "ஸ்மிதா", "ஸ்மிருதி", "ஸ்மேரா", "ஸ்ராவந்தி", "ஸ்ராவனி", "ஸ்ரீகமா", "ஸ்ரீகலா", "ஸ்ரீகா", "ஸ்ரீதேவி", "ஸ்ரீநிதி", "ஸ்ரீனா", "ஸ்ரீமயி", "ஸ்ரீமா", "ஸ்ரீயா", "ஸ்ரீயாதித்யா", "ஸ்ரீலக்ஷ்மி", "ஸ்ரீலா", "ஸ்ரீலேகா", "ஸ்ரீவல்லி", "ஸ்ரீவித்யா", "ஹசினிகா", "ஹனிஷா", "ஹன்சா", "ஹன்யா", "ஹன்ஷிகா", "ஹம்சவர்த்தினி", "ஹம்சவானி", "ஹம்சா", "ஹரிதா", "ஹரினி", "ஹரினிவேதா", "ஹர்ஷா", "ஹர்ஷிகா", 
"ஹர்ஷிதா", "ஹர்ஷினி", "ஹலிமா", "ஹவிஷ்மதி", "ஹஸிதா", "ஹஸினா", "ஹஸ்னா", "ஹாசினி", "ஹிரண்யா", "ஹெலன்", "ஹேமந்தினி", "ஹேமலதா", "ஹேமா", ) first_names = first_names_male + first_names_female last_names = first_names
Provider
python
django__django
tests/forms_tests/field_tests/test_imagefield.py
{ "start": 589, "end": 4136 }
class ____(FormFieldAssertionsMixin, SimpleTestCase): def test_imagefield_annotate_with_image_after_clean(self): f = ImageField() img_path = get_img_path("filepath_test_files/1x1.png") with open(img_path, "rb") as img_file: img_data = img_file.read() img_file = SimpleUploadedFile("1x1.png", img_data) img_file.content_type = "text/plain" uploaded_file = f.clean(img_file) self.assertEqual("PNG", uploaded_file.image.format) self.assertEqual("image/png", uploaded_file.content_type) def test_imagefield_annotate_with_bitmap_image_after_clean(self): """ This also tests the situation when Pillow doesn't detect the MIME type of the image (#24948). """ from PIL.BmpImagePlugin import BmpImageFile try: Image.register_mime(BmpImageFile.format, None) f = ImageField() img_path = get_img_path("filepath_test_files/1x1.bmp") with open(img_path, "rb") as img_file: img_data = img_file.read() img_file = SimpleUploadedFile("1x1.bmp", img_data) img_file.content_type = "text/plain" uploaded_file = f.clean(img_file) self.assertEqual("BMP", uploaded_file.image.format) self.assertIsNone(uploaded_file.content_type) finally: Image.register_mime(BmpImageFile.format, "image/bmp") def test_file_extension_validation(self): f = ImageField() img_path = get_img_path("filepath_test_files/1x1.png") with open(img_path, "rb") as img_file: img_data = img_file.read() img_file = SimpleUploadedFile("1x1.txt", img_data) with self.assertRaisesMessage( ValidationError, "File extension “txt” is not allowed." ): f.clean(img_file) def test_corrupted_image(self): f = ImageField() img_file = SimpleUploadedFile("not_an_image.jpg", b"not an image") msg = ( "Upload a valid image. The file you uploaded was either not an " "image or a corrupted image." 
) with self.assertRaisesMessage(ValidationError, msg): f.clean(img_file) with TemporaryUploadedFile( "not_an_image_tmp.png", "text/plain", 1, "utf-8" ) as tmp_file: with self.assertRaisesMessage(ValidationError, msg): f.clean(tmp_file) def test_widget_attrs_default_accept(self): f = ImageField() # Nothing added for non-FileInput widgets. self.assertEqual(f.widget_attrs(Widget()), {}) self.assertEqual(f.widget_attrs(FileInput()), {"accept": "image/*"}) self.assertEqual(f.widget_attrs(ClearableFileInput()), {"accept": "image/*"}) self.assertWidgetRendersTo( f, '<input type="file" name="f" accept="image/*" required id="id_f" />' ) def test_widget_attrs_accept_specified(self): f = ImageField(widget=FileInput(attrs={"accept": "image/png"})) self.assertEqual(f.widget_attrs(f.widget), {}) self.assertWidgetRendersTo( f, '<input type="file" name="f" accept="image/png" required id="id_f" />' ) def test_widget_attrs_accept_false(self): f = ImageField(widget=FileInput(attrs={"accept": False})) self.assertEqual(f.widget_attrs(f.widget), {}) self.assertWidgetRendersTo( f, '<input type="file" name="f" required id="id_f" />' )
ImageFieldTest
python
getsentry__sentry
tests/acceptance/test_organization_developer_settings.py
{ "start": 2160, "end": 5109 }
class ____(AcceptanceTestCase): """ As a developer, I can edit an existing integration """ def setUp(self) -> None: super().setUp() self.user = self.create_user("foo@example.com") self.org = self.create_organization(name="Tesla", owner=self.user) self.team = self.create_team(organization=self.org, name="Tesla Motors") self.project = self.create_project(organization=self.org, teams=[self.team], name="Model S") self.sentry_app = self.create_sentry_app( name="Tesla App", organization=self.org, schema={"elements": [self.create_issue_link_schema()]}, ) self.login_as(self.user) self.org_developer_settings_path = ( f"/settings/{self.org.slug}/developer-settings/{self.sentry_app.slug}" ) def load_page(self, url: str) -> None: self.browser.get(url) self.browser.wait_until_not('[data-test-id="loading-indicator"]') def test_edit_integration_schema(self) -> None: self.load_page(self.org_developer_settings_path) textarea = self.browser.element('textarea[name="schema"]') textarea.clear() textarea.send_keys("{}") self.browser.click('[aria-label="Save Changes"]') self.browser.wait_until(".ref-success") self.browser.wait_until('[data-test-id="tesla-app"]') link = self.browser.find_element(by=By.LINK_TEXT, value="Tesla App") link.click() self.browser.wait_until_not('[data-test-id="loading-indicator"]') schema = self.browser.element('textarea[name="schema"]') assert schema.text == "" def test_remove_tokens_internal_app(self) -> None: internal_app = self.create_internal_integration(name="Internal App", organization=self.org) self.create_internal_integration_token(user=self.user, internal_integration=internal_app) url = f"/settings/{self.org.slug}/developer-settings/{internal_app.slug}" self.load_page(url) self.browser.click('[aria-label="Revoke"]') self.browser.click('[data-test-id="confirm-button"]') self.browser.wait_until(".ref-success") assert self.browser.find_element( by=By.XPATH, value='//p[contains(text(), "You haven\'t created any authentication tokens yet.")]', ) def 
test_add_tokens_internal_app(self) -> None: internal_app = self.create_internal_integration(name="Internal App", organization=self.org) url = f"/settings/{self.org.slug}/developer-settings/{internal_app.slug}" self.load_page(url) assert self.browser.element_exists('[aria-label="Generated token"]') is False self.browser.click('[data-test-id="token-add"]') self.browser.wait_until(".ref-success") assert len(self.browser.elements('[aria-label="Generated token"]')) == 1
OrganizationDeveloperSettingsEditAcceptanceTest
python
ApeWorX__ape
src/ape/plugins/_utils.py
{ "start": 5080, "end": 8639 }
class ____(BaseModel): """ Metadata per plugin type, including information for all plugins. """ core: "PluginGroup" available: "PluginGroup" installed: "PluginGroup" third_party: "PluginGroup" @classmethod def load(cls, plugin_manager: "PluginManager", include_available: bool = True): plugins = plugin_manager.registered_plugins if include_available: plugins = plugins.union(github_client.available_plugins) return cls.from_package_names(plugins, include_available=include_available) @classmethod def from_package_names( cls, packages: Iterable[str], include_available: bool = True, trusted_list: Optional[Iterable] = None, ) -> "PluginMetadataList": PluginMetadataList.model_rebuild() core = PluginGroup(plugin_type=PluginType.CORE) available = PluginGroup(plugin_type=PluginType.AVAILABLE) installed = PluginGroup(plugin_type=PluginType.INSTALLED) third_party = PluginGroup(plugin_type=PluginType.THIRD_PARTY) for package_id in packages: parts = package_id.split("==") name = parts[0] version = parts[1] if len(parts) == 2 else None plugin = PluginMetadata(name=name.strip(), version=version) if plugin.in_core: core.plugins[name] = plugin continue # perf: only check these once. 
is_installed = plugin.is_installed is_trusted = plugin.check_trusted(use_web=False, trusted_list=trusted_list) is_available = include_available and plugin.is_available if include_available and is_available and not is_installed: available.plugins[name] = plugin elif is_installed and not plugin.in_core and is_trusted: installed.plugins[name] = plugin elif is_installed: third_party.plugins[name] = plugin else: logger.error(f"'{plugin.name}' is not a plugin.") return cls(core=core, available=available, installed=installed, third_party=third_party) def __str__(self) -> str: return self.to_str() def to_str( self, include: Optional[Sequence[PluginType]] = None, include_version: bool = True, output_format: OutputFormat = OutputFormat.DEFAULT, ) -> str: representation = ApePluginsRepr(self, include=include, output_format=output_format) return str(representation) @property def all_plugins(self) -> Iterator["PluginMetadata"]: yield from self.core.plugins.values() yield from self.available.plugins.values() yield from self.installed.plugins.values() yield from self.third_party.plugins.values() def get_plugin(self, name: str, check_available: bool = True) -> Optional["PluginMetadata"]: name = name if name.startswith("ape_") else f"ape_{name}" if name in self.core.plugins: return self.core.plugins[name] elif name in self.installed.plugins: return self.installed.plugins[name] elif name in self.third_party.plugins: return self.third_party.plugins[name] elif check_available and name in self.available.plugins: return self.available.plugins[name] return None def _get_available_plugins(): # NOTE: Wrapped in a method so can GitHub HTTP can be avoided in tests. return github_client.available_plugins
PluginMetadataList
python
kamyu104__LeetCode-Solutions
Python/24-game.py
{ "start": 1042, "end": 2030 }
class ____(object): def judgePoint24(self, nums): """ :type nums: List[int] :rtype: bool """ def dfs(nums): if len(nums) == 1: return nums[0] == 24 ops = [add, sub, mul, truediv] for i in xrange(len(nums)): for j in xrange(len(nums)): if i == j: continue next_nums = [nums[k] for k in xrange(len(nums)) if i != k != j] for op in ops: if ((op is add or op is mul) and j > i) or \ (op == truediv and nums[j] == 0): continue next_nums.append(op(nums[i], nums[j])) if dfs(next_nums): return True next_nums.pop() return False return dfs(map(Fraction, nums))
Solution2
python
gevent__gevent
src/greentest/3.11/test_socket.py
{ "start": 7130, "end": 10623 }
class ____: """Threadable Test class The ThreadableTest class makes it easy to create a threaded client/server pair from an existing unit test. To create a new threaded class from an existing unit test, use multiple inheritance: class NewClass (OldClass, ThreadableTest): pass This class defines two new fixture functions with obvious purposes for overriding: clientSetUp () clientTearDown () Any new test functions within the class must then define tests in pairs, where the test name is preceded with a '_' to indicate the client portion of the test. Ex: def testFoo(self): # Server portion def _testFoo(self): # Client portion Any exceptions raised by the clients during their tests are caught and transferred to the main thread to alert the testing framework. Note, the server setup function cannot call any blocking functions that rely on the client thread during setup, unless serverExplicitReady() is called just before the blocking call (such as in setting up a client/server connection and performing the accept() in setUp(). """ def __init__(self): # Swap the true setup function self.__setUp = self.setUp self.setUp = self._setUp def serverExplicitReady(self): """This method allows the server to explicitly indicate that it wants the client thread to proceed. This is useful if the server is about to execute a blocking routine that is dependent upon the client thread during its setup routine.""" self.server_ready.set() def _setUp(self): self.enterContext(threading_helper.wait_threads_exit()) self.server_ready = threading.Event() self.client_ready = threading.Event() self.done = threading.Event() self.queue = queue.Queue(1) self.server_crashed = False def raise_queued_exception(): if self.queue.qsize(): raise self.queue.get() self.addCleanup(raise_queued_exception) # Do some munging to start the client test. 
methodname = self.id() i = methodname.rfind('.') methodname = methodname[i+1:] test_method = getattr(self, '_' + methodname) self.client_thread = thread.start_new_thread( self.clientRun, (test_method,)) try: self.__setUp() except: self.server_crashed = True raise finally: self.server_ready.set() self.client_ready.wait() self.addCleanup(self.done.wait) def clientRun(self, test_func): self.server_ready.wait() try: self.clientSetUp() except BaseException as e: self.queue.put(e) self.clientTearDown() return finally: self.client_ready.set() if self.server_crashed: self.clientTearDown() return if not hasattr(test_func, '__call__'): raise TypeError("test_func must be a callable function") try: test_func() except BaseException as e: self.queue.put(e) finally: self.clientTearDown() def clientSetUp(self): raise NotImplementedError("clientSetUp must be implemented.") def clientTearDown(self): self.done.set() thread.exit()
ThreadableTest
python
kamyu104__LeetCode-Solutions
Python/count-substrings-that-differ-by-one-character.py
{ "start": 33, "end": 874 }
class ____(object): def countSubstrings(self, s, t): """ :type s: str :type t: str :rtype: int """ def count(i, j): # for each possible alignment, count the number of substrs that differ by 1 char result = left_cnt = right_cnt = 0 # left and right consecutive same counts relative to the different char for k in xrange(min(len(s)-i, len(t)-j)): right_cnt += 1 if s[i+k] != t[j+k]: left_cnt, right_cnt = right_cnt, 0 # prev_i = i+k-prev+1 result += left_cnt # target substrs are [s[left_i+c:i+k+1] for c in xrange(left_cnt)] return result return sum(count(i, 0) for i in xrange(len(s))) + \ sum(count(0, j) for j in xrange(1, len(t)))
Solution
python
scikit-learn__scikit-learn
sklearn/calibration.py
{ "start": 1996, "end": 27988 }
class ____(ClassifierMixin, MetaEstimatorMixin, BaseEstimator): """Calibrate probabilities using isotonic, sigmoid, or temperature scaling. This class uses cross-validation to both estimate the parameters of a classifier and subsequently calibrate a classifier. With `ensemble=True`, for each cv split it fits a copy of the base estimator to the training subset, and calibrates it using the testing subset. For prediction, predicted probabilities are averaged across these individual calibrated classifiers. When `ensemble=False`, cross-validation is used to obtain unbiased predictions, via :func:`~sklearn.model_selection.cross_val_predict`, which are then used for calibration. For prediction, the base estimator, trained using all the data, is used. This is the prediction method implemented when `probabilities=True` for :class:`~sklearn.svm.SVC` and :class:`~sklearn.svm.NuSVC` estimators (see :ref:`User Guide <scores_probabilities>` for details). Already fitted classifiers can be calibrated by wrapping the model in a :class:`~sklearn.frozen.FrozenEstimator`. In this case all provided data is used for calibration. The user has to take care manually that data for model fitting and calibration are disjoint. The calibration is based on the :term:`decision_function` method of the `estimator` if it exists, else on :term:`predict_proba`. Read more in the :ref:`User Guide <calibration>`. In order to learn more on the CalibratedClassifierCV class, see the following calibration examples: :ref:`sphx_glr_auto_examples_calibration_plot_calibration.py`, :ref:`sphx_glr_auto_examples_calibration_plot_calibration_curve.py`, and :ref:`sphx_glr_auto_examples_calibration_plot_calibration_multiclass.py`. Parameters ---------- estimator : estimator instance, default=None The classifier whose output need to be calibrated to provide more accurate `predict_proba` outputs. The default classifier is a :class:`~sklearn.svm.LinearSVC`. .. 
versionadded:: 1.2 method : {'sigmoid', 'isotonic', 'temperature'}, default='sigmoid' The method to use for calibration. Can be: - 'sigmoid', which corresponds to Platt's method (i.e. a binary logistic regression model). - 'isotonic', which is a non-parametric approach. - 'temperature', temperature scaling. Sigmoid and isotonic calibration methods natively support only binary classifiers and extend to multi-class classification using a One-vs-Rest (OvR) strategy with post-hoc renormalization, i.e., adjusting the probabilities after calibration to ensure they sum up to 1. In contrast, temperature scaling naturally supports multi-class calibration by applying `softmax(classifier_logits/T)` with a value of `T` (temperature) that optimizes the log loss. For very uncalibrated classifiers on very imbalanced datasets, sigmoid calibration might be preferred because it fits an additional intercept parameter. This helps shift decision boundaries appropriately when the classifier being calibrated is biased towards the majority class. Isotonic calibration is not recommended when the number of calibration samples is too low ``(≪1000)`` since it then tends to overfit. .. versionchanged:: 1.8 Added option 'temperature'. cv : int, cross-validation generator, or iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross-validation, - integer, to specify the number of folds. - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if ``y`` is binary or multiclass, :class:`~sklearn.model_selection.StratifiedKFold` is used. If ``y`` is neither binary nor multiclass, :class:`~sklearn.model_selection.KFold` is used. Refer to the :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. 
n_jobs : int, default=None Number of jobs to run in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. Base estimator clones are fitted in parallel across cross-validation iterations. See :term:`Glossary <n_jobs>` for more details. .. versionadded:: 0.24 ensemble : bool, or "auto", default="auto" Determines how the calibrator is fitted. "auto" will use `False` if the `estimator` is a :class:`~sklearn.frozen.FrozenEstimator`, and `True` otherwise. If `True`, the `estimator` is fitted using training data, and calibrated using testing data, for each `cv` fold. The final estimator is an ensemble of `n_cv` fitted classifier and calibrator pairs, where `n_cv` is the number of cross-validation folds. The output is the average predicted probabilities of all pairs. If `False`, `cv` is used to compute unbiased predictions, via :func:`~sklearn.model_selection.cross_val_predict`, which are then used for calibration. At prediction time, the classifier used is the `estimator` trained on all the data. Note that this method is also internally implemented in :mod:`sklearn.svm` estimators with the `probabilities=True` parameter. .. versionadded:: 0.24 .. versionchanged:: 1.6 `"auto"` option is added and is the default. Attributes ---------- classes_ : ndarray of shape (n_classes,) The class labels. n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. .. versionadded:: 1.0 calibrated_classifiers_ : list (len() equal to cv or 1 if `ensemble=False`) The list of classifier and calibrator pairs. - When `ensemble=True`, `n_cv` fitted `estimator` and calibrator pairs. `n_cv` is the number of cross-validation folds. 
- When `ensemble=False`, the `estimator`, fitted on all the data, and fitted calibrator. .. versionchanged:: 0.24 Single calibrated classifier case when `ensemble=False`. See Also -------- calibration_curve : Compute true and predicted probabilities for a calibration curve. References ---------- .. [1] B. Zadrozny & C. Elkan. `Obtaining calibrated probability estimates from decision trees and naive Bayesian classifiers <https://cseweb.ucsd.edu/~elkan/calibrated.pdf>`_, ICML 2001. .. [2] B. Zadrozny & C. Elkan. `Transforming Classifier Scores into Accurate Multiclass Probability Estimates <https://web.archive.org/web/20060720141520id_/http://www.research.ibm.com:80/people/z/zadrozny/kdd2002-Transf.pdf>`_, KDD 2002. .. [3] J. Platt. `Probabilistic Outputs for Support Vector Machines and Comparisons to Regularized Likelihood Methods <https://www.researchgate.net/profile/John-Platt-2/publication/2594015_Probabilistic_Outputs_for_Support_Vector_Machines_and_Comparisons_to_Regularized_Likelihood_Methods/links/004635154cff5262d6000000/Probabilistic-Outputs-for-Support-Vector-Machines-and-Comparisons-to-Regularized-Likelihood-Methods.pdf>`_, 1999. .. [4] A. Niculescu-Mizil & R. Caruana. `Predicting Good Probabilities with Supervised Learning <https://www.cs.cornell.edu/~alexn/papers/calibration.icml05.crc.rev3.pdf>`_, ICML 2005. .. [5] Chuan Guo, Geoff Pleiss, Yu Sun, Kilian Q. Weinberger. :doi:`On Calibration of Modern Neural Networks<10.48550/arXiv.1706.04599>`. Proceedings of the 34th International Conference on Machine Learning, PMLR 70:1321-1330, 2017. Examples -------- >>> from sklearn.datasets import make_classification >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.calibration import CalibratedClassifierCV >>> X, y = make_classification(n_samples=100, n_features=2, ... n_redundant=0, random_state=42) >>> base_clf = GaussianNB() >>> calibrated_clf = CalibratedClassifierCV(base_clf, cv=3) >>> calibrated_clf.fit(X, y) CalibratedClassifierCV(...) 
>>> len(calibrated_clf.calibrated_classifiers_) 3 >>> calibrated_clf.predict_proba(X)[:5, :] array([[0.110, 0.889], [0.072, 0.927], [0.928, 0.072], [0.928, 0.072], [0.072, 0.928]]) >>> from sklearn.model_selection import train_test_split >>> X, y = make_classification(n_samples=100, n_features=2, ... n_redundant=0, random_state=42) >>> X_train, X_calib, y_train, y_calib = train_test_split( ... X, y, random_state=42 ... ) >>> base_clf = GaussianNB() >>> base_clf.fit(X_train, y_train) GaussianNB() >>> from sklearn.frozen import FrozenEstimator >>> calibrated_clf = CalibratedClassifierCV(FrozenEstimator(base_clf)) >>> calibrated_clf.fit(X_calib, y_calib) CalibratedClassifierCV(...) >>> len(calibrated_clf.calibrated_classifiers_) 1 >>> calibrated_clf.predict_proba([[-0.5, 0.5]]) array([[0.936, 0.063]]) """ _parameter_constraints: dict = { "estimator": [ HasMethods(["fit", "predict_proba"]), HasMethods(["fit", "decision_function"]), None, ], "method": [StrOptions({"isotonic", "sigmoid", "temperature"})], "cv": ["cv_object"], "n_jobs": [Integral, None], "ensemble": ["boolean", StrOptions({"auto"})], } def __init__( self, estimator=None, *, method="sigmoid", cv=None, n_jobs=None, ensemble="auto", ): self.estimator = estimator self.method = method self.cv = cv self.n_jobs = n_jobs self.ensemble = ensemble def _get_estimator(self): """Resolve which estimator to return (default is LinearSVC)""" if self.estimator is None: # we want all classifiers that don't expose a random_state # to be deterministic (and we don't want to expose this one). estimator = LinearSVC(random_state=0) if _routing_enabled(): estimator.set_fit_request(sample_weight=True) else: estimator = self.estimator return estimator @_fit_context( # CalibratedClassifierCV.estimator is not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y, sample_weight=None, **fit_params): """Fit the calibrated model. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. 
y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. **fit_params : dict Parameters to pass to the `fit` method of the underlying classifier. Returns ------- self : object Returns an instance of self. """ check_classification_targets(y) X, y = indexable(X, y) estimator = self._get_estimator() _ensemble = self.ensemble if _ensemble == "auto": _ensemble = not isinstance(estimator, FrozenEstimator) self.calibrated_classifiers_ = [] # Set `classes_` using all `y` label_encoder_ = LabelEncoder().fit(y) self.classes_ = label_encoder_.classes_ if self.method == "temperature" and isinstance(y[0], str): # for temperature scaling if `y` contains strings then encode it # right here to avoid fitting LabelEncoder again within the # `_fit_calibrator` function. y = label_encoder_.transform(y=y) if _routing_enabled(): routed_params = process_routing( self, "fit", sample_weight=sample_weight, **fit_params, ) else: # sample_weight checks fit_parameters = signature(estimator.fit).parameters supports_sw = "sample_weight" in fit_parameters if sample_weight is not None and not supports_sw: estimator_name = type(estimator).__name__ warnings.warn( f"Since {estimator_name} does not appear to accept" " sample_weight, sample weights will only be used for the" " calibration itself. This can be caused by a limitation of" " the current scikit-learn API. See the following issue for" " more details:" " https://github.com/scikit-learn/scikit-learn/issues/21134." " Be warned that the result of the calibration is likely to be" " incorrect." 
) routed_params = Bunch() routed_params.splitter = Bunch(split={}) # no routing for splitter routed_params.estimator = Bunch(fit=fit_params) if sample_weight is not None and supports_sw: routed_params.estimator.fit["sample_weight"] = sample_weight xp, is_array_api, device_ = get_namespace_and_device(X) if is_array_api: y, sample_weight = move_to(y, sample_weight, xp=xp, device=device_) # Check that each cross-validation fold can have at least one # example per class if isinstance(self.cv, int): n_folds = self.cv elif hasattr(self.cv, "n_splits"): n_folds = self.cv.n_splits else: n_folds = None if n_folds and xp.any(xp.unique_counts(y)[1] < n_folds): raise ValueError( f"Requesting {n_folds}-fold " "cross-validation but provided less than " f"{n_folds} examples for at least one class." ) if isinstance(self.cv, LeaveOneOut): raise ValueError( "LeaveOneOut cross-validation does not allow" "all classes to be present in test splits. " "Please use a cross-validation generator that allows " "all classes to appear in every test and train split." 
) cv = check_cv(self.cv, y, classifier=True) if _ensemble: parallel = Parallel(n_jobs=self.n_jobs) self.calibrated_classifiers_ = parallel( delayed(_fit_classifier_calibrator_pair)( clone(estimator), X, y, train=train, test=test, method=self.method, classes=self.classes_, xp=xp, sample_weight=sample_weight, fit_params=routed_params.estimator.fit, ) for train, test in cv.split(X, y, **routed_params.splitter.split) ) else: this_estimator = clone(estimator) method_name = _check_response_method( this_estimator, ["decision_function", "predict_proba"], ).__name__ predictions = cross_val_predict( estimator=this_estimator, X=X, y=y, cv=cv, method=method_name, n_jobs=self.n_jobs, params=routed_params.estimator.fit, ) if self.classes_.shape[0] == 2: # Ensure shape (n_samples, 1) in the binary case if method_name == "predict_proba": # Select the probability column of the positive class predictions = _process_predict_proba( y_pred=predictions, target_type="binary", classes=self.classes_, pos_label=self.classes_[1], ) predictions = predictions.reshape(-1, 1) if sample_weight is not None: # Check that the sample_weight dtype is consistent with the # predictions to avoid unintentional upcasts. sample_weight = _check_sample_weight( sample_weight, predictions, dtype=predictions.dtype ) this_estimator.fit(X, y, **routed_params.estimator.fit) # Note: Here we don't pass on fit_params because the supported # calibrators don't support fit_params anyway calibrated_classifier = _fit_calibrator( this_estimator, predictions, y, self.classes_, self.method, xp=xp, sample_weight=sample_weight, ) self.calibrated_classifiers_.append(calibrated_classifier) first_clf = self.calibrated_classifiers_[0].estimator if hasattr(first_clf, "n_features_in_"): self.n_features_in_ = first_clf.n_features_in_ if hasattr(first_clf, "feature_names_in_"): self.feature_names_in_ = first_clf.feature_names_in_ return self def predict_proba(self, X): """Calibrated probabilities of classification. 
This function returns calibrated probabilities of classification according to each class on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) The samples, as accepted by `estimator.predict_proba`. Returns ------- C : ndarray of shape (n_samples, n_classes) The predicted probas. """ check_is_fitted(self) # Compute the arithmetic mean of the predictions of the calibrated # classifiers xp, _, device_ = get_namespace_and_device(X) mean_proba = xp.zeros((_num_samples(X), self.classes_.shape[0]), device=device_) for calibrated_classifier in self.calibrated_classifiers_: proba = calibrated_classifier.predict_proba(X) mean_proba += proba mean_proba /= len(self.calibrated_classifiers_) return mean_proba def predict(self, X): """Predict the target of new samples. The predicted class is the class that has the highest probability, and can thus be different from the prediction of the uncalibrated classifier. Parameters ---------- X : array-like of shape (n_samples, n_features) The samples, as accepted by `estimator.predict`. Returns ------- C : ndarray of shape (n_samples,) The predicted class. """ xp, _ = get_namespace(X) check_is_fitted(self) class_indices = xp.argmax(self.predict_proba(X), axis=1) if isinstance(self.classes_[0], str): class_indices = _convert_to_numpy(class_indices, xp=xp) return self.classes_[class_indices] def get_metadata_routing(self): """Get metadata routing of this object. Please check :ref:`User Guide <metadata_routing>` on how the routing mechanism works. Returns ------- routing : MetadataRouter A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating routing information. 
""" router = ( MetadataRouter(owner=self) .add_self_request(self) .add( estimator=self._get_estimator(), method_mapping=MethodMapping().add(caller="fit", callee="fit"), ) .add( splitter=self.cv, method_mapping=MethodMapping().add(caller="fit", callee="split"), ) ) return router def __sklearn_tags__(self): tags = super().__sklearn_tags__() estimator_tags = get_tags(self._get_estimator()) tags.input_tags.sparse = estimator_tags.input_tags.sparse tags.array_api_support = ( estimator_tags.array_api_support and self.method == "temperature" ) return tags def _fit_classifier_calibrator_pair( estimator, X, y, train, test, method, classes, xp, sample_weight=None, fit_params=None, ): """Fit a classifier/calibration pair on a given train/test split. Fit the classifier on the train set, compute its predictions on the test set and use the predictions as input to fit the calibrator along with the test labels. Parameters ---------- estimator : estimator instance Cloned base estimator. X : array-like, shape (n_samples, n_features) Sample data. y : array-like, shape (n_samples,) Targets. train : ndarray, shape (n_train_indices,) Indices of the training subset. test : ndarray, shape (n_test_indices,) Indices of the testing subset. method : {'sigmoid', 'isotonic', 'temperature'} Method to use for calibration. classes : ndarray, shape (n_classes,) The target classes. xp : namespace Array API namespace. sample_weight : array-like, default=None Sample weights for `X`. fit_params : dict, default=None Parameters to pass to the `fit` method of the underlying classifier. 
Returns ------- calibrated_classifier : _CalibratedClassifier instance """ fit_params_train = _check_method_params(X, params=fit_params, indices=train) X_train, y_train = _safe_indexing(X, train), _safe_indexing(y, train) X_test, y_test = _safe_indexing(X, test), _safe_indexing(y, test) estimator.fit(X_train, y_train, **fit_params_train) predictions, _ = _get_response_values( estimator, X_test, response_method=["decision_function", "predict_proba"], ) if predictions.ndim == 1: # Reshape binary output from `(n_samples,)` to `(n_samples, 1)` predictions = predictions.reshape(-1, 1) if sample_weight is not None: # Check that the sample_weight dtype is consistent with the predictions # to avoid unintentional upcasts. sample_weight = _check_sample_weight(sample_weight, X, dtype=predictions.dtype) sw_test = _safe_indexing(sample_weight, test) else: sw_test = None calibrated_classifier = _fit_calibrator( estimator, predictions, y_test, classes, method, xp=xp, sample_weight=sw_test, ) return calibrated_classifier def _fit_calibrator(clf, predictions, y, classes, method, xp, sample_weight=None): """Fit calibrator(s) and return a `_CalibratedClassifier` instance. A separate calibrator is fitted for each of the `n_classes` (i.e. `len(clf.classes_)`). However, if `n_classes` is 2 or if `method` is 'temperature', only one calibrator is fitted. Parameters ---------- clf : estimator instance Fitted classifier. predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) \ when binary. Raw predictions returned by the un-calibrated base classifier. y : array-like, shape (n_samples,) The targets. For `method="temperature"`, `y` needs to be label encoded. classes : ndarray, shape (n_classes,) All the prediction classes. method : {'sigmoid', 'isotonic', 'temperature'} The method to use for calibration. xp : namespace Array API namespace. sample_weight : ndarray, shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. 
Returns ------- pipeline : _CalibratedClassifier instance """ calibrators = [] if method in ("isotonic", "sigmoid"): Y = label_binarize(y, classes=classes) label_encoder = LabelEncoder().fit(classes) pos_class_indices = label_encoder.transform(clf.classes_) for class_idx, this_pred in zip(pos_class_indices, predictions.T): if method == "isotonic": calibrator = IsotonicRegression(out_of_bounds="clip") else: # "sigmoid" calibrator = _SigmoidCalibration() calibrator.fit(this_pred, Y[:, class_idx], sample_weight) calibrators.append(calibrator) elif method == "temperature": if classes.shape[0] == 2 and predictions.shape[-1] == 1: response_method_name = _check_response_method( clf, ["decision_function", "predict_proba"], ).__name__ if response_method_name == "predict_proba": predictions = xp.concat([1 - predictions, predictions], axis=1) calibrator = _TemperatureScaling() calibrator.fit(predictions, y, sample_weight) calibrators.append(calibrator) pipeline = _CalibratedClassifier(clf, calibrators, method=method, classes=classes) return pipeline
CalibratedClassifierCV
python
huggingface__transformers
src/transformers/models/plbart/modeling_plbart.py
{ "start": 2246, "end": 2752 }
class ____(nn.Embedding): """ This module overrides nn.Embeddings' forward by multiplying with embeddings scale. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0): super().__init__(num_embeddings, embedding_dim, padding_idx) self.embed_scale = embed_scale def forward(self, input_ids: torch.Tensor): return super().forward(input_ids) * self.embed_scale @auto_docstring
PLBartScaledWordEmbedding
python
realpython__materials
python-absolute-value/sample_code.py
{ "start": 313, "end": 1454 }
class ____: def __init__(self, *coordinates): self.coordinates = coordinates def __abs__(self): return math.hypot(*self.coordinates) def absolute_value_piecewise(x): if x >= 0: return x else: return -x def absolute_value_piecewise_conditional_expression(x): return x if x >= 0 else -x def absolute_value_algebraic(x): return sqrt(pow(x, 2)) def absolute_value_algebraic_exponents(x): return (x**2) ** 0.5 def absolute_value_silly(x): return float(str(x).replace("-", "")) if __name__ == "__main__": print(f"{absolute_value_piecewise(-12) = }") print(f"{absolute_value_piecewise_conditional_expression(-12) = }") print(f"{absolute_value_algebraic(-12) = }") print(f"{absolute_value_algebraic_exponents(-12) = }") print(f"{absolute_value_silly(-12) = }") print(f"{abs(-12) = }") print(f"{abs(-12.0) = }") print(f"{abs(complex(3, 2)) = }") print(f"{abs(Fraction('-3/4')) = }") print(f"{abs(Decimal('-0.75')) = }") print(f"{abs(VectorBound(0.42, 1.5, 0.87)) = }") print(f"{abs(VectorFree(0.42, 1.5, 0.87)) = }")
VectorFree
python
Textualize__textual
src/textual/widgets/_header.py
{ "start": 1721, "end": 2387 }
class ____(HeaderClockSpace): """Display a clock on the right of the header.""" DEFAULT_CSS = """ HeaderClock { background: $foreground-darken-1 5%; color: $foreground; text-opacity: 85%; content-align: center middle; } """ time_format: Reactive[str] = Reactive("%X") def _on_mount(self, _: Mount) -> None: self.set_interval(1, callback=self.refresh, name="update header clock") def render(self) -> RenderResult: """Render the header clock. Returns: The rendered clock. """ return Text(datetime.now().time().strftime(self.time_format))
HeaderClock
python
pypa__packaging
src/packaging/metadata.py
{ "start": 29304, "end": 39439 }
class ____: """Representation of distribution metadata. Compared to :class:`RawMetadata`, this class provides objects representing metadata fields instead of only using built-in types. Any invalid metadata will cause :exc:`InvalidMetadata` to be raised (with a :py:attr:`~BaseException.__cause__` attribute as appropriate). """ _raw: RawMetadata @classmethod def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> Metadata: """Create an instance from :class:`RawMetadata`. If *validate* is true, all metadata will be validated. All exceptions related to validation will be gathered and raised as an :class:`ExceptionGroup`. """ ins = cls() ins._raw = data.copy() # Mutations occur due to caching enriched values. if validate: exceptions: list[Exception] = [] try: metadata_version = ins.metadata_version metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version) except InvalidMetadata as metadata_version_exc: exceptions.append(metadata_version_exc) metadata_version = None # Make sure to check for the fields that are present, the required # fields (so their absence can be reported). fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS # Remove fields that have already been checked. fields_to_check -= {"metadata_version"} for key in fields_to_check: try: if metadata_version: # Can't use getattr() as that triggers descriptor protocol which # will fail due to no value for the instance argument. 
try: field_metadata_version = cls.__dict__[key].added except KeyError: exc = InvalidMetadata(key, f"unrecognized field: {key!r}") exceptions.append(exc) continue field_age = _VALID_METADATA_VERSIONS.index( field_metadata_version ) if field_age > metadata_age: field = _RAW_TO_EMAIL_MAPPING[key] exc = InvalidMetadata( field, f"{field} introduced in metadata version " f"{field_metadata_version}, not {metadata_version}", ) exceptions.append(exc) continue getattr(ins, key) except InvalidMetadata as exc: exceptions.append(exc) if exceptions: raise ExceptionGroup("invalid metadata", exceptions) return ins @classmethod def from_email(cls, data: bytes | str, *, validate: bool = True) -> Metadata: """Parse metadata from email headers. If *validate* is true, the metadata will be validated. All exceptions related to validation will be gathered and raised as an :class:`ExceptionGroup`. """ raw, unparsed = parse_email(data) if validate: exceptions: list[Exception] = [] for unparsed_key in unparsed: if unparsed_key in _EMAIL_TO_RAW_MAPPING: message = f"{unparsed_key!r} has invalid data" else: message = f"unrecognized field: {unparsed_key!r}" exceptions.append(InvalidMetadata(unparsed_key, message)) if exceptions: raise ExceptionGroup("unparsed", exceptions) try: return cls.from_raw(raw, validate=validate) except ExceptionGroup as exc_group: raise ExceptionGroup( "invalid or unparsed metadata", exc_group.exceptions ) from None metadata_version: _Validator[_MetadataVersion] = _Validator() """:external:ref:`core-metadata-metadata-version` (required; validated to be a valid metadata version)""" # `name` is not normalized/typed to NormalizedName so as to provide access to # the original/raw name. 
name: _Validator[str] = _Validator() """:external:ref:`core-metadata-name` (required; validated using :func:`~packaging.utils.canonicalize_name` and its *validate* parameter)""" version: _Validator[version_module.Version] = _Validator() """:external:ref:`core-metadata-version` (required)""" dynamic: _Validator[list[str] | None] = _Validator( added="2.2", ) """:external:ref:`core-metadata-dynamic` (validated against core metadata field names and lowercased)""" platforms: _Validator[list[str] | None] = _Validator() """:external:ref:`core-metadata-platform`""" supported_platforms: _Validator[list[str] | None] = _Validator(added="1.1") """:external:ref:`core-metadata-supported-platform`""" summary: _Validator[str | None] = _Validator() """:external:ref:`core-metadata-summary` (validated to contain no newlines)""" description: _Validator[str | None] = _Validator() # TODO 2.1: can be in body """:external:ref:`core-metadata-description`""" description_content_type: _Validator[str | None] = _Validator(added="2.1") """:external:ref:`core-metadata-description-content-type` (validated)""" keywords: _Validator[list[str] | None] = _Validator() """:external:ref:`core-metadata-keywords`""" home_page: _Validator[str | None] = _Validator() """:external:ref:`core-metadata-home-page`""" download_url: _Validator[str | None] = _Validator(added="1.1") """:external:ref:`core-metadata-download-url`""" author: _Validator[str | None] = _Validator() """:external:ref:`core-metadata-author`""" author_email: _Validator[str | None] = _Validator() """:external:ref:`core-metadata-author-email`""" maintainer: _Validator[str | None] = _Validator(added="1.2") """:external:ref:`core-metadata-maintainer`""" maintainer_email: _Validator[str | None] = _Validator(added="1.2") """:external:ref:`core-metadata-maintainer-email`""" license: _Validator[str | None] = _Validator() """:external:ref:`core-metadata-license`""" license_expression: _Validator[NormalizedLicenseExpression | None] = _Validator( 
added="2.4" ) """:external:ref:`core-metadata-license-expression`""" license_files: _Validator[list[str] | None] = _Validator(added="2.4") """:external:ref:`core-metadata-license-file`""" classifiers: _Validator[list[str] | None] = _Validator(added="1.1") """:external:ref:`core-metadata-classifier`""" requires_dist: _Validator[list[requirements.Requirement] | None] = _Validator( added="1.2" ) """:external:ref:`core-metadata-requires-dist`""" requires_python: _Validator[specifiers.SpecifierSet | None] = _Validator( added="1.2" ) """:external:ref:`core-metadata-requires-python`""" # Because `Requires-External` allows for non-PEP 440 version specifiers, we # don't do any processing on the values. requires_external: _Validator[list[str] | None] = _Validator(added="1.2") """:external:ref:`core-metadata-requires-external`""" project_urls: _Validator[dict[str, str] | None] = _Validator(added="1.2") """:external:ref:`core-metadata-project-url`""" # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation # regardless of metadata version. 
provides_extra: _Validator[list[utils.NormalizedName] | None] = _Validator( added="2.1", ) """:external:ref:`core-metadata-provides-extra`""" provides_dist: _Validator[list[str] | None] = _Validator(added="1.2") """:external:ref:`core-metadata-provides-dist`""" obsoletes_dist: _Validator[list[str] | None] = _Validator(added="1.2") """:external:ref:`core-metadata-obsoletes-dist`""" import_names: _Validator[list[str] | None] = _Validator(added="2.5") """:external:ref:`core-metadata-import-name`""" import_namespaces: _Validator[list[str] | None] = _Validator(added="2.5") """:external:ref:`core-metadata-import-namespace`""" requires: _Validator[list[str] | None] = _Validator(added="1.1") """``Requires`` (deprecated)""" provides: _Validator[list[str] | None] = _Validator(added="1.1") """``Provides`` (deprecated)""" obsoletes: _Validator[list[str] | None] = _Validator(added="1.1") """``Obsoletes`` (deprecated)""" def as_rfc822(self) -> RFC822Message: """ Return an RFC822 message with the metadata. """ message = RFC822Message() self._write_metadata(message) return message def _write_metadata(self, message: RFC822Message) -> None: """ Return an RFC822 message with the metadata. """ for name, validator in self.__class__.__dict__.items(): if isinstance(validator, _Validator) and name != "description": value = getattr(self, name) email_name = _RAW_TO_EMAIL_MAPPING[name] if value is not None: if email_name == "project-url": for label, url in value.items(): message[email_name] = f"{label}, {url}" elif email_name == "keywords": message[email_name] = ",".join(value) elif email_name == "import-name" and value == []: message[email_name] = "" elif isinstance(value, list): for item in value: message[email_name] = str(item) else: message[email_name] = str(value) # The description is a special case because it is in the body of the message. if self.description is not None: message.set_payload(self.description)
Metadata
python
spack__spack
lib/spack/spack/vendor/macholib/mach_o.py
{ "start": 25427, "end": 25552 }
class ____(Structure): _fields_ = (("sub_library", lc_str),) def describe(self): return {}
sub_library_command
python
spyder-ide__spyder
spyder/utils/syntaxhighlighters.py
{ "start": 68689, "end": 69012 }
class ____(GenericSH): """yaml Syntax Highlighter""" # Syntax highlighting rules: PROG = re.compile(make_yaml_patterns(), re.S) #============================================================================== # HTML highlighter #==============================================================================
YamlSH
python
RaRe-Technologies__gensim
gensim/models/lda_dispatcher.py
{ "start": 2472, "end": 11131 }
class ____: """Dispatcher object that communicates and coordinates individual workers. Warnings -------- There should never be more than one dispatcher running at any one time. """ def __init__(self, maxsize=MAX_JOBS_QUEUE, ns_conf=None): """Partly initializes the dispatcher. A full initialization (including initialization of the workers) requires a call to :meth:`~gensim.models.lda_dispatcher.Dispatcher.initialize` Parameters ---------- maxsize : int, optional Maximum number of jobs to be kept pre-fetched in the queue. ns_conf : dict of (str, object) Sets up the name server configuration for the pyro daemon server of dispatcher. This also helps to keep track of your objects in your network by using logical object names instead of exact object name(or id) and its location. """ self.maxsize = maxsize self.callback = None self.ns_conf = ns_conf if ns_conf is not None else {} @Pyro4.expose def initialize(self, **model_params): """Fully initialize the dispatcher and all its workers. Parameters ---------- **model_params Keyword parameters used to initialize individual workers, see :class:`~gensim.models.ldamodel.LdaModel`. Raises ------ RuntimeError When no workers are found (the :mod:`gensim.models.lda_worker` script must be ran beforehand). 
""" self.jobs = Queue(maxsize=self.maxsize) self.lock_update = threading.Lock() self._jobsdone = 0 self._jobsreceived = 0 self.workers = {} with utils.getNS(**self.ns_conf) as ns: self.callback = Pyro4.Proxy(ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX]) for name, uri in ns.list(prefix=LDA_WORKER_PREFIX).items(): try: worker = Pyro4.Proxy(uri) workerid = len(self.workers) # make time consuming methods work asynchronously logger.info("registering worker #%i at %s", workerid, uri) worker.initialize(workerid, dispatcher=self.callback, **model_params) self.workers[workerid] = worker except Pyro4.errors.PyroError: logger.warning("unresponsive worker at %s,deleting it from the name server", uri) ns.remove(name) if not self.workers: raise RuntimeError('no workers found; run some lda_worker scripts on your machines first!') @Pyro4.expose def getworkers(self): """Return pyro URIs of all registered workers. Returns ------- list of URIs The pyro URIs for each worker. """ return [worker._pyroUri for worker in self.workers.values()] @Pyro4.expose def getjob(self, worker_id): """Atomically pop a job from the queue. Parameters ---------- worker_id : int The worker that requested the job. Returns ------- iterable of list of (int, float) The corpus in BoW format. """ logger.info("worker #%i requesting a new job", worker_id) job = self.jobs.get(block=True, timeout=1) logger.info("worker #%i got a new job (%i left)", worker_id, self.jobs.qsize()) return job @Pyro4.expose def putjob(self, job): """Atomically add a job to the queue. Parameters ---------- job : iterable of list of (int, float) The corpus in BoW format. """ self._jobsreceived += 1 self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT) logger.info("added a new job (len(queue)=%i items)", self.jobs.qsize()) @Pyro4.expose def getstate(self): """Merge states from across all workers and return the result. 
Returns ------- :class:`~gensim.models.ldamodel.LdaState` Merged resultant state """ logger.info("end of input, assigning all remaining jobs") logger.debug("jobs done: %s, jobs received: %s", self._jobsdone, self._jobsreceived) i = 0 count = 10 while self._jobsdone < self._jobsreceived: time.sleep(0.5) # check every half a second i += 1 if i > count: i = 0 for workerid, worker in self.workers.items(): logger.info("checking aliveness for worker %s", workerid) worker.ping() logger.info("merging states from %i workers", len(self.workers)) workers = list(self.workers.values()) result = workers[0].getstate() for worker in workers[1:]: result.merge(worker.getstate()) logger.info("sending out merged state") return result @Pyro4.expose def reset(self, state): """Reinitialize all workers for a new EM iteration. Parameters ---------- state : :class:`~gensim.models.ldamodel.LdaState` State of :class:`~gensim.models.lda.LdaModel`. """ for workerid, worker in self.workers.items(): logger.info("resetting worker %s", workerid) worker.reset(state) worker.requestjob() self._jobsdone = 0 self._jobsreceived = 0 @Pyro4.expose @Pyro4.oneway @utils.synchronous('lock_update') def jobdone(self, workerid): """A worker has finished its job. Log this event and then asynchronously transfer control back to the worker. Callback used by workers to notify when their job is done. The job done event is logged and then control is asynchronously transfered back to the worker (who can then request another job). In this way, control flow basically oscillates between :meth:`gensim.models.lda_dispatcher.Dispatcher.jobdone` and :meth:`gensim.models.lda_worker.Worker.requestjob`. Parameters ---------- workerid : int The ID of the worker that finished the job (used for logging). 
""" self._jobsdone += 1 logger.info("worker #%s finished job #%i", workerid, self._jobsdone) self.workers[workerid].requestjob() # tell the worker to ask for another job, asynchronously (one-way) def jobsdone(self): """Wrap :attr:`~gensim.models.lda_dispatcher.Dispatcher._jobsdone` needed for remote access through proxies. Returns ------- int Number of jobs already completed. """ return self._jobsdone @Pyro4.oneway def exit(self): """Terminate all registered workers and then the dispatcher.""" for workerid, worker in self.workers.items(): logger.info("terminating worker %s", workerid) worker.exit() logger.info("terminating dispatcher") os._exit(0) # exit the whole process (not just this thread ala sys.exit()) def main(): parser = argparse.ArgumentParser(description=__doc__[:-135], formatter_class=argparse.RawTextHelpFormatter) parser.add_argument( "--maxsize", help="How many jobs (=chunks of N documents) to keep 'pre-fetched' in a queue (default: %(default)s)", type=int, default=MAX_JOBS_QUEUE ) parser.add_argument("--host", help="Nameserver hostname (default: %(default)s)", default=None) parser.add_argument("--port", help="Nameserver port (default: %(default)s)", default=None, type=int) parser.add_argument("--no-broadcast", help="Disable broadcast (default: %(default)s)", action='store_const', default=True, const=False) parser.add_argument("--hmac", help="Nameserver hmac key (default: %(default)s)", default=None) parser.add_argument( '-v', '--verbose', help='Verbose flag', action='store_const', dest="loglevel", const=logging.INFO, default=logging.WARNING ) args = parser.parse_args() logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=args.loglevel) logger.info("running %s", " ".join(sys.argv)) ns_conf = { "broadcast": args.no_broadcast, "host": args.host, "port": args.port, "hmac_key": args.hmac } utils.pyro_daemon(LDA_DISPATCHER_PREFIX, Dispatcher(maxsize=args.maxsize, ns_conf=ns_conf), ns_conf=ns_conf) logger.info("finished running %s", 
" ".join(sys.argv)) if __name__ == '__main__': main()
Dispatcher
python
ZoranPandovski__al-go-rithms
others/synchronization/ProducerConsumer/Python/producer_consumer.py
{ "start": 400, "end": 998 }
class ____(Thread): def run(self): array = range(5) global queue while True: condition.acquire() if len(Q) == BUF_SIZE: print("Q full : producer waiting") condition.wait() print("Space available, consumer notified the producer") item = random.choice(array) #lock.acquire() Q.append(item) print("produced", item) condition.notify() condition.release() #lock.release() time.sleep(random.random())
Producer
python
huggingface__transformers
src/transformers/models/perception_lm/convert_perception_lm_weights_to_hf.py
{ "start": 17025, "end": 23442 }
class ____(TikTokenConverter): def __init__( self, vocab_file, special_tokens=None, context_length=11520, **kwargs, ): super().__init__(vocab_file, additional_special_tokens=special_tokens, **kwargs) tokenizer = self.converted() self.converted_tokenizer = PreTrainedTokenizerFast( tokenizer_object=tokenizer, bos_token="<|begin_of_text|>", eos_token="<|eot_id|>", model_input_names=["input_ids", "attention_mask"], model_max_length=context_length, clean_up_tokenization_spaces=True, extra_special_tokens={ "image_token": "<|image|>", "video_token": "<|video|>", "pad_token": "<|end_of_text|>", }, ) self.converted_tokenizer.image_token_id = self.converted_tokenizer.encode( self.converted_tokenizer.image_token, add_special_tokens=False )[0] self.converted_tokenizer.video_token_id = self.converted_tokenizer.encode( self.converted_tokenizer.video_token, add_special_tokens=False )[0] self.update_post_processor(self.converted_tokenizer) # finer special_tokens_map.json self.converted_tokenizer._bos_token = BOS_ADDED_TOKEN self.converted_tokenizer._eos_token = EOT_ADDED_TOKEN # We can't do this while building the tokenizer because we have no easy access to the bos token id def update_post_processor(self, tokenizer): tokenizer._tokenizer.post_processor = processors.Sequence( [ processors.ByteLevel(trim_offsets=False), processors.TemplateProcessing( single="<|begin_of_text|> $A", pair="<|begin_of_text|>:0 $A:0 <|begin_of_text|>:1 $B:1", special_tokens=[ ( "<|begin_of_text|>", tokenizer.convert_tokens_to_ids("<|begin_of_text|>"), ), ], ), ] ) def write_tokenizer( tokenizer_path, input_tokenizer_path, special_tokens=None, params=None, push_to_hub=False, ): print("Converting the tokenizer.") tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast context_length = params["model"]["max_seqlen"] tokenizer = Llama3Converter( input_tokenizer_path, special_tokens, context_length, ).converted_tokenizer tokenizer.image_token_id = 
tokenizer.encode(tokenizer.image_token, add_special_tokens=False)[0] processor_config = { "pooling_ratio": params["model"]["pooling_ratio"], "patch_size": params["model"]["vision_model"]["patch_size"], "processor_class": "PerceptionLMProcessor", } tile_size = params["model"]["vision_model"]["image_size"] image_preprocessor_config = { "image_processor_type": "PerceptionLMImageProcessorFast", "vision_input_type": params["data"]["vision_input_type"], "tile_size": tile_size, "max_num_tiles": params["data"]["max_num_tiles"], "max_frame_tiles": 1, "size": {"height": tile_size, "width": tile_size}, "do_resize": True, "do_rescale": True, "do_normalize": True, "image_mean": [0.5, 0.5, 0.5], "image_std": [0.5, 0.5, 0.5], } image_preprocessor = PerceptionLMImageProcessorFast(**image_preprocessor_config) video_preprocessor_config = { "video_processor_type": "PerceptionLMVideoProcessor", "size": {"height": tile_size, "width": tile_size}, } video_preprocessor = PerceptionLMVideoProcessor(**video_preprocessor_config) processor = PerceptionLMProcessor( image_processor=image_preprocessor, video_processor=video_preprocessor, tokenizer=tokenizer, chat_template=CHAT_TEMPLATE, **processor_config, ) if push_to_hub: print(f"Pushing a {tokenizer_class.__name__} to the Hub repo - {tokenizer_path}.") model_name = tokenizer_path.split(os.path.sep)[-1] processor.push_to_hub(model_name, private=True) else: print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.") processor.save_pretrained(tokenizer_path) return tokenizer def main(): parser = argparse.ArgumentParser() parser.add_argument( "--input_dir", help="Location of Llama weights, which contains tokenizer.model and model folders", ) parser.add_argument( "--output_dir", help="Location to write HF model and tokenizer", ) parser.add_argument( "--push_to_hub", help="Whether or not to push the model to the hub at `output_dir` instead of saving it locally.", action="store_true", default=False, ) parser.add_argument( 
"--safe_serialization", action="store_true", default=True, help="Whether or not to save using `safetensors`.", ) parser.add_argument( "--num_shards", default=None, type=int, help="The number of individual shards used for the model. Does not have to be the same as the number of consolidated_xx.pth", ) parser.add_argument( "--special_tokens", default=None, type=list[str], help="The list of special tokens that should be added to the model.", ) args = parser.parse_args() if args.special_tokens is None: # no special tokens by default args.special_tokens = DEFAULT_SPECIAL_TOKENS.get("perception_lm", []) params = read_json(os.path.join(args.input_dir, "params.json")) spm_path = os.path.join(args.input_dir, "tokenizer.model") tokenizer = write_tokenizer( args.output_dir, spm_path, special_tokens=args.special_tokens, params=params, push_to_hub=args.push_to_hub, ) write_model( model_path=args.output_dir, input_base_path=args.input_dir, params=params, image_token_id=tokenizer.image_token_id, safe_serialization=args.safe_serialization, tokenizer=tokenizer, num_shards=args.num_shards, push_to_hub=args.push_to_hub, ) if __name__ == "__main__": main()
Llama3Converter
python
zarr-developers__zarr-python
src/zarr/core/dtype/registry.py
{ "start": 648, "end": 7113 }
class ____: """ A registry for ZDType classes. This registry is a mapping from Zarr data type names to their corresponding ZDType classes. Attributes ---------- contents : dict[str, type[ZDType[TBaseDType, TBaseScalar]]] The mapping from Zarr data type names to their corresponding ZDType classes. """ contents: dict[str, type[ZDType[TBaseDType, TBaseScalar]]] = field( default_factory=dict, init=False ) _lazy_load_list: list[EntryPoint] = field(default_factory=list, init=False) def _lazy_load(self) -> None: """ Load all data types from the lazy load list and register them with the registry. After loading, clear the lazy load list. """ for e in self._lazy_load_list: self.register(e.load()._zarr_v3_name, e.load()) self._lazy_load_list.clear() def register(self: Self, key: str, cls: type[ZDType[TBaseDType, TBaseScalar]]) -> None: """ Register a data type with the registry. Parameters ---------- key : str The Zarr V3 name of the data type. cls : type[ZDType[TBaseDType, TBaseScalar]] The class of the data type to register. Notes ----- This method is idempotent. If the data type is already registered, this method does nothing. """ if key not in self.contents or self.contents[key] != cls: self.contents[key] = cls def unregister(self, key: str) -> None: """ Unregister a data type from the registry. Parameters ---------- key : str The key associated with the ZDType class to be unregistered. Returns ------- None Raises ------ KeyError If the data type is not found in the registry. """ if key in self.contents: del self.contents[key] else: raise KeyError(f"Data type '{key}' not found in registry.") def get(self, key: str) -> type[ZDType[TBaseDType, TBaseScalar]]: """ Retrieve a registered ZDType class by its key. Parameters ---------- key : str The key associated with the desired ZDType class. Returns ------- type[ZDType[TBaseDType, TBaseScalar]] The ZDType class registered under the given key. Raises ------ KeyError If the key is not found in the registry. 
""" return self.contents[key] def match_dtype(self, dtype: TBaseDType) -> ZDType[TBaseDType, TBaseScalar]: """ Match a native data type, e.g. a NumPy data type, to a registered ZDType. Parameters ---------- dtype : TBaseDType The native data type to match. Returns ------- ZDType[TBaseDType, TBaseScalar] The matched ZDType corresponding to the provided NumPy data type. Raises ------ ValueError If the data type is a NumPy "Object" type, which is ambiguous, or if multiple or no Zarr data types are found that match the provided dtype. Notes ----- This function attempts to resolve a Zarr data type from a given native data type. If the dtype is a NumPy "Object" data type, it raises a ValueError, as this type can represent multiple Zarr data types. In such cases, a specific Zarr data type should be explicitly constructed instead of relying on dynamic resolution. If multiple matches are found, it will also raise a ValueError. In this case conflicting data types must be unregistered, or the Zarr data type should be explicitly constructed. """ if dtype == np.dtype("O"): msg = ( f"Zarr data type resolution from {dtype} failed. " 'Attempted to resolve a zarr data type from a numpy "Object" data type, which is ' 'ambiguous, as multiple zarr data types can be represented by the numpy "Object" ' "data type. " "In this case you should construct your array by providing a specific Zarr data " 'type. For a list of Zarr data types that are compatible with the numpy "Object"' "data type, see https://github.com/zarr-developers/zarr-python/issues/3117" ) raise ValueError(msg) matched: list[ZDType[TBaseDType, TBaseScalar]] = [] for val in self.contents.values(): with contextlib.suppress(DataTypeValidationError): matched.append(val.from_native_dtype(dtype)) if len(matched) == 1: return matched[0] elif len(matched) > 1: msg = ( f"Zarr data type resolution from {dtype} failed. " f"Multiple data type wrappers found that match dtype '{dtype}': {matched}. 
" "You should unregister one of these data types, or avoid Zarr data type inference " "entirely by providing a specific Zarr data type when creating your array." "For more information, see https://github.com/zarr-developers/zarr-python/issues/3117" ) raise ValueError(msg) raise ValueError(f"No Zarr data type found that matches dtype '{dtype!r}'") def match_json( self, data: DTypeJSON, *, zarr_format: ZarrFormat ) -> ZDType[TBaseDType, TBaseScalar]: """ Match a JSON representation of a data type to a registered ZDType. Parameters ---------- data : DTypeJSON The JSON representation of a data type to match. zarr_format : ZarrFormat The Zarr format version to consider when matching data types. Returns ------- ZDType[TBaseDType, TBaseScalar] The matched ZDType corresponding to the JSON representation. Raises ------ ValueError If no matching Zarr data type is found for the given JSON data. """ for val in self.contents.values(): try: return val.from_json(data, zarr_format=zarr_format) except DataTypeValidationError: pass raise ValueError(f"No Zarr data type found that matches {data!r}")
DataTypeRegistry
python
plotly__plotly.py
tests/test_optional/test_figure_factory/test_figure_factory.py
{ "start": 17961, "end": 28478 }
class ____(TestCaseNoTemplate): def test_wrong_arrow_scale(self): # check for ValueError if arrow_scale is <= 0 kwargs = { "x": [0, 2], "y": [0, 2], "u": [[-1, -5], [-1, -5]], "v": [[1, 1], [-3, -3]], "arrow_scale": 0, } self.assertRaises(ValueError, ff.create_streamline, **kwargs) def test_wrong_density(self): # check for ValueError if density is <= 0 kwargs = { "x": [0, 2], "y": [0, 2], "u": [[-1, -5], [-1, -5]], "v": [[1, 1], [-3, -3]], "density": 0, } self.assertRaises(ValueError, ff.create_streamline, **kwargs) def test_uneven_x(self): # check for PlotlyError if x is not evenly spaced kwargs = { "x": [0, 2, 7, 9], "y": [0, 2, 4, 6], "u": [[-1, -5], [-1, -5]], "v": [[1, 1], [-3, -3]], } self.assertRaises(PlotlyError, ff.create_streamline, **kwargs) def test_uneven_y(self): # check for PlotlyError if y is not evenly spaced kwargs = { "x": [0, 2, 4, 6], "y": [1.5, 2, 3, 3.5], "u": [[-1, -5], [-1, -5]], "v": [[1, 1], [-3, -3]], } self.assertRaises(PlotlyError, ff.create_streamline, **kwargs) def test_unequal_length_xy(self): # check for PlotlyError if u and v are not the same length kwargs = { "x": [0, 2, 4, 6], "y": [1.5, 2, 3.5], "u": [[-1, -5], [-1, -5]], "v": [[1, 1], [-3, -3]], } self.assertRaises(PlotlyError, ff.create_streamline, **kwargs) def test_unequal_length_uv(self): # check for PlotlyError if u and v are not the same length kwargs = { "x": [0, 2, 4, 6], "y": [1.5, 2, 3, 3.5], "u": [[-1, -5], [-1, -5], [-1, -5]], "v": [[1, 1], [-3, -3]], } self.assertRaises(PlotlyError, ff.create_streamline, **kwargs) def test_simple_streamline(self): # Need np to check streamline data, # this checks that the first 101 x and y values from streamline are # what we expect for a simple streamline where: # x = np.linspace(-1, 1, 3) # y = np.linspace(-1, 1, 3) # Y, X = np.meshgrid(x, y) # u = X**2 # v = Y**2 # u = u.T #transpose # v = v.T #transpose strln = ff.create_streamline( x=[-1.0, 0.0, 1.0], y=[-1.0, 0.0, 1.0], u=[[1.0, 0.0, 1.0], [1.0, 0.0, 1.0], [1.0, 0.0, 1.0]], 
v=[[1.0, 1.0, 1.0], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0]], ) expected_strln_0_100 = { "y": [ -1.0, -0.9788791845863757, -0.9579399744939614, -0.9371777642073374, -0.9165881396413338, -0.8961668671832106, -0.8759098835283448, -0.8558132862403048, -0.835873324973195, -0.8160863933003534, -0.7964490210989816, -0.7769578674451656, -0.7576097139780906, -0.7384014586961288, -0.7193301101509343, -0.7003927820087748, -0.681586687951103, -0.6629091368888596, -0.64435752846723, -0.6259293488396024, -0.6076221666912738, -0.5894336294951057, -0.5713614599827976, -0.5534034528167977, -0.5355574714490806, -0.5178214451541254, -0.5001933662244311, -0.4826712873178177, -0.4652533189465894, -0.44793762709939944, -0.4307224309873414, -0.4136060009064273, -0.39658665620919065, -0.3796627633786812, -0.3628327341986042, -0.34609502401380254, -0.3294481300756896, -0.31289058996761565, -0.2964209801054992, -0.28003791430937197, -0.2637400424417804, -0.24752604910925968, -0.23139465242334434, -0.21534460281781365, -0.19937468191908325, -0.18348370146685278, -0.1676705022823033, -0.15193395328130999, -0.13627295053029143, -0.1206864163424669, -0.10517329841242584, -0.08973256898704507, -0.07436322407090357, -0.05906428266445696, -0.04383478603333624, -0.028673797007230273, -0.013580399306900914, 0.0014484211645073852, 0.01648792568956914, 0.03159429687713278, 0.04676843461935776, 0.062011259175942746, 0.07732371182540754, 0.09270675554339824, 0.10816137570939799, 0.12368858084331191, 0.1392894033734846, 0.1549649004378033, 0.1707161547196483, 0.1865442753205595, 0.20245039867161063, 0.21843568948560943, 0.23450134175238246, 0.25064857977955146, 0.26687865928136767, 0.2831928685183458, 0.29959252949062387, 0.3160789991881776, 0.33265367090123643, 0.3493179755944802, 0.366073383348855, 0.3829214048751186, 0.39986359310352526, 0.41690154485438513, 0.4340369025945845, 0.4512713562855355, 0.46860664532844054, 0.4860445606132082, 0.5035869466778524, 0.5212357039857456, 0.5389927913286829, 
0.5568602283643591, 0.5748400982975623, 0.5929345507151613, 0.6111458045858065, 0.6294761514361948, 0.6479279587167714, 0.6665036733708583, 0.6852058256224467, 0.704037032999252, ], "x": [ -1.0, -0.9788791845863756, -0.9579399744939614, -0.9371777642073374, -0.9165881396413338, -0.8961668671832106, -0.8759098835283448, -0.8558132862403048, -0.835873324973195, -0.8160863933003534, -0.7964490210989816, -0.7769578674451656, -0.7576097139780906, -0.7384014586961289, -0.7193301101509344, -0.7003927820087748, -0.6815866879511031, -0.6629091368888596, -0.6443575284672302, -0.6259293488396025, -0.6076221666912739, -0.5894336294951058, -0.5713614599827976, -0.5534034528167978, -0.5355574714490807, -0.5178214451541254, -0.5001933662244312, -0.4826712873178177, -0.4652533189465894, -0.44793762709939944, -0.4307224309873414, -0.4136060009064273, -0.39658665620919065, -0.3796627633786812, -0.3628327341986042, -0.34609502401380254, -0.3294481300756896, -0.31289058996761565, -0.2964209801054992, -0.28003791430937197, -0.2637400424417804, -0.24752604910925968, -0.23139465242334434, -0.21534460281781365, -0.19937468191908325, -0.18348370146685278, -0.1676705022823033, -0.15193395328130999, -0.13627295053029143, -0.1206864163424669, -0.10517329841242584, -0.08973256898704507, -0.07436322407090357, -0.05906428266445696, -0.04383478603333624, -0.028673797007230273, -0.013580399306900914, 0.0014484211645073852, 0.01648792568956914, 0.03159429687713278, 0.04676843461935776, 0.062011259175942746, 0.07732371182540754, 0.09270675554339824, 0.10816137570939799, 0.12368858084331191, 0.1392894033734846, 0.1549649004378033, 0.1707161547196483, 0.1865442753205595, 0.20245039867161063, 0.21843568948560943, 0.23450134175238246, 0.25064857977955146, 0.26687865928136767, 0.2831928685183458, 0.29959252949062387, 0.3160789991881776, 0.33265367090123643, 0.3493179755944802, 0.366073383348855, 0.3829214048751186, 0.39986359310352526, 0.41690154485438513, 0.4340369025945845, 0.4512713562855355, 
0.46860664532844054, 0.4860445606132082, 0.5035869466778524, 0.5212357039857456, 0.5389927913286829, 0.5568602283643591, 0.5748400982975623, 0.5929345507151613, 0.6111458045858065, 0.6294761514361948, 0.6479279587167714, 0.6665036733708583, 0.6852058256224467, 0.704037032999252, ], "type": "scatter", "mode": "lines", } self.assertListEqual( list(strln["data"][0]["y"][0:100]), expected_strln_0_100["y"] ) self.assertListEqual( list(strln["data"][0]["x"][0:100]), expected_strln_0_100["x"] )
TestStreamline
python
pennersr__django-allauth
allauth/socialaccount/providers/linkedin_oauth2/provider.py
{ "start": 4154, "end": 5603 }
class ____(OAuth2Provider): id = "linkedin_oauth2" # Name is displayed to ordinary users -- don't include protocol name = "LinkedIn" account_class = LinkedInOAuth2Account oauth2_adapter_class = LinkedInOAuth2Adapter def extract_uid(self, data): if "id" not in data: raise ProviderException( "LinkedIn encountered an internal error while logging in. \ Please try again." ) return str(data["id"]) def get_profile_fields(self): default_fields = [ "id", "firstName", "lastName", # This would be needed to in case you need access to the image # URL. Not enabling this by default due to the amount of data # returned. # # 'profilePicture(displayImage~:playableStreams)' ] fields = self.get_settings().get("PROFILE_FIELDS", default_fields) return fields def get_default_scope(self): scope = ["r_liteprofile"] if app_settings.QUERY_EMAIL: scope.append("r_emailaddress") return scope def extract_common_fields(self, data): return dict( first_name=_extract_name_field(data, "firstName"), last_name=_extract_name_field(data, "lastName"), email=_extract_email(data), ) provider_classes = [LinkedInOAuth2Provider]
LinkedInOAuth2Provider
python
justquick__django-activity-stream
runtests/testapp_nested/apps.py
{ "start": 36, "end": 220 }
class ____(AppConfig): name = 'testapp_nested' def ready(self): from actstream.registry import register register(self.get_model('nestedmodel'))
TestappNestedConfig
python
pyparsing__pyparsing
tests/test_simple_unit.py
{ "start": 10381, "end": 11298 }
class ____(PyparsingExpressionTestCase): tests = [ PyparsingTest( desc="Match with results name", expr=pp.Literal("xyz").set_results_name("value"), text="xyz", expected_dict={"value": "xyz"}, expected_list=["xyz"], ), PyparsingTest( desc="Match with results name - using naming short-cut", expr=pp.Literal("xyz")("value"), text="xyz", expected_dict={"value": "xyz"}, expected_list=["xyz"], ), PyparsingTest( desc="Define multiple results names", expr=pp.Word(pp.alphas, pp.alphanums)("key") + "=" + pp.pyparsing_common.integer("value"), text="range=5280", expected_dict={"key": "range", "value": 5280}, expected_list=["range", "=", 5280], ), ]
TestResultsName
python
allegroai__clearml
clearml/utilities/gpu/pynvml.py
{ "start": 64050, "end": 64221 }
class ____(Structure): _fields_ = [("count", c_uint), ("sensor", c_nvmlGpuThermalSensor_t * NVML_MAX_THERMAL_SENSORS_PER_GPU)]
c_nvmlGpuThermalSettings_t
python
openai__openai-python
src/openai/types/realtime/realtime_audio_config_input_param.py
{ "start": 813, "end": 2926 }
class ____(TypedDict, total=False): format: RealtimeAudioFormatsParam """The format of the input audio.""" noise_reduction: NoiseReduction """Configuration for input audio noise reduction. This can be set to `null` to turn off. Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model. Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio. """ transcription: AudioTranscriptionParam """ Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the model, since the model consumes audio directly. Transcription runs asynchronously through [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription) and should be treated as guidance of input audio content rather than precisely what the model heard. The client can optionally set the language and prompt for transcription, these offer additional guidance to the transcription service. """ turn_detection: Optional[RealtimeAudioInputTurnDetectionParam] """Configuration for turn detection, ether Server VAD or Semantic VAD. This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer for the user to continue speaking. This can be useful for more natural conversations, but may have a higher latency. """
RealtimeAudioConfigInputParam
python
encode__django-rest-framework
tests/test_fields.py
{ "start": 62040, "end": 67215 }
class ____(FieldValues): """ Valid and invalid values for `ChoiceField`. """ valid_inputs = { 'poor': 'poor', 'medium': 'medium', 'good': 'good', } invalid_inputs = { 'amazing': ['"amazing" is not a valid choice.'] } outputs = { 'good': 'good', '': '', 'amazing': 'amazing', } field = serializers.ChoiceField( choices=[ ('poor', 'Poor quality'), ('medium', 'Medium quality'), ('good', 'Good quality'), ] ) def test_allow_blank(self): """ If `allow_blank=True` then '' is a valid input. """ field = serializers.ChoiceField( allow_blank=True, choices=[ ('poor', 'Poor quality'), ('medium', 'Medium quality'), ('good', 'Good quality'), ] ) output = field.run_validation('') assert output == '' def test_allow_null(self): """ If `allow_null=True` then '' on HTML forms is treated as None. """ field = serializers.ChoiceField( allow_null=True, choices=[ 1, 2, 3 ] ) field.field_name = 'example' value = field.get_value(QueryDict('example=')) assert value is None output = field.run_validation(None) assert output is None def test_iter_options(self): """ iter_options() should return a list of options and option groups. 
""" field = serializers.ChoiceField( choices=[ ('Numbers', ['integer', 'float']), ('Strings', ['text', 'email', 'url']), 'boolean' ] ) items = list(field.iter_options()) assert items[0].start_option_group assert items[0].label == 'Numbers' assert items[1].value == 'integer' assert items[2].value == 'float' assert items[3].end_option_group assert items[4].start_option_group assert items[4].label == 'Strings' assert items[5].value == 'text' assert items[6].value == 'email' assert items[7].value == 'url' assert items[8].end_option_group assert items[9].value == 'boolean' def test_edit_choices(self): field = serializers.ChoiceField( allow_null=True, choices=[ 1, 2, ] ) field.choices = [1] assert field.run_validation(1) == 1 with pytest.raises(serializers.ValidationError) as exc_info: field.run_validation(2) assert exc_info.value.detail == ['"2" is not a valid choice.'] def test_enum_integer_choices(self): from enum import IntEnum class ChoiceCase(IntEnum): first = auto() second = auto() # Enum validate choices = [ (ChoiceCase.first, "1"), (ChoiceCase.second, "2") ] field = serializers.ChoiceField(choices=choices) assert field.run_validation(1) == 1 assert field.run_validation(ChoiceCase.first) == 1 assert field.run_validation("1") == 1 # Enum.value validate choices = [ (ChoiceCase.first.value, "1"), (ChoiceCase.second.value, "2") ] field = serializers.ChoiceField(choices=choices) assert field.run_validation(1) == 1 assert field.run_validation(ChoiceCase.first) == 1 assert field.run_validation("1") == 1 def test_integer_choices(self): class ChoiceCase(IntegerChoices): first = auto() second = auto() # Enum validate choices = [ (ChoiceCase.first, "1"), (ChoiceCase.second, "2") ] field = serializers.ChoiceField(choices=choices) assert field.run_validation(1) == 1 assert field.run_validation(ChoiceCase.first) == 1 assert field.run_validation("1") == 1 choices = [ (ChoiceCase.first.value, "1"), (ChoiceCase.second.value, "2") ] field = serializers.ChoiceField(choices=choices) 
assert field.run_validation(1) == 1 assert field.run_validation(ChoiceCase.first) == 1 assert field.run_validation("1") == 1 def test_text_choices(self): class ChoiceCase(TextChoices): first = auto() second = auto() # Enum validate choices = [ (ChoiceCase.first, "first"), (ChoiceCase.second, "second") ] field = serializers.ChoiceField(choices=choices) assert field.run_validation(ChoiceCase.first) == "first" assert field.run_validation("first") == "first" choices = [ (ChoiceCase.first.value, "first"), (ChoiceCase.second.value, "second") ] field = serializers.ChoiceField(choices=choices) assert field.run_validation(ChoiceCase.first) == "first" assert field.run_validation("first") == "first"
TestChoiceField
python
pytorch__pytorch
torch/distributions/transforms.py
{ "start": 34818, "end": 38837 }
class ____(Transform): """ Transform functor that applies a sequence of transforms `tseq` component-wise to each submatrix at `dim`, of length `lengths[dim]`, in a way compatible with :func:`torch.cat`. Example:: x0 = torch.cat([torch.range(1, 10), torch.range(1, 10)], dim=0) x = torch.cat([x0, x0], dim=0) t0 = CatTransform([ExpTransform(), identity_transform], dim=0, lengths=[10, 10]) t = CatTransform([t0, t0], dim=0, lengths=[20, 20]) y = t(x) """ transforms: list[Transform] def __init__( self, tseq: Sequence[Transform], dim: int = 0, lengths: Optional[Sequence[int]] = None, cache_size: int = 0, ) -> None: assert all(isinstance(t, Transform) for t in tseq) if cache_size: tseq = [t.with_cache(cache_size) for t in tseq] super().__init__(cache_size=cache_size) self.transforms = list(tseq) if lengths is None: lengths = [1] * len(self.transforms) self.lengths = list(lengths) assert len(self.lengths) == len(self.transforms) self.dim = dim @lazy_property def event_dim(self) -> int: # type: ignore[override] return max(t.event_dim for t in self.transforms) @lazy_property def length(self) -> int: return sum(self.lengths) def with_cache(self, cache_size=1): if self._cache_size == cache_size: return self return CatTransform(self.transforms, self.dim, self.lengths, cache_size) def _call(self, x): assert -x.dim() <= self.dim < x.dim() assert x.size(self.dim) == self.length yslices = [] start = 0 for trans, length in zip(self.transforms, self.lengths): xslice = x.narrow(self.dim, start, length) yslices.append(trans(xslice)) start = start + length # avoid += for jit compat return torch.cat(yslices, dim=self.dim) def _inverse(self, y): assert -y.dim() <= self.dim < y.dim() assert y.size(self.dim) == self.length xslices = [] start = 0 for trans, length in zip(self.transforms, self.lengths): yslice = y.narrow(self.dim, start, length) xslices.append(trans.inv(yslice)) start = start + length # avoid += for jit compat return torch.cat(xslices, dim=self.dim) def 
log_abs_det_jacobian(self, x, y): assert -x.dim() <= self.dim < x.dim() assert x.size(self.dim) == self.length assert -y.dim() <= self.dim < y.dim() assert y.size(self.dim) == self.length logdetjacs = [] start = 0 for trans, length in zip(self.transforms, self.lengths): xslice = x.narrow(self.dim, start, length) yslice = y.narrow(self.dim, start, length) logdetjac = trans.log_abs_det_jacobian(xslice, yslice) if trans.event_dim < self.event_dim: logdetjac = _sum_rightmost(logdetjac, self.event_dim - trans.event_dim) logdetjacs.append(logdetjac) start = start + length # avoid += for jit compat # Decide whether to concatenate or sum. dim = self.dim if dim >= 0: dim = dim - x.dim() dim = dim + self.event_dim if dim < 0: return torch.cat(logdetjacs, dim=dim) else: return sum(logdetjacs) @property def bijective(self) -> bool: # type: ignore[override] return all(t.bijective for t in self.transforms) @constraints.dependent_property # pyrefly: ignore [bad-override] def domain(self): return constraints.cat( [t.domain for t in self.transforms], self.dim, self.lengths ) @constraints.dependent_property # pyrefly: ignore [bad-override] def codomain(self): return constraints.cat( [t.codomain for t in self.transforms], self.dim, self.lengths )
CatTransform
python
django-haystack__django-haystack
test_haystack/test_management_commands.py
{ "start": 201, "end": 5197 }
class ____(TestCase): @patch("haystack.management.commands.update_index.Command.update_backend") def test_update_index_default_using(self, m): """update_index uses default index when --using is not present""" call_command("update_index") for k in settings.HAYSTACK_CONNECTIONS: self.assertTrue(call("core", k) in m.call_args_list) @patch("haystack.management.commands.update_index.Command.update_backend") def test_update_index_using(self, m): """update_index only applies to indexes specified with --using""" call_command("update_index", verbosity=0, using=["eng", "fra"]) m.assert_any_call("core", "eng") m.assert_any_call("core", "fra") self.assertTrue( call("core", "default") not in m.call_args_list, "update_index should have been restricted to the index specified with --using", ) @patch("haystack.loading.ConnectionHandler.__getitem__") def test_clear_index_default_using(self, m): """clear_index uses all keys when --using is not present""" call_command("clear_index", verbosity=0, interactive=False) self.assertEqual(len(settings.HAYSTACK_CONNECTIONS), m.call_count) for k in settings.HAYSTACK_CONNECTIONS: self.assertTrue(call(k) in m.call_args_list) @patch("haystack.loading.ConnectionHandler.__getitem__") def test_clear_index_using(self, m): """clear_index only applies to indexes specified with --using""" call_command("clear_index", verbosity=0, interactive=False, using=["eng"]) m.assert_called_with("eng") self.assertTrue( m.return_value.get_backend.called, "backend.clear() should be called" ) self.assertTrue( call("default") not in m.call_args_list, "clear_index should have been restricted to the index specified with --using", ) @patch("haystack.loading.ConnectionHandler.__getitem__") @patch("haystack.management.commands.update_index.Command.update_backend") def test_rebuild_index_default_using(self, m1, m2): """rebuild_index uses default index when --using is not present""" call_command("rebuild_index", verbosity=0, interactive=False) 
self.assertEqual(len(settings.HAYSTACK_CONNECTIONS), m2.call_count) for k in settings.HAYSTACK_CONNECTIONS: self.assertTrue(call(k) in m2.call_args_list) m1.assert_any_call("core", "default") m1.assert_any_call("core", "whoosh") @patch("haystack.loading.ConnectionHandler.__getitem__") @patch("haystack.management.commands.update_index.Command.update_backend") def test_rebuild_index_using(self, m1, m2): """rebuild_index passes --using to clear_index and update_index""" call_command("rebuild_index", verbosity=0, interactive=False, using=["eng"]) m2.assert_called_with("eng") m1.assert_any_call("core", "eng") @patch("haystack.management.commands.update_index.Command.handle", return_value="") @patch("haystack.management.commands.clear_index.Command.handle", return_value="") def test_rebuild_index(self, mock_handle_clear, mock_handle_update): call_command("rebuild_index", interactive=False) self.assertTrue(mock_handle_clear.called) self.assertTrue(mock_handle_update.called) @patch("haystack.management.commands.update_index.Command.handle", return_value="") @patch("haystack.management.commands.clear_index.Command.handle", return_value="") def test_rebuild_index_nocommit(self, *mocks): call_command("rebuild_index", interactive=False, commit=False) for m in mocks: self.assertEqual(m.call_count, 1) args, kwargs = m.call_args self.assertIn("commit", kwargs) self.assertEqual(False, kwargs["commit"]) @patch("haystack.management.commands.clear_index.Command.handle", return_value="") @patch("haystack.management.commands.update_index.Command.handle", return_value="") def test_rebuild_index_nocommit_two(self, update_mock, clear_mock): """ Confirm that command-line option parsing produces the same results as using call_command() directly, mostly as a sanity check for the logic in rebuild_index which combines the option_lists for its component commands. 
""" from haystack.management.commands.rebuild_index import Command Command().run_from_argv( ["django-admin.py", "rebuild_index", "--noinput", "--nocommit"] ) for m in (clear_mock, update_mock): self.assertEqual(m.call_count, 1) args, kwargs = m.call_args self.assertIn("commit", kwargs) self.assertEqual(False, kwargs["commit"]) args, kwargs = clear_mock.call_args self.assertIn("interactive", kwargs) self.assertIs(kwargs["interactive"], False)
CoreManagementCommandsTestCase
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/cloud_sql.py
{ "start": 1414, "end": 1672 }
class ____(BaseGoogleLink): """Helper class for constructing Cloud SQL Instance Database Link.""" name = "Cloud SQL Instance Database" key = "cloud_sql_instance_database" format_str = CLOUD_SQL_INSTANCE_DATABASE_LINK
CloudSQLInstanceDatabaseLink
python
getsentry__sentry
src/sentry/new_migrations/monkey/__init__.py
{ "start": 2281, "end": 5374 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = False %(replaces_str)s%(initial_str)s dependencies = [ %(dependencies)s\ ] operations = [ %(operations)s\ ] """ if VERSION[:2] < (2, 2): SENTRY_MIGRATION_TEMPLATE = f"""\ # Generated by Django %(version)s on %(timestamp)s {SENTRY_MIGRATION_TEMPLATE} """ else: SENTRY_MIGRATION_TEMPLATE = f"""\ %(migration_header)s{SENTRY_MIGRATION_TEMPLATE} """ def _ensure_patched[**P](f: Callable[P, None]) -> Callable[P, None]: def _ensure_patched_impl(*args: P.args, **kwargs: P.kwargs) -> None: module = type(args[0]).__module__ if not module.startswith("sentry."): raise AssertionError(f"unexpectedly unpatched: {module}") return f(*args, **kwargs) return _ensure_patched_impl def monkey_migrations() -> None: from django.core.management.commands import migrate from django.db import models # This import needs to be below the other imports for `executor` and `writer` so # that we can successfully monkeypatch them. 
from django.db.migrations import executor, migration, writer executor.MigrationExecutor.__init__ = _ensure_patched(executor.MigrationExecutor.__init__) # type: ignore[method-assign] # monkeypatch Django's migration executor and template. migrate.MigrationExecutor = executor.MigrationExecutor = SentryMigrationExecutor # type: ignore[attr-defined, misc] migration.Migration.initial = None writer.MIGRATION_TEMPLATE = SENTRY_MIGRATION_TEMPLATE models.Field.deconstruct = deconstruct # type: ignore[method-assign] from django.db.migrations import graph, state from sentry.new_migrations.monkey.state import SentryProjectState # XXX: our patched ProjectState isn't being used in a lot of places! # state.ProjectState.__init__ = _ensure_patched(state.ProjectState.__init__) # type: ignore[method-assign] state.ProjectState = SentryProjectState # type: ignore[misc] graph.ProjectState = SentryProjectState # type: ignore[attr-defined] executor.ProjectState = SentryProjectState # type: ignore[attr-defined]
Migration
python
openai__openai-python
examples/parsing.py
{ "start": 159, "end": 700 }
class ____(BaseModel): steps: List[Step] final_answer: str client = OpenAI() completion = client.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ {"role": "system", "content": "You are a helpful math tutor."}, {"role": "user", "content": "solve 8x + 31 = 2"}, ], response_format=MathResponse, ) message = completion.choices[0].message if message.parsed: rich.print(message.parsed.steps) print("answer: ", message.parsed.final_answer) else: print(message.refusal)
MathResponse
python
pytorch__pytorch
torch/_inductor/fx_utils.py
{ "start": 1848, "end": 13288 }
class ____: """ The main idea here is that it's difficult to maintain accurate fake tensors (our primary form of metadata) for each node in our graph as we transform it. The most reliable way to obtain this information is by rerunning faketensor propagation. However, in general, faketensor propagation is fairly expensive. So, instead we'd like to only rerun faketensor propagation on nodes that have changed. In order to detect which nodes have changed, we first hash its node, target, and argument lists (which are immutable in FX). Then, whenever we call incremental_update, we check which FX nodes have a new hash, and recompute the faketensor metadata for that node. Then, we continue to recursively compute the faketensors for all users until the fake tensors stop changing. """ def __init__(self, graph: torch.fx.Graph) -> None: self.processed_hashes = OrderedSet[Any]() self.graph = graph for node in self.graph.nodes: self.processed_hashes.add(self.hash_node(node)) def hash_node(self, node: torch.fx.Node): # todo(chilli): Not a great hash function return (node, node.target, id(node.args), id(node.kwargs)) def incremental_update(self): """Update FakeTensors on self.graph. 
We will try to do the minimum amount of work.""" existing_storages: defaultdict[Optional[int], int] = defaultdict(int) for node in self.graph.nodes: existing_storages[get_node_storage(node)] += 1 def is_intlist_same(new, old): return statically_known_true(sym_eq(new, old)) def is_fake_tensor_same(new, old, *, node): if type(new) is not type(old): return False if isinstance(new, (list, tuple)): if len(new) != len(old): return False return all( is_fake_tensor_same(new_i, old_i, node=node) for new_i, old_i in zip(new, old) ) if new is None: return old is None if not isinstance(new, torch.Tensor): assert isinstance(new, (torch.SymInt, torch.SymBool, torch.SymFloat)), ( f"Unknown type {type(new)} in {self.graph}" ) return ( new.node.shape_env._maybe_evaluate_static( sympy.Eq(new.node.expr, old.node.expr) ) == sympy.true ) if not is_intlist_same(new.shape, old.shape) or new.layout != old.layout: return False if new.layout == torch.strided and ( not is_intlist_same(new.stride(), old.stride()) or not statically_known_true( new.storage_offset() == old.storage_offset() ) ): return False if new.device != old.device: return False if get_storage(new) == get_storage(old): return True def any_user_may_alias(node): if not isinstance(node.meta["val"], torch.Tensor): # analysis too complicated on lists, can support in the future return True for user in node.users: if not ( isinstance( user.target, (torch._ops.OpOverload, torch._ops.HigherOrderOperator), ) or user.target is torch._inductor.fx_passes.reinplace._generalized_scatter ): return True if isinstance(user.target, torch._ops.HigherOrderOperator): # HOPs that survive until inductor are all non-aliasing HOPs. # We will likely never support HOPs that are aliasing. continue # Strategy: do a FakeTensor prop, see if the storage aliases. # If Inductor ever gets tighter invariants on OpOverloads # (that is, we ban things like torch.ops.aten.reshape calls in the graph), # Then this could just be a fast schema lookup. 
is_valid, args, kwargs = get_fake_args_kwargs(user) if not is_valid: return True with ( V.fake_mode, enable_python_dispatcher(), contextlib.ExitStack() as stack, ): # Ignore unbacked symbols (if they exist): we're making # this FakeTensor and then throwing it away. shape_env = V.fake_mode.shape_env if shape_env is not None: stack.enter_context( shape_env.ignore_fresh_unbacked_symbols() ) new_fake_tensor = user.target(*args, **kwargs) if not isinstance(new_fake_tensor, torch.Tensor): # analysis too complicated on lists, can support in the future return True if get_storage(new_fake_tensor) == get_storage(node.meta["val"]): return True return False # This is the case where it returns a completely fresh storage that's used nowhere else. # If the FakeTensor's storage is fresh and none of the node's users can alias it, then # we don't need to update this node. if ( existing_storages[get_storage(old)] == 1 and get_storage(new) not in existing_storages and not any_user_may_alias(node) ): return True return False def should_process_node(node): # node.target for nodes returning true from this function # are called under fake mode and does not work for inductor # lowerings. We check if the node.target is an aten operator # or operator.getitem which is used when returning multiple # tensors from an op. return node.op == "call_function" and ( isinstance(node.target, torch._ops.OpOverload) or node.target is operator.getitem or node.target is torch._inductor.fx_passes.reinplace._generalized_scatter ) to_process = OrderedSet[int]() for node in self.graph.nodes: # NB: Be very careful about skipping nodes (via continues) here # and ask for a careful review when changing this code. The # consequence for incorrect FakeTensor metadata is difficult-to-debug # silent incorrectness. 
if ( self.hash_node(node) in self.processed_hashes and id(node) not in to_process ): continue if not should_process_node(node): continue is_valid, args, kwargs = get_fake_args_kwargs(node) if not is_valid: continue with V.fake_mode, enable_python_dispatcher(): new_fake_tensor = node.target(*args, **kwargs) if "val" in node.meta and is_fake_tensor_same( new_fake_tensor, node.meta["val"], node=node ): continue rebind_unbacked(V.fake_mode.shape_env, node, new_fake_tensor) node.meta["val"] = new_fake_tensor if (shape_env := V.fake_mode.shape_env) and ( symbol_to_path := compute_unbacked_bindings(shape_env, new_fake_tensor) ): # Refresh the bindings to the new symbols node.meta["unbacked_bindings"] = symbol_to_path existing_storages[get_node_storage(node)] += 1 to_process.update([id(user) for user in node.users]) self.processed_hashes.add(self.hash_node(node)) def get_storage(t: torch.Tensor) -> int: return t.untyped_storage()._cdata def get_node_storage(node: torch.fx.Node) -> Optional[int]: if "val" not in node.meta: return None if not isinstance(node.meta["val"], torch.Tensor): return None if not torch._C._has_storage(node.meta["val"]): return None return get_storage(node.meta["val"]) def get_fake(x): if isinstance(x, torch.fx.Node): if "val" not in x.meta: return x return x.meta["val"] return x def get_fake_args_kwargs(x: torch.fx.Node) -> tuple[bool, tuple[Any], dict[str, Any]]: """ First value returns a boolean if any of the input nodes don't have a faketensor. """ args, kwargs = tree_map(get_fake, (x.args, x.kwargs)) if any( isinstance(a, torch.fx.Node) for a in pytree.arg_tree_leaves(*args, **kwargs) ): return False, args, kwargs return True, args, kwargs def is_node_realized(node: torch.fx.Node) -> bool: """Returns true if a node is always realized when lowered to inductor IR. NOTE: This may return some false negatives. e.g. it doesn't handle buffers realized heuristically during lowering, or buffers realized indirectly through view ops. 
""" from torch._inductor.lowering import fallbacks, needs_realized_inputs def is_buffer(node: torch.fx.Node) -> bool: if node.op == "call_function" and node.target is operator.getitem: # For nodes with multiple outputs, we get the fx graph: # foo = torch.ops.aten.foo(...) # getitem = foo[0] # getitem_1 = foo[1] # where we need to check if foo is a fallback kernel return is_buffer(node.args[0]) # type: ignore[arg-type] return node.op in ("placeholder", "output") or node.target in fallbacks if is_buffer(node): return True def realizes_inputs(node: torch.fx.Node) -> bool: return node.op == "output" or node.target in needs_realized_inputs if any(realizes_inputs(user) for user in node.users): return True # Otherwise, assume node isn't realized return False def count_flops_fx(node: torch.fx.Node) -> Optional[int]: if not countable_fx(node) or isinstance(node.target, str): return None with FakeTensorMode(allow_non_fake_inputs=True): success, args, kwargs = get_fake_args_kwargs(node) if success: with torch.utils.flop_counter.FlopCounterMode( display=False ) as flop_counter_mode: node.target(*args, **kwargs) counted_flops = flop_counter_mode.get_total_flops() return counted_flops return None def countable_fx(node: torch.fx.Node) -> bool: """ Whether or not we can count the flops of an FX node. """ assert isinstance(node, torch.fx.Node) if not hasattr(node, "target"): return False target = node.target if not hasattr(target, "overloadpacket"): return target in flop_registry packet = target.overloadpacket return packet in flop_registry
FakeTensorUpdater
python
huggingface__transformers
src/transformers/models/llava_onevision/modeling_llava_onevision.py
{ "start": 2046, "end": 3396 }
class ____(BaseModelOutputWithPast): r""" past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. image_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`. image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. video_hidden_states (`torch.FloatTensor`, *optional*): A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`. video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state. """ image_hidden_states: Optional[torch.FloatTensor] = None video_hidden_states: Optional[torch.FloatTensor] = None @dataclass @auto_docstring( custom_intro=""" Base class for LlavaOnevision causal language model (or autoregressive) outputs. """ )
LlavaOnevisionModelOutputWithPast
python
huggingface__transformers
tests/models/superglue/test_image_processing_superglue.py
{ "start": 5029, "end": 28028 }
class ____(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = SuperGlueImageProcessor if is_vision_available() else None fast_image_processing_class = SuperGlueImageProcessorFast if is_torchvision_available() else None def setUp(self) -> None: super().setUp() self.image_processor_tester = SuperGlueImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processing(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_grayscale")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 480, "width": 640}) image_processor = image_processing_class.from_dict( self.image_processor_dict, size={"height": 42, "width": 42} ) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) @unittest.skip(reason="SuperPointImageProcessor is always supposed to return a grayscaled image") def test_call_numpy_4_channels(self): pass def test_number_and_format_of_images_in_input(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) # Cases where the number of images and the format of lists in the input is correct image_input = self.image_processor_tester.prepare_image_inputs(pairs=False, batch_size=2) image_processed = image_processor.preprocess(image_input, return_tensors="pt") self.assertEqual((1, 2, 3, 480, 640), 
tuple(image_processed["pixel_values"].shape)) image_input = self.image_processor_tester.prepare_image_inputs(pairs=True, batch_size=2) image_processed = image_processor.preprocess(image_input, return_tensors="pt") self.assertEqual((1, 2, 3, 480, 640), tuple(image_processed["pixel_values"].shape)) image_input = self.image_processor_tester.prepare_image_inputs(pairs=True, batch_size=4) image_processed = image_processor.preprocess(image_input, return_tensors="pt") self.assertEqual((2, 2, 3, 480, 640), tuple(image_processed["pixel_values"].shape)) image_input = self.image_processor_tester.prepare_image_inputs(pairs=True, batch_size=6) image_processed = image_processor.preprocess(image_input, return_tensors="pt") self.assertEqual((3, 2, 3, 480, 640), tuple(image_processed["pixel_values"].shape)) # Cases where the number of images or the format of lists in the input is incorrect ## List of 4 images image_input = self.image_processor_tester.prepare_image_inputs(pairs=False, batch_size=4) with self.assertRaises(ValueError) as cm: image_processor.preprocess(image_input, return_tensors="pt") self.assertEqual(ValueError, cm.exception.__class__) ## List of 3 images image_input = self.image_processor_tester.prepare_image_inputs(pairs=False, batch_size=3) with self.assertRaises(ValueError) as cm: image_processor.preprocess(image_input, return_tensors="pt") self.assertEqual(ValueError, cm.exception.__class__) ## List of 2 pairs and 1 image image_input = self.image_processor_tester.prepare_image_inputs(pairs=True, batch_size=3) with self.assertRaises(ValueError) as cm: image_processor.preprocess(image_input, return_tensors="pt") self.assertEqual(ValueError, cm.exception.__class__) @parameterized.expand( [ ([random_array((3, 100, 200)), random_array((3, 100, 200))], (1, 2, 3, 480, 640)), ([[random_array((3, 100, 200)), random_array((3, 100, 200))]], (1, 2, 3, 480, 640)), ([random_tensor((3, 100, 200)), random_tensor((3, 100, 200))], (1, 2, 3, 480, 640)), ([random_tensor((3, 100, 
200)), random_tensor((3, 100, 200))], (1, 2, 3, 480, 640)), ], ) def test_valid_image_shape_in_input(self, image_input, output): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) image_processed = image_processor.preprocess(image_input, return_tensors="pt") self.assertEqual(output, tuple(image_processed["pixel_values"].shape)) @parameterized.expand( [ (random_array((3, 100, 200)),), ([random_array((3, 100, 200))],), (random_array((1, 3, 100, 200)),), ([[random_array((3, 100, 200))]],), ([[random_array((3, 100, 200))], [random_array((3, 100, 200))]],), ([random_array((1, 3, 100, 200)), random_array((1, 3, 100, 200))],), (random_array((1, 1, 3, 100, 200)),), ], ) def test_invalid_image_shape_in_input(self, image_input): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) with self.assertRaises(ValueError) as cm: image_processor(image_input, return_tensors="pt") self.assertEqual(ValueError, cm.exception.__class__) def test_input_images_properly_paired(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs() pre_processed_images = image_processor(image_inputs, return_tensors="pt") self.assertEqual(len(pre_processed_images["pixel_values"].shape), 5) self.assertEqual(pre_processed_images["pixel_values"].shape[1], 2) def test_input_not_paired_images_raises_error(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs(pairs=False) with self.assertRaises(ValueError): image_processor(image_inputs[0]) def test_input_image_properly_converted_to_grayscale(self): for image_processing_class in self.image_processor_list: 
image_processor = image_processing_class.from_dict(self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs() pre_processed_images = image_processor(image_inputs, return_tensors="pt") for image_pair in pre_processed_images["pixel_values"]: for image in image_pair: self.assertTrue( torch.all(image[0, ...] == image[1, ...]) and torch.all(image[1, ...] == image[2, ...]) ) def test_call_numpy(self): # Test overwritten because SuperGlueImageProcessor combines images by pair to feed it into SuperGlue # Initialize image_processing for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) # create random numpy tensors image_pairs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image_pair in image_pairs: self.assertEqual(len(image_pair), 2) expected_batch_size = int(self.image_processor_tester.batch_size / 2) # Test with 2 images encoded_images = image_processing(image_pairs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs[0]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test with list of pairs encoded_images = image_processing(image_pairs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs) self.assertEqual(tuple(encoded_images.shape), (expected_batch_size, *expected_output_image_shape)) # Test without paired images image_pairs = self.image_processor_tester.prepare_image_inputs( equal_resolution=False, numpify=True, pairs=False ) with self.assertRaises(ValueError): image_processing(image_pairs, return_tensors="pt").pixel_values def test_call_pil(self): # Test overwritten because SuperGlueImageProcessor combines images by pair to feed it into SuperGlue # Initialize image_processing for image_processing_class in 
self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) # create random PIL images image_pairs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image_pair in image_pairs: self.assertEqual(len(image_pair), 2) expected_batch_size = int(self.image_processor_tester.batch_size / 2) # Test with 2 images encoded_images = image_processing(image_pairs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs[0]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test with list of pairs encoded_images = image_processing(image_pairs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs) self.assertEqual(tuple(encoded_images.shape), (expected_batch_size, *expected_output_image_shape)) # Test without paired images image_pairs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, pairs=False) with self.assertRaises(ValueError): image_processing(image_pairs, return_tensors="pt").pixel_values def test_call_pytorch(self): # Test overwritten because SuperGlueImageProcessor combines images by pair to feed it into SuperGlue # Initialize image_processing for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_pairs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image_pair in image_pairs: self.assertEqual(len(image_pair), 2) expected_batch_size = int(self.image_processor_tester.batch_size / 2) # Test with 2 images encoded_images = image_processing(image_pairs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs[0]) self.assertEqual(tuple(encoded_images.shape), (1, 
*expected_output_image_shape)) # Test with list of pairs encoded_images = image_processing(image_pairs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs) self.assertEqual(tuple(encoded_images.shape), (expected_batch_size, *expected_output_image_shape)) # Test without paired images image_pairs = self.image_processor_tester.prepare_image_inputs( equal_resolution=False, torchify=True, pairs=False ) with self.assertRaises(ValueError): image_processing(image_pairs, return_tensors="pt").pixel_values def test_image_processor_with_list_of_two_images(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) image_pairs = self.image_processor_tester.prepare_image_inputs( equal_resolution=False, numpify=True, batch_size=2, pairs=False ) self.assertEqual(len(image_pairs), 2) self.assertTrue(isinstance(image_pairs[0], np.ndarray)) self.assertTrue(isinstance(image_pairs[1], np.ndarray)) expected_batch_size = 1 encoded_images = image_processing(image_pairs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_pairs[0]) self.assertEqual(tuple(encoded_images.shape), (expected_batch_size, *expected_output_image_shape)) @require_torch def test_post_processing_keypoint_matching(self): def check_post_processed_output(post_processed_output, image_pair_size): for post_processed_output, (image_size0, image_size1) in zip(post_processed_output, image_pair_size): self.assertTrue("keypoints0" in post_processed_output) self.assertTrue("keypoints1" in post_processed_output) self.assertTrue("matching_scores" in post_processed_output) keypoints0 = post_processed_output["keypoints0"] keypoints1 = post_processed_output["keypoints1"] all_below_image_size0 = torch.all(keypoints0[:, 0] <= image_size0[1]) and torch.all( keypoints0[:, 1] <= image_size0[0] ) 
all_below_image_size1 = torch.all(keypoints1[:, 0] <= image_size1[1]) and torch.all( keypoints1[:, 1] <= image_size1[0] ) all_above_zero0 = torch.all(keypoints0[:, 0] >= 0) and torch.all(keypoints0[:, 1] >= 0) all_above_zero1 = torch.all(keypoints0[:, 0] >= 0) and torch.all(keypoints0[:, 1] >= 0) self.assertTrue(all_below_image_size0) self.assertTrue(all_below_image_size1) self.assertTrue(all_above_zero0) self.assertTrue(all_above_zero1) all_scores_different_from_minus_one = torch.all(post_processed_output["matching_scores"] != -1) self.assertTrue(all_scores_different_from_minus_one) for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) image_inputs = self.image_processor_tester.prepare_image_inputs() pre_processed_images = image_processor.preprocess(image_inputs, return_tensors="pt") outputs = self.image_processor_tester.prepare_keypoint_matching_output(**pre_processed_images) tuple_image_sizes = [ ((image_pair[0].size[0], image_pair[0].size[1]), (image_pair[1].size[0], image_pair[1].size[1])) for image_pair in image_inputs ] tuple_post_processed_outputs = image_processor.post_process_keypoint_matching(outputs, tuple_image_sizes) check_post_processed_output(tuple_post_processed_outputs, tuple_image_sizes) tensor_image_sizes = torch.tensor( [(image_pair[0].size, image_pair[1].size) for image_pair in image_inputs] ).flip(2) tensor_post_processed_outputs = image_processor.post_process_keypoint_matching(outputs, tensor_image_sizes) check_post_processed_output(tensor_post_processed_outputs, tensor_image_sizes) @require_torch def test_post_processing_keypoint_matching_with_padded_match_indices(self): """ Test that post_process_keypoint_matching correctly handles matches pointing to padded keypoints. This tests the edge case where a match index points beyond the actual number of real keypoints, which would cause an out-of-bounds error without proper filtering. 
""" for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) # Create a specific scenario with intentional padding issues batch_size = 1 max_number_keypoints = 50 # Image 0 has 10 real keypoints, image 1 has only 5 real keypoints num_keypoints0 = 10 num_keypoints1 = 5 mask = torch.zeros((batch_size, 2, max_number_keypoints), dtype=torch.int) keypoints = torch.zeros((batch_size, 2, max_number_keypoints, 2)) matches = torch.full((batch_size, 2, max_number_keypoints), -1, dtype=torch.int) scores = torch.zeros((batch_size, 2, max_number_keypoints)) # Set up real keypoints mask[0, 0, :num_keypoints0] = 1 mask[0, 1, :num_keypoints1] = 1 keypoints[0, 0, :num_keypoints0] = torch.rand((num_keypoints0, 2)) keypoints[0, 1, :num_keypoints1] = torch.rand((num_keypoints1, 2)) # Create a match that points to a padded keypoint in image 1 # This would cause IndexError before the fix matches[0, 0, 0] = 8 # Points to index 8, but image 1 only has 5 real keypoints (indices 0-4) scores[0, 0, 0] = 0.9 # High confidence score # Create a valid match for comparison matches[0, 0, 1] = 2 # Points to index 2, which is valid scores[0, 0, 1] = 0.8 outputs = SuperGlueKeypointMatchingOutput( mask=mask, keypoints=keypoints, matches=matches, matching_scores=scores ) image_sizes = [((480, 640), (480, 640))] # This should not raise an IndexError and should filter out the invalid match post_processed = image_processor.post_process_keypoint_matching(outputs, image_sizes) # Check that we got results self.assertEqual(len(post_processed), 1) result = post_processed[0] # Should only have 1 valid match (index 1), the out-of-bounds match (index 0) should be filtered out self.assertEqual(result["keypoints0"].shape[0], 1) self.assertEqual(result["keypoints1"].shape[0], 1) self.assertEqual(result["matching_scores"].shape[0], 1) # Verify the match score corresponds to the valid match 
self.assertAlmostEqual(result["matching_scores"][0].item(), 0.8, places=5) @unittest.skip(reason="Many failing cases. This test needs a more deep investigation.") def test_fast_is_faster_than_slow(self): """Override the generic test since EfficientLoFTR requires image pairs.""" if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast speed test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast speed test as one of the image processors is not defined") # Create image pairs for speed test dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=False) image_processor_slow = self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) # Time slow processor start_time = time.time() for _ in range(10): _ = image_processor_slow(dummy_images, return_tensors="pt") slow_time = time.time() - start_time # Time fast processor start_time = time.time() for _ in range(10): _ = image_processor_fast(dummy_images, return_tensors="pt") fast_time = time.time() - start_time # Fast should be faster (or at least not significantly slower) self.assertLessEqual( fast_time, slow_time * 1.2, "Fast processor should not be significantly slower than slow processor" ) @require_vision @require_torch def test_slow_fast_equivalence(self): if not self.test_slow_image_processor or not self.test_fast_image_processor: self.skipTest(reason="Skipping slow/fast equivalence test") if self.image_processing_class is None or self.fast_image_processing_class is None: self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined") dummy_image = self.image_processor_tester.prepare_image_inputs( equal_resolution=False, numpify=True, batch_size=2, pairs=False ) image_processor_slow = 
self.image_processing_class(**self.image_processor_dict) image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict) encoding_slow = image_processor_slow(dummy_image, return_tensors="pt") encoding_fast = image_processor_fast(dummy_image, return_tensors="pt") self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values) @slow @require_torch_accelerator @require_vision @pytest.mark.torch_compile_test def test_can_compile_fast_image_processor(self): """Override the generic test since EfficientLoFTR requires image pairs.""" if self.fast_image_processing_class is None: self.skipTest("Skipping compilation test as fast image processor is not defined") if version.parse(torch.__version__) < version.parse("2.3"): self.skipTest(reason="This test requires torch >= 2.3 to run.") torch.compiler.reset() input_image = self.image_processor_tester.prepare_image_inputs(equal_resolution=True, torchify=False) image_processor = self.fast_image_processing_class(**self.image_processor_dict) output_eager = image_processor(input_image, device=torch_device, return_tensors="pt") image_processor = torch.compile(image_processor, mode="reduce-overhead") output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt") self._assert_slow_fast_tensors_equivalence( output_eager.pixel_values, output_compiled.pixel_values, atol=1e-4, rtol=1e-4, mean_atol=1e-5 )
SuperGlueImageProcessingTest
python
kamyu104__LeetCode-Solutions
Python/check-knight-tour-configuration.py
{ "start": 62, "end": 628 }
class ____(object): def checkValidGrid(self, grid): """ :type grid: List[List[int]] :rtype: bool """ if grid[0][0]: return False lookup = [None]*(len(grid)*len(grid[0])) for i in xrange(len(grid)): for j in xrange(len(grid[0])): lookup[grid[i][j]] = (i, j) return all(sorted([abs(lookup[i+1][0]-lookup[i][0]), abs(lookup[i+1][1]-lookup[i][1])]) == [1, 2] for i in xrange(len(lookup)-1)) # Time: O(m * n) # Space: O(m * n) # hash table, simulation
Solution
python
run-llama__llama_index
llama-index-core/llama_index/core/readers/base.py
{ "start": 1798, "end": 2106 }
class ____(BaseReader, BaseComponent): """Serialiable Data Loader with Pydantic.""" model_config = ConfigDict(arbitrary_types_allowed=True) is_remote: bool = Field( default=False, description="Whether the data is loaded from a remote API or a local file.", )
BasePydanticReader
python
readthedocs__readthedocs.org
readthedocs/subscriptions/tests/test_views.py
{ "start": 1376, "end": 8740 }
class ____(PaymentMixin, TestCase): """Subscription view tests.""" def setUp(self): super().setUp() self.user = get(User) self.organization = get(Organization, stripe_id="123", owners=[self.user]) self.stripe_product = get( djstripe.Product, id="prod_a1b2c3", ) self.stripe_price = get( djstripe.Price, id=settings.RTD_ORG_DEFAULT_STRIPE_SUBSCRIPTION_PRICE, unit_amount=50000, product=self.stripe_product, ) self.extra_product = get( djstripe.Product, id="prod_extra_builder", ) self.extra_price = get( djstripe.Price, id="price_extra_builder", unit_amount=50000, product=self.extra_product, ) self.stripe_subscription = self._create_stripe_subscription( customer_id=self.organization.stripe_id, subscription_id="sub_a1b2c3d4", ) self.stripe_customer = self.stripe_subscription.customer self.organization.stripe_customer = self.stripe_customer self.organization.stripe_subscription = self.stripe_subscription self.organization.save() self.client.force_login(self.user) def _create_stripe_subscription( self, customer_id="cus_a1b2c3", subscription_id="sub_a1b2c3" ): stripe_customer = get( djstripe.Customer, id=customer_id, ) stripe_subscription = get( djstripe.Subscription, id=subscription_id, start_date=timezone.now(), current_period_end=timezone.now() + timezone.timedelta(days=30), trial_end=timezone.now() + timezone.timedelta(days=30), status=SubscriptionStatus.active, customer=stripe_customer, ) get( djstripe.SubscriptionItem, price=self.stripe_price, quantity=1, subscription=stripe_subscription, ) return stripe_subscription def test_active_subscription(self): resp = self.client.get( reverse("subscription_detail", args=[self.organization.slug]) ) self.assertEqual(resp.status_code, 200) self.assertEqual(resp.context["stripe_subscription"], self.stripe_subscription) self.assertContains(resp, "active") self.assertNotContains(resp, "Extra products:") # The subscribe form isn't shown, but the manage susbcription button is. 
self.assertContains(resp, "Manage subscription") self.assertNotContains(resp, "Start subscription") def test_active_subscription_with_extra_product(self): get( djstripe.SubscriptionItem, price=self.extra_price, quantity=2, subscription=self.stripe_subscription, ) resp = self.client.get( reverse("subscription_detail", args=[self.organization.slug]) ) self.assertEqual(resp.status_code, 200) self.assertEqual(resp.context["stripe_subscription"], self.stripe_subscription) self.assertContains(resp, "active") self.assertContains(resp, "Extra products:") # The subscribe form isn't shown, but the manage susbcription button is. self.assertContains(resp, "Manage subscription") self.assertNotContains(resp, "Start subscription") @requests_mock.Mocker(kw="mock_request") def test_manage_subscription(self, mock_request): payload = { "url": "https://billing.stripe.com/session/a1b2c3", } mock_request.post( "https://api.stripe.com/v1/billing_portal/sessions", json=payload ) response = self.client.post( reverse( "stripe_customer_portal", kwargs={"slug": self.organization.slug}, ), ) self.assertRedirects( response, payload.get("url"), fetch_redirect_response=False, ) @mock.patch("readthedocs.subscriptions.utils.get_stripe_client") def test_user_without_subscription(self, stripe_client): stripe_subscription = self._create_stripe_subscription() stripe_customer = stripe_subscription.customer stripe_customer.subscribe = mock.MagicMock() stripe_customer.subscribe.return_value = stripe_subscription self.organization.refresh_from_db() self.organization.stripe_customer = stripe_customer self.organization.stripe_subscription = None self.organization.save() self.assertFalse(hasattr(self.organization, "subscription")) self.assertIsNone(self.organization.stripe_subscription) resp = self.client.get( reverse("subscription_detail", args=[self.organization.slug]) ) self.assertEqual(resp.status_code, 200) self.organization.refresh_from_db() self.assertEqual(self.organization.stripe_customer, 
stripe_customer) self.assertEqual(self.organization.stripe_subscription, stripe_subscription) stripe_client().assert_not_called() @mock.patch( "readthedocs.subscriptions.utils.djstripe.Customer.sync_from_stripe_data" ) @mock.patch("readthedocs.subscriptions.utils.get_stripe_client") def test_user_without_subscription_and_customer( self, stripe_client, sync_from_stripe_data_mock ): stripe_subscription = self._create_stripe_subscription() stripe_customer = stripe_subscription.customer stripe_customer.subscribe = mock.MagicMock() stripe_customer.subscribe.return_value = stripe_subscription sync_from_stripe_data_mock.return_value = stripe_customer # When stripe_customer is None, a new customer is created. self.organization.stripe_customer = None self.organization.stripe_subscription = None self.organization.save() self.organization.refresh_from_db() self.assertFalse(hasattr(self.organization, "subscription")) self.assertIsNone(self.organization.stripe_customer) self.assertIsNone(self.organization.stripe_subscription) resp = self.client.get( reverse("subscription_detail", args=[self.organization.slug]) ) self.assertEqual(resp.status_code, 200) self.organization.refresh_from_db() self.assertEqual(self.organization.stripe_id, "cus_a1b2c3") self.assertEqual(self.organization.stripe_customer, stripe_customer) self.assertEqual(self.organization.stripe_subscription, stripe_subscription) stripe_client().customers.create.assert_called_once() def test_user_with_canceled_subscription(self): self.stripe_subscription.status = SubscriptionStatus.canceled self.stripe_subscription.save() resp = self.client.get( reverse("subscription_detail", args=[self.organization.slug]) ) self.assertEqual(resp.status_code, 200) self.assertEqual(resp.context["stripe_subscription"], self.stripe_subscription) # The Manage subscription form isn't shown, but the Subscribe is. self.assertNotContains(resp, "Manage subscription") self.assertContains(resp, "Start subscription")
SubscriptionViewTests
python
ray-project__ray
python/ray/dashboard/modules/log/log_manager.py
{ "start": 772, "end": 1298 }
class ____(BaseModel): # The node id where the log file is located. node_id: str # The log file path name. Could be a relative path relative to ray's logging folder, # or an absolute path. filename: str # Start offset in the log file to stream from. None to indicate beginning of # the file, or determined by last tail lines. start_offset: Optional[int] # End offset in the log file to stream from. None to indicate the end of the file. end_offset: Optional[int]
ResolvedStreamFileInfo
python
ray-project__ray
python/ray/train/v2/_internal/state/state_manager.py
{ "start": 847, "end": 8150 }
class ____: """Manages the state of a train run and run attempts.""" def __init__(self) -> None: self._state_actor = get_or_create_state_actor() # NOTE: All runs and attempts are stored in memory. # This may be a memory issue for large runs. self._runs: Dict[str, TrainRun] = {} # {run_id: {attempt_id: TrainRunAttempt}} self._run_attempts: Dict[str, Dict[str, TrainRunAttempt]] = defaultdict(dict) def create_train_run( self, id: str, name: str, job_id: str, controller_actor_id: str, controller_log_file_path: str, ) -> None: run = TrainRun( id=id, name=name, job_id=job_id, status=RunStatus.INITIALIZING, status_detail=None, controller_actor_id=controller_actor_id, start_time_ns=current_time_ns(), controller_log_file_path=controller_log_file_path, ) self._runs[run.id] = run self._create_or_update_train_run(run) def update_train_run_scheduling( self, run_id: str, resize_decision: Optional[ResizeDecision] = None, ) -> None: if resize_decision is not None: status_detail = _get_scheduling_status_detail( resize_decision.num_workers, resize_decision.resources_per_worker ) else: status_detail = None run = self._runs[run_id] run.status = RunStatus.SCHEDULING run.status_detail = status_detail self._create_or_update_train_run(run) def update_train_run_running( self, run_id: str, ) -> None: run = self._runs[run_id] run.status = RunStatus.RUNNING run.status_detail = None self._create_or_update_train_run(run) def update_train_run_restarting( self, run_id: str, ) -> None: run = self._runs[run_id] run.status = RunStatus.RESTARTING run.status_detail = None self._create_or_update_train_run(run) def update_train_run_resizing( self, run_id: str, ) -> None: run = self._runs[run_id] run.status = RunStatus.RESIZING run.status_detail = None self._create_or_update_train_run(run) def update_train_run_finished( self, run_id: str, ): run = self._runs[run_id] run.status = RunStatus.FINISHED run.status_detail = None run.end_time_ns = current_time_ns() self._create_or_update_train_run(run) def 
update_train_run_errored( self, run_id: str, status_detail: str, ): run = self._runs[run_id] run.status = RunStatus.ERRORED run.status_detail = status_detail run.end_time_ns = current_time_ns() self._create_or_update_train_run(run) def update_train_run_aborted( self, run_id: str, ): run = self._runs[run_id] update_train_run_aborted(run=run, graceful=True) self._create_or_update_train_run(run) def create_train_run_attempt( self, run_id: str, attempt_id: str, num_workers: int, resources_per_worker: Dict[str, float], ) -> None: status_detail = _get_scheduling_status_detail(num_workers, resources_per_worker) resources = [ TrainResources(resources=resources_per_worker) for _ in range(num_workers) ] run_attempt = TrainRunAttempt( run_id=run_id, attempt_id=attempt_id, start_time_ns=current_time_ns(), status=RunAttemptStatus.PENDING, status_detail=status_detail, resources=resources, workers=[], # Not started yet. ) self._run_attempts[run_id][attempt_id] = run_attempt self._create_or_update_train_run_attempt(run_attempt) def update_train_run_attempt_running( self, run_id: str, attempt_id: str, workers: List[Worker] ) -> None: def _convert_worker(worker: Worker) -> TrainWorker: actor: ActorHandle = worker.actor distributed_context: DistributedContext = worker.distributed_context actor_metadata: ActorMetadata = worker.metadata return TrainWorker( world_rank=distributed_context.world_rank, local_rank=distributed_context.local_rank, node_rank=distributed_context.node_rank, actor_id=actor._actor_id.hex(), node_id=actor_metadata.node_id, node_ip=actor_metadata.node_ip, pid=actor_metadata.pid, gpu_ids=actor_metadata.gpu_ids, status=ActorStatus.ALIVE, resources=TrainResources(resources=worker.resources), log_file_path=worker.log_file_path, ) workers: List[TrainWorker] = [_convert_worker(worker) for worker in workers] run_attempt = self._run_attempts[run_id][attempt_id] run_attempt.status = RunAttemptStatus.RUNNING run_attempt.status_detail = None run_attempt.workers = workers 
self._create_or_update_train_run_attempt(run_attempt) def update_train_run_attempt_finished( self, run_id: str, attempt_id: str, ): run_attempt = self._run_attempts[run_id][attempt_id] run_attempt.status = RunAttemptStatus.FINISHED run_attempt.status_detail = None run_attempt.end_time_ns = current_time_ns() mark_workers_dead(run_attempt) self._create_or_update_train_run_attempt(run_attempt) def update_train_run_attempt_errored( self, run_id: str, attempt_id: str, status_detail: str, ): run_attempt = self._run_attempts[run_id][attempt_id] run_attempt.status = RunAttemptStatus.ERRORED run_attempt.status_detail = status_detail run_attempt.end_time_ns = current_time_ns() mark_workers_dead(run_attempt) self._create_or_update_train_run_attempt(run_attempt) def update_train_run_attempt_aborted( self, run_id: str, attempt_id: str, ): run_attempt = self._run_attempts[run_id][attempt_id] update_train_run_attempt_aborted(run_attempt=run_attempt, graceful=True) self._create_or_update_train_run_attempt(run_attempt) def _create_or_update_train_run(self, run: TrainRun) -> None: ref = self._state_actor.create_or_update_train_run.remote(run) # Block to avoid case where controller is dead but run is not terminal. if run.status.is_terminal(): ray.get(ref) def _create_or_update_train_run_attempt(self, run_attempt: TrainRunAttempt) -> None: # Block to avoid case where controller is dead but attempt is not terminal. ref = self._state_actor.create_or_update_train_run_attempt.remote(run_attempt) if run_attempt.status.is_terminal(): ray.get(ref) def _get_scheduling_status_detail( num_workers: int, resources_per_worker: Dict[str, float] ) -> str: return f"Scheduling {num_workers} workers, each requiring: {resources_per_worker}."
TrainStateManager
python
getsentry__sentry
src/sentry/relay/globalconfig.py
{ "start": 1233, "end": 1315 }
class ____(TypedDict): condition: RuleCondition value: str
SpanOpDefaultRule
python
weaviate__weaviate-python-client
weaviate/collections/classes/tenants.py
{ "start": 1857, "end": 3691 }
class ____(BaseModel): """Tenant class used to describe a tenant in Weaviate. Attributes: name: The name of the tenant. activity_status: TenantActivityStatus, default: "HOT" """ model_config = ConfigDict(populate_by_name=True) name: str activityStatusInternal: TenantActivityStatus = Field( default=TenantActivityStatus.ACTIVE, alias="activity_status", exclude=True, ) activityStatus: _TenantActivistatusServerValues = Field( init_var=False, default=_TenantActivistatusServerValues.HOT ) @property def activity_status(self) -> TenantActivityStatus: """Getter for the activity status of the tenant.""" return self.activityStatusInternal def model_post_init(self, __context: Any) -> None: # noqa: D102 self._model_post_init(user_input=True) def _model_post_init(self, user_input: bool) -> None: # noqa: D102 if self.activityStatusInternal == TenantActivityStatus.HOT: if user_input: _Warnings.deprecated_tenant_type("HOT", "ACTIVE") self.activityStatusInternal = TenantActivityStatus.ACTIVE elif self.activityStatusInternal == TenantUpdateActivityStatus.COLD: if user_input: _Warnings.deprecated_tenant_type("COLD", "INACTIVE") self.activityStatusInternal = TenantActivityStatus.INACTIVE elif self.activityStatusInternal == TenantUpdateActivityStatus.FROZEN: if user_input: _Warnings.deprecated_tenant_type("FROZEN", "OFFLOADED") self.activityStatusInternal = TenantActivityStatus.OFFLOADED if user_input: self.activityStatus = _TenantActivistatusServerValues.from_string( self.activityStatusInternal.value )
Tenant
python
getsentry__sentry
tests/sentry/middleware/test_access_log_middleware.py
{ "start": 1070, "end": 1215 }
class ____(Endpoint): permission_classes = (AllowAny,) def get(self, request): raise Exception("this is bad yo")
DummyFailEndpoint
python
kamyu104__LeetCode-Solutions
Python/find-the-number-of-subsequences-with-equal-gcd.py
{ "start": 854, "end": 2015 }
class ____(object): def subsequencePairCount(self, nums): """ :type nums: List[int] :rtype: int """ def count(g): return reduce(lambda accu, x: (accu+x)%MOD, (MU[i]*MU[j]*f[i*g][j*g] for i in xrange(1, mx//g+1) for j in xrange(1, mx//g+1)), 0) mx = max(nums) cnt = [0]*(mx+1) for x in nums: cnt[x] += 1 for i in xrange(1, mx+1): for j in xrange(i+i, mx+1, i): cnt[i] += cnt[j] f = [[0]*(mx+1) for _ in xrange(mx+1)] for g1 in xrange(1, mx+1): for g2 in xrange(g1, mx+1): l = LCM[g1][g2] c = cnt[l] if l < len(cnt) else 0 c1, c2 = cnt[g1], cnt[g2] f[g1][g2] = f[g2][g1] = (POW3[c]*POW2[(c1-c)+(c2-c)]-POW2[c1]-POW2[c2]+1)%MOD return reduce(lambda accu, x: (accu+x)%MOD, (count(g) for g in xrange(1, mx+1)), 0) # Time: O(mx^2 * (1 + 1/4 + 1/9 + ... + (1/mx)^2))) = O(mx^2 * pi^2/6), see https://en.wikipedia.org/wiki/Basel_problem # Time: O(n * r^2 * logr) # Space: O(r^2) import collections # dp, number theory
Solution
python
django__django
django/core/cache/backends/memcached.py
{ "start": 6252, "end": 6791 }
class ____(BaseMemcachedCache): """An implementation of a cache binding using pymemcache.""" def __init__(self, server, params): import pymemcache.serde super().__init__( server, params, library=pymemcache, value_not_found_exception=KeyError ) self._class = self._lib.HashClient self._options = { "allow_unicode_keys": True, "default_noreply": False, "serde": pymemcache.serde.pickle_serde, **self._options, }
PyMemcacheCache
python
huggingface__transformers
src/transformers/models/ministral/modeling_ministral.py
{ "start": 5640, "end": 8978 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config, layer_idx: int): super().__init__() self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None self.config = config self.layer_idx = layer_idx self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads) self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads self.scaling = self.head_dim**-0.5 self.attention_dropout = config.attention_dropout self.is_causal = True # Match Mistral: q/k/v do not have bias self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False) self.rotary_fn = apply_rotary_pos_emb self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None def forward( self, hidden_states: torch.Tensor, position_embeddings: tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor], past_key_values: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2) value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_values is not None: # sin and cos are specific to RoPE models; 
cache_position needed for the static cache cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs) attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, dropout=0.0 if not self.training else self.attention_dropout, scaling=self.scaling, sliding_window=self.sliding_window, # main diff with Llama **kwargs, ) attn_output = attn_output.reshape(*input_shape, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights @use_kernel_forward_from_hub("RMSNorm")
MinistralAttention
python
kamyu104__LeetCode-Solutions
Python/minimum-time-to-visit-disappearing-nodes.py
{ "start": 227, "end": 1223 }
class ____(object): def minimumTime(self, n, edges, disappear): """ :type n: int :type edges: List[List[int]] :type disappear: List[int] :rtype: List[int] """ INF = float("inf") def modified_dijkstra(start): best = [-1]*n best[start] = 0 min_heap = [(best[start], start)] while min_heap: curr, u = heapq.heappop(min_heap) if curr != best[u]: continue for v, w in adj[u]: if not curr+w < min(best[v] if best[v] != -1 else INF, disappear[v]): # modified continue best[v] = curr+w heapq.heappush(min_heap, (best[v], v)) return best adj = [[] for _ in xrange(n)] for u, v, w in edges: adj[u].append((v, w)) adj[v].append((u, w)) return modified_dijkstra(0)
Solution
python
huggingface__transformers
src/transformers/models/flava/modeling_flava.py
{ "start": 57207, "end": 57768 }
class ____(nn.Module): def __init__(self, in_size: int, out_size: int, num_layers: int, **kwargs): super().__init__() self.post_gain = 1 / (num_layers**2) if in_size != out_size: self.id_path = nn.Conv2d(in_size, out_size, kernel_size=1, padding=0) else: self.id_path = nn.Identity() self.res_path = FlavaImageCodebookResPath(in_size, out_size) def forward(self, x: torch.Tensor) -> torch.Tensor: return self.id_path(x) + self.post_gain * self.res_path(x)
FlavaImageCodebookBlock
python
tensorflow__tensorflow
tensorflow/python/keras/initializers/initializers_v2.py
{ "start": 28039, "end": 29314 }
class ____(VarianceScaling): """He normal initializer. Also available via the shortcut function `tf.keras.initializers.he_normal`. It draws samples from a truncated normal distribution centered on 0 with `stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the weight tensor. Examples: >>> # Standalone usage: >>> initializer = tf.keras.initializers.HeNormal() >>> values = initializer(shape=(2, 2)) >>> # Usage in a Keras layer: >>> initializer = tf.keras.initializers.HeNormal() >>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer) Args: seed: A Python integer. An initializer created with a given seed will always produce the same random tensor for a given shape and dtype. References: [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf)) """ def __init__(self, seed=None): super(HeNormal, self).__init__( scale=2., mode='fan_in', distribution='truncated_normal', seed=seed) def get_config(self): return {'seed': self.seed}
HeNormal
python
kamyu104__LeetCode-Solutions
Python/find-sorted-submatrices-with-maximum-element-at-most-k.py
{ "start": 46, "end": 1010 }
class ____(object): def countSubmatrices(self, grid, k): """ :type grid: List[List[int]] :type k: int :rtype: int """ def count(heights): result = curr = 0 stk = [] for i in xrange(len(heights)): while stk and heights[stk[-1]] >= heights[i]: j = stk.pop() curr -= (heights[j]-heights[i])*(j-(stk[-1] if stk else -1)) stk.append(i) curr += heights[i] result += curr return result result = 0 heights = [0]*len(grid) for j in reversed(range(len(grid[0]))): for i in xrange(len(grid)): heights[i] = 0 if grid[i][j] > k else heights[i]+1 if j+1 < len(grid[0]) and grid[i][j] >= grid[i][j+1] else 1 result += count(heights) return result # Time: O(m * n) # Space: O(m) # mono stack, dp
Solution
python
huggingface__transformers
src/transformers/models/imagegpt/modeling_imagegpt.py
{ "start": 12178, "end": 12849 }
class ____(nn.Module): def __init__(self, intermediate_size, config): super().__init__() embed_dim = config.hidden_size self.c_fc = Conv1D(intermediate_size, embed_dim) self.c_proj = Conv1D(embed_dim, intermediate_size) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
ImageGPTMLP
python
pydata__xarray
xarray/core/treenode.py
{ "start": 604, "end": 1379 }
class ____(PurePosixPath): """Represents a path from one node to another within a tree.""" def __init__(self, *pathsegments): if sys.version_info >= (3, 12): super().__init__(*pathsegments) else: super().__new__(PurePosixPath, *pathsegments) if self.drive: raise ValueError("NodePaths cannot have drives") if self.root not in ["/", ""]: raise ValueError( 'Root of NodePath can only be either "/" or "", with "" meaning the path is relative.' ) # TODO should we also forbid suffixes to avoid node names with dots in them? def absolute(self) -> Self: """Convert into an absolute path.""" return type(self)("/", *self.parts)
NodePath
python
pytorch__pytorch
test/test_static_runtime.py
{ "start": 5042, "end": 5232 }
class ____(nn.Module): def __init__(self) -> None: super().__init__() self.a = 11 self.b = 2 def forward(self, x): return self.a + self.b + x
SubModule
python
kamyu104__LeetCode-Solutions
Python/longest-common-prefix-of-k-strings-after-removal.py
{ "start": 1388, "end": 2826 }
class ____(object): def longestCommonPrefix(self, words, k): """ :type words: List[str] :type k: int :rtype: List[int] """ class Trie(object): def __init__(self): self.__root = self.__new_node() def __new_node(self): return {"cnt":0, "max":0} def update(self, w, d, k): path = [None]*(len(w)+1) path[0] = curr = self.__root for i, x in enumerate(w, 1): if x not in curr: curr[x] = self.__new_node() path[i] = curr = curr[x] for i in reversed(xrange(len(path))): curr = path[i] curr["cnt"] += d curr["max"] = i if curr["cnt"] >= k else 0 for x in curr.iterkeys(): if len(x) == 1: curr["max"] = max(curr["max"], curr[x]["max"]) def query(self): return self.__root["max"] trie = Trie() for w in words: trie.update(w, +1, k) result = [0]*len(words) for i in xrange(len(words)): trie.update(words[i], -1, k) result[i] = trie.query() trie.update(words[i], +1, k) return result # Time: O(n * l) # Space: O(t) # trie
Solution2
python
doocs__leetcode
solution/2100-2199/2180.Count Integers With Even Digit Sum/Solution2.py
{ "start": 0, "end": 244 }
class ____: def countEven(self, num: int) -> int: ans = num // 10 * 5 - 1 x, s = num // 10, 0 while x: s += x % 10 x //= 10 ans += (num % 10 + 2 - (s & 1)) >> 1 return ans
Solution
python
pallets__flask
tests/type_check/typing_route.py
{ "start": 788, "end": 2154 }
class ____(t.TypedDict): status: str @app.route("/typed-dict") def typed_dict() -> StatusJSON: return {"status": "ok"} @app.route("/generator") def hello_generator() -> t.Generator[str, None, None]: def show() -> t.Generator[str, None, None]: for x in range(100): yield f"data:{x}\n\n" return show() @app.route("/generator-expression") def hello_generator_expression() -> t.Iterator[bytes]: return (f"data:{x}\n\n".encode() for x in range(100)) @app.route("/iterator") def hello_iterator() -> t.Iterator[str]: return iter([f"data:{x}\n\n" for x in range(100)]) @app.route("/status") @app.route("/status/<int:code>") def tuple_status(code: int = 200) -> tuple[str, int]: return "hello", code @app.route("/status-enum") def tuple_status_enum() -> tuple[str, int]: return "hello", HTTPStatus.OK @app.route("/headers") def tuple_headers() -> tuple[str, dict[str, str]]: return "Hello, World!", {"Content-Type": "text/plain"} @app.route("/template") @app.route("/template/<name>") def return_template(name: str | None = None) -> str: return render_template("index.html", name=name) @app.route("/template") def return_template_stream() -> t.Iterator[str]: return stream_template("index.html", name="Hello") @app.route("/async") async def async_route() -> str: return "Hello"
StatusJSON
python
doocs__leetcode
solution/0900-0999/0996.Number of Squareful Arrays/Solution.py
{ "start": 0, "end": 731 }
class ____: def numSquarefulPerms(self, nums: List[int]) -> int: n = len(nums) f = [[0] * n for _ in range(1 << n)] for j in range(n): f[1 << j][j] = 1 for i in range(1 << n): for j in range(n): if i >> j & 1: for k in range(n): if (i >> k & 1) and k != j: s = nums[j] + nums[k] t = int(sqrt(s)) if t * t == s: f[i][j] += f[i ^ (1 << j)][k] ans = sum(f[(1 << n) - 1][j] for j in range(n)) for v in Counter(nums).values(): ans //= factorial(v) return ans
Solution
python
eventlet__eventlet
eventlet/green/http/client.py
{ "start": 58331, "end": 58391 }
class ____(ImproperConnectionState): pass
CannotSendRequest
python
sphinx-doc__sphinx
sphinx/domains/cpp/_ast.py
{ "start": 153316, "end": 163055 }
class ____(ASTBase): def __init__( self, objectType: str, directiveType: str | None = None, visibility: str | None = None, templatePrefix: ASTTemplateDeclarationPrefix | None = None, declaration: Any = None, trailingRequiresClause: ASTRequiresClause | None = None, semicolon: bool = False, ) -> None: self.objectType = objectType self.directiveType = directiveType self.visibility = visibility self.templatePrefix = templatePrefix self.declaration = declaration self.trailingRequiresClause = trailingRequiresClause self.semicolon = semicolon self.symbol: Symbol | None = None # set by CPPObject._add_enumerator_to_parent self.enumeratorScopedSymbol: Symbol | None = None # the cache assumes that by the time get_newest_id is called, no # further changes will be made to this object self._newest_id_cache: str | None = None def __eq__(self, other: object) -> bool: if not isinstance(other, ASTDeclaration): return NotImplemented return ( self.objectType == other.objectType and self.directiveType == other.directiveType and self.visibility == other.visibility and self.templatePrefix == other.templatePrefix and self.declaration == other.declaration and self.trailingRequiresClause == other.trailingRequiresClause and self.semicolon == other.semicolon and self.symbol == other.symbol and self.enumeratorScopedSymbol == other.enumeratorScopedSymbol ) def __hash__(self) -> int: return hash(( self.objectType, self.directiveType, self.visibility, self.templatePrefix, self.declaration, self.trailingRequiresClause, self.semicolon, self.symbol, self.enumeratorScopedSymbol, )) def clone(self) -> ASTDeclaration: template_prefix_clone = ( self.templatePrefix.clone() if self.templatePrefix else None ) trailing_requires_clasue_clone = ( self.trailingRequiresClause.clone() if self.trailingRequiresClause else None ) return ASTDeclaration( self.objectType, self.directiveType, self.visibility, template_prefix_clone, self.declaration.clone(), trailing_requires_clasue_clone, self.semicolon, ) @property 
def name(self) -> ASTNestedName: return self.declaration.name @property def function_params(self) -> list[ASTFunctionParameter]: if self.objectType != 'function': return None return self.declaration.function_params def get_id(self, version: int, prefixed: bool = True) -> str: if version == 1: if self.templatePrefix or self.trailingRequiresClause: raise NoOldIdError if self.objectType == 'enumerator' and self.enumeratorScopedSymbol: return self.enumeratorScopedSymbol.declaration.get_id(version) return self.declaration.get_id(version, self.objectType, self.symbol) # version >= 2 if self.objectType == 'enumerator' and self.enumeratorScopedSymbol: return self.enumeratorScopedSymbol.declaration.get_id(version, prefixed) if prefixed: res = [_id_prefix[version]] else: res = [] # (See also https://github.com/sphinx-doc/sphinx/pull/10286#issuecomment-1168102147) # The first implementation of requires clauses only supported a single clause after the # template prefix, and no trailing clause. It put the ID after the template parameter # list, i.e., # "I" + template_parameter_list_id + "E" + "IQ" + requires_clause_id + "E" # but the second implementation associates the requires clause with each list, i.e., # "I" + template_parameter_list_id + "IQ" + requires_clause_id + "E" + "E" # To avoid making a new ID version, we make an exception for the last requires clause # in the template prefix, and still put it in the end. # As we now support trailing requires clauses we add that as if it was a conjunction. 
if self.templatePrefix is not None: res.append( self.templatePrefix.get_id_except_requires_clause_in_last(version) ) requires_clause_in_last = self.templatePrefix.get_requires_clause_in_last() else: requires_clause_in_last = None if requires_clause_in_last or self.trailingRequiresClause: if version < 4: raise NoOldIdError res.append('IQ') if requires_clause_in_last and self.trailingRequiresClause: # make a conjunction of them res.append('aa') if requires_clause_in_last: res.append(requires_clause_in_last.expr.get_id(version)) if self.trailingRequiresClause: res.append(self.trailingRequiresClause.expr.get_id(version)) res.append('E') res.append(self.declaration.get_id(version, self.objectType, self.symbol)) return ''.join(res) def get_newest_id(self) -> str: if self._newest_id_cache is None: self._newest_id_cache = self.get_id(_max_id, True) return self._newest_id_cache def _stringify(self, transform: StringifyTransform) -> str: res: list[str] = [] if self.visibility and self.visibility != 'public': res.extend((self.visibility, ' ')) if self.templatePrefix: res.append(transform(self.templatePrefix)) res.append(transform(self.declaration)) if self.trailingRequiresClause: res.extend((' ', transform(self.trailingRequiresClause))) if self.semicolon: res.append(';') return ''.join(res) def describe_signature( self, signode: desc_signature, mode: str, env: BuildEnvironment, options: dict[str, bool], ) -> None: verify_description_mode(mode) assert self.symbol # The caller of the domain added a desc_signature node. # Always enable multiline: signode['is_multiline'] = True # Put each line in a desc_signature_line node. 
main_decl_node = addnodes.desc_signature_line() main_decl_node.sphinx_line_type = 'declarator' main_decl_node['add_permalink'] = not self.symbol.isRedeclaration if self.templatePrefix: self.templatePrefix.describe_signature( signode, mode, env, symbol=self.symbol, lineSpec=options.get('tparam-line-spec'), ) signode += main_decl_node if self.visibility and self.visibility != 'public': main_decl_node += addnodes.desc_sig_keyword( self.visibility, self.visibility ) main_decl_node += addnodes.desc_sig_space() if self.objectType == 'type': prefix = self.declaration.get_type_declaration_prefix() main_decl_node += addnodes.desc_sig_keyword(prefix, prefix) main_decl_node += addnodes.desc_sig_space() elif self.objectType == 'concept': main_decl_node += addnodes.desc_sig_keyword('concept', 'concept') main_decl_node += addnodes.desc_sig_space() elif self.objectType in {'member', 'function'}: pass elif self.objectType == 'class': assert self.directiveType in {'class', 'struct'} main_decl_node += addnodes.desc_sig_keyword( self.directiveType, self.directiveType ) main_decl_node += addnodes.desc_sig_space() elif self.objectType == 'union': main_decl_node += addnodes.desc_sig_keyword('union', 'union') main_decl_node += addnodes.desc_sig_space() elif self.objectType == 'enum': main_decl_node += addnodes.desc_sig_keyword('enum', 'enum') main_decl_node += addnodes.desc_sig_space() if self.directiveType == 'enum-class': main_decl_node += addnodes.desc_sig_keyword('class', 'class') main_decl_node += addnodes.desc_sig_space() elif self.directiveType == 'enum-struct': main_decl_node += addnodes.desc_sig_keyword('struct', 'struct') main_decl_node += addnodes.desc_sig_space() else: assert self.directiveType == 'enum', self.directiveType elif self.objectType == 'enumerator': main_decl_node += addnodes.desc_sig_keyword('enumerator', 'enumerator') main_decl_node += addnodes.desc_sig_space() else: raise AssertionError(self.objectType) self.declaration.describe_signature(main_decl_node, mode, 
env, self.symbol) last_decl_node = main_decl_node if self.trailingRequiresClause: trailing_req_node = addnodes.desc_signature_line() trailing_req_node.sphinx_line_type = 'trailingRequiresClause' signode.append(trailing_req_node) last_decl_node = trailing_req_node self.trailingRequiresClause.describe_signature( trailing_req_node, 'markType', env, self.symbol ) if self.semicolon: last_decl_node += addnodes.desc_sig_punctuation(';', ';')
ASTDeclaration
python
huggingface__transformers
src/transformers/models/plbart/modeling_plbart.py
{ "start": 13723, "end": 20130 }
class ____(PLBartPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`PLBartEncoderLayer`]. Args: config: PLBartConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: PLBartConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = PLBartScaledWordEmbedding( config.vocab_size, embed_dim, self.padding_idx, embed_scale=embed_scale ) self.embed_positions = PLBartLearnedPositionalEmbedding( config.max_position_embeddings, embed_dim, ) self.layers = nn.ModuleList([PLBartEncoderLayer(config, layer_idx=i) for i in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutput]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input = input_ids input_ids = input_ids.view(-1, input_ids.shape[-1]) elif inputs_embeds is not None: input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) embed_pos = self.embed_positions(input) embed_pos = embed_pos.to(inputs_embeds.device) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = 
nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) attention_mask = create_bidirectional_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, ) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: layer_outputs = encoder_layer( hidden_states, attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions )
PLBartEncoder
python
huggingface__transformers
src/transformers/models/dia/modeling_dia.py
{ "start": 14783, "end": 18225 }
class ____(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: DiaDecoderConfig, layer_idx: int): super().__init__() self.config = config self.layer_idx = layer_idx self.hidden_size = config.hidden_size self.cross_hidden_size = config.cross_hidden_size self.num_heads = self.config.cross_num_attention_heads self.num_key_value_heads = self.config.cross_num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.head_dim = config.cross_head_dim self.scaling = 1 self.attention_dropout = 0.0 self.is_causal = False self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.cross_hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.cross_hidden_size, self.num_key_value_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) def forward( self, hidden_states: torch.Tensor, cross_attention_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[EncoderDecoderCache] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> tuple[torch.Tensor, Optional[torch.Tensor]]: input_shape = hidden_states.shape[:-1] hidden_shape = (*input_shape, -1, self.head_dim) cross_shape = (*cross_attention_states.shape[:-1], -1, self.head_dim) query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2) is_updated = past_key_values.is_updated.get(self.layer_idx) if past_key_values is not None else False if past_key_values is not None and is_updated: # reuse k,v, cross_attentions key_states = past_key_values.cross_attention_cache.layers[self.layer_idx].keys value_states = past_key_values.cross_attention_cache.layers[self.layer_idx].values else: key_states = self.k_proj(cross_attention_states).view(cross_shape).transpose(1, 2) value_states = 
self.v_proj(cross_attention_states).view(cross_shape).transpose(1, 2) if past_key_values is not None: # save all states to the cache key_states, value_states = past_key_values.cross_attention_cache.update( key_states, value_states, self.layer_idx, ) # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls past_key_values.is_updated[self.layer_idx] = True attention_interface: Callable = eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] attn_output, attn_weights = attention_interface( self, query_states, key_states, value_states, attention_mask, scaling=self.scaling, **kwargs, ) attn_output = attn_output.reshape((*input_shape, -1)).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights
DiaCrossAttention
python
PyCQA__pylint
tests/functional/s/string/string_formatting.py
{ "start": 568, "end": 7013 }
class ____: """ can't be properly inferred """ missing = Missing() def log(message, message_type="error"): """ Test """ return message def print_good(): """ Good format strings """ "{0} {1}".format(1, 2) "{0!r:20}".format("Hello") "{!r:20}".format("Hello") "{a!r:20}".format(a="Hello") "{pid}".format(pid=os.getpid()) str("{}").format(2) "{0.missing.length}".format(ReturnYes()) "{1.missing.length}".format(ReturnYes()) "{a.ids[3][1]}".format(a=Test()) "{a[0][0]}".format(a=[[1]]) "{[0][0]}".format({0: {0: 1}}) "{a.test}".format(a=Custom()) "{a.__len__}".format(a=[]) "{a.ids.__len__}".format(a=Test()) "{a[0]}".format(a=Getitem()) "{a[0][0]}".format(a=[Getitem()]) "{[0][0]}".format(["test"]) # these are skipped "{0} {1}".format(*[1, 2]) "{a} {b}".format(**{'a': 1, 'b': 2}) "{a}".format(a=Missing()) logging.debug("%s", 42) logging.debug("%s %s", 42, 43) def pprint_bad(): """Test string format """ "{{}}".format(1) # [format-string-without-interpolation] "{} {".format() # [bad-format-string] "{} }".format() # [bad-format-string] "{0} {}".format(1, 2) # [format-combined-specification] # +1: [missing-format-argument-key, unused-format-string-argument] "{a} {b}".format(a=1, c=2) "{} {a}".format(1, 2) # [missing-format-argument-key] "{} {}".format(1) # [too-few-format-args] "{} {}".format(1, 2, 3) # [too-many-format-args] # +1: [missing-format-argument-key,missing-format-argument-key,missing-format-argument-key] "{a} {b} {c}".format() "{} {}".format(a=1, b=2) # [too-few-format-args] # +1: [missing-format-argument-key, missing-format-argument-key] "{a} {b}".format(1, 2) "{0} {1} {a}".format(1, 2, 3) # [missing-format-argument-key] # +1: [missing-format-attribute] "{a.ids.__len__.length}".format(a=Test()) "{a.ids[3][400]}".format(a=Test()) # [invalid-format-index] "{a.ids[3]['string']}".format(a=Test()) # [invalid-format-index] "{[0][1]}".format(["a"]) # [invalid-format-index] "{[0][0]}".format(((1, ))) # [invalid-format-index] # +1: [missing-format-argument-key, 
unused-format-string-argument] "{b[0]}".format(a=23) "{a[0]}".format(a=object) # [invalid-format-index] log("{}".format(2, "info")) # [too-many-format-args] "{0.missing}".format(2) # [missing-format-attribute] "{0} {1} {2}".format(1, 2) # [too-few-format-args] "{0} {1}".format(1, 2, 3) # [too-many-format-args] "{0} {a}".format(a=4) # [too-few-format-args] "{[0]} {}".format([4]) # [too-few-format-args] "{[0]} {}".format([4], 5, 6) # [too-many-format-args] logging.debug("%s %s", 42) # [logging-too-few-args] logging.debug("%s", 42, 43) # [logging-too-many-args] "String".format(1) # [format-string-without-interpolation] "String".format(()) # [format-string-without-interpolation] "String".format([]) # [format-string-without-interpolation] "String".format(None) # [format-string-without-interpolation] def good_issue288(*args, **kwargs): """ Test that using kwargs does not emit a false positive. """ 'Hello John Doe {0[0]}'.format(args) 'Hello {0[name]}'.format(kwargs) def good_issue287(): """ Test that the string format checker skips format nodes which don't have a string as a parent (but a subscript, name etc). """ name = 'qwerty' ret = {'comment': ''} ret['comment'] = 'MySQL grant {0} is set to be revoked' ret['comment'] = ret['comment'].format(name) return ret, name def nested_issue294(): """ Test nested format fields. """ '{0:>{1}}'.format(42, 24) '{0:{a[1]}} {a}'.format(1, a=[1, 2]) '{:>{}}'.format(42, 24) '{0:>{1}}'.format(42) # [too-few-format-args] '{0:>{1}}'.format(42, 24, 54) # [too-many-format-args] '{0:{a[1]}}'.format(1) # [missing-format-argument-key] '{0:{a.x}}'.format(1, a=2) # [missing-format-attribute] def issue310(): """ Test a regression using duplicate manual position arguments. """ '{0} {1} {0}'.format(1, 2) '{0} {1} {0}'.format(1) # [too-few-format-args] def issue322(): """ Test a regression using mixed manual position arguments and attribute access arguments. 
""" '{0}{1[FOO]}'.format(123, {'FOO': 456}) '{0}{1[FOO]}'.format(123, {'FOO': 456}, 321) # [too-many-format-args] '{0}{1[FOO]}'.format(123) # [too-few-format-args] def issue338(): """ Check that using a namedtuple subclass doesn't crash when trying to infer EmptyNodes (resulted after mocking the members of namedtuples). """ from collections import namedtuple # pylint: disable=import-outside-toplevel class Crash(namedtuple("C", "foo bar")): """ Looking for attributes in __str__ will crash, because EmptyNodes can't be inferred. """ def __str__(self): return "{0.foo}: {0.bar}".format(self) return Crash def issue351(): """ Check that the format method can be assigned to a variable, ie: """ fmt = 'test {} {}'.format fmt('arg1') # [too-few-format-args] fmt('arg1', 'arg2') fmt('arg1', 'arg2', 'arg3') # [too-many-format-args] def issue373(): """ Ignore any object coming from an argument. """ class SomeClass: """ empty docstring. """ def __init__(self, opts=None): self.opts = opts def dunc(self, arg): """Don't try to analyze this.""" return "A{0}{1}".format(arg, self.opts) def func(self): """Don't try to analyze the following string.""" return 'AAA{0[iface]}BBB{0[port]}'.format(self.opts) return SomeClass def issue_463(): """ Mix positional arguments, `{0}`, with positional arguments with attribute access, `{0.__x__}`. """ data = "{0.__class__.__name__}: {0}".format(42) data2 = "{0[0]}: {0}".format([1]) return (data, data2) def avoid_empty_attribute(): """The following string is invalid, avoid crashing.""" return "There are {.:2f} undiscovered errors.".format(1) # [bad-format-string] def invalid_format_index_on_inference_ambiguity(): """Test inference bug for invalid-format-index""" if len(sys.argv) > 1: options = [["Woof!"]] else: options = [["Barf!"]] return 'Why is this bad? {options[0][0]}'.format(options=options)
ReturnYes
python
django__django
tests/staticfiles_tests/test_management.py
{ "start": 8703, "end": 10616 }
class ____(CollectionTestCase): copying_msg = "Copying " run_collectstatic_in_setUp = False post_process_msg = "Post-processed" staticfiles_copied_msg = "static files copied to" def test_verbosity_0(self): stdout = StringIO() self.run_collectstatic(verbosity=0, stdout=stdout) self.assertEqual(stdout.getvalue(), "") def test_verbosity_1(self): stdout = StringIO() self.run_collectstatic(verbosity=1, stdout=stdout) output = stdout.getvalue() self.assertIn(self.staticfiles_copied_msg, output) self.assertNotIn(self.copying_msg, output) def test_verbosity_2(self): stdout = StringIO() self.run_collectstatic(verbosity=2, stdout=stdout) output = stdout.getvalue() self.assertIn(self.staticfiles_copied_msg, output) self.assertIn(self.copying_msg, output) @override_settings( STORAGES={ **settings.STORAGES, STATICFILES_STORAGE_ALIAS: { "BACKEND": ( "django.contrib.staticfiles.storage.ManifestStaticFilesStorage" ) }, } ) def test_verbosity_1_with_post_process(self): stdout = StringIO() self.run_collectstatic(verbosity=1, stdout=stdout, post_process=True) self.assertNotIn(self.post_process_msg, stdout.getvalue()) @override_settings( STORAGES={ **settings.STORAGES, STATICFILES_STORAGE_ALIAS: { "BACKEND": ( "django.contrib.staticfiles.storage.ManifestStaticFilesStorage" ) }, } ) def test_verbosity_2_with_post_process(self): stdout = StringIO() self.run_collectstatic(verbosity=2, stdout=stdout, post_process=True) self.assertIn(self.post_process_msg, stdout.getvalue())
TestCollectionVerbosity
python
lepture__authlib
authlib/oauth2/rfc9101/registration.py
{ "start": 88, "end": 1299 }
class ____(BaseClaims): """Additional client metadata can be used with :ref:`specs/rfc7591` and :ref:`specs/rfc7592` endpoints. This can be used with:: server.register_endpoint( ClientRegistrationEndpoint( claims_classes=[ rfc7591.ClientMetadataClaims, rfc9101.ClientMetadataClaims, ] ) ) server.register_endpoint( ClientRegistrationEndpoint( claims_classes=[ rfc7591.ClientMetadataClaims, rfc9101.ClientMetadataClaims, ] ) ) """ REGISTERED_CLAIMS = [ "require_signed_request_object", ] def validate(self): self._validate_essential_claims() self.validate_require_signed_request_object() def validate_require_signed_request_object(self): self.setdefault("require_signed_request_object", False) if not isinstance(self["require_signed_request_object"], bool): raise InvalidClaimError("require_signed_request_object") self._validate_claim_value("require_signed_request_object")
ClientMetadataClaims
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/paramSpec27.py
{ "start": 409, "end": 513 }
class ____(Protocol[P]): def __call__(self, /, *args: P.args, **kwargs: P.kwargs) -> None: ...
Handler
python
redis__redis-py
redis/commands/search/querystring.py
{ "start": 1135, "end": 1716 }
class ____: @property def combinable(self): """ Whether this type of value may be combined with other values for the same field. This makes the filter potentially more efficient """ return False @staticmethod def make_value(v): """ Convert an object to a value, if it is not a value already """ if isinstance(v, Value): return v return ScalarValue(v) def to_string(self): raise NotImplementedError() def __str__(self): return self.to_string()
Value
python
keras-team__keras
keras/src/ops/numpy.py
{ "start": 186475, "end": 187267 }
class ____(Operation): def __init__(self, axis=-1, *, name=None): super().__init__(name=name) self.axis = axis def call(self, x): return backend.numpy.sort(x, axis=self.axis) def compute_output_spec(self, x): return KerasTensor(x.shape, x.dtype) @keras_export(["keras.ops.sort", "keras.ops.numpy.sort"]) def sort(x, axis=-1): """Sorts the elements of `x` along a given axis in ascending order. Args: x: Input tensor. axis: Axis along which to sort. If `None`, the tensor is flattened before sorting. Defaults to `-1`; the last axis. Returns: Sorted tensor. """ if any_symbolic_tensors((x,)): return Sort(axis=axis).symbolic_call(x) return backend.numpy.sort(x, axis=axis)
Sort
python
huggingface__transformers
src/transformers/models/flaubert/modeling_flaubert.py
{ "start": 7638, "end": 9247 }
class ____(nn.Module): """ Prediction layer (cross_entropy or adaptive_softmax). """ def __init__(self, config): super().__init__() self.asm = config.asm self.n_words = config.n_words self.pad_index = config.pad_index dim = config.emb_dim if config.asm is False: self.proj = nn.Linear(dim, config.n_words, bias=True) else: self.proj = nn.AdaptiveLogSoftmaxWithLoss( in_features=dim, n_classes=config.n_words, cutoffs=config.asm_cutoffs, div_value=config.asm_div_value, head_bias=True, # default is False ) def forward(self, x, y=None): """Compute the loss, and optionally the scores.""" outputs = () if self.asm is False: scores = self.proj(x) outputs = (scores,) + outputs if y is not None: loss = nn.functional.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction="mean") outputs = (loss,) + outputs else: scores = self.proj.log_prob(x) outputs = (scores,) + outputs if y is not None: _, loss = self.proj(x, y) outputs = (loss,) + outputs return outputs @dataclass @auto_docstring( custom_intro=""" Base class for outputs of question answering models using a [`~modeling_utils.FlaubertSQuADHead`]. """ ) # Copied from transformers.models.xlm.modeling_xlm.XLMSquadHeadOutput with XLM->Flaubert
FlaubertPredLayer
python
cookiecutter__cookiecutter
cookiecutter/exceptions.py
{ "start": 4064, "end": 4249 }
class ____(CookiecutterException): """ Exception for bad zip repo. Raised when the specified cookiecutter repository isn't a valid Zip archive. """
InvalidZipRepository
python
doocs__leetcode
solution/1700-1799/1740.Find Distance in a Binary Tree/Solution.py
{ "start": 192, "end": 984 }
class ____: def findDistance(self, root: Optional[TreeNode], p: int, q: int) -> int: def lca(root, p, q): if root is None or root.val in [p, q]: return root left = lca(root.left, p, q) right = lca(root.right, p, q) if left is None: return right if right is None: return left return root def dfs(root, v): if root is None: return -1 if root.val == v: return 0 left, right = dfs(root.left, v), dfs(root.right, v) if left == right == -1: return -1 return 1 + max(left, right) g = lca(root, p, q) return dfs(g, p) + dfs(g, q)
Solution
python
getsentry__sentry
src/sentry/testutils/cases.py
{ "start": 9118, "end": 16828 }
class ____(Fixtures): @pytest.fixture(autouse=True) def setup_dummy_auth_provider(self): auth.register(DummyProvider) yield auth.unregister(DummyProvider) def tasks(self): return TaskRunner() @pytest.fixture(autouse=True) def polyfill_capture_on_commit_callbacks(self, django_capture_on_commit_callbacks): """ https://pytest-django.readthedocs.io/en/latest/helpers.html#django_capture_on_commit_callbacks pytest-django comes with its own polyfill of this Django helper for older Django versions, so we're using that. """ self.capture_on_commit_callbacks = django_capture_on_commit_callbacks @pytest.fixture(autouse=True) def expose_stale_database_reads(self, stale_database_reads): self.stale_database_reads = stale_database_reads def feature(self, names): """ >>> with self.feature({'feature:name': True}) >>> # ... """ return Feature(names) def save_session(self): self.session.save() self.save_cookie( name=settings.SESSION_COOKIE_NAME, value=self.session.session_key, max_age=None, path="/", domain=settings.SESSION_COOKIE_DOMAIN, secure=settings.SESSION_COOKIE_SECURE or None, expires=None, ) def save_cookie(self, name, value, **params): self.client.cookies[name] = value self.client.cookies[name].update({k.replace("_", "-"): v for k, v in params.items()}) def make_request( self, user=None, auth=None, method="get", is_superuser=False, is_staff=False, path="/", secure_scheme=False, subdomain=None, *, GET: dict[str, str] | None = None, ) -> HttpRequest: request = getattr(RequestFactory(), method.lower())(path, query_params=GET) if subdomain: setattr(request, "subdomain", subdomain) request.META["REMOTE_ADDR"] = "127.0.0.1" request.META["SERVER_NAME"] = "testserver" request.META["SERVER_PORT"] = 80 if secure_scheme: assert settings.SECURE_PROXY_SSL_HEADER is not None secure_header = settings.SECURE_PROXY_SSL_HEADER request.META[secure_header[0]] = secure_header[1] # order matters here, session -> user -> other things request.session = self.session request.auth = auth request.user 
= user or AnonymousUser() # must happen after request.user/request.session is populated request.superuser = Superuser(request) request.staff = Staff(request) if is_superuser: # XXX: this is gross, but it's a one-off and apis change only once in a great while request.superuser.set_logged_in(user) if is_staff: request.staff.set_logged_in(user) request.successful_authenticator = None return request # TODO(dcramer): ideally superuser_sso would be False by default, but that would require # a lot of tests changing def login_as( self, user, organization_id=None, organization_ids=None, superuser=False, staff=False, staff_sso=True, superuser_sso=True, ): if isinstance(user, OrganizationMember): with assume_test_silo_mode(SiloMode.CONTROL): user = User.objects.get(id=user.user_id) user.backend = settings.AUTHENTICATION_BACKENDS[0] request = self.make_request() with assume_test_silo_mode(SiloMode.CONTROL): login(request, user) request.user = user if organization_ids is None: organization_ids = set() else: organization_ids = set(organization_ids) if superuser and superuser_sso is not False: if SUPERUSER_ORG_ID: organization_ids.add(SUPERUSER_ORG_ID) if staff and staff_sso is not False: if STAFF_ORG_ID: organization_ids.add(SUPERUSER_ORG_ID) if organization_id: organization_ids.add(organization_id) # TODO(dcramer): ideally this would get abstracted if organization_ids: for o in organization_ids: sso_session = SsoSession.create(o) self.session[sso_session.session_key] = sso_session.to_dict() # logging in implicitly binds superuser, but for test cases we # want that action to be explicit to avoid accidentally testing # superuser-only code if not superuser: # XXX(dcramer): we're calling the internal method to avoid logging request.superuser._set_logged_out() elif request.user.is_superuser and superuser: request.superuser.set_logged_in(request.user) # XXX(dcramer): awful hack to ensure future attempts to instantiate # the Superuser object are successful self.save_cookie( 
name=SU_COOKIE_NAME, value=signing.get_cookie_signer(salt=SU_COOKIE_NAME + SU_COOKIE_SALT).sign( request.superuser.token ), max_age=None, path=SU_COOKIE_PATH, domain=SU_COOKIE_DOMAIN, secure=SU_COOKIE_SECURE or None, expires=None, ) # XXX(schew2381): Same as above, but for staff if not staff: request.staff._set_logged_out() elif request.user.is_staff and staff: request.staff.set_logged_in(request.user) self.save_cookie( name=STAFF_COOKIE_NAME, value=signing.get_cookie_signer(salt=STAFF_COOKIE_NAME + STAFF_COOKIE_SALT).sign( request.staff.token ), max_age=None, path=STAFF_COOKIE_PATH, domain=STAFF_COOKIE_DOMAIN, secure=STAFF_COOKIE_SECURE or None, expires=None, ) # Save the session values. self.save_session() def load_fixture(self, filepath): with open(get_fixture_path(filepath), "rb") as fp: return fp.read() @classmethod def _pre_setup(cls): super()._pre_setup() cache.clear() ProjectOption.objects.clear_local_cache() GroupMeta.objects.clear_local_cache() def _post_teardown(self): super()._post_teardown() def options(self, options): """ A context manager that temporarily sets a global option and reverts back to the original value when exiting the context. """ return override_options(options) def assert_valid_deleted_log(self, deleted_log, original_object): assert deleted_log is not None assert original_object.name == deleted_log.name assert deleted_log.name == original_object.name assert deleted_log.slug == original_object.slug if not isinstance(deleted_log, DeletedOrganization): assert deleted_log.organization_id == original_object.organization.id assert deleted_log.organization_name == original_object.organization.name assert deleted_log.organization_slug == original_object.organization.slug assert deleted_log.date_created == original_object.date_added assert deleted_log.date_deleted >= deleted_log.date_created def get_mock_uuid(self, hex_value="abc123"): class uuid: hex = hex_value bytes = b"\x00\x01\x02" return uuid
BaseTestCase