language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | test/mkldnn_verbose.py | {
"start": 32,
"end": 637
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(1, 10, 5, 1)
def forward(self, x):
y = self.conv(x)
return y
def run_model(level):
m = Module().eval()
d = torch.rand(1, 1, 112, 112)
with torch.backends.mkldnn.verbose(level):
m(d)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--verbose-level", default=0, type=int)
args = parser.parse_args()
try:
run_model(args.verbose_level)
except Exception as e:
print(e)
| Module |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_table30.py | {
"start": 315,
"end": 920
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table30.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column("C:F", 10.288)
worksheet.add_table("C3:F13")
worksheet.set_background(self.image_dir + "logo.jpg")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kamyu104__LeetCode-Solutions | Python/divide-intervals-into-minimum-number-of-groups.py | {
"start": 73,
"end": 505
} | class ____(object):
def minGroups(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: int
"""
events = collections.Counter()
for l, r in intervals:
events[l] += 1
events[r+1] -= 1
result = curr = 0
for t in sorted(events.iterkeys()):
curr += events[t]
result = max(result, curr)
return result
| Solution |
python | plotly__plotly.py | plotly/graph_objs/barpolar/marker/colorbar/_tickformatstop.py | {
"start": 233,
"end": 8554
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "barpolar.marker.colorbar"
_path_str = "barpolar.marker.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.barpolar.marke
r.colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.barpolar.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.barpolar.marker.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/definitions_tests/test_default_io_manager.py | {
"start": 663,
"end": 3405
} | class ____(PickledObjectFilesystemIOManager):
def __init__(self, ctx):
super().__init__(base_dir="/tmp/dagster/foo-io-manager")
assert ctx.instance
foo_io_manager_def = dg.IOManagerDefinition(
resource_fn=FooIoManager,
config_schema={},
)
@dg.op
def foo_io_manager_op(context):
assert type(context.resources.io_manager) == FooIoManager
@dg.job
def foo_io_manager_job():
foo_io_manager_op()
def test_override_default_io_manager(instance):
with environ(
{
"DAGSTER_DEFAULT_IO_MANAGER_MODULE": (
"dagster_tests.definitions_tests.test_default_io_manager"
),
"DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE": "foo_io_manager_def",
}
):
result = dg.execute_job(dg.reconstructable(foo_io_manager_job), instance)
assert result.success
@dg.asset
def foo_io_manager_asset(context):
assert type(context.resources.io_manager) == FooIoManager
@dg.asset_check(asset=foo_io_manager_asset)
def check_foo(context, foo_io_manager_asset):
assert foo_io_manager_asset is None
assert type(context.resources.io_manager) == FooIoManager
return dg.AssetCheckResult(passed=True)
def create_asset_job():
return dg.define_asset_job(name="foo_io_manager_asset_job").resolve(
asset_graph=AssetGraph.from_assets([foo_io_manager_asset, check_foo])
)
def test_asset_override_default_io_manager(instance):
with environ(
{
"DAGSTER_DEFAULT_IO_MANAGER_MODULE": (
"dagster_tests.definitions_tests.test_default_io_manager"
),
"DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE": "foo_io_manager_def",
}
):
result = dg.execute_job(dg.reconstructable(create_asset_job), instance)
assert result.success
def test_bad_override(instance):
with pytest.raises(dg.DagsterSubprocessError, match=r"has no attribute \'foo_io_manager_def\'"):
with environ(
{
"DAGSTER_DEFAULT_IO_MANAGER_MODULE": "dagster_tests",
"DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE": "foo_io_manager_def",
}
):
result = dg.execute_job(
dg.reconstructable(fs_io_manager_job), instance, raise_on_error=True
)
assert not result.success
with environ(
{
"DAGSTER_DEFAULT_IO_MANAGER_MODULE": "dagster_tests",
"DAGSTER_DEFAULT_IO_MANAGER_ATTRIBUTE": "foo_io_manager_def",
"DAGSTER_DEFAULT_IO_MANAGER_SILENCE_FAILURES": "True",
}
):
result = dg.execute_job(
dg.reconstructable(fs_io_manager_job), instance, raise_on_error=True
)
assert result.success
| FooIoManager |
python | huggingface__transformers | tests/models/falcon_h1/test_modeling_falcon_h1.py | {
"start": 15821,
"end": 23736
} | class ____(unittest.TestCase):
@slow
def test_falcon_h1_hard(self):
"""
An integration test for Falcon-H1.
"""
EXPECTED_TEXT_DEFAULT = """
user
Tell me about the french revolution.
assistant
The French Revolution (1789–1799) was a period of radical social and political upheaval in France that fundamentally transformed the nation and had profound effects on the rest of Europe and the world. Here are the key aspects of the revolution:
### **Causes**
1. **Economic Crisis**: France was in severe financial trouble due to costly wars (particularly the American Revolution), extravagant spending by the monarchy, and inefficient taxation.
2. **Social Inequality**: The rigid class system (the Ancien Régime) divided society into the privileged nobility and clergy (First Estate) and the commoners (Third Estate), who bore the brunt of taxation and had few rights.
3. **Enlightenment Ideas**: Philosophers like Voltaire, Rousseau, and Montesquieu inspired ideas of liberty, equality, and popular sovereignty.
4. **Settlement of 1789**: The Estates-General convened to address the financial crisis, leading to the Third Estate's assertion of its rights and the eventual abolition of the feudal system.
### **Key Events**
1. **Storming of the Bastille (July 14, 1789)**: A symbol of royal tyranny, the Bastille fortress was stormed by revolutionaries, sparking widespread rebellion.
2. **Declaration of the Rights of Man and of the Citizen (August 1789)**: A foundational document proclaiming liberty, equality, and fraternity.
3. **National Assembly and King’s Trial (1791–1792)**: King Louis XVI and his ministers were tried and executed (King Louis was guillotined, Marie Antoinette was banished), marking the end of the monarchy.
4. **Rise of the Jacobins and Reign of Terror (1793–1794)**: Radical leaders like Maximilien Robespierre sought to purge France of counter-revolutionaries, leading to mass executions and widespread fear.
5. **Thermidorian Reaction
"""
EXPECTED_TEXT_A10 = """
user
Tell me about the french revolution.
assistant
The French Revolution (1789–1799) was a period of profound social upheaval and radical political change in France that fundamentally transformed the nation and had far-reaching effects on the rest of Europe and the world. Here are the key aspects of the revolution:
### **Causes**
1. **Economic Crisis**: France was in severe financial trouble due to costly wars (particularly the American Revolution), extravagant spending by the monarchy, and an inefficient tax system.
2. **Social Inequality**: The privileged classes (the nobility and clergy) enjoyed immense wealth and power, while the majority of the population (the Third Estate, comprising commoners) faced poverty and lack of representation.
3. **Enlightenment Ideas**: Philosophers like Voltaire, Rousseau, and Montesquieu inspired ideas of liberty, equality, and popular sovereignty, which fueled revolutionary fervor.
4. **Political Instability**: The absolute monarchy under King Louis XVI proved unable to address the nation's problems, leading to growing discontent.
### **Key Events**
1. **Estates-General (1789)**: The Third Estate broke away and formed the National Assembly, forcing King Louis XVI to convene the Estates-General, an old legislative body, to address the financial crisis.
2. **Storming of the Bastille (July 14, 1789)**: A symbol of royal tyranny, the Bastille fortress was stormed by revolutionaries, sparking widespread rebellion.
3. **Declaration of the Rights of Man and of the Citizen (August 1789)**: This foundational document proclaimed liberty, equality, and fraternity as fundamental rights.
4. **Abolition of Feudalism (November 1789)**: The National Assembly abolished feudal privileges, redistributing church lands to the people.
5. **Tennis Court Oath (May 5, 1789)**: The National Assembly members, meeting on a tennis court, pledged to continue their work until a new constitution was established.
6.
"""
EXPECTED_TEXT_XPU = """
user
Tell me about the french revolution.
assistant
The French Revolution (1789–1799) was a period of radical social and political upheaval in France that fundamentally transformed the nation and had profound effects on the rest of Europe and the world. Here are the key aspects of the revolution:
### **Causes**
1. **Economic Crisis**: France was in severe financial trouble due to costly wars (particularly the American Revolution), extravagant spending by the monarchy, and inefficient taxation.
2. **Social Inequality**: The rigid class system (the Ancien Régime) favored the nobility and clergy while the majority of the population (the Third Estate) bore the brunt of taxation and had limited rights.
3. **Enlightenment Ideas**: Philosophers like Rousseau, Voltaire, and Montesquieu inspired ideas of liberty, equality, and popular sovereignty.
4. **Settlement of 1789**: The Estates-General convened to address the financial crisis, leading to debates that exposed the weaknesses of the monarchy and the grievances of the common people.
### **Key Events**
1. **Opening of the Revolution (1789)**:
- **Storming of the Bastille**: A symbol of royal tyranny, marking the start of the revolution.
- **Declaration of the Rights of Man and of the Citizen**: A foundational document proclaiming liberty, equality, and fraternity.
2. **Stages of the Revolution**:
- **Staffords' Reforms (1789–1791)**: Attempts to address grievances, including the abolition of feudal privileges and the introduction of the Civil Constitution of the Church.
- **Reign of Terror (1793–1794)**: Led by Maximilien Robespierre, characterized by mass executions of perceived enemies of the revolution, including King Louis XVI and Queen Marie Antoinette.
- **Thermidorian Reaction (1794)**: The fall of Robespierre and the end of the Reign of Terror.
3. **
"""
expected_texts = Expectations(
{
(None, None): EXPECTED_TEXT_DEFAULT,
("cuda", 8): EXPECTED_TEXT_A10,
("xpu", None): EXPECTED_TEXT_XPU,
}
)
EXPECTED_TEXT = expected_texts.get_expectation()
# Remove the first char (`\n`) and the consecutive whitespaces caused by the formatting.
EXPECTED_TEXT = EXPECTED_TEXT.strip().replace(" " * 12, "")
device_properties = get_device_properties()
# For A10, there is an ending " "
if device_properties[0] == "cuda" and device_properties[1] == 8:
EXPECTED_TEXT = EXPECTED_TEXT + " "
model_id = "tiiuae/Falcon-H1-1.5B-Deep-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = FalconH1ForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map="auto")
messages = [{"role": "user", "content": "Tell me about the french revolution."}]
input_text = tokenizer.apply_chat_template(messages, tokenize=False)
inputs = tokenizer.encode(input_text, return_tensors="pt").to(torch_device)
with torch.no_grad():
outputs = model.generate(inputs, max_new_tokens=512, do_sample=False)
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
self.assertEqual(generated_text, EXPECTED_TEXT)
| FalconH1ModelIntegrationTest |
python | pypa__pip | src/pip/_vendor/pygments/formatter.py | {
"start": 465,
"end": 4390
} | class ____:
"""
Converts a token stream to text.
Formatters should have attributes to help selecting them. These
are similar to the corresponding :class:`~pygments.lexer.Lexer`
attributes.
.. autoattribute:: name
:no-value:
.. autoattribute:: aliases
:no-value:
.. autoattribute:: filenames
:no-value:
You can pass options as keyword arguments to the constructor.
All formatters accept these basic options:
``style``
The style to use, can be a string or a Style subclass
(default: "default"). Not used by e.g. the
TerminalFormatter.
``full``
Tells the formatter to output a "full" document, i.e.
a complete self-contained document. This doesn't have
any effect for some formatters (default: false).
``title``
If ``full`` is true, the title that should be used to
caption the document (default: '').
``encoding``
If given, must be an encoding name. This will be used to
convert the Unicode token strings to byte strings in the
output. If it is "" or None, Unicode strings will be written
to the output file, which most file-like objects do not
support (default: None).
``outencoding``
Overrides ``encoding`` if given.
"""
#: Full name for the formatter, in human-readable form.
name = None
#: A list of short, unique identifiers that can be used to lookup
#: the formatter from a list, e.g. using :func:`.get_formatter_by_name()`.
aliases = []
#: A list of fnmatch patterns that match filenames for which this
#: formatter can produce output. The patterns in this list should be unique
#: among all formatters.
filenames = []
#: If True, this formatter outputs Unicode strings when no encoding
#: option is given.
unicodeoutput = True
def __init__(self, **options):
"""
As with lexers, this constructor takes arbitrary optional arguments,
and if you override it, you should first process your own options, then
call the base class implementation.
"""
self.style = _lookup_style(options.get('style', 'default'))
self.full = get_bool_opt(options, 'full', False)
self.title = options.get('title', '')
self.encoding = options.get('encoding', None) or None
if self.encoding in ('guess', 'chardet'):
# can happen for e.g. pygmentize -O encoding=guess
self.encoding = 'utf-8'
self.encoding = options.get('outencoding') or self.encoding
self.options = options
def get_style_defs(self, arg=''):
"""
This method must return statements or declarations suitable to define
the current style for subsequent highlighted text (e.g. CSS classes
in the `HTMLFormatter`).
The optional argument `arg` can be used to modify the generation and
is formatter dependent (it is standardized because it can be given on
the command line).
This method is called by the ``-S`` :doc:`command-line option <cmdline>`,
the `arg` is then given by the ``-a`` option.
"""
return ''
def format(self, tokensource, outfile):
"""
This method must format the tokens from the `tokensource` iterable and
write the formatted version to the file object `outfile`.
Formatter options can control how exactly the tokens are converted.
"""
if self.encoding:
# wrap the outfile in a StreamWriter
outfile = codecs.lookup(self.encoding)[3](outfile)
return self.format_unencoded(tokensource, outfile)
# Allow writing Formatter[str] or Formatter[bytes]. That's equivalent to
# Formatter. This helps when using third-party type stubs from typeshed.
def __class_getitem__(cls, name):
return cls
| Formatter |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 36526,
"end": 36927
} | class ____(VOTableSpecWarning):
"""
The column fields as defined using ``FIELD`` elements do not match
those in the headers of the embedded PARQUET file. If ``verify`` is not
``'exception'``, the embedded PARQUET file will take precedence.
"""
message_template = (
"The fields defined in the VOTable do not match those in the "
"embedded PARQUET file"
)
| W56 |
python | aio-libs__aiohttp | aiohttp/_websocket/models.py | {
"start": 265,
"end": 626
} | class ____(IntEnum):
OK = 1000
GOING_AWAY = 1001
PROTOCOL_ERROR = 1002
UNSUPPORTED_DATA = 1003
ABNORMAL_CLOSURE = 1006
INVALID_TEXT = 1007
POLICY_VIOLATION = 1008
MESSAGE_TOO_BIG = 1009
MANDATORY_EXTENSION = 1010
INTERNAL_ERROR = 1011
SERVICE_RESTART = 1012
TRY_AGAIN_LATER = 1013
BAD_GATEWAY = 1014
| WSCloseCode |
python | oauthlib__oauthlib | tests/openid/connect/core/grant_types/test_implicit.py | {
"start": 6762,
"end": 7637
} | class ____(OpenIDImplicitTest):
def setUp(self):
super().setUp()
self.request.response_type = 'id_token'
token = 'MOCKED_TOKEN'
self.url_query = 'https://a.b/cb?state=abc&id_token=%s' % token
self.url_fragment = 'https://a.b/cb#state=abc&id_token=%s' % token
@mock.patch('oauthlib.common.generate_token')
def test_required_nonce(self, generate_token):
generate_token.return_value = 'abc'
self.request.nonce = None
self.assertRaises(errors.InvalidRequestError, self.auth.validate_authorization_request, self.request)
bearer = BearerToken(self.mock_validator)
h, b, s = self.auth.create_authorization_response(self.request, bearer)
self.assertIn('error=invalid_request', h['Location'])
self.assertIsNone(b)
self.assertEqual(s, 302)
| OpenIDImplicitNoAccessTokenTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 491442,
"end": 492003
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("organization", "permission", "source")
organization = sgqlc.types.Field(
sgqlc.types.non_null("Organization"), graphql_name="organization"
)
permission = sgqlc.types.Field(
sgqlc.types.non_null(DefaultRepositoryPermissionField),
graphql_name="permission",
)
source = sgqlc.types.Field(
sgqlc.types.non_null("PermissionGranter"), graphql_name="source"
)
| PermissionSource |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 133374,
"end": 143054
} | class ____(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- ``quote_char`` - string of one or more characters defining the
quote delimiting string
- ``esc_char`` - character to re_escape quotes, typically backslash
(default= ``None``)
- ``esc_quote`` - special quote sequence to re_escape an embedded quote
string (such as SQL's ``""`` to re_escape an embedded ``"``)
(default= ``None``)
- ``multiline`` - boolean indicating whether quotes can span
multiple lines (default= ``False``)
- ``unquote_results`` - boolean indicating whether the matched text
should be unquoted (default= ``True``)
- ``end_quote_char`` - string of one or more characters defining the
end of the quote delimited string (default= ``None`` => same as
quote_char)
- ``convert_whitespace_escapes`` - convert escaped whitespace
(``'\t'``, ``'\n'``, etc.) to actual whitespace
(default= ``True``)
.. caution:: ``convert_whitespace_escapes`` has no effect if
``unquote_results`` is ``False``.
Example:
.. doctest::
>>> qs = QuotedString('"')
>>> print(qs.search_string('lsjdf "This is the quote" sldjf'))
[['This is the quote']]
>>> complex_qs = QuotedString('{{', end_quote_char='}}')
>>> print(complex_qs.search_string(
... 'lsjdf {{This is the "quote"}} sldjf'))
[['This is the "quote"']]
>>> sql_qs = QuotedString('"', esc_quote='""')
>>> print(sql_qs.search_string(
... 'lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
[['This is the quote with "embedded" quotes']]
"""
ws_map = dict(((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r")))
def __init__(
self,
quote_char: str = "",
esc_char: typing.Optional[str] = None,
esc_quote: typing.Optional[str] = None,
multiline: bool = False,
unquote_results: bool = True,
end_quote_char: typing.Optional[str] = None,
convert_whitespace_escapes: bool = True,
**kwargs,
) -> None:
super().__init__()
quoteChar: str = deprecate_argument(kwargs, "quoteChar", "")
escChar: str = deprecate_argument(kwargs, "escChar", None)
escQuote: str = deprecate_argument(kwargs, "escQuote", None)
unquoteResults: bool = deprecate_argument(kwargs, "unquoteResults", True)
endQuoteChar: typing.Optional[str] = deprecate_argument(
kwargs, "endQuoteChar", None
)
convertWhitespaceEscapes: bool = deprecate_argument(
kwargs, "convertWhitespaceEscapes", True
)
esc_char = escChar or esc_char
esc_quote = escQuote or esc_quote
unquote_results = unquoteResults and unquote_results
end_quote_char = endQuoteChar or end_quote_char
convert_whitespace_escapes = (
convertWhitespaceEscapes and convert_whitespace_escapes
)
quote_char = quoteChar or quote_char
# remove white space from quote chars
quote_char = quote_char.strip()
if not quote_char:
raise ValueError("quote_char cannot be the empty string")
if end_quote_char is None:
end_quote_char = quote_char
else:
end_quote_char = end_quote_char.strip()
if not end_quote_char:
raise ValueError("end_quote_char cannot be the empty string")
self.quote_char: str = quote_char
self.quote_char_len: int = len(quote_char)
self.first_quote_char: str = quote_char[0]
self.end_quote_char: str = end_quote_char
self.end_quote_char_len: int = len(end_quote_char)
self.esc_char: str = esc_char or ""
self.has_esc_char: bool = esc_char is not None
self.esc_quote: str = esc_quote or ""
self.unquote_results: bool = unquote_results
self.convert_whitespace_escapes: bool = convert_whitespace_escapes
self.multiline = multiline
self.re_flags = re.RegexFlag(0)
# fmt: off
# build up re pattern for the content between the quote delimiters
inner_pattern: list[str] = []
if esc_quote:
inner_pattern.append(rf"(?:{re.escape(esc_quote)})")
if esc_char:
inner_pattern.append(rf"(?:{re.escape(esc_char)}.)")
if len(self.end_quote_char) > 1:
inner_pattern.append(
"(?:"
+ "|".join(
f"(?:{re.escape(self.end_quote_char[:i])}(?!{re.escape(self.end_quote_char[i:])}))"
for i in range(len(self.end_quote_char) - 1, 0, -1)
)
+ ")"
)
if self.multiline:
self.re_flags |= re.MULTILINE | re.DOTALL
inner_pattern.append(
rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}"
rf"{(_escape_regex_range_chars(self.esc_char) if self.has_esc_char else '')}])"
)
else:
inner_pattern.append(
rf"(?:[^{_escape_regex_range_chars(self.end_quote_char[0])}\n\r"
rf"{(_escape_regex_range_chars(self.esc_char) if self.has_esc_char else '')}])"
)
self.pattern = "".join(
[
re.escape(self.quote_char),
"(?:",
'|'.join(inner_pattern),
")*",
re.escape(self.end_quote_char),
]
)
if self.unquote_results:
if self.convert_whitespace_escapes:
self.unquote_scan_re = re.compile(
rf"({'|'.join(re.escape(k) for k in self.ws_map)})"
rf"|(\\[0-7]{3}|\\0|\\x[0-9a-fA-F]{2}|\\u[0-9a-fA-F]{4})"
rf"|({re.escape(self.esc_char)}.)"
rf"|(\n|.)",
flags=self.re_flags,
)
else:
self.unquote_scan_re = re.compile(
rf"({re.escape(self.esc_char)}.)"
rf"|(\n|.)",
flags=self.re_flags
)
# fmt: on
try:
self.re = re.compile(self.pattern, self.re_flags)
self.reString = self.pattern
self.re_match = self.re.match
except re.error:
raise ValueError(f"invalid pattern {self.pattern!r} passed to Regex")
self.errmsg = f"Expected {self.name}"
self.mayIndexError = False
self._may_return_empty = True
def _generateDefaultName(self) -> str:
if self.quote_char == self.end_quote_char and isinstance(
self.quote_char, str_type
):
return f"string enclosed in {self.quote_char!r}"
return f"quoted string, starting with {self.quote_char} ending with {self.end_quote_char}"
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
# check first character of opening quote to see if that is a match
# before doing the more complicated regex match
result = (
instring[loc] == self.first_quote_char
and self.re_match(instring, loc)
or None
)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
# get ending loc and matched string from regex matching result
loc = result.end()
ret = result.group()
def convert_escaped_numerics(s: str) -> str:
if s == "0":
return "\0"
if s.isdigit() and len(s) == 3:
return chr(int(s, base=8))
elif s.startswith(("u", "x")):
return chr(int(s[1:], base=16))
else:
return s
if self.unquote_results:
# strip off quotes
ret = ret[self.quote_char_len : -self.end_quote_char_len]
if isinstance(ret, str_type):
# fmt: off
if self.convert_whitespace_escapes:
# as we iterate over matches in the input string,
# collect from whichever match group of the unquote_scan_re
# regex matches (only 1 group will match at any given time)
ret = "".join(
# match group 1 matches \t, \n, etc.
self.ws_map[match.group(1)] if match.group(1)
# match group 2 matches escaped octal, null, hex, and Unicode
# sequences
else convert_escaped_numerics(match.group(2)[1:]) if match.group(2)
# match group 3 matches escaped characters
else match.group(3)[-1] if match.group(3)
# match group 4 matches any character
else match.group(4)
for match in self.unquote_scan_re.finditer(ret)
)
else:
ret = "".join(
# match group 1 matches escaped characters
match.group(1)[-1] if match.group(1)
# match group 2 matches any character
else match.group(2)
for match in self.unquote_scan_re.finditer(ret)
)
# fmt: on
# replace escaped quotes
if self.esc_quote:
ret = ret.replace(self.esc_quote, self.end_quote_char)
return loc, ret
| QuotedString |
python | kamyu104__LeetCode-Solutions | Python/zero-array-transformation-ii.py | {
"start": 70,
"end": 1030
} | class ____(object):
def minZeroArray(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: int
"""
def binary_search(left, right, check):
while left <= right:
mid = left+(right-left)//2
if check(mid):
right = mid-1
else:
left = mid+1
return left
def check(k):
events = [0]*(len(nums)+1)
for i in xrange(k):
events[queries[i][0]] += queries[i][2]
events[queries[i][1]+1] -= queries[i][2]
curr = 0
for i in xrange(len(nums)):
curr += events[i]
if nums[i] > curr:
return False
return True
result = binary_search(0, len(queries), check)
return result if result <= len(queries) else -1
| Solution |
python | django-haystack__django-haystack | test_haystack/elasticsearch7_tests/test_backend.py | {
"start": 59888,
"end": 62602
} | class ____(TestCase):
def setUp(self):
super().setUp()
# Wipe it clean.
self.raw_es = elasticsearch.Elasticsearch(
settings.HAYSTACK_CONNECTIONS["elasticsearch"]["URL"]
)
clear_elasticsearch_index()
# Stow.
self.old_ui = connections["elasticsearch"].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = Elasticsearch7BoostMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections["elasticsearch"]._index = self.ui
self.sb = connections["elasticsearch"].get_backend()
self.sample_objs = []
for i in range(1, 5):
mock = AFourthMockModel()
mock.id = i
if i % 2:
mock.author = "daniel"
mock.editor = "david"
else:
mock.author = "david"
mock.editor = "daniel"
mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)
self.sample_objs.append(mock)
def tearDown(self):
connections["elasticsearch"]._index = self.old_ui
super().tearDown()
def raw_search(self, query):
return self.raw_es.search(
q="*:*", index=settings.HAYSTACK_CONNECTIONS["elasticsearch"]["INDEX_NAME"]
)
def test_boost(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_search("*:*")["hits"]["total"]["value"], 4)
results = SearchQuerySet(using="elasticsearch").filter(
SQ(author="daniel") | SQ(editor="daniel")
)
self.assertEqual(
set([result.id for result in results]),
{
"core.afourthmockmodel.4",
"core.afourthmockmodel.3",
"core.afourthmockmodel.1",
"core.afourthmockmodel.2",
},
)
def test__to_python(self):
self.assertEqual(self.sb._to_python("abc"), "abc")
self.assertEqual(self.sb._to_python("1"), 1)
self.assertEqual(self.sb._to_python("2653"), 2653)
self.assertEqual(self.sb._to_python("25.5"), 25.5)
self.assertEqual(self.sb._to_python("[1, 2, 3]"), [1, 2, 3])
self.assertEqual(
self.sb._to_python('{"a": 1, "b": 2, "c": 3}'), {"a": 1, "c": 3, "b": 2}
)
self.assertEqual(
self.sb._to_python("2009-05-09T16:14:00"),
datetime.datetime(2009, 5, 9, 16, 14),
)
self.assertEqual(
self.sb._to_python("2009-05-09T00:00:00"),
datetime.datetime(2009, 5, 9, 0, 0),
)
self.assertEqual(self.sb._to_python(None), None)
| Elasticsearch7BoostBackendTestCase |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/get_task_states_test.py | {
"start": 6157,
"end": 6438
} | class ____(GetTaskStatesTest, test.TestCase):
"""This covers the cases where multiple workers and PS are used."""
def setUp(self):
super().setUp(2, 2)
if __name__ == "__main__":
v2_compat.enable_v2_behavior()
multi_process_runner.test_main()
| MultiWorkerGetTaskStatesTest |
python | jazzband__django-polymorphic | src/polymorphic/models.py | {
"start": 538,
"end": 594
} | class ____(LookupError):
pass
| PolymorphicTypeUndefined |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_quicksight.py | {
"start": 1385,
"end": 3862
} | class ____:
def setup_method(self):
self.default_op_kwargs = {
"task_id": "quicksight_sensor",
"aws_conn_id": None,
"data_set_id": DATA_SET_ID,
"ingestion_id": INGESTION_ID,
}
def test_init(self):
self.default_op_kwargs.pop("aws_conn_id", None)
sensor = QuickSightSensor(
**self.default_op_kwargs,
# Generic hooks parameters
aws_conn_id="fake-conn-id",
region_name="ca-west-1",
verify=True,
botocore_config={"read_timeout": 42},
)
assert sensor.hook.client_type == "quicksight"
assert sensor.hook.resource_type is None
assert sensor.hook.aws_conn_id == "fake-conn-id"
assert sensor.hook._region_name == "ca-west-1"
assert sensor.hook._verify is True
assert sensor.hook._config is not None
assert sensor.hook._config.read_timeout == 42
sensor = QuickSightSensor(**self.default_op_kwargs)
assert sensor.hook.aws_conn_id == "aws_default"
assert sensor.hook._region_name is None
assert sensor.hook._verify is None
assert sensor.hook._config is None
@pytest.mark.parametrize("status", ["COMPLETED"])
def test_poke_completed(self, status, mocked_get_status):
mocked_get_status.return_value = status
assert QuickSightSensor(**self.default_op_kwargs).poke({}) is True
mocked_get_status.assert_called_once_with(None, DATA_SET_ID, INGESTION_ID)
@pytest.mark.parametrize("status", ["INITIALIZED"])
def test_poke_not_completed(self, status, mocked_get_status):
mocked_get_status.return_value = status
assert QuickSightSensor(**self.default_op_kwargs).poke({}) is False
mocked_get_status.assert_called_once_with(None, DATA_SET_ID, INGESTION_ID)
@pytest.mark.parametrize("status", ["FAILED", "CANCELLED"])
def test_poke_terminated_status(self, status, mocked_get_status, mocked_get_error_info):
mocked_get_status.return_value = status
mocked_get_error_info.return_value = "something bad happen"
with pytest.raises(AirflowException, match="Error info: something bad happen"):
QuickSightSensor(**self.default_op_kwargs).poke({})
mocked_get_status.assert_called_once_with(None, DATA_SET_ID, INGESTION_ID)
mocked_get_error_info.assert_called_once_with(None, DATA_SET_ID, INGESTION_ID)
| TestQuickSightSensor |
python | allegroai__clearml | clearml/utilities/gpu/pynvml.py | {
"start": 57123,
"end": 58122
} | class ____(_PrintableStructure):
_fields_ = [
('sessionId', c_uint),
('pid', c_uint),
('vgpuInstance', _nvmlVgpuInstance_t),
('displayOrdinal', c_uint),
('sessionType', c_uint),
('sessionFlags', c_uint),
('hMaxResolution', c_uint),
('vMaxResolution', c_uint),
('hResolution', c_uint),
('vResolution', c_uint),
('averageFPS', c_uint),
('averageLatency', c_uint),
]
NVML_DEVICE_MIG_DISABLE = 0x0
NVML_DEVICE_MIG_ENABLE = 0x1
NVML_GPU_INSTANCE_PROFILE_1_SLICE = 0x0
NVML_GPU_INSTANCE_PROFILE_2_SLICE = 0x1
NVML_GPU_INSTANCE_PROFILE_3_SLICE = 0x2
NVML_GPU_INSTANCE_PROFILE_4_SLICE = 0x3
NVML_GPU_INSTANCE_PROFILE_7_SLICE = 0x4
NVML_GPU_INSTANCE_PROFILE_8_SLICE = 0x5
NVML_GPU_INSTANCE_PROFILE_6_SLICE = 0x6
NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV1 = 0x7
NVML_GPU_INSTANCE_PROFILE_2_SLICE_REV1 = 0x8
NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV2 = 0x9
NVML_GPU_INSTANCE_PROFILE_COUNT = 0xA
| c_nvmlFBCSession_t |
python | sqlalchemy__sqlalchemy | examples/association/dict_of_sets_with_default.py | {
"start": 1433,
"end": 1879
} | class ____(Base):
__tablename__ = "a"
associations: Mapped[Mapping[str, B]] = relationship(
"B",
collection_class=lambda: GenDefaultCollection(
operator.attrgetter("key")
),
)
collections: AssociationProxy[dict[str, set[int]]] = association_proxy(
"associations", "values"
)
"""Bridge the association from 'associations' over to the 'values'
association proxy of B.
"""
| A |
python | PrefectHQ__prefect | src/prefect/context.py | {
"start": 13633,
"end": 16653
} | class ____(RunContext):
"""
The context for a flow run. Data in this context is only available from within a
flow run function.
Attributes:
flow: The flow instance associated with the run
flow_run: The API metadata for the flow run
task_runner: The task runner instance being used for the flow run
run_results: A mapping of result ids to run states for this flow run
log_prints: Whether to log print statements from the flow run
parameters: The parameters passed to the flow run
detached: Flag indicating if context has been serialized and sent to remote infrastructure
result_store: The result store used to persist results
persist_result: Whether to persist the flow run result
task_run_dynamic_keys: Counter for task calls allowing unique keys
observed_flow_pauses: Counter for flow pauses
events: Events worker to emit events
"""
flow: Optional["Flow[Any, Any]"] = None
flow_run: Optional[FlowRun] = None
task_runner: TaskRunner[Any]
log_prints: bool = False
parameters: Optional[dict[str, Any]] = None
# Flag signaling if the flow run context has been serialized and sent
# to remote infrastructure.
detached: bool = False
# Result handling
result_store: ResultStore
persist_result: bool = Field(default_factory=get_default_persist_setting)
# Counter for task calls allowing unique
task_run_dynamic_keys: dict[str, Union[str, int]] = Field(default_factory=dict)
# Counter for flow pauses
observed_flow_pauses: dict[str, int] = Field(default_factory=dict)
# Tracking for result from task runs and sub flows in this flow run for
# dependency tracking. Holds the ID of the object returned by
# the run and state
run_results: dict[int, tuple[State, RunType]] = Field(default_factory=dict)
# Tracking information needed to track asset linage between
# tasks and materialization
task_run_assets: dict[UUID, set[Asset]] = Field(default_factory=dict)
# Events worker to emit events
events: Optional[EventsWorker] = None
__var__: ClassVar[ContextVar[Self]] = ContextVar("flow_run")
def serialize(self: Self, include_secrets: bool = True) -> dict[str, Any]:
serialized = self.model_dump(
include={
"flow_run",
"flow",
"parameters",
"log_prints",
"start_time",
"input_keyset",
"persist_result",
},
exclude_unset=True,
context={"include_secrets": include_secrets},
)
if self.result_store:
serialized["result_store"] = self.result_store.model_dump(
serialize_as_any=True,
exclude_unset=True,
context={"include_secrets": include_secrets},
)
return serialized
FlowRunContext = EngineContext # for backwards compatibility
| EngineContext |
python | encode__django-rest-framework | tests/test_request.py | {
"start": 10113,
"end": 10422
} | class ____(TestCase):
def test_default_secure_false(self):
request = Request(factory.get('/', secure=False))
assert request.scheme == 'http'
def test_default_secure_true(self):
request = Request(factory.get('/', secure=True))
assert request.scheme == 'https'
| TestSecure |
python | huggingface__transformers | tests/models/sam/test_image_processing_sam.py | {
"start": 4049,
"end": 13134
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = SamImageProcessor if is_vision_available() else None
fast_image_processing_class = SamImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = SamImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_rescale"))
self.assertTrue(hasattr(image_processing, "rescale_factor"))
self.assertTrue(hasattr(image_processing, "do_pad"))
self.assertTrue(hasattr(image_processing, "pad_size"))
self.assertTrue(hasattr(image_processing, "mask_size"))
self.assertTrue(hasattr(image_processing, "mask_pad_size"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processing_class = image_processing_class(**self.image_processor_dict)
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"longest_edge": 20})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size={"longest_edge": 42})
self.assertEqual(image_processor.size, {"longest_edge": 42})
def test_call_segmentation_maps(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processor
image_processor = image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
maps = []
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
encoding = image_processor(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.pad_size["height"],
self.image_processor_tester.pad_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
1,
self.image_processor_tester.mask_pad_size["height"],
self.image_processor_tester.mask_pad_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched
encoding = image_processor(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.pad_size["height"],
self.image_processor_tester.pad_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
self.image_processor_tester.batch_size,
self.image_processor_tester.mask_pad_size["height"],
self.image_processor_tester.mask_pad_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test not batched input (PIL images)
image, segmentation_map = prepare_semantic_single_inputs()
encoding = image_processor(image, segmentation_map, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.pad_size["height"],
self.image_processor_tester.pad_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
1,
self.image_processor_tester.mask_pad_size["height"],
self.image_processor_tester.mask_pad_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched input (PIL images)
images, segmentation_maps = prepare_semantic_batch_inputs()
encoding = image_processor(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape,
(
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.pad_size["height"],
self.image_processor_tester.pad_size["width"],
),
)
self.assertEqual(
encoding["labels"].shape,
(
2,
self.image_processor_tester.mask_pad_size["height"],
self.image_processor_tester.mask_pad_size["width"],
),
)
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image, dummy_map = prepare_semantic_single_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
image_encoding_slow = image_processor_slow(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
image_encoding_fast = image_processor_fast(dummy_image, segmentation_maps=dummy_map, return_tensors="pt")
self.assertTrue(torch.allclose(image_encoding_slow.pixel_values, image_encoding_fast.pixel_values, atol=1e-1))
self.assertLessEqual(
torch.mean(torch.abs(image_encoding_slow.pixel_values - image_encoding_fast.pixel_values)).item(), 1e-3
)
self.assertTrue(torch.allclose(image_encoding_slow.labels, image_encoding_fast.labels, atol=1e-1))
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_images, dummy_maps = prepare_semantic_batch_inputs()
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, segmentation_maps=dummy_maps, return_tensors="pt")
self.assertTrue(torch.allclose(encoding_slow.pixel_values, encoding_fast.pixel_values, atol=1e-1))
self.assertLessEqual(
torch.mean(torch.abs(encoding_slow.pixel_values - encoding_fast.pixel_values)).item(), 1e-3
)
| SamImageProcessingTest |
python | ansible__ansible | lib/ansible/module_utils/common/arg_spec.py | {
"start": 2787,
"end": 11219
} | class ____:
"""Argument spec validation class
Creates a validator based on the ``argument_spec`` that can be used to
validate a number of parameters using the :meth:`validate` method.
"""
def __init__(self, argument_spec,
mutually_exclusive=None,
required_together=None,
required_one_of=None,
required_if=None,
required_by=None,
):
"""
:arg argument_spec: Specification of valid parameters and their type. May
include nested argument specs.
:type argument_spec: dict[str, dict]
:kwarg mutually_exclusive: List or list of lists of terms that should not
be provided together.
:type mutually_exclusive: list[str] or list[list[str]]
:kwarg required_together: List of lists of terms that are required together.
:type required_together: list[list[str]]
:kwarg required_one_of: List of lists of terms, one of which in each list
is required.
:type required_one_of: list[list[str]]
:kwarg required_if: List of lists of ``[parameter, value, [parameters]]`` where
one of ``[parameters]`` is required if ``parameter == value``.
:type required_if: list
:kwarg required_by: Dictionary of parameter names that contain a list of
parameters required by each key in the dictionary.
:type required_by: dict[str, list[str]]
"""
self._mutually_exclusive = mutually_exclusive
self._required_together = required_together
self._required_one_of = required_one_of
self._required_if = required_if
self._required_by = required_by
self._valid_parameter_names = set()
self.argument_spec = argument_spec
for key in sorted(self.argument_spec.keys()):
aliases = self.argument_spec[key].get('aliases')
if aliases:
self._valid_parameter_names.update(["{key} ({aliases})".format(key=key, aliases=", ".join(sorted(aliases)))])
else:
self._valid_parameter_names.update([key])
def validate(self, parameters, *args, **kwargs):
"""Validate ``parameters`` against argument spec.
Error messages in the :class:`ValidationResult` may contain no_log values and should be
sanitized with :func:`~ansible.module_utils.common.parameters.sanitize_keys` before logging or displaying.
:arg parameters: Parameters to validate against the argument spec
:type parameters: dict[str, dict]
:return: :class:`ValidationResult` containing validated parameters.
:Simple Example:
.. code-block:: text
argument_spec = {
'name': {'type': 'str'},
'age': {'type': 'int'},
}
parameters = {
'name': 'bo',
'age': '42',
}
validator = ArgumentSpecValidator(argument_spec)
result = validator.validate(parameters)
if result.error_messages:
sys.exit("Validation failed: {0}".format(", ".join(result.error_messages))
valid_params = result.validated_parameters
"""
result = ValidationResult(parameters)
result._no_log_values.update(set_fallbacks(self.argument_spec, result._validated_parameters))
alias_warnings = []
alias_deprecations = []
try:
result._aliases.update(_handle_aliases(self.argument_spec, result._validated_parameters, alias_warnings, alias_deprecations))
except (TypeError, ValueError) as e:
result.errors.append(AliasError(to_native(e)))
legal_inputs = _get_legal_inputs(self.argument_spec, result._validated_parameters, result._aliases)
for option, alias in alias_warnings:
result._warnings.append({'option': option, 'alias': alias})
for deprecation in alias_deprecations:
result._deprecations.append({
'msg': "Alias '%s' is deprecated. See the module docs for more information" % deprecation['name'],
'version': deprecation.get('version'),
'date': deprecation.get('date'),
'collection_name': deprecation.get('collection_name'),
})
try:
result._no_log_values.update(_list_no_log_values(self.argument_spec, result._validated_parameters))
except TypeError as te:
result.errors.append(NoLogError(to_native(te)))
try:
result._deprecations.extend(_list_deprecations(self.argument_spec, result._validated_parameters))
except TypeError as te:
result.errors.append(DeprecationError(to_native(te)))
try:
result._unsupported_parameters.update(
_get_unsupported_parameters(
self.argument_spec,
result._validated_parameters,
legal_inputs,
store_supported=result._supported_parameters,
)
)
except TypeError as te:
result.errors.append(RequiredDefaultError(to_native(te)))
except ValueError as ve:
result.errors.append(AliasError(to_native(ve)))
try:
check_mutually_exclusive(self._mutually_exclusive, result._validated_parameters)
except TypeError as te:
result.errors.append(MutuallyExclusiveError(to_native(te)))
result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters, False))
try:
check_required_arguments(self.argument_spec, result._validated_parameters)
except TypeError as e:
result.errors.append(RequiredError(to_native(e)))
_validate_argument_types(self.argument_spec, result._validated_parameters, errors=result.errors)
_validate_argument_values(self.argument_spec, result._validated_parameters, errors=result.errors)
for check in _ADDITIONAL_CHECKS:
try:
check['func'](getattr(self, "_{attr}".format(attr=check['attr'])), result._validated_parameters)
except TypeError as te:
result.errors.append(check['err'](to_native(te)))
result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters))
alias_deprecations = []
_validate_sub_spec(self.argument_spec, result._validated_parameters,
errors=result.errors,
no_log_values=result._no_log_values,
unsupported_parameters=result._unsupported_parameters,
supported_parameters=result._supported_parameters,
alias_deprecations=alias_deprecations,)
for deprecation in alias_deprecations:
result._deprecations.append({
'msg': "Alias '%s' is deprecated. See the module docs for more information" % deprecation['name'],
'version': deprecation.get('version'),
'date': deprecation.get('date'),
'collection_name': deprecation.get('collection_name'),
})
if result._unsupported_parameters:
flattened_names = []
for item in result._unsupported_parameters:
if isinstance(item, tuple):
flattened_names.append(".".join(item))
else:
flattened_names.append(item)
unsupported_string = ", ".join(sorted(list(flattened_names)))
supported_params = supported_aliases = []
if result._supported_parameters.get(item):
supported_params = sorted(list(result._supported_parameters[item][0]))
supported_aliases = sorted(list(result._supported_parameters[item][1]))
supported_string = ", ".join(supported_params)
if supported_aliases:
aliases_string = ", ".join(supported_aliases)
supported_string += " (%s)" % aliases_string
msg = "{0}. Supported parameters include: {1}.".format(unsupported_string, supported_string)
result.errors.append(UnsupportedError(msg))
return result
| ArgumentSpecValidator |
python | ray-project__ray | release/llm_tests/serve/test_llm_serve_correctness.py | {
"start": 2396,
"end": 7811
} | class ____:
def __init__(
self,
tensor_parallel_size: int = 1,
pipeline_parallel_size: int = 1,
model_id: str = MODEL_ID,
):
self.tensor_parallel_size = tensor_parallel_size
self.pipeline_parallel_size = pipeline_parallel_size
self.model_id = model_id
self.vllm_url = self._start_vllm_server()
self.openai_client = create_openai_client(self.vllm_url)
wait_for_server_ready(self.vllm_url, server_type="vllm", timeout=240)
def _start_vllm_server(self) -> str:
"""Start vLLM server with specified parallelism parameters."""
vllm_port = 8001
cmd = [
"vllm",
"serve",
self.model_id,
"--port",
str(vllm_port),
"--distributed-executor-backend=ray",
"--tensor-parallel-size",
str(self.tensor_parallel_size),
"--pipeline-parallel-size",
str(self.pipeline_parallel_size),
]
self.process = subprocess.Popen(cmd)
return f"http://localhost:{vllm_port}"
def generate_completion(self, test_prompt: str) -> str:
"""Generate completion using the provided OpenAI client."""
return generate_completion(self.openai_client, self.model_id, test_prompt)
def generate_chat_completion(self, test_message: str) -> str:
"""Generate chat completion using the provided OpenAI client."""
return generate_chat_completion(self.openai_client, self.model_id, test_message)
def shutdown(self):
"""Shutdown the vLLM server."""
self.process.terminate()
for _ in range(5):
if self.process.poll() is not None:
break
time.sleep(1)
if self.process.poll() is None:
self.process.kill()
def wait_for_server_ready(
url: str,
server_type: Literal["ray", "vllm"] = "ray",
timeout: int = 120,
retry_interval: int = 2,
) -> bool:
"""Poll the server until it's ready or timeout is reached.
Args:
url: The server URL to check
server_type: Either "ray" or "vllm"
timeout: Maximum time to wait in seconds
retry_interval: Time between retry attempts
"""
start_time = time.time()
while time.time() - start_time < timeout:
try:
# Directly test if the server can handle a completion request
model_id = MODEL_ID if server_type == "vllm" else RAY_MODEL_ID
test_data = {
"model": model_id,
"prompt": "test",
"max_tokens": 5,
"temperature": 0,
}
completion_response = requests.post(
f"{url}/v1/completions", json=test_data, timeout=10
)
if completion_response.status_code == 200:
print(
f"{server_type.upper()} server at {url} is ready to handle requests!"
)
return True
except Exception:
pass
print(f"Waiting for {server_type.upper()} server at {url} to be ready...")
time.sleep(retry_interval)
raise TimeoutError(
f"{server_type.upper()} server at {url} did not become ready within {timeout} seconds"
)
@pytest.mark.parametrize(
"tensor_parallel_size, pipeline_parallel_size",
[
(1, 1),
(2, 1),
(1, 2),
(2, 2),
],
)
def test_llm_serve_correctness(
tensor_parallel_size: int, pipeline_parallel_size: int
) -> None:
"""Test that Ray Serve and vLLM produce the same completion output for the same input."""
test_prompt = "Two households, both alike in dignity,"
test_message = "What is the capital of France?"
print(
f"Starting Ray Serve LLM with tensor_parallel_size={tensor_parallel_size}, pipeline_parallel_size={pipeline_parallel_size}"
)
ray_url = start_ray_serve(tensor_parallel_size, pipeline_parallel_size)
ray_client = create_openai_client(ray_url)
wait_for_server_ready(ray_url, server_type="ray", timeout=240)
time.sleep(5) # Buffer time for server to be ready
ray_completion_output = generate_completion(ray_client, RAY_MODEL_ID, test_prompt)
ray_chat_output = generate_chat_completion(ray_client, RAY_MODEL_ID, test_message)
serve.shutdown()
print(
f"Starting vLLM server with tensor_parallel_size={tensor_parallel_size}, pipeline_parallel_size={pipeline_parallel_size}"
)
vllm_server = VllmServer(tensor_parallel_size, pipeline_parallel_size)
time.sleep(5) # Buffer time for server to be ready
vllm_completion_output = vllm_server.generate_completion(test_prompt)
vllm_chat_output = vllm_server.generate_chat_completion(test_message)
vllm_server.shutdown()
assert ray_completion_output == vllm_completion_output, (
f"Ray and vLLM outputs do not match with TP={tensor_parallel_size}, PP={pipeline_parallel_size}\n"
f"Ray output: {ray_completion_output}\n"
f"vLLM output: {vllm_completion_output}"
)
assert ray_chat_output == vllm_chat_output, (
f"Ray and vLLM chat outputs do not match with TP={tensor_parallel_size}, PP={pipeline_parallel_size}\n"
f"Ray output: {ray_chat_output}\n"
f"vLLM output: {vllm_chat_output}"
)
if __name__ == "__main__":
pytest.main(["-xvs", __file__])
| VllmServer |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query_annotated.py | {
"start": 276,
"end": 336
} | class ____(Enum):
RED = 1
GREEN = 2
BLUE = 3
| Color |
python | django__django | tests/foreign_object/tests.py | {
"start": 23742,
"end": 25491
} | class ____(SimpleTestCase):
@isolate_apps("foreign_object")
def test_check_composite_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
class Meta:
unique_together = (("a", "b"),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = models.ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=("a", "b"),
to_fields=("a", "b"),
related_name="children",
)
self.assertEqual(Child._meta.get_field("parent").check(from_model=Child), [])
@isolate_apps("foreign_object")
def test_check_subset_composite_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
class Meta:
unique_together = (("a", "b"),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
d = models.CharField(max_length=255)
parent = models.ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=("a", "b", "c"),
to_fields=("a", "b", "c"),
related_name="children",
)
self.assertEqual(Child._meta.get_field("parent").check(from_model=Child), [])
| TestModelCheckTests |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-lilac/llama_index/readers/lilac/base.py | {
"start": 314,
"end": 3894
} | class ____(BaseReader):
"""
Lilac dataset reader.
"""
def load_data(
self,
dataset: str,
text_path: "Path" = "text",
doc_id_path: Optional["Path"] = "doc_id",
columns: Optional[List["ColumnId"]] = None,
filters: Optional[List["FilterLike"]] = None,
project_dir: Optional[str] = None,
) -> List[Document]:
"""
Load text from relevant posts and top-level comments in subreddit(s), given keyword(s) for search.
Args:
project_dir (Optional[str]): The Lilac project dir to read from. If not defined, uses the `LILAC_PROJECT_DIR`
environment variable.
text_path: The path to the text field in the dataset. If not defined, uses 'text'.
columns (Optional[List[ColumnId]]): The columns to load from the dataset. If not defined, loads all columns.
dataset (str): The dataset to load. Should be formatted like {namespace}/{dataset_name}.
filters (Optional[Filter]): A filter to apply to the dataset before loading into documents. Useful to filter
for labeled data.
"""
try:
import lilac as ll
except ImportError:
raise ("`lilac` package not found, please run `pip install lilac`")
namespace, dataset_name = dataset.split("/")
lilac_dataset = ll.get_dataset(namespace, dataset_name, project_dir=project_dir)
# Check to make sure text path, and doc_id path are valid.
manifest = lilac_dataset.manifest()
text_path = ll.normalize_path(text_path)
text_field = manifest.data_schema.get_field(text_path)
if not text_field:
raise ValueError(
f"Could not find text field {text_path} in dataset {dataset}"
)
doc_id_path = ll.normalize_path(doc_id_path)
doc_id_field = manifest.data_schema.get_field(doc_id_path)
if not doc_id_field:
raise ValueError(
f"Could not find doc_id field {doc_id_path} in dataset {dataset}"
)
rows = lilac_dataset.select_rows(
columns=([*columns, text_field, doc_id_path]) if columns else ["*"],
filters=filters,
combine_columns=True,
)
def _item_from_path(item: ll.Item, path: ll.PathTuple) -> ll.Item:
if len(path) == 1:
item = item[path[0]]
if isinstance(item, dict):
return item[ll.VALUE_KEY]
else:
return item
else:
return _item_from_path(item[path[0]], path[1:])
def _remove_item_path(item: ll.Item, path: ll.PathTuple) -> None:
if len(path) == 0:
return
if len(path) == 1:
if item and path[0] in item:
leaf_item = item[path[0]]
if isinstance(leaf_item, dict):
del item[path[0]][ll.VALUE_KEY]
else:
del item[path[0]]
return
else:
_remove_item_path(item[path[0]], path[1:])
documents: List[Document] = []
for row in rows:
text = _item_from_path(row, text_path)
doc_id = _item_from_path(row, doc_id_path)
_remove_item_path(row, text_path)
_remove_item_path(row, doc_id_path)
documents.append(Document(text=text, doc_id=doc_id, extra_info=row or {}))
return documents
| LilacReader |
python | getsentry__sentry | fixtures/safe_migrations_apps/bad_flow_delete_field_double_pending_app/migrations/0002_delete_pending.py | {
"start": 190,
"end": 507
} | class ____(CheckedMigration):
dependencies = [
("bad_flow_delete_field_double_pending_app", "0001_initial"),
]
operations = [
SafeRemoveField(
model_name="testtable",
name="field",
deletion_action=DeletionAction.MOVE_TO_PENDING,
),
]
| Migration |
python | ray-project__ray | python/ray/serve/tests/test_https_proxy.py | {
"start": 1743,
"end": 11053
} | class ____:
def test_https_basic_deployment(self, https_serve_instance):
"""Test basic HTTPS deployment functionality."""
@serve.deployment
def hello():
return "Hello HTTPS!"
serve.run(hello.bind())
# Test HTTPS request with certificate verification disabled for self-signed cert
response = requests.get(
"https://localhost:8000/hello",
verify=False, # Skip cert verification for self-signed
)
assert response.status_code == 200
assert response.text == "Hello HTTPS!"
def test_https_vs_http_requests(self, https_serve_instance):
"""Test that HTTP requests fail when HTTPS is enabled."""
@serve.deployment
def echo():
return "echo"
serve.run(echo.bind())
# HTTPS request should succeed
https_response = requests.get("https://localhost:8000/echo", verify=False)
assert https_response.status_code == 200
# HTTP request should fail with connection error
with pytest.raises(requests.exceptions.ConnectionError):
requests.get("http://localhost:8000/echo", timeout=5)
def test_https_with_fastapi_deployment(self, https_serve_instance):
"""Test HTTPS with FastAPI-based deployment."""
from fastapi import FastAPI
app = FastAPI()
@app.get("/items/{item_id}")
async def read_item(item_id: int):
return {"item_id": item_id, "secure": True}
@serve.deployment
@serve.ingress(app)
class FastAPIDeployment:
pass
serve.run(FastAPIDeployment.bind())
response = requests.get("https://localhost:8000/items/42", verify=False)
assert response.status_code == 200
assert response.json() == {"item_id": 42, "secure": True}
def test_https_concurrent_requests(self, https_serve_instance):
"""Test HTTPS with concurrent requests."""
import concurrent.futures
@serve.deployment
def concurrent_handler():
import time
time.sleep(0.1) # Small delay to test concurrency
return "concurrent"
serve.run(concurrent_handler.bind())
def make_request():
return requests.get(
"https://localhost:8000/concurrent_handler", verify=False
)
# Send 10 concurrent requests
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
futures = [executor.submit(make_request) for _ in range(10)]
responses = [f.result() for f in futures]
# All requests should succeed
for response in responses:
assert response.status_code == 200
assert response.text == "concurrent"
def test_https_large_payload(self, https_serve_instance):
"""Test HTTPS with large payloads."""
@serve.deployment
class LargePayloadHandler:
def __call__(self, request):
# Return a large response (1MB)
large_data = "x" * (1024 * 1024) # 1MB string
return {"data": large_data, "size": len(large_data)}
serve.run(LargePayloadHandler.bind())
response = requests.get(
"https://localhost:8000/LargePayloadHandler", verify=False
)
assert response.status_code == 200
data = response.json()
assert data["size"] == 1024 * 1024
assert len(data["data"]) == 1024 * 1024
def test_https_websocket_with_fastapi(self, https_serve_instance):
"""Test WebSocket functionality with FastAPI over HTTPS."""
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
app = FastAPI()
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
await websocket.accept()
try:
while True:
# Receive message from client
data = await websocket.receive_text()
message = json.loads(data)
# Echo back with modification
response = {
"echo": message.get("message", ""),
"secure": True,
"protocol": "wss",
}
await websocket.send_text(json.dumps(response))
except WebSocketDisconnect:
pass
@serve.deployment
@serve.ingress(app)
class WebSocketDeployment:
pass
serve.run(WebSocketDeployment.bind())
# Test WebSocket connection over HTTPS (wss://)
async def test_websocket():
# Create SSL context that doesn't verify certificates (for self-signed certs)
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
uri = "wss://localhost:8000/ws"
async with websockets.connect(uri, ssl=ssl_context) as websocket:
# Send test message
test_message = {"message": "Hello WebSocket over HTTPS!"}
await websocket.send(json.dumps(test_message))
# Receive response
response = await websocket.recv()
data = json.loads(response)
# Verify response
assert data["echo"] == "Hello WebSocket over HTTPS!"
assert data["secure"] is True
assert data["protocol"] == "wss"
# Send another message to test bidirectional communication
test_message2 = {"message": "Second message"}
await websocket.send(json.dumps(test_message2))
response2 = await websocket.recv()
data2 = json.loads(response2)
assert data2["echo"] == "Second message"
# Run the async test
asyncio.run(test_websocket())
def test_https_websocket_multiple_connections(self, https_serve_instance):
"""Test multiple WebSocket connections over HTTPS."""
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
app = FastAPI()
# Store active connections
connections = []
@app.websocket("/ws/broadcast")
async def websocket_broadcast(websocket: WebSocket):
await websocket.accept()
connections.append(websocket)
try:
while True:
data = await websocket.receive_text()
message = json.loads(data)
# Broadcast to all connections
broadcast_message = {
"type": "broadcast",
"message": message.get("message", ""),
"connections": len(connections),
"secure": True,
}
# Send to all connected clients
disconnected = []
for conn in connections:
try:
await conn.send_text(json.dumps(broadcast_message))
except Exception:
disconnected.append(conn)
# Remove disconnected clients
for conn in disconnected:
connections.remove(conn)
except WebSocketDisconnect:
if websocket in connections:
connections.remove(websocket)
@serve.deployment
@serve.ingress(app)
class WebSocketBroadcastDeployment:
pass
serve.run(WebSocketBroadcastDeployment.bind())
async def test_multiple_websockets():
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
uri = "wss://localhost:8000/ws/broadcast"
# Connect multiple clients
websocket1 = await websockets.connect(uri, ssl=ssl_context)
websocket2 = await websockets.connect(uri, ssl=ssl_context)
try:
# Send message from client 1
test_message = {"message": "Hello from client 1"}
await websocket1.send(json.dumps(test_message))
# Both clients should receive the broadcast
response1 = await websocket1.recv()
response2 = await websocket2.recv()
data1 = json.loads(response1)
data2 = json.loads(response2)
# Verify both received the same broadcast
assert data1["type"] == "broadcast"
assert data1["message"] == "Hello from client 1"
assert data1["connections"] == 2
assert data1["secure"] is True
assert data2["type"] == "broadcast"
assert data2["message"] == "Hello from client 1"
assert data2["connections"] == 2
assert data2["secure"] is True
finally:
await websocket1.close()
await websocket2.close()
# Run the async test
asyncio.run(test_multiple_websockets())
| TestHTTPSProxy |
python | getsentry__sentry | src/sentry/api/fields/actor.py | {
"start": 287,
"end": 611
} | class ____(serializers.Field):
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
def to_representation(self, value):
return value.identifier
def to_internal_value(self, data) -> Actor | None:
return parse_and_validate_actor(data, self.context["organization"].id)
| ActorField |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/digitalocean.py | {
"start": 739,
"end": 1498
} | class ____(CloudEnvironment):
"""Updates integration test environment after delegation. Will setup the config file as parameter."""
def get_environment_config(self) -> CloudEnvironmentConfig:
"""Return environment configuration for use in the test environment after delegation."""
parser = configparser.ConfigParser()
parser.read(self.config_path)
env_vars = dict(
DO_API_KEY=parser.get('default', 'key'),
)
display.sensitive.add(env_vars['DO_API_KEY'])
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
)
| DigitalOceanCloudEnvironment |
python | huggingface__transformers | src/transformers/models/pix2struct/image_processing_pix2struct.py | {
"start": 8185,
"end": 20061
} | class ____(BaseImageProcessor):
r"""
Constructs a Pix2Struct image processor.
Args:
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method. According to Pix2Struct paper and code, the image is normalized with its own mean and standard
deviation.
patch_size (`dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`):
The patch size to use for the image. According to Pix2Struct paper and code, the patch size is 16x16.
max_patches (`int`, *optional*, defaults to 2048):
The maximum number of patches to extract from the image as per the [Pix2Struct
paper](https://huggingface.co/papers/2210.03347).
is_vqa (`bool`, *optional*, defaults to `False`):
Whether or not the image processor is for the VQA task. If `True` and `header_text` is passed in, text is
rendered onto the input images.
"""
model_input_names = ["flattened_patches", "attention_mask"]
valid_kwargs = Pix2StructImageProcessorKwargs
def __init__(
self,
do_convert_rgb: bool = True,
do_normalize: bool = True,
patch_size: Optional[dict[str, int]] = None,
max_patches: int = 2048,
is_vqa: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
self.do_normalize = do_normalize
self.do_convert_rgb = do_convert_rgb
self.max_patches = max_patches
self.is_vqa = is_vqa
def extract_flattened_patches(
self,
image: np.ndarray,
max_patches: int,
patch_size: dict,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Extract flattened patches from an image.
Args:
image (`np.ndarray`):
Image to extract flattened patches from.
max_patches (`int`):
Maximum number of patches to extract.
patch_size (`dict`):
Dictionary containing the patch height and width.
Returns:
result (`np.ndarray`):
A sequence of `max_patches` flattened patches.
"""
requires_backends(self.extract_flattened_patches, "torch")
# convert to torch
image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format)
image = torch.from_numpy(image)
patch_height, patch_width = patch_size["height"], patch_size["width"]
image_height, image_width = get_image_size(image, ChannelDimension.FIRST)
# maximize scale s.t.
scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
resized_height = max(num_feasible_rows * patch_height, 1)
resized_width = max(num_feasible_cols * patch_width, 1)
image = torch.nn.functional.interpolate(
image.unsqueeze(0),
size=(resized_height, resized_width),
mode="bilinear",
align_corners=False,
antialias=True,
).squeeze(0)
# [1, rows, columns, patch_height * patch_width * image_channels]
patches = torch_extract_patches(image, patch_height, patch_width)
patches_shape = patches.shape
rows = patches_shape[1]
columns = patches_shape[2]
depth = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
patches = patches.reshape([rows * columns, depth])
# [rows * columns, 1]
row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
row_ids = row_ids.to(torch.float32)
col_ids = col_ids.to(torch.float32)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
result = torch.cat([row_ids, col_ids, patches], -1)
# [max_patches, 2 + patch_height * patch_width * image_channels]
result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
result = to_numpy_array(result)
return result
def normalize(
self,
image: np.ndarray,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Normalize an image. image = (image - image_mean) / image_std.
Args:
image (`np.ndarray`):
Image to normalize.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
if image.dtype == np.uint8:
image = image.astype(np.float32)
# take mean across the whole `image`
mean = np.mean(image)
std = np.std(image)
adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))
return normalize(
image,
mean=mean,
std=adjusted_stddev,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def preprocess(
self,
images: ImageInput,
header_text: Optional[str] = None,
do_convert_rgb: Optional[bool] = None,
do_normalize: Optional[bool] = None,
max_patches: Optional[int] = None,
patch_size: Optional[dict[str, int]] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> ImageInput:
"""
Preprocess an image or batch of images. The processor first computes the maximum possible number of
aspect-ratio preserving patches of size `patch_size` that can be extracted from the image. It then pads the
image with zeros to make the image respect the constraint of `max_patches`.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images.
header_text (`Union[list[str], str]`, *optional*):
Text to render as a header. Only has an effect if `image_processor.is_vqa` is `True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
max_patches (`int`, *optional*, defaults to `self.max_patches`):
Maximum number of patches to extract.
patch_size (`dict`, *optional*, defaults to `self.patch_size`):
Dictionary containing the patch height and width.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
patch_size = patch_size if patch_size is not None else self.patch_size
max_patches = max_patches if max_patches is not None else self.max_patches
is_vqa = self.is_vqa
if kwargs.get("data_format") is not None:
raise ValueError("data_format is not an accepted input as the outputs are ")
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
# PIL RGBA images are converted to RGB
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if is_vqa:
if header_text is None:
raise ValueError("A header text must be provided for VQA models.")
font_bytes = kwargs.pop("font_bytes", None)
font_path = kwargs.pop("font_path", None)
if isinstance(header_text, str):
header_text = [header_text] * len(images)
images = [
render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
for i, image in enumerate(images)
]
if do_normalize:
images = [self.normalize(image=image, input_data_format=input_data_format) for image in images]
# convert to torch tensor and permute
images = [
self.extract_flattened_patches(
image=image, max_patches=max_patches, patch_size=patch_size, input_data_format=input_data_format
)
for image in images
]
# create attention mask in numpy
attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
encoded_outputs = BatchFeature(
data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
)
return encoded_outputs
__all__ = ["Pix2StructImageProcessor"]
| Pix2StructImageProcessor |
python | catalyst-team__catalyst | catalyst/contrib/data/reader.py | {
"start": 4442,
"end": 5229
} | class ____(object):
"""Abstraction to compose several readers into one open function."""
def __init__(self, transforms: List[IReader]):
"""
Args:
transforms: list of reader to compose
"""
self.transforms = transforms
def __call__(self, element):
"""
Reads a row from your annotations dict and applies all readers and mixins
Args:
element: elem in your dataset.
Returns:
Value after applying all readers and mixins
"""
result = {}
for transform_fn in self.transforms:
result = {**result, **transform_fn(element)}
return result
__all__ = [
"IReader",
"ScalarReader",
"LambdaReader",
"ReaderCompose",
]
| ReaderCompose |
python | huggingface__transformers | src/transformers/models/gemma2/configuration_gemma2.py | {
"start": 1321,
"end": 10374
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Gemma2Model`]. It is used to instantiate an Gemma2
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Gemma2-7B.
e.g. [google/gemma2-7b](https://huggingface.co/google/gemma2-7b)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 256000):
Vocabulary size of the Gemma2 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Gemma2Model`]
hidden_size (`int`, *optional*, defaults to 2304):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 9216):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 26):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 4):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
head_dim (`int`, *optional*, defaults to 256):
The attention head dimension.
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 1):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 2):
Beginning of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
query_pre_attn_scalar (`float`, *optional*, defaults to 256):
scaling factor used on the attention scores
sliding_window (`int`, *optional*, defaults to 4096):
in Gemma2, every other layer uses sliding window attention. This is the size of the sliding window.
layer_types (`list`, *optional*):
Attention pattern for each layer.
final_logit_softcapping (`float`, *optional*, defaults to 30.0):
scaling factor when applying tanh softcapping on the logits.
attn_logit_softcapping (`float`, *optional*, defaults to 50.0):
scaling factor when applying tanh softcapping on the attention scores.
use_bidirectional_attention (`bool`, *optional*):
If True, the model will attend to all text tokens instead of using a causal mask.
```python
>>> from transformers import Gemma2Model, Gemma2Config
>>> # Initializing a Gemma2 gemma2-7b style configuration
>>> configuration = Gemma2Config()
>>> # Initializing a model from the gemma2-7b style configuration
>>> model = Gemma2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "gemma2"
keys_to_ignore_at_inference = ["past_key_values"]
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 256000,
hidden_size: Optional[int] = 2304,
intermediate_size: Optional[int] = 9216,
num_hidden_layers: Optional[int] = 26,
num_attention_heads: Optional[int] = 8,
num_key_value_heads: Optional[int] = 4,
head_dim: Optional[int] = 256,
hidden_activation: Optional[str] = "gelu_pytorch_tanh",
max_position_embeddings: Optional[int] = 8192,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 0,
eos_token_id: Optional[int] = 1,
bos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[bool] = True,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
query_pre_attn_scalar: Optional[int] = 256,
sliding_window: Optional[int] = 4096,
layer_types: Optional[list[str]] = None,
final_logit_softcapping: Optional[float] = 30.0,
attn_logit_softcapping: Optional[float] = 50.0,
use_bidirectional_attention: Optional[bool] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.head_dim = head_dim
self.num_key_value_heads = num_key_value_heads
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.hidden_activation = hidden_activation
self.query_pre_attn_scalar = query_pre_attn_scalar
self.sliding_window = sliding_window
self.final_logit_softcapping = final_logit_softcapping
self.attn_logit_softcapping = attn_logit_softcapping
self.layer_types = layer_types
self.use_bidirectional_attention = use_bidirectional_attention
if self.layer_types is None:
self.layer_types = [
"sliding_attention" if bool((i + 1) % 2) else "full_attention" for i in range(self.num_hidden_layers)
]
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["Gemma2Config"]
| Gemma2Config |
python | bokeh__bokeh | src/bokeh/core/property/dataspec.py | {
"start": 12291,
"end": 12468
} | class ____(DataSpec):
def __init__(self, default, *, help: str | None = None) -> None:
super().__init__(Enum(enums.FontStyle), default=default, help=help)
| FontStyleSpec |
python | ray-project__ray | python/ray/data/_internal/planner/exchange/push_based_shuffle_task_scheduler.py | {
"start": 12596,
"end": 15357
} | class ____:
def __init__(
self,
stage: _PushBasedShuffleStage,
shuffle_reduce,
all_merge_results: List[List[List[ObjectRef]]],
ray_remote_args,
reduce_args: List[Any],
_debug_limit_execution_to_num_blocks: Optional[int],
):
self._shuffle_reduce = shuffle_reduce
self._stage = stage
self._reduce_arg_blocks: List[Tuple[int, List[ObjectRef]]] = []
self._ray_remote_args = ray_remote_args
self._reduce_args = reduce_args
for reduce_idx in self._stage.merge_schedule.round_robin_reduce_idx_iterator():
merge_idx = self._stage.merge_schedule.get_merge_idx_for_reducer_idx(
reduce_idx
)
reduce_arg_blocks = [
merge_results.pop(0) for merge_results in all_merge_results[merge_idx]
]
self._reduce_arg_blocks.append((reduce_idx, reduce_arg_blocks))
assert len(self._reduce_arg_blocks) == stage.merge_schedule.output_num_blocks
if _debug_limit_execution_to_num_blocks is not None:
self._reduce_arg_blocks = self._reduce_arg_blocks[
:_debug_limit_execution_to_num_blocks
]
logger.debug(
f"Limiting execution to {len(self._reduce_arg_blocks)} reduce tasks"
)
for merge_idx, merge_results in enumerate(all_merge_results):
assert all(len(merge_result) == 0 for merge_result in merge_results), (
"Reduce stage did not process outputs from merge tasks at index: "
f"{merge_idx}"
)
self._reduce_results: List[Tuple[int, ObjectRef]] = []
def __iter__(self):
return self
def __next__(self):
if not self._reduce_arg_blocks:
raise StopIteration
reduce_idx, reduce_arg_blocks = self._reduce_arg_blocks.pop(0)
merge_idx = self._stage.merge_schedule.get_merge_idx_for_reducer_idx(reduce_idx)
# Submit one partition of reduce tasks, one for each of the P
# outputs produced by the corresponding merge task.
# We also add the merge task arguments so that the reduce task
# is colocated with its inputs.
block, meta_with_schema = self._shuffle_reduce.options(
**self._ray_remote_args,
**self._stage.get_merge_task_options(merge_idx),
num_returns=2,
).remote(*self._reduce_args, *reduce_arg_blocks, partial_reduce=False)
self._reduce_results.append((reduce_idx, block))
return meta_with_schema
def pop_reduce_results(self):
reduce_results = self._reduce_results
self._reduce_results = []
return reduce_results
| _ReduceStageIterator |
python | ethereum__web3.py | ens/_normalization.py | {
"start": 1292,
"end": 1355
} | class ____(Enum):
EMOJI = "emoji"
TEXT = "text"
| TokenType |
python | getsentry__sentry | src/sentry/analytics/events/onboarding_complete.py | {
"start": 76,
"end": 230
} | class ____(analytics.Event):
user_id: int
organization_id: int
referrer: str
analytics.register(OnboardingCompleteEvent)
| OnboardingCompleteEvent |
python | Textualize__textual | tests/text_area/test_messages.py | {
"start": 199,
"end": 3152
} | class ____(App):
def __init__(self):
super().__init__()
self.messages = []
@on(TextArea.Changed)
@on(TextArea.SelectionChanged)
def message_received(self, message: Message):
self.messages.append(message)
def compose(self) -> ComposeResult:
yield TextArea("123")
def get_changed_messages(messages: List[Event]) -> List[TextArea.Changed]:
return [message for message in messages if isinstance(message, TextArea.Changed)]
def get_selection_changed_messages(
messages: List[Event],
) -> List[TextArea.SelectionChanged]:
return [
message
for message in messages
if isinstance(message, TextArea.SelectionChanged)
]
async def test_changed_message_edit_via_api():
app = TextAreaApp()
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
assert get_changed_messages(app.messages) == []
text_area.insert("A")
await pilot.pause()
assert get_changed_messages(app.messages) == [TextArea.Changed(text_area)]
assert get_selection_changed_messages(app.messages) == [
TextArea.SelectionChanged(text_area.selection, text_area)
]
async def test_changed_message_via_typing():
app = TextAreaApp()
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
assert get_changed_messages(app.messages) == []
await pilot.press("a")
assert get_changed_messages(app.messages) == [TextArea.Changed(text_area)]
assert get_selection_changed_messages(app.messages) == [
TextArea.SelectionChanged(text_area.selection, text_area)
]
async def test_changed_message_edit_via_assignment():
app = TextAreaApp()
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
assert get_changed_messages(app.messages) == []
text_area.text = ""
await pilot.pause()
assert get_changed_messages(app.messages) == [TextArea.Changed(text_area)]
assert get_selection_changed_messages(app.messages) == []
async def test_selection_changed_via_api():
app = TextAreaApp()
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
assert get_selection_changed_messages(app.messages) == []
text_area.cursor_location = (0, 1)
await pilot.pause()
assert get_selection_changed_messages(app.messages) == [
TextArea.SelectionChanged(text_area.selection, text_area)
]
async def test_selection_changed_via_typing():
app = TextAreaApp()
async with app.run_test() as pilot:
text_area = app.query_one(TextArea)
assert get_selection_changed_messages(app.messages) == []
await pilot.press("a")
assert get_selection_changed_messages(app.messages) == [
TextArea.SelectionChanged(text_area.selection, text_area)
]
| TextAreaApp |
python | pytorch__pytorch | test/test_tensorboard.py | {
"start": 20560,
"end": 26925
} | class ____(BaseTestCase):
def test_pytorch_graph(self):
dummy_input = (torch.zeros(1, 3),)
class myLinear(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.l = torch.nn.Linear(3, 5)
def forward(self, x):
return self.l(x)
with self.createSummaryWriter() as w:
w.add_graph(myLinear(), dummy_input)
actual_proto, _ = graph(myLinear(), dummy_input)
expected_str = read_expected_content(self)
expected_proto = GraphDef()
text_format.Parse(expected_str, expected_proto)
self.assertEqual(len(expected_proto.node), len(actual_proto.node))
for i in range(len(expected_proto.node)):
expected_node = expected_proto.node[i]
actual_node = actual_proto.node[i]
self.assertEqual(expected_node.name, actual_node.name)
self.assertEqual(expected_node.op, actual_node.op)
self.assertEqual(expected_node.input, actual_node.input)
self.assertEqual(expected_node.device, actual_node.device)
self.assertEqual(
sorted(expected_node.attr.keys()), sorted(actual_node.attr.keys())
)
def test_nested_nn_squential(self):
dummy_input = torch.randn(2, 3)
class InnerNNSquential(torch.nn.Module):
def __init__(self, dim1, dim2):
super().__init__()
self.inner_nn_squential = torch.nn.Sequential(
torch.nn.Linear(dim1, dim2),
torch.nn.Linear(dim2, dim1),
)
def forward(self, x):
x = self.inner_nn_squential(x)
return x
class OuterNNSquential(torch.nn.Module):
def __init__(self, dim1=3, dim2=4, depth=2):
super().__init__()
layers = []
for _ in range(depth):
layers.append(InnerNNSquential(dim1, dim2))
self.outer_nn_squential = torch.nn.Sequential(*layers)
def forward(self, x):
x = self.outer_nn_squential(x)
return x
with self.createSummaryWriter() as w:
w.add_graph(OuterNNSquential(), dummy_input)
actual_proto, _ = graph(OuterNNSquential(), dummy_input)
expected_str = read_expected_content(self)
expected_proto = GraphDef()
text_format.Parse(expected_str, expected_proto)
self.assertEqual(len(expected_proto.node), len(actual_proto.node))
for i in range(len(expected_proto.node)):
expected_node = expected_proto.node[i]
actual_node = actual_proto.node[i]
self.assertEqual(expected_node.name, actual_node.name)
self.assertEqual(expected_node.op, actual_node.op)
self.assertEqual(expected_node.input, actual_node.input)
self.assertEqual(expected_node.device, actual_node.device)
self.assertEqual(
sorted(expected_node.attr.keys()), sorted(actual_node.attr.keys())
)
def test_pytorch_graph_dict_input(self):
class Model(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.l = torch.nn.Linear(3, 5)
def forward(self, x):
return self.l(x)
class ModelDict(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.l = torch.nn.Linear(3, 5)
def forward(self, x):
return {"out": self.l(x)}
dummy_input = torch.zeros(1, 3)
with self.createSummaryWriter() as w:
w.add_graph(Model(), dummy_input)
with self.createSummaryWriter() as w:
w.add_graph(Model(), dummy_input, use_strict_trace=True)
# expect error: Encountering a dict at the output of the tracer...
with self.assertRaises(RuntimeError):
with self.createSummaryWriter() as w:
w.add_graph(ModelDict(), dummy_input, use_strict_trace=True)
with self.createSummaryWriter() as w:
w.add_graph(ModelDict(), dummy_input, use_strict_trace=False)
def test_mlp_graph(self):
dummy_input = (torch.zeros(2, 1, 28, 28),)
# This MLP class with the above input is expected
# to fail JIT optimizations as seen at
# https://github.com/pytorch/pytorch/issues/18903
#
# However, it should not raise an error during
# the add_graph call and still continue.
class myMLP(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.input_len = 1 * 28 * 28
self.fc1 = torch.nn.Linear(self.input_len, 1200)
self.fc2 = torch.nn.Linear(1200, 1200)
self.fc3 = torch.nn.Linear(1200, 10)
def forward(self, x, update_batch_stats=True):
h = torch.nn.functional.relu(self.fc1(x.view(-1, self.input_len)))
h = self.fc2(h)
h = torch.nn.functional.relu(h)
h = self.fc3(h)
return h
with self.createSummaryWriter() as w:
w.add_graph(myMLP(), dummy_input)
def test_wrong_input_size(self):
with self.assertRaises(RuntimeError):
dummy_input = torch.rand(1, 9)
model = torch.nn.Linear(3, 5)
with self.createSummaryWriter() as w:
w.add_graph(model, dummy_input) # error
@skipIfNoTorchVision
def test_torchvision_smoke(self):
model_input_shapes = {
"alexnet": (2, 3, 224, 224),
"resnet34": (2, 3, 224, 224),
"resnet152": (2, 3, 224, 224),
"densenet121": (2, 3, 224, 224),
"vgg16": (2, 3, 224, 224),
"vgg19": (2, 3, 224, 224),
"vgg16_bn": (2, 3, 224, 224),
"vgg19_bn": (2, 3, 224, 224),
"mobilenet_v2": (2, 3, 224, 224),
}
for model_name, input_shape in model_input_shapes.items():
with self.createSummaryWriter() as w:
model = getattr(torchvision.models, model_name)()
w.add_graph(model, torch.zeros(input_shape))
| TestTensorBoardPytorchGraph |
python | tiangolo__fastapi | docs_src/extra_models/tutorial002.py | {
"start": 300,
"end": 824
} | class ____(UserBase):
hashed_password: str
def fake_password_hasher(raw_password: str):
return "supersecret" + raw_password
def fake_save_user(user_in: UserIn):
hashed_password = fake_password_hasher(user_in.password)
user_in_db = UserInDB(**user_in.dict(), hashed_password=hashed_password)
print("User saved! ..not really")
return user_in_db
@app.post("/user/", response_model=UserOut)
async def create_user(user_in: UserIn):
user_saved = fake_save_user(user_in)
return user_saved
| UserInDB |
python | MongoEngine__mongoengine | tests/test_common.py | {
"start": 95,
"end": 362
} | class ____:
def test__import_class(self):
doc_cls = _import_class("Document")
assert doc_cls is Document
def test__import_class_raise_if_not_known(self):
with pytest.raises(ValueError):
_import_class("UnknownClass")
| TestCommon |
python | django__django | tests/datatypes/models.py | {
"start": 585,
"end": 729
} | class ____(models.Model):
baked_date = models.DateField(auto_now_add=True)
baked_timestamp = models.DateTimeField(auto_now_add=True)
| RumBaba |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-ollama/tests/test_embeddings_ollama.py | {
"start": 2015,
"end": 8839
} | class ____:
"""Test cases for the new instruction functionality."""
def test_instruction_fields_default_none(self):
"""Test that instruction fields default to None."""
embedder = OllamaEmbedding(model_name="test-model")
assert embedder.query_instruction is None
assert embedder.text_instruction is None
def test_instruction_fields_set_correctly(self):
"""Test that instruction fields are properly set."""
embedder = OllamaEmbedding(
model_name="test-model",
query_instruction="Query instruction:",
text_instruction="Text instruction:",
)
assert embedder.query_instruction == "Query instruction:"
assert embedder.text_instruction == "Text instruction:"
def test_format_query_with_instruction(self):
"""Test query formatting with instruction."""
embedder = OllamaEmbedding(
model_name="test-model",
query_instruction="Represent the question for retrieval:",
)
result = embedder._format_query("What is AI?")
expected = "Represent the question for retrieval: What is AI?"
assert result == expected
def test_format_query_without_instruction(self):
"""Test query formatting without instruction."""
embedder = OllamaEmbedding(model_name="test-model")
result = embedder._format_query("What is AI?")
assert result == "What is AI?"
def test_format_text_with_instruction(self):
"""Test text formatting with instruction."""
embedder = OllamaEmbedding(
model_name="test-model",
text_instruction="Represent the document for retrieval:",
)
result = embedder._format_text("AI is a field of computer science")
expected = (
"Represent the document for retrieval: AI is a field of computer science"
)
assert result == expected
def test_format_text_without_instruction(self):
"""Test text formatting without instruction."""
embedder = OllamaEmbedding(model_name="test-model")
result = embedder._format_text("AI is a field of computer science")
assert result == "AI is a field of computer science"
def test_instruction_stripping(self):
"""Test that whitespace is handled correctly."""
embedder = OllamaEmbedding(
model_name="test-model",
query_instruction=" Query: ", # Extra spaces
)
result = embedder._format_query(" What is AI? ") # Extra spaces
expected = "Query: What is AI?" # Should be cleaned
assert result == expected
def test_empty_strings(self):
"""Test handling of empty strings."""
embedder = OllamaEmbedding(model_name="test-model", query_instruction="Query:")
result = embedder._format_query("")
expected = "Query:"
assert result == expected
@patch.object(OllamaEmbedding, "get_general_text_embedding")
def test_query_embedding_uses_instruction(self, mock_embed):
"""Test that query embedding methods use instructions."""
embedder = OllamaEmbedding(model_name="test-model", query_instruction="Query:")
mock_embed.return_value = [0.1, 0.2, 0.3]
embedder._get_query_embedding("What is AI?")
# Verify the formatting was applied
mock_embed.assert_called_once_with("Query: What is AI?")
@patch.object(OllamaEmbedding, "get_general_text_embedding")
def test_text_embedding_uses_instruction(self, mock_embed):
"""Test that text embedding methods use instructions."""
embedder = OllamaEmbedding(model_name="test-model", text_instruction="Text:")
mock_embed.return_value = [0.1, 0.2, 0.3]
embedder._get_text_embedding("AI is computer science")
# Verify the formatting was applied
mock_embed.assert_called_once_with("Text: AI is computer science")
@patch.object(OllamaEmbedding, "aget_general_text_embedding")
async def test_async_query_embedding_uses_instruction(self, mock_embed):
"""Test that async query embedding methods use instructions."""
embedder = OllamaEmbedding(
model_name="test-model", query_instruction="Async Query:"
)
mock_embed.return_value = [0.1, 0.2, 0.3]
await embedder._aget_query_embedding("What is AI?")
# Verify the formatting was applied
mock_embed.assert_called_once_with("Async Query: What is AI?")
@patch.object(OllamaEmbedding, "aget_general_text_embedding")
async def test_async_text_embedding_uses_instruction(self, mock_embed):
"""Test that async text embedding methods use instructions."""
embedder = OllamaEmbedding(
model_name="test-model", text_instruction="Async Text:"
)
mock_embed.return_value = [0.1, 0.2, 0.3]
await embedder._aget_text_embedding("AI is computer science")
# Verify the formatting was applied
mock_embed.assert_called_once_with("Async Text: AI is computer science")
@patch.object(OllamaEmbedding, "get_general_text_embedding")
def test_batch_text_embeddings_use_instruction(self, mock_embed):
"""Test that batch text embedding methods use instructions."""
embedder = OllamaEmbedding(model_name="test-model", text_instruction="Batch:")
mock_embed.return_value = [0.1, 0.2, 0.3]
embedder._get_text_embeddings(["Text 1", "Text 2"])
# Verify both calls used the instruction
expected_calls = [(("Batch: Text 1",),), (("Batch: Text 2",),)]
assert mock_embed.call_args_list == expected_calls
@patch.object(OllamaEmbedding, "aget_general_text_embedding")
async def test_async_batch_text_embeddings_use_instruction(self, mock_embed):
"""Test that async batch text embedding methods use instructions."""
embedder = OllamaEmbedding(
model_name="test-model", text_instruction="Async Batch:"
)
mock_embed.return_value = [0.1, 0.2, 0.3]
await embedder._aget_text_embeddings(["Text 1", "Text 2"])
# Verify both calls used the instruction
expected_calls = [(("Async Batch: Text 1",),), (("Async Batch: Text 2",),)]
assert mock_embed.call_args_list == expected_calls
def test_constructor_passes_instructions_to_parent(self):
"""Test that instructions are properly accessible as attributes."""
embedder = OllamaEmbedding(
model_name="test-model",
query_instruction="Query:",
text_instruction="Text:",
)
# Verify instructions are accessible as attributes
assert embedder.query_instruction == "Query:"
assert embedder.text_instruction == "Text:"
| TestInstructionFunctionality |
python | scipy__scipy | scipy/signal/tests/test_signaltools.py | {
"start": 20890,
"end": 37982
} | class ____:
@skip_xp_backends("torch", reason="dtypes do not match")
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_real(self, axes, xp):
a = xp.asarray([1, 2, 3])
expected = xp.asarray([1, 4, 10, 12, 9.])
if axes == '':
out = fftconvolve(a, a)
else:
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, a, axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
@skip_xp_backends("torch", reason="dtypes do not match")
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_real_axes(self, axes, xp):
a = xp.asarray([1, 2, 3])
expected = xp.asarray([1, 4, 10, 12, 9.])
a = xp.asarray(np.tile(a, [2, 1]))
expected = xp.asarray(np.tile(expected, [2, 1]))
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, a, axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_complex(self, axes, xp):
a = xp.asarray([1 + 1j, 2 + 2j, 3 + 3j])
expected = xp.asarray([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])
if axes == '':
out = fftconvolve(a, a)
else:
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, a, axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_complex_axes(self, axes, xp):
a = xp.asarray([1 + 1j, 2 + 2j, 3 + 3j])
expected = xp.asarray([0 + 2j, 0 + 8j, 0 + 20j, 0 + 24j, 0 + 18j])
a = xp.asarray(np.tile(a, [2, 1]))
expected = xp.asarray(np.tile(expected, [2, 1]))
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, a, axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
@pytest.mark.parametrize('axes', ['',
None,
[0, 1],
[1, 0],
[0, -1],
[-1, 0],
[-2, 1],
[1, -2],
[-2, -1],
[-1, -2]])
def test_2d_real_same(self, axes, xp):
a = xp.asarray([[1.0, 2, 3],
[4, 5, 6]])
expected = xp.asarray([[1.0, 4, 10, 12, 9],
[8, 26, 56, 54, 36],
[16, 40, 73, 60, 36]])
if axes == '':
out = fftconvolve(a, a)
else:
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, a, axes=axes)
xp_assert_close(out, expected)
@pytest.mark.parametrize('axes', [[1, 2],
[2, 1],
[1, -1],
[-1, 1],
[-2, 2],
[2, -2],
[-2, -1],
[-1, -2]])
def test_2d_real_same_axes(self, axes, xp):
a = xp.asarray([[1, 2, 3],
[4, 5, 6]])
expected = xp.asarray([[1, 4, 10, 12, 9],
[8, 26, 56, 54, 36],
[16, 40, 73, 60, 36]])
a = xp.asarray(np.tile(a, [2, 1, 1]))
expected = xp.asarray(np.tile(expected, [2, 1, 1]))
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, a, axes=axes)
xp_assert_close(out, expected, atol=1.5e-6, check_dtype=False)
@pytest.mark.parametrize('axes', ['',
None,
[0, 1],
[1, 0],
[0, -1],
[-1, 0],
[-2, 1],
[1, -2],
[-2, -1],
[-1, -2]])
def test_2d_complex_same(self, axes, xp):
a = xp.asarray([[1 + 2j, 3 + 4j, 5 + 6j],
[2 + 1j, 4 + 3j, 6 + 5j]])
expected = xp.asarray([
[-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],
[10j, 44j, 118j, 156j, 122j],
[3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]
])
if axes == '':
out = fftconvolve(a, a)
else:
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, a, axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
@pytest.mark.parametrize('axes', [[1, 2],
[2, 1],
[1, -1],
[-1, 1],
[-2, 2],
[2, -2],
[-2, -1],
[-1, -2]])
def test_2d_complex_same_axes(self, axes, xp):
a = xp.asarray([[1 + 2j, 3 + 4j, 5 + 6j],
[2 + 1j, 4 + 3j, 6 + 5j]])
expected = xp.asarray([
[-3 + 4j, -10 + 20j, -21 + 56j, -18 + 76j, -11 + 60j],
[10j, 44j, 118j, 156j, 122j],
[3 + 4j, 10 + 20j, 21 + 56j, 18 + 76j, 11 + 60j]
])
a = xp.asarray(np.tile(a, [2, 1, 1]))
expected = xp.asarray(np.tile(expected, [2, 1, 1]))
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, a, axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
@skip_xp_backends("torch", reason="dtypes do not match")
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_real_same_mode(self, axes, xp):
a = xp.asarray([1, 2, 3])
b = xp.asarray([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected_1 = xp.asarray([35., 41., 47.])
expected_2 = xp.asarray([9., 20., 25., 35., 41., 47., 39., 28., 2.])
if axes == '':
out = fftconvolve(a, b, 'same')
else:
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, b, 'same', axes=axes)
xp_assert_close(out, expected_1)
if axes == '':
out = fftconvolve(b, a, 'same')
else:
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(b, a, 'same', axes=axes)
xp_assert_close(out, expected_2, atol=1.5e-6)
@skip_xp_backends("torch", reason="dtypes do not match")
@pytest.mark.parametrize('axes', [1, -1, [1], [-1]])
def test_real_same_mode_axes(self, axes, xp):
a = xp.asarray([1, 2, 3])
b = xp.asarray([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected_1 = xp.asarray([35., 41., 47.])
expected_2 = xp.asarray([9., 20., 25., 35., 41., 47., 39., 28., 2.])
a = xp.asarray(np.tile(a, [2, 1]))
b = xp.asarray(np.tile(b, [2, 1]))
expected_1 = xp.asarray(np.tile(expected_1, [2, 1]))
expected_2 = xp.asarray(np.tile(expected_2, [2, 1]))
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, b, 'same', axes=axes)
xp_assert_close(out, expected_1, atol=1.5e-6)
out = fftconvolve(b, a, 'same', axes=axes)
xp_assert_close(out, expected_2, atol=1.5e-6)
@skip_xp_backends("torch", reason="dtypes do not match")
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_valid_mode_real(self, axes, xp):
# See gh-5897
a = xp.asarray([3, 2, 1])
b = xp.asarray([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected = xp.asarray([24., 31., 41., 43., 49., 25., 12.])
if axes == '':
out = fftconvolve(a, b, 'valid')
else:
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, b, 'valid', axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
if axes == '':
out = fftconvolve(b, a, 'valid')
else:
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(b, a, 'valid', axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
@skip_xp_backends("torch", reason="dtypes do not match")
@pytest.mark.parametrize('axes', [1, [1]])
def test_valid_mode_real_axes(self, axes, xp):
# See gh-5897
a = xp.asarray([3, 2, 1])
b = xp.asarray([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected = xp.asarray([24., 31., 41., 43., 49., 25., 12.])
a = xp.asarray(np.tile(a, [2, 1]))
b = xp.asarray(np.tile(b, [2, 1]))
expected = xp.asarray(np.tile(expected, [2, 1]))
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, b, 'valid', axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_valid_mode_complex(self, axes, xp):
a = xp.asarray([3 - 1j, 2 + 7j, 1 + 0j])
b = xp.asarray([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])
expected = xp.asarray([45. + 12.j, 30. + 23.j, 48 + 32.j])
if axes == '':
out = fftconvolve(a, b, 'valid')
else:
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, b, 'valid', axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
if axes == '':
out = fftconvolve(b, a, 'valid')
else:
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(b, a, 'valid', axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_valid_mode_complex_axes(self, axes, xp):
a = xp.asarray([3 - 1j, 2 + 7j, 1 + 0j])
b = xp.asarray([3 + 2j, 3 - 3j, 5 + 0j, 6 - 1j, 8 + 0j])
expected = xp.asarray([45. + 12.j, 30. + 23.j, 48 + 32.j])
a = xp.asarray(np.tile(a, [2, 1]))
b = xp.asarray(np.tile(b, [2, 1]))
expected = xp.asarray(np.tile(expected, [2, 1]))
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, b, 'valid', axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
out = fftconvolve(b, a, 'valid', axes=axes)
xp_assert_close(out, expected, atol=1.5e-6)
@skip_xp_backends("jax.numpy", reason="mapped axes must have same shape")
@skip_xp_backends("torch", reason="dtypes do not match")
def test_valid_mode_ignore_nonaxes(self, xp):
# See gh-5897
a = xp.asarray([3, 2, 1])
b = xp.asarray([3, 3, 5, 6, 8, 7, 9, 0, 1])
expected = xp.asarray([24., 31., 41., 43., 49., 25., 12.])
a = xp.asarray(np.tile(a, [2, 1]))
b = xp.asarray(np.tile(b, [1, 1]))
expected = xp.asarray(np.tile(expected, [2, 1]))
out = fftconvolve(a, b, 'valid', axes=1)
xp_assert_close(out, expected, atol=1.5e-6)
@xfail_xp_backends("cupy", reason="dtypes do not match")
@xfail_xp_backends("jax.numpy", reason="assorted error messages")
@pytest.mark.parametrize("a,b", [([], []), ([5, 6], []), ([], [7])])
def test_empty(self, a, b, xp):
# Regression test for #1745: crashes with 0-length input.
xp_assert_equal(
fftconvolve(xp.asarray(a), xp.asarray(b)),
xp.asarray([]),
)
@skip_xp_backends("jax.numpy", reason="jnp.pad: pad_width with nd=0")
def test_zero_rank(self, xp):
a = xp.asarray(4967)
b = xp.asarray(3920)
out = fftconvolve(a, b)
xp_assert_equal(out, a * b)
def test_single_element(self, xp):
a = xp.asarray([4967])
b = xp.asarray([3920])
out = fftconvolve(a, b)
xp_assert_equal(out,
xp.asarray(a * b, dtype=out.dtype))
@pytest.mark.parametrize('axes', ['', None, 0, [0], -1, [-1]])
def test_random_data(self, axes, xp):
rng = np.random.default_rng(1234)
a_np = np.random.rand(1233) + 1j * rng.random(1233)
b_np = np.random.rand(1321) + 1j * rng.random(1321)
expected = xp.asarray(np.convolve(a_np, b_np, 'full'))
a = xp.asarray(a_np)
b = xp.asarray(b_np)
if axes == '':
out = fftconvolve(a, b, 'full')
else:
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, b, 'full', axes=axes)
xp_assert_close(out, expected, rtol=1e-10)
@pytest.mark.parametrize('axes', [1, [1], -1, [-1]])
def test_random_data_axes(self, axes, xp):
rng = np.random.default_rng(1234)
a_np = np.random.rand(1233) + 1j * rng.random(1233)
b_np = np.random.rand(1321) + 1j * rng.random(1321)
expected = np.convolve(a_np, b_np, 'full')
a_np = np.tile(a_np, [2, 1])
b_np = np.tile(b_np, [2, 1])
expected = xp.asarray(np.tile(expected, [2, 1]))
a = xp.asarray(a_np)
b = xp.asarray(b_np)
if isinstance(axes, list):
axes = tuple(axes)
out = fftconvolve(a, b, 'full', axes=axes)
xp_assert_close(out, expected, rtol=1e-10)
@xfail_xp_backends(np_only=True, reason="TODO: swapaxes")
@pytest.mark.parametrize('axes', [[1, 4],
[4, 1],
[1, -1],
[-1, 1],
[-4, 4],
[4, -4],
[-4, -1],
[-1, -4]])
def test_random_data_multidim_axes(self, axes, xp):
a_shape, b_shape = (123, 22), (132, 11)
rng = np.random.default_rng(1234)
a = xp.asarray(np.random.rand(*a_shape) + 1j * rng.random(a_shape))
b = xp.asarray(np.random.rand(*b_shape) + 1j * rng.random(b_shape))
expected = convolve2d(a, b, 'full')
a = a[:, :, None, None, None]
b = b[:, :, None, None, None]
expected = expected[:, :, None, None, None]
a = xp.moveaxis(a.swapaxes(0, 2), 1, 4)
b = xp.moveaxis(b.swapaxes(0, 2), 1, 4)
expected = xp.moveaxis(expected.swapaxes(0, 2), 1, 4)
# use 1 for dimension 2 in a and 3 in b to test broadcasting
a = xp.asarray(np.tile(a, [2, 1, 3, 1, 1]))
b = xp.asarray(np.tile(b, [2, 1, 1, 4, 1]))
expected = xp.asarray(np.tile(expected, [2, 1, 3, 4, 1]))
out = fftconvolve(a, b, 'full', axes=axes)
xp_assert_close(out, expected, rtol=1e-10, atol=1e-10)
@pytest.mark.slow
@pytest.mark.parametrize(
'n',
list(range(1, 100)) +
list(range(1000, 1500)) +
np.random.RandomState(1234).randint(1001, 10000, 5).tolist())
def test_many_sizes(self, n, xp):
a_np = np.random.rand(n) + 1j * np.random.rand(n)
b_np = np.random.rand(n) + 1j * np.random.rand(n)
expected = xp.asarray(np.convolve(a_np, b_np, 'full'))
a = xp.asarray(a_np)
b = xp.asarray(b_np)
out = fftconvolve(a, b, 'full')
xp_assert_close(out, expected, atol=1e-10)
out = fftconvolve(a, b, 'full', axes=(0,))
xp_assert_close(out, expected, atol=1e-10)
@skip_xp_backends(np_only=True)
def test_fft_nan(self, xp):
n = 1000
rng = np.random.default_rng(43876432987)
sig_nan = xp.asarray(rng.standard_normal(n))
for val in [np.nan, np.inf]:
sig_nan[100] = val
coeffs = xp.asarray(signal.firwin(200, 0.2))
msg = "Use of fft convolution.*|invalid value encountered.*"
with pytest.warns(RuntimeWarning, match=msg):
signal.convolve(sig_nan, coeffs, mode='same', method='fft')
def fftconvolve_err(*args, **kwargs):
raise RuntimeError('Fell back to fftconvolve')
def gen_oa_shapes(sizes):
return [(a, b) for a, b in product(sizes, repeat=2)
if abs(a - b) > 3]
def gen_oa_shapes_2d(sizes):
shapes0 = gen_oa_shapes(sizes)
shapes1 = gen_oa_shapes(sizes)
shapes = [ishapes0+ishapes1 for ishapes0, ishapes1 in
zip(shapes0, shapes1)]
modes = ['full', 'valid', 'same']
return [ishapes+(imode,) for ishapes, imode in product(shapes, modes)
if imode != 'valid' or
(ishapes[0] > ishapes[1] and ishapes[2] > ishapes[3]) or
(ishapes[0] < ishapes[1] and ishapes[2] < ishapes[3])]
def gen_oa_shapes_eq(sizes):
return [(a, b) for a, b in product(sizes, repeat=2)
if a >= b]
@make_xp_test_case(oaconvolve)
| TestFFTConvolve |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/qunary_test.py | {
"start": 562,
"end": 4740
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, M, N, dtype, op_func):
f_input = torch.rand(M, N)
scale = 1.0
zero_point = 0
self.inputs = {
"q_input": torch.quantize_per_tensor(
f_input, scale=scale, zero_point=zero_point, dtype=dtype
)
}
self.op_func = op_func
def forward(self, q_input):
return self.op_func(q_input)
# TODO: Uncomment the ops whenever they are implemented for quantized tensor.
qunary_ops_list = op_bench.op_list(
attr_names=["op_name", "op_func"],
attrs=[
# ['q_abs', torch.abs],
# ['q_abs_', torch.abs_],
# ['q_acos', torch.acos],
# ['q_acos_', torch.acos_],
["q_argsort", torch.argsort],
# ['q_asin', torch.asin],
# ['q_asin_', torch.asin_],
# ['q_atan', torch.atan],
# ['q_atan_', torch.atan_],
# ['q_ceil', torch.ceil],
# ['q_ceil_', torch.ceil_],
["q_clone", torch.clone],
# ['q_cos', torch.cos],
# ['q_cos_', torch.cos_],
# ['q_cosh', torch.cosh],
# ['q_digamma', torch.digamma],
# ['q_erf', torch.erf],
# ['q_erf_', torch.erf_],
# ['q_erfc', torch.erfc],
# ['q_erfc_', torch.erfc_],
# ['q_erfinv', torch.erfinv],
# ['q_exp', torch.exp],
# ['q_exp_', torch.exp_],
# ['q_expm1', torch.expm1],
# ['q_expm1_', torch.expm1_],
# ['q_floor', torch.floor],
# ['q_floor_', torch.floor_],
# ['q_frac', torch.frac],
# ['q_frac_', torch.frac_],
# ['q_hardshrink', torch.hardshrink],
# ['q_lgamma', torch.lgamma],
# ['q_log', torch.log],
# ['q_log10', torch.log10],
# ['q_log10_', torch.log10_],
# ['q_log1p', torch.log1p],
# ['q_log1p_', torch.log1p_],
# ['q_log2', torch.log2],
# ['q_log2_', torch.log2_],
# ['q_log_', torch.log_],
["q_mean", torch.mean],
# ['q_neg', torch.neg],
# ['q_neg_', torch.neg_],
# ['q_reciprocal', torch.reciprocal],
# ['q_reciprocal_', torch.reciprocal_],
["q_relu", torch.relu],
["q_relu_", torch.relu_],
# ['q_round', torch.round],
# ['q_round_', torch.round_],
# ['q_rsqrt', torch.rsqrt],
# ['q_rsqrt_', torch.rsqrt_],
# ['q_sigmoid', torch.sigmoid],
# ['q_sigmoid_', torch.sigmoid_],
# ['q_sign', torch.sign],
# ['q_sin', torch.sin],
# ['q_sin_', torch.sin_],
# ['q_sinh', torch.sinh],
["q_sort", torch.sort],
# ['q_sqrt', torch.sqrt],
# ['q_sqrt_', torch.sqrt_],
# ['q_tan', torch.tan],
# ['q_tan_', torch.tan_],
# ['q_tanh', torch.tanh],
# ['q_tanh_', torch.tanh_],
# ['q_trunc', torch.trunc],
# ['q_trunc_', torch.trunc_],
# ['q_unique', torch.unique],
# ['q_zero_', torch.zero_],
# ['q_bernoulli_', lambda t: t.bernoulli_()],
# ['q_cauchy_', lambda t: t.cauchy_()],
# ['q_digamma_', lambda t: t.digamma_()],
# ['q_exponential_', lambda t: t.exponential_()],
# ['q_normal_', lambda t: t.normal_()],
# ['q_random_', lambda t: t.random_()],
# ['q_sign_', lambda t: t.sign_()],
# ['q_uniform_', lambda t: t.uniform_()],
# ['q_half', lambda t: t.half()],
# ['q_long', lambda t: t.long()],
],
)
op_bench.generate_pt_tests_from_op_list(
qunary_ops_list,
qunary_ops_configs_short + qunary_ops_configs_long,
QUnaryOpBenchmark,
)
# === Other unary ops (i.e. the ones that need parameters as args) ===
# Configs for pointwise and reduction unary ops
qunary_ops_topk_configs_short = op_bench.config_list(
attr_names=["M", "N", "k"],
attrs=[
[512, 512, 5],
],
cross_product_configs={
"dtype": [torch.quint8],
},
tags=["short"],
)
qunary_ops_topk_configs_long = op_bench.cross_product_configs(
M=[256, 1024],
N=[256, 1024],
k=[1, 3, 5],
dtype=[torch.quint8, torch.qint8, torch.qint32],
tags=["long"],
)
| QUnaryOpBenchmark |
python | wandb__wandb | wandb/vendor/pygments/lexers/webmisc.py | {
"start": 1786,
"end": 33366
} | class ____(ExtendedRegexLexer):
"""
An XQuery lexer, parsing a stream and outputting the tokens needed to
highlight xquery code.
.. versionadded:: 1.4
"""
name = 'XQuery'
aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm']
filenames = ['*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm']
mimetypes = ['text/xquery', 'application/xquery']
xquery_parse_state = []
# FIX UNICODE LATER
# ncnamestartchar = (
# ur"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|"
# ur"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|"
# ur"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|"
# ur"[\u10000-\uEFFFF]"
# )
ncnamestartchar = r"(?:[A-Z]|_|[a-z])"
# FIX UNICODE LATER
# ncnamechar = ncnamestartchar + (ur"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|"
# ur"[\u203F-\u2040]")
ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])"
ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar)
pitarget_namestartchar = r"(?:[A-KN-WYZ]|_|:|[a-kn-wyz])"
pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])"
pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar)
prefixedname = "%s:%s" % (ncname, ncname)
unprefixedname = ncname
qname = "(?:%s|%s)" % (prefixedname, unprefixedname)
entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)'
charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)'
stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")'
stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')"
# FIX UNICODE LATER
# elementcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
# ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_\'`|~]'
# quotattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|'
# ur'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%()*+,\-./:;=?@\[\\\]^_\'`|~]'
# aposattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
# ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%()*+,\-./:;=?@\[\\\]^_`|~]'
# CHAR elements - fix the above elementcontentchar, quotattrcontentchar,
# aposattrcontentchar
# x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
flags = re.DOTALL | re.MULTILINE | re.UNICODE
def punctuation_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
# transition to root always - don't pop off stack
ctx.stack = ['root']
ctx.pos = match.end()
def operator_root_callback(lexer, match, ctx):
yield match.start(), Operator, match.group(1)
# transition to root always - don't pop off stack
ctx.stack = ['root']
ctx.pos = match.end()
def popstate_tag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
ctx.stack.append(lexer.xquery_parse_state.pop())
ctx.pos = match.end()
def popstate_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append(lexer.xquery_parse_state.pop())
ctx.pos = match.end()
def popstate_kindtest_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
next_state = lexer.xquery_parse_state.pop()
if next_state == 'occurrenceindicator':
if re.match("[?*+]+", match.group(2)):
yield match.start(), Punctuation, match.group(2)
ctx.stack.append('operator')
ctx.pos = match.end()
else:
ctx.stack.append('operator')
ctx.pos = match.end(1)
else:
ctx.stack.append(next_state)
ctx.pos = match.end(1)
def popstate_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
# if we have run out of our state stack, pop whatever is on the pygments
# state stack
if len(lexer.xquery_parse_state) == 0:
ctx.stack.pop()
elif len(ctx.stack) > 1:
ctx.stack.append(lexer.xquery_parse_state.pop())
else:
# i don't know if i'll need this, but in case, default back to root
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_element_content_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append('element_content')
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append(ctx.state.pop)
ctx.pos = match.end()
def pushstate_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append(ctx.state.pop)
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_operator_order_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_map_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_root_validate(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_root_validate_withmode(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Keyword, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_processing_instruction_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('processing_instruction')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_element_content_processing_instruction_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('processing_instruction')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_element_content_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_operator_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_element_content_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('xml_comment')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_operator_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('xml_comment')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_kindtest_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('kindtest')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_operator_kindtestforpi_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('kindtestforpi')
ctx.pos = match.end()
def pushstate_operator_kindtest_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('occurrenceindicator')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_operator_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_operator_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
lexer.xquery_parse_state.append('operator')
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_operator_root_construct_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
cur_state = ctx.stack.pop()
lexer.xquery_parse_state.append(cur_state)
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_operator_attribute_callback(lexer, match, ctx):
yield match.start(), Name.Attribute, match.group(1)
ctx.stack.append('operator')
ctx.pos = match.end()
def pushstate_operator_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
tokens = {
'comment': [
# xquery comments
(r'(:\))', Comment, '#pop'),
(r'(\(:)', Comment, '#push'),
(r'[^:)]', Comment),
(r'([^:)]|:|\))', Comment),
],
'whitespace': [
(r'\s+', Text),
],
'operator': [
include('whitespace'),
(r'(\})', popstate_callback),
(r'\(:', Comment, 'comment'),
(r'(\{)', pushstate_root_callback),
(r'then|else|external|at|div|except', Keyword, 'root'),
(r'order by', Keyword, 'root'),
(r'group by', Keyword, 'root'),
(r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'),
(r'and|or', Operator.Word, 'root'),
(r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)',
Operator.Word, 'root'),
(r'return|satisfies|to|union|where|count|preserve\s+strip',
Keyword, 'root'),
(r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\|\||\||:=|=|!)',
operator_root_callback),
(r'(::|:|;|\[|//|/|,)',
punctuation_root_callback),
(r'(castable|cast)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(instance)(\s+)(of)\b',
bygroups(Keyword, Text, Keyword), 'itemtype'),
(r'(treat)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'itemtype'),
(r'(case)(\s+)(' + stringdouble + ')',
bygroups(Keyword, Text, String.Double), 'itemtype'),
(r'(case)(\s+)(' + stringsingle + ')',
bygroups(Keyword, Text, String.Single), 'itemtype'),
(r'(case|as)\b', Keyword, 'itemtype'),
(r'(\))(\s*)(as)',
bygroups(Punctuation, Text, Keyword), 'itemtype'),
(r'\$', Name.Variable, 'varname'),
(r'(for|let|previous|next)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
(r'(for)(\s+)(tumbling|sliding)(\s+)(window)(\s+)(\$)',
bygroups(Keyword, Text, Keyword, Text, Keyword, Text, Name.Variable),
'varname'),
# (r'\)|\?|\]', Punctuation, '#push'),
(r'\)|\?|\]', Punctuation),
(r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)),
(r'ascending|descending|default', Keyword, '#push'),
(r'(allowing)(\s+)(empty)', bygroups(Keyword, Text, Keyword)),
(r'external', Keyword),
(r'(start|when|end)', Keyword, 'root'),
(r'(only)(\s+)(end)', bygroups(Keyword, Text, Keyword), 'root'),
(r'collation', Keyword, 'uritooperator'),
# eXist specific XQUF
(r'(into|following|preceding|with)', Keyword, 'root'),
# support for current context on rhs of Simple Map Operator
(r'\.', Operator),
# finally catch all string literals and stay in operator state
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'(catch)(\s*)', bygroups(Keyword, Text), 'root'),
],
'uritooperator': [
(stringdouble, String.Double, '#pop'),
(stringsingle, String.Single, '#pop'),
],
'namespacedecl': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Text, String.Double)),
(r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Text, String.Single)),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r',', Punctuation),
(r'=', Operator),
(r';', Punctuation, 'root'),
(ncname, Name.Namespace),
],
'namespacekeyword': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double, 'namespacedecl'),
(stringsingle, String.Single, 'namespacedecl'),
(r'inherit|no-inherit', Keyword, 'root'),
(r'namespace', Keyword, 'namespacedecl'),
(r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)),
(r'preserve|no-preserve', Keyword),
(r',', Punctuation),
],
'annotationname': [
(r'\(:', Comment, 'comment'),
(qname, Name.Decorator),
(r'(\()(' + stringdouble + ')', bygroups(Punctuation, String.Double)),
(r'(\()(' + stringsingle + ')', bygroups(Punctuation, String.Single)),
(r'(\,)(\s+)(' + stringdouble + ')',
bygroups(Punctuation, Text, String.Double)),
(r'(\,)(\s+)(' + stringsingle + ')',
bygroups(Punctuation, Text, String.Single)),
(r'\)', Punctuation),
(r'(\s+)(\%)', bygroups(Text, Name.Decorator), 'annotationname'),
(r'(\s+)(variable)(\s+)(\$)',
bygroups(Text, Keyword.Declaration, Text, Name.Variable), 'varname'),
(r'(\s+)(function)(\s+)',
bygroups(Text, Keyword.Declaration, Text), 'root')
],
'varname': [
(r'\(:', Comment, 'comment'),
(r'(' + qname + ')(\()?', bygroups(Name, Punctuation), 'operator'),
],
'singletype': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(ncname + r'(:\*)', Name.Variable, 'operator'),
(qname, Name.Variable, 'operator'),
],
'itemtype': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\$', Name.Variable, 'varname'),
(r'(void)(\s*)(\()(\s*)(\))',
bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'),
(r'(element|attribute|schema-element|schema-attribute|comment|text|'
r'node|binary|document-node|empty-sequence)(\s*)(\()',
pushstate_occurrenceindicator_kindtest_callback),
# Marklogic specific type?
(r'(processing-instruction)(\s*)(\()',
bygroups(Keyword, Text, Punctuation),
('occurrenceindicator', 'kindtestforpi')),
(r'(item)(\s*)(\()(\s*)(\))(?=[*+?])',
bygroups(Keyword, Text, Punctuation, Text, Punctuation),
'occurrenceindicator'),
(r'(\(\#)(\s*)', bygroups(Punctuation, Text), 'pragma'),
(r';', Punctuation, '#pop'),
(r'then|else', Keyword, '#pop'),
(r'(at)(\s+)(' + stringdouble + ')',
bygroups(Keyword, Text, String.Double), 'namespacedecl'),
(r'(at)(\s+)(' + stringsingle + ')',
bygroups(Keyword, Text, String.Single), 'namespacedecl'),
(r'except|intersect|in|is|return|satisfies|to|union|where|count',
Keyword, 'root'),
(r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'),
(r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|\||\|', Operator, 'root'),
(r'external|at', Keyword, 'root'),
(r'(stable)(\s+)(order)(\s+)(by)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'),
(r'(castable|cast)(\s+)(as)',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)),
(r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)),
(r'(case)(\s+)(' + stringdouble + ')',
bygroups(Keyword, Text, String.Double), 'itemtype'),
(r'(case)(\s+)(' + stringsingle + ')',
bygroups(Keyword, Text, String.Single), 'itemtype'),
(r'case|as', Keyword, 'itemtype'),
(r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(ncname + r':\*', Keyword.Type, 'operator'),
(r'(function|map|array)(\()', bygroups(Keyword.Type, Punctuation)),
(qname, Keyword.Type, 'occurrenceindicator'),
],
'kindtest': [
(r'\(:', Comment, 'comment'),
(r'\{', Punctuation, 'root'),
(r'(\))([*+?]?)', popstate_kindtest_callback),
(r'\*', Name, 'closekindtest'),
(qname, Name, 'closekindtest'),
(r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback),
],
'kindtestforpi': [
(r'\(:', Comment, 'comment'),
(r'\)', Punctuation, '#pop'),
(ncname, Name.Variable),
(stringdouble, String.Double),
(stringsingle, String.Single),
],
'closekindtest': [
(r'\(:', Comment, 'comment'),
(r'(\))', popstate_callback),
(r',', Punctuation),
(r'(\{)', pushstate_operator_root_callback),
(r'\?', Punctuation),
],
'xml_comment': [
(r'(-->)', popstate_xmlcomment_callback),
(r'[^-]{1,2}', Literal),
(u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'processing_instruction': [
(r'\s+', Text, 'processing_instruction_content'),
(r'\?>', String.Doc, '#pop'),
(pitarget, Name),
],
'processing_instruction_content': [
(r'\?>', String.Doc, '#pop'),
(u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'cdata_section': [
(r']]>', String.Doc, '#pop'),
(u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'start_tag': [
include('whitespace'),
(r'(/>)', popstate_tag_callback),
(r'>', Name.Tag, 'element_content'),
(r'"', Punctuation, 'quot_attribute_content'),
(r"'", Punctuation, 'apos_attribute_content'),
(r'=', Operator),
(qname, Name.Tag),
],
'quot_attribute_content': [
(r'"', Punctuation, 'start_tag'),
(r'(\{)', pushstate_root_callback),
(r'""', Name.Attribute),
(quotattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'apos_attribute_content': [
(r"'", Punctuation, 'start_tag'),
(r'\{', Punctuation, 'root'),
(r"''", Name.Attribute),
(aposattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'element_content': [
(r'</', Name.Tag, 'end_tag'),
(r'(\{)', pushstate_root_callback),
(r'(<!--)', pushstate_element_content_xmlcomment_callback),
(r'(<\?)', pushstate_element_content_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback),
(r'(<)', pushstate_element_content_starttag_callback),
(elementcontentchar, Literal),
(entityref, Literal),
(charref, Literal),
(r'\{\{|\}\}', Literal),
],
'end_tag': [
include('whitespace'),
(r'(>)', popstate_tag_callback),
(qname, Name.Tag),
],
'xmlspace_decl': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'preserve|strip', Keyword, '#pop'),
],
'declareordering': [
(r'\(:', Comment, 'comment'),
include('whitespace'),
(r'ordered|unordered', Keyword, '#pop'),
],
'xqueryversion': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'encoding', Keyword),
(r';', Punctuation, '#pop'),
],
'pragma': [
(qname, Name.Variable, 'pragmacontents'),
],
'pragmacontents': [
(r'#\)', Punctuation, 'operator'),
(u'\\t|\\r|\\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
(r'(\s+)', Text),
],
'occurrenceindicator': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\*|\?|\+', Operator, 'operator'),
(r':=', Operator, 'root'),
default('operator'),
],
'option': [
include('whitespace'),
(qname, Name.Variable, '#pop'),
],
'qname_braren': [
include('whitespace'),
(r'(\{)', pushstate_operator_root_callback),
(r'(\()', Punctuation, 'root'),
],
'element_qname': [
(qname, Name.Variable, 'root'),
],
'attribute_qname': [
(qname, Name.Variable, 'root'),
],
'root': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
# handle operator state
# order on numbers matters - handle most complex first
(r'\d+(\.\d*)?[eE][+-]?\d+', Number.Float, 'operator'),
(r'(\.\d+)[eE][+-]?\d+', Number.Float, 'operator'),
(r'(\.\d+|\d+\.\d*)', Number.Float, 'operator'),
(r'(\d+)', Number.Integer, 'operator'),
(r'(\.\.|\.|\))', Punctuation, 'operator'),
(r'(declare)(\s+)(construction)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
(r'(declare)(\s+)(default)(\s+)(order)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
(r'(declare)(\s+)(context)(\s+)(item)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'operator'),
(ncname + ':\*', Name, 'operator'),
('\*:'+ncname, Name.Tag, 'operator'),
('\*', Name.Tag, 'operator'),
(stringdouble, String.Double, 'operator'),
(stringsingle, String.Single, 'operator'),
(r'(\}|\])', popstate_callback),
# NAMESPACE DECL
(r'(declare)(\s+)(default)(\s+)(collation)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration)),
(r'(module|declare)(\s+)(namespace)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'namespacedecl'),
(r'(declare)(\s+)(base-uri)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'namespacedecl'),
# NAMESPACE KEYWORD
(r'(declare)(\s+)(default)(\s+)(element|function)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Keyword.Declaration), 'namespacekeyword'),
(r'(import)(\s+)(schema|module)',
bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'),
(r'(declare)(\s+)(copy-namespaces)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'namespacekeyword'),
# VARNAMEs
(r'(for|let|some|every)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
(r'(for)(\s+)(tumbling|sliding)(\s+)(window)(\s+)(\$)',
bygroups(Keyword, Text, Keyword, Text, Keyword, Text, Name.Variable), 'varname'),
(r'\$', Name.Variable, 'varname'),
(r'(declare)(\s+)(variable)(\s+)(\$)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration, Text, Name.Variable), 'varname'),
# ANNOTATED GLOBAL VARIABLES AND FUNCTIONS
(r'(declare)(\s+)(\%)', bygroups(Keyword.Declaration, Text, Name.Decorator), 'annotationname'),
# ITEMTYPE
(r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(r'(element|attribute|schema-element|schema-attribute|comment|'
r'text|node|document-node|empty-sequence)(\s+)(\()',
pushstate_operator_kindtest_callback),
(r'(processing-instruction)(\s+)(\()',
pushstate_operator_kindtestforpi_callback),
(r'(<!--)', pushstate_operator_xmlcomment_callback),
(r'(<\?)', pushstate_operator_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_operator_cdata_section_callback),
# (r'</', Name.Tag, 'end_tag'),
(r'(<)', pushstate_operator_starttag_callback),
(r'(declare)(\s+)(boundary-space)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'xmlspace_decl'),
(r'(validate)(\s+)(lax|strict)',
pushstate_operator_root_validate_withmode),
(r'(validate)(\s*)(\{)', pushstate_operator_root_validate),
(r'(typeswitch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'(switch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'(element|attribute|namespace)(\s*)(\{)',
pushstate_operator_root_construct_callback),
(r'(document|text|processing-instruction|comment)(\s*)(\{)',
pushstate_operator_root_construct_callback),
# ATTRIBUTE
(r'(attribute)(\s+)(?=' + qname + r')',
bygroups(Keyword, Text), 'attribute_qname'),
# ELEMENT
(r'(element)(\s+)(?=' + qname + r')',
bygroups(Keyword, Text), 'element_qname'),
# PROCESSING_INSTRUCTION
(r'(processing-instruction|namespace)(\s+)(' + ncname + r')(\s*)(\{)',
bygroups(Keyword, Text, Name.Variable, Text, Punctuation),
'operator'),
(r'(declare|define)(\s+)(function)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration)),
(r'(\{|\[)', pushstate_operator_root_callback),
(r'(unordered|ordered)(\s*)(\{)',
pushstate_operator_order_callback),
(r'(map|array)(\s*)(\{)',
pushstate_operator_map_callback),
(r'(declare)(\s+)(ordering)',
bygroups(Keyword.Declaration, Text, Keyword.Declaration), 'declareordering'),
(r'(xquery)(\s+)(version)',
bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'xqueryversion'),
(r'(\(#)(\s*)', bygroups(Punctuation, Text), 'pragma'),
# sometimes return can occur in root state
(r'return', Keyword),
(r'(declare)(\s+)(option)', bygroups(Keyword.Declaration, Text, Keyword.Declaration),
'option'),
# URI LITERALS - single and double quoted
(r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'),
(r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'),
(r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)',
bygroups(Keyword, Punctuation)),
(r'(descendant|following-sibling|following|parent|preceding-sibling'
r'|preceding|self)(::)', bygroups(Keyword, Punctuation)),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'then|else', Keyword),
# eXist specific XQUF
(r'(update)(\s*)(insert|delete|replace|value|rename)', bygroups(Keyword, Text, Keyword)),
(r'(into|following|preceding|with)', Keyword),
# Marklogic specific
(r'(try)(\s*)', bygroups(Keyword, Text), 'root'),
(r'(catch)(\s*)(\()(\$)',
bygroups(Keyword, Text, Punctuation, Name.Variable), 'varname'),
(r'(@'+qname+')', Name.Attribute, 'operator'),
(r'(@'+ncname+')', Name.Attribute, 'operator'),
(r'@\*:'+ncname, Name.Attribute, 'operator'),
(r'@\*', Name.Attribute, 'operator'),
(r'(@)', Name.Attribute, 'operator'),
(r'//|/|\+|-|;|,|\(|\)', Punctuation),
# STANDALONE QNAMES
(qname + r'(?=\s*\{)', Name.Tag, 'qname_braren'),
(qname + r'(?=\s*\([^:])', Name.Function, 'qname_braren'),
(r'(' + qname + ')(#)([0-9]+)', bygroups(Name.Function, Keyword.Type, Number.Integer)),
(qname, Name.Tag, 'operator'),
]
}
| XQueryLexer |
python | kamyu104__LeetCode-Solutions | Python/maximum-star-sum-of-a-graph.py | {
"start": 60,
"end": 1851
} | class ____(object):
def maxStarSum(self, vals, edges, k):
"""
:type vals: List[int]
:type edges: List[List[int]]
:type k: int
:rtype: int
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
left, right = 0, len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
adj = [[] for _ in xrange(len(vals))]
for u, v in edges:
if vals[v] > 0:
adj[u].append(v)
if vals[u] > 0:
adj[v].append(u)
result = float("-inf")
for u in xrange(len(vals)):
if 1 <= k <= len(adj[u]):
nth_element(adj[u], k-1, lambda a, b: vals[a] > vals[b])
result = max(result, vals[u]+sum(vals[adj[u][i]] for i in range(min(k, len(adj[u])))))
return result
| Solution |
python | readthedocs__readthedocs.org | readthedocs/api/v3/serializers.py | {
"start": 29762,
"end": 30425
} | class ____(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
parent = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
"projects-subprojects-detail",
kwargs={
"parent_lookup_parent__slug": obj.parent.slug,
"alias_slug": obj.alias,
},
)
return self._absolute_url(path)
def get_parent(self, obj):
path = reverse(
"projects-detail",
kwargs={
"project_slug": obj.parent.slug,
},
)
return self._absolute_url(path)
| SubprojectLinksSerializer |
python | pexpect__pexpect | tests/test_constructor.py | {
"start": 1023,
"end": 1948
} | class ____(PexpectTestCase.PexpectTestCase):
def test_constructor (self):
'''This tests that the constructor will work and give
the same results for different styles of invoking __init__().
This assumes that the root directory / is static during the test.
'''
p1 = pexpect.spawn('uname -m -n -p -r -s -v')
p2 = pexpect.spawn('uname', ['-m', '-n', '-p', '-r', '-s', '-v'])
p1.expect(pexpect.EOF)
p2.expect(pexpect.EOF)
assert p1.before == p2.before
def test_named_parameters (self):
'''This tests that named parameters work.
'''
p = pexpect.spawn ('/bin/ls',timeout=10)
p = pexpect.spawn (timeout=10, command='/bin/ls')
p = pexpect.spawn (args=[], command='/bin/ls')
if __name__ == '__main__':
unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestCaseConstructor)
| TestCaseConstructor |
python | gevent__gevent | src/gevent/tests/test__monkey_module_run.py | {
"start": 411,
"end": 4368
} | class ____(greentest.TestCase):
maxDiff = None
def setUp(self):
self.abs_pythonpath = absolute_pythonpath() # before we cd
self.cwd = os.getcwd()
os.chdir(os.path.dirname(__file__))
def tearDown(self):
os.chdir(self.cwd)
def _run(self, script, module=False):
env = os.environ.copy()
env['PYTHONWARNINGS'] = 'ignore'
if self.abs_pythonpath:
env['PYTHONPATH'] = self.abs_pythonpath
run_kwargs = dict(
buffer_output=True,
quiet=True,
nested=True,
env=env,
timeout=10,
)
args = [sys.executable, '-m', 'gevent.monkey']
if module:
args.append('--module')
args += [script, 'patched']
monkey_result = run(
args,
**run_kwargs
)
self.assertTrue(monkey_result)
if module:
args = [sys.executable, "-m", script, 'stdlib']
else:
args = [sys.executable, script, 'stdlib']
std_result = run(
args,
**run_kwargs
)
self.assertTrue(std_result)
monkey_out_lines = monkey_result.output_lines
std_out_lines = std_result.output_lines
self.assertEqual(monkey_out_lines, std_out_lines)
self.assertEqual(monkey_result.error, std_result.error)
return monkey_out_lines
def test_run_simple(self):
self._run(os.path.join('monkey_package', 'script.py'))
def _run_package(self, module):
lines = self._run('monkey_package', module=module)
self.assertTrue(lines[0].endswith(u'__main__.py'), lines[0])
self.assertEqual(lines[1].strip(), u'__main__')
def test_run_package(self):
# Run a __main__ inside a package, even without specifying -m
self._run_package(module=False)
def test_run_module(self):
# Run a __main__ inside a package, when specifying -m
self._run_package(module=True)
def test_issue_302(self):
monkey_lines = self._run(os.path.join('monkey_package', 'issue302monkey.py'))
self.assertEqual(monkey_lines[0].strip(), u'True')
monkey_lines[1] = monkey_lines[1].replace(u'\\', u'/') # windows path
self.assertTrue(monkey_lines[1].strip().endswith(u'monkey_package/issue302monkey.py'))
self.assertEqual(monkey_lines[2].strip(), u'True', monkey_lines)
# These three tests all sometimes fail on Py2 on CI, writing
# to stderr:
# Unhandled exception in thread started by \n
# sys.excepthook is missing\n
# lost sys.stderr\n
# Fatal Python error: PyImport_GetModuleDict: no module dictionary!\n'
# I haven't been able to produce this locally on macOS or Linux.
# The last line seems new with 2.7.17?
# Also, occasionally, they get '3' instead of '2' for the number of threads.
# That could have something to do with...? Most commonly that's PyPy, but
# sometimes CPython. Again, haven't reproduced.
# Not relevant since Py2 has been dropped.
def test_threadpool_in_patched_after_patch(self):
# Issue 1484
# If we don't have this correct, then we get exceptions
out = self._run(os.path.join('monkey_package', 'threadpool_monkey_patches.py'))
self.assertEqual(out, ['False', '2'])
def test_threadpool_in_patched_after_patch_module(self):
# Issue 1484
# If we don't have this correct, then we get exceptions
out = self._run('monkey_package.threadpool_monkey_patches', module=True)
self.assertEqual(out, ['False', '2'])
def test_threadpool_not_patched_after_patch_module(self):
# Issue 1484
# If we don't have this correct, then we get exceptions
out = self._run('monkey_package.threadpool_no_monkey', module=True)
self.assertEqual(out, ['False', 'False', '2'])
if __name__ == '__main__':
greentest.main()
| TestRun |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/base.py | {
"start": 214242,
"end": 214416
} | class ____(Protocol[Input, Output]):
def __call__(
self, _in: Iterator[Input], /, *, config: RunnableConfig
) -> Iterator[Output]: ...
| _RunnableCallableIterator |
python | pandas-dev__pandas | pandas/io/excel/_base.py | {
"start": 51981,
"end": 67543
} | class ____:
"""
Class for parsing tabular Excel sheets into DataFrame objects.
See read_excel for more documentation.
Parameters
----------
path_or_buffer : str, bytes, pathlib.Path,
A file-like object, xlrd workbook or openpyxl workbook.
If a string or path object, expected to be a path to a
.xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``, ``calamine``
Engine compatibility :
- ``xlrd`` supports old-style Excel files (.xls).
- ``openpyxl`` supports newer Excel file formats.
- ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
- ``pyxlsb`` supports Binary Excel files.
- ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)
and OpenDocument (.ods) file formats.
The engine `xlrd <https://xlrd.readthedocs.io/en/latest/>`_
now only supports old-style ``.xls`` files.
When ``engine=None``, the following logic will be
used to determine the engine:
- If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),
then `odf <https://pypi.org/project/odfpy/>`_ will be used.
- Otherwise if ``path_or_buffer`` is an xls format,
``xlrd`` will be used.
- Otherwise if ``path_or_buffer`` is in xlsb format,
`pyxlsb <https://pypi.org/project/pyxlsb/>`_ will be used.
- Otherwise if `openpyxl <https://pypi.org/project/openpyxl/>`_ is installed,
then ``openpyxl`` will be used.
- Otherwise if ``xlrd >= 2.0`` is installed, a ``ValueError`` will be raised.
.. warning::
Please do not report issues when using ``xlrd`` to read ``.xlsx`` files.
This is not supported, switch to using ``openpyxl`` instead.
{storage_options}
engine_kwargs : dict, optional
Arbitrary keyword arguments passed to excel engine.
See Also
--------
DataFrame.to_excel : Write DataFrame to an Excel file.
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
>>> file = pd.ExcelFile("myfile.xlsx") # doctest: +SKIP
>>> with pd.ExcelFile("myfile.xls") as xls: # doctest: +SKIP
... df1 = pd.read_excel(xls, "Sheet1") # doctest: +SKIP
"""
from pandas.io.excel._calamine import CalamineReader
from pandas.io.excel._odfreader import ODFReader
from pandas.io.excel._openpyxl import OpenpyxlReader
from pandas.io.excel._pyxlsb import PyxlsbReader
from pandas.io.excel._xlrd import XlrdReader
_engines: Mapping[str, Any] = {
"xlrd": XlrdReader,
"openpyxl": OpenpyxlReader,
"odf": ODFReader,
"pyxlsb": PyxlsbReader,
"calamine": CalamineReader,
}
def __init__(
    self,
    path_or_buffer,
    engine: str | None = None,
    storage_options: StorageOptions | None = None,
    engine_kwargs: dict | None = None,
) -> None:
    """Open `path_or_buffer` with the requested (or auto-detected) engine.

    If `engine` is None, the engine is inferred from the file's format
    (via `inspect_excel_format`) and the `io.excel.<ext>.reader` option.
    Raises ValueError for an unknown engine name or an undeterminable
    file format.
    """
    if engine_kwargs is None:
        engine_kwargs = {}
    # Fail fast on engine names that are not in the registry.
    if engine is not None and engine not in self._engines:
        raise ValueError(f"Unknown engine: {engine}")
    # Always a string
    self._io = stringify_path(path_or_buffer)
    if engine is None:
        # Only determine ext if it is needed
        ext: str | None = None
        if not isinstance(
            path_or_buffer, (str, os.PathLike, ExcelFile)
        ) and not is_file_like(path_or_buffer):
            # GH#56692 - avoid importing xlrd if possible
            if import_optional_dependency("xlrd", errors="ignore") is None:
                xlrd_version = None
            else:
                import xlrd

                xlrd_version = Version(get_version(xlrd))
            # An already-opened xlrd.Book can only be the legacy xls format.
            if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book):
                ext = "xls"

        if ext is None:
            # Sniff the format from the content (or path extension).
            ext = inspect_excel_format(
                content_or_path=path_or_buffer, storage_options=storage_options
            )
            if ext is None:
                raise ValueError(
                    "Excel file format cannot be determined, you must specify "
                    "an engine manually."
                )

        # Map the detected extension to the configured reader engine.
        engine = config.get_option(f"io.excel.{ext}.reader")
        if engine == "auto":
            engine = get_default_engine(ext, mode="reader")

    assert engine is not None

    self.engine = engine
    self.storage_options = storage_options

    # Instantiate the engine-specific reader; it owns the underlying handle.
    self._reader = self._engines[engine](
        self._io,
        storage_options=storage_options,
        engine_kwargs=engine_kwargs,
    )
def __fspath__(self):
return self._io
def parse(
    self,
    sheet_name: str | int | list[int] | list[str] | None = 0,
    header: int | Sequence[int] | None = 0,
    names: SequenceNotStr[Hashable] | range | None = None,
    index_col: int | Sequence[int] | None = None,
    usecols=None,
    converters=None,
    true_values: Iterable[Hashable] | None = None,
    false_values: Iterable[Hashable] | None = None,
    skiprows: Sequence[int] | int | Callable[[int], object] | None = None,
    nrows: int | None = None,
    na_values=None,
    parse_dates: list | dict | bool = False,
    date_format: str | dict[Hashable, str] | None = None,
    thousands: str | None = None,
    comment: str | None = None,
    skipfooter: int = 0,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
    **kwds,
) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]:
    """
    Parse specified sheet(s) into a DataFrame.

    Equivalent to read_excel(ExcelFile, ...)  See the read_excel
    docstring for more info on accepted parameters.

    Parameters
    ----------
    sheet_name : str, int, list, or None, default 0
        Strings are used for sheet names. Integers are used in zero-indexed
        sheet positions (chart sheets do not count as a sheet position).
        Lists of strings/integers are used to request multiple sheets.
        When ``None``, will return a dictionary containing DataFrames for
        each sheet.
    header : int, list of int, default 0
        Row (0-indexed) to use for the column labels of the parsed
        DataFrame. If a list of integers is passed those row positions will
        be combined into a ``MultiIndex``. Use None if there is no header.
    names : array-like, default None
        List of column names to use. If file contains no header row,
        then you should explicitly pass header=None.
    index_col : int, str, list of int, default None
        Column (0-indexed) to use as the row labels of the DataFrame.
        Pass None if there is no such column. If a list is passed,
        those columns will be combined into a ``MultiIndex``. If a
        subset of data is selected with ``usecols``, index_col
        is based on the subset.

        Missing values will be forward filled to allow roundtripping with
        ``to_excel`` for ``merged_cells=True``. To avoid forward filling the
        missing values use ``set_index`` after reading the data instead of
        ``index_col``.
    usecols : str, list-like, or callable, default None
        * If None, then parse all columns.
        * If str, then indicates comma separated list of Excel column letters
          and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
          both sides.
        * If list of int, then indicates list of column numbers to be parsed
          (0-indexed).
        * If list of string, then indicates list of column names to be parsed.
        * If callable, then evaluate each column name against it and parse the
          column if the callable returns ``True``.

        Returns a subset of the columns according to behavior above.
    converters : dict, default None
        Dict of functions for converting values in certain columns. Keys can
        either be integers or column labels, values are functions that take one
        input argument, the Excel cell content, and return the transformed
        content.
    true_values : list, default None
        Values to consider as True.
    false_values : list, default None
        Values to consider as False.
    skiprows : list-like, int, or callable, optional
        Line numbers to skip (0-indexed) or number of lines to skip (int) at the
        start of the file. If callable, the callable function will be evaluated
        against the row indices, returning True if the row should be skipped and
        False otherwise. An example of a valid callable argument would be ``lambda
        x: x in [0, 2]``.
    nrows : int, default None
        Number of rows to parse.
    na_values : scalar, str, list-like, or dict, default None
        Additional strings to recognize as NA/NaN. If dict passed, specific
        per-column NA values.
    parse_dates : bool, list-like, or dict, default False
        The behavior is as follows:

        * ``bool``. If True -> try parsing the index.
        * ``list`` of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
          each as a separate date column.
        * ``list`` of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and
          parse as a single date column.
        * ``dict``, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
          result 'foo'

        If a column or index contains an unparsable date, the entire column or
        index will be returned unaltered as an object data type. If you
        don`t want to parse some cells as date just change their type
        in Excel to "Text".For non-standard datetime parsing, use
        ``pd.to_datetime`` after ``pd.read_excel``.

        Note: A fast-path exists for iso8601-formatted dates.
    date_format : str or dict of column -> format, default ``None``
        If used in conjunction with ``parse_dates``, will parse dates
        according to this format. For anything more complex,
        please read in as ``object`` and then apply :func:`to_datetime` as-needed.
    thousands : str, default None
        Thousands separator for parsing string columns to numeric.  Note that
        this parameter is only necessary for columns stored as TEXT in Excel,
        any numeric columns will automatically be parsed, regardless of display
        format.
    comment : str, default None
        Comments out remainder of line. Pass a character or characters to this
        argument to indicate comments in the input file. Any data between the
        comment string and the end of the current line is ignored.
    skipfooter : int, default 0
        Rows at the end to skip (0-indexed).
    dtype_backend : {{'numpy_nullable', 'pyarrow'}}
        Back-end data type applied to the resultant :class:`DataFrame`
        (still experimental). If not specified, the default behavior
        is to not use nullable data types. If specified, the behavior
        is as follows:

        * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
        * ``"pyarrow"``: returns pyarrow-backed nullable
          :class:`ArrowDtype` :class:`DataFrame`

        .. versionadded:: 2.0
    **kwds : dict, optional
        Arbitrary keyword arguments passed to excel engine.

    Returns
    -------
    DataFrame or dict of DataFrames
        DataFrame from the passed in Excel file.

    See Also
    --------
    read_excel : Read an Excel sheet values (xlsx) file into DataFrame.
    read_csv : Read a comma-separated values (csv) file into DataFrame.
    read_fwf : Read a table of fixed-width formatted lines into DataFrame.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
    >>> df.to_excel("myfile.xlsx")  # doctest: +SKIP
    >>> file = pd.ExcelFile("myfile.xlsx")  # doctest: +SKIP
    >>> file.parse()  # doctest: +SKIP
    """
    # Pure delegation: all parsing/keyword handling is implemented by the
    # engine-specific reader created in `__init__`.
    return self._reader.parse(
        sheet_name=sheet_name,
        header=header,
        names=names,
        index_col=index_col,
        usecols=usecols,
        converters=converters,
        true_values=true_values,
        false_values=false_values,
        skiprows=skiprows,
        nrows=nrows,
        na_values=na_values,
        parse_dates=parse_dates,
        date_format=date_format,
        thousands=thousands,
        comment=comment,
        skipfooter=skipfooter,
        dtype_backend=dtype_backend,
        **kwds,
    )
@property
def book(self):
    """
    Gets the Excel workbook.

    Workbook is the top-level container for all document information.

    Returns
    -------
    Excel Workbook
        The workbook object of the type defined by the engine being used.

    See Also
    --------
    read_excel : Read an Excel file into a pandas DataFrame.

    Examples
    --------
    >>> file = pd.ExcelFile("myfile.xlsx")  # doctest: +SKIP
    >>> file.book  # doctest: +SKIP
    <openpyxl.workbook.workbook.Workbook object at 0x11eb5ad70>
    >>> file.book.sheetnames  # doctest: +SKIP
    ['Sheet1', 'Sheet2']
    """
    # The engine-level reader owns (and exposes) the workbook object.
    reader = self._reader
    return reader.book
@property
def sheet_names(self):
    """
    Names of the sheets in the document.

    This is particularly useful for loading a specific sheet into a DataFrame
    when you do not know the sheet names beforehand.

    Returns
    -------
    list of str
        List of sheet names in the document.

    See Also
    --------
    ExcelFile.parse : Parse a sheet into a DataFrame.
    read_excel : Read an Excel file into a pandas DataFrame. If you know the
        sheet names, it may be easier to specify them directly to read_excel.

    Examples
    --------
    >>> file = pd.ExcelFile("myfile.xlsx")  # doctest: +SKIP
    >>> file.sheet_names  # doctest: +SKIP
    ["Sheet1", "Sheet2"]
    """
    # Delegate to the engine-specific reader.
    reader = self._reader
    return reader.sheet_names
def close(self) -> None:
    """Close the underlying engine reader's I/O handle, if necessary."""
    self._reader.close()
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.close()
| ExcelFile |
python | ray-project__ray | rllib/policy/torch_policy.py | {
"start": 1663,
"end": 48994
} | class ____(Policy):
"""PyTorch specific Policy class to use with RLlib."""
def __init__(
    self,
    observation_space: gym.spaces.Space,
    action_space: gym.spaces.Space,
    config: AlgorithmConfigDict,
    *,
    model: Optional[TorchModelV2] = None,
    loss: Optional[
        Callable[
            [Policy, ModelV2, Type[TorchDistributionWrapper], SampleBatch],
            Union[TensorType, List[TensorType]],
        ]
    ] = None,
    action_distribution_class: Optional[Type[TorchDistributionWrapper]] = None,
    action_sampler_fn: Optional[
        Callable[
            [TensorType, List[TensorType]],
            Union[
                Tuple[TensorType, TensorType, List[TensorType]],
                Tuple[TensorType, TensorType, TensorType, List[TensorType]],
            ],
        ]
    ] = None,
    action_distribution_fn: Optional[
        Callable[
            [Policy, ModelV2, TensorType, TensorType, TensorType],
            Tuple[TensorType, Type[TorchDistributionWrapper], List[TensorType]],
        ]
    ] = None,
    max_seq_len: int = 20,
    get_batch_divisibility_req: Optional[Callable[[Policy], int]] = None,
):
    """Initializes a TorchPolicy instance.

    Args:
        observation_space: Observation space of the policy.
        action_space: Action space of the policy.
        config: The Policy's config dict.
        model: PyTorch policy module. Given observations as
            input, this module must return a list of outputs where the
            first item is action logits, and the rest can be any value.
        loss: Callable that returns one or more (a list of) scalar loss
            terms.
        action_distribution_class: Class for a torch action distribution.
        action_sampler_fn: A callable returning either a sampled action,
            its log-likelihood and updated state or a sampled action, its
            log-likelihood, updated state and action distribution inputs
            given Policy, ModelV2, input_dict, state batches (optional),
            explore, and timestep. Provide `action_sampler_fn` if you would
            like to have full control over the action computation step,
            including the model forward pass, possible sampling from a
            distribution, and exploration logic.
            Note: If `action_sampler_fn` is given, `action_distribution_fn`
            must be None. If both `action_sampler_fn` and
            `action_distribution_fn` are None, RLlib will simply pass
            inputs through `self.model` to get distribution inputs, create
            the distribution object, sample from it, and apply some
            exploration logic to the results.
            The callable takes as inputs: Policy, ModelV2, input_dict
            (SampleBatch), state_batches (optional), explore, and timestep.
        action_distribution_fn: A callable returning distribution inputs
            (parameters), a dist-class to generate an action distribution
            object from, and internal-state outputs (or an empty list if
            not applicable).
            Provide `action_distribution_fn` if you would like to only
            customize the model forward pass call. The resulting
            distribution parameters are then used by RLlib to create a
            distribution object, sample from it, and execute any
            exploration logic.
            Note: If `action_distribution_fn` is given, `action_sampler_fn`
            must be None. If both `action_sampler_fn` and
            `action_distribution_fn` are None, RLlib will simply pass
            inputs through `self.model` to get distribution inputs, create
            the distribution object, sample from it, and apply some
            exploration logic to the results.
            The callable takes as inputs: Policy, ModelV2, ModelInputDict,
            explore, timestep, is_training.
        max_seq_len: Max sequence length for LSTM training.
        get_batch_divisibility_req: Optional callable that returns the
            divisibility requirement for sample batches given the Policy.
    """
    self.framework = config["framework"] = "torch"
    self._loss_initialized = False
    super().__init__(observation_space, action_space, config)

    # Create multi-GPU model towers, if necessary.
    # - The central main model will be stored under self.model, residing
    #   on self.device (normally, a CPU).
    # - Each GPU will have a copy of that model under
    #   self.model_gpu_towers, matching the devices in self.devices.
    # - Parallelization is done by splitting the train batch and passing
    #   it through the model copies in parallel, then averaging over the
    #   resulting gradients, applying these averages on the main model and
    #   updating all towers' weights from the main model.
    # - In case of just one device (1 (fake or real) GPU or 1 CPU), no
    #   parallelization will be done.

    # If no Model is provided, build a default one here.
    if model is None:
        dist_class, logit_dim = ModelCatalog.get_action_dist(
            action_space, self.config["model"], framework=self.framework
        )
        model = ModelCatalog.get_model_v2(
            obs_space=self.observation_space,
            action_space=self.action_space,
            num_outputs=logit_dim,
            model_config=self.config["model"],
            framework=self.framework,
        )
        # Only override the dist class if the user did not pass one in.
        if action_distribution_class is None:
            action_distribution_class = dist_class

    # Get devices to build the graph on.
    num_gpus = self._get_num_gpus_for_policy()
    gpu_ids = list(range(torch.cuda.device_count()))
    logger.info(f"Found {len(gpu_ids)} visible cuda devices.")

    # Place on one or more CPU(s) when either:
    # - Fake GPU mode.
    # - num_gpus=0 (either set by user or we are in local_mode=True).
    # - No GPUs available.
    if config["_fake_gpus"] or num_gpus == 0 or not gpu_ids:
        self.device = torch.device("cpu")
        # Fake-GPU mode may still request fractional/multiple "GPUs";
        # `or 1` guarantees at least one (CPU) tower.
        self.devices = [self.device for _ in range(int(math.ceil(num_gpus)) or 1)]
        self.model_gpu_towers = [
            model if i == 0 else copy.deepcopy(model)
            for i in range(int(math.ceil(num_gpus)) or 1)
        ]
        if hasattr(self, "target_model"):
            self.target_models = {
                m: self.target_model for m in self.model_gpu_towers
            }
        self.model = model
    # Place on one or more actual GPU(s), when:
    # - num_gpus > 0 (set by user) AND
    # - local_mode=False AND
    # - actual GPUs available AND
    # - non-fake GPU mode.
    else:
        # We are a remote worker (WORKER_MODE=1):
        # GPUs should be assigned to us by ray.
        if ray._private.worker._mode() == ray._private.worker.WORKER_MODE:
            gpu_ids = ray.get_gpu_ids()

        if len(gpu_ids) < num_gpus:
            raise ValueError(
                "TorchPolicy was not able to find enough GPU IDs! Found "
                f"{gpu_ids}, but num_gpus={num_gpus}."
            )

        self.devices = [
            torch.device("cuda:{}".format(i))
            for i, id_ in enumerate(gpu_ids)
            if i < num_gpus
        ]
        # Tower-0's device doubles as the Policy's "main" device.
        self.device = self.devices[0]
        ids = [id_ for i, id_ in enumerate(gpu_ids) if i < num_gpus]
        self.model_gpu_towers = []
        for i, _ in enumerate(ids):
            model_copy = copy.deepcopy(model)
            self.model_gpu_towers.append(model_copy.to(self.devices[i]))
        if hasattr(self, "target_model"):
            self.target_models = {
                m: copy.deepcopy(self.target_model).to(self.devices[i])
                for i, m in enumerate(self.model_gpu_towers)
            }
        self.model = self.model_gpu_towers[0]

    # Lock used for locking some methods on the object-level.
    # This prevents possible race conditions when calling the model
    # first, then its value function (e.g. in a loss function), in
    # between of which another model call is made (e.g. to compute an
    # action).
    self._lock = threading.RLock()

    self._state_inputs = self.model.get_initial_state()
    self._is_recurrent = len(self._state_inputs) > 0
    # Auto-update model's inference view requirements, if recurrent.
    self._update_model_view_requirements_from_init_state()
    # Combine view_requirements for Model and Policy.
    self.view_requirements.update(self.model.view_requirements)

    self.exploration = self._create_exploration()
    self.unwrapped_model = model  # used to support DistributedDataParallel

    # To ensure backward compatibility:
    # Old way: If `loss` provided here, use as-is (as a function).
    if loss is not None:
        self._loss = loss
    # New way: Convert the overridden `self.loss` into a plain function,
    # so it can be called the same way as `loss` would be, ensuring
    # backward compatibility.
    elif self.loss.__func__.__qualname__ != "Policy.loss":
        self._loss = self.loss.__func__
    # `loss` not provided nor overridden from Policy -> Set to None.
    else:
        self._loss = None

    self._optimizers = force_list(self.optimizer())

    # Store, which params (by index within the model's list of
    # parameters) should be updated per optimizer.
    # Maps optimizer idx to set or param indices.
    self.multi_gpu_param_groups: List[Set[int]] = []
    main_params = {p: i for i, p in enumerate(self.model.parameters())}
    for o in self._optimizers:
        param_indices = []
        for pg_idx, pg in enumerate(o.param_groups):
            for p in pg["params"]:
                param_indices.append(main_params[p])
        self.multi_gpu_param_groups.append(set(param_indices))

    # Create n sample-batch buffers (num_multi_gpu_tower_stacks), each
    # one with m towers (num_gpus).
    num_buffers = self.config.get("num_multi_gpu_tower_stacks", 1)
    self._loaded_batches = [[] for _ in range(num_buffers)]

    self.dist_class = action_distribution_class
    self.action_sampler_fn = action_sampler_fn
    self.action_distribution_fn = action_distribution_fn

    # If set, means we are using distributed allreduce during learning.
    self.distributed_world_size = None

    self.max_seq_len = max_seq_len
    self.batch_divisibility_req = (
        get_batch_divisibility_req(self)
        if callable(get_batch_divisibility_req)
        else (get_batch_divisibility_req or 1)
    )
@override(Policy)
def compute_actions_from_input_dict(
    self,
    input_dict: Dict[str, TensorType],
    explore: bool = None,
    timestep: Optional[int] = None,
    **kwargs,
) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
    """Computes actions for an already-assembled input dict.

    Runs under `torch.no_grad()` (inference only) and delegates the
    actual model forward pass / sampling to `_compute_action_helper`.
    """
    with torch.no_grad():
        # Pass lazy (torch) tensor dict to Model as `input_dict`.
        input_dict = self._lazy_tensor_dict(input_dict)
        input_dict.set_training(True)
        # Pack internal state inputs into (separate) list.
        # `"state_in" in k[:8]` is equivalent to k.startswith("state_in")
        # since the marker is exactly 8 characters long.
        state_batches = [
            input_dict[k] for k in input_dict.keys() if "state_in" in k[:8]
        ]
        # Calculate RNN sequence lengths.
        # All-ones seq-lens (one timestep per batch row) for inference.
        seq_lens = (
            torch.tensor(
                [1] * len(state_batches[0]),
                dtype=torch.long,
                device=state_batches[0].device,
            )
            if state_batches
            else None
        )

        return self._compute_action_helper(
            input_dict, state_batches, seq_lens, explore, timestep
        )
@override(Policy)
def compute_actions(
    self,
    obs_batch: Union[List[TensorStructType], TensorStructType],
    state_batches: Optional[List[TensorType]] = None,
    prev_action_batch: Union[List[TensorStructType], TensorStructType] = None,
    prev_reward_batch: Union[List[TensorStructType], TensorStructType] = None,
    info_batch: Optional[Dict[str, list]] = None,
    episodes=None,
    explore: Optional[bool] = None,
    timestep: Optional[int] = None,
    **kwargs,
) -> Tuple[TensorStructType, List[TensorType], Dict[str, TensorType]]:
    """Computes actions from separate obs/state/prev-action/-reward args.

    Assembles the individual arguments into a single (lazy tensor) input
    dict, then delegates to `_compute_action_helper`, under
    `torch.no_grad()` (inference only).
    """
    with torch.no_grad():
        # One timestep per batch row.
        seq_lens = torch.ones(len(obs_batch), dtype=torch.int32)
        input_dict = self._lazy_tensor_dict(
            {
                SampleBatch.CUR_OBS: obs_batch,
                "is_training": False,
            }
        )
        # Only add prev-action/-reward columns when actually given.
        if prev_action_batch is not None:
            input_dict[SampleBatch.PREV_ACTIONS] = np.asarray(prev_action_batch)
        if prev_reward_batch is not None:
            input_dict[SampleBatch.PREV_REWARDS] = np.asarray(prev_reward_batch)
        state_batches = [
            convert_to_torch_tensor(s, self.device) for s in (state_batches or [])
        ]
        return self._compute_action_helper(
            input_dict, state_batches, seq_lens, explore, timestep
        )
@with_lock
@override(Policy)
def compute_log_likelihoods(
    self,
    actions: Union[List[TensorStructType], TensorStructType],
    obs_batch: Union[List[TensorStructType], TensorStructType],
    state_batches: Optional[List[TensorType]] = None,
    prev_action_batch: Optional[
        Union[List[TensorStructType], TensorStructType]
    ] = None,
    prev_reward_batch: Optional[
        Union[List[TensorStructType], TensorStructType]
    ] = None,
    actions_normalized: bool = True,
    **kwargs,
) -> TensorType:
    """Computes the log-prob/likelihood of `actions` under the current policy.

    Builds an action distribution (either via a custom
    `action_distribution_fn` or the default model forward pass + dist
    class) and evaluates `logp(actions)` under `torch.no_grad()`.
    """
    # A custom `action_sampler_fn` bypasses the distribution entirely, so
    # log-likelihoods can only be computed if a distribution fn is given.
    if self.action_sampler_fn and self.action_distribution_fn is None:
        raise ValueError(
            "Cannot compute log-prob/likelihood w/o an "
            "`action_distribution_fn` and a provided "
            "`action_sampler_fn`!"
        )

    with torch.no_grad():
        input_dict = self._lazy_tensor_dict(
            {SampleBatch.CUR_OBS: obs_batch, SampleBatch.ACTIONS: actions}
        )
        if prev_action_batch is not None:
            input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch
        if prev_reward_batch is not None:
            input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch
        seq_lens = torch.ones(len(obs_batch), dtype=torch.int32)
        state_batches = [
            convert_to_torch_tensor(s, self.device) for s in (state_batches or [])
        ]

        # Exploration hook before each forward pass.
        self.exploration.before_compute_actions(explore=False)

        # Action dist class and inputs are generated via custom function.
        if self.action_distribution_fn:
            # Try new action_distribution_fn signature, supporting
            # state_batches and seq_lens.
            try:
                dist_inputs, dist_class, state_out = self.action_distribution_fn(
                    self,
                    self.model,
                    input_dict=input_dict,
                    state_batches=state_batches,
                    seq_lens=seq_lens,
                    explore=False,
                    is_training=False,
                )
            # Trying the old way (to stay backward compatible).
            # TODO: Remove in future.
            except TypeError as e:
                # Only fall back on signature mismatches; re-raise any
                # genuine TypeError coming from inside the user function.
                if (
                    "positional argument" in e.args[0]
                    or "unexpected keyword argument" in e.args[0]
                ):
                    dist_inputs, dist_class, _ = self.action_distribution_fn(
                        policy=self,
                        model=self.model,
                        obs_batch=input_dict[SampleBatch.CUR_OBS],
                        explore=False,
                        is_training=False,
                    )
                else:
                    raise e
        # Default action-dist inputs calculation.
        else:
            dist_class = self.dist_class
            dist_inputs, _ = self.model(input_dict, state_batches, seq_lens)

        action_dist = dist_class(dist_inputs, self.model)

        # Normalize actions if necessary.
        actions = input_dict[SampleBatch.ACTIONS]
        if not actions_normalized and self.config["normalize_actions"]:
            actions = normalize_action(actions, self.action_space_struct)

        log_likelihoods = action_dist.logp(actions)

        return log_likelihoods
@with_lock
@override(Policy)
def learn_on_batch(self, postprocessed_batch: SampleBatch) -> Dict[str, TensorType]:
    """Runs one gradient update on the given (postprocessed) batch.

    Computes gradients via `compute_gradients`, steps the optimizers,
    and returns the merged learner-stats/fetch dict.
    """
    # Set Model to train mode.
    if self.model:
        self.model.train()
    # Callback handling.
    learn_stats = {}
    self.callbacks.on_learn_on_batch(
        policy=self, train_batch=postprocessed_batch, result=learn_stats
    )

    # Compute gradients (will calculate all losses and `backward()`
    # them to get the grads).
    grads, fetches = self.compute_gradients(postprocessed_batch)

    # Step the optimizers.
    # The sentinel tells `apply_gradients` that `.grad` fields are already
    # populated by the `backward()` above.
    self.apply_gradients(_directStepOptimizerSingleton)

    self.num_grad_updates += 1
    if self.model:
        fetches["model"] = self.model.metrics()
    fetches.update(
        {
            "custom_metrics": learn_stats,
            NUM_AGENT_STEPS_TRAINED: postprocessed_batch.count,
            NUM_GRAD_UPDATES_LIFETIME: self.num_grad_updates,
            # -1, b/c we have to measure this diff before we do the update above.
            DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY: (
                self.num_grad_updates
                - 1
                - (postprocessed_batch.num_grad_updates or 0)
            ),
        }
    )

    return fetches
@override(Policy)
def load_batch_into_buffer(
    self,
    batch: SampleBatch,
    buffer_index: int = 0,
) -> int:
    """Loads `batch` into the multi-GPU buffer at `buffer_index`.

    On a single CPU, the (padded) batch is stored as-is. Otherwise, the
    batch is time-sliced into one sub-batch per device, each zero-padded
    to `max_seq_len` and moved onto its device.

    Args:
        batch: The SampleBatch to load.
        buffer_index: Index of the tower-stack buffer to fill.

    Returns:
        Number of samples loaded per device.
    """
    # Set the is_training flag of the batch.
    batch.set_training(True)

    # Shortcut for 1 CPU only: Store batch in `self._loaded_batches`.
    if len(self.devices) == 1 and self.devices[0].type == "cpu":
        assert buffer_index == 0
        pad_batch_to_sequences_of_same_size(
            batch=batch,
            max_seq_len=self.max_seq_len,
            shuffle=False,
            batch_divisibility_req=self.batch_divisibility_req,
            view_requirements=self.view_requirements,
        )
        self._lazy_tensor_dict(batch)
        self._loaded_batches[0] = [batch]
        return len(batch)

    # Batch (len=28, seq-lens=[4, 7, 4, 10, 3]):
    # 0123 0123456 0123 0123456789ABC

    # 1) split into n per-GPU sub batches (n=2).
    # [0123 0123456] [012] [3 0123456789 ABC]
    # (len=14, 14 seq-lens=[4, 7, 3] [1, 10, 3])
    # NOTE: Renamed loop/list variable from `slice` (shadowed the builtin)
    # to `sub_batch`/`sub_batches`.
    sub_batches = batch.timeslices(num_slices=len(self.devices))

    # 2) zero-padding (max-seq-len=10).
    # - [0123000000 0123456000 0120000000]
    # - [3000000000 0123456789 ABC0000000]
    for sub_batch in sub_batches:
        pad_batch_to_sequences_of_same_size(
            batch=sub_batch,
            max_seq_len=self.max_seq_len,
            shuffle=False,
            batch_divisibility_req=self.batch_divisibility_req,
            view_requirements=self.view_requirements,
        )

    # 3) Load splits into the given buffer (consisting of n GPUs).
    sub_batches = [
        sub_batch.to_device(self.devices[i])
        for i, sub_batch in enumerate(sub_batches)
    ]
    self._loaded_batches[buffer_index] = sub_batches

    # Return loaded samples per-device.
    return len(sub_batches[0])
@override(Policy)
def get_num_samples_loaded_into_buffer(self, buffer_index: int = 0) -> int:
    """Returns the total number of samples currently in buffer `buffer_index`."""
    # Bug fix: `self.devices[0]` is a `torch.device`, which never compares
    # equal to the TF-style string "/cpu:0", so the CPU-only assertion was
    # dead code. Use `.type == "cpu"`, consistent with
    # `load_batch_into_buffer` and `learn_on_loaded_batch`.
    if len(self.devices) == 1 and self.devices[0].type == "cpu":
        assert buffer_index == 0
    return sum(len(b) for b in self._loaded_batches[buffer_index])
@override(Policy)
def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0):
    """Performs one (multi-GPU) update step on a previously loaded batch.

    Slices a minibatch (size taken from the config) out of the buffer,
    runs the (possibly parallel) gradient calculation on all towers,
    mean-reduces the per-tower gradients onto the main model, and steps
    the optimizers. Returns a per-tower fetch/stats dict.
    """
    if not self._loaded_batches[buffer_index]:
        raise ValueError(
            "Must call Policy.load_batch_into_buffer() before "
            "Policy.learn_on_loaded_batch()!"
        )

    # Get the correct slice of the already loaded batch to use,
    # based on offset and batch size.
    # Fall back: minibatch_size -> sgd_minibatch_size -> train_batch_size.
    device_batch_size = self.config.get("minibatch_size")
    if device_batch_size is None:
        device_batch_size = self.config.get(
            "sgd_minibatch_size",
            self.config["train_batch_size"],
        )
    device_batch_size //= len(self.devices)

    # Set Model to train mode.
    if self.model_gpu_towers:
        for t in self.model_gpu_towers:
            t.train()

    # Shortcut for 1 CPU only: Batch should already be stored in
    # `self._loaded_batches`.
    if len(self.devices) == 1 and self.devices[0].type == "cpu":
        assert buffer_index == 0
        if device_batch_size >= len(self._loaded_batches[0][0]):
            batch = self._loaded_batches[0][0]
        else:
            batch = self._loaded_batches[0][0][offset : offset + device_batch_size]
        return self.learn_on_batch(batch)

    if len(self.devices) > 1:
        # Copy weights of main model (tower-0) to all other towers.
        state_dict = self.model.state_dict()
        # Just making sure tower-0 is really the same as self.model.
        assert self.model_gpu_towers[0] is self.model
        for tower in self.model_gpu_towers[1:]:
            tower.load_state_dict(state_dict)

    # Use the full loaded batch if the requested size covers it, else slice.
    if device_batch_size >= sum(len(s) for s in self._loaded_batches[buffer_index]):
        device_batches = self._loaded_batches[buffer_index]
    else:
        device_batches = [
            b[offset : offset + device_batch_size]
            for b in self._loaded_batches[buffer_index]
        ]

    # Callback handling.
    batch_fetches = {}
    for i, batch in enumerate(device_batches):
        custom_metrics = {}
        self.callbacks.on_learn_on_batch(
            policy=self, train_batch=batch, result=custom_metrics
        )
        batch_fetches[f"tower_{i}"] = {"custom_metrics": custom_metrics}

    # Do the (maybe parallelized) gradient calculation step.
    tower_outputs = self._multi_gpu_parallel_grad_calc(device_batches)

    # Mean-reduce gradients over GPU-towers (do this on CPU: self.device).
    all_grads = []
    for i in range(len(tower_outputs[0][0])):
        if tower_outputs[0][0][i] is not None:
            all_grads.append(
                torch.mean(
                    torch.stack([t[0][i].to(self.device) for t in tower_outputs]),
                    dim=0,
                )
            )
        else:
            # Params without gradients stay grad-less.
            all_grads.append(None)
    # Set main model's grads to mean-reduced values.
    for i, p in enumerate(self.model.parameters()):
        p.grad = all_grads[i]

    self.apply_gradients(_directStepOptimizerSingleton)

    self.num_grad_updates += 1

    for i, (model, batch) in enumerate(zip(self.model_gpu_towers, device_batches)):
        batch_fetches[f"tower_{i}"].update(
            {
                LEARNER_STATS_KEY: self.extra_grad_info(batch),
                "model": model.metrics(),
                NUM_GRAD_UPDATES_LIFETIME: self.num_grad_updates,
                # -1, b/c we have to measure this diff before we do the update
                # above.
                DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY: (
                    self.num_grad_updates - 1 - (batch.num_grad_updates or 0)
                ),
            }
        )
    batch_fetches.update(self.extra_compute_grad_fetches())
    return batch_fetches
@with_lock
@override(Policy)
def compute_gradients(self, postprocessed_batch: SampleBatch) -> ModelGradients:
    """Computes gradients for the given batch (single-device path only).

    Zero-pads the batch if necessary, runs the loss + backward pass via
    `_multi_gpu_parallel_grad_calc` on the single device, and returns
    (grads, fetch-dict).
    """
    # Single-device only; multi-GPU updates go through
    # `learn_on_loaded_batch` instead.
    assert len(self.devices) == 1

    # If not done yet, see whether we have to zero-pad this batch.
    if not postprocessed_batch.zero_padded:
        pad_batch_to_sequences_of_same_size(
            batch=postprocessed_batch,
            max_seq_len=self.max_seq_len,
            shuffle=False,
            batch_divisibility_req=self.batch_divisibility_req,
            view_requirements=self.view_requirements,
        )

    postprocessed_batch.set_training(True)
    self._lazy_tensor_dict(postprocessed_batch, device=self.devices[0])

    # Do the (maybe parallelized) gradient calculation step.
    tower_outputs = self._multi_gpu_parallel_grad_calc([postprocessed_batch])

    all_grads, grad_info = tower_outputs[0]

    grad_info["allreduce_latency"] /= len(self._optimizers)
    grad_info.update(self.extra_grad_info(postprocessed_batch))

    fetches = self.extra_compute_grad_fetches()

    return all_grads, dict(fetches, **{LEARNER_STATS_KEY: grad_info})
@override(Policy)
def apply_gradients(self, gradients: ModelGradients) -> None:
    """Applies the given gradients (or already-set `.grad` fields).

    If `gradients` is the direct-step sentinel, the model parameters'
    `.grad` fields are assumed to be populated already and all optimizers
    are simply stepped. Otherwise, the passed gradients are copied into
    the parameters' `.grad` fields first (single-optimizer case only).
    """
    if gradients == _directStepOptimizerSingleton:
        for i, opt in enumerate(self._optimizers):
            opt.step()
    else:
        # TODO(sven): Not supported for multiple optimizers yet.
        assert len(self._optimizers) == 1
        for g, p in zip(gradients, self.model.parameters()):
            if g is not None:
                # Accept both torch tensors and numpy arrays as grads.
                if torch.is_tensor(g):
                    p.grad = g.to(self.device)
                else:
                    p.grad = torch.from_numpy(g).to(self.device)

        self._optimizers[0].step()
def get_tower_stats(self, stats_name: str) -> List[TensorStructType]:
    """Returns list of per-tower stats, copied to this Policy's device.

    Args:
        stats_name: The name of the stats to average over (this str
            must exist as a key inside each tower's `tower_stats` dict).

    Returns:
        The list of stats tensor (structs) of all towers, copied to this
        Policy's device.

    Raises:
        AssertionError: If the `stats_name` cannot be found in any one
            of the tower's `tower_stats` dicts.
    """

    def _to_policy_device(tensor):
        # Move a single stats tensor onto this Policy's main device.
        return tensor.to(self.device)

    data = [
        tree.map_structure(_to_policy_device, tower.tower_stats[stats_name])
        for tower in self.model_gpu_towers
        if stats_name in tower.tower_stats
    ]
    assert len(data) > 0, (
        f"Stats `{stats_name}` not found in any of the towers (you have "
        f"{len(self.model_gpu_towers)} towers in total)! Make "
        "sure you call the loss function on at least one of the towers."
    )
    return data
@override(Policy)
def get_weights(self) -> ModelWeights:
    """Returns the main model's state dict as numpy arrays, keyed by name."""
    weights = {}
    for param_name, tensor in self.model.state_dict().items():
        weights[param_name] = tensor.cpu().detach().numpy()
    return weights
@override(Policy)
def set_weights(self, weights: ModelWeights) -> None:
    """Loads the given (numpy) weights into the main model on our device."""
    tensor_weights = convert_to_torch_tensor(weights, device=self.device)
    self.model.load_state_dict(tensor_weights)
@override(Policy)
def is_recurrent(self) -> bool:
    """Whether the policy's model carries recurrent (RNN) state.

    Computed once in `__init__` from the model's initial state
    (`len(self._state_inputs) > 0`).
    """
    return self._is_recurrent
@override(Policy)
def num_state_tensors(self) -> int:
    """Returns the number of internal-state tensors of the model."""
    return len(self.model.get_initial_state())
@override(Policy)
def get_initial_state(self) -> List[TensorType]:
    """Returns the model's initial RNN state as a list of numpy arrays."""
    initial_state = self.model.get_initial_state()
    return [state_tensor.detach().cpu().numpy() for state_tensor in initial_state]
@override(Policy)
def get_state(self) -> PolicyState:
    """Returns the Policy's full state, incl. optimizer and exploration state.

    Extends the base `Policy.get_state()` dict with numpy-converted
    optimizer state dicts ("_optimizer_variables") and, if present, the
    exploration component's state ("_exploration_state").
    """
    state = super().get_state()

    state["_optimizer_variables"] = []
    for i, o in enumerate(self._optimizers):
        # Convert to numpy so the state is serializable.
        optim_state_dict = convert_to_numpy(o.state_dict())
        state["_optimizer_variables"].append(optim_state_dict)
    # Add exploration state.
    if self.exploration:
        # This is not compatible with RLModules, which have a method
        # `forward_exploration` to specify custom exploration behavior.
        state["_exploration_state"] = self.exploration.get_state()
    return state
@override(Policy)
def set_state(self, state: PolicyState) -> None:
    """Restores the Policy's state (optimizers, exploration, weights).

    Inverse of `get_state()`: restores optimizer variables first, then
    exploration state and the global timestep, and finally delegates to
    the base class for model weights and connectors.
    """
    # Set optimizer vars first.
    optimizer_vars = state.get("_optimizer_variables", None)
    if optimizer_vars:
        assert len(optimizer_vars) == len(self._optimizers)
        for o, s in zip(self._optimizers, optimizer_vars):
            # Torch optimizer param_groups include things like beta, etc. These
            # parameters should be left as scalar and not converted to tensors.
            # otherwise, torch.optim.step() will start to complain.
            optim_state_dict = {"param_groups": s["param_groups"]}
            optim_state_dict["state"] = convert_to_torch_tensor(
                s["state"], device=self.device
            )
            o.load_state_dict(optim_state_dict)
    # Set exploration's state.
    if hasattr(self, "exploration") and "_exploration_state" in state:
        self.exploration.set_state(state=state["_exploration_state"])

    # Restore global timestep.
    self.global_timestep = state["global_timestep"]

    # Then the Policy's (NN) weights and connectors.
    super().set_state(state)
def extra_grad_process(
self, optimizer: "torch.optim.Optimizer", loss: TensorType
) -> Dict[str, TensorType]:
"""Called after each optimizer.zero_grad() + loss.backward() call.
Called for each self._optimizers/loss-value pair.
Allows for gradient processing before optimizer.step() is called.
E.g. for gradient clipping.
Args:
optimizer: A torch optimizer object.
loss: The loss tensor associated with the optimizer.
Returns:
An dict with information on the gradient processing step.
"""
return {}
def extra_compute_grad_fetches(self) -> Dict[str, Any]:
"""Extra values to fetch and return from compute_gradients().
Returns:
Extra fetch dict to be added to the fetch dict of the
`compute_gradients` call.
"""
return {LEARNER_STATS_KEY: {}} # e.g, stats, td error, etc.
def extra_action_out(
self,
input_dict: Dict[str, TensorType],
state_batches: List[TensorType],
model: TorchModelV2,
action_dist: TorchDistributionWrapper,
) -> Dict[str, TensorType]:
"""Returns dict of extra info to include in experience batch.
Args:
input_dict: Dict of model input tensors.
state_batches: List of state tensors.
model: Reference to the model object.
action_dist: Torch action dist object
to get log-probs (e.g. for already sampled actions).
Returns:
Extra outputs to return in a `compute_actions_from_input_dict()`
call (3rd return value).
"""
return {}
def extra_grad_info(self, train_batch: SampleBatch) -> Dict[str, TensorType]:
"""Return dict of extra grad info.
Args:
train_batch: The training batch for which to produce
extra grad info for.
Returns:
The info dict carrying grad info per str key.
"""
return {}
def optimizer(
self,
) -> Union[List["torch.optim.Optimizer"], "torch.optim.Optimizer"]:
"""Custom the local PyTorch optimizer(s) to use.
Returns:
The local PyTorch optimizer(s) to use for this Policy.
"""
if hasattr(self, "config"):
optimizers = [
torch.optim.Adam(self.model.parameters(), lr=self.config["lr"])
]
else:
optimizers = [torch.optim.Adam(self.model.parameters())]
if self.exploration:
optimizers = self.exploration.get_exploration_optimizer(optimizers)
return optimizers
@override(Policy)
def export_model(self, export_dir: str, onnx: Optional[int] = None) -> None:
"""Exports the Policy's Model to local directory for serving.
Creates a TorchScript model and saves it.
Args:
export_dir: Local writable directory or filename.
onnx: If given, will export model in ONNX format. The
value of this parameter set the ONNX OpSet version to use.
"""
os.makedirs(export_dir, exist_ok=True)
if onnx:
self._lazy_tensor_dict(self._dummy_batch)
# Provide dummy state inputs if not an RNN (torch cannot jit with
# returned empty internal states list).
if "state_in_0" not in self._dummy_batch:
self._dummy_batch["state_in_0"] = self._dummy_batch[
SampleBatch.SEQ_LENS
] = np.array([1.0])
seq_lens = self._dummy_batch[SampleBatch.SEQ_LENS]
state_ins = []
i = 0
while "state_in_{}".format(i) in self._dummy_batch:
state_ins.append(self._dummy_batch["state_in_{}".format(i)])
i += 1
dummy_inputs = {
k: self._dummy_batch[k]
for k in self._dummy_batch.keys()
if k != "is_training"
}
file_name = os.path.join(export_dir, "model.onnx")
torch.onnx.export(
self.model,
(dummy_inputs, state_ins, seq_lens),
file_name,
export_params=True,
opset_version=onnx,
do_constant_folding=True,
input_names=list(dummy_inputs.keys())
+ ["state_ins", SampleBatch.SEQ_LENS],
output_names=["output", "state_outs"],
dynamic_axes={
k: {0: "batch_size"}
for k in list(dummy_inputs.keys())
+ ["state_ins", SampleBatch.SEQ_LENS]
},
)
# Save the torch.Model (architecture and weights, so it can be retrieved
# w/o access to the original (custom) Model or Policy code).
else:
filename = os.path.join(export_dir, "model.pt")
try:
torch.save(self.model, f=filename)
except Exception:
if os.path.exists(filename):
os.remove(filename)
logger.warning(ERR_MSG_TORCH_POLICY_CANNOT_SAVE_MODEL)
@override(Policy)
def import_model_from_h5(self, import_file: str) -> None:
"""Imports weights into torch model."""
return self.model.import_from_h5(import_file)
@with_lock
def _compute_action_helper(
self, input_dict, state_batches, seq_lens, explore, timestep
):
"""Shared forward pass logic (w/ and w/o trajectory view API).
Returns:
A tuple consisting of a) actions, b) state_out, c) extra_fetches.
"""
explore = explore if explore is not None else self.config["explore"]
timestep = timestep if timestep is not None else self.global_timestep
self._is_recurrent = state_batches is not None and state_batches != []
# Switch to eval mode.
if self.model:
self.model.eval()
if self.action_sampler_fn:
action_dist = dist_inputs = None
action_sampler_outputs = self.action_sampler_fn(
self,
self.model,
input_dict,
state_batches,
explore=explore,
timestep=timestep,
)
if len(action_sampler_outputs) == 4:
actions, logp, dist_inputs, state_out = action_sampler_outputs
else:
actions, logp, state_out = action_sampler_outputs
else:
# Call the exploration before_compute_actions hook.
self.exploration.before_compute_actions(explore=explore, timestep=timestep)
if self.action_distribution_fn:
# Try new action_distribution_fn signature, supporting
# state_batches and seq_lens.
try:
dist_inputs, dist_class, state_out = self.action_distribution_fn(
self,
self.model,
input_dict=input_dict,
state_batches=state_batches,
seq_lens=seq_lens,
explore=explore,
timestep=timestep,
is_training=False,
)
# Trying the old way (to stay backward compatible).
# TODO: Remove in future.
except TypeError as e:
if (
"positional argument" in e.args[0]
or "unexpected keyword argument" in e.args[0]
):
(
dist_inputs,
dist_class,
state_out,
) = self.action_distribution_fn(
self,
self.model,
input_dict[SampleBatch.CUR_OBS],
explore=explore,
timestep=timestep,
is_training=False,
)
else:
raise e
else:
dist_class = self.dist_class
dist_inputs, state_out = self.model(input_dict, state_batches, seq_lens)
if not (
isinstance(dist_class, functools.partial)
or issubclass(dist_class, TorchDistributionWrapper)
):
raise ValueError(
"`dist_class` ({}) not a TorchDistributionWrapper "
"subclass! Make sure your `action_distribution_fn` or "
"`make_model_and_action_dist` return a correct "
"distribution class.".format(dist_class.__name__)
)
action_dist = dist_class(dist_inputs, self.model)
# Get the exploration action from the forward results.
actions, logp = self.exploration.get_exploration_action(
action_distribution=action_dist, timestep=timestep, explore=explore
)
input_dict[SampleBatch.ACTIONS] = actions
# Add default and custom fetches.
extra_fetches = self.extra_action_out(
input_dict, state_batches, self.model, action_dist
)
# Action-dist inputs.
if dist_inputs is not None:
extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = dist_inputs
# Action-logp and action-prob.
if logp is not None:
extra_fetches[SampleBatch.ACTION_PROB] = torch.exp(logp.float())
extra_fetches[SampleBatch.ACTION_LOGP] = logp
# Update our global timestep by the batch size.
self.global_timestep += len(input_dict[SampleBatch.CUR_OBS])
return convert_to_numpy((actions, state_out, extra_fetches))
def _lazy_tensor_dict(self, postprocessed_batch: SampleBatch, device=None):
# TODO: (sven): Keep for a while to ensure backward compatibility.
if not isinstance(postprocessed_batch, SampleBatch):
postprocessed_batch = SampleBatch(postprocessed_batch)
postprocessed_batch.set_get_interceptor(
functools.partial(convert_to_torch_tensor, device=device or self.device)
)
return postprocessed_batch
def _multi_gpu_parallel_grad_calc(
self, sample_batches: List[SampleBatch]
) -> List[Tuple[List[TensorType], GradInfoDict]]:
"""Performs a parallelized loss and gradient calculation over the batch.
Splits up the given train batch into n shards (n=number of this
Policy's devices) and passes each data shard (in parallel) through
the loss function using the individual devices' models
(self.model_gpu_towers). Then returns each tower's outputs.
Args:
sample_batches: A list of SampleBatch shards to
calculate loss and gradients for.
Returns:
A list (one item per device) of 2-tuples, each with 1) gradient
list and 2) grad info dict.
"""
assert len(self.model_gpu_towers) == len(sample_batches)
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(shard_idx, model, sample_batch, device):
torch.set_grad_enabled(grad_enabled)
try:
with NullContextManager() if device.type == "cpu" else torch.cuda.device( # noqa: E501
device
):
loss_out = force_list(
self._loss(self, model, self.dist_class, sample_batch)
)
# Call Model's custom-loss with Policy loss outputs and
# train_batch.
loss_out = model.custom_loss(loss_out, sample_batch)
assert len(loss_out) == len(self._optimizers)
# Loop through all optimizers.
grad_info = {"allreduce_latency": 0.0}
parameters = list(model.parameters())
all_grads = [None for _ in range(len(parameters))]
for opt_idx, opt in enumerate(self._optimizers):
# Erase gradients in all vars of the tower that this
# optimizer would affect.
param_indices = self.multi_gpu_param_groups[opt_idx]
for param_idx, param in enumerate(parameters):
if param_idx in param_indices and param.grad is not None:
param.grad.data.zero_()
# Recompute gradients of loss over all variables.
loss_out[opt_idx].backward(retain_graph=True)
grad_info.update(
self.extra_grad_process(opt, loss_out[opt_idx])
)
grads = []
# Note that return values are just references;
# Calling zero_grad would modify the values.
for param_idx, param in enumerate(parameters):
if param_idx in param_indices:
if param.grad is not None:
grads.append(param.grad)
all_grads[param_idx] = param.grad
if self.distributed_world_size:
start = time.time()
if torch.cuda.is_available():
# Sadly, allreduce_coalesced does not work with
# CUDA yet.
for g in grads:
torch.distributed.all_reduce(
g, op=torch.distributed.ReduceOp.SUM
)
else:
torch.distributed.all_reduce_coalesced(
grads, op=torch.distributed.ReduceOp.SUM
)
for param_group in opt.param_groups:
for p in param_group["params"]:
if p.grad is not None:
p.grad /= self.distributed_world_size
grad_info["allreduce_latency"] += time.time() - start
with lock:
results[shard_idx] = (all_grads, grad_info)
except Exception as e:
import traceback
with lock:
results[shard_idx] = (
ValueError(
f"Error In tower {shard_idx} on device "
f"{device} during multi GPU parallel gradient "
f"calculation:"
f": {e}\n"
f"Traceback: \n"
f"{traceback.format_exc()}\n"
),
e,
)
# Single device (GPU) or fake-GPU case (serialize for better
# debugging).
if len(self.devices) == 1 or self.config["_fake_gpus"]:
for shard_idx, (model, sample_batch, device) in enumerate(
zip(self.model_gpu_towers, sample_batches, self.devices)
):
_worker(shard_idx, model, sample_batch, device)
# Raise errors right away for better debugging.
last_result = results[len(results) - 1]
if isinstance(last_result[0], ValueError):
raise last_result[0] from last_result[1]
# Multi device (GPU) case: Parallelize via threads.
else:
threads = [
threading.Thread(
target=_worker, args=(shard_idx, model, sample_batch, device)
)
for shard_idx, (model, sample_batch, device) in enumerate(
zip(self.model_gpu_towers, sample_batches, self.devices)
)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Gather all threads' outputs and return.
outputs = []
for shard_idx in range(len(sample_batches)):
output = results[shard_idx]
if isinstance(output[0], Exception):
raise output[0] from output[1]
outputs.append(results[shard_idx])
return outputs
@OldAPIStack
| TorchPolicy |
python | getsentry__sentry | src/sentry/incidents/endpoints/project_alert_rule_task_details.py | {
"start": 746,
"end": 2504
} | class ____(ProjectEndpoint):
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (ProjectSettingPermission,)
def get(self, request: Request, project, task_uuid) -> Response:
"""
Retrieve the status of the async task
Return details of the alert rule if the task is successful
"""
client = RedisRuleStatus(task_uuid)
result = client.get_value()
status = result["status"]
rule_id = result.get("rule_id")
error = result.get("error")
# if the status is "pending" we don't have a rule yet or error
context = {"status": status, "alertRule": None, "error": None}
if rule_id and status == "success":
try:
alert_rule = AlertRule.objects.get(projects=project, id=rule_id)
if features.has(
"organizations:workflow-engine-rule-serializers", project.organization
):
try:
detector = Detector.objects.get(
alertruledetector__alert_rule_id=alert_rule.id
)
except Detector.DoesNotExist:
raise Http404
context["alertRule"] = serialize(
detector, request.user, WorkflowEngineDetectorSerializer()
)
else:
context["alertRule"] = serialize(alert_rule, request.user)
except AlertRule.DoesNotExist:
raise Http404
if status == "failed":
context["error"] = error
return Response(context, status=200)
| ProjectAlertRuleTaskDetailsEndpoint |
python | huggingface__transformers | src/transformers/models/prompt_depth_anything/image_processing_prompt_depth_anything.py | {
"start": 3453,
"end": 25202
} | class ____(BaseImageProcessor):
r"""
Constructs a PromptDepthAnything image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions. Can be overridden by `do_resize` in `preprocess`.
size (`dict[str, int]` *optional*, defaults to `{"height": 384, "width": 384}`):
Size of the image after resizing. Can be overridden by `size` in `preprocess`.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Defines the resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. Can
be overridden by `keep_aspect_ratio` in `preprocess`.
ensure_multiple_of (`int`, *optional*, defaults to 1):
If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Can be overridden
by `ensure_multiple_of` in `preprocess`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
`preprocess`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in `preprocess`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_pad (`bool`, *optional*, defaults to `False`):
Whether to apply center padding. This was introduced in the DINOv2 paper, which uses the model in
combination with DPT.
size_divisor (`int`, *optional*):
If `do_pad` is `True`, pads the image dimensions to be divisible by this value. This was introduced in the
DINOv2 paper, which uses the model in combination with DPT.
prompt_scale_to_meter (`float`, *optional*, defaults to 0.001):
Scale factor to convert the prompt depth to meters.
"""
model_input_names = ["pixel_values", "prompt_depth"]
valid_kwargs = PromptDepthAnythingImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
keep_aspect_ratio: bool = False,
ensure_multiple_of: int = 1,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: bool = False,
size_divisor: Optional[int] = None,
prompt_scale_to_meter: float = 0.001, # default unit is mm
**kwargs,
):
super().__init__(**kwargs)
size = size if size is not None else {"height": 384, "width": 384}
size = get_size_dict(size)
self.do_resize = do_resize
self.size = size
self.keep_aspect_ratio = keep_aspect_ratio
self.ensure_multiple_of = ensure_multiple_of
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
self.do_pad = do_pad
self.size_divisor = size_divisor
self.prompt_scale_to_meter = prompt_scale_to_meter
def resize(
self,
image: np.ndarray,
size: dict[str, int],
keep_aspect_ratio: bool = False,
ensure_multiple_of: int = 1,
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image
is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is
set, the image is resized to a size that is a multiple of this value.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Target size of the output image.
keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.
ensure_multiple_of (`int`, *optional*, defaults to 1):
The image is resized to a size that is a multiple of this value.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resiizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
output_size = _get_resize_output_image_size(
image,
output_size=(size["height"], size["width"]),
keep_aspect_ratio=keep_aspect_ratio,
multiple=ensure_multiple_of,
input_data_format=input_data_format,
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def pad_image(
self,
image: np.ndarray,
size_divisor: int,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Center pad an image to be a multiple of `multiple`.
Args:
image (`np.ndarray`):
Image to pad.
size_divisor (`int`):
The width and height of the image will be padded to a multiple of this number.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
def _get_pad(size, size_divisor):
new_size = math.ceil(size / size_divisor) * size_divisor
pad_size = new_size - size
pad_size_left = pad_size // 2
pad_size_right = pad_size - pad_size_left
return pad_size_left, pad_size_right
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
height, width = get_image_size(image, input_data_format)
pad_size_top, pad_size_bottom = _get_pad(height, size_divisor)
pad_size_left, pad_size_right = _get_pad(width, size_divisor)
padded_image = pad(
image, ((pad_size_top, pad_size_bottom), (pad_size_left, pad_size_right)), data_format=data_format
)
return padded_image
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
prompt_depth: Optional[ImageInput] = None,
do_resize: Optional[bool] = None,
size: Optional[int] = None,
keep_aspect_ratio: Optional[bool] = None,
ensure_multiple_of: Optional[int] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_pad: Optional[bool] = None,
size_divisor: Optional[int] = None,
prompt_scale_to_meter: Optional[float] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: ChannelDimension = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> BatchFeature:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
prompt_depth (`ImageInput`, *optional*):
Prompt depth to preprocess, which can be sparse depth obtained from multi-view geometry or
low-resolution depth from a depth sensor. Generally has shape (height, width), where height
and width can be smaller than those of the images. It's optional and can be None, which means no prompt depth
is used. If it is None, the output depth will be a monocular relative depth.
It is recommended to provide a prompt_scale_to_meter value, which is the scale factor to convert the prompt depth
to meters. This is useful when the prompt depth is not in meters.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. If `keep_aspect_ratio` is `True`, the image is resized to the largest
possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is
resized to a size that is a multiple of this value.
keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`):
Whether to keep the aspect ratio of the image. If False, the image will be resized to (size, size). If
True, the image will be resized to keep the aspect ratio and the size will be the maximum possible.
ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`):
Ensure that the image size is a multiple of this value.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation.
prompt_scale_to_meter (`float`, *optional*, defaults to `self.prompt_scale_to_meter`):
Scale factor to convert the prompt depth to meters.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size)
keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_pad = do_pad if do_pad is not None else self.do_pad
size_divisor = size_divisor if size_divisor is not None else self.size_divisor
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if is_scaled_image(images[0]) and do_rescale:
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
preprocessed_images = []
for image in images:
if do_resize:
image = self.resize(
image=image,
size=size,
resample=resample,
keep_aspect_ratio=keep_aspect_ratio,
ensure_multiple_of=ensure_multiple_of,
input_data_format=input_data_format,
)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
if do_pad:
image = self.pad_image(image=image, size_divisor=size_divisor, input_data_format=input_data_format)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
preprocessed_images.append(image)
images = preprocessed_images
data = {"pixel_values": images}
if prompt_depth is not None:
# prompt_depth is a list of images with shape (height, width)
# we need to convert it to a list of images with shape (1, height, width)
prompt_depths = make_flat_list_of_images(prompt_depth, expected_ndims=2)
# Validate prompt_depths has same length as images
if len(prompt_depths) != len(images):
raise ValueError(
f"Number of prompt depth images ({len(prompt_depths)}) does not match number of input images ({len(images)})"
)
if prompt_scale_to_meter is None:
prompt_scale_to_meter = self.prompt_scale_to_meter
processed_prompt_depths = []
for depth in prompt_depths:
depth = to_numpy_array(depth)
depth = depth * prompt_scale_to_meter
if depth.min() == depth.max():
# Prompt depth is invalid, min and max are the same.
# We can simply select one pixel and set it to a small value.
depth[0, 0] = depth[0, 0] + 1e-6
depth = depth[..., None].astype(np.float32)
# Always use LAST as input format since we add channel dim with [..., None]
depth = to_channel_dimension_format(depth, data_format, input_channel_dim=ChannelDimension.LAST)
processed_prompt_depths.append(depth)
prompt_depths = processed_prompt_depths
data["prompt_depth"] = prompt_depths
return BatchFeature(data=data, tensor_type=return_tensors)
# Copied from transformers.models.dpt.image_processing_dpt.DPTImageProcessor.post_process_depth_estimation with DPT->PromptDepthAnything
def post_process_depth_estimation(
self,
outputs: "DepthEstimatorOutput",
target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = None,
) -> list[dict[str, TensorType]]:
"""
Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images.
Only supports PyTorch.
Args:
outputs ([`DepthEstimatorOutput`]):
Raw outputs of the model.
target_sizes (`TensorType` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
Returns:
`list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
predictions.
"""
requires_backends(self, "torch")
predicted_depth = outputs.predicted_depth
if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the predicted depth"
)
results = []
target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
for depth, target_size in zip(predicted_depth, target_sizes):
if target_size is not None:
depth = torch.nn.functional.interpolate(
depth.unsqueeze(0).unsqueeze(1), size=target_size, mode="bicubic", align_corners=False
).squeeze()
results.append({"predicted_depth": depth})
return results
__all__ = ["PromptDepthAnythingImageProcessor"]
| PromptDepthAnythingImageProcessor |
python | getsentry__sentry | src/sentry/utils/cursors.py | {
"start": 2216,
"end": 2560
} | class ____(Cursor):
@classmethod
def from_string(cls, cursor_str: str) -> Cursor:
bits = cursor_str.split(":")
if len(bits) != 3:
raise ValueError
try:
return Cursor(bits[0], int(bits[1]), int(bits[2]))
except (TypeError, ValueError):
raise ValueError
| EAPPageTokenCursor |
python | pandas-dev__pandas | pandas/core/base.py | {
"start": 2977,
"end": 4249
} | class ____:
"""
Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
call to `self.__freeze()`. Mainly used to prevent the user from using
wrong attributes on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self) -> None:
"""
Prevents setting additional attributes.
"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key: str, value) -> None:
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if getattr(self, "__frozen", False) and not (
key == "_cache"
or key in type(self).__dict__
or getattr(self, key, None) is not None
):
raise AttributeError(f"You cannot add any new attribute '{key}'")
object.__setattr__(self, key, value)
| NoNewAttributesMixin |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 46908,
"end": 47424
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("ja_JP")
Faker.seed(0)
def test_day(self):
day = self.fake.day_of_week()
assert day in JaJpProvider.DAY_NAMES.values()
def test_month(self):
month = self.fake.month_name()
assert month in JaJpProvider.MONTH_NAMES.values()
def test_traditional_month(self):
month = self.fake.traditional_month_name()
assert month in JaJpProvider.TRADITIONAL_MONTH_NAMES.values()
| TestJaJp |
python | huggingface__transformers | src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | {
"start": 31734,
"end": 32393
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.layers = nn.ModuleList(
SeamlessM4Tv2ConformerAdapterLayer(config) for _ in range(config.num_adapter_layers)
)
def forward(self, hidden_states, attention_mask):
# down project hidden_states if necessary
for layer in self.layers:
hidden_states = layer(hidden_states, attention_mask)
return hidden_states
############ TEXT / UNITS related code ################
# Copied from transformers.models.m2m_100.modeling_m2m_100.M2M100ScaledWordEmbedding with M2M100->SeamlessM4Tv2
| SeamlessM4Tv2ConformerAdapter |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_stateful.py | {
"start": 9019,
"end": 9226
} | class ____(RuleBasedStateMachine):
values = Bundle("values")
@rule(target=values, value=st.lists(values))
def f(self, value):
assert len(value) == 0
return value
| SourceSameAsTarget |
python | sanic-org__sanic | tests/test_exceptions_handler.py | {
"start": 403,
"end": 8681
} | class ____(ServerError):
pass
@pytest.fixture
def exception_handler_app():
exception_handler_app = Sanic("test_exception_handler")
@exception_handler_app.route("/1", error_format="html")
def handler_1(request):
raise BadRequest("OK")
@exception_handler_app.route("/2", error_format="html")
def handler_2(request):
raise ServerError("OK")
@exception_handler_app.route("/3", error_format="html")
def handler_3(request):
raise NotFound("OK")
@exception_handler_app.route("/4", error_format="html")
def handler_4(request):
foo = bar # noqa -- F821
return text(foo)
@exception_handler_app.route("/5", error_format="html")
def handler_5(request):
class CustomServerError(ServerError):
pass
raise CustomServerError("Custom server error")
@exception_handler_app.route("/6/<arg:int>", error_format="html")
def handler_6(request, arg):
try:
foo = 1 / arg
except Exception as e:
raise e from ValueError(f"{arg}")
return text(foo)
@exception_handler_app.route("/7", error_format="html")
def handler_7(request):
raise Forbidden("go away!")
@exception_handler_app.route("/8", error_format="html")
def handler_8(request):
raise ErrorWithRequestCtx("OK")
@exception_handler_app.exception(ErrorWithRequestCtx, NotFound)
def handler_exception_with_ctx(request, exception):
return text(request.ctx.middleware_ran)
@exception_handler_app.exception(ServerError)
def handler_exception(request, exception):
return text("OK")
@exception_handler_app.exception(Forbidden)
async def async_handler_exception(request, exception):
response = await request.respond(content_type="text/csv")
await response.send("foo,")
await asyncio.sleep(0.001)
await response.send("bar")
@exception_handler_app.middleware
async def some_request_middleware(request):
request.ctx.middleware_ran = "Done."
return exception_handler_app
def test_invalid_usage_exception_handler(exception_handler_app: Sanic):
request, response = exception_handler_app.test_client.get("/1")
assert response.status == 400
def test_server_error_exception_handler(exception_handler_app: Sanic):
request, response = exception_handler_app.test_client.get("/2")
assert response.status == 200
assert response.text == "OK"
def test_not_found_exception_handler(exception_handler_app: Sanic):
request, response = exception_handler_app.test_client.get("/3")
assert response.status == 200
def test_text_exception__handler(exception_handler_app: Sanic):
request, response = exception_handler_app.test_client.get("/random")
assert response.status == 200
assert response.text == "Done."
def test_async_exception_handler(exception_handler_app: Sanic):
request, response = exception_handler_app.test_client.get("/7")
assert response.status == 200
assert response.text == "foo,bar"
def test_html_traceback_output_in_debug_mode(exception_handler_app: Sanic):
request, response = exception_handler_app.test_client.get("/4", debug=True)
assert response.status == 500
soup = BeautifulSoup(response.body, "html.parser")
html = str(soup)
assert "handler_4" in html
assert "foo = bar" in html
summary_text = soup.select("h3")[0].text
assert "NameError: name 'bar' is not defined" == summary_text
request_text = soup.select("h2")[-1].text
assert "GET /4" == request_text
def test_inherited_exception_handler(exception_handler_app: Sanic):
request, response = exception_handler_app.test_client.get("/5")
assert response.status == 200
def test_chained_exception_handler(exception_handler_app: Sanic):
request, response = exception_handler_app.test_client.get(
"/6/0", debug=True
)
assert response.status == 500
soup = BeautifulSoup(response.body, "html.parser")
html = str(soup)
assert "handler_6" in html
assert "foo = 1 / arg" in html
assert "ValueError" in html
assert "GET /6" in html
summary_text = soup.select("h3")[0].text
assert "ZeroDivisionError: division by zero" == summary_text
def test_exception_handler_lookup(exception_handler_app: Sanic):
class CustomError(Exception):
pass
class CustomServerError(ServerError):
pass
def custom_error_handler():
pass
def server_error_handler():
pass
def import_error_handler():
pass
try:
ModuleNotFoundError # noqa: F823
except Exception:
class ModuleNotFoundError(ImportError):
pass
handler = ErrorHandler()
handler.add(ImportError, import_error_handler)
handler.add(CustomError, custom_error_handler)
handler.add(ServerError, server_error_handler)
assert handler.lookup(ImportError(), None) == import_error_handler
assert handler.lookup(ModuleNotFoundError(), None) == import_error_handler
assert handler.lookup(CustomError(), None) == custom_error_handler
assert handler.lookup(ServerError("Error"), None) == server_error_handler
assert (
handler.lookup(CustomServerError("Error"), None)
== server_error_handler
)
# once again to ensure there is no caching bug
assert handler.lookup(ImportError(), None) == import_error_handler
assert handler.lookup(ModuleNotFoundError(), None) == import_error_handler
assert handler.lookup(CustomError(), None) == custom_error_handler
assert handler.lookup(ServerError("Error"), None) == server_error_handler
assert (
handler.lookup(CustomServerError("Error"), None)
== server_error_handler
)
def test_exception_handler_processed_request_middleware(
exception_handler_app: Sanic,
):
request, response = exception_handler_app.test_client.get("/8")
assert response.status == 200
assert response.text == "Done."
def test_error_handler_noisy_log(
exception_handler_app: Sanic, monkeypatch: MonkeyPatch
):
err_logger = Mock()
monkeypatch.setattr(handlers.error, "error_logger", err_logger)
exception_handler_app.config["NOISY_EXCEPTIONS"] = False
exception_handler_app.test_client.get("/1")
err_logger.exception.assert_not_called()
exception_handler_app.config["NOISY_EXCEPTIONS"] = True
request, _ = exception_handler_app.test_client.get("/1")
err_logger.exception.assert_called_with(
"Exception occurred while handling uri: %s", repr(request.url)
)
def test_exception_handler_response_was_sent(
app: Sanic,
caplog: LogCaptureFixture,
message_in_records: Callable[[list[logging.LogRecord], str], bool],
):
exception_handler_ran = False
@app.exception(ServerError)
async def exception_handler(request, exception):
nonlocal exception_handler_ran
exception_handler_ran = True
return text("Error")
@app.route("/1")
async def handler1(request: Request):
response = await request.respond()
await response.send("some text")
raise ServerError("Exception")
@app.route("/2")
async def handler2(request: Request):
await request.respond()
raise ServerError("Exception")
with caplog.at_level(logging.WARNING):
_, response = app.test_client.get("/1")
assert "some text" in response.text
message_in_records(
caplog.records,
(
"An error occurred while handling the request after at "
"least some part of the response was sent to the client. "
"Therefore, the response from your custom exception "
),
)
_, response = app.test_client.get("/2")
assert "Error" in response.text
def test_errir_on_duplicate(app: Sanic):
@app.exception(ServerError)
async def exception_handler_1(request, exception): ...
message = (
"Duplicate exception handler definition on: route=__ALL_ROUTES__ and "
"exception=<class 'sanic.exceptions.ServerError'>"
)
with pytest.raises(ServerError, match=message):
@app.exception(ServerError)
async def exception_handler_2(request, exception): ...
| ErrorWithRequestCtx |
python | mlflow__mlflow | mlflow/genai/git_versioning/git_info.py | {
"start": 282,
"end": 389
} | class ____(Exception):
"""Raised when a git operation fails"""
@dataclass(kw_only=True)
| GitOperationError |
python | doocs__leetcode | lcof2/剑指 Offer II 084. 含有重复元素集合的全排列/Solution.py | {
"start": 0,
"end": 602
} | class ____:
def permuteUnique(self, nums: List[int]) -> List[List[int]]:
n = len(nums)
res = []
path = [0] * n
used = [False] * n
nums.sort()
def dfs(u):
if u == n:
res.append(path.copy())
return
for i in range(n):
if used[i] or (i > 0 and nums[i] == nums[i - 1] and not used[i - 1]):
continue
path[u] = nums[i]
used[i] = True
dfs(u + 1)
used[i] = False
dfs(0)
return res
| Solution |
python | pandas-dev__pandas | pandas/tests/indexing/multiindex/test_partial.py | {
"start": 154,
"end": 8358
} | class ____:
def test_getitem_partial_int(self):
# GH 12416
# with single item
l1 = [10, 20]
l2 = ["a", "b"]
df = DataFrame(index=range(2), columns=MultiIndex.from_product([l1, l2]))
expected = DataFrame(index=range(2), columns=l2)
result = df[20]
tm.assert_frame_equal(result, expected)
# with list
expected = DataFrame(
index=range(2), columns=MultiIndex.from_product([l1[1:], l2])
)
result = df[[20]]
tm.assert_frame_equal(result, expected)
# missing item:
with pytest.raises(KeyError, match="1"):
df[1]
with pytest.raises(KeyError, match=r"'\[1\] not in index'"):
df[[1]]
def test_series_slice_partial(self):
pass
def test_xs_partial(
self,
multiindex_dataframe_random_data,
multiindex_year_month_day_dataframe_random_data,
):
frame = multiindex_dataframe_random_data
ymd = multiindex_year_month_day_dataframe_random_data
result = frame.xs("foo")
result2 = frame.loc["foo"]
expected = frame.T["foo"].T
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result, result2)
result = ymd.xs((2000, 4))
expected = ymd.loc[2000, 4]
tm.assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], [-1, 1]],
codes=[
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1],
],
)
df = DataFrame(
np.random.default_rng(2).standard_normal((8, 4)),
index=index,
columns=list("abcd"),
)
result = df.xs(("foo", "one"))
expected = df.loc["foo", "one"]
tm.assert_frame_equal(result, expected)
def test_getitem_partial(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
ymd = ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.codes[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
tm.assert_frame_equal(result, expected)
def test_fancy_slice_partial(
self,
multiindex_dataframe_random_data,
multiindex_year_month_day_dataframe_random_data,
):
frame = multiindex_dataframe_random_data
result = frame.loc["bar":"baz"]
expected = frame[3:7]
tm.assert_frame_equal(result, expected)
ymd = multiindex_year_month_day_dataframe_random_data
result = ymd.loc[(2000, 2) : (2000, 4)]
lev = ymd.index.codes[1]
expected = ymd[(lev >= 1) & (lev <= 3)]
tm.assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(
codes=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[["a", "b"], ["x", "y"], ["p", "q"]],
)
df = DataFrame(np.random.default_rng(2).random((3, 2)), index=idx)
result = df.loc[("a", "y"), :]
expected = df.loc[("a", "y")]
tm.assert_frame_equal(result, expected)
result = df.loc[("a", "y"), [1, 0]]
expected = df.loc[("a", "y")][[1, 0]]
tm.assert_frame_equal(result, expected)
with pytest.raises(KeyError, match=r"\('a', 'foo'\)"):
df.loc[("a", "foo"), :]
def test_partial_set(
self,
multiindex_year_month_day_dataframe_random_data,
):
# GH #397
ymd = multiindex_year_month_day_dataframe_random_data
df = ymd.copy()
exp = ymd.copy()
df.loc[2000, 4] = 0
exp.iloc[65:85] = 0
tm.assert_frame_equal(df, exp)
with tm.raises_chained_assignment_error():
df["A"].loc[2000, 4] = 1
df.loc[(2000, 4), "A"] = 1
exp.iloc[65:85, 0] = 1
tm.assert_frame_equal(df, exp)
df.loc[2000] = 5
exp.iloc[:100] = 5
tm.assert_frame_equal(df, exp)
# this works...for now
with tm.raises_chained_assignment_error():
df["A"].iloc[14] = 5
assert df["A"].iloc[14] == exp["A"].iloc[14]
@pytest.mark.parametrize("dtype", [int, float])
def test_getitem_intkey_leading_level(
self, multiindex_year_month_day_dataframe_random_data, dtype
):
# GH#33355 dont fall-back to positional when leading level is int
ymd = multiindex_year_month_day_dataframe_random_data
levels = ymd.index.levels
ymd.index = ymd.index.set_levels([levels[0].astype(dtype)] + levels[1:])
ser = ymd["A"]
mi = ser.index
assert isinstance(mi, MultiIndex)
if dtype is int:
assert mi.levels[0].dtype == np.dtype(int)
else:
assert mi.levels[0].dtype == np.float64
assert 14 not in mi.levels[0]
assert not mi.levels[0]._should_fallback_to_positional
assert not mi._should_fallback_to_positional
with pytest.raises(KeyError, match="14"):
ser[14]
# ---------------------------------------------------------------------
def test_setitem_multiple_partial(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
expected = frame.copy()
result = frame.copy()
result.loc[["foo", "bar"]] = 0
expected.loc["foo"] = 0
expected.loc["bar"] = 0
tm.assert_frame_equal(result, expected)
expected = frame.copy()
result = frame.copy()
result.loc["foo":"bar"] = 0
expected.loc["foo"] = 0
expected.loc["bar"] = 0
tm.assert_frame_equal(result, expected)
expected = frame["A"].copy()
result = frame["A"].copy()
result.loc[["foo", "bar"]] = 0
expected.loc["foo"] = 0
expected.loc["bar"] = 0
tm.assert_series_equal(result, expected)
expected = frame["A"].copy()
result = frame["A"].copy()
result.loc["foo":"bar"] = 0
expected.loc["foo"] = 0
expected.loc["bar"] = 0
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"indexer, exp_idx, exp_values",
[
(
slice("2019-2", None),
DatetimeIndex(["2019-02-01"], dtype="M8[ns]"),
[2, 3],
),
(
slice(None, "2019-2"),
date_range("2019", periods=2, freq="MS", unit="ns"),
[0, 1, 2, 3],
),
],
)
def test_partial_getitem_loc_datetime(self, indexer, exp_idx, exp_values):
# GH: 25165
date_idx = date_range("2019", periods=2, freq="MS", unit="ns")
df = DataFrame(
list(range(4)),
index=MultiIndex.from_product([date_idx, [0, 1]], names=["x", "y"]),
)
expected = DataFrame(
exp_values,
index=MultiIndex.from_product([exp_idx, [0, 1]], names=["x", "y"]),
)
result = df[indexer]
tm.assert_frame_equal(result, expected)
result = df.loc[indexer]
tm.assert_frame_equal(result, expected)
result = df.loc(axis=0)[indexer]
tm.assert_frame_equal(result, expected)
result = df.loc[indexer, :]
tm.assert_frame_equal(result, expected)
df2 = df.swaplevel(0, 1).sort_index()
expected = expected.swaplevel(0, 1).sort_index()
result = df2.loc[:, indexer, :]
tm.assert_frame_equal(result, expected)
def test_loc_getitem_partial_both_axis():
# gh-12660
iterables = [["a", "b"], [2, 1]]
columns = MultiIndex.from_product(iterables, names=["col1", "col2"])
rows = MultiIndex.from_product(iterables, names=["row1", "row2"])
df = DataFrame(
np.random.default_rng(2).standard_normal((4, 4)), index=rows, columns=columns
)
expected = df.iloc[:2, 2:].droplevel("row1").droplevel("col1", axis=1)
result = df.loc["a", "b"]
tm.assert_frame_equal(result, expected)
| TestMultiIndexPartial |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py | {
"start": 41468,
"end": 52648
} | class ____(quantize_model_test_base.QuantizedModelTest):
@parameterized.parameters(
testing.parameter_combinations([{
'bias_fn': (
None,
nn_ops.bias_add,
),
'activation_fn': (
None,
nn_ops.relu,
nn_ops.relu6,
),
'dim_sizes': (
# tf.MatMul cases.
([None, 1024], [1024, 3]), # dynamic batch dim.
([1, 1024], [1024, 3]),
# tf.BatchMatMul cases.
([10, 1, 1024], [10, 1024, 3]),
([2, 3, 1, 1024], [2, 3, 1024, 3]),
),
}])
)
@test_util.run_in_graph_and_eager_modes
def test_matmul_weight_only_model(
self,
bias_fn: Optional[ops.Operation],
activation_fn: Optional[ops.Operation],
dim_sizes: Sequence[int],
):
lhs_dim_size, rhs_dim_size = dim_sizes
input_shape = (*lhs_dim_size,)
filter_shape = (*rhs_dim_size,)
static_input_shape = [dim if dim is not None else 2 for dim in input_shape]
model = self._create_matmul_model(
input_shape,
filter_shape,
self._input_saved_model_path,
bias_fn,
activation_fn,
)
rng = np.random.default_rng(1234)
input_data = ops.convert_to_tensor(
rng.uniform(low=0.0, high=1.0, size=static_input_shape).astype(
np.float32
)
)
config = qc.QuantizationConfig(
weight_only_ptq_preset=qc.WeightOnlyPtqPreset(),
tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]),
)
quantization.quantize_saved_model(
self._input_saved_model_path,
self._output_saved_model_path,
config,
)
expected_outputs = model.matmul(input_data)
root = load.load(self._output_saved_model_path)
self.assertCountEqual(root.signatures.keys(), {'serving_default'})
new_outputs = root.signatures['serving_default'](
input_tensor=ops.convert_to_tensor(input_data)
)
# Tests that the quantized graph outputs similar values. The rtol and atol
# values are arbitrary.
self.assertAllClose(new_outputs, expected_outputs, rtol=0.03, atol=0.2)
module_str = self._extract_first_xla_call_module_op(
self._output_saved_model_path
)
# Tests that the output graph contains multiply for symmetric
# dequantization.
self.assertTrue(re.search('stablehlo.multiply', module_str))
# Tests that the output graph contains float dot_general.
self.assertTrue(
re.search('stablehlo.dot_general.*xf32>.*xf32>.*xf32>', module_str)
)
# Due to other meta data, the compression is not exactly 1/4.
self.assertLess(
testing.get_size_ratio(
self._output_saved_model_path, self._input_saved_model_path
),
0.3,
)
@parameterized.parameters(
testing.parameter_combinations([{
'bias_fn': (
None,
nn_ops.bias_add,
),
'activation_fn': (
None,
nn_ops.relu,
nn_ops.relu6,
),
'has_batch_norm': (False,),
'input_shape_dynamic': (
False,
True,
),
'has_func_alias': (False, True),
}])
)
@test_util.run_in_graph_and_eager_modes
def test_conv_weight_only_model(
self,
bias_fn: Optional[ops.Operation],
activation_fn: Optional[ops.Operation],
has_batch_norm: bool,
input_shape_dynamic: bool,
dilations: Sequence[int] = None,
has_func_alias: bool = False,
):
input_shape = (None, 3, 4, 3) if input_shape_dynamic else (1, 3, 4, 3)
filter_shape = (2, 3, 3, 2)
strides = (1, 1, 1, 1)
model = self._create_conv2d_model(
input_shape,
filter_shape,
self._input_saved_model_path,
bias_fn,
activation_fn,
has_batch_norm,
strides,
dilations,
'SAME',
has_func_alias,
)
rng = np.random.default_rng(1234)
static_input_shape = [dim if dim is not None else 2 for dim in input_shape]
input_data = ops.convert_to_tensor(
rng.uniform(low=0.0, high=1.0, size=static_input_shape).astype(
np.float32
)
)
config = qc.QuantizationConfig(
weight_only_ptq_preset=qc.WeightOnlyPtqPreset(),
tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]),
)
quantization.quantize_saved_model(
self._input_saved_model_path,
self._output_saved_model_path,
config,
)
expected_outputs = model.conv2d(input_data)
root = load.load(self._output_saved_model_path)
self.assertCountEqual(root.signatures.keys(), {'serving_default'})
new_outputs = root.signatures['serving_default'](
input_tensor=ops.convert_to_tensor(input_data)
)
# Tests that the quantized graph outputs similar values. The rtol and atol
# values are arbitrary.
self.assertAllClose(new_outputs, expected_outputs, rtol=0.03, atol=0.2)
module_str = self._extract_first_xla_call_module_op(
self._output_saved_model_path
)
# Tests that the output graph contains multiply op for symmetric
# dequantization.
self.assertTrue(re.search('stablehlo.multiply', module_str))
# Tests that the output graph contains float dot_general.
self.assertTrue(
re.search('stablehlo.convolution.*xf32>.*xf32>.*xf32>', module_str)
)
if has_func_alias:
func_aliases = self._get_function_aliases(
self._output_saved_model_path, [tag_constants.SERVING]
)
self.assertCountEqual(
func_aliases.values(), [quantize_model_test_base.FUNC_ALIAS]
)
# Due to other meta data, the compression is not exactly 1/4.
self.assertLess(
testing.get_size_ratio(
self._output_saved_model_path, self._input_saved_model_path
),
0.4,
)
@parameterized.parameters(
testing.parameter_combinations([{
'shape_dynamic': (
False,
True,
),
}])
)
@test_util.run_in_graph_and_eager_modes
def test_add_ptq_model(
self,
shape_dynamic: bool,
):
input_shape = (None, 3, 4, 3) if shape_dynamic else (2, 3, 4, 3)
self._create_add_model(
input_shape,
self._input_saved_model_path,
)
# Generate model input data.
rng = np.random.default_rng(seed=42)
static_input_shape = [dim if dim is not None else 2 for dim in input_shape]
def data_gen() -> repr_dataset.RepresentativeDataset:
for _ in range(100):
yield {
'input_tensor': rng.uniform(
low=0.0, high=1.0, size=static_input_shape
).astype(np.float32)
}
dataset_path = self.create_tempfile('tfrecord').full_path
path_map = {'serving_default': dataset_path}
repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save(
{'serving_default': data_gen()}
)
config = qc.QuantizationConfig(
static_range_ptq_preset=qc.StaticRangePtqPreset(
representative_datasets=[
qc.RepresentativeDatasetConfig(
tf_record=qc.TfRecordFile(path=dataset_path)
)
],
),
tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]),
)
quantization.quantize_saved_model(
self._input_saved_model_path,
self._output_saved_model_path,
config,
)
self.assertEqual(
self._get_num_xla_call_module_op(self._output_saved_model_path), 1
)
module_str = self._extract_first_xla_call_module_op(
self._output_saved_model_path
)
# Check add is not quantized.
self.assertTrue(re.search(r'stablehlo.add.*f32>', module_str))
@parameterized.parameters(
testing.parameter_combinations([{
'shape_dynamic': (
False,
True,
),
}])
)
@test_util.run_in_graph_and_eager_modes
def test_add_weight_only_model(
self,
shape_dynamic: bool,
):
input_shape = (None, 3, 4, 3) if shape_dynamic else (2, 3, 4, 3)
self._create_add_model(
input_shape,
self._input_saved_model_path,
)
# Generate model input data.
rng = np.random.default_rng(seed=42)
static_input_shape = [dim if dim is not None else 2 for dim in input_shape]
def data_gen() -> repr_dataset.RepresentativeDataset:
for _ in range(100):
yield {
'input_tensor': rng.uniform(
low=0.0, high=1.0, size=static_input_shape
).astype(np.float32)
}
dataset_path = self.create_tempfile('tfrecord').full_path
path_map = {'serving_default': dataset_path}
repr_dataset.TfRecordRepresentativeDatasetSaver(path_map).save(
{'serving_default': data_gen()}
)
config = qc.QuantizationConfig(
weight_only_ptq_preset=qc.WeightOnlyPtqPreset(),
tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]),
)
quantization.quantize_saved_model(
self._input_saved_model_path,
self._output_saved_model_path,
config,
)
self.assertEqual(
self._get_num_xla_call_module_op(self._output_saved_model_path), 1
)
module_str = self._extract_first_xla_call_module_op(
self._output_saved_model_path
)
# Check add is not quantized.
self.assertTrue(re.search(r'stablehlo.add.*f32>', module_str), module_str)
def test_save_quantization_report_file(self):
"""Tests that the quantization report file is created.
Also test that it is populated with textproto of `QuantizationResults`.
"""
input_shape = (1, 3, 4, 3)
filter_shape = (2, 3, 3, 2)
self._create_conv2d_model(
input_shape,
filter_shape,
self._input_saved_model_path,
)
report_file_path = self.create_tempfile('report.txtpb').full_path
config = qc.QuantizationConfig(
weight_only_ptq_preset=qc.WeightOnlyPtqPreset(),
tf_saved_model=qc.TfSavedModelConfig(tags=[tag_constants.SERVING]),
report_file_path=report_file_path,
)
quantization.quantize_saved_model(
self._input_saved_model_path,
self._output_saved_model_path,
config,
)
# Test the contents of the report file, which is a textproto of
# `QuantizationResults`.
self.assertTrue(os.path.exists(report_file_path))
with open(report_file_path, 'r') as f:
quantization_results_textpb = f.read()
results = qc.QuantizationResults()
text_format.Parse(quantization_results_textpb, results)
self.assertProtoEquals(
expected_message_maybe_ascii=r"""
results {
quantizable_unit { name: "composite_conv_fn_1" }
method {
weight_only_ptq {
input_quantized_types {
key: 1
value { dimension_specs {} }
}
}
}
}
""",
validate_message=results,
)
if __name__ == '__main__':
test.main()
| WeightOnlyQuantizationTest |
python | wandb__wandb | wandb/vendor/pygments/lexer.py | {
"start": 24288,
"end": 29031
} | class ____(RegexLexer):
"""
A RegexLexer that uses a context object to store its state.
"""
def get_tokens_unprocessed(self, text=None, context=None):
"""
Split ``text`` into (tokentype, text) pairs.
If ``context`` is given, use this lexer context instead.
"""
tokendefs = self._tokens
if not context:
ctx = LexerContext(text, 0)
statetokens = tokendefs['root']
else:
ctx = context
statetokens = tokendefs[ctx.stack[-1]]
text = ctx.text
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, ctx.pos, ctx.end)
if m:
if action is not None:
if type(action) is _TokenType:
yield ctx.pos, action, m.group()
ctx.pos = m.end()
else:
for item in action(self, m, ctx):
yield item
if not new_state:
# altered the state stack?
statetokens = tokendefs[ctx.stack[-1]]
# CAUTION: callback must set ctx.pos!
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
ctx.stack.pop()
elif state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
ctx.stack.append(state)
elif isinstance(new_state, int):
# pop
del ctx.stack[new_state:]
elif new_state == '#push':
ctx.stack.append(ctx.stack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[ctx.stack[-1]]
break
else:
try:
if ctx.pos >= ctx.end:
break
if text[ctx.pos] == '\n':
# at EOL, reset state to "root"
ctx.stack = ['root']
statetokens = tokendefs['root']
yield ctx.pos, Text, u'\n'
ctx.pos += 1
continue
yield ctx.pos, Error, text[ctx.pos]
ctx.pos += 1
except IndexError:
break
def do_insertions(insertions, tokens):
"""
Helper for lexers which must combine the results of several
sublexers.
``insertions`` is a list of ``(index, itokens)`` pairs.
Each ``itokens`` iterable should be inserted at position
``index`` into the token stream given by the ``tokens``
argument.
The result is a combined token stream.
TODO: clean up the code here.
"""
insertions = iter(insertions)
try:
index, itokens = next(insertions)
except StopIteration:
# no insertions
for item in tokens:
yield item
return
realpos = None
insleft = True
# iterate over the token stream where we want to insert
# the tokens from the insertion list.
for i, t, v in tokens:
# first iteration. store the postition of first item
if realpos is None:
realpos = i
oldi = 0
while insleft and i + len(v) >= index:
tmpval = v[oldi:index - i]
yield realpos, t, tmpval
realpos += len(tmpval)
for it_index, it_token, it_value in itokens:
yield realpos, it_token, it_value
realpos += len(it_value)
oldi = index - i
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
yield realpos, t, v[oldi:]
realpos += len(v) - oldi
# leftover tokens
while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
try:
index, itokens = next(insertions)
except StopIteration:
insleft = False
break # not strictly necessary
| ExtendedRegexLexer |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 25374,
"end": 25726
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("pl_PL")
Faker.seed(0)
def test_day(self):
day = self.fake.day_of_week()
assert day in PlProvider.DAY_NAMES.values()
def test_month(self):
month = self.fake.month_name()
assert month in PlProvider.MONTH_NAMES.values()
| TestPlPL |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_events_monitors_test.py | {
"start": 8666,
"end": 9455
} | class ____(test_util.TensorFlowTestCase):
"""Unit tests for alert-class objects."""
def testInfNanMonitor(self):
alert = debug_events_monitors.InfNanAlert(
1234,
"FooOp",
1,
size=1000,
num_neg_inf=5,
num_pos_inf=10,
num_nan=20,
execution_index=777,
graph_execution_trace_index=888)
self.assertEqual(alert.wall_time, 1234)
self.assertEqual(alert.op_type, "FooOp")
self.assertEqual(alert.output_slot, 1)
self.assertEqual(alert.size, 1000)
self.assertEqual(alert.num_neg_inf, 5)
self.assertEqual(alert.num_pos_inf, 10)
self.assertEqual(alert.num_nan, 20)
self.assertEqual(alert.execution_index, 777)
self.assertEqual(alert.graph_execution_trace_index, 888)
| AlertDataObjectsTest |
python | pandas-dev__pandas | pandas/core/apply.py | {
"start": 48989,
"end": 52414
} | class ____(NDFrameApply):
obj: Series
axis: AxisInt = 0
by_row: Literal[False, "compat", "_compat"] # only relevant for apply()
def __init__(
self,
obj: Series,
func: AggFuncType,
*,
by_row: Literal[False, "compat", "_compat"] = "compat",
args,
kwargs,
) -> None:
super().__init__(
obj,
func,
raw=False,
result_type=None,
by_row=by_row,
args=args,
kwargs=kwargs,
)
def apply(self) -> DataFrame | Series:
obj = self.obj
if len(obj) == 0:
return self.apply_empty_result()
# dispatch to handle list-like or dict-like
if is_list_like(self.func):
return self.apply_list_or_dict_like()
if isinstance(self.func, str):
# if we are a string, try to dispatch
return self.apply_str()
if self.by_row == "_compat":
return self.apply_compat()
# self.func is Callable
return self.apply_standard()
def agg(self):
result = super().agg()
if result is None:
obj = self.obj
func = self.func
# string, list-like, and dict-like are entirely handled in super
assert callable(func)
result = func(obj, *self.args, **self.kwargs)
return result
def apply_empty_result(self) -> Series:
obj = self.obj
return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__(
obj, method="apply"
)
def apply_compat(self):
"""compat apply method for funcs in listlikes and dictlikes.
Used for each callable when giving listlikes and dictlikes of callables to
apply. Needed for compatibility with Pandas < v2.1.
.. versionadded:: 2.1.0
"""
obj = self.obj
func = self.func
if callable(func):
f = com.get_cython_func(func)
if f and not self.args and not self.kwargs:
return obj.apply(func, by_row=False)
try:
result = obj.apply(func, by_row="compat")
except (ValueError, AttributeError, TypeError):
result = obj.apply(func, by_row=False)
return result
def apply_standard(self) -> DataFrame | Series:
# caller is responsible for ensuring that f is Callable
func = cast(Callable, self.func)
obj = self.obj
if isinstance(func, np.ufunc):
with np.errstate(all="ignore"):
return func(obj, *self.args, **self.kwargs)
elif not self.by_row:
return func(obj, *self.args, **self.kwargs)
if self.args or self.kwargs:
# _map_values does not support args/kwargs
def curried(x):
return func(x, *self.args, **self.kwargs)
else:
curried = func
mapped = obj._map_values(mapper=curried)
if len(mapped) and isinstance(mapped[0], ABCSeries):
# GH#43986 Need to do list(mapped) in order to get treated as nested
# See also GH#25959 regarding EA support
return obj._constructor_expanddim(list(mapped), index=obj.index)
else:
return obj._constructor(mapped, index=obj.index).__finalize__(
obj, method="apply"
)
| SeriesApply |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/async_job.py | {
"start": 8559,
"end": 19883
} | class ____(AsyncJob):
"""Wraps FB AdReportRun with retry/split logic driven in _check_status()."""
page_size = 100
def __init__(
self,
edge_object: Union[AdAccount, Campaign, AdSet, Ad],
params: Mapping[str, Any],
job_timeout: timedelta,
primary_key: Optional[List[str]] = None,
object_breakdowns: Optional[Mapping[str, str]] = None,
**kwargs,
):
super().__init__(**kwargs)
self._params = dict(params)
self._params["time_range"] = {
"since": self._interval.to_date_string(self._interval.start),
"until": self._interval.to_date_string(self._interval.end),
}
self._job_timeout = job_timeout
self._edge_object = edge_object
self._job: Optional[AdReportRun] = None
self._primary_key = primary_key or []
self._object_breakdowns = dict(object_breakdowns or {})
self._start_time = None
self._finish_time = None
self._failed = False
def _log_throttle(self, where: str):
throttle = getattr(self._api, "ads_insights_throttle", None)
if throttle:
logger.info(
f"{self}: throttle ({where}): per_account={getattr(throttle, 'per_account', 'N/A')}, "
f"per_application={getattr(throttle, 'per_application', 'N/A')}"
)
def start(self, api_limit: "APILimit") -> None:
self._api_limit = api_limit
if self.started:
raise RuntimeError(f"{self}: Incorrect usage of start - the job already started, use restart instead")
if not api_limit.try_consume():
return # Manager will try again later
self._job = self._edge_object.get_insights(params=self._params, is_async=True)
self._start_time = ab_datetime_now()
self._attempt_number += 1
logger.info(f"{self}: created AdReportRun")
@property
def started(self) -> bool:
return self._start_time is not None
@property
def elapsed_time(self) -> Optional[timedelta]:
"""Elapsed time since the job start"""
if not self._start_time:
return None
end_time = self._finish_time or ab_datetime_now()
return end_time - self._start_time
@property
def completed(self) -> bool:
"""Check job status and return True if it is completed, use failed/succeeded to check if it was successful
:return: True if completed, False - if task still running
:raises: JobException in case job failed to start, failed or timed out
"""
return self._finish_time is not None
@property
def failed(self) -> bool:
"""Tell if the job previously failed"""
return self._failed
def update_job(self, batch: Optional[FacebookAdsApiBatch] = None):
if not self._job:
return
if self.completed:
job_status = self._job.get("async_status")
percent = self._job.get("async_percent_completion")
logger.info(f"{self}: is {percent} complete ({job_status})")
# No need to update job status if its already completed
return
self._job.api_get(
batch=batch,
success=self._batch_success_handler,
failure=self._batch_failure_handler,
)
def _batch_success_handler(self, response: FacebookResponse):
self._job = ObjectParser(reuse_object=self._job).parse_single(response.json())
self._check_status()
def _batch_failure_handler(self, response: FacebookResponse):
logger.info(f"{self}: Request failed with response: {response.body()}.")
def _check_status(self) -> bool:
"""Advance to terminal state, release capacity, and decide retry/split."""
job_status = self._job.get("async_status")
percent = self._job.get("async_percent_completion")
logger.info(f"{self}: is {percent} complete ({job_status})")
released = False
if self.elapsed_time and self.elapsed_time > self._job_timeout:
logger.info(f"{self}: exceeded max allowed time {self._job_timeout}.")
self._finish_time = ab_datetime_now()
self._failed = True
released = True
elif job_status == Status.COMPLETED:
self._finish_time = ab_datetime_now() # TODO: is not actual running time, but interval between check_status calls
released = True
elif job_status in [Status.FAILED, Status.SKIPPED]:
self._finish_time = ab_datetime_now()
self._failed = True
logger.info(f"{self}: has status {job_status} after {self.elapsed_time.total_seconds()} seconds.")
released = True
if released and self._api_limit:
self._api_limit.release()
# Retry/split policy without explicit restart():
# - 1st failure → reset state so start() can be called again
# - 2nd+ failure → produce replacement jobs via _split_job()
if self._failed:
if self._attempt_number == 1:
self._job = None
self._failed = False
self._start_time = None
elif self._attempt_number >= 2:
self.new_jobs = self._split_job()
self._finish_time = None
return self.completed
# --------------------------- splitting -----------------------------------
def _split_job(self) -> List["AsyncJob"]:
if isinstance(self._edge_object, AdAccount):
return self._split_by_edge_class(Campaign)
elif isinstance(self._edge_object, Campaign):
return self._split_by_edge_class(AdSet)
elif isinstance(self._edge_object, AdSet):
return self._split_by_edge_class(Ad)
elif isinstance(self._edge_object, Ad):
return [self._split_by_fields_parent()]
else:
raise ValueError("Unsupported edge for splitting")
def _split_by_edge_class(self, edge_class: Union[Type[Campaign], Type[AdSet], Type[Ad]]) -> List["AsyncJob"]:
if edge_class == Campaign:
pk_name, level = "campaign_id", "campaign"
elif edge_class == AdSet:
pk_name, level = "adset_id", "adset"
elif edge_class == Ad:
pk_name, level = "ad_id", "ad"
else:
raise RuntimeError("Unsupported edge_class")
ids = self._collect_child_ids(pk_name=pk_name, level=level)
if not ids:
raise ValueError(f"No child IDs at level={level}")
return [
InsightAsyncJob(
api=self._api,
edge_object=edge_class(pk),
params=self._params,
interval=self._interval,
job_timeout=self._job_timeout,
primary_key=self._primary_key,
object_breakdowns=self._object_breakdowns,
)
for pk in ids
]
def _collect_child_ids(self, pk_name: str, level: str) -> List[str]:
"""
Start a tiny async insights job to collect child IDs, poll until terminal,
then return the list of IDs. Separated for unit testing.
"""
since = AirbyteDateTime.from_datetime(datetime.combine(self._interval.start - timedelta(days=28 + 1), datetime.min.time()))
since = validate_start_date(since)
params = {
"fields": [pk_name],
"level": level,
"time_range": {"since": since.strftime("%Y-%m-%d"), "until": self._interval.end.strftime("%Y-%m-%d")},
}
try:
id_job: AdReportRun = self._edge_object.get_insights(params=params, is_async=True)
except Exception as e:
raise ValueError(f"Failed to start ID-collection at level={level}: {e}") from e
start_ts = ab_datetime_now()
while True:
id_job = id_job.api_get()
status = id_job.get("async_status")
percent = id_job.get("async_percent_completion")
logger.info(f"[Split:{level}] status={status}, {percent}%")
if status == Status.COMPLETED:
break
if status in (Status.FAILED, Status.SKIPPED):
raise ValueError(f"ID-collection failed for level={level}: {status}")
if (ab_datetime_now() - start_ts) > self._job_timeout:
raise ValueError(f"ID-collection timed out for level={level}")
time.sleep(30)
try:
result_cursor = id_job.get_result(params={"limit": self.page_size})
except FacebookBadObjectError as e:
raise ValueError(f"Failed to fetch ID-collection results for level={level}: {e}") from e
ids = {row[pk_name] for row in result_cursor if pk_name in row}
logger.info(f"[Split:{level}] collected {len(ids)} {pk_name}(s)")
return list(ids)
def _split_by_fields_parent(self) -> ParentAsyncJob:
all_fields: List[str] = list(self._params.get("fields", []))
split_candidates = [f for f in all_fields if f not in self._primary_key]
if len(split_candidates) <= 1:
raise ValueError("Cannot split by fields: not enough non-PK fields")
mid = len(split_candidates) // 2
part_a, part_b = split_candidates[:mid], split_candidates[mid:]
params_a = dict(self._params)
params_a["fields"] = self._primary_key + part_a
params_b = dict(self._params)
params_b["fields"] = self._primary_key + part_b
job_a = InsightAsyncJob(
api=self._api,
edge_object=self._edge_object,
params=params_a,
interval=self._interval,
job_timeout=self._job_timeout,
primary_key=self._primary_key,
object_breakdowns=self._object_breakdowns,
)
job_b = InsightAsyncJob(
api=self._api,
edge_object=self._edge_object,
params=params_b,
interval=self._interval,
job_timeout=self._job_timeout,
primary_key=self._primary_key,
object_breakdowns=self._object_breakdowns,
)
logger.info("%s split by fields: common=%d, A=%d, B=%d", self, len(self._primary_key), len(part_a), len(part_b))
return ParentAsyncJob(
jobs=[job_a, job_b],
api=self._api,
interval=self._interval,
primary_key=self._primary_key,
object_breakdowns=self._object_breakdowns,
)
# --------------------------- results -------------------------------------
@backoff_policy
def get_result(self) -> Any:
"""Retrieve result of the finished job."""
if not self._job or self.failed:
raise RuntimeError(f"{self}: Incorrect usage of get_result - the job is not started or failed")
return self._job.get_result(params={"limit": self.page_size})
def __str__(self) -> str:
"""String representation of the job wrapper."""
job_id = self._job["report_run_id"] if self._job else "<None>"
breakdowns = self._params.get("breakdowns", [])
return f"InsightAsyncJob(id={job_id}, {self._edge_object}, time_range={self._interval}, breakdowns={breakdowns}, fields={self._params.get('fields', [])})"
| InsightAsyncJob |
python | networkx__networkx | networkx/readwrite/tests/test_leda.py | {
"start": 35,
"end": 1392
} | class ____:
def test_parse_leda(self):
data = """#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|"""
G = nx.parse_leda(data)
G = nx.parse_leda(data.split("\n"))
assert sorted(G.nodes()) == ["v1", "v2", "v3", "v4", "v5"]
assert sorted(G.edges(data=True)) == [
("v1", "v2", {"label": "4"}),
("v1", "v3", {"label": "3"}),
("v2", "v3", {"label": "2"}),
("v3", "v4", {"label": "3"}),
("v3", "v5", {"label": "7"}),
("v4", "v5", {"label": "6"}),
("v5", "v1", {"label": "foo"}),
]
def test_read_LEDA(self):
fh = io.BytesIO()
data = """#header section \nLEDA.GRAPH \nstring\nint\n-1\n#nodes section\n5 \n|{v1}| \n|{v2}| \n|{v3}| \n|{v4}| \n|{v5}| \n\n#edges section\n7 \n1 2 0 |{4}| \n1 3 0 |{3}| \n2 3 0 |{2}| \n3 4 0 |{3}| \n3 5 0 |{7}| \n4 5 0 |{6}| \n5 1 0 |{foo}|"""
G = nx.parse_leda(data)
fh.write(data.encode("UTF-8"))
fh.seek(0)
Gin = nx.read_leda(fh)
assert sorted(G.nodes()) == sorted(Gin.nodes())
assert sorted(G.edges()) == sorted(Gin.edges())
| TestLEDA |
python | PrefectHQ__prefect | src/prefect/server/events/actions.py | {
"start": 42989,
"end": 46750
} | class ____(JinjaTemplateAction):
"""Call a webhook when an Automation is triggered."""
type: Literal["call-webhook"] = "call-webhook"
block_document_id: UUID = Field(
description="The identifier of the webhook block to use"
)
payload: str = Field(
default="",
description="An optional templatable payload to send when calling the webhook.",
)
@field_validator("payload", mode="before")
@classmethod
def ensure_payload_is_a_string(
cls, value: Union[str, Dict[str, Any], None]
) -> Optional[str]:
"""Temporary measure while we migrate payloads from being a dictionary to
a string template. This covers both reading from the database where values
may currently be a dictionary, as well as the API, where older versions of the
frontend may be sending a JSON object with the single `"message"` key."""
if value is None:
return value
if isinstance(value, str):
return value
return orjson.dumps(value, option=orjson.OPT_INDENT_2).decode()
@field_validator("payload")
@classmethod
def validate_payload_templates(cls, value: Optional[str]) -> Optional[str]:
"""
Validate user-provided payload template.
"""
if not value:
return value
cls.validate_template(value, "payload")
return value
async def _get_webhook_block(self, triggered_action: "TriggeredAction") -> Webhook:
async with await self.orchestration_client(triggered_action) as orchestration:
response = await orchestration.read_block_document_raw(
self.block_document_id
)
if response.status_code >= 300:
raise ActionFailed(self.reason_from_response(response))
try:
block_document = BlockDocument.model_validate(response.json())
block = await _load_block_from_block_document(block_document)
except Exception as e:
raise ActionFailed(f"The webhook block was invalid: {e!r}")
if not isinstance(block, Webhook):
raise ActionFailed("The referenced block was not a webhook block")
self._resulting_related_resources += [
RelatedResource.model_validate(
{
"prefect.resource.id": f"prefect.block-document.{self.block_document_id}",
"prefect.resource.role": "block",
"prefect.resource.name": block_document.name,
}
),
RelatedResource.model_validate(
{
"prefect.resource.id": f"prefect.block-type.{block.get_block_type_slug()}",
"prefect.resource.role": "block-type",
}
),
]
return block
async def act(self, triggered_action: "TriggeredAction") -> None:
block = await self._get_webhook_block(triggered_action=triggered_action)
(payload,) = await self._render([self.payload], triggered_action)
try:
response = await block.call(payload=payload)
ok_headers = {
k: v for k, v in response.headers.items() if not should_redact_header(k)
}
self._result_details.update(
{
"status_code": response.status_code,
"response_body": truncated_to(1000, response.text),
"response_headers": {**(ok_headers or {})},
}
)
except Exception as e:
raise ActionFailed(f"Webhook call failed: {e!r}")
| CallWebhook |
python | doocs__leetcode | solution/0500-0599/0589.N-ary Tree Preorder Traversal/Solution2.py | {
"start": 152,
"end": 485
} | class ____:
def preorder(self, root: 'Node') -> List[int]:
ans = []
if root is None:
return ans
stk = [root]
while stk:
node = stk.pop()
ans.append(node.val)
for child in node.children[::-1]:
stk.append(child)
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/multi_client_input_util_test.py | {
"start": 4353,
"end": 10617
} | class ____:
"""tf.data service cluster with dispatcher and workers as subprocesses.
To run the cluster in co-located mode, set `num_workers` to 0 and create the
tf.data service workers manually in each client process.
"""
def __init__(self,
test_name,
num_workers,
worker_ports=None,
worker_addresses=None):
self._test_name = test_name
self._num_workers = num_workers
self._start_dispatcher(worker_addresses)
self._start_workers(worker_ports)
def _start_dispatcher(self, worker_addresses, port=0):
self._pipe_to_dispatcher, dispatcher_pipe = mp_context.Pipe(True)
logging.info(
'Starting remote dispatcher on port %d with worker addresses: %s', port,
worker_addresses)
self._dispatcher_process = mp_context.Process(
target=create_dispatcher,
args=(self._test_name, worker_addresses, port, dispatcher_pipe),
)
self._dispatcher_process.start()
self._dispatcher_address = self._pipe_to_dispatcher.recv()
def dispatcher_address(self):
return self._dispatcher_address
def _start_workers(self, worker_ports=None):
self._workers = []
self._worker_addresses = []
self._worker_pipes = []
for idx in range(self._num_workers):
port = worker_ports[idx] if worker_ports else None
self._start_worker(port)
def _start_worker(self, port=None):
pipe_to_worker, worker_pipe = mp_context.Pipe(True)
logging.info(
'Starting remote worker on port %d with dispatcher address: %s', port,
self._dispatcher_address)
worker_process = mp_context.Process(
target=create_worker,
args=(self._test_name, self._dispatcher_address, port, worker_pipe),
)
worker_process.start()
worker_address = self._pipe_to_worker.recv()
self._workers.append(worker_process)
self._worker_addresses.append(worker_address)
self._worker_pipes.append(pipe_to_worker)
def worker_addresses(self):
return self._worker_addresses
def stop(self):
# Segfault logs may still be printed because clean exit of child processes
# is not always possible. This will not affect the outcome of the test.
logging.info('Will try to stop TFDataServiceCluster!')
for idx in range(self._num_workers):
address = self._worker_addresses[idx]
pipe_to_worker = self._worker_pipes[idx]
logging.info('Stopping worker %s...', address)
pipe_to_worker.send('stop')
if pipe_to_worker.poll(2):
if pipe_to_worker.recv() == 'stopped':
logging.info('Successfully stopped worker %s', address)
self._workers[idx].terminate()
logging.info('Stopping dispatcher...')
self._pipe_to_dispatcher.send('stop')
if self._pipe_to_dispatcher.poll(2):
if self._pipe_to_dispatcher.recv() == 'stopped':
logging.info('Successfully stopped dispatcher')
self._dispatcher_process.terminate()
def setup_local_devices(num_devices):
physical_cpus = tf_config.list_physical_devices('CPU')
tf_config.set_logical_device_configuration(
physical_cpus[0],
[context.LogicalDeviceConfiguration() for _ in range(num_devices)],
)
def setup_client(client_id: int, test_name: str, env: Mapping[str, str],
num_local_devices: int):
"""Set up a DTensor client for use in multi-client tests.
Args:
client_id: the index of the client.
test_name: the name of the test under which this client is running, used To
identify the log file artifact containing the test output.
env: a dictionary of environment variables to update.
num_local_devices: number of local devices to set up.
"""
# Redirect client's stderr/stdout to undeclared outputs on sponge.
redirect_output(f'test-{test_name}-process-{client_id}.log')
# Update any specified environment variables.
for var, val in env.items():
os.environ[var] = val
# Set up local devices.
setup_local_devices(num_local_devices)
# Set up DTensor cluster and enable collectives.
accelerator_util.initialize_accelerator_system()
def run_client(
client_id: int,
test_name: str,
env: Mapping[str, str],
num_local_devices: int,
dispatcher_address: str,
worker_port: int,
batch_size: int,
dataset_paths: List[str],
mesh: Mesh,
batch_dim: Optional[str],
layouts: Tuple[Layout, Layout],
) -> List[Tuple[Any, Any]]:
# Co-located tf.data service mode. It is important to hold the worker object
# until the end otherwise it will get garbage collected.
worker, worker_address = create_worker( # pylint: disable=unused-variable
test_name, dispatcher_address, port=worker_port)
logging.info(
'tf.data service worker running at %s',
worker_address,
)
setup_client(client_id, test_name, env, num_local_devices)
def decode_fn(record_bytes):
decoded = parsing_ops.parse_single_example_v2(
serialized=record_bytes,
features={
'idx': parsing_config.FixedLenFeature([], dtype=dtypes.int64),
'elem': parsing_config.FixedLenFeature([], dtype=dtypes.string),
},
)
parsed_elem = gen_parsing_ops.parse_tensor(decoded['elem'], dtypes.int32)
elem = check_ops.ensure_shape(
parsed_elem, [IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS]
)
return decoded['idx'], elem
dataset = dataset_ops.DatasetV2.from_tensor_slices(dataset_paths)
dataset = dataset.interleave(readers.TFRecordDatasetV2)
dataset = dataset.map(decode_fn)
tf_data_service_config = input_util.TFDataServiceConfig(
dispatcher_address=dispatcher_address, job_name=TF_DATA_SERVICE_JOB_NAME
)
d_dataset = input_util.DTensorDataset(
dataset=dataset,
global_batch_size=batch_size,
mesh=mesh,
layouts=layouts,
batch_dim=batch_dim,
tf_data_service_config=tf_data_service_config,
)
# Subprocesses cannot return a sharded DTensor as it triggers a copy and
# copying non-replicated DTensors is not supported. So instead we unpack it
# and return the component tensors.
ret = []
for batch_idx, elem in d_dataset:
n_batch_idx = api.unpack(batch_idx)
n_elem = api.unpack(elem)
ret.append((n_batch_idx, n_elem))
return ret
| TFDataServiceCluster |
python | python__mypy | mypyc/codegen/emitwrapper.py | {
"start": 32268,
"end": 37926
} | class ____:
"""Helper that simplifies the generation of wrapper functions."""
# TODO: Use this for more wrappers
def __init__(self, cl: ClassIR | None, emitter: Emitter) -> None:
self.cl = cl
self.emitter = emitter
self.cleanups: list[str] = []
self.optional_args: list[RuntimeArg] = []
self.traceback_code = ""
def set_target(self, fn: FuncIR) -> None:
"""Set the wrapped function.
It's fine to modify the attributes initialized here later to customize
the wrapper function.
"""
self.target_name = fn.name
self.target_cname = fn.cname(self.emitter.names)
self.num_bitmap_args = fn.sig.num_bitmap_args
if self.num_bitmap_args:
self.args = fn.args[: -self.num_bitmap_args]
else:
self.args = fn.args
self.arg_names = [arg.name for arg in self.args]
self.ret_type = fn.ret_type
def wrapper_name(self) -> str:
"""Return the name of the wrapper function."""
return "{}{}{}".format(
DUNDER_PREFIX,
self.target_name,
self.cl.name_prefix(self.emitter.names) if self.cl else "",
)
def use_goto(self) -> bool:
"""Do we use a goto for error handling (instead of straight return)?"""
return bool(self.cleanups or self.traceback_code)
def emit_header(self) -> None:
"""Emit the function header of the wrapper implementation."""
input_args = ", ".join(f"PyObject *obj_{arg}" for arg in self.arg_names)
self.emitter.emit_line(
"static PyObject *{name}({input_args}) {{".format(
name=self.wrapper_name(), input_args=input_args
)
)
def emit_arg_processing(
self, error: ErrorHandler | None = None, raise_exception: bool = True
) -> None:
"""Emit validation and unboxing of arguments."""
error = error or self.error()
bitmap_arg_index = 0
for arg_name, arg in zip(self.arg_names, self.args):
# Suppress the argument check for *args/**kwargs, since we know it must be right.
typ = arg.type if arg.kind not in (ARG_STAR, ARG_STAR2) else object_rprimitive
optional = arg in self.optional_args
generate_arg_check(
arg_name,
typ,
self.emitter,
error,
raise_exception=raise_exception,
optional=optional,
bitmap_arg_index=bitmap_arg_index,
)
if optional and typ.error_overlap:
bitmap_arg_index += 1
def emit_call(self, not_implemented_handler: str = "") -> None:
"""Emit call to the wrapper function.
If not_implemented_handler is non-empty, use this C code to handle
a NotImplemented return value (if it's possible based on the return type).
"""
native_args = ", ".join(f"arg_{arg}" for arg in self.arg_names)
if self.num_bitmap_args:
bitmap_args = ", ".join(
[bitmap_name(i) for i in reversed(range(self.num_bitmap_args))]
)
native_args = f"{native_args}, {bitmap_args}"
ret_type = self.ret_type
emitter = self.emitter
if ret_type.is_unboxed or self.use_goto():
# TODO: The Py_RETURN macros return the correct PyObject * with reference count
# handling. Are they relevant?
emitter.emit_line(
"{}retval = {}{}({});".format(
emitter.ctype_spaced(ret_type), NATIVE_PREFIX, self.target_cname, native_args
)
)
emitter.emit_lines(*self.cleanups)
if ret_type.is_unboxed:
emitter.emit_error_check("retval", ret_type, "return NULL;")
emitter.emit_box("retval", "retbox", ret_type, declare_dest=True)
emitter.emit_line("return {};".format("retbox" if ret_type.is_unboxed else "retval"))
else:
if not_implemented_handler and not isinstance(ret_type, RInstance):
# The return value type may overlap with NotImplemented.
emitter.emit_line(
"PyObject *retbox = {}{}({});".format(
NATIVE_PREFIX, self.target_cname, native_args
)
)
emitter.emit_lines(
"if (retbox == Py_NotImplemented) {",
not_implemented_handler,
"}",
"return retbox;",
)
else:
emitter.emit_line(f"return {NATIVE_PREFIX}{self.target_cname}({native_args});")
# TODO: Tracebacks?
def error(self) -> ErrorHandler:
"""Figure out how to deal with errors in the wrapper."""
if self.cleanups or self.traceback_code:
# We'll have a label at the end with error handling code.
return GotoHandler("fail")
else:
# Nothing special needs to done to handle errors, so just return.
return ReturnHandler("NULL")
def emit_error_handling(self) -> None:
"""Emit error handling block at the end of the wrapper, if needed."""
emitter = self.emitter
if self.use_goto():
emitter.emit_label("fail")
emitter.emit_lines(*self.cleanups)
if self.traceback_code:
emitter.emit_line(self.traceback_code)
emitter.emit_line("return NULL;")
def finish(self) -> None:
self.emitter.emit_line("}")
| WrapperGenerator |
python | doocs__leetcode | solution/2900-2999/2948.Make Lexicographically Smallest Array by Swapping Elements/Solution.py | {
"start": 0,
"end": 496
} | class ____:
def lexicographicallySmallestArray(self, nums: List[int], limit: int) -> List[int]:
n = len(nums)
arr = sorted(zip(nums, range(n)))
ans = [0] * n
i = 0
while i < n:
j = i + 1
while j < n and arr[j][0] - arr[j - 1][0] <= limit:
j += 1
idx = sorted(k for _, k in arr[i:j])
for k, (x, _) in zip(idx, arr[i:j]):
ans[k] = x
i = j
return ans
| Solution |
python | numpy__numpy | numpy/_core/tests/test_deprecations.py | {
"start": 7828,
"end": 9576
} | class ____(_DeprecationTestCase):
message = r".*stop allowing conversion of out-of-bound.*"
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
def test_deprecated_scalar(self, dtype):
dtype = np.dtype(dtype)
info = np.iinfo(dtype)
# Cover the most common creation paths (all end up in the
# same place):
def scalar(value, dtype):
dtype.type(value)
def assign(value, dtype):
arr = np.array([0, 0, 0], dtype=dtype)
arr[2] = value
def create(value, dtype):
np.array([value], dtype=dtype)
for creation_func in [scalar, assign, create]:
try:
self.assert_deprecated(
lambda: creation_func(info.min - 1, dtype))
except OverflowError:
pass # OverflowErrors always happened also before and are OK.
try:
self.assert_deprecated(
lambda: creation_func(info.max + 1, dtype))
except OverflowError:
pass # OverflowErrors always happened also before and are OK.
@pytest.mark.parametrize("name", ["str", "bytes", "object"])
def test_future_scalar_attributes(name):
# FutureWarning added 2022-11-17, NumPy 1.24,
assert name not in dir(np) # we may want to not add them
with pytest.warns(FutureWarning,
match=f"In the future .*{name}"):
assert not hasattr(np, name)
# Unfortunately, they are currently still valid via `np.dtype()`
np.dtype(name)
name in np._core.sctypeDict
# Ignore the above future attribute warning for this test.
@pytest.mark.filterwarnings("ignore:In the future:FutureWarning")
| TestPyIntConversion |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_deployments.py | {
"start": 43518,
"end": 45861
} | class ____:
async def test_read_deployment_by_name(self, client, flow, deployment):
response = await client.get(f"/deployments/name/{flow.name}/{deployment.name}")
assert response.status_code == status.HTTP_200_OK
assert response.json()["id"] == str(deployment.id)
assert response.json()["name"] == deployment.name
assert response.json()["flow_id"] == str(deployment.flow_id)
async def test_read_deployment_by_name_returns_404_if_does_not_exist(self, client):
response = await client.get(f"/deployments/name/{uuid4()}")
assert response.status_code == status.HTTP_404_NOT_FOUND
async def test_read_deployment_by_name_returns_404_if_just_given_flow_name(
self, client, flow
):
response = await client.get(f"/deployments/name/{flow.name}")
assert response.status_code == status.HTTP_404_NOT_FOUND
async def test_read_deployment_by_name_returns_404_if_just_given_deployment_name(
self, client, deployment
):
response = await client.get(f"/deployments/name/{deployment.name}")
assert response.status_code == status.HTTP_404_NOT_FOUND
@pytest.mark.parametrize(
"name",
[
"my deployment",
"my:deployment",
r"my\deployment",
"my👍deployment",
"my|deployment",
],
)
async def test_read_deployment_by_name_with_nonstandard_characters(
self,
client,
name,
flow,
):
response = await client.post(
"/deployments/",
json=dict(
name=name,
flow_id=str(flow.id),
),
)
deployment_id = response.json()["id"]
response = await client.get(f"/deployments/name/{flow.name}/{name}")
assert response.status_code == status.HTTP_200_OK
assert response.json()["id"] == deployment_id
@pytest.mark.parametrize(
"name",
[
"my/deployment",
"my%deployment",
],
)
async def test_read_deployment_by_name_with_invalid_characters_fails(
self, client, name, flow
):
response = await client.get(f"/deployments/name/{flow.name}/{name}")
assert response.status_code == status.HTTP_404_NOT_FOUND
| TestReadDeploymentByName |
python | great-expectations__great_expectations | tests/expectations/metrics/query_metrics/test_query_metrics.py | {
"start": 2234,
"end": 2387
} | class ____(QueryColumn):
metric_name = "my_query.column"
value_keys = ("my_query",)
query_param_name: ClassVar[str] = "my_query"
| MyQueryColumn |
python | pandas-dev__pandas | pandas/core/accessor.py | {
"start": 5690,
"end": 17395
} | class ____:
"""
Custom property-like object.
A descriptor for accessors.
Parameters
----------
name : str
Namespace that will be accessed under, e.g. ``df.foo``.
accessor : cls
Class with the extension methods.
Notes
-----
For accessor, The class's __init__ method assumes that one of
``Series``, ``DataFrame`` or ``Index`` as the
single argument ``data``.
"""
def __init__(self, name: str, accessor) -> None:
self._name = name
self._accessor = accessor
def __get__(self, obj, cls):
if obj is None:
# we're accessing the attribute of the class, i.e., Dataset.geo
return self._accessor
return self._accessor(obj)
# Alias kept for downstream libraries
# TODO: Deprecate as name is now misleading
CachedAccessor = Accessor
def _register_accessor(
name: str, cls: type[NDFrame | Index]
) -> Callable[[TypeT], TypeT]:
"""
Register a custom accessor on objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor : Register a custom accessor on DataFrame objects.
register_series_accessor : Register a custom accessor on Series objects.
register_index_accessor : Register a custom accessor on Index objects.
Notes
-----
This function allows you to register a custom-defined accessor class
for pandas objects (DataFrame, Series, or Index).
The requirements for the accessor class are as follows:
* Must contain an init method that:
* accepts a single object
* raises an AttributeError if the object does not have correctly
matching inputs for the accessor
* Must contain a method for each access pattern.
* The methods should be able to take any argument signature.
* Accessible using the @property decorator if no additional arguments are
needed.
"""
def decorator(accessor: TypeT) -> TypeT:
if hasattr(cls, name):
warnings.warn(
f"registration of accessor {accessor!r} under name "
f"{name!r} for type {cls!r} is overriding a preexisting "
f"attribute with the same name.",
UserWarning,
stacklevel=find_stack_level(),
)
setattr(cls, name, Accessor(name, accessor))
cls._accessors.add(name)
return accessor
return decorator
_register_df_examples = """
An accessor that only accepts integers could
have a class defined like this:
>>> @pd.api.extensions.register_dataframe_accessor("int_accessor")
... class IntAccessor:
... def __init__(self, pandas_obj):
... if not all(pandas_obj[col].dtype == 'int64' for col in pandas_obj.columns):
... raise AttributeError("All columns must contain integer values only")
... self._obj = pandas_obj
...
... def sum(self):
... return self._obj.sum()
...
>>> df = pd.DataFrame([[1, 2], ['x', 'y']])
>>> df.int_accessor
Traceback (most recent call last):
...
AttributeError: All columns must contain integer values only.
>>> df = pd.DataFrame([[1, 2], [3, 4]])
>>> df.int_accessor.sum()
0 4
1 6
dtype: int64"""
@set_module("pandas.api.extensions")
def register_dataframe_accessor(name: str) -> Callable[[TypeT], TypeT]:
"""
Register a custom accessor on DataFrame objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor : Register a custom accessor on DataFrame objects.
register_series_accessor : Register a custom accessor on Series objects.
register_index_accessor : Register a custom accessor on Index objects.
Notes
-----
This function allows you to register a custom-defined accessor class for DataFrame.
The requirements for the accessor class are as follows:
* Must contain an init method that:
* accepts a single DataFrame object
* raises an AttributeError if the DataFrame object does not have correctly
matching inputs for the accessor
* Must contain a method for each access pattern.
* The methods should be able to take any argument signature.
* Accessible using the @property decorator if no additional arguments are
needed.
Examples
--------
An accessor that only accepts integers could
have a class defined like this:
>>> @pd.api.extensions.register_dataframe_accessor("int_accessor")
... class IntAccessor:
... def __init__(self, pandas_obj):
... if not all(
... pandas_obj[col].dtype == "int64" for col in pandas_obj.columns
... ):
... raise AttributeError("All columns must contain integer values only")
... self._obj = pandas_obj
...
... def sum(self):
... return self._obj.sum()
>>> df = pd.DataFrame([[1, 2], ["x", "y"]])
>>> df.int_accessor
Traceback (most recent call last):
...
AttributeError: All columns must contain integer values only.
>>> df = pd.DataFrame([[1, 2], [3, 4]])
>>> df.int_accessor.sum()
0 4
1 6
dtype: int64
"""
from pandas import DataFrame
return _register_accessor(name, DataFrame)
_register_series_examples = """
An accessor that only accepts integers could
have a class defined like this:
>>> @pd.api.extensions.register_series_accessor("int_accessor")
... class IntAccessor:
... def __init__(self, pandas_obj):
... if not pandas_obj.dtype == 'int64':
... raise AttributeError("The series must contain integer data only")
... self._obj = pandas_obj
...
... def sum(self):
... return self._obj.sum()
...
>>> df = pd.Series([1, 2, 'x'])
>>> df.int_accessor
Traceback (most recent call last):
...
AttributeError: The series must contain integer data only.
>>> df = pd.Series([1, 2, 3])
>>> df.int_accessor.sum()
np.int64(6)"""
@set_module("pandas.api.extensions")
def register_series_accessor(name: str) -> Callable[[TypeT], TypeT]:
"""
Register a custom accessor on Series objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor : Register a custom accessor on DataFrame objects.
register_series_accessor : Register a custom accessor on Series objects.
register_index_accessor : Register a custom accessor on Index objects.
Notes
-----
This function allows you to register a custom-defined accessor class for Series.
The requirements for the accessor class are as follows:
* Must contain an init method that:
* accepts a single Series object
* raises an AttributeError if the Series object does not have correctly
matching inputs for the accessor
* Must contain a method for each access pattern.
* The methods should be able to take any argument signature.
* Accessible using the @property decorator if no additional arguments are
needed.
Examples
--------
An accessor that only accepts integers could
have a class defined like this:
>>> @pd.api.extensions.register_series_accessor("int_accessor")
... class IntAccessor:
... def __init__(self, pandas_obj):
... if not pandas_obj.dtype == "int64":
... raise AttributeError("The series must contain integer data only")
... self._obj = pandas_obj
...
... def sum(self):
... return self._obj.sum()
>>> df = pd.Series([1, 2, "x"])
>>> df.int_accessor
Traceback (most recent call last):
...
AttributeError: The series must contain integer data only.
>>> df = pd.Series([1, 2, 3])
>>> df.int_accessor.sum()
np.int64(6)
"""
from pandas import Series
return _register_accessor(name, Series)
_register_index_examples = """
An accessor that only accepts integers could
have a class defined like this:
>>> @pd.api.extensions.register_index_accessor("int_accessor")
... class IntAccessor:
... def __init__(self, pandas_obj):
... if not all(isinstance(x, int) for x in pandas_obj):
... raise AttributeError("The index must only be an integer value")
... self._obj = pandas_obj
...
... def even(self):
... return [x for x in self._obj if x % 2 == 0]
>>> df = pd.DataFrame.from_dict(
... {"row1": {"1": 1, "2": "a"}, "row2": {"1": 2, "2": "b"}}, orient="index"
... )
>>> df.index.int_accessor
Traceback (most recent call last):
...
AttributeError: The index must only be an integer value.
>>> df = pd.DataFrame(
... {"col1": [1, 2, 3, 4], "col2": ["a", "b", "c", "d"]}, index=[1, 2, 5, 8]
... )
>>> df.index.int_accessor.even()
[2, 8]"""
@set_module("pandas.api.extensions")
def register_index_accessor(name: str) -> Callable[[TypeT], TypeT]:
"""
Register a custom accessor on Index objects.
Parameters
----------
name : str
Name under which the accessor should be registered. A warning is issued
if this name conflicts with a preexisting attribute.
Returns
-------
callable
A class decorator.
See Also
--------
register_dataframe_accessor : Register a custom accessor on DataFrame objects.
register_series_accessor : Register a custom accessor on Series objects.
register_index_accessor : Register a custom accessor on Index objects.
Notes
-----
This function allows you to register a custom-defined accessor class for Index.
The requirements for the accessor class are as follows:
* Must contain an init method that:
* accepts a single Index object
* raises an AttributeError if the Index object does not have correctly
matching inputs for the accessor
* Must contain a method for each access pattern.
* The methods should be able to take any argument signature.
* Accessible using the @property decorator if no additional arguments are
needed.
Examples
--------
An accessor that only accepts integers could
have a class defined like this:
>>> @pd.api.extensions.register_index_accessor("int_accessor")
... class IntAccessor:
... def __init__(self, pandas_obj):
... if not all(isinstance(x, int) for x in pandas_obj):
... raise AttributeError("The index must only be an integer value")
... self._obj = pandas_obj
...
... def even(self):
... return [x for x in self._obj if x % 2 == 0]
>>> df = pd.DataFrame.from_dict(
... {"row1": {"1": 1, "2": "a"}, "row2": {"1": 2, "2": "b"}}, orient="index"
... )
>>> df.index.int_accessor
Traceback (most recent call last):
...
AttributeError: The index must only be an integer value.
>>> df = pd.DataFrame(
... {"col1": [1, 2, 3, 4], "col2": ["a", "b", "c", "d"]}, index=[1, 2, 5, 8]
... )
>>> df.index.int_accessor.even()
[2, 8]
"""
from pandas import Index
return _register_accessor(name, Index)
| Accessor |
python | getsentry__sentry | src/sentry/relay/types/rule_condition.py | {
"start": 1039,
"end": 1173
} | class ____(TypedDict):
"""Less than or equal condition"""
op: Literal["lte"]
name: str
value: Value | None
| LteCondition |
python | cython__cython | Cython/Compiler/ParseTreeTransforms.py | {
"start": 141135,
"end": 146748
} | class ____(CythonTransform):
# Output closure classes in module scope for all functions
# that really need it.
def __init__(self, context):
super().__init__(context)
self.path = []
self.in_lambda = False
def visit_ModuleNode(self, node):
self.module_scope = node.scope
self.visitchildren(node)
return node
def find_entries_used_in_closures(self, node):
from_closure = []
in_closure = []
for scope in node.local_scope.iter_local_scopes():
for name, entry in scope.entries.items():
if not name:
continue
if entry.from_closure:
from_closure.append((name, entry))
elif entry.in_closure:
in_closure.append((name, entry))
return from_closure, in_closure
def create_class_from_scope(self, node, target_module_scope, inner_node=None):
# move local variables into closure
if node.is_generator:
for scope in node.local_scope.iter_local_scopes():
for entry in scope.entries.values():
if not (entry.from_closure or entry.is_pyglobal or entry.is_cglobal):
entry.in_closure = True
from_closure, in_closure = self.find_entries_used_in_closures(node)
in_closure.sort()
# Now from the beginning
node.needs_closure = False
node.needs_outer_scope = False
func_scope = node.local_scope
cscope = node.entry.scope
while cscope.is_py_class_scope or cscope.is_c_class_scope:
cscope = cscope.outer_scope
if not from_closure and (self.path or inner_node):
if not inner_node:
if not node.py_cfunc_node:
raise InternalError("DefNode does not have assignment node")
inner_node = node.py_cfunc_node
inner_node.needs_closure_code = False
node.needs_outer_scope = False
if node.is_generator:
pass
elif not in_closure and not from_closure:
return
elif not in_closure:
func_scope.is_passthrough = True
func_scope.scope_class = cscope.scope_class
node.needs_outer_scope = True
return
# entry.cname can contain periods (eg. a derived C method of a class).
# We want to use the cname as part of a C struct name, so we replace
# periods with double underscores.
as_name = '%s_%s' % (
target_module_scope.next_id(Naming.closure_class_prefix),
node.entry.cname.replace('.','__'))
as_name = EncodedString(as_name)
entry = target_module_scope.declare_c_class(
name=as_name, pos=node.pos, defining=True,
implementing=True)
entry.type.is_final_type = True
func_scope.scope_class = entry
class_scope = entry.type.scope
class_scope.is_internal = True
class_scope.is_closure_class_scope = True
if node.is_async_def or node.is_generator:
# Generators need their closure intact during cleanup as they resume to handle GeneratorExit
class_scope.directives['no_gc_clear'] = True
if Options.closure_freelist_size:
class_scope.directives['freelist'] = Options.closure_freelist_size
if from_closure:
assert cscope.is_closure_scope
class_scope.declare_var(pos=node.pos,
name=Naming.outer_scope_cname,
cname=Naming.outer_scope_cname,
type=cscope.scope_class.type,
is_cdef=True)
node.needs_outer_scope = True
for name, entry in in_closure:
closure_entry = class_scope.declare_var(
pos=entry.pos,
name=entry.name if not entry.in_subscope else None,
cname=entry.cname,
type=entry.type,
is_cdef=True)
if entry.is_declared_generic:
closure_entry.is_declared_generic = 1
node.needs_closure = True
# Do it here because other classes are already checked
target_module_scope.check_c_class(func_scope.scope_class)
def visit_LambdaNode(self, node):
if not isinstance(node.def_node, Nodes.DefNode):
# fused function, an error has been previously issued
return node
was_in_lambda = self.in_lambda
self.in_lambda = True
self.create_class_from_scope(node.def_node, self.module_scope, node)
self.visitchildren(node)
self.in_lambda = was_in_lambda
return node
def visit_FuncDefNode(self, node):
if self.in_lambda:
self.visitchildren(node)
return node
if node.needs_closure or self.path:
self.create_class_from_scope(node, self.module_scope)
self.path.append(node)
self.visitchildren(node)
self.path.pop()
return node
def visit_GeneratorBodyDefNode(self, node):
self.visitchildren(node)
return node
def visit_CFuncDefNode(self, node):
if not node.overridable:
return self.visit_FuncDefNode(node)
else:
self.visitchildren(node)
return node
def visit_GeneratorExpressionNode(self, node):
node = _HandleGeneratorArguments()(node)
return self.visit_LambdaNode(node)
| CreateClosureClasses |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_svg.py | {
"start": 51030,
"end": 51132
} | class ____(_Backend):
backend_version = mpl.__version__
FigureCanvas = FigureCanvasSVG
| _BackendSVG |
python | openai__openai-python | src/openai/types/shared/response_format_text_grammar.py | {
"start": 202,
"end": 418
} | class ____(BaseModel):
grammar: str
"""The custom grammar for the model to follow."""
type: Literal["grammar"]
"""The type of response format being defined. Always `grammar`."""
| ResponseFormatTextGrammar |
python | pandas-dev__pandas | pandas/core/arrays/sparse/array.py | {
"start": 7467,
"end": 65739
} | class ____(OpsMixin, PandasObject, ExtensionArray):
"""
An ExtensionArray for storing sparse data.
SparseArray efficiently stores data with a high frequency of a
specific fill value (e.g., zeros), saving memory by only retaining
non-fill elements and their indices. This class is particularly
useful for large datasets where most values are redundant.
Parameters
----------
data : array-like or scalar
A dense array of values to store in the SparseArray. This may contain
`fill_value`.
sparse_index : SparseIndex, optional
Index indicating the locations of sparse elements.
fill_value : scalar, optional
Elements in data that are ``fill_value`` are not stored in the
SparseArray. For memory savings, this should be the most common value
in `data`. By default, `fill_value` depends on the dtype of `data`:
=========== ==========
data.dtype na_value
=========== ==========
float ``np.nan``
int ``0``
bool False
datetime64 ``pd.NaT``
timedelta64 ``pd.NaT``
=========== ==========
The fill value is potentially specified in three ways. In order of
precedence, these are
1. The `fill_value` argument
2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is
a ``SparseDtype``
3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`
is not a ``SparseDtype`` and `data` is a ``SparseArray``.
kind : str
Can be 'integer' or 'block', default is 'integer'.
The type of storage for sparse locations.
* 'block': Stores a `block` and `block_length` for each
contiguous *span* of sparse values. This is best when
sparse data tends to be clumped together, with large
regions of ``fill-value`` values between sparse values.
* 'integer': uses an integer to store the location of
each sparse value.
dtype : np.dtype or SparseDtype, optional
The dtype to use for the SparseArray. For numpy dtypes, this
determines the dtype of ``self.sp_values``. For SparseDtype,
this determines ``self.sp_values`` and ``self.fill_value``.
copy : bool, default False
Whether to explicitly copy the incoming `data` array.
Attributes
----------
None
Methods
-------
None
See Also
--------
SparseDtype : Dtype for sparse data.
Examples
--------
>>> from pandas.arrays import SparseArray
>>> arr = SparseArray([0, 0, 1, 2])
>>> arr
[0, 0, 1, 2]
Fill: 0
IntIndex
Indices: array([2, 3], dtype=int32)
"""
_subtyp = "sparse_array" # register ABCSparseArray
_hidden_attrs = PandasObject._hidden_attrs | frozenset([])
_sparse_index: SparseIndex
_sparse_values: np.ndarray
_dtype: SparseDtype
def __init__(
self,
data,
sparse_index=None,
fill_value=None,
kind: SparseIndexKind = "integer",
dtype: Dtype | None = None,
copy: bool = False,
) -> None:
if fill_value is None and isinstance(dtype, SparseDtype):
fill_value = dtype.fill_value
if isinstance(data, type(self)):
# disable normal inference on dtype, sparse_index, & fill_value
if sparse_index is None:
sparse_index = data.sp_index
if fill_value is None:
fill_value = data.fill_value
if dtype is None:
dtype = data.dtype
# TODO: make kind=None, and use data.kind?
data = data.sp_values
# Handle use-provided dtype
if isinstance(dtype, str):
# Two options: dtype='int', regular numpy dtype
# or dtype='Sparse[int]', a sparse dtype
try:
dtype = SparseDtype.construct_from_string(dtype)
except TypeError:
dtype = pandas_dtype(dtype)
if isinstance(dtype, SparseDtype):
if fill_value is None:
fill_value = dtype.fill_value
dtype = dtype.subtype
if is_scalar(data):
raise TypeError(
f"Cannot construct {type(self).__name__} from scalar data. "
"Pass a sequence instead."
)
if dtype is not None:
dtype = pandas_dtype(dtype)
# TODO: disentangle the fill_value dtype inference from
# dtype inference
if data is None:
# TODO: What should the empty dtype be? Object or float?
# error: Argument "dtype" to "array" has incompatible type
# "Union[ExtensionDtype, dtype[Any], None]"; expected "Union[dtype[Any],
# None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any,
# Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
data = np.array([], dtype=dtype) # type: ignore[arg-type]
try:
data = sanitize_array(data, index=None)
except ValueError:
# NumPy may raise a ValueError on data like [1, []]
# we retry with object dtype here.
if dtype is None:
dtype = np.dtype(object)
data = np.atleast_1d(np.asarray(data, dtype=dtype))
else:
raise
if copy:
# TODO: avoid double copy when dtype forces cast.
data = data.copy()
if fill_value is None:
fill_value_dtype = data.dtype if dtype is None else dtype
if fill_value_dtype is None:
fill_value = np.nan
else:
fill_value = na_value_for_dtype(fill_value_dtype)
if isinstance(data, type(self)) and sparse_index is None:
sparse_index = data._sparse_index
# error: Argument "dtype" to "asarray" has incompatible type
# "Union[ExtensionDtype, dtype[Any], None]"; expected "None"
sparse_values = np.asarray(
data.sp_values,
dtype=dtype, # type: ignore[arg-type]
)
elif sparse_index is None:
data = extract_array(data, extract_numpy=True)
if not isinstance(data, np.ndarray):
# EA
if isinstance(data.dtype, DatetimeTZDtype):
warnings.warn(
f"Creating SparseArray from {data.dtype} data "
"loses timezone information. Cast to object before "
"sparse to retain timezone information.",
UserWarning,
stacklevel=find_stack_level(),
)
data = np.asarray(data, dtype="datetime64[ns]")
if fill_value is NaT:
fill_value = np.datetime64("NaT", "ns")
data = np.asarray(data)
sparse_values, sparse_index, fill_value = _make_sparse(
# error: Argument "dtype" to "_make_sparse" has incompatible type
# "Union[ExtensionDtype, dtype[Any], None]"; expected
# "Optional[dtype[Any]]"
data,
kind=kind,
fill_value=fill_value,
dtype=dtype, # type: ignore[arg-type]
)
else:
# error: Argument "dtype" to "asarray" has incompatible type
# "Union[ExtensionDtype, dtype[Any], None]"; expected "None"
sparse_values = np.asarray(data, dtype=dtype) # type: ignore[arg-type]
if len(sparse_values) != sparse_index.npoints:
raise AssertionError(
f"Non array-like type {type(sparse_values)} must "
"have the same length as the index"
)
self._sparse_index = sparse_index
self._sparse_values = sparse_values
self._dtype = SparseDtype(sparse_values.dtype, fill_value)
@classmethod
def _simple_new(
cls,
sparse_array: np.ndarray,
sparse_index: SparseIndex,
dtype: SparseDtype,
) -> Self:
new = object.__new__(cls)
new._sparse_index = sparse_index
new._sparse_values = sparse_array
new._dtype = dtype
return new
@classmethod
def from_spmatrix(cls, data: _SparseMatrixLike) -> Self:
"""
Create a SparseArray from a scipy.sparse matrix.
Parameters
----------
data : scipy.sparse.sp_matrix
This should be a SciPy sparse matrix where the size
of the second dimension is 1. In other words, a
sparse matrix with a single column.
Returns
-------
SparseArray
Examples
--------
>>> import scipy.sparse
>>> mat = scipy.sparse.coo_matrix((4, 1))
>>> pd.arrays.SparseArray.from_spmatrix(mat)
[0.0, 0.0, 0.0, 0.0]
Fill: 0.0
IntIndex
Indices: array([], dtype=int32)
"""
length, ncol = data.shape
if ncol != 1:
raise ValueError(f"'data' must have a single column, not '{ncol}'")
# our sparse index classes require that the positions be strictly
# increasing. So we need to sort loc, and arr accordingly.
data_csc = data.tocsc()
data_csc.sort_indices()
arr = data_csc.data
idx = data_csc.indices
zero = np.array(0, dtype=arr.dtype).item()
dtype = SparseDtype(arr.dtype, zero)
index = IntIndex(length, idx)
return cls._simple_new(arr, index, dtype)
def __array__(
self, dtype: NpDtype | None = None, copy: bool | None = None
) -> np.ndarray:
if self.sp_index.ngaps == 0:
# Compat for na dtype and int values.
if copy is True:
return np.array(self.sp_values)
else:
result = self.sp_values
if self._readonly:
result = result.view()
result.flags.writeable = False
return result
if copy is False:
raise ValueError(
"Unable to avoid copy while creating an array as requested."
)
fill_value = self.fill_value
if dtype is None:
# Can NumPy represent this type?
# If not, `np.result_type` will raise. We catch that
# and return object.
if self.sp_values.dtype.kind == "M":
# However, we *do* special-case the common case of
# a datetime64 with pandas NaT.
if fill_value is NaT:
# Can't put pd.NaT in a datetime64[ns]
fill_value = np.datetime64("NaT")
try:
dtype = np.result_type(self.sp_values.dtype, type(fill_value))
except TypeError:
dtype = object
out = np.full(self.shape, fill_value, dtype=dtype)
out[self.sp_index.indices] = self.sp_values
return out
def __setitem__(self, key, value) -> None:
if self._readonly:
raise ValueError("Cannot modify read-only array")
# I suppose we could allow setting of non-fill_value elements.
# TODO(SparseArray.__setitem__): remove special cases in
# ExtensionBlock.where
msg = "SparseArray does not support item assignment via setitem"
raise TypeError(msg)
@classmethod
def _from_sequence(
cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
) -> Self:
return cls(scalars, dtype=dtype)
@classmethod
def _from_factorized(cls, values, original) -> Self:
return cls(values, dtype=original.dtype)
def _cast_pointwise_result(self, values):
result = super()._cast_pointwise_result(values)
if result.dtype.kind == self.dtype.kind:
try:
# e.g. test_groupby_agg_extension
res = type(self)._from_sequence(result, dtype=self.dtype)
if ((res == result) | (isna(result) & res.isna())).all():
# This does not hold for e.g.
# test_arith_frame_with_scalar[0-__truediv__]
return res
return type(self)._from_sequence(result)
except (ValueError, TypeError):
return type(self)._from_sequence(result)
else:
# e.g. test_combine_le avoid casting bools to Sparse[float64, nan]
return type(self)._from_sequence(result)
# ------------------------------------------------------------------------
# Data
# ------------------------------------------------------------------------
@property
def sp_index(self) -> SparseIndex:
"""
The SparseIndex containing the location of non- ``fill_value`` points.
"""
return self._sparse_index
@property
def sp_values(self) -> np.ndarray:
"""
An ndarray containing the non- ``fill_value`` values.
This property returns the actual data values stored in the sparse
representation, excluding the values that are equal to the ``fill_value``.
The result is an ndarray of the underlying values, preserving the sparse
structure by omitting the default ``fill_value`` entries.
See Also
--------
Series.sparse.to_dense : Convert a Series from sparse values to dense.
Series.sparse.fill_value : Elements in `data` that are `fill_value` are
not stored.
Series.sparse.density : The percent of non- ``fill_value`` points, as decimal.
Examples
--------
>>> from pandas.arrays import SparseArray
>>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0)
>>> s.sp_values
array([1, 2])
"""
return self._sparse_values
@property
def dtype(self) -> SparseDtype:
return self._dtype
@property
def fill_value(self):
"""
Elements in `data` that are `fill_value` are not stored.
For memory savings, this should be the most common value in the array.
See Also
--------
SparseDtype : Dtype for data stored in :class:`SparseArray`.
Series.value_counts : Return a Series containing counts of unique values.
Series.fillna : Fill NA/NaN in a Series with a specified value.
Examples
--------
>>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]")
>>> ser.sparse.fill_value
0
>>> spa_dtype = pd.SparseDtype(dtype=np.int32, fill_value=2)
>>> ser = pd.Series([0, 0, 2, 2, 2], dtype=spa_dtype)
>>> ser.sparse.fill_value
2
"""
return self.dtype.fill_value
@fill_value.setter
def fill_value(self, value) -> None:
self._dtype = SparseDtype(self.dtype.subtype, value)
@property
def kind(self) -> SparseIndexKind:
"""
The kind of sparse index for this array. One of {'integer', 'block'}.
"""
if isinstance(self.sp_index, IntIndex):
return "integer"
else:
return "block"
@property
def _valid_sp_values(self) -> np.ndarray:
sp_vals = self.sp_values
mask = notna(sp_vals)
return sp_vals[mask]
def __len__(self) -> int:
return self.sp_index.length
@property
def _null_fill_value(self) -> bool:
return self._dtype._is_na_fill_value
@property
def nbytes(self) -> int:
return self.sp_values.nbytes + self.sp_index.nbytes
@property
def density(self) -> float:
"""
The percent of non- ``fill_value`` points, as decimal.
See Also
--------
DataFrame.sparse.from_spmatrix : Create a new DataFrame from a
scipy sparse matrix.
Examples
--------
>>> from pandas.arrays import SparseArray
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.density
0.6
"""
return self.sp_index.npoints / self.sp_index.length
@property
def npoints(self) -> int:
"""
The number of non- ``fill_value`` points.
This property returns the number of elements in the sparse series that are
not equal to the ``fill_value``. Sparse data structures store only the
non-``fill_value`` elements, reducing memory usage when the majority of
values are the same.
See Also
--------
Series.sparse.to_dense : Convert a Series from sparse values to dense.
Series.sparse.fill_value : Elements in ``data`` that are ``fill_value`` are
not stored.
Series.sparse.density : The percent of non- ``fill_value`` points, as decimal.
Examples
--------
>>> from pandas.arrays import SparseArray
>>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
>>> s.npoints
3
"""
return self.sp_index.npoints
# error: Return type "SparseArray" of "isna" incompatible with return type
# "ndarray[Any, Any] | ExtensionArraySupportsAnyAll" in supertype "ExtensionArray"
def isna(self) -> Self: # type: ignore[override]
# If null fill value, we want SparseDtype[bool, true]
# to preserve the same memory usage.
dtype = SparseDtype(bool, self._null_fill_value)
if self._null_fill_value:
return type(self)._simple_new(isna(self.sp_values), self.sp_index, dtype)
mask = np.full(len(self), False, dtype=np.bool_)
mask[self.sp_index.indices] = isna(self.sp_values)
return type(self)(mask, fill_value=False, dtype=dtype)
def fillna(
self,
value,
limit: int | None = None,
copy: bool = True,
) -> Self:
"""
Fill missing values with `value`.
Parameters
----------
value : scalar
limit : int, optional
Not supported for SparseArray, must be None.
copy: bool, default True
Ignored for SparseArray.
Returns
-------
SparseArray
Notes
-----
When `value` is specified, the result's ``fill_value`` depends on
``self.fill_value``. The goal is to maintain low-memory use.
If ``self.fill_value`` is NA, the result dtype will be
``SparseDtype(self.dtype, fill_value=value)``. This will preserve
amount of memory used before and after filling.
When ``self.fill_value`` is not NA, the result dtype will be
``self.dtype``. Again, this preserves the amount of memory used.
"""
if limit is not None:
raise ValueError("limit must be None")
new_values = np.where(isna(self.sp_values), value, self.sp_values)
if self._null_fill_value:
# This is essentially just updating the dtype.
new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
else:
new_dtype = self.dtype
return self._simple_new(new_values, self._sparse_index, new_dtype)
def shift(self, periods: int = 1, fill_value=None) -> Self:
if not len(self) or periods == 0:
return self.copy()
if isna(fill_value):
fill_value = self.dtype.na_value
subtype = np.result_type(fill_value, self.dtype.subtype)
if subtype != self.dtype.subtype:
# just coerce up front
arr = self.astype(SparseDtype(subtype, self.fill_value))
else:
arr = self
empty = self._from_sequence(
[fill_value] * min(abs(periods), len(self)), dtype=arr.dtype
)
if periods > 0:
a = empty
b = arr[:-periods]
else:
a = arr[abs(periods) :]
b = empty
return arr._concat_same_type([a, b])
def _first_fill_value_loc(self):
"""
Get the location of the first fill value.
Returns
-------
int
"""
if len(self) == 0 or self.sp_index.npoints == len(self):
return -1
indices = self.sp_index.indices
if not len(indices) or indices[0] > 0:
return 0
# a number larger than 1 should be appended to
# the last in case of fill value only appears
# in the tail of array
diff = np.r_[np.diff(indices), 2]
return indices[(diff > 1).argmax()] + 1
@doc(ExtensionArray.duplicated)
def duplicated(
self, keep: Literal["first", "last", False] = "first"
) -> npt.NDArray[np.bool_]:
values = np.asarray(self)
mask = np.asarray(self.isna())
return algos.duplicated(values, keep=keep, mask=mask)
def unique(self) -> Self:
uniques = algos.unique(self.sp_values)
if len(self.sp_values) != len(self):
fill_loc = self._first_fill_value_loc()
# Inorder to align the behavior of pd.unique or
# pd.Series.unique, we should keep the original
# order, here we use unique again to find the
# insertion place. Since the length of sp_values
# is not large, maybe minor performance hurt
# is worthwhile to the correctness.
insert_loc = len(algos.unique(self.sp_values[:fill_loc]))
uniques = np.insert(uniques, insert_loc, self.fill_value)
return type(self)._from_sequence(uniques, dtype=self.dtype)
def _values_for_factorize(self):
# Still override this for hash_pandas_object
return np.asarray(self), self.fill_value
def factorize(
self,
use_na_sentinel: bool = True,
) -> tuple[np.ndarray, SparseArray]:
# Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]
# The sparsity on this is backwards from what Sparse would want. Want
# ExtensionArray.factorize -> Tuple[EA, EA]
# Given that we have to return a dense array of codes, why bother
# implementing an efficient factorize?
codes, uniques = algos.factorize(
np.asarray(self), use_na_sentinel=use_na_sentinel
)
uniques_sp = SparseArray(uniques, dtype=self.dtype)
return codes, uniques_sp
def value_counts(self, dropna: bool = True) -> Series:
"""
Returns a Series containing counts of unique values.
Parameters
----------
dropna : bool, default True
Don't include counts of NaN, even if NaN is in sp_values.
Returns
-------
counts : Series
"""
from pandas import (
Index,
Series,
)
keys, counts, _ = algos.value_counts_arraylike(self.sp_values, dropna=dropna)
fcounts = self.sp_index.ngaps
if fcounts > 0 and (not self._null_fill_value or not dropna):
mask = isna(keys) if self._null_fill_value else keys == self.fill_value
if mask.any():
counts[mask] += fcounts
else:
# error: Argument 1 to "insert" has incompatible type "Union[
# ExtensionArray,ndarray[Any, Any]]"; expected "Union[
# _SupportsArray[dtype[Any]], Sequence[_SupportsArray[dtype
# [Any]]], Sequence[Sequence[_SupportsArray[dtype[Any]]]],
# Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]], Sequence
# [Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]]"
keys = np.insert(keys, 0, self.fill_value) # type: ignore[arg-type]
counts = np.insert(counts, 0, fcounts)
if not isinstance(keys, ABCIndex):
index = Index(keys)
else:
index = keys
return Series(counts, index=index, copy=False)
# --------
# Indexing
# --------
@overload
def __getitem__(self, key: ScalarIndexer) -> Any: ...

@overload
def __getitem__(
    self,
    key: SequenceIndexer | tuple[int | EllipsisType, ...],
) -> Self: ...

def __getitem__(
    self,
    key: PositionalIndexer | tuple[int | EllipsisType, ...],
) -> Self | Any:
    """Positional indexing: scalar, slice, boolean mask or integer array."""
    if isinstance(key, tuple):
        key = unpack_tuple_and_ellipses(key)
        if key is ...:
            raise ValueError("Cannot slice with Ellipsis")

    if is_integer(key):
        # Scalar lookup.
        return self._get_val_at(key)
    elif isinstance(key, tuple):
        # Tuple left over after unpacking: fall back to a dense slice.
        data_slice = self.to_dense()[key]
    elif isinstance(key, slice):
        if key == slice(None):
            # to ensure arr[:] (used by view()) does not make a copy
            result = type(self)._simple_new(
                self.sp_values, self.sp_index, self.dtype
            )
            result._readonly = self._readonly
            return result

        # Avoid densifying when handling contiguous slices
        if key.step is None or key.step == 1:
            start = 0 if key.start is None else key.start
            if start < 0:
                start += len(self)

            end = len(self) if key.stop is None else key.stop
            if end < 0:
                end += len(self)

            # Keep only stored points inside [start, end).
            indices = self.sp_index.indices
            keep_inds = np.flatnonzero((indices >= start) & (indices < end))
            sp_vals = self.sp_values[keep_inds]

            sp_index = indices[keep_inds].copy()

            # If we've sliced to not include the start of the array, all our indices
            # should be shifted. NB: here we are careful to also not shift by a
            # negative value for a case like [0, 1][-100:] where the start index
            # should be treated like 0
            if start > 0:
                sp_index -= start

            # Length of our result should match applying this slice to a range
            # of the length of our original array
            new_len = len(range(len(self))[key])
            new_sp_index = make_sparse_index(new_len, sp_index, self.kind)
            return type(self)._simple_new(sp_vals, new_sp_index, self.dtype)
        else:
            # Strided slice: materialize positions and take.
            indices = np.arange(len(self), dtype=np.int32)[key]
            return self.take(indices)

    elif not is_list_like(key):
        # e.g. "foo" or 2.5
        # exception message copied from numpy
        raise IndexError(
            r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "
            r"(`None`) and integer or boolean arrays are valid indices"
        )

    else:
        if isinstance(key, SparseArray):
            # NOTE: If we guarantee that SparseDType(bool)
            # has only fill_value - true, false or nan
            # (see GH PR 44955)
            # we can apply mask very fast:
            if is_bool_dtype(key):
                if isna(key.fill_value):
                    return self.take(key.sp_index.indices[key.sp_values])
                if not key.fill_value:
                    return self.take(key.sp_index.indices)
                n = len(self)
                mask = np.full(n, True, dtype=np.bool_)
                mask[key.sp_index.indices] = False
                return self.take(np.arange(n)[mask])
            else:
                key = np.asarray(key)

        key = check_array_indexer(self, key)

        if com.is_bool_indexer(key):
            # mypy doesn't know we have an array here
            key = cast(np.ndarray, key)
            return self.take(np.arange(len(key), dtype=np.int32)[key])

        elif hasattr(key, "__len__"):
            return self.take(key)
        else:
            raise ValueError(f"Cannot slice with '{key}'")

    return type(self)(data_slice, kind=self.kind)
def _get_val_at(self, loc):
    """Scalar lookup by position; gaps resolve to the fill value."""
    loc = validate_insert_loc(loc, len(self))

    sp_loc = self.sp_index.lookup(loc)
    if sp_loc < 0:
        # Position is not explicitly stored -> it holds the fill value.
        return self.fill_value
    # Box datetime-like scalars (e.g. np.datetime64 -> Timestamp).
    return maybe_box_datetimelike(self.sp_values[sp_loc], self.sp_values.dtype)
def take(self, indices, *, allow_fill: bool = False, fill_value=None) -> Self:
    """Take values at positional ``indices``.

    With ``allow_fill=True``, a -1 index yields ``fill_value`` (pandas
    semantics); otherwise negative indices wrap (numpy semantics).
    """
    if is_scalar(indices):
        raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.")
    idx = np.asarray(indices, dtype=np.int32)

    if idx.size == 0:
        # Empty take: keep our dtype (and thereby our fill value).
        return type(self)(
            np.array([], dtype="object"),
            fill_value=self.fill_value,
            kind=self.kind,
            dtype=self.dtype,
        )
    if not allow_fill:
        return self._take_without_fill(idx)

    dense = self._take_with_fill(idx, fill_value=fill_value)
    return type(self)(dense, fill_value=self.fill_value, kind=self.kind, dtype=None)
def _take_with_fill(self, indices, fill_value=None) -> np.ndarray:
    """Take with pandas ``allow_fill`` semantics: -1 means "use fill_value".

    Returns a dense ndarray; the caller re-wraps it as a SparseArray.
    The result dtype is widened step by step so both the array's own
    fill value and the requested ``fill_value`` fit.
    """
    if fill_value is None:
        fill_value = self.dtype.na_value

    if indices.min() < -1:
        raise ValueError(
            "Invalid value in 'indices'. Must be between -1 "
            "and the length of the array."
        )

    if indices.max() >= len(self):
        raise IndexError("out of bounds value in 'indices'.")

    if len(self) == 0:
        # Empty... Allow taking only if all empty
        if (indices == -1).all():
            dtype = np.result_type(self.sp_values, type(fill_value))
            taken = np.empty_like(indices, dtype=dtype)
            taken.fill(fill_value)
            return taken
        else:
            raise IndexError("cannot do a non-empty take from an empty axes.")

    # sp_indexer may be -1 for two reasons
    # 1.) we took for an index of -1 (new)
    # 2.) we took a value that was self.fill_value (old)
    sp_indexer = self.sp_index.lookup_array(indices)
    new_fill_indices = indices == -1
    old_fill_indices = (sp_indexer == -1) & ~new_fill_indices

    if self.sp_index.npoints == 0 and old_fill_indices.all():
        # We've looked up all valid points on an all-sparse array.
        taken = np.full(
            sp_indexer.shape, fill_value=self.fill_value, dtype=self.dtype.subtype
        )

    elif self.sp_index.npoints == 0:
        # Use the old fill_value unless we took for an index of -1
        _dtype = np.result_type(self.dtype.subtype, type(fill_value))
        taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype)
        taken[old_fill_indices] = self.fill_value
    else:
        taken = self.sp_values.take(sp_indexer)

        # Fill in two steps.
        # Old fill values
        # New fill values
        # potentially coercing to a new dtype at each stage.

        m0 = sp_indexer[old_fill_indices] < 0
        m1 = sp_indexer[new_fill_indices] < 0

        result_type = taken.dtype

        if m0.any():
            result_type = np.result_type(result_type, type(self.fill_value))
            taken = taken.astype(result_type)
            taken[old_fill_indices] = self.fill_value

        if m1.any():
            result_type = np.result_type(result_type, type(fill_value))
            taken = taken.astype(result_type)
            taken[new_fill_indices] = fill_value

    return taken
def _take_without_fill(self, indices) -> Self:
    """Take with numpy semantics: negative indices wrap, no NA filling.

    Stays sparse: builds a new sparse index instead of densifying.
    """
    to_shift = indices < 0

    n = len(self)
    if (indices.max() >= n) or (indices.min() < -n):
        if n == 0:
            raise IndexError("cannot do a non-empty take from an empty axes.")
        raise IndexError("out of bounds value in 'indices'.")

    if to_shift.any():
        # Copy before mutating so the caller's array is untouched.
        indices = indices.copy()
        indices[to_shift] += n

    sp_indexer = self.sp_index.lookup_array(indices)
    # -1 entries correspond to gaps (fill value) in the source.
    value_mask = sp_indexer != -1
    new_sp_values = self.sp_values[sp_indexer[value_mask]]

    value_indices = np.flatnonzero(value_mask).astype(np.int32, copy=False)

    new_sp_index = make_sparse_index(len(indices), value_indices, kind=self.kind)
    return type(self)._simple_new(new_sp_values, new_sp_index, dtype=self.dtype)
def searchsorted(
self,
v: ArrayLike | object,
side: Literal["left", "right"] = "left",
sorter: NumpySorter | None = None,
) -> npt.NDArray[np.intp] | np.intp:
if get_option("performance_warnings"):
msg = "searchsorted requires high memory usage."
warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())
v = np.asarray(v)
return np.asarray(self, dtype=self.dtype.subtype).searchsorted(v, side, sorter)
def copy(self) -> Self:
    """Return a copy; values are copied, the (immutable) index is shared."""
    copied_values = self.sp_values.copy()
    return self._simple_new(copied_values, self.sp_index, self.dtype)
@classmethod
def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self:
    """Concatenate several SparseArrays by stitching their sparse indexes.

    The first array's fill value wins for the result.
    """
    fill_value = to_concat[0].fill_value

    values = []
    length = 0

    if to_concat:
        sp_kind = to_concat[0].kind
    else:
        sp_kind = "integer"

    sp_index: SparseIndex
    if sp_kind == "integer":
        indices = []

        for arr in to_concat:
            # Shift each array's stored positions by the running offset.
            int_idx = arr.sp_index.indices.copy()
            int_idx += length  # TODO: wraparound
            length += arr.sp_index.length

            values.append(arr.sp_values)
            indices.append(int_idx)

        data = np.concatenate(values)
        indices_arr = np.concatenate(indices)
        sp_index = IntIndex(length, indices_arr)

    else:
        # when concatenating block indices, we don't claim that you'll
        # get an identical index as concatenating the values and then
        # creating a new index. We don't want to spend the time trying
        # to merge blocks across arrays in `to_concat`, so the resulting
        # BlockIndex may have more blocks.
        blengths = []
        blocs = []

        for arr in to_concat:
            block_idx = arr.sp_index.to_block_index()

            values.append(arr.sp_values)
            blocs.append(block_idx.blocs.copy() + length)
            blengths.append(block_idx.blengths)
            length += arr.sp_index.length

        data = np.concatenate(values)
        blocs_arr = np.concatenate(blocs)
        blengths_arr = np.concatenate(blengths)

        sp_index = BlockIndex(length, blocs_arr, blengths_arr)

    return cls(data, sparse_index=sp_index, fill_value=fill_value)
def astype(self, dtype: AstypeArg | None = None, copy: bool = True):
    """
    Change the dtype of a SparseArray.

    The output will always be a SparseArray. To convert to a dense
    ndarray with a certain dtype, use :meth:`numpy.asarray`.

    Parameters
    ----------
    dtype : np.dtype or ExtensionDtype
        For SparseDtype, this changes the dtype of
        ``self.sp_values`` and the ``self.fill_value``.

        For other dtypes, this only changes the dtype of
        ``self.sp_values``.

    copy : bool, default True
        Whether to ensure a copy is made, even if not necessary.

    Returns
    -------
    SparseArray

    Examples
    --------
    >>> arr = pd.arrays.SparseArray([0, 0, 1, 2])
    >>> arr
    [0, 0, 1, 2]
    Fill: 0
    IntIndex
    Indices: array([2, 3], dtype=int32)

    >>> arr.astype(SparseDtype(np.dtype("int32")))
    [0, 0, 1, 2]
    Fill: 0
    IntIndex
    Indices: array([2, 3], dtype=int32)

    Using a NumPy dtype with a different kind (e.g. float) will coerce
    just ``self.sp_values``.

    >>> arr.astype(SparseDtype(np.dtype("float64")))
    ... # doctest: +NORMALIZE_WHITESPACE
    [nan, nan, 1.0, 2.0]
    Fill: nan
    IntIndex
    Indices: array([2, 3], dtype=int32)

    Using a SparseDtype, you can also change the fill value as well.

    >>> arr.astype(SparseDtype("float64", fill_value=0.0))
    ... # doctest: +NORMALIZE_WHITESPACE
    [0.0, 0.0, 1.0, 2.0]
    Fill: 0.0
    IntIndex
    Indices: array([2, 3], dtype=int32)
    """
    # Same dtype: cheap exit (copy only when asked).
    if dtype == self._dtype:
        if not copy:
            return self
        else:
            return self.copy()

    future_dtype = pandas_dtype(dtype)
    if not isinstance(future_dtype, SparseDtype):
        # GH#34457  Non-sparse target: densify and cast.
        values = np.asarray(self)
        values = ensure_wrapped_if_datetimelike(values)
        return astype_array(values, dtype=future_dtype, copy=False)

    # Sparse target: cast only the stored values, keep the sparse index.
    dtype = self.dtype.update_dtype(dtype)
    subtype = pandas_dtype(dtype._subtype_with_str)
    subtype = cast(np.dtype, subtype)  # ensured by update_dtype
    values = ensure_wrapped_if_datetimelike(self.sp_values)
    sp_values = astype_array(values, subtype, copy=copy)
    sp_values = np.asarray(sp_values)

    return self._simple_new(sp_values, self.sp_index, dtype)
def map(self, mapper, na_action: Literal["ignore"] | None = None) -> Self:
    """
    Map categories using an input mapping or function.

    Parameters
    ----------
    mapper : dict, Series, callable
        The correspondence from old values to new.
    na_action : {None, 'ignore'}, default None
        If 'ignore', propagate NA values, without passing them to the
        mapping correspondence.

    Returns
    -------
    SparseArray
        The output array will have the same density as the input.
        The output fill value will be the result of applying the
        mapping to ``self.fill_value``

    Raises
    ------
    ValueError
        If a mapped stored value collides with the mapped fill value
        (the sparsity pattern could not be preserved).

    Examples
    --------
    >>> arr = pd.arrays.SparseArray([0, 1, 2])
    >>> arr.map(lambda x: x + 10)
    [10, 11, 12]
    Fill: 10
    IntIndex
    Indices: array([1, 2], dtype=int32)

    >>> arr.map({0: 10, 1: 11, 2: 12})
    [10, 11, 12]
    Fill: 10
    IntIndex
    Indices: array([1, 2], dtype=int32)

    >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))
    [10, 11, 12]
    Fill: 10
    IntIndex
    Indices: array([1, 2], dtype=int32)
    """
    is_map = isinstance(mapper, (abc.Mapping, ABCSeries))

    # Map the fill value first (skipped when na_action='ignore' and it is NA).
    fill_val = self.fill_value

    if na_action is None or notna(fill_val):
        fill_val = mapper.get(fill_val, fill_val) if is_map else mapper(fill_val)

    def func(sp_val):
        new_sp_val = mapper.get(sp_val, None) if is_map else mapper(sp_val)
        # check identity and equality because nans are not equal to each other
        if new_sp_val is fill_val or new_sp_val == fill_val:
            msg = "fill value in the sparse values not supported"
            raise ValueError(msg)
        return new_sp_val

    sp_values = [func(x) for x in self.sp_values]

    return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_val)
def to_dense(self) -> np.ndarray:
    """Materialize the array as a dense ndarray of the sparse-values dtype."""
    dense_dtype = self.sp_values.dtype
    return np.asarray(self, dtype=dense_dtype)
def _where(self, mask, value):
    # Simple dense round-trip: compute with np.where, then re-sparsify
    # keeping our fill_value. NB: the result subtype may differ from
    # self's (e.g. Sparse[int64] -> Sparse[float64]).
    dense = np.where(mask, self, value)
    sp_dtype = SparseDtype(dense.dtype, fill_value=self.fill_value)
    return type(self)._from_sequence(dense, dtype=sp_dtype)
# ------------------------------------------------------------------------
# IO
# ------------------------------------------------------------------------
def __setstate__(self, state) -> None:
    """Necessary for making this object picklable"""
    if isinstance(state, tuple):
        # Compat for pandas < 0.24.0
        # Legacy pickles stored (ndarray state, (fill_value, sp_index)).
        nd_state, (fill_value, sp_index) = state
        sparse_values = np.array([])
        sparse_values.__setstate__(nd_state)

        self._sparse_values = sparse_values
        self._sparse_index = sp_index
        self._dtype = SparseDtype(sparse_values.dtype, fill_value)
    else:
        # Modern pickles store the instance __dict__ directly.
        self.__dict__.update(state)
def nonzero(self) -> tuple[npt.NDArray[np.int32]]:
    """Positions of non-zero entries, as a 1-tuple (numpy convention)."""
    stored = self.sp_index.indices
    if self.fill_value == 0:
        # Gaps are zeros, so every stored position counts as non-zero.
        return (stored,)
    return (stored[self.sp_values != 0],)
# ------------------------------------------------------------------------
# Reductions
# ------------------------------------------------------------------------
def _reduce(
    self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
):
    """Dispatch a named reduction (e.g. "sum", "max") to the matching method."""
    method = getattr(self, name, None)

    if method is None:
        raise TypeError(f"cannot perform {name} with type {self.dtype}")

    # NOTE(review): this branch looks inverted -- dropna() runs only when
    # skipna is False, which removes the NAs that should propagate.
    # Confirm against the individual reduction methods (they take their
    # own ``skipna``) before changing.
    if skipna:
        arr = self
    else:
        arr = self.dropna()

    result = getattr(arr, name)(**kwargs)

    if keepdims:
        # Wrap the scalar in a length-1 array to honor keepdims semantics.
        return type(self)([result], dtype=self.dtype)
    else:
        return result
def all(self, axis=None, *args, **kwargs):
    """
    Tests whether all elements evaluate True

    Returns
    -------
    all : bool

    See Also
    --------
    numpy.all
    """
    nv.validate_all(args, kwargs)

    values = self.sp_values

    # Any gap holds the fill value; if that value is falsy, fail fast.
    if len(values) != len(self) and not np.all(self.fill_value):
        return False

    # .item() so a plain Python bool is returned, consistent with ``any``.
    return values.all().item()
def any(self, axis: AxisInt = 0, *args, **kwargs) -> bool:
    """
    Tests whether at least one of elements evaluate True

    Returns
    -------
    any : bool

    See Also
    --------
    numpy.any
    """
    nv.validate_any(args, kwargs)

    stored = self.sp_values

    # A truthy fill value plus at least one gap decides it immediately.
    if len(stored) != len(self) and np.any(self.fill_value):
        return True

    return stored.any().item()
def sum(
    self,
    axis: AxisInt = 0,
    min_count: int = 0,
    skipna: bool = True,
    *args,
    **kwargs,
) -> Scalar:
    """
    Sum of non-NA/null values

    Parameters
    ----------
    axis : int, default 0
        Not Used. NumPy compatibility.
    min_count : int, default 0
        The required number of valid values to perform the summation. If fewer
        than ``min_count`` valid values are present, the result will be the missing
        value indicator for subarray type.
    *args, **kwargs
        Not Used. NumPy compatibility.

    Returns
    -------
    scalar
    """
    nv.validate_sum(args, kwargs)
    valid_vals = self._valid_sp_values
    sp_sum = valid_vals.sum()
    # Gaps hold a non-NA fill value? Then NAs (if any) are in sp_values.
    has_na = self.sp_index.ngaps > 0 and not self._null_fill_value

    if has_na and not skipna:
        # NAs must propagate: return the NA sentinel for the subtype.
        return na_value_for_dtype(self.dtype.subtype, compat=False)

    if self._null_fill_value:
        # Gaps are NA and contribute nothing to the sum.
        if check_below_min_count(valid_vals.shape, None, min_count):
            return na_value_for_dtype(self.dtype.subtype, compat=False)
        return sp_sum
    else:
        # Gaps hold a real value: add fill_value once per gap.
        nsparse = self.sp_index.ngaps
        if check_below_min_count(valid_vals.shape, None, min_count - nsparse):
            return na_value_for_dtype(self.dtype.subtype, compat=False)
        return sp_sum + self.fill_value * nsparse
def cumsum(self, axis: AxisInt = 0, *args, **kwargs) -> SparseArray:
    """
    Cumulative sum of non-NA/null values.

    When performing the cumulative summation, any NA/null values will
    be skipped. The resulting SparseArray will preserve the locations of
    NaN values, but the fill value will be `np.nan` regardless.

    Parameters
    ----------
    axis : int or None
        Axis over which to perform the cumulative summation. If None,
        perform cumulative summation over flattened array.

    Returns
    -------
    cumsum : SparseArray
    """
    nv.validate_cumsum(args, kwargs)

    if axis is not None and axis >= self.ndim:  # Mimic ndarray behaviour.
        raise ValueError(f"axis(={axis}) out of bounds")

    if not self._null_fill_value:
        # Non-NA fill contributes at every gap: must densify to be correct.
        return SparseArray(self.to_dense()).cumsum()

    # NA fill: gaps contribute nothing, so cumsum over stored values only.
    return SparseArray(
        self.sp_values.cumsum(),
        sparse_index=self.sp_index,
        fill_value=self.fill_value,
    )
def mean(self, axis: Axis = 0, *args, **kwargs):
    """
    Mean of non-NA/null values

    Returns
    -------
    mean : float
    """
    nv.validate_mean(args, kwargs)
    valid = self._valid_sp_values
    total = valid.sum()
    count = len(valid)

    if self._null_fill_value:
        # Gaps are NA: average the stored valid values only.
        return total / count
    # Gaps hold fill_value: weight it once per gap.
    ngaps = self.sp_index.ngaps
    return (total + self.fill_value * ngaps) / (count + ngaps)
def max(self, *, axis: AxisInt | None = None, skipna: bool = True):
    """
    Max of array values, ignoring NA values if specified.

    Parameters
    ----------
    axis : int, default 0
        Not Used. NumPy compatibility.
    skipna : bool, default True
        Whether to ignore NA values.

    Returns
    -------
    scalar
    """
    nv.validate_minmax_axis(axis, self.ndim)
    # Shared min/max logic lives in _min_max.
    return self._min_max("max", skipna=skipna)
def min(self, *, axis: AxisInt | None = None, skipna: bool = True):
    """
    Min of array values, ignoring NA values if specified.

    Parameters
    ----------
    axis : int, default 0
        Not Used. NumPy compatibility.
    skipna : bool, default True
        Whether to ignore NA values.

    Returns
    -------
    scalar
    """
    nv.validate_minmax_axis(axis, self.ndim)
    # Shared min/max logic lives in _min_max.
    return self._min_max("min", skipna=skipna)
def _min_max(self, kind: Literal["min", "max"], skipna: bool) -> Scalar:
    """
    Min/max of non-NA/null values

    Parameters
    ----------
    kind : {"min", "max"}
    skipna : bool

    Returns
    -------
    scalar
        NA when no valid values exist (or NAs are present with
        ``skipna=False``).
    """
    valid_vals = self._valid_sp_values
    # Gaps holding a real (non-NA) fill value also compete for min/max.
    has_nonnull_fill_vals = not self._null_fill_value and self.sp_index.ngaps > 0

    if len(valid_vals) > 0:
        sp_min_max = getattr(valid_vals, kind)()

        # If a non-null fill value is currently present, it might be the min/max
        if has_nonnull_fill_vals:
            func = max if kind == "max" else min
            return func(sp_min_max, self.fill_value)
        elif skipna:
            return sp_min_max
        elif self.sp_index.ngaps == 0:
            # No NAs present
            return sp_min_max
        else:
            return na_value_for_dtype(self.dtype.subtype, compat=False)
    elif has_nonnull_fill_vals:
        # Only gaps (and NA stored values): the fill value is the answer.
        return self.fill_value
    else:
        return na_value_for_dtype(self.dtype.subtype, compat=False)
def _argmin_argmax(self, kind: Literal["argmin", "argmax"]) -> int:
    """Position of the min/max, comparing stored values with the fill value.

    Returns the position of the winning stored value, or the first
    fill-value position when the fill value beats all stored values.
    """
    values = self._sparse_values
    index = self._sparse_index.indices
    mask = np.asarray(isna(values))
    func = np.argmax if kind == "argmax" else np.argmin

    # Best candidate among the non-NA stored values.
    idx = np.arange(values.shape[0])
    non_nans = values[~mask]
    non_nan_idx = idx[~mask]

    _candidate = non_nan_idx[func(non_nans)]
    candidate = index[_candidate]

    if isna(self.fill_value):
        return candidate
    if kind == "argmin" and self[candidate] < self.fill_value:
        return candidate
    if kind == "argmax" and self[candidate] > self.fill_value:
        return candidate
    # Fill value ties or wins: report its first occurrence if there is one.
    _loc = self._first_fill_value_loc()
    if _loc == -1:
        # fill_value doesn't exist
        return candidate
    else:
        return _loc
def argmax(self, skipna: bool = True) -> int:
    """Index of the maximum value; raises on NA when ``skipna=False``."""
    validate_bool_kwarg(skipna, "skipna")
    if self._hasna and not skipna:
        raise ValueError("Encountered an NA value with skipna=False")
    return self._argmin_argmax("argmax")
def argmin(self, skipna: bool = True) -> int:
    """Index of the minimum value; raises on NA when ``skipna=False``."""
    validate_bool_kwarg(skipna, "skipna")
    if self._hasna and not skipna:
        raise ValueError("Encountered an NA value with skipna=False")
    return self._argmin_argmax("argmin")
# ------------------------------------------------------------------------
# Ufuncs
# ------------------------------------------------------------------------
# Operand types handled in __array_ufunc__; anything else defers
# (NotImplemented) so the other operand gets a chance.
_HANDLED_TYPES = (np.ndarray, numbers.Number)

def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
    """NumPy ufunc protocol: keep results sparse where cheaply possible."""
    out = kwargs.get("out", ())

    for x in inputs + out:
        if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)):
            return NotImplemented

    # for binary ops, use our custom dunder methods
    result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
        self, ufunc, method, *inputs, **kwargs
    )
    if result is not NotImplemented:
        return result

    if "out" in kwargs:
        # e.g. tests.arrays.sparse.test_arithmetics.test_ndarray_inplace
        res = arraylike.dispatch_ufunc_with_out(
            self, ufunc, method, *inputs, **kwargs
        )
        return res

    if method == "reduce":
        result = arraylike.dispatch_reduction_ufunc(
            self, ufunc, method, *inputs, **kwargs
        )
        if result is not NotImplemented:
            # e.g. tests.series.test_ufunc.TestNumpyReductions
            return result

    if len(inputs) == 1:
        # No alignment necessary.
        # Apply the ufunc to stored values and fill value separately,
        # so the sparse index can be reused untouched.
        sp_values = getattr(ufunc, method)(self.sp_values, **kwargs)
        fill_value = getattr(ufunc, method)(self.fill_value, **kwargs)

        if ufunc.nout > 1:
            # multiple outputs. e.g. modf
            arrays = tuple(
                self._simple_new(
                    sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv)
                )
                for sp_value, fv in zip(sp_values, fill_value, strict=True)
            )
            return arrays
        elif method == "reduce":
            # e.g. reductions
            return sp_values

        return self._simple_new(
            sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value)
        )

    # Multiple inputs: fall back to dense computation.
    new_inputs = tuple(np.asarray(x) for x in inputs)
    result = getattr(ufunc, method)(*new_inputs, **kwargs)
    if out:
        if len(out) == 1:
            out = out[0]
        return out

    if ufunc.nout > 1:
        return tuple(type(self)(x) for x in result)
    elif method == "at":
        # no return value
        return None
    else:
        return type(self)(result)
# ------------------------------------------------------------------------
# Ops
# ------------------------------------------------------------------------
def _arith_method(self, other, op):
    """Arithmetic ops: sparse-vs-sparse, scalar fast path, or coerce other."""
    op_name = op.__name__

    if isinstance(other, SparseArray):
        return _sparse_array_op(self, other, op, op_name)

    elif is_scalar(other):
        # Scalar: op applies to stored values and fill value independently,
        # so the sparse index is preserved.
        with np.errstate(all="ignore"):
            fill = op(_get_fill(self), np.asarray(other))
            result = op(self.sp_values, other)

        if op_name == "divmod":
            # divmod returns a pair; wrap each half separately.
            left, right = result
            lfill, rfill = fill
            return (
                _wrap_result(op_name, left, self.sp_index, lfill),
                _wrap_result(op_name, right, self.sp_index, rfill),
            )

        return _wrap_result(op_name, result, self.sp_index, fill)

    else:
        other = np.asarray(other)
        with np.errstate(all="ignore"):
            if len(self) != len(other):
                raise AssertionError(
                    f"length mismatch: {len(self)} vs. {len(other)}"
                )
            # NOTE(review): ``other`` was just produced by np.asarray, so this
            # isinstance check is always true -- confirm whether it is dead code.
            if not isinstance(other, SparseArray):
                dtype = getattr(other, "dtype", None)
                other = SparseArray(other, fill_value=self.fill_value, dtype=dtype)
            return _sparse_array_op(self, other, op, op_name)
def _cmp_method(self, other, op) -> SparseArray:
    """Comparison ops; result is a boolean SparseArray."""
    if not is_scalar(other) and not isinstance(other, type(self)):
        # convert list-like to ndarray
        other = np.asarray(other)

    if isinstance(other, np.ndarray):
        # TODO: make this more flexible than just ndarray...
        other = SparseArray(other, fill_value=self.fill_value)

    if isinstance(other, SparseArray):
        if len(self) != len(other):
            raise ValueError(
                f"operands have mismatched length {len(self)} and {len(other)}"
            )

        # Strip dunder underscores, e.g. "__eq__" -> "eq".
        op_name = op.__name__.strip("_")
        return _sparse_array_op(self, other, op, op_name)
    else:
        # scalar
        # Compare fill value and stored values separately; positions not
        # stored get the fill-value comparison result.
        fill_value = op(self.fill_value, other)
        result = np.full(len(self), fill_value, dtype=np.bool_)
        result[self.sp_index.indices] = op(self.sp_values, other)

        return type(self)(
            result,
            fill_value=fill_value,
            dtype=np.bool_,
        )

# Logical ops (&, |, ^) share the comparison implementation.
_logical_method = _cmp_method
def _unary_method(self, op) -> SparseArray:
    """Apply a unary ufunc-like *op*, fast-pathing when the fill is stable."""
    new_fill = op(np.array(self.fill_value)).item()
    target_dtype = SparseDtype(self.dtype.subtype, new_fill)

    if isna(self.fill_value) or new_fill == self.fill_value:
        # Fill value unchanged (or NA-propagating): transform only the
        # stored values and keep the existing sparse index.
        transformed = op(self.sp_values)
        return type(self)._simple_new(transformed, self.sp_index, self.dtype)

    # Fill value changed: the sparsity pattern may change, rebuild densely.
    return type(self)(op(self.to_dense()), dtype=target_dtype)
def __pos__(self) -> SparseArray:
    """Elementwise unary plus, via the generic unary dispatcher."""
    return self._unary_method(operator.pos)
def __neg__(self) -> SparseArray:
    """Elementwise negation, via the generic unary dispatcher."""
    return self._unary_method(operator.neg)
def __invert__(self) -> SparseArray:
    """Elementwise bitwise/boolean inversion, via the unary dispatcher."""
    return self._unary_method(operator.invert)
def __abs__(self) -> SparseArray:
    """Elementwise absolute value, via the generic unary dispatcher."""
    return self._unary_method(operator.abs)
# ----------
# Formatting
# -----------
def __repr__(self) -> str:
    """Values, fill value and sparse index, each on its own line."""
    body = printing.pprint_thing(self)
    fill = printing.pprint_thing(self.fill_value)
    index = printing.pprint_thing(self.sp_index)
    return f"{body}\nFill: {fill}\n{index}"
# error: Return type "None" of "_formatter" incompatible with return
# type "Callable[[Any], str | None]" in supertype "ExtensionArray"
def _formatter(self, boxed: bool = False) -> None:  # type: ignore[override]
    """Return None: let GenericArrayFormatter pick a formatter by dtype."""
    return None
def _make_sparse(
    arr: np.ndarray,
    kind: SparseIndexKind = "block",
    fill_value=None,
    dtype: np.dtype | None = None,
):
    """
    Convert ndarray to sparse format

    Parameters
    ----------
    arr : ndarray
    kind : {'block', 'integer'}
    fill_value : NaN or another value
    dtype : np.dtype, optional

    Returns
    -------
    (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
    """
    assert isinstance(arr, np.ndarray)

    if arr.ndim > 1:
        raise TypeError("expected dimension <= 1 data")

    if fill_value is None:
        fill_value = na_value_for_dtype(arr.dtype)

    # mask is True at positions that must be stored explicitly.
    if isna(fill_value):
        mask = notna(arr)
    else:
        # cast to object comparison to be safe
        if is_string_dtype(arr.dtype):
            arr = arr.astype(object)

        if is_object_dtype(arr.dtype):
            # element-wise equality check method in numpy doesn't treat
            # each element type, eg. 0, 0.0, and False are treated as
            # same. So we have to check the both of its type and value.
            mask = splib.make_mask_object_ndarray(arr, fill_value)
        else:
            mask = arr != fill_value

    length = len(arr)
    if length != len(mask):
        # the arr is a SparseArray
        indices = mask.sp_index.indices
    else:
        indices = mask.nonzero()[0].astype(np.int32)

    index = make_sparse_index(length, indices, kind)
    sparsified_values = arr[mask]
    if dtype is not None:
        # Cast the stored values, unwrapping datetime-likes first.
        sparsified_values = ensure_wrapped_if_datetimelike(sparsified_values)
        sparsified_values = astype_array(sparsified_values, dtype=dtype)
        sparsified_values = np.asarray(sparsified_values)

    # TODO: copy
    return sparsified_values, index, fill_value
@overload
def make_sparse_index(length: int, indices, kind: Literal["block"]) -> BlockIndex: ...


@overload
def make_sparse_index(length: int, indices, kind: Literal["integer"]) -> IntIndex: ...


def make_sparse_index(length: int, indices, kind: SparseIndexKind) -> SparseIndex:
    """Build a BlockIndex or IntIndex for the stored positions ``indices``.

    Parameters
    ----------
    length : int
        Total logical length of the array.
    indices : ndarray of int32
        Positions of the explicitly stored values.
    kind : {"block", "integer"}
        Which SparseIndex subclass to build.

    Raises
    ------
    ValueError
        If ``kind`` is neither "block" nor "integer".
    """
    index: SparseIndex
    if kind == "block":
        # Collapse runs of consecutive positions into (start, length) blocks.
        locs, lens = splib.get_blocks(indices)
        index = BlockIndex(length, locs, lens)
    elif kind == "integer":
        index = IntIndex(length, indices)
    else:  # pragma: no cover
        raise ValueError("must be block or integer type")
    return index
| SparseArray |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 153183,
"end": 153615
} | class ____(Buffer, Operation):
# An operation that produces a single output buffer
def get_outputs(self) -> list[Buffer]:
return [self]
def get_defining_op(self) -> Operation:
return self
# Skip implementation in Buffer
get_operation_name = Operation.get_operation_name
def __post_init__(self) -> None:
Buffer.__post_init__(self)
Operation.__post_init__(self)
| OperationBuffer |
python | astropy__astropy | astropy/modeling/tabular.py | {
"start": 786,
"end": 12603
} | class ____(Model):
"""
Returns an interpolated lookup table value.
Parameters
----------
points : tuple of ndarray of float, optional
The points defining the regular grid in n dimensions.
ndarray must have shapes (m1, ), ..., (mn, ),
lookup_table : array-like
The data on a regular grid in n dimensions.
Must have shapes (m1, ..., mn, ...)
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then ``fill_value`` is used.
fill_value : float or `~astropy.units.Quantity`, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d". If Quantity is given, it will be converted to the unit of
``lookup_table``, if applicable.
Returns
-------
value : ndarray
Interpolated values at input coordinates.
Raises
------
ImportError
Scipy is not installed.
Notes
-----
Uses `scipy.interpolate.interpn`.
"""
linear = False
fittable = False
standard_broadcasting = False
_is_dynamic = True
_id = 0
def __init__(
self,
points=None,
lookup_table=None,
method="linear",
bounds_error=True,
fill_value=np.nan,
**kwargs,
):
n_models = kwargs.get("n_models", 1)
if n_models > 1:
raise NotImplementedError("Only n_models=1 is supported.")
super().__init__(**kwargs)
self.outputs = ("y",)
if lookup_table is None:
raise ValueError("Must provide a lookup table.")
if not isinstance(lookup_table, u.Quantity):
lookup_table = np.asarray(lookup_table)
if self.lookup_table.ndim != lookup_table.ndim:
raise ValueError(
"lookup_table should be an array with "
f"{self.lookup_table.ndim} dimensions."
)
if points is None:
points = tuple(np.arange(x, dtype=float) for x in lookup_table.shape)
else:
if lookup_table.ndim == 1 and not isinstance(points, tuple):
points = (points,)
npts = len(points)
if npts != lookup_table.ndim:
raise ValueError(
"Expected grid points in "
f"{lookup_table.ndim} directions, got {npts}."
)
if (
npts > 1
and isinstance(points[0], u.Quantity)
and len({getattr(p, "unit", None) for p in points}) > 1
):
raise ValueError("points must all have the same unit.")
if isinstance(fill_value, u.Quantity):
if not isinstance(lookup_table, u.Quantity):
raise ValueError(
f"fill value is in {fill_value.unit} but expected to be unitless."
)
fill_value = fill_value.to(lookup_table.unit).value
self.points = points
self.lookup_table = lookup_table
self.bounds_error = bounds_error
self.method = method
self.fill_value = fill_value
def __repr__(self):
return (
f"<{self.__class__.__name__}(points={self.points}, "
f"lookup_table={self.lookup_table})>"
)
def __str__(self):
default_keywords = [
("Model", self.__class__.__name__),
("Name", self.name),
("N_inputs", self.n_inputs),
("N_outputs", self.n_outputs),
("Parameters", ""),
(" points", self.points),
(" lookup_table", self.lookup_table),
(" method", self.method),
(" fill_value", self.fill_value),
(" bounds_error", self.bounds_error),
]
parts = [
f"{keyword}: {value}"
for keyword, value in default_keywords
if value is not None
]
return "\n".join(parts)
@property
def input_units(self):
pts = self.points[0]
if not isinstance(pts, u.Quantity):
return None
return dict.fromkeys(self.inputs, pts.unit)
@property
def return_units(self):
if not isinstance(self.lookup_table, u.Quantity):
return None
return {self.outputs[0]: self.lookup_table.unit}
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(points_low, points_high)``.
Examples
--------
>>> from astropy.modeling.models import Tabular1D, Tabular2D
>>> t1 = Tabular1D(points=[1, 2, 3], lookup_table=[10, 20, 30])
>>> t1.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=1, upper=3)
}
model=Tabular1D(inputs=('x',))
order='C'
)
>>> t2 = Tabular2D(points=[[1, 2, 3], [2, 3, 4]],
... lookup_table=[[10, 20, 30], [20, 30, 40]])
>>> t2.bounding_box
ModelBoundingBox(
intervals={
x: Interval(lower=1, upper=3)
y: Interval(lower=2, upper=4)
}
model=Tabular2D(inputs=('x', 'y'))
order='C'
)
"""
bbox = [(min(p), max(p)) for p in self.points][::-1]
if len(bbox) == 1:
bbox = bbox[0]
return bbox
def evaluate(self, *inputs):
"""
Return the interpolated values at the input coordinates.
Parameters
----------
inputs : list of scalar or list of ndarray
Input coordinates. The number of inputs must be equal
to the dimensions of the lookup table.
"""
inputs = np.broadcast_arrays(*inputs)
shape = inputs[0].shape
inputs = [inp.ravel() for inp in inputs[: self.n_inputs]]
inputs = np.array(inputs).T
if not HAS_SCIPY: # pragma: no cover
raise ModuleNotFoundError("Tabular model requires scipy.")
from scipy.interpolate import interpn
result = interpn(
self.points,
self.lookup_table,
inputs,
method=self.method,
bounds_error=self.bounds_error,
fill_value=self.fill_value,
)
# return_units not respected when points has no units
if isinstance(self.lookup_table, u.Quantity) and not isinstance(
result, u.Quantity
):
result = result * self.lookup_table.unit
if self.n_outputs == 1:
result = result.reshape(shape)
else:
result = [r.reshape(shape) for r in result]
return result
@property
def inverse(self):
if self.n_inputs == 1:
# If the wavelength array is descending instead of ascending, both
# points and lookup_table need to be reversed in the inverse transform
# for scipy.interpolate to work properly
if np.all(np.diff(self.lookup_table) > 0):
# ascending case
points = self.lookup_table
lookup_table = self.points[0]
elif np.all(np.diff(self.lookup_table) < 0):
# descending case, reverse order
points = self.lookup_table[::-1]
lookup_table = self.points[0][::-1]
else:
# equal-valued or double-valued lookup_table
raise NotImplementedError
return Tabular1D(
points=points,
lookup_table=lookup_table,
method=self.method,
bounds_error=self.bounds_error,
fill_value=self.fill_value,
)
raise NotImplementedError(
"An analytical inverse transform has not been implemented for this model."
)
def tabular_model(dim, name=None):
"""
Make a ``Tabular`` model where ``n_inputs`` is
based on the dimension of the lookup_table.
This model has to be further initialized and when evaluated
returns the interpolated values.
Parameters
----------
dim : int
Dimensions of the lookup table.
name : str
Name for the class.
Examples
--------
>>> import numpy as np
>>> from astropy.modeling.models import tabular_model
>>> tab = tabular_model(2, name='Tabular2D')
>>> print(tab)
<class 'astropy.modeling.tabular.Tabular2D'>
Name: Tabular2D
N_inputs: 2
N_outputs: 1
Setting ``fill_value`` to `None` allows extrapolation.
>>> points = ([1, 2, 3], [1, 2, 3])
>>> table = np.array([[3., 0., 0.],
... [0., 2., 0.],
... [0., 0., 0.]])
>>> model = tab(points, lookup_table=table, name='my_table',
... bounds_error=False, fill_value=None, method='nearest')
>>> xinterp = [0, 1, 1.5, 2.72, 3.14]
>>> model(xinterp, xinterp) # doctest: +FLOAT_CMP
array([3., 3., 3., 0., 0.])
"""
if dim < 1:
raise ValueError("Lookup table must have at least one dimension.")
table = np.zeros([2] * dim)
members = {"lookup_table": table, "n_inputs": dim, "n_outputs": 1}
if dim == 1:
members["_separable"] = True
else:
members["_separable"] = False
if name is None:
model_id = _Tabular._id
_Tabular._id += 1
name = f"Tabular{model_id}"
model_class = type(str(name), (_Tabular,), members)
model_class.__module__ = "astropy.modeling.tabular"
return model_class
Tabular1D = tabular_model(1, name="Tabular1D")
Tabular2D = tabular_model(2, name="Tabular2D")
_tab_docs = """
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then ``fill_value`` is used.
fill_value : float, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
value : ndarray
Interpolated values at input coordinates.
Raises
------
ImportError
Scipy is not installed.
Notes
-----
Uses `scipy.interpolate.interpn`.
"""
Tabular1D.__doc__ = (
"""
Tabular model in 1D.
Returns an interpolated lookup table value.
Parameters
----------
points : array-like of float of ndim=1.
The points defining the regular grid in n dimensions.
lookup_table : array-like, of ndim=1.
The data in one dimensions.
"""
+ _tab_docs
)
Tabular2D.__doc__ = (
"""
Tabular model in 2D.
Returns an interpolated lookup table value.
Parameters
----------
points : tuple of ndarray of float, optional
The points defining the regular grid in n dimensions.
ndarray with shapes (m1, m2).
lookup_table : array-like
The data on a regular grid in 2 dimensions.
Shape (m1, m2).
"""
+ _tab_docs
)
| _Tabular |
python | getsentry__sentry | tests/sentry/core/endpoints/test_project_keys.py | {
"start": 5476,
"end": 9258
} | class ____(APITestCase):
def test_simple(self) -> None:
project = self.create_project()
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-keys",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
resp = self.client.post(
url, data={"name": "hello world", "rateLimit": {"count": 10, "window": 60}}
)
assert resp.status_code == 201, resp.content
key = ProjectKey.objects.get(public_key=resp.data["public"])
assert key.label == "hello world"
assert key.rate_limit_count == 10
assert key.rate_limit_window == 60
assert "dynamicSdkLoaderOptions" in key.data
assert key.data["dynamicSdkLoaderOptions"] == {
"hasPerformance": True,
"hasReplay": True,
}
def test_minimal_args(self) -> None:
project = self.create_project()
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-keys",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
resp = self.client.post(url)
assert resp.status_code == 201, resp.content
key = ProjectKey.objects.get(public_key=resp.data["public"])
assert key.label
assert "dynamicSdkLoaderOptions" in key.data
assert key.data["dynamicSdkLoaderOptions"] == {
"hasPerformance": True,
"hasReplay": True,
}
def test_keys(self) -> None:
project = self.create_project()
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-keys",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
resp = self.client.post(url, data={"public": "a" * 32, "secret": "b" * 32})
assert resp.status_code == 201, resp.content
key = ProjectKey.objects.get(public_key=resp.data["public"])
assert key.public_key == resp.data["public"] == "a" * 32
assert key.secret_key == resp.data["secret"] == "b" * 32
def test_cannot_create_internal(self) -> None:
"""POST request ignores use case field"""
project = self.create_project()
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-keys",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
resp = self.client.post(
url, data={"public": "a" * 32, "secret": "b" * 32, "useCase": "profiling"}
)
assert resp.status_code == 201, resp.content
key = ProjectKey.objects.get(public_key=resp.data["public"])
assert key.use_case == "user"
def test_superuser_can_create_internal(self) -> None:
project = self.create_project()
self.user = self.create_user(is_superuser=True)
self.login_as(user=self.user, superuser=True)
url = reverse(
"sentry-api-0-project-keys",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
resp = self.client.post(
url, data={"public": "a" * 32, "secret": "b" * 32, "useCase": "demo"}
)
assert resp.status_code == 201, resp.content
key = ProjectKey.objects.get(public_key=resp.data["public"])
assert key.use_case == "demo"
| CreateProjectKeyTest |
python | python__mypy | mypy/stubgen.py | {
"start": 59444,
"end": 78478
} | class ____(mypy.traverser.TraverserVisitor):
def __init__(self) -> None:
self.results: list[tuple[str, Expression, Type | None]] = []
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
lvalue = o.lvalues[0]
if (
isinstance(lvalue, MemberExpr)
and isinstance(lvalue.expr, NameExpr)
and lvalue.expr.name == "self"
):
self.results.append((lvalue.name, o.rvalue, o.unanalyzed_type))
def find_self_initializers(fdef: FuncBase) -> list[tuple[str, Expression, Type | None]]:
"""Find attribute initializers in a method.
Return a list of pairs (attribute name, r.h.s. expression).
"""
traverser = SelfTraverser()
fdef.accept(traverser)
return traverser.results
def get_qualified_name(o: Expression) -> str:
if isinstance(o, NameExpr):
return o.name
elif isinstance(o, MemberExpr):
return f"{get_qualified_name(o.expr)}.{o.name}"
else:
return ERROR_MARKER
def remove_blacklisted_modules(modules: list[StubSource]) -> list[StubSource]:
return [
module for module in modules if module.path is None or not is_blacklisted_path(module.path)
]
def split_pyc_from_py(modules: list[StubSource]) -> tuple[list[StubSource], list[StubSource]]:
py_modules = []
pyc_modules = []
for mod in modules:
if is_pyc_only(mod.path):
pyc_modules.append(mod)
else:
py_modules.append(mod)
return pyc_modules, py_modules
def is_blacklisted_path(path: str) -> bool:
return any(substr in (normalize_path_separators(path) + "\n") for substr in BLACKLIST)
def normalize_path_separators(path: str) -> str:
return path.replace("\\", "/") if sys.platform == "win32" else path
def collect_build_targets(
options: Options, mypy_opts: MypyOptions
) -> tuple[list[StubSource], list[StubSource], list[StubSource]]:
"""Collect files for which we need to generate stubs.
Return list of py modules, pyc modules, and C modules.
"""
if options.packages or options.modules:
if options.no_import:
py_modules = find_module_paths_using_search(
options.modules, options.packages, options.search_path, options.pyversion
)
c_modules: list[StubSource] = []
else:
# Using imports is the default, since we can also find C modules.
py_modules, c_modules = find_module_paths_using_imports(
options.modules, options.packages, options.verbose, options.quiet
)
else:
# Use mypy native source collection for files and directories.
try:
source_list = create_source_list(options.files, mypy_opts)
except InvalidSourceList as e:
raise SystemExit(str(e)) from e
py_modules = [StubSource(m.module, m.path) for m in source_list]
c_modules = []
py_modules = remove_blacklisted_modules(py_modules)
pyc_mod, py_mod = split_pyc_from_py(py_modules)
return py_mod, pyc_mod, c_modules
def find_module_paths_using_imports(
modules: list[str], packages: list[str], verbose: bool, quiet: bool
) -> tuple[list[StubSource], list[StubSource]]:
"""Find path and runtime value of __all__ (if possible) for modules and packages.
This function uses runtime Python imports to get the information.
"""
with ModuleInspect() as inspect:
py_modules: list[StubSource] = []
c_modules: list[StubSource] = []
found = list(walk_packages(inspect, packages, verbose))
modules = modules + found
modules = [
mod for mod in modules if not is_non_library_module(mod)
] # We don't want to run any tests or scripts
for mod in modules:
try:
result = find_module_path_and_all_py3(inspect, mod, verbose)
except CantImport as e:
tb = traceback.format_exc()
if verbose:
sys.stderr.write(tb)
if not quiet:
report_missing(mod, e.message, tb)
continue
if not result:
c_modules.append(StubSource(mod))
else:
path, runtime_all = result
py_modules.append(StubSource(mod, path, runtime_all))
return py_modules, c_modules
def is_non_library_module(module: str) -> bool:
"""Does module look like a test module or a script?"""
if module.endswith(
(
".tests",
".test",
".testing",
"_tests",
"_test_suite",
"test_util",
"test_utils",
"test_base",
".__main__",
".conftest", # Used by pytest
".setup", # Typically an install script
)
):
return True
if module.split(".")[-1].startswith("test_"):
return True
if (
".tests." in module
or ".test." in module
or ".testing." in module
or ".SelfTest." in module
):
return True
return False
def translate_module_name(module: str, relative: int) -> tuple[str, int]:
for pkg in VENDOR_PACKAGES:
for alt in "six.moves", "six":
substr = f"{pkg}.{alt}"
if module.endswith("." + substr) or (module == substr and relative):
return alt, 0
if "." + substr + "." in module:
return alt + "." + module.partition("." + substr + ".")[2], 0
return module, relative
def find_module_paths_using_search(
modules: list[str], packages: list[str], search_path: list[str], pyversion: tuple[int, int]
) -> list[StubSource]:
"""Find sources for modules and packages requested.
This function just looks for source files at the file system level.
This is used if user passes --no-import, and will not find C modules.
Exit if some of the modules or packages can't be found.
"""
result: list[StubSource] = []
typeshed_path = default_lib_path(mypy.build.default_data_dir(), pyversion, None)
search_paths = SearchPaths((".",) + tuple(search_path), (), (), tuple(typeshed_path))
cache = FindModuleCache(search_paths, fscache=None, options=None)
for module in modules:
m_result = cache.find_module(module)
if isinstance(m_result, ModuleNotFoundReason):
fail_missing(module, m_result)
module_path = None
else:
module_path = m_result
result.append(StubSource(module, module_path))
for package in packages:
p_result = cache.find_modules_recursive(package)
if p_result:
fail_missing(package, ModuleNotFoundReason.NOT_FOUND)
sources = [StubSource(m.module, m.path) for m in p_result]
result.extend(sources)
result = [m for m in result if not is_non_library_module(m.module)]
return result
def mypy_options(stubgen_options: Options) -> MypyOptions:
"""Generate mypy options using the flag passed by user."""
options = MypyOptions()
options.follow_imports = "skip"
options.incremental = False
options.ignore_errors = True
options.semantic_analysis_only = True
options.python_version = stubgen_options.pyversion
options.show_traceback = True
options.transform_source = remove_misplaced_type_comments
options.preserve_asts = True
options.include_docstrings = stubgen_options.include_docstrings
# Override cache_dir if provided in the environment
environ_cache_dir = os.getenv("MYPY_CACHE_DIR", "")
if environ_cache_dir.strip():
options.cache_dir = environ_cache_dir
options.cache_dir = os.path.expanduser(options.cache_dir)
return options
def parse_source_file(mod: StubSource, mypy_options: MypyOptions) -> None:
"""Parse a source file.
On success, store AST in the corresponding attribute of the stub source.
If there are syntax errors, print them and exit.
"""
assert mod.path is not None, "Not found module was not skipped"
with open(mod.path, "rb") as f:
data = f.read()
source = mypy.util.decode_python_encoding(data)
errors = Errors(mypy_options)
mod.ast = mypy.parse.parse(
source, fnam=mod.path, module=mod.module, errors=errors, options=mypy_options
)
mod.ast._fullname = mod.module
if errors.is_blockers():
# Syntax error!
for m in errors.new_messages():
sys.stderr.write(f"{m}\n")
sys.exit(1)
def generate_asts_for_modules(
py_modules: list[StubSource], parse_only: bool, mypy_options: MypyOptions, verbose: bool
) -> None:
"""Use mypy to parse (and optionally analyze) source files."""
if not py_modules:
return # Nothing to do here, but there may be C modules
if verbose:
print(f"Processing {len(py_modules)} files...")
if parse_only:
for mod in py_modules:
parse_source_file(mod, mypy_options)
return
# Perform full semantic analysis of the source set.
try:
res = build([module.source for module in py_modules], mypy_options)
except CompileError as e:
raise SystemExit(f"Critical error during semantic analysis: {e}") from e
for mod in py_modules:
mod.ast = res.graph[mod.module].tree
# Use statically inferred __all__ if there is no runtime one.
if mod.runtime_all is None:
mod.runtime_all = res.manager.semantic_analyzer.export_map[mod.module]
def generate_stub_for_py_module(
mod: StubSource,
target: str,
*,
parse_only: bool = False,
inspect: bool = False,
include_private: bool = False,
export_less: bool = False,
include_docstrings: bool = False,
doc_dir: str = "",
all_modules: list[str],
) -> None:
"""Use analysed (or just parsed) AST to generate type stub for single file.
If directory for target doesn't exist it will created. Existing stub
will be overwritten.
"""
if inspect:
ngen = InspectionStubGenerator(
module_name=mod.module,
known_modules=all_modules,
_all_=mod.runtime_all,
doc_dir=doc_dir,
include_private=include_private,
export_less=export_less,
include_docstrings=include_docstrings,
)
ngen.generate_module()
output = ngen.output()
else:
gen = ASTStubGenerator(
mod.runtime_all,
include_private=include_private,
analyzed=not parse_only,
export_less=export_less,
include_docstrings=include_docstrings,
)
assert mod.ast is not None, "This function must be used only with analyzed modules"
mod.ast.accept(gen)
output = gen.output()
# Write output to file.
subdir = os.path.dirname(target)
if subdir and not os.path.isdir(subdir):
os.makedirs(subdir)
with open(target, "w", encoding="utf-8") as file:
file.write(output)
def generate_stubs(options: Options) -> None:
"""Main entry point for the program."""
mypy_opts = mypy_options(options)
py_modules, pyc_modules, c_modules = collect_build_targets(options, mypy_opts)
all_modules = py_modules + pyc_modules + c_modules
all_module_names = sorted(m.module for m in all_modules)
# Use parsed sources to generate stubs for Python modules.
generate_asts_for_modules(py_modules, options.parse_only, mypy_opts, options.verbose)
files = []
for mod in py_modules + pyc_modules:
assert mod.path is not None, "Not found module was not skipped"
target = mod.module.replace(".", "/")
if os.path.basename(mod.path) in ["__init__.py", "__init__.pyc"]:
target += "/__init__.pyi"
else:
target += ".pyi"
target = os.path.join(options.output_dir, target)
files.append(target)
with generate_guarded(mod.module, target, options.ignore_errors, options.verbose):
generate_stub_for_py_module(
mod,
target,
parse_only=options.parse_only,
inspect=options.inspect or mod in pyc_modules,
include_private=options.include_private,
export_less=options.export_less,
include_docstrings=options.include_docstrings,
doc_dir=options.doc_dir,
all_modules=all_module_names,
)
# Separately analyse C modules using different logic.
for mod in c_modules:
if any(py_mod.module.startswith(mod.module + ".") for py_mod in all_modules):
target = mod.module.replace(".", "/") + "/__init__.pyi"
else:
target = mod.module.replace(".", "/") + ".pyi"
target = os.path.join(options.output_dir, target)
files.append(target)
with generate_guarded(mod.module, target, options.ignore_errors, options.verbose):
generate_stub_for_c_module(
mod.module,
target,
known_modules=all_module_names,
doc_dir=options.doc_dir,
include_private=options.include_private,
export_less=options.export_less,
include_docstrings=options.include_docstrings,
)
num_modules = len(all_modules)
if not options.quiet and num_modules > 0:
print("Processed %d modules" % num_modules)
if len(files) == 1:
print(f"Generated {files[0]}")
else:
print(f"Generated files under {common_dir_prefix(files)}" + os.sep)
HEADER = """%(prog)s [-h] [more options, see -h]
[-m MODULE] [-p PACKAGE] [files ...]"""
DESCRIPTION = """
Generate draft stubs for modules.
Stubs are generated in directory ./out, to avoid overriding files with
manual changes. This directory is assumed to exist.
"""
def parse_options(args: list[str]) -> Options:
parser = argparse.ArgumentParser(
prog="stubgen", usage=HEADER, description=DESCRIPTION, fromfile_prefix_chars="@"
)
parser.add_argument(
"--ignore-errors",
action="store_true",
help="ignore errors when trying to generate stubs for modules",
)
parser.add_argument(
"--no-import",
action="store_true",
help="don't import the modules, just parse and analyze them "
"(doesn't work with C extension modules and might not "
"respect __all__)",
)
parser.add_argument(
"--no-analysis",
"--parse-only",
dest="parse_only",
action="store_true",
help="don't perform semantic analysis of sources, just parse them "
"(only applies to Python modules, might affect quality of stubs. "
"Not compatible with --inspect-mode)",
)
parser.add_argument(
"--inspect-mode",
dest="inspect",
action="store_true",
help="import and inspect modules instead of parsing source code."
"This is the default behavior for c modules and pyc-only packages, but "
"it is also useful for pure python modules with dynamically generated members.",
)
parser.add_argument(
"--include-private",
action="store_true",
help="generate stubs for objects and members considered private "
"(single leading underscore and no trailing underscores)",
)
parser.add_argument(
"--export-less",
action="store_true",
help="don't implicitly export all names imported from other modules in the same package",
)
parser.add_argument(
"--include-docstrings",
action="store_true",
help="include existing docstrings with the stubs",
)
parser.add_argument("-v", "--verbose", action="store_true", help="show more verbose messages")
parser.add_argument("-q", "--quiet", action="store_true", help="show fewer messages")
parser.add_argument(
"--doc-dir",
metavar="PATH",
default="",
help="use .rst documentation in PATH (this may result in "
"better stubs in some cases; consider setting this to "
"DIR/Python-X.Y.Z/Doc/library)",
)
parser.add_argument(
"--search-path",
metavar="PATH",
default="",
help="specify module search directories, separated by ':' "
"(currently only used if --no-import is given)",
)
parser.add_argument(
"-o",
"--output",
metavar="PATH",
dest="output_dir",
default="out",
help="change the output directory [default: %(default)s]",
)
parser.add_argument(
"-m",
"--module",
action="append",
metavar="MODULE",
dest="modules",
default=[],
help="generate stub for module; can repeat for more modules",
)
parser.add_argument(
"-p",
"--package",
action="append",
metavar="PACKAGE",
dest="packages",
default=[],
help="generate stubs for package recursively; can be repeated",
)
parser.add_argument(
metavar="files",
nargs="*",
dest="files",
help="generate stubs for given files or directories",
)
parser.add_argument(
"--version", action="version", version="%(prog)s " + mypy.version.__version__
)
ns = parser.parse_args(args)
pyversion = sys.version_info[:2]
ns.interpreter = sys.executable
if ns.modules + ns.packages and ns.files:
parser.error("May only specify one of: modules/packages or files.")
if ns.quiet and ns.verbose:
parser.error("Cannot specify both quiet and verbose messages")
if ns.inspect and ns.parse_only:
parser.error("Cannot specify both --parse-only/--no-analysis and --inspect-mode")
# Create the output folder if it doesn't already exist.
os.makedirs(ns.output_dir, exist_ok=True)
return Options(
pyversion=pyversion,
no_import=ns.no_import,
inspect=ns.inspect,
doc_dir=ns.doc_dir,
search_path=ns.search_path.split(":"),
interpreter=ns.interpreter,
ignore_errors=ns.ignore_errors,
parse_only=ns.parse_only,
include_private=ns.include_private,
output_dir=ns.output_dir,
modules=ns.modules,
packages=ns.packages,
files=ns.files,
verbose=ns.verbose,
quiet=ns.quiet,
export_less=ns.export_less,
include_docstrings=ns.include_docstrings,
)
def main(args: list[str] | None = None) -> None:
mypy.util.check_python_version("stubgen")
# Make sure that the current directory is in sys.path so that
# stubgen can be run on packages in the current directory.
if not ("" in sys.path or "." in sys.path):
sys.path.insert(0, "")
options = parse_options(sys.argv[1:] if args is None else args)
generate_stubs(options)
if __name__ == "__main__":
main()
| SelfTraverser |
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 186256,
"end": 206133
} | class ____(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_needs_text(self):
User = self.classes.User
assert_raises_message(
sa_exc.ArgumentError,
"Textual SQL expression",
fixture_session().query(User).from_statement,
"select * from users order by id",
)
def test_select_star(self):
User = self.classes.User
eq_(
fixture_session()
.query(User)
.from_statement(text("select * from users order by id"))
.first(),
User(id=7),
)
eq_(
fixture_session()
.query(User)
.from_statement(
text("select * from users where name='nonexistent'")
)
.first(),
None,
)
def test_select_star_future(self):
User = self.classes.User
sess = fixture_session()
eq_(
sess.execute(
select(User).from_statement(
text("select * from users order by id")
)
)
.scalars()
.first(),
User(id=7),
)
eq_(
sess.execute(
select(User).from_statement(
text("select * from users where name='nonexistent'")
)
)
.scalars()
.first(),
None,
)
def test_columns_mismatched(self):
# test that columns using column._label match, as well as that
# ordering doesn't matter
User = self.classes.User
s = fixture_session()
q = s.query(User).from_statement(
text(
"select name, 27 as foo, id as users_id from users order by id"
)
)
eq_(
q.all(),
[
User(id=7, name="jack"),
User(id=8, name="ed"),
User(id=9, name="fred"),
User(id=10, name="chuck"),
],
)
def test_columns_mismatched_future(self):
# test that columns using column._label match, as well as that
# ordering doesn't matter
User = self.classes.User
s = fixture_session()
q = select(User).from_statement(
text(
"select name, 27 as foo, id as users_id from users order by id"
)
)
eq_(
s.execute(q).scalars().all(),
[
User(id=7, name="jack"),
User(id=8, name="ed"),
User(id=9, name="fred"),
User(id=10, name="chuck"),
],
)
def test_columns_multi_table_uselabels(self):
# test that columns using column._label match, as well as that
# ordering doesn't matter.
User = self.classes.User
Address = self.classes.Address
s = fixture_session()
q = s.query(User, Address).from_statement(
text(
"select users.name AS users_name, users.id AS users_id, "
"addresses.id AS addresses_id FROM users JOIN addresses "
"ON users.id = addresses.user_id WHERE users.id=8 "
"ORDER BY addresses.id"
)
)
eq_(
q.all(),
[
(User(id=8), Address(id=2)),
(User(id=8), Address(id=3)),
(User(id=8), Address(id=4)),
],
)
def test_columns_multi_table_uselabels_future(self):
# test that columns using column._label match, as well as that
# ordering doesn't matter.
User = self.classes.User
Address = self.classes.Address
s = fixture_session()
q = select(User, Address).from_statement(
text(
"select users.name AS users_name, users.id AS users_id, "
"addresses.id AS addresses_id FROM users JOIN addresses "
"ON users.id = addresses.user_id WHERE users.id=8 "
"ORDER BY addresses.id"
)
)
eq_(
s.execute(q).all(),
[
(User(id=8), Address(id=2)),
(User(id=8), Address(id=3)),
(User(id=8), Address(id=4)),
],
)
def test_columns_multi_table_uselabels_contains_eager(self):
# test that columns using column._label match, as well as that
# ordering doesn't matter.
User = self.classes.User
Address = self.classes.Address
s = fixture_session()
q = (
s.query(User)
.from_statement(
text(
"select users.name AS users_name, users.id AS users_id, "
"addresses.id AS addresses_id FROM users JOIN addresses "
"ON users.id = addresses.user_id WHERE users.id=8 "
"ORDER BY addresses.id"
)
)
.options(contains_eager(User.addresses))
)
def go():
r = q.all()
eq_(r[0].addresses, [Address(id=2), Address(id=3), Address(id=4)])
self.assert_sql_count(testing.db, go, 1)
def test_columns_multi_table_uselabels_contains_eager_future(self):
# test that columns using column._label match, as well as that
# ordering doesn't matter.
User = self.classes.User
Address = self.classes.Address
s = fixture_session()
q = (
select(User)
.from_statement(
text(
"select users.name AS users_name, users.id AS users_id, "
"addresses.id AS addresses_id FROM users JOIN addresses "
"ON users.id = addresses.user_id WHERE users.id=8 "
"ORDER BY addresses.id"
)
)
.options(contains_eager(User.addresses))
)
def go():
r = s.execute(q).unique().scalars().all()
eq_(r[0].addresses, [Address(id=2), Address(id=3), Address(id=4)])
self.assert_sql_count(testing.db, go, 1)
def test_columns_multi_table_uselabels_cols_contains_eager(self):
# test that columns using column._label match, as well as that
# ordering doesn't matter.
User = self.classes.User
Address = self.classes.Address
s = fixture_session()
q = (
s.query(User)
.from_statement(
text(
"select users.name AS users_name, users.id AS users_id, "
"addresses.id AS addresses_id FROM users JOIN addresses "
"ON users.id = addresses.user_id WHERE users.id=8 "
"ORDER BY addresses.id"
).columns(User.name, User.id, Address.id)
)
.options(contains_eager(User.addresses))
)
def go():
r = q.all()
eq_(r[0].addresses, [Address(id=2), Address(id=3), Address(id=4)])
self.assert_sql_count(testing.db, go, 1)
def test_columns_multi_table_uselabels_cols_contains_eager_future(self):
# test that columns using column._label match, as well as that
# ordering doesn't matter.
User = self.classes.User
Address = self.classes.Address
s = fixture_session()
q = (
select(User)
.from_statement(
text(
"select users.name AS users_name, users.id AS users_id, "
"addresses.id AS addresses_id FROM users JOIN addresses "
"ON users.id = addresses.user_id WHERE users.id=8 "
"ORDER BY addresses.id"
).columns(User.name, User.id, Address.id)
)
.options(contains_eager(User.addresses))
)
def go():
r = s.execute(q).unique().scalars().all()
eq_(r[0].addresses, [Address(id=2), Address(id=3), Address(id=4)])
self.assert_sql_count(testing.db, go, 1)
def test_textual_select_orm_columns(self):
# test that columns using column._label match, as well as that
# ordering doesn't matter.
User = self.classes.User
Address = self.classes.Address
users = self.tables.users
addresses = self.tables.addresses
s = fixture_session()
q = s.query(User.name, User.id, Address.id).from_statement(
text(
"select users.name AS users_name, users.id AS users_id, "
"addresses.id AS addresses_id FROM users JOIN addresses "
"ON users.id = addresses.user_id WHERE users.id=8 "
"ORDER BY addresses.id"
).columns(users.c.name, users.c.id, addresses.c.id)
)
eq_(q.all(), [("ed", 8, 2), ("ed", 8, 3), ("ed", 8, 4)])
@testing.combinations(
(
False,
subqueryload,
),
(
True,
subqueryload,
),
(False, selectinload),
(True, selectinload),
)
def test_related_eagerload_against_text(self, add_columns, loader_option):
# new in 1.4. textual selects have columns so subqueryloaders
# and selectinloaders can join onto them. we add columns
# automatiacally to TextClause as well, however subqueryloader
# is not working at the moment due to execution model refactor,
# it creates a subquery w/ adapter before those columns are
# available. this is a super edge case and as we want to rewrite
# the loaders to use select(), maybe we can get it then.
User = self.classes.User
text_clause = text("select * from users")
if add_columns:
text_clause = text_clause.columns(User.id, User.name)
s = fixture_session()
q = (
s.query(User)
.from_statement(text_clause)
.options(loader_option(User.addresses))
)
def go():
eq_(set(q.all()), set(self.static.user_address_result))
if loader_option is subqueryload:
# subqueryload necessarily degrades to lazy loads for a text
# statement.
self.assert_sql_count(testing.db, go, 5)
else:
self.assert_sql_count(testing.db, go, 2)
def test_whereclause(self):
User = self.classes.User
eq_(
fixture_session().query(User).filter(text("id in (8, 9)")).all(),
[User(id=8), User(id=9)],
)
eq_(
fixture_session()
.query(User)
.filter(text("name='fred'"))
.filter(text("id=9"))
.all(),
[User(id=9)],
)
eq_(
fixture_session()
.query(User)
.filter(text("name='fred'"))
.filter(User.id == 9)
.all(),
[User(id=9)],
)
def test_whereclause_future(self):
User = self.classes.User
s = fixture_session()
eq_(
s.execute(select(User).filter(text("id in (8, 9)")))
.scalars()
.all(),
[User(id=8), User(id=9)],
)
eq_(
s.execute(
select(User).filter(text("name='fred'")).filter(text("id=9"))
)
.scalars()
.all(),
[User(id=9)],
)
eq_(
s.execute(
select(User).filter(text("name='fred'")).filter(User.id == 9)
)
.scalars()
.all(),
[User(id=9)],
)
def test_binds_coerce(self):
User = self.classes.User
assert_raises_message(
sa_exc.ArgumentError,
r"Textual SQL expression 'id in \(:id1, :id2\)' "
"should be explicitly declared",
fixture_session().query(User).filter,
"id in (:id1, :id2)",
)
def test_plain_textual_column(self):
User = self.classes.User
s = fixture_session()
self.assert_compile(
s.query(User.id, text("users.name")),
"SELECT users.id AS users_id, users.name FROM users",
)
eq_(
s.query(User.id, text("users.name")).all(),
[(7, "jack"), (8, "ed"), (9, "fred"), (10, "chuck")],
)
eq_(
s.query(User.id, literal_column("name")).order_by(User.id).all(),
[(7, "jack"), (8, "ed"), (9, "fred"), (10, "chuck")],
)
def test_via_select(self):
User = self.classes.User
s = fixture_session()
eq_(
s.query(User)
.from_statement(
select(column("id"), column("name"))
.select_from(table("users"))
.order_by("id")
)
.all(),
[User(id=7), User(id=8), User(id=9), User(id=10)],
)
def test_via_textasfrom_from_statement(self):
User = self.classes.User
s = fixture_session()
eq_(
s.query(User)
.from_statement(
text("select * from users order by id").columns(
id=Integer, name=String
)
)
.all(),
[User(id=7), User(id=8), User(id=9), User(id=10)],
)
def test_columns_via_textasfrom_from_statement(self):
User = self.classes.User
s = fixture_session()
eq_(
s.query(User.id, User.name)
.from_statement(
text("select * from users order by id").columns(
id=Integer, name=String
)
)
.all(),
[(7, "jack"), (8, "ed"), (9, "fred"), (10, "chuck")],
)
def test_via_textasfrom_use_mapped_columns(self):
User = self.classes.User
s = fixture_session()
eq_(
s.query(User)
.from_statement(
text("select * from users order by id").columns(
User.id, User.name
)
)
.all(),
[User(id=7), User(id=8), User(id=9), User(id=10)],
)
def test_via_textasfrom_aliased(self):
User = self.classes.User
s = fixture_session()
ua = aliased(
User,
text("select * from users").columns(User.id, User.name).subquery(),
)
eq_(
s.query(ua).order_by(ua.id).all(),
[User(id=7), User(id=8), User(id=9), User(id=10)],
)
def test_group_by_accepts_text(self):
User = self.classes.User
s = fixture_session()
q = s.query(User).group_by(text("name"))
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users GROUP BY name",
)
def test_order_by_w_eager_one(self):
User = self.classes.User
s = fixture_session()
# from 1.0.0 thru 1.0.2, the "name" symbol here was considered
# to be part of the things we need to ORDER BY and it was being
# placed into the inner query's columns clause, as part of
# query._compound_eager_statement where we add unwrap_order_by()
# to the columns clause. However, as #3392 illustrates, unlocatable
# string expressions like "name desc" will only fail in this scenario,
# so in general the changing of the query structure with string labels
# is dangerous.
#
# the queries here are again "invalid" from a SQL perspective, as the
# "name" field isn't matched up to anything.
#
q = (
s.query(User)
.options(joinedload(User.addresses))
.order_by(desc("name"))
.limit(1)
)
assert_raises_message(
sa_exc.CompileError,
"Can't resolve label reference for ORDER BY / GROUP BY.",
q.set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
).statement.compile,
)
def test_order_by_w_eager_two(self):
User = self.classes.User
s = fixture_session()
q = (
s.query(User)
.options(joinedload(User.addresses))
.order_by("name")
.limit(1)
)
assert_raises_message(
sa_exc.CompileError,
"Can't resolve label reference for ORDER BY / GROUP BY.",
q.set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
).statement.compile,
)
def test_order_by_w_eager_three(self):
User = self.classes.User
s = fixture_session()
self.assert_compile(
s.query(User)
.options(joinedload(User.addresses))
.order_by("users_name")
.limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.name "
"LIMIT :param_1) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY anon_1.users_name, addresses_1.id",
)
# however! this works (again?)
eq_(
s.query(User)
.options(joinedload(User.addresses))
.order_by("users_name")
.first(),
User(name="chuck", addresses=[]),
)
def test_order_by_w_eager_four(self):
User = self.classes.User
Address = self.classes.Address
s = fixture_session()
self.assert_compile(
s.query(User)
.options(joinedload(User.addresses))
.order_by(desc("users_name"))
.limit(1),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.user_id AS addresses_1_user_id, "
"addresses_1.email_address AS addresses_1_email_address "
"FROM (SELECT users.id AS users_id, users.name AS users_name "
"FROM users ORDER BY users.name DESC "
"LIMIT :param_1) AS anon_1 "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY anon_1.users_name DESC, addresses_1.id",
)
# however! this works (again?)
eq_(
s.query(User)
.options(joinedload(User.addresses))
.order_by(desc("users_name"))
.first(),
User(name="jack", addresses=[Address()]),
)
def test_order_by_w_eager_five(self):
"""essentially the same as test_eager_relations -> test_limit_3,
but test for textual label elements that are freeform.
this is again #3392."""
User = self.classes.User
Address = self.classes.Address
sess = fixture_session()
q = sess.query(User, Address.email_address.label("email_address"))
result = (
q.join(User.addresses)
.options(joinedload(User.orders))
.order_by("email_address desc")
.limit(1)
.offset(0)
)
assert_raises_message(
sa_exc.CompileError,
"Can't resolve label reference for ORDER BY / GROUP BY",
result.all,
)
| TextTest |
python | keras-team__keras | keras/src/metrics/f_score_metrics.py | {
"start": 9225,
"end": 11743
} | class ____(FBetaScore):
r"""Computes F-1 Score.
Formula:
```python
f1_score = 2 * (precision * recall) / (precision + recall)
```
This is the harmonic mean of precision and recall.
Its output range is `[0, 1]`. It works for both multi-class
and multi-label classification.
Args:
average: Type of averaging to be performed on data.
Acceptable values are `None`, `"micro"`, `"macro"`
and `"weighted"`. Defaults to `None`.
If `None`, no averaging is performed and `result()` will return
the score for each class.
If `"micro"`, compute metrics globally by counting the total
true positives, false negatives and false positives.
If `"macro"`, compute metrics for each label,
and return their unweighted mean.
This does not take label imbalance into account.
If `"weighted"`, compute metrics for each label,
and return their average weighted by support
(the number of true instances for each label).
This alters `"macro"` to account for label imbalance.
It can result in an score that is not between precision and recall.
threshold: Elements of `y_pred` greater than `threshold` are
converted to be 1, and the rest 0. If `threshold` is
`None`, the argmax of `y_pred` is converted to 1, and the rest to 0.
name: Optional. String name of the metric instance.
dtype: Optional. Data type of the metric result.
Returns:
F-1 Score: float.
Example:
>>> metric = keras.metrics.F1Score(threshold=0.5)
>>> y_true = np.array([[1, 1, 1],
... [1, 0, 0],
... [1, 1, 0]], np.int32)
>>> y_pred = np.array([[0.2, 0.6, 0.7],
... [0.2, 0.6, 0.6],
... [0.6, 0.8, 0.0]], np.float32)
>>> metric.update_state(y_true, y_pred)
>>> result = metric.result()
array([0.5 , 0.8 , 0.6666667], dtype=float32)
"""
def __init__(
self,
average=None,
threshold=None,
name="f1_score",
dtype=None,
):
super().__init__(
average=average,
beta=1.0,
threshold=threshold,
name=name,
dtype=dtype,
)
def get_config(self):
base_config = super().get_config()
del base_config["beta"]
return base_config
| F1Score |
python | pytest-dev__pytest | testing/test_tmpdir.py | {
"start": 1016,
"end": 1441
} | class ____:
basetemp: str | Path
@property
def trace(self):
return self
def get(self, key):
return lambda *k: None
def getini(self, name):
if name == "tmp_path_retention_count":
return 3
elif name == "tmp_path_retention_policy":
return "all"
else:
assert False
@property
def option(self):
return self
| FakeConfig |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 338423,
"end": 339134
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("EnterpriseServerUserAccountsUploadEdge"),
graphql_name="edges",
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("EnterpriseServerUserAccountsUpload"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| EnterpriseServerUserAccountsUploadConnection |
python | langchain-ai__langchain | libs/core/tests/unit_tests/test_tools.py | {
"start": 33106,
"end": 33322
} | class ____(BaseTool):
name: str = "Foo"
description: str = "Foo"
@override
def _run(self, bar: Any, bar_config: RunnableConfig, **kwargs: Any) -> Any:
return assert_bar(bar, bar_config)
| FooBase |
python | kamyu104__LeetCode-Solutions | Python/reorganize-string.py | {
"start": 108,
"end": 917
} | class ____(object):
def reorganizeString(self, S):
"""
:type S: str
:rtype: str
"""
counts = collections.Counter(S)
if any(v > (len(S)+1)/2 for k, v in counts.iteritems()):
return ""
result = []
max_heap = []
for k, v in counts.iteritems():
heapq.heappush(max_heap, (-v, k))
while len(max_heap) > 1:
count1, c1 = heapq.heappop(max_heap)
count2, c2 = heapq.heappop(max_heap)
if not result or c1 != result[-1]:
result.extend([c1, c2])
if count1+1: heapq.heappush(max_heap, (count1+1, c1))
if count2+1: heapq.heappush(max_heap, (count2+1, c2))
return "".join(result) + (max_heap[0][1] if max_heap else '')
| Solution |
python | walkccc__LeetCode | solutions/1909. Remove One Element to Make the Array Strictly Increasing/1909.py | {
"start": 0,
"end": 368
} | class ____:
def canBeIncreasing(self, nums: list[int]) -> bool:
removed = False
for i in range(1, len(nums)):
if nums[i - 1] >= nums[i]:
if removed:
return False
removed = True # Remove nums[i - 1].
if i > 1 and nums[i - 2] >= nums[i]:
nums[i] = nums[i - 1] # Remove nums[i] instead.
return True
| Solution |
python | scrapy__scrapy | tests/test_utils_request.py | {
"start": 8023,
"end": 8282
} | class ____:
def test_fingerprint(self):
crawler = get_crawler()
request = Request("https://example.com")
assert crawler.request_fingerprinter.fingerprint(request) == fingerprint(
request
)
| TestRequestFingerprinter |
python | scikit-learn__scikit-learn | sklearn/preprocessing/_label.py | {
"start": 1000,
"end": 5085
} | class ____(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None):
"""Encode target labels with value between 0 and n_classes-1.
This transformer should be used to encode target values, *i.e.* `y`, and
not the input `X`.
Read more in the :ref:`User Guide <preprocessing_targets>`.
.. versionadded:: 0.12
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Holds the label for each class.
See Also
--------
OrdinalEncoder : Encode categorical features using an ordinal encoding
scheme.
OneHotEncoder : Encode categorical features as a one-hot numeric array.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn.preprocessing import LabelEncoder
>>> le = LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6])
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
[np.str_('amsterdam'), np.str_('paris'), np.str_('tokyo')]
>>> le.transform(["tokyo", "tokyo", "paris"])
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
[np.str_('tokyo'), np.str_('tokyo'), np.str_('paris')]
"""
def fit(self, y):
"""Fit label encoder.
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
Fitted label encoder.
"""
y = column_or_1d(y, warn=True)
self.classes_ = _unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels.
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
y : array-like of shape (n_samples,)
Encoded labels.
"""
y = column_or_1d(y, warn=True)
self.classes_, y = _unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
y : array-like of shape (n_samples,)
Labels as normalized encodings.
"""
check_is_fitted(self)
xp, _ = get_namespace(y)
y = column_or_1d(y, dtype=self.classes_.dtype, warn=True)
# transform of empty array is empty array
if _num_samples(y) == 0:
return xp.asarray([])
return _encode(y, uniques=self.classes_)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
y_original : ndarray of shape (n_samples,)
Original encoding.
"""
check_is_fitted(self)
xp, _ = get_namespace(y)
y = column_or_1d(y, warn=True)
# inverse transform of empty array is empty array
if _num_samples(y) == 0:
return xp.asarray([])
diff = xpx.setdiff1d(
y,
xp.arange(self.classes_.shape[0], device=device(y)),
xp=xp,
)
if diff.shape[0]:
raise ValueError("y contains previously unseen labels: %s" % str(diff))
y = xp.asarray(y)
return xp.take(self.classes_, y, axis=0)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.array_api_support = True
tags.input_tags.two_d_array = False
tags.target_tags.one_d_labels = True
return tags
| LabelEncoder |
python | django__django | django/test/utils.py | {
"start": 12956,
"end": 15394
} | class ____:
"""
A base class that can either be used as a context manager during tests
or as a test function or unittest.TestCase subclass decorator to perform
temporary alterations.
`attr_name`: attribute assigned the return value of enable() if used as
a class decorator.
`kwarg_name`: keyword argument passing the return value of enable() if
used as a function decorator.
"""
def __init__(self, attr_name=None, kwarg_name=None):
self.attr_name = attr_name
self.kwarg_name = kwarg_name
def enable(self):
raise NotImplementedError
def disable(self):
raise NotImplementedError
def __enter__(self):
return self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def decorate_class(self, cls):
if issubclass(cls, TestCase):
decorated_setUp = cls.setUp
def setUp(inner_self):
context = self.enable()
inner_self.addCleanup(self.disable)
if self.attr_name:
setattr(inner_self, self.attr_name, context)
decorated_setUp(inner_self)
cls.setUp = setUp
return cls
raise TypeError("Can only decorate subclasses of unittest.TestCase")
def decorate_callable(self, func):
if iscoroutinefunction(func):
# If the inner function is an async function, we must execute async
# as well so that the `with` statement executes at the right time.
@wraps(func)
async def inner(*args, **kwargs):
with self as context:
if self.kwarg_name:
kwargs[self.kwarg_name] = context
return await func(*args, **kwargs)
else:
@wraps(func)
def inner(*args, **kwargs):
with self as context:
if self.kwarg_name:
kwargs[self.kwarg_name] = context
return func(*args, **kwargs)
return inner
def __call__(self, decorated):
if isinstance(decorated, type):
return self.decorate_class(decorated)
elif callable(decorated):
return self.decorate_callable(decorated)
raise TypeError("Cannot decorate object of type %s" % type(decorated))
| TestContextDecorator |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.