repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
metoppv/improver | improver_tests/lightning/test_LightningFromCapePrecip.py | 3 | 8796 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Test methods in lightning.LightningFromCapePrecip"""
from datetime import datetime
import numpy as np
import pytest
from iris.cube import Cube, CubeList
from improver.lightning import LightningFromCapePrecip
from improver.metadata.constants.attributes import MANDATORY_ATTRIBUTE_DEFAULTS
from improver.synthetic_data.set_up_test_cubes import (
add_coordinate,
set_up_probability_cube,
set_up_variable_cube,
)
@pytest.fixture(name="cape_cube")
def cape_cube_fixture() -> Cube:
    """
    Set up a CAPE cube for use in tests.
    Has 2 realizations, 7 latitudes spanning from 60S to 60N and 3 longitudes.
    """
    cape_data = np.full((2, 7, 3), fill_value=400, dtype=np.float32)
    return set_up_variable_cube(
        cape_data,
        name="atmosphere_convective_available_potential_energy",
        units="J kg-1",
        time=datetime(2017, 11, 10, 4, 0),
        time_bounds=None,
        attributes=None,
        standard_grid_metadata="gl_ens",
        domain_corner=(-60, 0),
        grid_spacing=20,
    )
@pytest.fixture(name="precip_cube")
def precip_cube_fixture() -> Cube:
    """
    Set up a precipitation rate cube for use in tests.
    Has 2 realizations, 7 latitudes spanning from 60S to 60N and 3 longitudes.
    Contains the value of 3 mm h-1 at all points (in SI units)
    """
    # 3 mm h-1 expressed in m s-1 (divide by 1000 * 3600).
    precip_data = np.full((2, 7, 3), fill_value=3 / 3.6e6, dtype=np.float32)
    return set_up_variable_cube(
        precip_data,
        name="precipitation_rate_max-PT01H",
        units="m s-1",
        time=datetime(2017, 11, 10, 5, 0),
        time_bounds=(datetime(2017, 11, 10, 4, 0), datetime(2017, 11, 10, 5, 0)),
        attributes=None,
        standard_grid_metadata="gl_ens",
        domain_corner=(-60, 0),
        grid_spacing=20,
    )
@pytest.fixture(name="expected_cube")
def expected_cube_fixture() -> Cube:
    """
    Set up the Lightning cube that we expect to get from the plugin
    """
    # Probability of one everywhere except the central latitude band.
    prob_data = np.ones((1, 7, 3), dtype=np.float32)
    prob_data[:, 2:-2, :] = 0
    expected = set_up_probability_cube(
        prob_data,
        thresholds=[0.0],
        variable_name="number_of_lightning_flashes_per_unit_area",
        threshold_units="m-2",
        time=datetime(2017, 11, 10, 5, 0),
        time_bounds=(datetime(2017, 11, 10, 4, 0), datetime(2017, 11, 10, 5, 0)),
        attributes=MANDATORY_ATTRIBUTE_DEFAULTS,
        domain_corner=(-60, 0),
        grid_spacing=20,
    )
    expected = add_coordinate(
        expected,
        coord_name="realization",
        coord_points=[0, 1],
        coord_units="1",
        dtype=np.int32,
    )
    return expected
def test_basic(cape_cube, precip_cube, expected_cube):
    """Run the plugin and check the result cube matches the expected_cube"""
    result = LightningFromCapePrecip()(CubeList([cape_cube, precip_cube]))
    result_xml = result.xml().splitlines(keepends=True)
    expected_xml = expected_cube.xml().splitlines(keepends=True)
    # Compare metadata via the XML representation, then the data separately.
    assert result_xml == expected_xml
    assert np.allclose(result.data, expected_cube.data)
def test_3h_cubes(cape_cube, precip_cube, expected_cube):
    """Run the plugin again with 3h cubes"""
    # Shift the CAPE validity time back two hours so it sits at the start of
    # a three-hour precipitation window.
    cape_cube.coord("time").points = cape_cube.coord("time").points - 2 * 3600
    bounds = precip_cube.coord("time").bounds
    # Widen the precip time window from one to three hours by moving the
    # lower bound back two hours; the upper bound (validity time) is unchanged.
    precip_cube.coord("time").bounds = (bounds[0][0] - 2 * 3600, bounds[0][1])
    precip_cube.rename("precipitation_rate_max-PT03H")
    # The expected output carries the same widened time window.
    expected_cube.coord("time").bounds = (bounds[0][0] - 2 * 3600, bounds[0][1])
    result = LightningFromCapePrecip()(CubeList([cape_cube, precip_cube]))
    assert result.xml().splitlines(keepends=True) == expected_cube.xml().splitlines(
        keepends=True
    )
    assert np.allclose(result.data, expected_cube.data)
def test_with_model_attribute(cape_cube, precip_cube, expected_cube):
    """Run the plugin with model_id_attr and check the result cube matches the expected_cube"""
    expected_cube.attributes["mosg__model_configuration"] = "gl_ens"
    plugin = LightningFromCapePrecip()
    result = plugin(
        CubeList([cape_cube, precip_cube]), model_id_attr="mosg__model_configuration"
    )
    result_xml = result.xml().splitlines(keepends=True)
    expected_xml = expected_cube.xml().splitlines(keepends=True)
    assert result_xml == expected_xml
    assert np.allclose(result.data, expected_cube.data)
def break_time_point(cape_cube, precip_cube):
    """Increments the cape_cube time points by 1 second and returns the
    error message this will trigger."""
    time_coord = cape_cube.coord("time")
    time_coord.points = time_coord.points + 1
    return r"CAPE cube time .* should be valid at the precipitation_rate_max cube lower bound .*"
def break_time_bound(cape_cube, precip_cube):
    """Increments the upper bound on the precip_cube time coord by 1 second
    and returns the error message this will trigger."""
    lower, upper = precip_cube.coord("time").bounds[0]
    precip_cube.coord("time").bounds = (lower, upper + 1)
    return r"Precipitation_rate_max cube time window must be one or three hours, not .*"
def break_reference_time(cape_cube, precip_cube):
    """Increments the precip_cube forecast_reference_time points by 1 second
    and returns the error message this will trigger."""
    frt_coord = precip_cube.coord("forecast_reference_time")
    frt_coord.points = frt_coord.points + 1
    return r"Supplied cubes must have the same forecast reference times"
def break_latitude_point(cape_cube, precip_cube):
    """Adds one degree to the first latitude point of the precip_cube and
    returns the error message this will trigger."""
    latitude = precip_cube.coord("latitude")
    updated = list(latitude.points)
    updated[0] += 1
    latitude.points = updated
    return "Supplied cubes do not have the same spatial coordinates"
def break_units(cape_cube, precip_cube):
    """Gives the precip_cube units incompatible with "mm h-1" and returns
    the error message this will trigger."""
    precip_cube.units = "m"  # a length, not convertible to a rate
    return r"Unable to convert from 'Unit\('m'\)' to 'Unit\('mm h-1'\)'."
def break_precip_name(cape_cube, precip_cube):
    """Renames the precip_cube and returns the error message this will trigger."""
    unexpected_name = "precipitation_rate"
    precip_cube.rename(unexpected_name)
    return "No cube named precipitation_rate_max found in .*"
def break_cape_name(cape_cube, precip_cube):
    """Renames the cape_cube and returns the error message this will trigger."""
    unexpected_name = "CAPE"
    cape_cube.rename(unexpected_name)
    return "No cube named atmosphere_convective_available_potential_energy found in .*"
@pytest.mark.parametrize(
    "breaking_function",
    (
        break_time_point,
        break_time_bound,
        break_reference_time,
        break_latitude_point,
        break_units,
        break_precip_name,
        break_cape_name,
    ),
)
def test_exceptions(cape_cube, precip_cube, breaking_function):
    """Tests that a suitable exception is raised when the precip cube meta-data does
    not match the cape cube"""
    # Each breaking function mutates one of the fixture cubes in place and
    # returns the regex expected to match the resulting error message.
    error_msg = breaking_function(cape_cube, precip_cube)
    with pytest.raises(ValueError, match=error_msg):
        LightningFromCapePrecip()(CubeList([cape_cube, precip_cube]))
| bsd-3-clause | bbb16a2d45b947260c8fd94d9446068b | 37.748899 | 97 | 0.68156 | 3.590204 | false | true | false | false |
metoppv/improver | improver_tests/generate_ancillaries/test_GenerateTimezoneMask.py | 3 | 16860 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the GenerateTimezoneMask plugin."""
from datetime import datetime
import iris
import numpy as np
import pytest
import pytz
from iris.cube import Cube, CubeList
from numpy.testing import assert_array_almost_equal, assert_array_equal
from improver.generate_ancillaries.generate_timezone_mask import GenerateTimezoneMask
from improver.metadata.constants.time_types import TIME_COORDS
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
# Skip this whole module if the optional timezone dependencies are missing.
pytest.importorskip("timezonefinder")
pytest.importorskip("numba")

# Attributes applied to the synthetic global/European grid fixtures below.
GLOBAL_ATTRIBUTES = {
    "title": "MOGREPS-G Model Forecast on Global 20 km Standard Grid",
    "source": "Met Office Unified Model",
    "institution": "Met Office",
}

# Attributes applied to the synthetic UK grid fixture below.
UK_ATTRIBUTES = {
    "title": "MOGREPS-UK Model Forecast on UK 2 km Standard Grid",
    "source": "Met Office Unified Model",
    "institution": "Met Office",
}
@pytest.fixture(name="global_grid")
def global_grid_fixture() -> Cube:
    """Global grid template"""
    grid_data = np.zeros((19, 37), dtype=np.float32)
    return set_up_variable_cube(
        grid_data,
        name="template",
        attributes=GLOBAL_ATTRIBUTES,
        domain_corner=(-90, -180),
        grid_spacing=10,
    )
@pytest.fixture(name="global_grid_360")
def global_grid_fixture_360() -> Cube:
    """Global grid template with longitude running 0 to 360"""
    grid_data = np.zeros((19, 37), dtype=np.float32)
    return set_up_variable_cube(
        grid_data,
        name="template",
        attributes=GLOBAL_ATTRIBUTES,
        domain_corner=(-90, 0),
        grid_spacing=10,
    )
@pytest.fixture(name="europe_grid")
def europe_grid_fixture() -> Cube:
    """European lat-lon grid template.
    Has 10 x 10 points at 1 degree spacing with the domain corner at (45N, 5W).
    """
    data = np.zeros((10, 10), dtype=np.float32)
    cube = set_up_variable_cube(
        data,
        name="template",
        grid_spacing=1,
        domain_corner=(45, -5),
        attributes=GLOBAL_ATTRIBUTES,
    )
    return cube
@pytest.fixture(name="uk_grid")
def uk_grid_fixture() -> Cube:
    """UK grid template"""
    grid_data = np.zeros((11, 11), dtype=np.float32)
    return set_up_variable_cube(
        grid_data,
        name="template",
        spatial_grid="equalarea",
        attributes=UK_ATTRIBUTES,
        domain_corner=(-1036000.0, -1158000.0),
        grid_spacing=96900.0,
    )
@pytest.fixture(name="timezone_mask")
def timezone_mask_fixture() -> CubeList:
    """A timezone mask cubelist"""
    template = set_up_variable_cube(
        np.zeros((19, 37), dtype=np.float32),
        name="template",
        grid_spacing=10,
        domain_corner=(-90, -180),
    )
    masks = CubeList()
    # One cube per UTC offset (0..3), each with a scalar UTC_offset
    # coordinate promoted to a new leading dimension.
    for offset in range(4):
        mask = template.copy()
        mask.add_aux_coord(iris.coords.AuxCoord([offset], long_name="UTC_offset"))
        masks.append(iris.util.new_axis(mask, "UTC_offset"))
    return masks
def test__set_time(uk_grid):
    """Test time is set correctly from either the cube or user."""
    # Set by the cube time coordinate
    expected = datetime(2017, 11, 10, 4, tzinfo=pytz.utc)
    plugin = GenerateTimezoneMask()
    plugin._set_time(uk_grid)
    assert plugin.time == expected

    # Set by the user provided argument; this takes precedence over the cube.
    expected = datetime(2020, 7, 16, 15, tzinfo=pytz.utc)
    plugin = GenerateTimezoneMask(time="20200716T1500Z")
    plugin._set_time(uk_grid)
    assert plugin.time == expected

    # Check an exception is raised if no time information is provided
    uk_grid.remove_coord("time")
    plugin = GenerateTimezoneMask()
    msg = (
        "The input cube does not contain a 'time' coordinate. "
        "As such a time must be provided by the user."
    )
    with pytest.raises(ValueError, match=msg):
        plugin._set_time(uk_grid)
@pytest.mark.parametrize("grid_fixture", ["global_grid", "global_grid_360", "uk_grid"])
def test__get_coordinate_pairs(request, grid_fixture):
    """Test that a selection of the points returned by _get_coordinate_pairs
    have the expected values. Tests are for both native lat-long grids and for
    an equal areas grid that must be transformed."""
    # Indices into the flattened (n_points, 2) result: first, eleventh, last.
    sample_points = [0, 10, -1]
    # Expected (latitude, longitude) pairs at the sampled indices.
    expected_data = {
        "global_grid": [[-90.0, -180.0], [-90.0, -80.0], [90.0, 180.0]],
        "global_grid_360": [[-90.0, -180.0], [-90.0, -80.0], [90.0, 180.0]],
        "uk_grid": [[44.517, -17.117], [45.548, -4.913], [54.263, -5.401]],
    }

    grid = request.getfixturevalue(grid_fixture)
    result = GenerateTimezoneMask()._get_coordinate_pairs(grid)

    assert isinstance(result, np.ndarray)
    # np.prod replaces the deprecated np.product alias, which was removed
    # in NumPy 2.0.
    assert result.shape == (np.prod(grid.shape), 2)
    for i, ii in enumerate(sample_points):
        assert_array_almost_equal(
            result[ii, :], expected_data[grid_fixture][i], decimal=3
        )
def test__get_coordinate_pairs_exception(global_grid):
    """Test that an exception is raised if longitudes are found outside the
    range -180 to 180."""
    longitude = global_grid.coord("longitude")
    longitude.points = longitude.points + 360
    with pytest.raises(ValueError, match=r"TimezoneFinder requires .*"):
        GenerateTimezoneMask()._get_coordinate_pairs(global_grid)
def test__calculate_tz_offsets():
    """
    Test that the expected offsets are returned for several timezones, with and
    without daylight savings.
    These test also cover the functionality of _calculate_offset.
    """
    # New York, London, and Melbourne as (latitude, longitude) pairs.
    coordinate_pairs = np.array([[41, -74], [51.5, 0], [-37.9, 145]])

    # Test ignoring daylight savings, so the result should be consistent
    # regardless of the date. Expected offsets are in seconds from UTC.
    expected = [-5 * 3600, 0, 10 * 3600]

    # Northern hemisphere winter
    time = datetime(2020, 1, 1, 12, tzinfo=pytz.utc)
    plugin = GenerateTimezoneMask(time=time)
    result = plugin._calculate_tz_offsets(coordinate_pairs)
    assert_array_equal(result, expected)
    # Check return type information as well
    assert result.ndim == 1
    assert isinstance(result, np.ndarray)
    assert result.dtype == np.int32

    # Southern hemisphere winter
    time = datetime(2020, 7, 1, 12, tzinfo=pytz.utc)
    plugin = GenerateTimezoneMask(time=time)
    result = plugin._calculate_tz_offsets(coordinate_pairs)
    assert_array_equal(result, expected)

    # Test including daylight savings, so the result should change as the
    # date is changed.

    # Northern hemisphere winter: Melbourne is on daylight savings (UTC+11).
    expected = [-5 * 3600, 0, 11 * 3600]
    time = datetime(2020, 1, 1, 12, tzinfo=pytz.utc)
    plugin = GenerateTimezoneMask(include_dst=True, time=time)
    result = plugin._calculate_tz_offsets(coordinate_pairs)
    assert_array_equal(result, expected)

    # Southern hemisphere winter: New York and London on daylight savings.
    expected = [-4 * 3600, 1 * 3600, 10 * 3600]
    time = datetime(2020, 7, 1, 12, tzinfo=pytz.utc)
    plugin = GenerateTimezoneMask(include_dst=True, time=time)
    result = plugin._calculate_tz_offsets(coordinate_pairs)
    assert_array_equal(result, expected)
@pytest.mark.parametrize("grid_fixture", ["global_grid", "uk_grid"])
@pytest.mark.parametrize("include_dst", [False, True])
def test__create_template_cube(request, grid_fixture, include_dst):
    """Test the construction of a template cube slice, checking the shape
    data types, and attributes."""
    grid = request.getfixturevalue(grid_fixture)
    time = datetime(2020, 1, 1, 12, tzinfo=pytz.utc)
    # Copy the attribute dicts so that adding the includes_daylight_savings
    # key below does not mutate the module-level constants, which are shared
    # with the grid fixtures and other tests in this module.
    expected = {
        "global_grid": {"shape": (19, 37), "attributes": dict(GLOBAL_ATTRIBUTES)},
        "uk_grid": {"shape": (11, 11), "attributes": dict(UK_ATTRIBUTES)},
    }
    # Set expected includes_daylight_savings attribute
    expected[grid_fixture]["attributes"]["includes_daylight_savings"] = str(include_dst)

    plugin = GenerateTimezoneMask(include_dst=include_dst, time=time)
    result = plugin._create_template_cube(grid)

    assert result.name() == "timezone_mask"
    assert result.units == 1
    assert result.coord("time").points[0] == time.timestamp()
    assert result.coord("time").dtype == np.int64
    assert result.shape == expected[grid_fixture]["shape"]
    assert result.dtype == np.int8
    assert result.attributes == expected[grid_fixture]["attributes"]
@pytest.mark.parametrize(
    "groups", ({0: [0, 1], 3: [2, 3]}, {0: [0, 2], 3: [3]}, {1: [0, 2], 4: [3, 4]})
)
def test__group_timezones(timezone_mask, groups):
    """Test the grouping of different UTC offsets into larger groups using a
    user provided specification. The input cube list contains cubes corresponding
    to 4 UTC offsets. Three tests are run, first grouping these into equal sized
    groups, then into unequally sized groups. Finally the timezones are grouped
    around a point beyond the end of the timezone range found in the timezone
    mask.
    This final test replicates what we want to achieve when grouping data
    to extract from data available at non-hourly intervals. For example we
    want to round UTC+14 data to UTC+15 as that is nearest available 3-hourly
    data interval, though there is no timezone at UTC+15."""
    plugin = GenerateTimezoneMask(groupings=groups)
    result = plugin._group_timezones(timezone_mask)
    # One output cube per requested group, with the group key as the point.
    assert len(result) == len(groups)
    for (offset, group), cube in zip(groups.items(), result):
        assert cube.coord("UTC_offset").points[0] == offset
        assert cube.coord("UTC_offset").bounds is not None
        if len(group) > 1:
            # Multi-member groups take their bounds from the group extremes.
            assert_array_equal(cube.coord("UTC_offset").bounds[0], group)
        else:
            # Single-member groups collapse to equal lower and upper bounds.
            assert cube.coord("UTC_offset").bounds[0][0] == group[0]
            assert cube.coord("UTC_offset").bounds[0][-1] == group[0]
def test__group_timezones_empty_group(timezone_mask):
    """Test the grouping of different UTC offsets into larger groups in a case
    for which a specified group contains no data."""
    groups = {0: [0, 1], 3: [2, 3], 6: [4, 10]}
    result = GenerateTimezoneMask(groupings=groups)._group_timezones(timezone_mask)
    # The 6: [4, 10] group matches no input offsets, so it is dropped.
    assert len(result) == 2
    populated_groups = list(groups.items())[:-1]
    for (offset, group), cube in zip(populated_groups, result):
        utc_offset = cube.coord("UTC_offset")
        assert utc_offset.points[0] == offset
        assert_array_equal(utc_offset.bounds[0], group)
# Expected data for process tests

# Validity times (seconds since epoch) keyed by the user-supplied time string;
# None means the time is taken from the input cube.
EXPECTED_TIME = {None: 1510286400, "20200716T1500Z": 1594911600}
# UTC_offset coordinate limits (seconds) shared by all "grouped" cases below.
GROUPED_MIN_MAX = {"min": -6 * 3600, "max": 6 * 3600}
# Expected results keyed by grouping, then domain, then time string. Each
# entry records the output shape, the UTC_offset point range (seconds), and a
# sample data slice (the row at spatial index 9) to compare against.
EXPECTED = {
    "ungrouped": {
        "uk": {
            None: {
                "shape": (3, 11, 11),
                "min": -1 * 3600,
                "max": 1 * 3600,
                "data": np.array(
                    [
                        [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
                        [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
                        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    ]
                ),
            },
            "20200716T1500Z": {
                "shape": (4, 11, 11),
                "min": -1 * 3600,
                "max": 2 * 3600,
                "data": np.array(
                    [
                        [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
                        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                        [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1],
                        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    ],
                ),
            },
        },
        "europe": {
            None: {
                "shape": (2, 10, 10),
                "min": 0,
                "max": 1 * 3600,
                "data": np.array(
                    [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
                ),
            },
            "20200716T1500Z": {
                "shape": (3, 10, 10),
                "min": 0,
                "max": 2 * 3600,
                "data": np.array(
                    [
                        [1, 0, 1, 1, 1, 1, 0, 0, 0, 0],
                        [0, 1, 0, 0, 0, 0, 1, 1, 1, 1],
                        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    ]
                ),
            },
        },
    },
    "grouped": {
        "uk": {
            None: {
                "shape": (2, 11, 11),
                **GROUPED_MIN_MAX,
                "data": np.array(
                    [
                        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    ]
                ),
            },
            "20200716T1500Z": {
                "shape": (2, 11, 11),
                **GROUPED_MIN_MAX,
                "data": np.array(
                    [
                        [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
                        [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1],
                    ],
                ),
            },
        },
        "europe": {
            None: {
                "shape": (2, 10, 10),
                **GROUPED_MIN_MAX,
                "data": np.array(
                    [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
                ),
            },
            "20200716T1500Z": {
                "shape": (2, 10, 10),
                **GROUPED_MIN_MAX,
                "data": np.array(
                    [[1, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 1, 1, 1, 1]]
                ),
            },
        },
    },
}
@pytest.mark.parametrize("grouping", ["ungrouped", "grouped"])
@pytest.mark.parametrize("time", [None, "20200716T1500Z"])
@pytest.mark.parametrize("grid_fixture", ["uk_grid", "europe_grid"])
def test_process(request, grid_fixture, time, grouping):
    """Test that the process method returns cubes that take the expected form
    for different grids and different dates.
    The output data is primarily checked in the acceptance tests as a reasonably
    large number of data points are required to reliably check it. Here we check
    only a small sample."""
    # Fixture names are "<domain>_grid"; the EXPECTED lookup is keyed by domain.
    domain = grid_fixture.split("_")[0]
    groupings = None
    if grouping == "grouped":
        groupings = {-6: [-12, 0], 6: [1, 14]}
    expected = EXPECTED[grouping][domain][time]
    expected_time = EXPECTED_TIME[time]

    grid = request.getfixturevalue(grid_fixture)
    result = GenerateTimezoneMask(time=time, include_dst=True, groupings=groupings)(
        grid
    )

    assert result.coord("time").points[0] == expected_time
    assert result.shape == expected["shape"]
    assert result.coord("UTC_offset").points.min() == expected["min"]
    assert result.coord("UTC_offset").points.max() == expected["max"]
    assert result.coord("UTC_offset").points.dtype == TIME_COORDS["UTC_offset"].dtype
    if grouping == "grouped":
        assert (
            result.coord("UTC_offset").bounds.dtype == TIME_COORDS["UTC_offset"].dtype
        )
    # slice the first spatial dimension to moderate size of expected arrays
    assert_array_equal(result.data[:, 9, :], expected["data"])
    # check each spatial location in the UTC_offset dimension
    zone_count = np.count_nonzero(result.data, axis=0)
    if grouping == "grouped":
        # grouped outputs have a single UTC_offset with a non-zero entry
        assert_array_equal(zone_count, 1)
    else:
        # ungrouped outputs have a single UTC_offset with a zero entry
        assert_array_equal(zone_count, expected["shape"][0] - 1)
| bsd-3-clause | 1f1dcbc7485cb9ae6feee9412578ac92 | 35.180258 | 88 | 0.59306 | 3.631273 | false | true | false | false |
metoppv/improver | improver/cube_combiner.py | 1 | 27549 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module containing plugins for combining cubes"""
from operator import eq
from typing import Callable, List, Tuple, Union
import iris
import numpy as np
from iris.coords import AuxCoord, CellMethod, DimCoord
from iris.cube import Cube, CubeList
from iris.exceptions import CoordinateNotFoundError
from improver import BasePlugin
from improver.metadata.check_datatypes import enforce_dtype
from improver.metadata.constants.time_types import TIME_COORDS
from improver.metadata.probabilistic import (
find_threshold_coordinate,
get_diagnostic_cube_name_from_probability_name,
get_threshold_coord_name_from_probability_name,
)
from improver.utilities.cube_manipulation import (
enforce_coordinate_ordering,
expand_bounds,
filter_realizations,
)
class Combine(BasePlugin):
    """Combine input cubes.

    Combine the input cubes into a single cube using the requested operation.
    The first cube in the input list provides the template for output metadata.
    If coordinates are expanded as a result of this combine operation
    (e.g. expanding time for accumulations / max in period) the upper bound of
    the new coordinate will also be used as the point for the new coordinate.
    """

    def __init__(
        self,
        operation: str,
        broadcast_to_threshold: bool = False,
        minimum_realizations: Union[str, int, None] = None,
        new_name: str = None,
        cell_method_coordinate: str = None,
    ):
        r"""
        Args:
            operation (str):
                An operation to use in combining input cubes. One of:
                +, -, \*, add, subtract, multiply, min, max, mean
            broadcast_to_threshold (bool):
                If True, broadcast input cubes to the threshold coord prior to combining -
                a threshold coord must already exist on the first input cube.
            minimum_realizations (int):
                If specified, the input cubes will be filtered to ensure that only realizations that
                include all available lead times are combined. If the number of realizations that
                meet this criteria are fewer than this integer, an error will be raised.
                Minimum value is 1.
            new_name (str):
                New name for the resulting dataset.
            cell_method_coordinate (str):
                If specified, a cell method is added to the output with the coordinate
                provided. This is only available for max, min and mean operations.
        """
        try:
            # minimum_realizations may arrive as a string (e.g. from a CLI).
            self.minimum_realizations = int(minimum_realizations)
        except TypeError:
            # int(None) raises TypeError; anything else non-convertible
            # (e.g. a bad string raising ValueError) propagates.
            if minimum_realizations is not None:
                raise
            self.minimum_realizations = None

        self.new_name = new_name
        self.broadcast_to_threshold = broadcast_to_threshold
        self.cell_method_coordinate = cell_method_coordinate
        if operation in ("*", "multiply"):
            self.plugin = CubeMultiplier(
                broadcast_to_threshold=self.broadcast_to_threshold
            )
        else:
            self.plugin = CubeCombiner(
                operation, cell_method_coordinate=cell_method_coordinate
            )

    def process(self, cubes: CubeList) -> Cube:
        """
        Preprocesses the cubes, then passes them to the appropriate plugin

        Args:
            cubes (iris.cube.CubeList or list of iris.cube.Cube):
                An iris CubeList to be combined.

        Returns:
            result (iris.cube.Cube):
                Returns a cube with the combined data.

        Raises:
            TypeError:
                If input list of cubes is empty
            ValueError:
                If minimum_realizations aren't met, or less than one were requested.
        """
        if not cubes:
            raise TypeError("A cube is needed to be combined.")
        # Derive the output name locally rather than overwriting self.new_name:
        # caching the first cube's name on the instance leaked state between
        # successive calls on a reused plugin instance.
        new_name = self.new_name if self.new_name is not None else cubes[0].name()

        if self.minimum_realizations is None:
            filtered_cubes = cubes
        else:
            if self.minimum_realizations < 1:
                raise ValueError(
                    f"Minimum realizations must be at least 1, not {self.minimum_realizations}"
                )

            cube = filter_realizations(cubes)
            realization_count = len(cube.coord("realization").points)
            if realization_count < self.minimum_realizations:
                raise ValueError(
                    f"After filtering, number of realizations {realization_count} "
                    "is less than the minimum number of realizations allowed "
                    f"({self.minimum_realizations})"
                )
            filtered_cubes = cube.slices_over("time")

        return self.plugin(CubeList(filtered_cubes), new_name)
class CubeCombiner(BasePlugin):
    """Plugin for combining cubes using linear operators"""

    # Maps the user-facing operation string to the numpy ufunc used to
    # accumulate the data cube-by-cube.
    COMBINE_OPERATORS = {
        "+": np.add,
        "add": np.add,
        "-": np.subtract,
        "subtract": np.subtract,
        "max": np.maximum,
        "min": np.minimum,
        "mean": np.add,
    }  # mean is calculated in two steps: sum and normalise
def __init__(self, operation: str, cell_method_coordinate: str = None) -> None:
    """Create a CubeCombiner plugin

    Args:
        operation:
            Operation (+, - etc) to apply to the incoming cubes.
        cell_method_coordinate:
            If provided, a cell method using this coordinate is added to the
            output for max, min and mean operations.

    Raises:
        ValueError: if operation is not recognised in dictionary
    """
    self.operation = operation
    self.cell_method_coordinate = cell_method_coordinate
    if operation not in self.COMBINE_OPERATORS:
        raise ValueError("Unknown operation {}".format(operation))
    self.operator = self.COMBINE_OPERATORS[operation]
    # The mean is produced by summing then dividing by the cube count.
    self.normalise = operation == "mean"
@staticmethod
def _check_dimensions_match(
    cube_list: Union[List[Cube], CubeList], comparators: List[Callable] = [eq],
) -> None:
    """
    Check that the dimension coordinates on all input cubes match according
    to the comparators specified.

    Args:
        cube_list:
            List of cubes to compare
        comparators:
            Comparison operators, at least one of which must return "True"
            for each coordinate in order for the match to be valid

    Raises:
        ValueError: If dimension coordinates do not match
    """
    reference = cube_list[0]
    ref_coords = reference.coords(dim_coords=True)
    for cube in cube_list[1:]:
        pairs = zip(cube.coords(dim_coords=True), ref_coords)
        # Every coordinate pair must satisfy at least one comparator.
        if not all(any(comp(a, b) for comp in comparators) for a, b in pairs):
            raise ValueError(
                "Cannot combine cubes with different dimensions:\n"
                "{} and {}".format(repr(reference), repr(cube))
            )
@staticmethod
def _get_expanded_coord_names(cube_list: Union[List[Cube], CubeList]) -> List[str]:
    """
    Get names of the scalar coordinates that are present on every input cube
    but whose values differ between cubes; these need their bounds expanding
    and their points recalculating after the cubes are combined.

    Args:
        cube_list:
            List of cubes to that will be combined

    Returns:
        List of coordinate names to expand
    """
    # Scalar coordinate names shared by every cube in the list.
    shared_scalar_coords = set.intersection(
        *(
            {coord.name() for coord in cube.coords(dim_coords=False)}
            for cube in cube_list
        )
    )
    expanded_coords = []
    for cube in cube_list[1:]:
        for name in shared_scalar_coords:
            differs = cube.coord(name) != cube_list[0].coord(name)
            if differs and name not in expanded_coords:
                expanded_coords.append(name)
    return expanded_coords
def _add_cell_method(self, cube: Cube) -> None:
    """Add a cell method to record the operation undertaken.

    Args:
        cube:
            Cube to which a cell method will be added.

    Raises:
        ValueError: If a cell_method_coordinate is provided and the operation
            is not max, min or mean.
    """
    if not self.cell_method_coordinate:
        return
    method_names = {"max": "maximum", "min": "minimum", "mean": "mean"}
    if self.operation not in method_names:
        raise ValueError(
            "A cell method coordinate has been produced with "
            f"operation: {self.operation}. A cell method coordinate "
            "can only be added if the operation is max, min or mean."
        )
    cube.add_cell_method(
        CellMethod(method_names[self.operation], coords=self.cell_method_coordinate)
    )
    def _combine_cube_data(self, cube_list: Union[List[Cube], CubeList]) -> Cube:
        """
        Perform cumulative operation to combine cube data

        Args:
            cube_list:
                Cubes whose data are combined, pairwise, into the first
                cube's copy using ``self.operator``.
        Returns:
            Combined cube
        Raises:
            TypeError: if the operation results in an escalated datatype
        """
        result = cube_list[0].copy()
        # Slice over realization if possible to reduce memory usage.
        if "realization" in [crd.name() for crd in result.coords(dim_coords=True)]:
            rslices = iris.cube.CubeList(result.slices_over("realization"))
            for cube in cube_list[1:]:
                cslices = cube.slices_over("realization")
                # Combine realization-by-realization; the slices are views,
                # so assigning .data here mutates the members of rslices.
                for rslice, cslice in zip(rslices, cslices):
                    rslice.data = self.operator(rslice.data, cslice.data)
            result = rslices.merge_cube()
            # merge_cube may reorder dimensions (realization moves to the
            # front); restore the original cube's dimension order.
            enforce_coordinate_ordering(
                result, [d.name() for d in cube_list[0].coords(dim_coords=True)]
            )
        else:
            for cube in cube_list[1:]:
                result.data = self.operator(result.data, cube.data)
        # Dividing by the cube count turns a cumulative sum into a mean.
        if self.normalise:
            result.data = result.data / len(cube_list)
        # Guard against dtype escalation (e.g. float32 -> float64).
        enforce_dtype(str(self.operator), cube_list, result)
        return result
def process(
self, cube_list: Union[List[Cube], CubeList], new_diagnostic_name: str,
) -> Cube:
"""
Combine data and metadata from a list of input cubes into a single
cube, using the specified operation to combine the cube data. The
first cube in the input list provides the template for the combined
cube metadata.
If coordinates are expanded as a result of this combine operation
(e.g. expanding time for accumulations / max in period) the upper bound
of the new coordinate will also be used as the point for the new coordinate.
Args:
cube_list:
List of cubes to combine.
new_diagnostic_name:
New name for the combined diagnostic.
Returns:
Cube containing the combined data.
Raises:
ValueError: If the cube_list contains only one cube.
"""
if len(cube_list) < 2:
msg = "Expecting 2 or more cubes in cube_list"
raise ValueError(msg)
self._check_dimensions_match(cube_list)
result = self._combine_cube_data(cube_list)
expanded_coord_names = self._get_expanded_coord_names(cube_list)
if expanded_coord_names:
result = expand_bounds(result, cube_list, expanded_coord_names)
self._add_cell_method(result)
result.rename(new_diagnostic_name)
return result
class CubeMultiplier(CubeCombiner):
    """Class to multiply input cubes
    The behaviour for the "multiply" operation is different from
    other types of cube combination. You can either apply a factor that
    conditions an input probability field - that is, to apply Bayes Theorem,
    or separate out a fraction of a variable (e.g. rain from precipitation).
    The first input field is used as the source of ALL input metadata.
    The factor(s) by which this is multiplied are not compared for any
    mis-match in scalar coordinates.
    """
    def __init__(self, broadcast_to_threshold: bool = False) -> None:
        """Create a CubeMultiplier plugin
        Args:
            broadcast_to_threshold:
                True if the first cube has a threshold coordinate to which the
                following cube(s) need(s) to be broadcast prior to combining data.
        """
        self.broadcast_to_threshold = broadcast_to_threshold
        # Multiplication with no normalisation; these attributes are the
        # contract expected by the CubeCombiner machinery.
        self.operator = np.multiply
        self.normalise = False
    def _setup_coords_for_broadcast(self, cube_list: CubeList) -> CubeList:
        """
        Adds a scalar threshold to any subsequent cube in cube_list so that they all
        match the dimensions, in order, of the first cube in the list
        Args:
            cube_list:
                Cubes to be aligned; the first provides the target threshold
                coordinate and dimension order.
        Returns:
            Updated version of cube_list
        Raises:
            CoordinateNotFoundError: if there is no threshold coordinate on the
                first cube in the list
            TypeError: if there is a scalar threshold coordinate on any of the
                later cubes, which would indicate that the cube is only valid for
                a single threshold and should not be broadcast to all thresholds.
        """
        target_cube = cube_list[0]
        try:
            target_coord = find_threshold_coordinate(target_cube)
        except CoordinateNotFoundError:
            raise CoordinateNotFoundError(
                f"Cannot find coord threshold in {repr(target_cube)} to broadcast to"
            )
        new_list = CubeList([])
        for cube in cube_list:
            try:
                found_coord = cube.coord(target_coord)
            except CoordinateNotFoundError:
                # No threshold on this cube: add a length-1 copy of the
                # target coordinate as a new leading axis so numpy
                # broadcasting applies during multiplication.
                new_coord = target_coord.copy([0], bounds=None)
                cube = cube.copy()
                cube.add_aux_coord(new_coord, None)
                cube = iris.util.new_axis(cube, new_coord)
                enforce_coordinate_ordering(
                    cube, [d.name() for d in target_cube.coords(dim_coords=True)]
                )
            else:
                # A scalar (aux) threshold implies the cube is valid for one
                # threshold only, so broadcasting it would be wrong.
                if found_coord not in cube.dim_coords:
                    msg = "Cannot broadcast to coord threshold as it already exists as an AuxCoord"
                    raise TypeError(msg)
            new_list.append(cube)
        return new_list
    @staticmethod
    def _coords_are_broadcastable(coord1: DimCoord, coord2: DimCoord) -> bool:
        """
        Broadcastable coords will differ only in length, so create a copy of one with
        the points and bounds of the other and compare. Also ensure length of at least
        one of the coords is 1.
        """
        coord_copy = coord1.copy(coord2.points, bounds=coord2.bounds)
        return (coord_copy == coord2) and (
            (len(coord1.points) == 1) or (len(coord2.points) == 1)
        )
    @staticmethod
    def _update_cell_methods(
        cell_methods: Tuple[CellMethod], original_name: str, new_diagnostic_name: str,
    ) -> List[CellMethod]:
        """
        Update any cell methods that include a comment that refers to the
        diagnostic name to refer instead to the new diagnostic name. Those cell
        methods that do not include the diagnostic name are passed through
        unmodified.
        Args:
            cell_methods:
                The cell methods found on the cube that is being used as the
                metadata template.
            original_name:
                The full name of the metadata template cube.
            new_diagnostic_name:
                The new diagnostic name to use in the modified cell methods.
        Returns:
            A list of modified cell methods to replace the originals.
        """
        try:
            # strip probability and vicinity components to provide the diagnostic name
            diagnostic_name = get_threshold_coord_name_from_probability_name(
                original_name
            )
        except ValueError:
            # Not a probability cube name; use it unchanged.
            diagnostic_name = original_name
        new_cell_methods = []
        for cell_method in cell_methods:
            try:
                # Only cell methods with exactly one comment are candidates
                # for rewriting; others unpack with ValueError.
                (cell_comment,) = cell_method.comments
            except ValueError:
                new_cell_methods.append(cell_method)
            else:
                if diagnostic_name in cell_comment:
                    new_cell_methods.append(
                        CellMethod(
                            cell_method.method,
                            coords=cell_method.coord_names,
                            intervals=cell_method.intervals,
                            comments=f"of {new_diagnostic_name}",
                        )
                    )
                else:
                    new_cell_methods.append(cell_method)
        return new_cell_methods
    def process(
        self, cube_list: Union[List[Cube], CubeList], new_diagnostic_name: str
    ) -> Cube:
        """
        Multiply data from a list of input cubes into a single cube. The first
        cube in the input list provides the combined cube metadata.
        Args:
            cube_list:
                List of cubes to combine.
            new_diagnostic_name:
                New name for the combined diagnostic. This should be the diagnostic
                name, eg rainfall_rate or rainfall_rate_in_vicinity, rather than the
                name of the probabilistic output cube.
        Returns:
            Cube containing the combined data.
        Raises:
            ValueError: If the cube_list contains only one cube.
            TypeError: If combining data results in float64 data.
        """
        if len(cube_list) < 2:
            msg = "Expecting 2 or more cubes in cube_list"
            raise ValueError(msg)
        if self.broadcast_to_threshold:
            cube_list = self._setup_coords_for_broadcast(cube_list)
        # Allow length-1 (broadcastable) dimension mismatches in addition to
        # exact equality.
        self._check_dimensions_match(
            cube_list, comparators=[eq, self._coords_are_broadcastable]
        )
        result = self._combine_cube_data(cube_list)
        # Used for renaming the threshold coordinate and modifying cell methods
        # where necessary; excludes the in_vicinity component.
        new_base_name = new_diagnostic_name.replace("_in_vicinity", "")
        original_name = cube_list[0].name()
        if self.broadcast_to_threshold:
            diagnostic_name = get_diagnostic_cube_name_from_probability_name(
                original_name
            )
            # Rename the threshold coordinate to match the name of the diagnostic
            # that results from the combine operation.
            result.coord(var_name="threshold").rename(new_base_name)
            result.coord(new_base_name).var_name = "threshold"
            # Swap the old diagnostic name for the new one within the full
            # probability cube name.
            new_diagnostic_name = original_name.replace(
                diagnostic_name, new_diagnostic_name
            )
        # Modify cell methods that include the variable name to match the new
        # name.
        cell_methods = cube_list[0].cell_methods
        if cell_methods:
            result.cell_methods = self._update_cell_methods(
                cell_methods, original_name, new_base_name
            )
        result.rename(new_diagnostic_name)
        return result
class MaxInTimeWindow(BasePlugin):
    """Find the maximum within a time window for a period diagnostic. For example,
    find the maximum 3-hour precipitation accumulation within a 24 hour window."""
    def __init__(self, minimum_realizations: Union[str, int, None] = None):
        """Initialise class.
        Args:
            minimum_realizations (str, int or None):
                If specified, the input cubes will be filtered to ensure that only realizations that
                include all available lead times are combined. If the number of realizations that
                meet this criteria are fewer than this integer, an error will be raised.
                Minimum value is 1.
        """
        self.minimum_realizations = minimum_realizations
        # Swap the canonical time unit (seconds since epoch) for hours so
        # that period arithmetic below is in hours.
        self.time_units_in_hours = TIME_COORDS["time"].units.replace("seconds", "hours")
    def _get_coords_in_hours(
        self, cubes: List[Cube]
    ) -> List[Union[AuxCoord, DimCoord]]:
        """Get the time coordinates from the input cubes in units of hours
        since 1970-01-01 00:00:00.
        Args:
            cubes: Cubes from which the time coordinates will be extracted.
        Returns:
            The time coordinates extracted from the input cubes.
        """
        # Copies are taken so the unit conversion does not modify the cubes.
        coords = [c.coord("time").copy() for c in cubes]
        [c.convert_units(self.time_units_in_hours) for c in coords]
        return coords
    def _check_input_cubes(self, coords: List[Union[AuxCoord, DimCoord]]):
        """Check that the input cubes are period diagnostics i.e. where the time
        coordinate has bounds representing a period and that the bounds represent
        a consistent period.
        Args:
            coords: The time coordinates extracted from the input cubes.
        Raises:
            ValueError: The input cubes do not have bounds.
            ValueError: The input cubes do not all have bounds.
            ValueError: The input cubes have bounds that imply mismatching periods.
        """
        # msg is accumulated; if it is still None at the end, all checks passed.
        msg = None
        if not all([c.has_bounds() for c in coords]):
            msg = (
                "When computing the maximum over a time window, the inputs "
                "are expected to be diagnostics representing a time period "
                "with bounds. "
            )
            [c.convert_units(self.time_units_in_hours) for c in coords]
            # Periods (in hours) implied by whichever coords do have bounds.
            period = np.unique([np.diff(c.bounds) for c in coords if c.has_bounds()])
            if not any([c.has_bounds() for c in coords]):
                msg = msg + ("The cubes provided do not have bounds.")
            else:
                msg = msg + (
                    "The cubes provided do not all have bounds. "
                    f"Period(s) indicated by bounds: {period} hours"
                )
        elif len(np.unique([np.diff(c.bounds) for c in coords])) > 1:
            # All cubes have bounds but the implied periods differ.
            [c.convert_units(self.time_units_in_hours) for c in coords]
            period = np.unique([np.diff(c.bounds) for c in coords])
            msg = (
                "The bounds on the cubes imply mismatching periods. "
                f"Period(s) indicated by bounds: {period} hours"
            )
        if msg:
            raise ValueError(msg)
    def _correct_metadata(
        self, cube: Cube, coords_in_hours: List[Union[AuxCoord, DimCoord]]
    ) -> Cube:
        """Correct metadata in particular to ensure that the cell methods are
        updated to represent a period for a time window diagnostic.
        Args:
            cube: Cube representing the maximum over a time window for a period
                diagnostic.
            coords_in_hours: List of time coordinates in units of hours since
                1970-01-01 00:00:00.
        Returns:
            Cube representing the maximum over a time window for a period
            diagnostic with appropriate metadata.
        """
        # For probability cubes the underlying diagnostic name is held on the
        # threshold coordinate rather than in the cube name.
        if cube.name().startswith("probability_of"):
            diag_name = cube.coord(var_name="threshold").name()
        else:
            diag_name = cube.name()
        # Single-element unpack: periods have already been checked consistent.
        (period,) = np.unique([np.diff(c.bounds) for c in coords_in_hours])
        hour_text = "hour" if round(period) == 1 else "hours"
        sum_comment = (
            f"of {diag_name} over {round(period)} {hour_text} within time window"
        )
        max_comment = f"of {diag_name}"
        # Remove cell methods with the same method and coordinate name as will be added.
        cell_methods = []
        for cm in cube.cell_methods:
            if cm.method in ["sum", "maximum"] and "time" in cm.coord_names:
                continue
            else:
                cell_methods.append(cm)
        cube.cell_methods = tuple(cell_methods)
        # Add cell methods to record that a maximum over time has been computed,
        # as well as some information about the inputs to this value.
        cube.add_cell_method(CellMethod("sum", coords=["time"], comments=sum_comment))
        cube.add_cell_method(
            CellMethod("maximum", coords=["time"], comments=max_comment)
        )
        return cube
    def process(self, cubes: CubeList) -> Cube:
        """Compute the maximum probability or maximum diagnostic value within a
        time window for a period diagnostic using the Combine plugin. The resulting
        cube has a time coordinate with bounds that represent the time window whilst
        the cell method has been updated to represent the period recorded on the input
        cubes. For example, the time window might be 24 hours, whilst the period might
        be 3 hours.
        Args:
            cubes (iris.cube.CubeList or list of iris.cube.Cube):
                An iris CubeList to be combined.
        Returns:
            result (iris.cube.Cube):
                Returns a cube with the combined data.
        """
        coords_in_hours = self._get_coords_in_hours(cubes)
        self._check_input_cubes(coords_in_hours)
        cube = Combine("max", minimum_realizations=self.minimum_realizations)(cubes)
        cube = self._correct_metadata(cube, coords_in_hours)
        return cube
| bsd-3-clause | 62117a6d93c4ebac76d9abc72b16fef7 | 38.468481 | 100 | 0.598534 | 4.473693 | false | false | false | false |
astropy/astropy-helpers | docs/conf.py | 3 | 1470 | # -*- coding: utf-8 -*-
# Sphinx configuration: every top-level name below is read by Sphinx itself,
# so names and values must not change.
project = 'astropy-helpers'
copyright = '2014, The Astropy Developers'
author = 'The Astropy Developers'
# We need to get the version number from the package
# NOTE: the sys.path insert must run before importing astropy_helpers so the
# local checkout (one directory up) is imported rather than any installed copy.
import sys # noqa
sys.path.insert(0, '..')
import astropy_helpers # noqa
# Use the full version string for both the short and full version displays.
version = astropy_helpers.__version__
release = astropy_helpers.__version__
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'numpydoc',
    'sphinx_automodapi.automodapi'
]
numpydoc_show_class_members = False
intersphinx_mapping = {'https://docs.python.org/': None}
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap-astropy'
html_theme_options = {
    'logotext1': 'astropy', # white, semi-bold
    'logotext2': '-helpers', # orange, light
    'logotext3': ':docs' # white, light
}
| bsd-3-clause | 58d1e162b739a208b054ef53ca4c6e21 | 27.269231 | 75 | 0.706122 | 3.475177 | false | false | false | false |
astropy/astropy-helpers | astropy_helpers/commands/build_sphinx.py | 2 | 8176 |
import os
import pkgutil
import re
import shutil
import subprocess
import sys
from distutils.version import LooseVersion
from distutils import log
from sphinx.setup_command import BuildDoc as SphinxBuildDoc
SUBPROCESS_TEMPLATE = """
import os
import sys
{build_main}
os.chdir({srcdir!r})
{sys_path_inserts}
for builder in {builders!r}:
retcode = build_main(argv={argv!r} + ['-b', builder, '.', os.path.join({output_dir!r}, builder)])
if retcode != 0:
sys.exit(retcode)
"""
def ensure_sphinx_astropy_installed():
    """
    Raise an informative ImportError unless sphinx-astropy >= 1.2 is
    importable.
    """
    sphinx_astropy_version = None
    try:
        from sphinx_astropy import __version__ as sphinx_astropy_version  # noqa
    except ImportError:
        # Leave the version as None; handled below.
        pass
    # The None check short-circuits so LooseVersion is only evaluated when a
    # version string was actually obtained.
    too_old = (sphinx_astropy_version is None
               or LooseVersion(sphinx_astropy_version) < LooseVersion('1.2'))
    if too_old:
        raise ImportError("sphinx-astropy 1.2 or later needs to be installed to build "
                          "the documentation.")
class AstropyBuildDocs(SphinxBuildDoc):
    """
    A version of the ``build_docs`` command that uses the version of Astropy
    that is built by the setup ``build`` command, rather than whatever is
    installed on the system. To build docs against the installed version, run
    ``make html`` in the ``astropy/docs`` directory.
    """
    description = 'Build Sphinx documentation for Astropy environment'
    # Extend the base command's options with astropy-specific flags.
    user_options = SphinxBuildDoc.user_options[:]
    user_options.append(
        ('warnings-returncode', 'w',
         'Parses the sphinx output and sets the return code to 1 if there '
         'are any warnings. Note that this will cause the sphinx log to '
         'only update when it completes, rather than continuously as is '
         'normally the case.'))
    user_options.append(
        ('clean-docs', 'l',
         'Completely clean previous builds, including '
         'automodapi-generated files before building new ones'))
    user_options.append(
        ('no-intersphinx', 'n',
         'Skip intersphinx, even if conf.py says to use it'))
    user_options.append(
        ('open-docs-in-browser', 'o',
         'Open the docs in a browser (using the webbrowser module) if the '
         'build finishes successfully.'))
    user_options.append(
        ('parallel=', 'j',
         'Build the docs in parallel on the specified number of '
         'processes. If "auto", all the cores on the machine will be '
         'used.'))
    boolean_options = SphinxBuildDoc.boolean_options[:]
    boolean_options.append('warnings-returncode')
    boolean_options.append('clean-docs')
    boolean_options.append('no-intersphinx')
    boolean_options.append('open-docs-in-browser')
    # Matches "self.<identifier>" attribute references.
    _self_iden_rex = re.compile(r"self\.([^\d\W][\w]+)", re.UNICODE)
    def initialize_options(self):
        # Defaults for the options added above (distutils requires that
        # every declared option exists as an attribute).
        SphinxBuildDoc.initialize_options(self)
        self.clean_docs = False
        self.no_intersphinx = False
        self.open_docs_in_browser = False
        self.warnings_returncode = False
        self.traceback = False
        self.parallel = None
    def finalize_options(self):
        # This has to happen before we call the parent class's finalize_options
        if self.build_dir is None:
            self.build_dir = 'docs/_build'
        SphinxBuildDoc.finalize_options(self)
        # Clear out previous sphinx builds, if requested
        if self.clean_docs:
            # Remove automodapi-generated directories as well as the build
            # output itself.
            dirstorm = [os.path.join(self.source_dir, 'api'),
                        os.path.join(self.source_dir, 'generated')]
            dirstorm.append(self.build_dir)
            for d in dirstorm:
                if os.path.isdir(d):
                    log.info('Cleaning directory ' + d)
                    shutil.rmtree(d)
                else:
                    log.info('Not cleaning directory ' + d + ' because '
                             'not present or not a directory')
    def run(self):
        # TODO: Break this method up into a few more subroutines and
        # document them better
        import webbrowser
        from urllib.request import pathname2url
        # This is used at the very end of `run` to decide if sys.exit should
        # be called. If it's None, it won't be.
        retcode = None
        # Now make sure Astropy is built and determine where it was built
        build_cmd = self.reinitialize_command('build')
        build_cmd.inplace = 0
        self.run_command('build')
        build_cmd = self.get_finalized_command('build')
        build_cmd_path = os.path.abspath(build_cmd.build_lib)
        # Locate astropy_helpers itself so the subprocess can import it.
        ah_importer = pkgutil.get_importer('astropy_helpers')
        if ah_importer is None:
            ah_path = '.'
        else:
            ah_path = os.path.abspath(ah_importer.path)
        build_main = 'from sphinx.cmd.build import build_main'
        # We need to make sure sphinx-astropy is installed
        ensure_sphinx_astropy_installed()
        # Render the sys.path.insert statements that will run inside the
        # subprocess, putting the freshly-built package first.
        sys_path_inserts = [build_cmd_path, ah_path]
        sys_path_inserts = os.linesep.join(['sys.path.insert(0, {0!r})'.format(path) for path in sys_path_inserts])
        # Translate our distutils options into sphinx-build command-line flags.
        argv = []
        if self.warnings_returncode:
            argv.append('-W')
        if self.no_intersphinx:
            argv.extend(['-D', 'disable_intersphinx=1'])
        # We now need to adjust the flags based on the parent class's options
        if self.fresh_env:
            argv.append('-E')
        if self.all_files:
            argv.append('-a')
        if getattr(self, 'pdb', False):
            argv.append('-P')
        if getattr(self, 'nitpicky', False):
            argv.append('-n')
        if self.traceback:
            argv.append('-T')
        # The default verbosity level is 1, so in that case we just don't add a flag
        if self.verbose == 0:
            argv.append('-q')
        elif self.verbose > 1:
            argv.append('-v')
        if self.parallel is not None:
            argv.append(f'-j={self.parallel}')
        if isinstance(self.builder, str):
            builders = [self.builder]
        else:
            builders = self.builder
        # Build the docs in a subprocess so the freshly-built package (not
        # any installed copy) is the one that gets imported by Sphinx.
        subproccode = SUBPROCESS_TEMPLATE.format(build_main=build_main,
                                                 srcdir=self.source_dir,
                                                 sys_path_inserts=sys_path_inserts,
                                                 builders=builders,
                                                 argv=argv,
                                                 output_dir=os.path.abspath(self.build_dir))
        log.debug('Starting subprocess of {0} with python code:\n{1}\n'
                  '[CODE END])'.format(sys.executable, subproccode))
        # Feed the generated code to a bare interpreter via stdin.
        proc = subprocess.Popen([sys.executable], stdin=subprocess.PIPE)
        proc.communicate(subproccode.encode('utf-8'))
        if proc.returncode != 0:
            retcode = proc.returncode
        if retcode is None:
            if self.open_docs_in_browser:
                if self.builder == 'html':
                    absdir = os.path.abspath(self.builder_target_dir)
                    index_path = os.path.join(absdir, 'index.html')
                    fileurl = 'file://' + pathname2url(index_path)
                    webbrowser.open(fileurl)
                else:
                    log.warn('open-docs-in-browser option was given, but '
                             'the builder is not html! Ignoring.')
        # Here we explicitly check proc.returncode since we only want to output
        # this for cases where the return code really wasn't 0.
        if proc.returncode:
            log.warn('Sphinx Documentation subprocess failed with return '
                     'code ' + str(proc.returncode))
        if retcode is not None:
            # this is potentially dangerous in that there might be something
            # after the call to `setup` in `setup.py`, and exiting here will
            # prevent that from running. But there's no other apparent way
            # to signal what the return code should be.
            sys.exit(retcode)
class AstropyBuildSphinx(AstropyBuildDocs):  # pragma: no cover
    """Alias of ``AstropyBuildDocs`` retaining the historical command name."""

    def run(self):
        # Delegate to the AstropyBuildDocs implementation.
        super().run()
| bsd-3-clause | 90a056f5e1fb62b138b72849395844f6 | 33.791489 | 115 | 0.586595 | 4.194972 | false | false | false | false |
pvlib/pvlib-python | pvlib/iotools/crn.py | 3 | 5336 | """Functions to read data from the US Climate Reference Network (CRN).
"""
import pandas as pd
import numpy as np
# Column names for the CRN fixed-width files, in file order.
HEADERS = [
    'WBANNO', 'UTC_DATE', 'UTC_TIME', 'LST_DATE', 'LST_TIME', 'CRX_VN',
    'LONGITUDE', 'LATITUDE', 'AIR_TEMPERATURE', 'PRECIPITATION',
    'SOLAR_RADIATION', 'SR_FLAG', 'SURFACE_TEMPERATURE', 'ST_TYPE', 'ST_FLAG',
    'RELATIVE_HUMIDITY', 'RH_FLAG', 'SOIL_MOISTURE_5', 'SOIL_TEMPERATURE_5',
    'WETNESS', 'WET_FLAG', 'WIND_1_5', 'WIND_FLAG']
# Mapping of CRN column names to standard pvlib variable names.
VARIABLE_MAP = {
    'LONGITUDE': 'longitude',
    'LATITUDE': 'latitude',
    'AIR_TEMPERATURE': 'temp_air',
    'SOLAR_RADIATION': 'ghi',
    'SR_FLAG': 'ghi_flag',
    'RELATIVE_HUMIDITY': 'relative_humidity',
    'RH_FLAG': 'relative_humidity_flag',
    'WIND_1_5': 'wind_speed',
    'WIND_FLAG': 'wind_speed_flag'
}
# Sentinel values used by CRN to flag missing data, per column.
NAN_DICT = {
    'CRX_VN': -99999,
    'AIR_TEMPERATURE': -9999,
    'PRECIPITATION': -9999,
    'SOLAR_RADIATION': -99999,
    'SURFACE_TEMPERATURE': -9999,
    'RELATIVE_HUMIDITY': -9999,
    'SOIL_MOISTURE_5': -99,
    'SOIL_TEMPERATURE_5': -9999,
    'WETNESS': -9999,
    'WIND_1_5': -99}
# Add NUL characters to possible NaN values for all columns
NAN_DICT = {k: [v, '\x00\x00\x00\x00\x00\x00'] for k, v in NAN_DICT.items()}
# as specified in CRN README.txt file. excludes 1 space between columns
WIDTHS = [5, 8, 4, 8, 4, 6, 7, 7, 7, 7, 6, 1, 7, 1, 1, 5, 1, 7, 7, 5, 1, 6, 1]
# add 1 to make fields contiguous (required by pandas.read_fwf)
WIDTHS = [w + 1 for w in WIDTHS]
# no space after last column
WIDTHS[-1] -= 1
# specify dtypes for potentially problematic values
DTYPES = [
    'int64', 'int64', 'int64', 'int64', 'int64', 'str', 'float64', 'float64',
    'float64', 'float64', 'float64', 'int64', 'float64', 'O', 'int64',
    'float64', 'int64', 'float64', 'float64', 'int64', 'int64', 'float64',
    'int64'
]
def read_crn(filename, map_variables=True):
    """Read a NOAA USCRN fixed-width file into a pandas dataframe.

    The CRN network consists of over 100 meteorological stations covering the
    U.S., described in [1]_ and [2]_.  CRN's primary goal is long-term
    measurement of temperature, precipitation and soil moisture/temperature;
    global horizontal irradiance (GHI) is also measured at each site with a
    photodiode pyranometer.

    Parameters
    ----------
    filename: str, path object, or file-like
        filepath or url to read for the fixed-width file.
    map_variables: boolean, default: True
        When true, renames columns of the Dataframe to pvlib variable names
        where applicable. See variable :const:`VARIABLE_MAP`.

    Returns
    -------
    data: Dataframe
        A dataframe with DatetimeIndex and all of the variables in the
        file.

    Notes
    -----
    CRN files contain 5 minute averages labeled by the interval ending
    time.  Missing data is flagged as NaN rather than the lowest possible
    integer for a field (e.g. -999 or -99).  Air temperature is in deg C and
    wind speed is in m/s at 1.5 m above ground level.  Lines consisting only
    of null characters are dropped; lines mixing null characters with valid
    data are also dropped, so users needing those values must clean the file
    manually and reparse.

    References
    ----------
    .. [1] U.S. Climate Reference Network
       `https://www.ncdc.noaa.gov/crn/qcdatasets.html
       <https://www.ncdc.noaa.gov/crn/qcdatasets.html>`_
    .. [2] Diamond, H. J. et. al., 2013: U.S. Climate Reference Network
       after one decade of operations: status and assessment. Bull.
       Amer. Meteor. Soc., 94, 489-498. :doi:`10.1175/BAMS-D-12-00170.1`
    """
    # Parse every field as a string first: pd.read_fwf with dtypes and
    # skip_blank_lines misbehaves on pandas < 1.2, and integer columns cannot
    # hold NaN during parsing. The dtype conversion happens after the bad
    # (all-NUL) lines have been dropped.
    df = pd.read_fwf(filename, header=None, names=HEADERS, widths=WIDTHS,
                     dtype=str)
    # Discard lines that parsed as entirely empty (e.g. all NUL characters).
    df = df.dropna(axis=0, how='all')
    # Now it is safe to apply the real column dtypes.
    df = df.astype(dict(zip(HEADERS, DTYPES)))
    # Replace the per-column sentinel values (-9999 etc.) with NaN.
    df = df.replace(NAN_DICT, value=np.nan)
    # Assemble the UTC timestamp index. UTC_TIME lacks leading zeros, so it
    # is zero-filled to 4 digits to satisfy the %H%M format.
    date_str = df['UTC_DATE'].astype(str)
    time_str = df['UTC_TIME'].astype(str).str.zfill(4)
    timestamps = pd.to_datetime(date_str + time_str,
                                format='%Y%m%d%H%M', utc=True)
    df = df.set_index(timestamps)
    if map_variables:
        df = df.rename(columns=VARIABLE_MAP)
    return df
| bsd-3-clause | 772d5f6f51a14da745b3df69fbf0fee7 | 37.114286 | 79 | 0.649175 | 3.181872 | false | false | false | false |
pvlib/pvlib-python | pvlib/soiling.py | 4 | 8778 | """
This module contains functions for soiling models
"""
import datetime
import numpy as np
import pandas as pd
from scipy.special import erf
from pvlib.tools import cosd
def hsu(rainfall, cleaning_threshold, tilt, pm2_5, pm10,
        depo_veloc=None, rain_accum_period=pd.Timedelta('1h')):
    """
    Calculate the soiling ratio from particulate and rain data with the
    Humboldt State University (HSU) Fixed Velocity model.

    The HSU soiling model [1]_ returns the soiling ratio, a value between
    zero and one equal to (1 - transmission loss); a ratio of 1.0 means no
    transmission loss.

    Parameters
    ----------
    rainfall : Series
        Rain accumulated in each time period. [mm]
    cleaning_threshold : float
        Amount of rain in an accumulation period needed to clean the PV
        modules. [mm]
    tilt : float
        Tilt of the PV panels from horizontal. [degree]
    pm2_5 : numeric
        Concentration of airborne particulate matter (PM) with
        aerodynamic diameter less than 2.5 microns. [g/m^3]
    pm10 : numeric
        Concentration of airborne particulate matter (PM) with
        aerodynamic diameter less than 10 microns. [g/m^3]
    depo_veloc : dict, default {'2_5': 0.0009, '10': 0.004}
        Deposition or settling velocity of particulates. [m/s]
    rain_accum_period : Timedelta, default 1 hour
        Period for accumulating rainfall to check against
        `cleaning_threshold`. A value between 1 hour and 24 hours is
        recommended.

    Returns
    -------
    soiling_ratio : Series
        Values between 0 and 1. Equal to 1 - transmission loss.

    References
    -----------
    .. [1] M. Coello and L. Boyle, "Simple Model For Predicting Time Series
       Soiling of Photovoltaic Panels," in IEEE Journal of Photovoltaics.
       doi: 10.1109/JPHOTOV.2019.2919628
    .. [2] Atmospheric Chemistry and Physics: From Air Pollution to Climate
       Change. J. Seinfeld and S. Pandis. Wiley and Sons 2001.
    """
    # A mutable default must never appear in the signature.
    if depo_veloc is None:
        depo_veloc = {'2_5': 0.0009, '10': 0.004}

    # Trailing-window rainfall totals, compared with the threshold to find
    # the intervals in which natural cleaning occurs.
    accumulated = rainfall.rolling(rain_accum_period, closed='right').sum()
    cleaning_times = accumulated.index[accumulated >= cleaning_threshold]

    # Length of each interval in seconds; the first interval is assumed to
    # span the same duration as the second.
    times = rainfall.index
    interval_sec = (times[1:] - times[:-1]).total_seconds()
    dt_sec = np.append(interval_sec[0], interval_sec).astype('float64')

    # Mass deposited per interval on a horizontal surface: the fine fraction
    # settles at the PM2.5 velocity, the coarse remainder at the PM10 one.
    coarse_pm = np.maximum(pm10 - pm2_5, 0.)
    horiz_mass_rate = (pm2_5 * depo_veloc['2_5']
                       + coarse_pm * depo_veloc['10']) * dt_sec
    tilted_mass_rate = horiz_mass_rate * cosd(tilt)  # assuming no rain

    # Running total of deposited mass, ignoring cleaning events.
    cumulative_mass = np.cumsum(tilted_mass_rate * np.ones(rainfall.shape))
    mass_no_cleaning = pd.Series(index=rainfall.index, data=cumulative_mass)

    # At each cleaning event the full accumulated mass is removed; forward
    # filling carries the last cleaning level to subsequent intervals.
    # Specify dtype so pandas doesn't assume object.
    mass_removed = pd.Series(index=rainfall.index, dtype='float64')
    mass_removed[0] = 0.
    mass_removed[cleaning_times] = mass_no_cleaning[cleaning_times]
    accum_mass = mass_no_cleaning - mass_removed.ffill()

    return 1 - 0.3437 * erf(0.17 * accum_mass**0.8473)
def kimber(rainfall, cleaning_threshold=6, soiling_loss_rate=0.0015,
           grace_period=14, max_soiling=0.3, manual_wash_dates=None,
           initial_soiling=0, rain_accum_period=24):
    """
    Calculate the fraction of energy lost to soiling from rainfall data and
    a daily soiling rate, following the Kimber model.

    In the Kimber model [1]_, soiling accrues at a fixed daily rate up to a
    maximum, unless the rainfall accumulated over `rain_accum_period`
    exceeds `cleaning_threshold`, in which case the panels are reset to
    clean. After a qualifying rain event the ground is assumed damp for
    `grace_period` days, during which no soiling builds up. Scheduled
    manual washes also reset soiling to zero, with no grace period.

    Parameters
    ----------
    rainfall: pandas.Series
        Accumulated rainfall at the end of each time period. [mm]
    cleaning_threshold: float, default 6
        Amount of daily rainfall required to clean the panels. [mm]
    soiling_loss_rate: float, default 0.0015
        Fraction of energy lost due to one day of soiling. [unitless]
    grace_period : int, default 14
        Number of days after a rainfall event when it's assumed the ground is
        damp, and so it's assumed there is no soiling. [days]
    max_soiling : float, default 0.3
        Maximum fraction of energy lost due to soiling. Soiling will build up
        until this value. [unitless]
    manual_wash_dates : sequence or None, default None
        List or tuple of dates as Python ``datetime.date`` when the panels
        were washed manually. Note there is no grace period after a manual
        wash, so soiling begins to build up immediately.
    initial_soiling : float, default 0
        Initial fraction of energy lost due to soiling at time zero in the
        `rainfall` series input. [unitless]
    rain_accum_period : int, default 24
        Period for accumulating rainfall to check against
        `cleaning_threshold`. The Kimber model defines this period as one
        day. [hours]

    Returns
    -------
    pandas.Series
        fraction of energy lost due to soiling, has same intervals as input

    Notes
    -----
    The soiling loss rate depends on both the geographical region and the
    soiling environment type; rates measured by Kimber [1]_ range from 0 to
    0.0030 per day. Rainfall thresholds and grace periods may also vary by
    region; please consult [1]_ for more information.

    References
    ----------
    .. [1] "The Effect of Soiling on Large Grid-Connected Photovoltaic
       Systems in California and the Southwest Region of the United States,"
       Adrianne Kimber, et al., IEEE 4th World Conference on Photovoltaic
       Energy Conference, 2006, :doi:`10.1109/WCPEC.2006.279690`
    """
    # Convert the integer windows to timedeltas for time-based rolling.
    accum_window = datetime.timedelta(hours=rain_accum_period)
    damp_window = datetime.timedelta(days=grace_period)

    # Fraction of one day represented by a single timestep, derived from the
    # first two index values (a regular index is assumed).
    index_values = rainfall.index.values
    step = index_values[1] - index_values[0]
    day_fraction = step / np.timedelta64(24, 'h')

    # Rainfall accumulated over the trailing accumulation window.
    accumulated = rainfall.rolling(accum_window, closed='right').sum()

    # Constant per-step soiling increment, accumulated over time; the very
    # first value carries the initial soiling instead.
    increments = np.ones_like(rainfall.values) * soiling_loss_rate * day_fraction
    increments[0] = initial_soiling
    soiling = pd.Series(np.cumsum(increments), index=rainfall.index,
                        name='soiling')

    # Rain events heavy enough to clean the panels, and the damp windows
    # that follow them (during which no soiling accrues).
    rain_events = accumulated > cleaning_threshold
    grace_windows = rain_events.rolling(damp_window, closed='right').sum() > 0

    # Within grace windows the accumulated soiling is cancelled; forward
    # filling keeps the panels at the cleaned level until the next event.
    cleaning = pd.Series(float('NaN'), index=rainfall.index)
    cleaning.iloc[0] = 0.0
    cleaning[grace_windows] = soiling[grace_windows]

    # Manual washes also reset soiling, with no grace period afterwards.
    if manual_wash_dates is not None:
        wash_index = pd.DatetimeIndex(manual_wash_dates, tz=rainfall.index.tz)
        cleaning[wash_index] = soiling[wash_index]

    soiling -= cleaning.ffill()
    # Cap the loss at the model's maximum soiling level.
    return soiling.where(soiling < max_soiling, max_soiling)
| bsd-3-clause | 36f228e2c871d87a85dbfacb7d3ea9f2 | 39.638889 | 79 | 0.667578 | 3.652934 | false | false | false | false |
pvlib/pvlib-python | benchmarks/benchmarks/irradiance.py | 4 | 2710 | """
ASV benchmarks for irradiance.py
"""
import pandas as pd
from pvlib import irradiance, location
class Irradiance:
    """ASV benchmark suite for the ``pvlib.irradiance`` module.

    ``setup`` builds the shared fixtures once per benchmark; each ``time_*``
    method times a single public function. Keep the timed bodies to a single
    call so the measurement reflects only the function under test.
    """

    def setup(self):
        # 1-minute timestamps over 10 days for the time-series benchmarks
        self.times = pd.date_range(start='20180601', freq='1min',
                                   periods=14400)
        # daily index used by get_extra_radiation
        self.days = pd.date_range(start='20180601', freq='d', periods=30)
        self.location = location.Location(40, -80)
        self.solar_position = self.location.get_solarposition(self.times)
        self.clearsky_irradiance = self.location.get_clearsky(self.times)
        # fixed array orientation shared by the transposition benchmarks
        self.tilt = 20
        self.azimuth = 180
        self.aoi = irradiance.aoi(self.tilt, self.azimuth,
                                  self.solar_position.apparent_zenith,
                                  self.solar_position.azimuth)

    def time_get_extra_radiation(self):
        irradiance.get_extra_radiation(self.days)

    def time_aoi(self):
        irradiance.aoi(self.tilt, self.azimuth,
                       self.solar_position.apparent_zenith,
                       self.solar_position.azimuth)

    def time_aoi_projection(self):
        irradiance.aoi_projection(self.tilt, self.azimuth,
                                  self.solar_position.apparent_zenith,
                                  self.solar_position.azimuth)

    def time_get_ground_diffuse(self):
        irradiance.get_ground_diffuse(self.tilt, self.clearsky_irradiance.ghi)

    def time_get_total_irradiance(self):
        irradiance.get_total_irradiance(self.tilt, self.azimuth,
                                        self.solar_position.apparent_zenith,
                                        self.solar_position.azimuth,
                                        self.clearsky_irradiance.dni,
                                        self.clearsky_irradiance.ghi,
                                        self.clearsky_irradiance.dhi)

    def time_disc(self):
        irradiance.disc(self.clearsky_irradiance.ghi,
                        self.solar_position.apparent_zenith,
                        self.times)

    def time_dirint(self):
        irradiance.dirint(self.clearsky_irradiance.ghi,
                          self.solar_position.apparent_zenith,
                          self.times)

    def time_dirindex(self):
        irradiance.dirindex(self.clearsky_irradiance.ghi,
                            self.clearsky_irradiance.ghi,
                            self.clearsky_irradiance.dni,
                            self.solar_position.apparent_zenith,
                            self.times)

    def time_erbs(self):
        irradiance.erbs(self.clearsky_irradiance.ghi,
                        self.solar_position.apparent_zenith,
                        self.times)
| bsd-3-clause | 506c94cc3e7c889d7d4fe92eaebb9761 | 38.852941 | 78 | 0.544649 | 3.822285 | false | false | false | false |
pvlib/pvlib-python | benchmarks/benchmarks/location.py | 4 | 1596 | """
ASV benchmarks for location.py
"""
import pandas as pd
import pvlib
from pkg_resources import parse_version
def set_solar_position(obj):
    """Attach the shared location/time/solar-position fixtures to ``obj``.

    Used by the ``setup`` methods of the benchmark classes below so they all
    operate on identical inputs.
    """
    loc = pvlib.location.Location(32, -110, altitude=700,
                                  tz='Etc/GMT+7')
    obj.location = loc
    obj.times = pd.date_range(start='20180601', freq='3min', periods=1440)
    # one year of daily timestamps, localized to the site's timezone
    obj.days = pd.date_range(start='20180101', freq='d', periods=365,
                             tz=loc.tz)
    obj.solar_position = loc.get_solarposition(obj.times)
class Location:
    """ASV benchmarks for ``pvlib.location.Location`` methods."""

    def setup(self):
        set_solar_position(self)

    # GH 502
    def time_location_get_airmass(self):
        self.location.get_airmass(solar_position=self.solar_position)

    def time_location_get_solarposition(self):
        self.location.get_solarposition(times=self.times)

    def time_location_get_clearsky(self):
        self.location.get_clearsky(times=self.times,
                                   solar_position=self.solar_position)
class Location_0_6_1:
    """Benchmarks for ``Location`` APIs introduced in pvlib 0.6.1."""

    def setup(self):
        # asv skips a benchmark whose setup raises NotImplementedError,
        # so older pvlib versions are excluded rather than erroring out.
        if parse_version(pvlib.__version__) < parse_version('0.6.1'):
            raise NotImplementedError

        set_solar_position(self)

    def time_location_get_sun_rise_set_transit_pyephem(self):
        self.location.get_sun_rise_set_transit(times=self.days,
                                               method='pyephem')

    def time_location_get_sun_rise_set_transit_spa(self):
        self.location.get_sun_rise_set_transit(times=self.days,
                                               method='spa')
| bsd-3-clause | a8ea51ebf509686c7a94f62ac2e35e74 | 30.294118 | 70 | 0.593358 | 3.602709 | false | false | false | false |
pytorch/vision | torchvision/prototype/datasets/_builtin/country211.py | 1 | 2657 | import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "country211"


@register_info(NAME)
def _info() -> Dict[str, Any]:
    """Static dataset info: the list of category names read from disk."""
    return {"categories": read_categories_file(NAME)}
@register_dataset(NAME)
class Country211(Dataset):
    """Country211 image classification dataset.

    - **homepage**: https://github.com/openai/CLIP/blob/main/data/country211.md
    """

    def __init__(
        self,
        root: Union[str, pathlib.Path],
        *,
        split: str = "train",
        skip_integrity_check: bool = False,
    ) -> None:
        self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
        # The archive stores the validation split in a folder named "valid".
        self._split_folder_name = "valid" if split == "val" else split

        self._categories = _info()["categories"]

        super().__init__(root, skip_integrity_check=skip_integrity_check)

    def _resources(self) -> List[OnlineResource]:
        archive = HttpResource(
            "https://openaipublic.azureedge.net/clip/data/country211.tgz",
            sha256="c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c",
        )
        return [archive]

    def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
        path, buffer = data
        # The name of the folder holding the image file is its category.
        category = pathlib.Path(path).parent.name
        return dict(
            label=Label.from_category(category, categories=self._categories),
            path=path,
            image=EncodedImage.from_file(buffer),
        )

    def _filter_split(self, data: Tuple[str, Any], *, split: str) -> bool:
        # The grandparent folder of an image file is the split folder.
        return pathlib.Path(data[0]).parent.parent.name == split

    def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
        dp = Filter(resource_dps[0], path_comparator("parent.parent.name", self._split_folder_name))
        dp = hint_shuffling(dp)
        dp = hint_sharding(dp)
        return Mapper(dp, self._prepare_sample)

    def __len__(self) -> int:
        return {"train": 31_650, "val": 10_550, "test": 21_100}[self._split]

    def _generate_categories(self) -> List[str]:
        dp = self._resources()[0].load(self._root)
        return sorted({pathlib.Path(path).parent.name for path, _ in dp})
| bsd-3-clause | 0d220ea768006c9768bf80582f411671 | 31.802469 | 100 | 0.62213 | 3.547397 | false | false | false | false |
pytorch/vision | test/test_prototype_transforms_functional.py | 1 | 46207 | import inspect
import math
import os
import re
from typing import get_type_hints
import numpy as np
import PIL.Image
import pytest
import torch
from common_utils import cache, cpu_and_gpu, needs_cuda, set_rng_seed
from prototype_common_utils import assert_close, make_bounding_boxes, make_image, parametrized_error_message
from prototype_transforms_dispatcher_infos import DISPATCHER_INFOS
from prototype_transforms_kernel_infos import KERNEL_INFOS
from torch.utils._pytree import tree_map
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F
from torchvision.prototype.transforms.functional._geometry import _center_crop_compute_padding
from torchvision.prototype.transforms.functional._meta import convert_format_bounding_box
from torchvision.transforms.functional import _get_perspective_coeffs
# Lookup tables keyed by the kernel/dispatcher callable itself, so individual
# tests can fetch a specific info object without scanning the full lists.
KERNEL_INFOS_MAP = {info.kernel: info for info in KERNEL_INFOS}
DISPATCHER_INFOS_MAP = {info.dispatcher: info for info in DISPATCHER_INFOS}
@cache
def script(fn):
    """``torch.jit.script`` ``fn`` once (cached), turning scripting failures into test failures."""
    try:
        scripted = torch.jit.script(fn)
    except Exception as error:
        raise AssertionError(f"Trying to `torch.jit.script` '{fn.__name__}' raised the error above.") from error
    return scripted
# Reusable mark for tests that JIT-script a kernel or dispatcher.
# Scripting a function often triggers a warning like
# `UserWarning: operator() profile_node %$INT1 : int[] = prim::profile_ivalue($INT2) does not have profile information`
# with varying `INT1` and `INT2`. Since these are uninteresting for us and only clutter the test summary, we ignore
# them.
ignore_jit_warning_no_profile = pytest.mark.filterwarnings(
    f"ignore:{re.escape('operator() profile_node %')}:UserWarning"
)
def make_info_args_kwargs_params(info, *, args_kwargs_fn, test_id=None):
    """Expand one info object into a list of ``pytest.param``'s, one per ``ArgsKwargs``.

    Each param carries ``(info, args_kwargs)`` plus the marks the info declares
    for ``test_id`` (if given) and a stable, zero-padded id.
    """
    args_kwargs = list(args_kwargs_fn(info))
    if not args_kwargs:
        raise pytest.UsageError(
            f"Couldn't collect a single `ArgsKwargs` for `{info.id}`{f' in {test_id}' if test_id else ''}"
        )

    # Zero-pad the index so the generated test ids sort naturally.
    idx_field_len = len(str(len(args_kwargs)))

    params = []
    for idx, args_kwargs_ in enumerate(args_kwargs):
        marks = info.get_marks(test_id, args_kwargs_) if test_id else []
        params.append(
            pytest.param(
                info,
                args_kwargs_,
                marks=marks,
                id=f"{info.id}-{idx:0{idx_field_len}}",
            )
        )
    return params
def make_info_args_kwargs_parametrization(infos, *, args_kwargs_fn, condition=None):
    """Decorator factory that parametrizes a test over ``(info, args_kwargs)`` pairs.

    ``condition`` optionally filters which infos participate; by default all do.
    The decorated test's qualified name is parsed to build the ``test_id`` that
    infos use to attach marks (xfails, skips, ...).
    """
    if condition is None:

        def condition(info):
            return True

    def decorator(test_fn):
        parts = test_fn.__qualname__.split(".")
        if len(parts) == 1:
            test_class_name, test_function_name = None, parts[0]
        elif len(parts) == 2:
            test_class_name, test_function_name = parts
        else:
            raise pytest.UsageError("Unable to parse the test class name and test function name from test function")
        test_id = (test_class_name, test_function_name)

        argvalues = []
        for info in infos:
            if condition(info):
                argvalues.extend(make_info_args_kwargs_params(info, args_kwargs_fn=args_kwargs_fn, test_id=test_id))

        return pytest.mark.parametrize(("info", "args_kwargs"), argvalues)(test_fn)

    return decorator
@pytest.fixture(autouse=True)
def fix_rng_seed():
    """Reset the RNG seed (via ``common_utils.set_rng_seed``) before every test for reproducibility."""
    set_rng_seed(0)
    yield
@pytest.fixture()
def test_id(request):
    """Return ``(test class name or None, test function name)`` for the current test."""
    cls = request.cls
    if cls is None:
        class_name = None
    else:
        class_name = cls.__name__
    return class_name, request.node.originalname
class TestKernels:
    """Property tests every low-level kernel registered in ``KERNEL_INFOS`` must satisfy."""

    sample_inputs = make_info_args_kwargs_parametrization(
        KERNEL_INFOS,
        args_kwargs_fn=lambda kernel_info: kernel_info.sample_inputs_fn(),
    )
    reference_inputs = make_info_args_kwargs_parametrization(
        KERNEL_INFOS,
        args_kwargs_fn=lambda info: info.reference_inputs_fn(),
        condition=lambda info: info.reference_fn is not None,
    )

    @ignore_jit_warning_no_profile
    @sample_inputs
    @pytest.mark.parametrize("device", cpu_and_gpu())
    def test_scripted_vs_eager(self, test_id, info, args_kwargs, device):
        """The ``torch.jit.script``'ed kernel must match eager execution."""
        kernel_eager = info.kernel
        kernel_scripted = script(kernel_eager)

        (input, *other_args), kwargs = args_kwargs.load(device)

        actual = kernel_scripted(input, *other_args, **kwargs)
        expected = kernel_eager(input, *other_args, **kwargs)

        assert_close(
            actual,
            expected,
            **info.get_closeness_kwargs(test_id, dtype=input.dtype, device=input.device),
            msg=parametrized_error_message(*other_args, *kwargs),
        )

    def _unbatch(self, batch, *, data_dims):
        """Recursively split a batched (tensor or ``(tensor, *metadata)``) output into single samples."""
        if isinstance(batch, torch.Tensor):
            batched_tensor = batch
            metadata = ()
        else:
            batched_tensor, *metadata = batch

        if batched_tensor.ndim == data_dims:
            return batch

        return [
            self._unbatch(unbatched, data_dims=data_dims)
            for unbatched in (
                batched_tensor.unbind(0) if not metadata else [(t, *metadata) for t in batched_tensor.unbind(0)]
            )
        ]

    @sample_inputs
    @pytest.mark.parametrize("device", cpu_and_gpu())
    def test_batched_vs_single(self, test_id, info, args_kwargs, device):
        """Applying the kernel to a batch must equal applying it to every sample individually."""
        (batched_input, *other_args), kwargs = args_kwargs.load(device)

        feature_type = features.Image if features.is_simple_tensor(batched_input) else type(batched_input)
        # This dictionary contains the number of rightmost dimensions that contain the actual data.
        # Everything to the left is considered a batch dimension.
        data_dims = {
            features.Image: 3,
            features.BoundingBox: 1,
            # `Mask`'s are special in the sense that the data dimensions depend on the type of mask. For detection masks
            # it is 3 `(*, N, H, W)`, but for segmentation masks it is 2 `(*, H, W)`. Since both are grouped under one
            # type all kernels should also work without differentiating between the two. Thus, we go with 2 here as
            # common ground.
            features.Mask: 2,
            features.Video: 4,
        }.get(feature_type)
        if data_dims is None:
            raise pytest.UsageError(
                f"The number of data dimensions cannot be determined for input of type {feature_type.__name__}."
            ) from None
        elif batched_input.ndim <= data_dims:
            pytest.skip("Input is not batched.")
        elif not all(batched_input.shape[:-data_dims]):
            pytest.skip("Input has a degenerate batch shape.")

        batched_output = info.kernel(batched_input, *other_args, **kwargs)
        actual = self._unbatch(batched_output, data_dims=data_dims)

        single_inputs = self._unbatch(batched_input, data_dims=data_dims)
        expected = tree_map(lambda single_input: info.kernel(single_input, *other_args, **kwargs), single_inputs)

        assert_close(
            actual,
            expected,
            **info.get_closeness_kwargs(test_id, dtype=batched_input.dtype, device=batched_input.device),
            msg=parametrized_error_message(*other_args, *kwargs),
        )

    @sample_inputs
    @pytest.mark.parametrize("device", cpu_and_gpu())
    def test_no_inplace(self, info, args_kwargs, device):
        """Kernels must not modify their input in place (checked via the tensor version counter)."""
        (input, *other_args), kwargs = args_kwargs.load(device)

        if input.numel() == 0:
            pytest.skip("The input has a degenerate shape.")

        input_version = input._version
        info.kernel(input, *other_args, **kwargs)

        assert input._version == input_version

    @sample_inputs
    @needs_cuda
    def test_cuda_vs_cpu(self, test_id, info, args_kwargs):
        """Results on CUDA must match results on CPU."""
        (input_cpu, *other_args), kwargs = args_kwargs.load("cpu")
        input_cuda = input_cpu.to("cuda")

        output_cpu = info.kernel(input_cpu, *other_args, **kwargs)
        output_cuda = info.kernel(input_cuda, *other_args, **kwargs)

        assert_close(
            output_cuda,
            output_cpu,
            check_device=False,
            **info.get_closeness_kwargs(test_id, dtype=input_cuda.dtype, device=input_cuda.device),
            msg=parametrized_error_message(*other_args, *kwargs),
        )

    @sample_inputs
    @pytest.mark.parametrize("device", cpu_and_gpu())
    def test_dtype_and_device_consistency(self, info, args_kwargs, device):
        """The output must keep the input's dtype and device."""
        (input, *other_args), kwargs = args_kwargs.load(device)

        output = info.kernel(input, *other_args, **kwargs)
        # Most kernels just return a tensor, but some also return some additional metadata
        if not isinstance(output, torch.Tensor):
            output, *_ = output

        assert output.dtype == input.dtype
        assert output.device == input.device

    @reference_inputs
    def test_against_reference(self, test_id, info, args_kwargs):
        """The kernel must match the reference implementation attached to its info."""
        (input, *other_args), kwargs = args_kwargs.load("cpu")

        actual = info.kernel(input, *other_args, **kwargs)
        expected = info.reference_fn(input, *other_args, **kwargs)

        assert_close(
            actual,
            expected,
            **info.get_closeness_kwargs(test_id, dtype=input.dtype, device=input.device),
            msg=parametrized_error_message(*other_args, *kwargs),
        )

    @make_info_args_kwargs_parametrization(
        [info for info in KERNEL_INFOS if info.float32_vs_uint8],
        args_kwargs_fn=lambda info: info.reference_inputs_fn(),
    )
    def test_float32_vs_uint8(self, test_id, info, args_kwargs):
        """The float32 and uint8 code paths must agree after dtype conversion."""
        (input, *other_args), kwargs = args_kwargs.load("cpu")

        if input.dtype != torch.uint8:
            pytest.skip(f"Input dtype is {input.dtype}.")

        adapted_other_args, adapted_kwargs = info.float32_vs_uint8(other_args, kwargs)

        actual = info.kernel(
            F.convert_dtype_image_tensor(input, dtype=torch.float32),
            *adapted_other_args,
            **adapted_kwargs,
        )

        expected = F.convert_dtype_image_tensor(info.kernel(input, *other_args, **kwargs), dtype=torch.float32)

        assert_close(
            actual,
            expected,
            **info.get_closeness_kwargs(test_id, dtype=torch.float32, device=input.device),
            msg=parametrized_error_message(*other_args, *kwargs),
        )
@pytest.fixture
def spy_on(mocker):
    """Factory fixture: patch ``module.name`` with a mock that wraps ``fn`` and return the spy."""

    def make_spy(fn, *, module=None, name=None):
        # TODO: we can probably get rid of the non-default modules and names if we eliminate aliasing
        target = f"{module or fn.__module__}.{name or fn.__name__}"
        return mocker.patch(target, wraps=fn)

    return make_spy
class TestDispatchers:
    """Tests for the mid-level dispatchers that route inputs to the type-specific kernels."""

    image_sample_inputs = make_info_args_kwargs_parametrization(
        DISPATCHER_INFOS,
        args_kwargs_fn=lambda info: info.sample_inputs(features.Image),
        condition=lambda info: features.Image in info.kernels,
    )

    @ignore_jit_warning_no_profile
    @image_sample_inputs
    @pytest.mark.parametrize("device", cpu_and_gpu())
    def test_scripted_smoke(self, info, args_kwargs, device):
        """The scripted dispatcher must run on a plain tensor input."""
        dispatcher = script(info.dispatcher)

        (image_feature, *other_args), kwargs = args_kwargs.load(device)
        image_simple_tensor = torch.Tensor(image_feature)

        dispatcher(image_simple_tensor, *other_args, **kwargs)

    # TODO: We need this until the dispatchers below also have `DispatcherInfo`'s. If they do, `test_scripted_smoke`
    #  replaces this test for them.
    @ignore_jit_warning_no_profile
    @pytest.mark.parametrize(
        "dispatcher",
        [
            F.clamp_bounding_box,
            F.convert_color_space,
            F.get_dimensions,
            F.get_image_num_channels,
            F.get_image_size,
            F.get_num_channels,
            F.get_num_frames,
            F.get_spatial_size,
            F.rgb_to_grayscale,
            F.uniform_temporal_subsample,
        ],
        ids=lambda dispatcher: dispatcher.__name__,
    )
    def test_scriptable(self, dispatcher):
        """Dispatchers without infos must at least be ``torch.jit.script``-able."""
        script(dispatcher)

    @image_sample_inputs
    def test_dispatch_simple_tensor(self, info, args_kwargs, spy_on):
        """A plain tensor input must be routed to the image kernel."""
        (image_feature, *other_args), kwargs = args_kwargs.load()
        image_simple_tensor = torch.Tensor(image_feature)

        kernel_info = info.kernel_infos[features.Image]
        spy = spy_on(kernel_info.kernel, module=info.dispatcher.__module__, name=kernel_info.id)

        info.dispatcher(image_simple_tensor, *other_args, **kwargs)

        spy.assert_called_once()

    @make_info_args_kwargs_parametrization(
        DISPATCHER_INFOS,
        args_kwargs_fn=lambda info: info.sample_inputs(features.Image),
        condition=lambda info: info.pil_kernel_info is not None,
    )
    def test_dispatch_pil(self, info, args_kwargs, spy_on):
        """A PIL image input must be routed to the PIL kernel."""
        (image_feature, *other_args), kwargs = args_kwargs.load()

        if image_feature.ndim > 3:
            pytest.skip("Input is batched")

        image_pil = F.to_image_pil(image_feature)

        pil_kernel_info = info.pil_kernel_info
        spy = spy_on(pil_kernel_info.kernel, module=info.dispatcher.__module__, name=pil_kernel_info.id)

        info.dispatcher(image_pil, *other_args, **kwargs)

        spy.assert_called_once()

    @make_info_args_kwargs_parametrization(
        DISPATCHER_INFOS,
        args_kwargs_fn=lambda info: info.sample_inputs(),
    )
    def test_dispatch_feature(self, info, args_kwargs, spy_on):
        """A feature input must be routed through the feature's method of the same name."""
        (feature, *other_args), kwargs = args_kwargs.load()

        method_name = info.id
        method = getattr(feature, method_name)
        feature_type = type(feature)
        spy = spy_on(method, module=feature_type.__module__, name=f"{feature_type.__name__}.{method_name}")

        info.dispatcher(feature, *other_args, **kwargs)

        spy.assert_called_once()

    @pytest.mark.parametrize(
        ("dispatcher_info", "feature_type", "kernel_info"),
        [
            pytest.param(dispatcher_info, feature_type, kernel_info, id=f"{dispatcher_info.id}-{feature_type.__name__}")
            for dispatcher_info in DISPATCHER_INFOS
            for feature_type, kernel_info in dispatcher_info.kernel_infos.items()
        ],
    )
    def test_dispatcher_kernel_signatures_consistency(self, dispatcher_info, feature_type, kernel_info):
        """Every kernel parameter must appear on the dispatcher with an identical declaration."""
        dispatcher_signature = inspect.signature(dispatcher_info.dispatcher)
        dispatcher_params = list(dispatcher_signature.parameters.values())[1:]

        kernel_signature = inspect.signature(kernel_info.kernel)
        kernel_params = list(kernel_signature.parameters.values())[1:]
        # We filter out metadata that is implicitly passed to the dispatcher through the input feature, but has to be
        # explicitly passed to the kernel.
        feature_type_metadata = feature_type.__annotations__.keys()
        kernel_params = [param for param in kernel_params if param.name not in feature_type_metadata]

        dispatcher_params = iter(dispatcher_params)
        for dispatcher_param, kernel_param in zip(dispatcher_params, kernel_params):
            try:
                # In general, the dispatcher parameters are a superset of the kernel parameters. Thus, we filter out
                # dispatcher parameters that have no kernel equivalent while keeping the order intact.
                while dispatcher_param.name != kernel_param.name:
                    dispatcher_param = next(dispatcher_params)
            except StopIteration:
                raise AssertionError(
                    f"Parameter `{kernel_param.name}` of kernel `{kernel_info.id}` "
                    f"has no corresponding parameter on the dispatcher `{dispatcher_info.id}`."
                ) from None

            assert dispatcher_param == kernel_param

    @pytest.mark.parametrize("info", DISPATCHER_INFOS, ids=lambda info: info.id)
    def test_dispatcher_feature_signatures_consistency(self, info):
        """The dispatcher signature must match the corresponding ``_Feature`` method's signature."""
        try:
            feature_method = getattr(features._Feature, info.id)
        except AttributeError:
            pytest.skip("Dispatcher doesn't support arbitrary feature dispatch.")

        dispatcher_signature = inspect.signature(info.dispatcher)
        dispatcher_params = list(dispatcher_signature.parameters.values())[1:]

        feature_signature = inspect.signature(feature_method)
        feature_params = list(feature_signature.parameters.values())[1:]

        # Because we use `from __future__ import annotations` inside the module where `features._Feature` is defined,
        # the annotations are stored as strings. This makes them concrete again, so they can be compared to the natively
        # concrete dispatcher annotations.
        feature_annotations = get_type_hints(feature_method)
        for param in feature_params:
            param._annotation = feature_annotations[param.name]

        assert dispatcher_params == feature_params
@pytest.mark.parametrize(
    ("alias", "target"),
    [
        pytest.param(alias, target, id=alias.__name__)
        for alias, target in [
            (F.hflip, F.horizontal_flip),
            (F.vflip, F.vertical_flip),
            (F.get_image_num_channels, F.get_num_channels),
            (F.to_pil_image, F.to_image_pil),
            (F.elastic_transform, F.elastic),
            (F.convert_image_dtype, F.convert_dtype_image_tensor),
        ]
    ],
)
def test_alias(alias, target):
    """Each alias must be the very same function object as its target, not a wrapper."""
    assert alias is target
@pytest.mark.parametrize(
    ("info", "args_kwargs"),
    make_info_args_kwargs_params(
        KERNEL_INFOS_MAP[F.convert_dtype_image_tensor],
        args_kwargs_fn=lambda info: info.sample_inputs_fn(),
    ),
)
@pytest.mark.parametrize("device", cpu_and_gpu())
def test_convert_dtype_image_tensor_dtype_and_device(info, args_kwargs, device):
    """The dtype-conversion kernel's output must carry the requested dtype on the input's device."""
    (image, *other_args), kwargs = args_kwargs.load(device)
    # The target dtype may arrive positionally or as a keyword; default mirrors the kernel's.
    if other_args:
        dtype = other_args[0]
    else:
        dtype = kwargs.get("dtype", torch.float32)

    output = info.kernel(image, dtype)

    assert output.dtype == dtype
    assert output.device == image.device
# TODO: All correctness checks below this line should be ported to be references on a `KernelInfo` in
# `prototype_transforms_kernel_infos.py`
def _compute_affine_matrix(angle_, translate_, scale_, shear_, center_):
rot = math.radians(angle_)
cx, cy = center_
tx, ty = translate_
sx, sy = [math.radians(sh_) for sh_ in shear_]
c_matrix = np.array([[1, 0, cx], [0, 1, cy], [0, 0, 1]])
t_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
c_matrix_inv = np.linalg.inv(c_matrix)
rs_matrix = np.array(
[
[scale_ * math.cos(rot), -scale_ * math.sin(rot), 0],
[scale_ * math.sin(rot), scale_ * math.cos(rot), 0],
[0, 0, 1],
]
)
shear_x_matrix = np.array([[1, -math.tan(sx), 0], [0, 1, 0], [0, 0, 1]])
shear_y_matrix = np.array([[1, 0, 0], [-math.tan(sy), 1, 0], [0, 0, 1]])
rss_matrix = np.matmul(rs_matrix, np.matmul(shear_y_matrix, shear_x_matrix))
true_matrix = np.matmul(t_matrix, np.matmul(c_matrix, np.matmul(rss_matrix, c_matrix_inv)))
return true_matrix
@pytest.mark.parametrize("device", cpu_and_gpu())
def test_correctness_affine_bounding_box_on_fixed_input(device):
    """Affine-transform a fixed set of boxes and compare against values produced by albumentations."""
    # Check transformation against known expected output
    spatial_size = (64, 64)
    # xyxy format
    in_boxes = [
        [20, 25, 35, 45],
        [50, 5, 70, 22],
        [spatial_size[1] // 2 - 10, spatial_size[0] // 2 - 10, spatial_size[1] // 2 + 10, spatial_size[0] // 2 + 10],
        [1, 1, 5, 5],
    ]
    in_boxes = features.BoundingBox(
        in_boxes, format=features.BoundingBoxFormat.XYXY, spatial_size=spatial_size, dtype=torch.float64, device=device
    )
    # Tested parameters
    angle = 63
    scale = 0.89
    dx = 0.12
    dy = 0.23

    # Expected bboxes computed using albumentations:
    # from albumentations.augmentations.geometric.functional import bbox_shift_scale_rotate
    # from albumentations.augmentations.geometric.functional import normalize_bbox, denormalize_bbox
    # expected_bboxes = []
    # for in_box in in_boxes:
    #     n_in_box = normalize_bbox(in_box, *spatial_size)
    #     n_out_box = bbox_shift_scale_rotate(n_in_box, -angle, scale, dx, dy, *spatial_size)
    #     out_box = denormalize_bbox(n_out_box, *spatial_size)
    #     expected_bboxes.append(out_box)
    expected_bboxes = [
        (24.522435977922218, 34.375689508290854, 46.443125279998114, 54.3516575015695),
        (54.88288587110401, 50.08453280875634, 76.44484547743795, 72.81332520036864),
        (27.709526487041554, 34.74952648704156, 51.650473512958435, 58.69047351295844),
        (48.56528888843238, 9.611532109828834, 53.35347829361575, 14.39972151501221),
    ]

    output_boxes = F.affine_bounding_box(
        in_boxes,
        in_boxes.format,
        in_boxes.spatial_size,
        angle,
        (dx * spatial_size[1], dy * spatial_size[0]),
        scale,
        shear=(0, 0),
    )

    torch.testing.assert_close(output_boxes.tolist(), expected_bboxes)
@pytest.mark.parametrize("device", cpu_and_gpu())
def test_correctness_affine_segmentation_mask_on_fixed_input(device):
    """Rotating 90 degrees and scaling x2 must match a rot90 + nearest-upsample + crop reference."""
    # Check transformation against known expected output and CPU/CUDA devices

    # Create a fixed input segmentation mask with 2 square masks
    # in top-left, bottom-left corners
    mask = torch.zeros(1, 32, 32, dtype=torch.long, device=device)
    mask[0, 2:10, 2:10] = 1
    mask[0, 32 - 9 : 32 - 3, 3:9] = 2

    # Rotate 90 degrees and scale
    expected_mask = torch.rot90(mask, k=-1, dims=(-2, -1))
    expected_mask = torch.nn.functional.interpolate(expected_mask[None, :].float(), size=(64, 64), mode="nearest")
    expected_mask = expected_mask[0, :, 16 : 64 - 16, 16 : 64 - 16].long()

    out_mask = F.affine_mask(mask, 90, [0.0, 0.0], 64.0 / 32.0, [0.0, 0.0])

    torch.testing.assert_close(out_mask, expected_mask)
@pytest.mark.parametrize("angle", range(-90, 90, 56))
@pytest.mark.parametrize("expand, center", [(True, None), (False, None), (False, (12, 14))])
def test_correctness_rotate_bounding_box(angle, expand, center):
    """Compare ``F.rotate_bounding_box`` against a NumPy reference derived from the affine matrix."""

    def _compute_expected_bbox(bbox, angle_, expand_, center_):
        # Rotation only: no translation, unit scale, no shear.
        affine_matrix = _compute_affine_matrix(angle_, [0.0, 0.0], 1.0, [0.0, 0.0], center_)
        affine_matrix = affine_matrix[:2, :]

        height, width = bbox.spatial_size
        bbox_xyxy = convert_format_bounding_box(
            bbox, old_format=bbox.format, new_format=features.BoundingBoxFormat.XYXY
        )
        points = np.array(
            [
                [bbox_xyxy[0].item(), bbox_xyxy[1].item(), 1.0],
                [bbox_xyxy[2].item(), bbox_xyxy[1].item(), 1.0],
                [bbox_xyxy[0].item(), bbox_xyxy[3].item(), 1.0],
                [bbox_xyxy[2].item(), bbox_xyxy[3].item(), 1.0],
                # image frame
                [0.0, 0.0, 1.0],
                [0.0, height, 1.0],
                [width, height, 1.0],
                [width, 0.0, 1.0],
            ]
        )
        transformed_points = np.matmul(points, affine_matrix.T)
        # Axis-aligned envelope of the four transformed box corners.
        out_bbox = [
            np.min(transformed_points[:4, 0]),
            np.min(transformed_points[:4, 1]),
            np.max(transformed_points[:4, 0]),
            np.max(transformed_points[:4, 1]),
        ]
        if expand_:
            # Shift by the transformed image-frame corners and grow the canvas accordingly.
            tr_x = np.min(transformed_points[4:, 0])
            tr_y = np.min(transformed_points[4:, 1])
            out_bbox[0] -= tr_x
            out_bbox[1] -= tr_y
            out_bbox[2] -= tr_x
            out_bbox[3] -= tr_y

            height = int(height - 2 * tr_y)
            width = int(width - 2 * tr_x)

        out_bbox = features.BoundingBox(
            out_bbox,
            format=features.BoundingBoxFormat.XYXY,
            spatial_size=(height, width),
            dtype=bbox.dtype,
            device=bbox.device,
        )
        return (
            convert_format_bounding_box(out_bbox, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox.format),
            (height, width),
        )

    spatial_size = (32, 38)

    for bboxes in make_bounding_boxes(spatial_size=spatial_size, extra_dims=((4,),)):
        bboxes_format = bboxes.format
        bboxes_spatial_size = bboxes.spatial_size

        output_bboxes, output_spatial_size = F.rotate_bounding_box(
            bboxes,
            bboxes_format,
            spatial_size=bboxes_spatial_size,
            angle=angle,
            expand=expand,
            center=center,
        )

        center_ = center
        if center_ is None:
            # Default rotation center: the image center in (x, y) order.
            center_ = [s * 0.5 for s in bboxes_spatial_size[::-1]]

        if bboxes.ndim < 2:
            bboxes = [bboxes]

        expected_bboxes = []
        for bbox in bboxes:
            bbox = features.BoundingBox(bbox, format=bboxes_format, spatial_size=bboxes_spatial_size)
            expected_bbox, expected_spatial_size = _compute_expected_bbox(bbox, -angle, expand, center_)
            expected_bboxes.append(expected_bbox)
        if len(expected_bboxes) > 1:
            expected_bboxes = torch.stack(expected_bboxes)
        else:
            expected_bboxes = expected_bboxes[0]
        torch.testing.assert_close(output_bboxes, expected_bboxes, atol=1, rtol=0)
        torch.testing.assert_close(output_spatial_size, expected_spatial_size, atol=1, rtol=0)
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("expand", [False])  # expand=True does not match D2
def test_correctness_rotate_bounding_box_on_fixed_input(device, expand):
    """Rotate a fixed set of boxes and compare against coordinates produced by Detectron2."""
    # Check transformation against known expected output
    spatial_size = (64, 64)
    # xyxy format
    in_boxes = [
        [1, 1, 5, 5],
        [1, spatial_size[0] - 6, 5, spatial_size[0] - 2],
        [spatial_size[1] - 6, spatial_size[0] - 6, spatial_size[1] - 2, spatial_size[0] - 2],
        [spatial_size[1] // 2 - 10, spatial_size[0] // 2 - 10, spatial_size[1] // 2 + 10, spatial_size[0] // 2 + 10],
    ]
    in_boxes = features.BoundingBox(
        in_boxes, format=features.BoundingBoxFormat.XYXY, spatial_size=spatial_size, dtype=torch.float64, device=device
    )
    # Tested parameters
    angle = 45
    center = None if expand else [12, 23]

    # # Expected bboxes computed using Detectron2:
    # from detectron2.data.transforms import RotationTransform, AugmentationList
    # from detectron2.data.transforms import AugInput
    # import cv2
    # inpt = AugInput(im1, boxes=np.array(in_boxes, dtype="float32"))
    # augs = AugmentationList([RotationTransform(*size, angle, expand=expand, center=center, interp=cv2.INTER_NEAREST), ])
    # out = augs(inpt)
    # print(inpt.boxes)
    if expand:
        expected_bboxes = [
            [1.65937957, 42.67157288, 7.31623382, 48.32842712],
            [41.96446609, 82.9766594, 47.62132034, 88.63351365],
            [82.26955262, 42.67157288, 87.92640687, 48.32842712],
            [31.35786438, 31.35786438, 59.64213562, 59.64213562],
        ]
    else:
        expected_bboxes = [
            [-11.33452378, 12.39339828, -5.67766953, 18.05025253],
            [28.97056275, 52.69848481, 34.627417, 58.35533906],
            [69.27564928, 12.39339828, 74.93250353, 18.05025253],
            [18.36396103, 1.07968978, 46.64823228, 29.36396103],
        ]

    output_boxes, _ = F.rotate_bounding_box(
        in_boxes,
        in_boxes.format,
        in_boxes.spatial_size,
        angle,
        expand=expand,
        center=center,
    )

    torch.testing.assert_close(output_boxes.tolist(), expected_bboxes)
@pytest.mark.parametrize("device", cpu_and_gpu())
def test_correctness_rotate_segmentation_mask_on_fixed_input(device):
    """Rotating a mask by 90 degrees (no expand) must be equivalent to ``torch.rot90``."""
    # Check transformation against known expected output and CPU/CUDA devices

    # Create a fixed input segmentation mask with 2 square masks
    # in top-left, bottom-left corners
    mask = torch.zeros(1, 32, 32, dtype=torch.long, device=device)
    mask[0, 2:10, 2:10] = 1
    mask[0, 32 - 9 : 32 - 3, 3:9] = 2

    # Rotate 90 degrees
    expected_mask = torch.rot90(mask, k=1, dims=(-2, -1))
    out_mask = F.rotate_mask(mask, 90, expand=False)
    torch.testing.assert_close(out_mask, expected_mask)
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize(
    "format",
    [features.BoundingBoxFormat.XYXY, features.BoundingBoxFormat.XYWH, features.BoundingBoxFormat.CXCYWH],
)
@pytest.mark.parametrize(
    "top, left, height, width, expected_bboxes",
    [
        [8, 12, 30, 40, [(-2.0, 7.0, 13.0, 27.0), (38.0, -3.0, 58.0, 14.0), (33.0, 38.0, 44.0, 54.0)]],
        [-8, 12, 70, 40, [(-2.0, 23.0, 13.0, 43.0), (38.0, 13.0, 58.0, 30.0), (33.0, 54.0, 44.0, 70.0)]],
    ],
)
def test_correctness_crop_bounding_box(device, format, top, left, height, width, expected_bboxes):
    """Crop a fixed set of boxes and compare against coordinates produced by Albumentations."""
    # Expected bboxes computed using Albumentations:
    # import numpy as np
    # from albumentations.augmentations.crops.functional import crop_bbox_by_coords, normalize_bbox, denormalize_bbox
    # expected_bboxes = []
    # for in_box in in_boxes:
    #     n_in_box = normalize_bbox(in_box, *size)
    #     n_out_box = crop_bbox_by_coords(
    #         n_in_box, (left, top, left + width, top + height), height, width, *size
    #     )
    #     out_box = denormalize_bbox(n_out_box, height, width)
    #     expected_bboxes.append(out_box)

    size = (64, 76)
    # xyxy format
    in_boxes = [
        [10.0, 15.0, 25.0, 35.0],
        [50.0, 5.0, 70.0, 22.0],
        [45.0, 46.0, 56.0, 62.0],
    ]
    in_boxes = features.BoundingBox(in_boxes, format=features.BoundingBoxFormat.XYXY, spatial_size=size, device=device)
    if format != features.BoundingBoxFormat.XYXY:
        in_boxes = convert_format_bounding_box(in_boxes, features.BoundingBoxFormat.XYXY, format)

    # NOTE(review): the parametrized `height`/`width` only influence `expected_bboxes`; the kernel
    # is called with `size` as the crop height/width — confirm this is intentional.
    output_boxes, output_spatial_size = F.crop_bounding_box(
        in_boxes,
        format,
        top,
        left,
        size[0],
        size[1],
    )

    if format != features.BoundingBoxFormat.XYXY:
        output_boxes = convert_format_bounding_box(output_boxes, format, features.BoundingBoxFormat.XYXY)

    torch.testing.assert_close(output_boxes.tolist(), expected_bboxes)
    torch.testing.assert_close(output_spatial_size, size)
@pytest.mark.parametrize("device", cpu_and_gpu())
def test_correctness_horizontal_flip_segmentation_mask_on_fixed_input(device):
    """A mask with only the first column set must come back with only the last column set."""
    mask = torch.zeros((3, 3, 3), dtype=torch.long, device=device)
    mask[:, :, 0] = 1

    out_mask = F.horizontal_flip_mask(mask)

    expected_mask = torch.zeros((3, 3, 3), dtype=torch.long, device=device)
    expected_mask[:, :, -1] = 1
    torch.testing.assert_close(out_mask, expected_mask)
@pytest.mark.parametrize("device", cpu_and_gpu())
def test_correctness_vertical_flip_segmentation_mask_on_fixed_input(device):
    """A mask with only the first row set must come back with only the last row set."""
    mask = torch.zeros((3, 3, 3), dtype=torch.long, device=device)
    mask[:, 0, :] = 1

    out_mask = F.vertical_flip_mask(mask)

    expected_mask = torch.zeros((3, 3, 3), dtype=torch.long, device=device)
    expected_mask[:, -1, :] = 1
    torch.testing.assert_close(out_mask, expected_mask)
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize(
    "format",
    [features.BoundingBoxFormat.XYXY, features.BoundingBoxFormat.XYWH, features.BoundingBoxFormat.CXCYWH],
)
@pytest.mark.parametrize(
    "top, left, height, width, size",
    [
        [0, 0, 30, 30, (60, 60)],
        [-5, 5, 35, 45, (32, 34)],
    ],
)
def test_correctness_resized_crop_bounding_box(device, format, top, left, height, width, size):
    def _reference_box(box, crop_top, crop_left, crop_height, crop_width, out_size):
        # box is xyxy: shift the coordinates into the crop frame, then rescale
        # to the requested output size.
        scale_x = out_size[1] / crop_width
        scale_y = out_size[0] / crop_height
        x1, y1, x2, y2 = box
        return [
            (x1 - crop_left) * scale_x,
            (y1 - crop_top) * scale_y,
            (x2 - crop_left) * scale_x,
            (y2 - crop_top) * scale_y,
        ]

    spatial_size = (100, 100)
    # xyxy format
    in_boxes = [
        [10.0, 10.0, 20.0, 20.0],
        [5.0, 10.0, 15.0, 20.0],
    ]
    expected_bboxes = torch.tensor(
        [_reference_box(list(box), top, left, height, width, size) for box in in_boxes], device=device
    )

    in_boxes = features.BoundingBox(
        in_boxes, format=features.BoundingBoxFormat.XYXY, spatial_size=spatial_size, device=device
    )
    if format != features.BoundingBoxFormat.XYXY:
        in_boxes = convert_format_bounding_box(in_boxes, features.BoundingBoxFormat.XYXY, format)

    output_boxes, output_spatial_size = F.resized_crop_bounding_box(in_boxes, format, top, left, height, width, size)

    if format != features.BoundingBoxFormat.XYXY:
        output_boxes = convert_format_bounding_box(output_boxes, format, features.BoundingBoxFormat.XYXY)

    torch.testing.assert_close(output_boxes, expected_bboxes)
    torch.testing.assert_close(output_spatial_size, size)
def _parse_padding(padding):
    """Expand a torchvision-style padding spec to ``[left, top, right, bottom]``.

    Accepts an int (same pad on every side) or a sequence of 1, 2 or 4 values,
    mirroring the semantics of ``torchvision.transforms.functional.pad``.

    Generalized: the original only expanded ``list`` inputs, so a 2-tuple fell
    through unchanged and could not be unpacked as four values by callers.
    """
    if isinstance(padding, int):
        return [padding] * 4
    padding = list(padding)  # also accept tuples and other sequences
    if len(padding) == 1:
        return padding * 4
    if len(padding) == 2:
        # [horizontal, vertical] -> [left, top, right, bottom]
        return padding * 2
    return padding
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("padding", [[1], [1, 1], [1, 1, 2, 2]])
def test_correctness_pad_bounding_box(device, padding):
    # Reference implementation: convert to XYXY, shift by the left/top padding,
    # convert back to the original format and dtype.
    def _compute_expected_bbox(bbox, padding_):
        pad_left, pad_up, _, _ = _parse_padding(padding_)

        bbox_format = bbox.format
        bbox_dtype = bbox.dtype
        bbox = (
            bbox.clone()
            if bbox_format == features.BoundingBoxFormat.XYXY
            else convert_format_bounding_box(bbox, bbox_format, features.BoundingBoxFormat.XYXY)
        )

        # x coordinates live at even indices, y coordinates at odd indices.
        bbox[0::2] += pad_left
        bbox[1::2] += pad_up

        bbox = convert_format_bounding_box(bbox, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox_format)
        if bbox.dtype != bbox_dtype:
            # Temporary cast to original dtype
            # e.g. float32 -> int
            bbox = bbox.to(bbox_dtype)
        return bbox

    def _compute_expected_spatial_size(bbox, padding_):
        # Padding enlarges the canvas on each side accordingly.
        pad_left, pad_up, pad_right, pad_down = _parse_padding(padding_)
        height, width = bbox.spatial_size
        return height + pad_up + pad_down, width + pad_left + pad_right

    for bboxes in make_bounding_boxes():
        bboxes = bboxes.to(device)
        bboxes_format = bboxes.format
        bboxes_spatial_size = bboxes.spatial_size

        output_boxes, output_spatial_size = F.pad_bounding_box(
            bboxes, format=bboxes_format, spatial_size=bboxes_spatial_size, padding=padding
        )

        torch.testing.assert_close(output_spatial_size, _compute_expected_spatial_size(bboxes, padding))

        # Normalize a single box / empty batch to a list so the loop below works.
        if bboxes.ndim < 2 or bboxes.shape[0] == 0:
            bboxes = [bboxes]

        expected_bboxes = []
        for bbox in bboxes:
            bbox = features.BoundingBox(bbox, format=bboxes_format, spatial_size=bboxes_spatial_size)
            expected_bboxes.append(_compute_expected_bbox(bbox, padding))

        if len(expected_bboxes) > 1:
            expected_bboxes = torch.stack(expected_bboxes)
        else:
            expected_bboxes = expected_bboxes[0]
        # atol=1 tolerates rounding from the dtype round-trip above.
        torch.testing.assert_close(output_boxes, expected_bboxes, atol=1, rtol=0)
@pytest.mark.parametrize("device", cpu_and_gpu())
def test_correctness_pad_segmentation_mask_on_fixed_input(device):
    """Padding an all-ones mask by one pixel must produce a zero border."""
    in_mask = torch.ones((1, 3, 3), dtype=torch.long, device=device)

    padded = F.pad_mask(in_mask, padding=[1, 1, 1, 1])

    reference = torch.zeros((1, 5, 5), dtype=torch.long, device=device)
    reference[:, 1:-1, 1:-1] = 1
    torch.testing.assert_close(padded, reference)
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize(
    "startpoints, endpoints",
    [
        [[[0, 0], [33, 0], [33, 25], [0, 25]], [[3, 2], [32, 3], [30, 24], [2, 25]]],
        [[[3, 2], [32, 3], [30, 24], [2, 25]], [[0, 0], [33, 0], [33, 25], [0, 25]]],
        [[[3, 2], [32, 3], [30, 24], [2, 25]], [[5, 5], [30, 3], [33, 19], [4, 25]]],
    ],
)
def test_correctness_perspective_bounding_box(device, startpoints, endpoints):
    # Reference: apply the perspective transform (x', y') = (m1 @ p) / (m2 @ p)
    # with p = (x, y, 1) to all four box corners and take the axis-aligned hull.
    def _compute_expected_bbox(bbox, pcoeffs_):
        m1 = np.array(
            [
                [pcoeffs_[0], pcoeffs_[1], pcoeffs_[2]],
                [pcoeffs_[3], pcoeffs_[4], pcoeffs_[5]],
            ]
        )
        # Both rows of m2 are identical on purpose: x' and y' share the same
        # perspective denominator g*x + h*y + 1.
        m2 = np.array(
            [
                [pcoeffs_[6], pcoeffs_[7], 1.0],
                [pcoeffs_[6], pcoeffs_[7], 1.0],
            ]
        )

        bbox_xyxy = convert_format_bounding_box(
            bbox, old_format=bbox.format, new_format=features.BoundingBoxFormat.XYXY
        )
        # The four box corners in homogeneous coordinates.
        points = np.array(
            [
                [bbox_xyxy[0].item(), bbox_xyxy[1].item(), 1.0],
                [bbox_xyxy[2].item(), bbox_xyxy[1].item(), 1.0],
                [bbox_xyxy[0].item(), bbox_xyxy[3].item(), 1.0],
                [bbox_xyxy[2].item(), bbox_xyxy[3].item(), 1.0],
            ]
        )
        numer = np.matmul(points, m1.T)
        denom = np.matmul(points, m2.T)
        transformed_points = numer / denom
        # Axis-aligned bounding box of the transformed corners.
        out_bbox = [
            np.min(transformed_points[:, 0]),
            np.min(transformed_points[:, 1]),
            np.max(transformed_points[:, 0]),
            np.max(transformed_points[:, 1]),
        ]
        out_bbox = features.BoundingBox(
            np.array(out_bbox),
            format=features.BoundingBoxFormat.XYXY,
            spatial_size=bbox.spatial_size,
            dtype=bbox.dtype,
            device=bbox.device,
        )
        return convert_format_bounding_box(out_bbox, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox.format)

    spatial_size = (32, 38)

    pcoeffs = _get_perspective_coeffs(startpoints, endpoints)
    # Reference boxes use the inverse coefficients (endpoints -> startpoints).
    inv_pcoeffs = _get_perspective_coeffs(endpoints, startpoints)

    for bboxes in make_bounding_boxes(spatial_size=spatial_size, extra_dims=((4,),)):
        bboxes = bboxes.to(device)
        bboxes_format = bboxes.format
        bboxes_spatial_size = bboxes.spatial_size

        output_bboxes = F.perspective_bounding_box(
            bboxes,
            bboxes_format,
            None,
            None,
            coefficients=pcoeffs,
        )

        # Normalize a single box to a list so the loop below works.
        if bboxes.ndim < 2:
            bboxes = [bboxes]

        expected_bboxes = []
        for bbox in bboxes:
            bbox = features.BoundingBox(bbox, format=bboxes_format, spatial_size=bboxes_spatial_size)
            expected_bboxes.append(_compute_expected_bbox(bbox, inv_pcoeffs))
        if len(expected_bboxes) > 1:
            expected_bboxes = torch.stack(expected_bboxes)
        else:
            expected_bboxes = expected_bboxes[0]
        # atol=1 tolerates rounding differences against the float reference.
        torch.testing.assert_close(output_bboxes, expected_bboxes, rtol=0, atol=1)
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize(
    "output_size",
    [(18, 18), [18, 15], (16, 19), [12], [46, 48]],
)
def test_correctness_center_crop_bounding_box(device, output_size):
    # Normalize a private copy up front. The original helper appended to the
    # parametrized list in place (`output_size_.append(...)`), mutating the
    # shared parametrize value across loop iterations and test invocations.
    output_size = list(output_size)
    if len(output_size) == 1:
        output_size.append(output_size[-1])

    # Reference: in XYWH space a center crop shifts the box origin by the
    # top-left crop offset; width/height are unchanged.
    def _compute_expected_bbox(bbox, output_size_):
        format_ = bbox.format
        spatial_size_ = bbox.spatial_size
        dtype = bbox.dtype
        bbox = convert_format_bounding_box(bbox.float(), format_, features.BoundingBoxFormat.XYWH)

        cy = int(round((spatial_size_[0] - output_size_[0]) * 0.5))
        cx = int(round((spatial_size_[1] - output_size_[1]) * 0.5))
        out_bbox = [
            bbox[0].item() - cx,
            bbox[1].item() - cy,
            bbox[2].item(),
            bbox[3].item(),
        ]
        out_bbox = torch.tensor(out_bbox)
        out_bbox = convert_format_bounding_box(out_bbox, features.BoundingBoxFormat.XYWH, format_)
        return out_bbox.to(dtype=dtype, device=bbox.device)

    for bboxes in make_bounding_boxes(extra_dims=((4,),)):
        bboxes = bboxes.to(device)
        bboxes_format = bboxes.format
        bboxes_spatial_size = bboxes.spatial_size

        output_boxes, output_spatial_size = F.center_crop_bounding_box(
            bboxes, bboxes_format, bboxes_spatial_size, output_size
        )

        # Normalize a single box to a list so the loop below works.
        if bboxes.ndim < 2:
            bboxes = [bboxes]

        expected_bboxes = []
        for bbox in bboxes:
            bbox = features.BoundingBox(bbox, format=bboxes_format, spatial_size=bboxes_spatial_size)
            expected_bboxes.append(_compute_expected_bbox(bbox, output_size))

        if len(expected_bboxes) > 1:
            expected_bboxes = torch.stack(expected_bboxes)
        else:
            expected_bboxes = expected_bboxes[0]
        torch.testing.assert_close(output_boxes, expected_bboxes)
        torch.testing.assert_close(output_spatial_size, output_size)
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("output_size", [[4, 2], [4], [7, 6]])
def test_correctness_center_crop_mask(device, output_size):
    # Reference center crop: pad when the crop exceeds the image, then slice
    # the centered window.
    def _compute_expected_mask(mask, output_size):
        crop_height, crop_width = output_size if len(output_size) > 1 else [output_size[0], output_size[0]]
        _, image_height, image_width = mask.shape

        # Fixed: the original compared crop_width with image_height and
        # crop_height with image_width (transposed), which only worked because
        # the test image is square. NOTE(review): top/left below are still
        # computed from the pre-padding dimensions — confirm against
        # F.center_crop_mask for non-square padded inputs.
        if crop_width > image_width or crop_height > image_height:
            padding = _center_crop_compute_padding(crop_height, crop_width, image_height, image_width)
            mask = F.pad_image_tensor(mask, padding, fill=0)

        left = round((image_width - crop_width) * 0.5)
        top = round((image_height - crop_height) * 0.5)
        return mask[:, top : top + crop_height, left : left + crop_width]

    mask = torch.randint(0, 2, size=(1, 6, 6), dtype=torch.long, device=device)
    actual = F.center_crop_mask(mask, output_size)

    expected = _compute_expected_mask(mask, output_size)
    torch.testing.assert_close(expected, actual)
# Copied from test/test_functional_tensor.py
@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("spatial_size", ("small", "large"))
@pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize("ksize", [(3, 3), [3, 5], (23, 23)])
@pytest.mark.parametrize("sigma", [[0.5, 0.5], (0.5, 0.5), (0.8, 0.8), (1.7, 1.7)])
def test_correctness_gaussian_blur_image_tensor(device, spatial_size, dt, ksize, sigma):
    # Compares against OpenCV ground-truth blurs stored in
    # assets/gaussian_blur_opencv_results.pt; parameter combinations without a
    # stored reference are skipped via the gt_key lookup below.
    fn = F.gaussian_blur_image_tensor

    # true_cv2_results = {
    #     # np_img = np.arange(3 * 10 * 12, dtype="uint8").reshape((10, 12, 3))
    #     # cv2.GaussianBlur(np_img, ksize=(3, 3), sigmaX=0.8)
    #     "3_3_0.8": ...
    #     # cv2.GaussianBlur(np_img, ksize=(3, 3), sigmaX=0.5)
    #     "3_3_0.5": ...
    #     # cv2.GaussianBlur(np_img, ksize=(3, 5), sigmaX=0.8)
    #     "3_5_0.8": ...
    #     # cv2.GaussianBlur(np_img, ksize=(3, 5), sigmaX=0.5)
    #     "3_5_0.5": ...
    #     # np_img2 = np.arange(26 * 28, dtype="uint8").reshape((26, 28))
    #     # cv2.GaussianBlur(np_img2, ksize=(23, 23), sigmaX=1.7)
    #     "23_23_1.7": ...
    # }
    p = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "gaussian_blur_opencv_results.pt")
    true_cv2_results = torch.load(p)

    if spatial_size == "small":
        # 3-channel 10x12 ramp image, converted HWC -> CHW.
        tensor = (
            torch.from_numpy(np.arange(3 * 10 * 12, dtype="uint8").reshape((10, 12, 3))).permute(2, 0, 1).to(device)
        )
    else:
        # single-channel 26x28 ramp image.
        tensor = torch.from_numpy(np.arange(26 * 28, dtype="uint8").reshape((1, 26, 28))).to(device)

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return
    if dt is not None:
        tensor = tensor.to(dtype=dt)

    _ksize = (ksize, ksize) if isinstance(ksize, int) else ksize
    # Both sigma entries are equal in the parametrization, so only the first is used.
    _sigma = sigma[0] if sigma is not None else None
    shape = tensor.shape
    # Keys encode "H_W_C__kh_kw_sigma" to index the stored references.
    gt_key = f"{shape[-2]}_{shape[-1]}_{shape[-3]}__{_ksize[0]}_{_ksize[1]}_{_sigma}"
    if gt_key not in true_cv2_results:
        return

    # Stored reference is flat HWC data; reshape and convert to CHW.
    true_out = (
        torch.tensor(true_cv2_results[gt_key]).reshape(shape[-2], shape[-1], shape[-3]).permute(2, 0, 1).to(tensor)
    )

    image = features.Image(tensor)

    out = fn(image, kernel_size=ksize, sigma=sigma)
    torch.testing.assert_close(out, true_out, rtol=0.0, atol=1.0, msg=f"{ksize}, {sigma}")
def test_normalize_output_type():
    """F.normalize must return a plain Tensor for both tensor and feature inputs."""
    inputs = (
        torch.rand(1, 3, 32, 32),
        make_image(color_space=features.ColorSpace.RGB),
    )
    for inpt in inputs:
        output = F.normalize(inpt, mean=[0.5, 0.5, 0.5], std=[1.0, 1.0, 1.0])
        assert type(output) is torch.Tensor
        torch.testing.assert_close(inpt - 0.5, output)
@pytest.mark.parametrize(
    "inpt",
    [
        127 * np.ones((32, 32, 3), dtype="uint8"),
        PIL.Image.new("RGB", (32, 32), 122),
    ],
)
def test_to_image_tensor(inpt):
    """to_image_tensor must produce a CHW tensor preserving the pixel sum."""
    result = F.to_image_tensor(inpt)

    assert isinstance(result, torch.Tensor)
    assert result.shape == (3, 32, 32)
    assert int(result.sum()) == np.asarray(inpt).sum()
@pytest.mark.parametrize(
    "inpt",
    [
        torch.randint(0, 256, size=(3, 32, 32), dtype=torch.uint8),
        127 * np.ones((32, 32, 3), dtype="uint8"),
    ],
)
@pytest.mark.parametrize("mode", [None, "RGB"])
def test_to_image_pil(inpt, mode):
    """to_image_pil must return a PIL image with the same pixel sum as the input."""
    result = F.to_image_pil(inpt, mode=mode)

    assert isinstance(result, PIL.Image.Image)
    assert np.asarray(result).sum() == np.asarray(inpt).sum()
def test_equalize_image_tensor_edge_cases():
    """Constant images pass through unchanged; a two-valued image maps onto {0, 255}."""
    constant = torch.zeros(3, 200, 200, dtype=torch.uint8)
    torch.testing.assert_close(constant, F.equalize_image_tensor(constant))

    two_level = torch.zeros(5, 3, 200, 200, dtype=torch.uint8)
    two_level[..., 100:, 100:] = 1
    equalized = F.equalize_image_tensor(two_level)
    assert equalized.unique().tolist() == [0, 255]
@pytest.mark.parametrize("device", cpu_and_gpu())
def test_correctness_uniform_temporal_subsample(device):
    """Subsampling keeps evenly spaced frames, always including the last one."""
    video = torch.arange(10, device=device)[:, None, None, None].expand(-1, 3, 8, 8)

    for num_samples, expected_frames in (
        (5, [0, 2, 4, 6, 9]),
        (8, [0, 1, 2, 3, 5, 6, 7, 9]),
    ):
        subsampled = F.uniform_temporal_subsample(video, num_samples)
        assert subsampled.unique().tolist() == expected_frames
| bsd-3-clause | e693dcc8ec18c86a65f6042af5e6be63 | 37.282519 | 122 | 0.616032 | 3.347848 | false | true | false | false |
pytorch/vision | torchvision/ops/ps_roi_align.py | 1 | 3625 | import torch
import torch.fx
from torch import nn, Tensor
from torch.nn.modules.utils import _pair
from torchvision.extension import _assert_has_ops
from ..utils import _log_api_usage_once
from ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format
@torch.fx.wrap
def ps_roi_align(
    input: Tensor,
    boxes: Tensor,
    output_size: int,
    spatial_scale: float = 1.0,
    sampling_ratio: int = -1,
) -> Tensor:
    """
    Performs Position-Sensitive Region of Interest (RoI) Align operator
    mentioned in Light-Head R-CNN.

    Args:
        input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element
            contains ``C`` feature maps of dimensions ``H x W``.
        boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
            format where the regions will be taken from.
            The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
            If a single Tensor is passed, then the first column should
            contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.
            If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i
            in the batch.
        output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling
            is performed, as (height, width).
        spatial_scale (float): a scaling factor that maps the box coordinates to
            the input coordinates. For example, if your boxes are defined on the scale
            of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
            the original image), you'll want to set this to 0.5. Default: 1.0
        sampling_ratio (int): number of sampling points in the interpolation grid
            used to compute the output value of each pooled output bin. If > 0,
            then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If
            <= 0, then an adaptive number of grid points are used (computed as
            ``ceil(roi_width / output_width)``, and likewise for height). Default: -1

    Returns:
        Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(ps_roi_align)
    _assert_has_ops()
    check_roi_boxes_shape(boxes)

    # Accept either a [K, 5] tensor or a per-image list of [L, 4] tensors.
    rois = boxes if isinstance(boxes, torch.Tensor) else convert_boxes_to_roi_format(boxes)
    pooled_height, pooled_width = _pair(output_size)

    output, _ = torch.ops.torchvision.ps_roi_align(
        input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio
    )
    return output
class PSRoIAlign(nn.Module):
    """
    See :func:`ps_roi_align`.
    """

    def __init__(
        self,
        output_size: int,
        spatial_scale: float,
        sampling_ratio: int,
    ):
        super().__init__()
        _log_api_usage_once(self)
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    def forward(self, input: Tensor, rois: Tensor) -> Tensor:
        # Delegate to the functional op with the stored pooling configuration.
        return ps_roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio)

    def __repr__(self) -> str:
        attrs = ", ".join(
            f"{name}={getattr(self, name)}" for name in ("output_size", "spatial_scale", "sampling_ratio")
        )
        return f"{self.__class__.__name__}({attrs})"
| bsd-3-clause | c128e29e89d3408acefcd611d9792c2b | 39.277778 | 106 | 0.61931 | 3.702758 | false | false | false | false |
pytorch/vision | torchvision/extension.py | 1 | 3914 | import ctypes
import os
import sys
from warnings import warn
import torch
from ._internally_replaced_utils import _get_extension_path
# Tracks whether the compiled torchvision C++ extension (_C) loaded successfully.
_HAS_OPS = False


def _has_ops():
    # Default stub: reports the extension as unavailable. It is shadowed by the
    # redefinition inside the try-block below once the library loads.
    return False


try:
    # On Windows Python-3.8.x has `os.add_dll_directory` call,
    # which is called to configure dll search path.
    # To find cuda related dlls we need to make sure the
    # conda environment/bin path is configured Please take a look:
    # https://stackoverflow.com/questions/59330863/cant-import-dll-module-in-python
    # Please note: if some path can't be added using add_dll_directory we simply ignore this path
    if os.name == "nt" and sys.version_info >= (3, 8) and sys.version_info < (3, 9):
        env_path = os.environ["PATH"]
        path_arr = env_path.split(";")
        for path in path_arr:
            if os.path.exists(path):
                try:
                    os.add_dll_directory(path)  # type: ignore[attr-defined]
                except Exception:
                    pass

    lib_path = _get_extension_path("_C")
    torch.ops.load_library(lib_path)
    _HAS_OPS = True

    def _has_ops():  # noqa: F811
        # Loaded successfully: callers now see the ops as available.
        return True

except (ImportError, OSError):
    # Extension missing or failed to load; keep the False stub defined above.
    pass
def _assert_has_ops():
    """Raise a RuntimeError if the compiled torchvision C++ ops are unavailable."""
    if _has_ops():
        return
    raise RuntimeError(
        "Couldn't load custom C++ ops. This can happen if your PyTorch and "
        "torchvision versions are incompatible, or if you had errors while compiling "
        "torchvision from source. For further information on the compatible versions, check "
        "https://github.com/pytorch/vision#installation for the compatibility matrix. "
        "Please check your PyTorch version with torch.__version__ and your torchvision "
        "version with torchvision.__version__ and verify if they are compatible, and if not "
        "please reinstall torchvision so that it matches your PyTorch install."
    )
def _check_cuda_version():
    """
    Make sure that CUDA versions match between the pytorch install and torchvision install
    """
    if not _HAS_OPS:
        # The C++ extension never loaded; there is nothing to compare.
        return -1
    from torch.version import cuda as torch_version_cuda

    # Build-time CUDA version reported by the extension as an int; -1 is
    # presumably a CPU-only build (it skips the comparison below). The digit
    # slicing implies an encoding like "9020" -> 9.2 and "11030" -> 11.3.
    _version = torch.ops.torchvision._cuda_version()
    if _version != -1 and torch_version_cuda is not None:
        tv_version = str(_version)
        if int(tv_version) < 10000:
            # single-digit major, e.g. "9020": digit 0 is major, digit 2 is minor
            tv_major = int(tv_version[0])
            tv_minor = int(tv_version[2])
        else:
            # two-digit major, e.g. "11030": digits 0-1 are major, digit 3 is minor
            tv_major = int(tv_version[0:2])
            tv_minor = int(tv_version[3])
        # PyTorch reports its CUDA version as a dotted string, e.g. "11.3".
        t_version = torch_version_cuda.split(".")
        t_major = int(t_version[0])
        t_minor = int(t_version[1])
        if t_major != tv_major or t_minor != tv_minor:
            raise RuntimeError(
                "Detected that PyTorch and torchvision were compiled with different CUDA versions. "
                f"PyTorch has CUDA Version={t_major}.{t_minor} and torchvision has "
                f"CUDA Version={tv_major}.{tv_minor}. "
                "Please reinstall the torchvision that matches your PyTorch install."
            )
    return _version
def _load_library(lib_name):
    """Locate and load a torchvision native shared library by name."""
    lib_path = _get_extension_path(lib_name)
    # On Windows Python-3.8+ has `os.add_dll_directory` call,
    # which is called from _get_extension_path to configure dll search path
    # Condition below adds a workaround for older versions by
    # explicitly calling `LoadLibraryExW` with the following flags:
    #  - LOAD_LIBRARY_SEARCH_DEFAULT_DIRS (0x1000)
    #  - LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR (0x100)
    if os.name == "nt" and sys.version_info < (3, 8):
        _kernel32 = ctypes.WinDLL("kernel32.dll", use_last_error=True)
        if hasattr(_kernel32, "LoadLibraryExW"):
            # 0x00001100 combines the two search flags documented above.
            _kernel32.LoadLibraryExW(lib_path, None, 0x00001100)
        else:
            warn("LoadLibraryExW is missing in kernel32.dll")
    torch.ops.load_library(lib_path)
| bsd-3-clause | a13f101ace5f9955ca1f8a64d32b400d | 35.579439 | 100 | 0.62928 | 3.727619 | false | false | false | false |
pytorch/vision | torchvision/prototype/datasets/utils/_internal.py | 1 | 6563 | import csv
import functools
import pathlib
import pickle
from typing import Any, BinaryIO, Callable, Dict, IO, Iterator, List, Sequence, Sized, Tuple, TypeVar, Union
import torch
import torch.distributed as dist
import torch.utils.data
from torchdata.datapipes.iter import IoPathFileLister, IoPathFileOpener, IterDataPipe, ShardingFilter, Shuffler
from torchvision.prototype.utils._internal import fromfile
# Public API of this helper module.
__all__ = [
    "INFINITE_BUFFER_SIZE",
    "BUILTIN_DIR",
    "read_mat",
    "MappingIterator",
    "getitem",
    "path_accessor",
    "path_comparator",
    "read_flo",
    "hint_sharding",
    "hint_shuffling",
]

# Generic key/value type variables used by the datapipe helpers below.
K = TypeVar("K")
D = TypeVar("D")

# pseudo-infinite until a true infinite buffer is supported by all datapipes
INFINITE_BUFFER_SIZE = 1_000_000_000

# Directory holding built-in dataset resources (e.g. *.categories files).
BUILTIN_DIR = pathlib.Path(__file__).parent.parent / "_builtin"
def read_mat(buffer: BinaryIO, **kwargs: Any) -> Any:
    """Parse a MATLAB ``.mat`` file from an open binary buffer.

    The buffer is closed once its contents have been read. Extra keyword
    arguments are forwarded to ``scipy.io.loadmat``.
    """
    try:
        from scipy.io import loadmat
    except ImportError as error:
        raise ModuleNotFoundError("Package `scipy` is required to be installed to read .mat files.") from error

    result = loadmat(buffer, **kwargs)
    buffer.close()
    return result
class MappingIterator(IterDataPipe[Union[Tuple[K, D], D]]):
    """Flatten a datapipe of mappings into a stream of entries.

    With ``drop_key=False`` (default) yields ``(key, value)`` pairs; with
    ``drop_key=True`` yields only the values.
    """

    def __init__(self, datapipe: IterDataPipe[Dict[K, D]], *, drop_key: bool = False) -> None:
        self.datapipe = datapipe
        self.drop_key = drop_key

    def __iter__(self) -> Iterator[Union[Tuple[K, D], D]]:
        for mapping in self.datapipe:
            entries = mapping.values() if self.drop_key else mapping.items()
            yield from entries
def _getitem_closure(obj: Any, *, items: Sequence[Any]) -> Any:
    # Successively index into ``obj`` with each entry of ``items``.
    result = obj
    for key in items:
        result = result[key]
    return result


def getitem(*items: Any) -> Callable[[Any], Any]:
    """Return a picklable callable computing ``obj[items[0]][items[1]]...``."""
    return functools.partial(_getitem_closure, items=items)
def _getattr_closure(obj: Any, *, attrs: Sequence[str]) -> Any:
    # Follow a chain of attribute names starting from ``obj``.
    result = obj
    for attr_name in attrs:
        result = getattr(result, attr_name)
    return result


def _path_attribute_accessor(path: pathlib.Path, *, name: str) -> Any:
    """Resolve a dotted attribute path (e.g. ``"parent.name"``) on ``path``."""
    return _getattr_closure(path, attrs=name.split("."))
def _path_accessor_closure(data: Tuple[str, Any], *, getter: Callable[[pathlib.Path], D]) -> D:
    # ``data`` is a ``(path_string, payload)`` pair; apply ``getter`` to the path.
    path_str = data[0]
    return getter(pathlib.Path(path_str))


def path_accessor(getter: Union[str, Callable[[pathlib.Path], D]]) -> Callable[[Tuple[str, Any]], D]:
    """Build a picklable accessor extracting a path property from ``(path, data)`` tuples.

    ``getter`` is either a callable over ``pathlib.Path`` or a dotted attribute
    name such as ``"parent.name"``.
    """
    if isinstance(getter, str):
        getter = functools.partial(_path_attribute_accessor, name=getter)
    return functools.partial(_path_accessor_closure, getter=getter)


def _path_comparator_closure(data: Tuple[str, Any], *, accessor: Callable[[Tuple[str, Any]], D], value: D) -> bool:
    # True when the accessed path property equals ``value``.
    return accessor(data) == value


def path_comparator(getter: Union[str, Callable[[pathlib.Path], D]], value: D) -> Callable[[Tuple[str, Any]], bool]:
    """Build a picklable predicate testing a path property against ``value``."""
    return functools.partial(_path_comparator_closure, accessor=path_accessor(getter), value=value)
class PicklerDataPipe(IterDataPipe):
    """Stream the elements of pickled sequences from ``(path, file_object)`` pairs.

    Each file object is expected to contain one pickled iterable; its items
    are yielded one by one.
    """

    def __init__(self, source_datapipe: IterDataPipe[Tuple[str, IO[bytes]]]) -> None:
        self.source_datapipe = source_datapipe

    def __iter__(self) -> Iterator[Any]:
        for _, fobj in self.source_datapipe:
            # NOTE: pickle.load on untrusted data can execute arbitrary code;
            # this assumes the shard files are trusted local artifacts.
            data = pickle.load(fobj)
            # Fixed idiom: the original iterated `enumerate(data)` and
            # discarded the index; `yield from` expresses the intent directly.
            yield from data
class SharderDataPipe(torch.utils.data.datapipes.iter.grouping.ShardingFilterIterDataPipe):
    """Sharding filter aware of both distributed ranks and DataLoader workers."""

    def __init__(self, source_datapipe: IterDataPipe) -> None:
        super().__init__(source_datapipe)
        # Default to a single-process setup; refined when torch.distributed is
        # initialized.
        self.rank = 0
        self.world_size = 1
        if dist.is_available() and dist.is_initialized():
            self.rank = dist.get_rank()
            self.world_size = dist.get_world_size()
        self.apply_sharding(self.world_size, self.rank)

    def __iter__(self) -> Iterator[Any]:
        num_workers = self.world_size
        worker_id = self.rank
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is not None:
            # Combine the distributed rank with the DataLoader worker id so each
            # (rank, worker) pair is assigned a distinct shard.
            worker_id = worker_id + worker_info.id * num_workers
            num_workers *= worker_info.num_workers
        self.apply_sharding(num_workers, worker_id)
        yield from super().__iter__()
class TakerDataPipe(IterDataPipe):
    """Cap the stream at ``num_take`` elements, split across ranks and workers."""

    def __init__(self, source_datapipe: IterDataPipe, num_take: int) -> None:
        super().__init__()
        self.source_datapipe = source_datapipe
        self.num_take = num_take
        # Default to a single process; refined when torch.distributed is
        # initialized.
        self.world_size = 1
        if dist.is_available() and dist.is_initialized():
            self.world_size = dist.get_world_size()

    def __iter__(self) -> Iterator[Any]:
        num_workers = self.world_size
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is not None:
            num_workers *= worker_info.num_workers

        # TODO: this is weird as it drops more elements than it should
        num_take = self.num_take // num_workers

        for i, data in enumerate(self.source_datapipe):
            if i < num_take:
                yield data
            else:
                break

    def __len__(self) -> int:
        num_take = self.num_take // self.world_size
        if isinstance(self.source_datapipe, Sized):
            # Never report more elements than the source actually has.
            if len(self.source_datapipe) < num_take:
                num_take = len(self.source_datapipe)
        # TODO: might be weird to not take `num_workers` into account
        return num_take
def _make_sharded_datapipe(root: str, dataset_size: int) -> IterDataPipe[Dict[str, Any]]:
    # Pipeline: list shard files under ``root``, shard them across
    # ranks/workers, shuffle, open and unpickle each shard, then cap the total
    # number of yielded samples at ``dataset_size``.
    dp = IoPathFileLister(root=root)
    dp = SharderDataPipe(dp)
    dp = dp.shuffle(buffer_size=INFINITE_BUFFER_SIZE)
    dp = IoPathFileOpener(dp, mode="rb")
    dp = PicklerDataPipe(dp)
    # dp = dp.cycle(2)
    dp = TakerDataPipe(dp, dataset_size)
    return dp
def read_flo(file: BinaryIO) -> torch.Tensor:
    """Decode a Middlebury ``.flo`` optical-flow file into a ``(2, H, W)`` tensor."""
    if file.read(4) != b"PIEH":
        raise ValueError("Magic number incorrect. Invalid .flo file")

    # Header stores width then height as little-endian int32.
    w, h = fromfile(file, dtype=torch.int32, byte_order="little", count=2)
    # Payload is (H, W, 2) interleaved flow values; convert to channels-first.
    values = fromfile(file, dtype=torch.float32, byte_order="little", count=h * w * 2)
    return values.reshape((h, w, 2)).permute((2, 0, 1))
def hint_sharding(datapipe: IterDataPipe) -> ShardingFilter:
    # Mark the point in the pipeline where sharding across workers should occur.
    return ShardingFilter(datapipe)


def hint_shuffling(datapipe: IterDataPipe[D]) -> Shuffler[D]:
    # Insert a shuffle point that is disabled by default; it can be enabled
    # later without rebuilding the pipeline.
    return Shuffler(datapipe, buffer_size=INFINITE_BUFFER_SIZE).set_shuffle(False)
def read_categories_file(name: str) -> List[Union[str, Sequence[str]]]:
    """Read ``<name>.categories`` from the built-in resource directory.

    Single-column rows collapse to plain strings; multi-column rows are kept
    as lists of strings.
    """
    path = BUILTIN_DIR / f"{name}.categories"
    with open(path, newline="") as file:
        raw_rows = list(csv.reader(file))
    return [row[0] if len(row) == 1 else row for row in raw_rows]
| bsd-3-clause | ab742da5a4d46dfe31aa14ce6ebed82f | 32.829897 | 116 | 0.645284 | 3.463325 | false | false | false | false |
pytorch/vision | references/depth/stereo/transforms.py | 1 | 25981 | import random
from typing import Callable, List, Optional, Sequence, Tuple, Union
import numpy as np
import PIL.Image
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from torch import Tensor
# Type aliases shared by the stereo-matching transforms below.
T_FLOW = Union[Tensor, np.ndarray, None]  # disparity/flow map, or absent
T_MASK = Union[Tensor, np.ndarray, None]  # validity mask, or absent
T_STEREO_TENSOR = Tuple[Tensor, Tensor]  # (left image, right image)
T_COLOR_AUG_PARAM = Union[float, Tuple[float, float]]  # scalar or (min, max) range
def rand_float_range(size: Sequence[int], low: float, high: float) -> Tensor:
    """Draw a tensor of shape ``size`` with uniform samples from ``[low, high]``."""
    return high + (low - high) * torch.rand(size)
class InterpolationStrategy:
    """Sampler of interpolation modes for resize-style augmentations.

    ``mode`` selects the pool of modes: ``"bilinear"`` and ``"bicubic"`` pin a
    single mode, while ``"mixed"`` picks one of the two at random per call.

    Raises:
        ValueError: if ``mode`` is not one of the valid modes.
    """

    _valid_modes: List[str] = ["mixed", "bicubic", "bilinear"]

    def __init__(self, mode: str = "mixed") -> None:
        if mode not in self._valid_modes:
            raise ValueError(f"Invalid interpolation mode: {mode}. Valid modes are: {self._valid_modes}")

        if mode == "mixed":
            self.strategies = [F.InterpolationMode.BILINEAR, F.InterpolationMode.BICUBIC]
        elif mode == "bicubic":
            self.strategies = [F.InterpolationMode.BICUBIC]
        elif mode == "bilinear":
            self.strategies = [F.InterpolationMode.BILINEAR]

    def __call__(self) -> F.InterpolationMode:
        """Return a randomly chosen mode from the configured pool."""
        return random.choice(self.strategies)

    @classmethod
    def is_valid(cls, mode: str) -> bool:
        # Fixed: the original signature was ``def is_valid(mode)`` under
        # @classmethod, so the class object was bound to ``mode`` and the check
        # always returned False.
        return mode in cls._valid_modes

    @property
    def valid_modes(self) -> List[str]:
        # Fixed: the original property took no ``self`` and raised TypeError on
        # every access.
        return InterpolationStrategy._valid_modes
class ValidateModelInput(torch.nn.Module):
    # Pass-through transform that checks the shape and dtypes to make sure the model gets what it expects
    def forward(self, images: T_STEREO_TENSOR, disparities: T_FLOW, masks: T_MASK):
        left, right = images
        if left.shape != right.shape:
            raise ValueError("img1 and img2 should have the same shape.")

        h, w = left.shape[-2:]
        if disparities[0] is not None and disparities[0].shape != (1, h, w):
            raise ValueError(f"disparities[0].shape should be (1, {h}, {w}) instead of {disparities[0].shape}")

        first_mask = masks[0]
        if first_mask is not None:
            if first_mask.shape != (h, w):
                raise ValueError(f"masks[0].shape should be ({h}, {w}) instead of {first_mask.shape}")
            if first_mask.dtype != torch.bool:
                raise TypeError(f"masks[0] should be of dtype torch.bool instead of {first_mask.dtype}")

        return images, disparities, masks
class ConvertToGrayscale(torch.nn.Module):
    """Replace both stereo views with 3-channel grayscale versions of themselves."""

    def __init__(self) -> None:
        super().__init__()

    def forward(
        self,
        images: Tuple[PIL.Image.Image, PIL.Image.Image],
        disparities: Tuple[T_FLOW, T_FLOW],
        masks: Tuple[T_MASK, T_MASK],
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        gray_pair = tuple(F.rgb_to_grayscale(image, num_output_channels=3) for image in images)
        return gray_pair, disparities, masks
class MakeValidDisparityMask(torch.nn.Module):
    """Build boolean validity masks marking pixels with usable disparity values.

    A pixel is valid when its (optional) incoming mask is True, its disparity
    is strictly positive and, when ``max_disparity`` is set, strictly below it.
    """

    def __init__(self, max_disparity: Optional[int] = 256) -> None:
        super().__init__()
        self.max_disparity = max_disparity

    def forward(
        self,
        images: T_STEREO_TENSOR,
        disparities: Tuple[T_FLOW, T_FLOW],
        masks: Tuple[T_MASK, T_MASK],
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        updated_masks = []
        for image, disparity, mask in zip(images, disparities, masks):
            if mask is None:
                # Default: every pixel is considered valid.
                mask = torch.ones(image.shape[-2:], dtype=torch.bool, device=image.device)
            if disparity is not None:
                mask = torch.logical_and(mask, disparity > 0).squeeze(0)
                if self.max_disparity is not None:
                    mask = torch.logical_and(mask, disparity < self.max_disparity).squeeze(0)
            updated_masks.append(mask)

        return images, disparities, tuple(updated_masks)
class ToGPU(torch.nn.Module):
    """Move images, disparities and masks onto the default CUDA device.

    ``None`` entries in the disparity/mask tuples are passed through unchanged.
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(
        self,
        images: T_STEREO_TENSOR,
        disparities: Tuple[T_FLOW, T_FLOW],
        masks: Tuple[T_MASK, T_MASK],
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        def _maybe_cuda(value):
            return value.cuda() if value is not None else None

        gpu_images = tuple(image.cuda() for image in images)
        gpu_disparities = tuple(_maybe_cuda(disparity) for disparity in disparities)
        gpu_masks = tuple(_maybe_cuda(mask) for mask in masks)
        return gpu_images, gpu_disparities, gpu_masks
class ConvertImageDtype(torch.nn.Module):
    """Convert both stereo views to ``dtype``; disparities and masks pass through."""

    def __init__(self, dtype: torch.dtype):
        super().__init__()
        self.dtype = dtype

    def forward(
        self,
        images: T_STEREO_TENSOR,
        disparities: Tuple[T_FLOW, T_FLOW],
        masks: Tuple[T_MASK, T_MASK],
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        converted = tuple(
            F.convert_image_dtype(image, dtype=self.dtype).contiguous() for image in images
        )
        return converted, disparities, masks
class Normalize(torch.nn.Module):
    """Channel-wise normalize both stereo views with the given mean and std."""

    def __init__(self, mean: List[float], std: List[float]) -> None:
        super().__init__()
        self.mean = mean
        self.std = std

    def forward(
        self,
        images: T_STEREO_TENSOR,
        disparities: Tuple[T_FLOW, T_FLOW],
        masks: Tuple[T_MASK, T_MASK],
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        normalized = tuple(
            F.normalize(image, mean=self.mean, std=self.std).contiguous() for image in images
        )
        return normalized, disparities, masks
class ToTensor(torch.nn.Module):
    """Convert PIL stereo views to tensors and wrap numpy disparities/masks."""

    def forward(
        self,
        images: Tuple[PIL.Image.Image, PIL.Image.Image],
        disparities: Tuple[T_FLOW, T_FLOW],
        masks: Tuple[T_MASK, T_MASK],
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        left, right = images
        if left is None:
            raise ValueError("img_left is None")
        if right is None:
            raise ValueError("img_right is None")

        image_tensors = (F.pil_to_tensor(left), F.pil_to_tensor(right))
        disparity_tensors = tuple(
            torch.from_numpy(disparity) if disparity is not None else None for disparity in disparities
        )
        mask_tensors = tuple(torch.from_numpy(mask) if mask is not None else None for mask in masks)

        return image_tensors, disparity_tensors, mask_tensors
class AsymmetricColorJitter(T.ColorJitter):
    """Color jitter where ``p`` is the probability of jittering each view independently."""

    def __init__(
        self,
        brightness: T_COLOR_AUG_PARAM = 0,
        contrast: T_COLOR_AUG_PARAM = 0,
        saturation: T_COLOR_AUG_PARAM = 0,
        hue: T_COLOR_AUG_PARAM = 0,
        p: float = 0.2,
    ):
        super().__init__(brightness=brightness, contrast=contrast, saturation=saturation, hue=hue)
        self.p = p

    def forward(
        self,
        images: T_STEREO_TENSOR,
        disparities: Tuple[T_FLOW, T_FLOW],
        masks: Tuple[T_MASK, T_MASK],
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        if torch.rand(1) < self.p:
            # Asymmetric branch: each view gets independently sampled jitter.
            jittered = (super().forward(images[0]), super().forward(images[1]))
        else:
            # Symmetric branch: stack the pair so one sampled jitter hits both.
            batch = super().forward(torch.stack(images))
            jittered = (batch[0], batch[1])

        return jittered, disparities, masks
class AsymetricGammaAdjust(torch.nn.Module):
    """Randomly gamma-adjust a stereo pair, asymmetrically or symmetrically.

    With probability ``p`` each image of the pair gets its own independently
    sampled gamma (asymmetric); otherwise a single gamma is applied to both
    images (symmetric). Disparities and masks pass through unchanged.

    Note: the class name keeps its original (misspelled) form so existing
    imports keep working.
    """

    def __init__(self, p: float, gamma_range: Tuple[float, float], gain: float = 1) -> None:
        super().__init__()
        self.gamma_range = gamma_range  # (low, high) interval gamma is sampled from
        self.gain = gain  # constant multiplier forwarded to F.adjust_gamma
        self.p = p  # probability of the asymmetric branch

    def forward(
        self,
        images: T_STEREO_TENSOR,
        disparities: Tuple[T_FLOW, T_FLOW],
        masks: Tuple[T_MASK, T_MASK],
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        if torch.rand(1) < self.p:
            # asymmetric: sample a *separate* gamma per image so the two views
            # really are transformed differently. (Previously a single gamma was
            # sampled before the branch, which made the asymmetric branch
            # behave exactly like the symmetric one.)
            gamma_left = rand_float_range((1,), low=self.gamma_range[0], high=self.gamma_range[1]).item()
            gamma_right = rand_float_range((1,), low=self.gamma_range[0], high=self.gamma_range[1]).item()
            img_left = F.adjust_gamma(images[0], gamma_left, gain=self.gain)
            img_right = F.adjust_gamma(images[1], gamma_right, gain=self.gain)
        else:
            # symmetric: one gamma shared by both images via a stacked batch
            gamma = rand_float_range((1,), low=self.gamma_range[0], high=self.gamma_range[1]).item()
            batch = torch.stack(images)
            batch = F.adjust_gamma(batch, gamma, gain=self.gain)
            img_left, img_right = batch[0], batch[1]
        return (img_left, img_right), disparities, masks
class RandomErase(torch.nn.Module):
    # Produces multiple symmetric random erasures.
    # These can be viewed as occlusions present in both camera views.
    # Similarly to Optical Flow occlusion prediction tasks, we mask these pixels in the disparity map
    def __init__(
        self,
        p: float = 0.5,
        erase_px_range: Tuple[int, int] = (50, 100),
        value: Union[Tensor, float] = 0,
        inplace: bool = False,
        max_erase: int = 2,
    ):
        """Configure the erasure augmentation.

        ``erase_px_range`` bounds each patch's side length in pixels, ``value``
        is the fill value for erased pixels, and ``max_erase`` bounds the
        number of patches per sample.
        """
        super().__init__()
        self.min_px_erase = erase_px_range[0]
        self.max_px_erase = erase_px_range[1]
        if self.max_px_erase < 0:
            raise ValueError("erase_px_range[1] should be equal or greater than 0")
        if self.min_px_erase < 0:
            raise ValueError("erase_px_range[0] should be equal or greater than 0")
        if self.min_px_erase > self.max_px_erase:
            # NOTE(review): "erase_prx_range" in the message below is a typo kept verbatim
            raise ValueError("erase_prx_range[0] should be equal or lower than erase_px_range[1]")
        self.p = p
        self.value = value
        self.inplace = inplace
        self.max_erase = max_erase

    def forward(
        self,
        images: T_STEREO_TENSOR,
        disparities: T_STEREO_TENSOR,
        masks: T_STEREO_TENSOR,
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        # NOTE(review): rand < p *skips* the transform here, i.e. erasure is applied
        # with probability 1 - p. This is the opposite convention to
        # RandomSpatialShift below — confirm the inversion is intentional.
        if torch.rand(1) < self.p:
            return images, disparities, masks
        image_left, image_right = images
        mask_left, mask_right = masks
        # NOTE(review): torch.randint's upper bound is exclusive, so at most
        # max_erase - 1 patches are drawn (possibly zero) — confirm intent.
        for _ in range(torch.randint(self.max_erase, size=(1,)).item()):
            y, x, h, w, v = self._get_params(image_left)
            # erase the same rectangle in both views (symmetric occlusion)
            image_right = F.erase(image_right, y, x, h, w, v, self.inplace)
            image_left = F.erase(image_left, y, x, h, w, v, self.inplace)
            # similarly to optical flow occlusion prediction, we consider
            # any erasure pixels that are in both images to be occluded therefore
            # we mark them as invalid
            if mask_left is not None:
                mask_left = F.erase(mask_left, y, x, h, w, False, self.inplace)
            if mask_right is not None:
                mask_right = F.erase(mask_right, y, x, h, w, False, self.inplace)
        return (image_left, image_right), disparities, (mask_left, mask_right)

    def _get_params(self, img: torch.Tensor) -> Tuple[int, int, int, int, Union[Tensor, float]]:
        """Sample (top, left, height, width, fill_value) for one erase patch."""
        img_h, img_w = img.shape[-2:]
        # random.randint is inclusive on both ends
        crop_h, crop_w = (
            random.randint(self.min_px_erase, self.max_px_erase),
            random.randint(self.min_px_erase, self.max_px_erase),
        )
        # assumes the patch fits inside the image; raises ValueError otherwise
        crop_x, crop_y = (random.randint(0, img_w - crop_w), random.randint(0, img_h - crop_h))
        return crop_y, crop_x, crop_h, crop_w, self.value
class RandomOcclusion(torch.nn.Module):
    # This adds an occlusion in the right image
    # the occluded patch works as a patch erase where the erase value is the mean
    # of the pixels from the selected zone
    def __init__(self, p: float = 0.5, occlusion_px_range: Tuple[int, int] = (50, 100), inplace: bool = False):
        super().__init__()
        # lower / upper bounds (pixels) for the occlusion patch side lengths
        self.min_px_occlusion = occlusion_px_range[0]
        self.max_px_occlusion = occlusion_px_range[1]
        if self.max_px_occlusion < 0:
            raise ValueError("occlusion_px_range[1] should be greater or equal than 0")
        if self.min_px_occlusion < 0:
            raise ValueError("occlusion_px_range[0] should be greater or equal than 0")
        if self.min_px_occlusion > self.max_px_occlusion:
            raise ValueError("occlusion_px_range[0] should be lower than occlusion_px_range[1]")
        self.p = p
        self.inplace = inplace

    def forward(
        self,
        images: T_STEREO_TENSOR,
        disparities: T_STEREO_TENSOR,
        masks: T_STEREO_TENSOR,
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        left_image, right_image = images
        # NOTE(review): as in RandomErase, rand < p *skips* the transform, so the
        # occlusion is applied with probability 1 - p — confirm this is intended.
        if torch.rand(1) < self.p:
            return images, disparities, masks
        y, x, h, w, v = self._get_params(right_image)
        # fill the patch with its own per-channel mean (v broadcasts over the patch)
        right_image = F.erase(right_image, y, x, h, w, v, self.inplace)
        return ((left_image, right_image), disparities, masks)

    def _get_params(self, img: torch.Tensor) -> Tuple[int, int, int, int, Tensor]:
        """Sample (top, left, height, width, fill) for one occlusion patch.

        The fill value is the mean of the selected region, kept as a
        ``(..., 1, 1)`` tensor so it broadcasts inside ``F.erase``.
        """
        img_h, img_w = img.shape[-2:]
        crop_h, crop_w = (
            random.randint(self.min_px_occlusion, self.max_px_occlusion),
            random.randint(self.min_px_occlusion, self.max_px_occlusion),
        )
        # assumes the patch fits inside the image; raises ValueError otherwise
        crop_x, crop_y = (random.randint(0, img_w - crop_w), random.randint(0, img_h - crop_h))
        occlusion_value = img[..., crop_y : crop_y + crop_h, crop_x : crop_x + crop_w].mean(dim=(-2, -1), keepdim=True)
        return (crop_y, crop_x, crop_h, crop_w, occlusion_value)
class RandomSpatialShift(torch.nn.Module):
    # This transform applies a vertical shift and a slight angle rotation and the same time
    def __init__(
        self, p: float = 0.5, max_angle: float = 0.1, max_px_shift: int = 2, interpolation_type: str = "bilinear"
    ) -> None:
        super().__init__()
        # probability of *applying* the shift (note: opposite convention to
        # RandomErase/RandomOcclusion, which skip when rand < p)
        self.p = p
        self.max_angle = max_angle  # rotation bound, forwarded as F.affine's angle
        self.max_px_shift = max_px_shift  # vertical translation bound in pixels
        self._interpolation_mode_strategy = InterpolationStrategy(interpolation_type)

    def forward(
        self,
        images: T_STEREO_TENSOR,
        disparities: T_STEREO_TENSOR,
        masks: T_STEREO_TENSOR,
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        # the transform is applied only on the right image
        # in order to mimic slight calibration issues
        img_left, img_right = images
        INTERP_MODE = self._interpolation_mode_strategy()
        if torch.rand(1) < self.p:
            # [0, 1] -> [-a, a]
            shift = rand_float_range((1,), low=-self.max_px_shift, high=self.max_px_shift).item()
            angle = rand_float_range((1,), low=-self.max_angle, high=self.max_angle).item()
            # sample center point for the rotation matrix
            y = torch.randint(size=(1,), low=0, high=img_right.shape[-2]).item()
            x = torch.randint(size=(1,), low=0, high=img_right.shape[-1]).item()
            # apply affine transformations
            img_right = F.affine(
                img_right,
                angle=angle,
                translate=[0, shift],  # translation only on the y axis
                center=[x, y],
                scale=1.0,
                shear=0.0,
                interpolation=INTERP_MODE,
            )
        # NOTE(review): disparities and masks are returned untouched even though
        # the right view moved — presumably deliberate, to simulate mis-calibration.
        return ((img_left, img_right), disparities, masks)
class RandomHorizontalFlip(torch.nn.Module):
    """Jointly mirror a stereo sample horizontally with probability ``p``.

    Mirroring a rectified pair swaps the roles of the two cameras, so the
    flipped right view/disparity/mask becomes the new left one (and vice
    versa). Nothing happens when the right disparity is missing.
    """

    def __init__(self, p: float = 0.5) -> None:
        super().__init__()
        self.p = p  # probability of flipping

    def forward(
        self,
        images: T_STEREO_TENSOR,
        disparities: Tuple[T_FLOW, T_FLOW],
        masks: Tuple[T_MASK, T_MASK],
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        img_left, img_right = images
        dsp_left, dsp_right = disparities
        mask_left, mask_right = masks
        # guard clause: skip when no right disparity or the coin flip fails
        if dsp_right is None or torch.rand(1) >= self.p:
            return images, disparities, masks
        new_images = (F.hflip(img_right), F.hflip(img_left))
        new_disparities = (F.hflip(dsp_right), F.hflip(dsp_left))
        if mask_left is not None and mask_right is not None:
            new_masks = (F.hflip(mask_right), F.hflip(mask_left))
        else:
            new_masks = (mask_right, mask_left)
        return new_images, new_disparities, new_masks
class Resize(torch.nn.Module):
    """Resize images, disparities and masks to a fixed size.

    Disparity values are rescaled by the horizontal scale factor so they stay
    consistent with the resized width; masks are resized with nearest-neighbor
    interpolation to keep them binary. ``None`` entries are preserved.
    """

    def __init__(self, resize_size: Tuple[int, ...], interpolation_type: str = "bilinear") -> None:
        super().__init__()
        self.resize_size = list(resize_size)  # doing this to keep mypy happy
        self._interpolation_mode_strategy = InterpolationStrategy(interpolation_type)

    def forward(
        self,
        images: T_STEREO_TENSOR,
        disparities: Tuple[T_FLOW, T_FLOW],
        masks: Tuple[T_MASK, T_MASK],
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        interp_mode = self._interpolation_mode_strategy()
        out_images = tuple(
            F.resize(image, self.resize_size, interpolation=interp_mode) for image in images
        )
        out_disparities = ()
        for disparity in disparities:
            if disparity is None:
                out_disparities += (None,)
                continue
            # disparity encodes horizontal offsets, so rescale by the width ratio
            width_ratio = self.resize_size[1] / disparity.shape[-1]
            out_disparities += (
                F.resize(disparity, self.resize_size, interpolation=interp_mode) * width_ratio,
            )
        out_masks = ()
        for mask in masks:
            if mask is None:
                out_masks += (None,)
                continue
            # squeeze/unsqueeze because the resize API requires > 3D tensors
            out_masks += (
                F.resize(
                    mask.unsqueeze(0),
                    self.resize_size,
                    interpolation=F.InterpolationMode.NEAREST,
                ).squeeze(0),
            )
        return out_images, out_disparities, out_masks
class RandomRescaleAndCrop(torch.nn.Module):
    # This transform will resize the input with a given proba, and then crop it.
    # These are the reversed operations of the built-in RandomResizedCrop,
    # although the order of the operations doesn't matter too much: resizing a
    # crop would give the same result as cropping a resized image, up to
    # interpolation artifact at the borders of the output.
    #
    # The reason we don't rely on RandomResizedCrop is because of a significant
    # difference in the parametrization of both transforms, in particular,
    # because of the way the random parameters are sampled in both transforms,
    # which leads to fairly different resuts (and different epe). For more details see
    # https://github.com/pytorch/vision/pull/5026/files#r762932579
    def __init__(
        self,
        crop_size: Tuple[int, int],
        scale_range: Tuple[float, float] = (-0.2, 0.5),
        rescale_prob: float = 0.8,
        scaling_type: str = "exponential",
        interpolation_type: str = "bilinear",
    ) -> None:
        super().__init__()
        self.crop_size = crop_size  # (height, width) of the output crop
        self.min_scale = scale_range[0]
        self.max_scale = scale_range[1]
        self.rescale_prob = rescale_prob  # probability of performing the rescale step
        self.scaling_type = scaling_type  # "exponential" or "linear" (see forward)
        self._interpolation_mode_strategy = InterpolationStrategy(interpolation_type)
        if self.scaling_type == "linear" and self.min_scale < 0:
            raise ValueError("min_scale must be >= 0 for linear scaling")

    def forward(
        self,
        images: T_STEREO_TENSOR,
        disparities: Tuple[T_FLOW, T_FLOW],
        masks: Tuple[T_MASK, T_MASK],
    ) -> Tuple[T_STEREO_TENSOR, Tuple[T_FLOW, T_FLOW], Tuple[T_MASK, T_MASK]]:
        img_left, img_right = images
        dsp_left, dsp_right = disparities
        mask_left, mask_right = masks
        INTERP_MODE = self._interpolation_mode_strategy()
        # randomly sample scale
        h, w = img_left.shape[-2:]
        # Note: in original code, they use + 1 instead of + 8 for sparse datasets (e.g. Kitti)
        # It shouldn't matter much
        min_scale = max((self.crop_size[0] + 8) / h, (self.crop_size[1] + 8) / w)
        # exponential scaling will draw a random scale in (min_scale, max_scale) and then raise
        # 2 to the power of that random value. This final scale distribution will have a different
        # mean and variance than a uniform distribution. Note that a scale of 1 will result in
        # in a rescaling of 2X the original size, whereas a scale of -1 will result in a rescaling
        # of 0.5X the original size.
        # NOTE(review): any scaling_type other than "exponential"/"linear" leaves
        # `scale` unbound and raises NameError on the next line — consider validating
        # scaling_type in __init__.
        if self.scaling_type == "exponential":
            scale = 2 ** torch.empty(1, dtype=torch.float32).uniform_(self.min_scale, self.max_scale).item()
        # linear scaling will draw a random scale in (min_scale, max_scale)
        elif self.scaling_type == "linear":
            scale = torch.empty(1, dtype=torch.float32).uniform_(self.min_scale, self.max_scale).item()
        # clamp so the rescaled image is still large enough to crop from
        scale = max(scale, min_scale)
        new_h, new_w = round(h * scale), round(w * scale)
        if torch.rand(1).item() < self.rescale_prob:
            # rescale the images
            img_left = F.resize(img_left, size=(new_h, new_w), interpolation=INTERP_MODE)
            img_right = F.resize(img_right, size=(new_h, new_w), interpolation=INTERP_MODE)
            resized_masks, resized_disparities = (), ()
            for disparity, mask in zip(disparities, masks):
                if disparity is not None:
                    if mask is None:
                        # dense disparity: plain resize, then rescale the values
                        resized_disparity = F.resize(disparity, size=(new_h, new_w), interpolation=INTERP_MODE)
                        # rescale the disparity
                        resized_disparity = (
                            resized_disparity * torch.tensor([scale], device=resized_disparity.device)[:, None, None]
                        )
                        resized_mask = None
                    else:
                        # sparse disparity: scatter valid samples into the new grid
                        resized_disparity, resized_mask = _resize_sparse_flow(
                            disparity, mask, scale_x=scale, scale_y=scale
                        )
                resized_masks += (resized_mask,)
                resized_disparities += (resized_disparity,)
        else:
            resized_disparities = disparities
            resized_masks = masks
        disparities = resized_disparities
        masks = resized_masks
        # Note: For sparse datasets (Kitti), the original code uses a "margin"
        # See e.g. https://github.com/princeton-vl/RAFT/blob/master/core/utils/augmentor.py#L220:L220
        # We don't, not sure it matters much
        # NOTE(review): shape[1]/shape[2] index H/W only for unbatched CHW tensors —
        # confirm inputs are 3D here (shape[-2]/shape[-1] would be safer).
        y0 = torch.randint(0, img_left.shape[1] - self.crop_size[0], size=(1,)).item()
        x0 = torch.randint(0, img_right.shape[2] - self.crop_size[1], size=(1,)).item()
        img_left = F.crop(img_left, y0, x0, self.crop_size[0], self.crop_size[1])
        img_right = F.crop(img_right, y0, x0, self.crop_size[0], self.crop_size[1])
        if dsp_left is not None:
            dsp_left = F.crop(disparities[0], y0, x0, self.crop_size[0], self.crop_size[1])
        if dsp_right is not None:
            dsp_right = F.crop(disparities[1], y0, x0, self.crop_size[0], self.crop_size[1])
        # crop each mask; None entries are forwarded unchanged
        cropped_masks = ()
        for mask in masks:
            if mask is not None:
                mask = F.crop(mask, y0, x0, self.crop_size[0], self.crop_size[1])
            cropped_masks += (mask,)
        return ((img_left, img_right), (dsp_left, dsp_right), cropped_masks)
def _resize_sparse_flow(
    flow: Tensor, valid_flow_mask: Tensor, scale_x: float = 1.0, scale_y: float = 0.0
) -> Tuple[Tensor, Tensor]:
    # This resizes both the flow and the valid_flow_mask mask (which is assumed to be reasonably sparse)
    # There are as-many non-zero values in the original flow as in the resized flow (up to OOB)
    # So for example if scale_x = scale_y = 2, the sparsity of the output flow is multiplied by 4
    # NOTE(review): the default scale_y=0.0 would yield h_new == 0 (an empty output);
    # callers are expected to always pass both scale factors explicitly.
    h, w = flow.shape[-2:]
    h_new = int(round(h * scale_y))
    w_new = int(round(w * scale_x))
    # start from all-zero flow and an all-invalid mask, then scatter valid samples in
    flow_new = torch.zeros(size=[1, h_new, w_new], dtype=flow.dtype)
    valid_new = torch.zeros(size=[h_new, w_new], dtype=valid_flow_mask.dtype)
    # integer pixel grid at the *source* resolution ("xy" indexing: jj = columns, ii = rows)
    jj, ii = torch.meshgrid(torch.arange(w), torch.arange(h), indexing="xy")
    ii_valid, jj_valid = ii[valid_flow_mask], jj[valid_flow_mask]
    # destination coordinates of each valid sample after rescaling
    ii_valid_new = torch.round(ii_valid.to(float) * scale_y).to(torch.long)
    jj_valid_new = torch.round(jj_valid.to(float) * scale_x).to(torch.long)
    # drop samples whose rounded destination falls outside the new grid
    within_bounds_mask = (0 <= ii_valid_new) & (ii_valid_new < h_new) & (0 <= jj_valid_new) & (jj_valid_new < w_new)
    ii_valid = ii_valid[within_bounds_mask]
    jj_valid = jj_valid[within_bounds_mask]
    ii_valid_new = ii_valid_new[within_bounds_mask]
    jj_valid_new = jj_valid_new[within_bounds_mask]
    # flow values are scaled by scale_x only (disparity assumed horizontal)
    valid_flow_new = flow[:, ii_valid, jj_valid]
    valid_flow_new *= scale_x
    flow_new[:, ii_valid_new, jj_valid_new] = valid_flow_new
    valid_new[ii_valid_new, jj_valid_new] = valid_flow_mask[ii_valid, jj_valid]
    return flow_new, valid_new.bool()
class Compose(torch.nn.Module):
    """Chain stereo transforms, threading (images, disparities, masks) through each.

    The whole pipeline runs under ``torch.inference_mode`` (no autograd
    tracking inside the transforms).
    """

    def __init__(self, transforms: List[Callable]):
        super().__init__()
        self.transforms = transforms

    @torch.inference_mode()
    def forward(self, images, disparities, masks):
        sample = (images, disparities, masks)
        for transform in self.transforms:
            # every transform returns a new (images, disparities, masks) triple
            sample = tuple(transform(*sample))
        images, disparities, masks = sample
        return images, disparities, masks
| bsd-3-clause | f662d1b3dba09026db481c4f84fe01ae | 39.218266 | 119 | 0.59709 | 3.386029 | false | false | false | false |
pytorch/vision | .github/process_commit.py | 1 | 2470 | """
This script finds the merger responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'. If there exists no PR associated with the commit or the PR is properly labeled,
this script is a no-op.
Note: we ping the merger only, not the reviewers, as the reviewers can sometimes be external to torchvision
with no labeling responsibility, so we don't want to bother them.
"""
import sys
from typing import Any, Optional, Set, Tuple
import requests
# For a PR to be properly labeled it should have one primary label and one secondary label
# Primary labels classify the nature of the change (feature, bug fix, cleanup, ...).
PRIMARY_LABELS = {
    "new feature",
    "bug",
    "code quality",
    "enhancement",
    "bc-breaking",
    "deprecation",
    "other",
    "prototype",
}
# Secondary labels identify the affected area/module of the code base.
SECONDARY_LABELS = {
    "dependency issue",
    "module: c++ frontend",
    "module: ci",
    "module: datasets",
    "module: documentation",
    "module: io",
    "module: models.quantization",
    "module: models",
    "module: onnx",
    "module: ops",
    "module: reference scripts",
    "module: rocm",
    "module: tests",
    "module: transforms",
    "module: utils",
    "module: video",
    "Perf",
    "Revert(ed)",
    "topic: build",
}
def query_torchvision(cmd: str, *, accept) -> Any:
    """Issue a GET request against the pytorch/vision GitHub REST API.

    ``cmd`` is the path below ``repos/pytorch/vision/`` and ``accept`` the
    media type sent in the ``Accept`` header. Returns the decoded JSON body.
    """
    response = requests.get(
        f"https://api.github.com/repos/pytorch/vision/{cmd}",
        headers=dict(Accept=accept),
        # requests has no default timeout; without one a stalled connection
        # would hang the CI workflow indefinitely
        timeout=30,
    )
    return response.json()
def get_pr_number(commit_hash: str) -> Optional[int]:
    """Return the number of the PR associated with ``commit_hash``, or None."""
    # See https://docs.github.com/en/rest/reference/repos#list-pull-requests-associated-with-a-commit
    pulls = query_torchvision(f"commits/{commit_hash}/pulls", accept="application/vnd.github.groot-preview+json")
    # an empty list means the commit was not merged through a PR
    return pulls[0]["number"] if pulls else None
def get_pr_merger_and_labels(pr_number: int) -> Tuple[str, Set[str]]:
    """Return the GitHub login of the PR's merger and the set of its label names."""
    # See https://docs.github.com/en/rest/reference/pulls#get-a-pull-request
    data = query_torchvision(f"pulls/{pr_number}", accept="application/vnd.github.v3+json")
    # NOTE(review): assumes the PR was merged — "merged_by" is null for unmerged
    # PRs, which would make the subscript below raise TypeError.
    merger = data["merged_by"]["login"]
    labels = {label["name"] for label in data["labels"]}
    return merger, labels
if __name__ == "__main__":
commit_hash = sys.argv[1]
pr_number = get_pr_number(commit_hash)
if not pr_number:
sys.exit(0)
merger, labels = get_pr_merger_and_labels(pr_number)
is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if not is_properly_labeled:
print(f"@{merger}")
| bsd-3-clause | 8df7576776a7c828e826938d194953eb | 29.493827 | 114 | 0.668421 | 3.383562 | false | false | false | false |
pytorch/vision | test/preprocess-bench.py | 1 | 2402 | import argparse
import os
from timeit import default_timer as timer
import torch
import torch.utils.data
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.model_zoo import tqdm
# Command-line interface for the preprocessing benchmark.
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
# Root folder containing the train/ and val/ image directories.
parser.add_argument("--data", metavar="PATH", required=True, help="path to dataset")
# DataLoader worker count; -j mirrors the familiar make(1) flag.
parser.add_argument("--nThreads", "-j", default=2, type=int, metavar="N",
                    help="number of data loading threads (default: 2)")
# Images per mini-batch; 1 degenerates to pure stochastic gradient descent.
parser.add_argument("--batchSize", "-b", default=256, type=int, metavar="N",
                    help="mini-batch size (1 = pure stochastic) Default: 256")
# Opt into the accimage backend for faster image decoding.
parser.add_argument("--accimage", action="store_true", help="use accimage")
if __name__ == "__main__":
args = parser.parse_args()
if args.accimage:
torchvision.set_image_backend("accimage")
print(f"Using {torchvision.get_image_backend()}")
# Data loading code
transform = transforms.Compose(
[
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.PILToTensor(),
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
traindir = os.path.join(args.data, "train")
valdir = os.path.join(args.data, "val")
train = datasets.ImageFolder(traindir, transform)
val = datasets.ImageFolder(valdir, transform)
train_loader = torch.utils.data.DataLoader(
train, batch_size=args.batchSize, shuffle=True, num_workers=args.nThreads
)
train_iter = iter(train_loader)
start_time = timer()
batch_count = 20 * args.nThreads
with tqdm(total=batch_count) as pbar:
for _ in tqdm(range(batch_count)):
pbar.update(1)
batch = next(train_iter)
end_time = timer()
print(
"Performance: {dataset:.0f} minutes/dataset, {batch:.1f} ms/batch,"
" {image:.2f} ms/image {rate:.0f} images/sec".format(
dataset=(end_time - start_time) * (float(len(train_loader)) / batch_count / 60.0),
batch=(end_time - start_time) / float(batch_count) * 1.0e3,
image=(end_time - start_time) / (batch_count * args.batchSize) * 1.0e3,
rate=(batch_count * args.batchSize) / (end_time - start_time),
)
)
| bsd-3-clause | 1b3f62a6a54e631810790940e9704a56 | 35.393939 | 118 | 0.646128 | 3.411932 | false | false | false | false |
automl/auto-sklearn | autosklearn/ensemble_building/manager.py | 1 | 15177 | from __future__ import annotations
from typing import Any, Dict, Sequence, Type
import logging.handlers
import time
import traceback
import dask.distributed
import numpy as np
from sklearn.utils.validation import check_random_state
from smac.callbacks import IncorporateRunResultCallback
from smac.optimizer.smbo import SMBO
from smac.runhistory.runhistory import RunInfo, RunValue
from smac.tae.base import StatusType
from autosklearn.automl_common.common.utils.backend import Backend
from autosklearn.ensemble_building.builder import EnsembleBuilder
from autosklearn.ensembles.abstract_ensemble import AbstractEnsemble
from autosklearn.ensembles.ensemble_selection import EnsembleSelection
from autosklearn.metrics import Scorer
from autosklearn.util.logging_ import get_named_client_logger
class EnsembleBuilderManager(IncorporateRunResultCallback):
    """SMAC callback that schedules ensemble-building jobs on a dask cluster.

    After every SMAC run it (re)submits at most one ``EnsembleBuilder`` job,
    collecting each finished job's history into ``self.history``.
    """

    def __init__(
        self,
        backend: Backend,
        dataset_name: str,
        task: int,
        metrics: Sequence[Scorer],
        time_left_for_ensembles: float = np.inf,
        max_iterations: int | None = None,
        pynisher_context: str = "fork",
        ensemble_class: Type[AbstractEnsemble] = EnsembleSelection,
        ensemble_kwargs: Dict[str, Any] | None = None,
        ensemble_nbest: int | float = 50,
        max_models_on_disc: int | float | None = None,
        seed: int = 1,
        precision: int = 32,
        memory_limit: int | None = None,
        read_at_most: int | None = None,
        logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT,
        random_state: int | np.random.RandomState | None = None,
        start_time: float | None = None,
    ):
        """SMAC callback to handle ensemble building

        Parameters
        ----------
        backend: Backend
            backend to write and read files

        dataset_name: str
            name of dataset

        task: int
            Type of ML task

        metrics: Sequence[Scorer]
            Metrics to optimize the ensemble for

        time_left_for_ensemble: float = np.inf
            How much time is left for the task in seconds.
            Job should finish within this allocated time

        max_iterations: int | None = None
            maximal number of iterations to run this script. None indicates no limit
            on iterations.

        pynisher_context: "spawn" | "fork" | "forkserver" = "fork"
            The multiprocessing context for pynisher.

        ensemble_class : Type[AbstractEnsemble] (default=EnsembleSelection)
            Class implementing the post-hoc ensemble algorithm. Set to
            ``None`` to disable ensemble building or use ``SingleBest``
            to obtain only use the single best model instead of an
            ensemble.

        ensemble_kwargs : Dict, optional
            Keyword arguments that are passed to the ensemble class upon
            initialization.

        ensemble_nbest: int | float = 50
            If int: consider only the n best prediction
            If float: consider only this fraction of the best models

        max_models_on_disc: int | float | None = None
            Defines the maximum number of models that are kept in the disc.
            If int, it must be greater or equal than 1, and dictates the max
            number of models to keep.
            If float, it will be interpreted as the max megabytes allowed of
            disc space. That is, if the number of ensemble candidates require more
            disc space than this float value, the worst models will be deleted to
            keep within this budget. Models and predictions of the worst-performing
            models will be deleted then.
            If None, the feature is disabled. It defines an upper bound on the
            models that can be used in the ensemble.

        seed: int = 1
            Seed used for the inidividual runs

        precision: 16 | 32 | 64 | 128 = 32
            Precision of floats to read the predictions

        memory_limit: int | None = None
            Memory limit in mb. If ``None``, no memory limit is enforced.

        read_at_most: int | None = None
            read at most n new prediction files in each iteration. If `None`, will read
            the predictions and calculate losses for all runs that require it.

        logger_port: int = DEFAULT_TCP_LOGGING_PORT
            Port that receives logging records

        start_time: float | None = None
            DISABLED: Just using time.time() to set it
            The time when this job was started, to account for any latency in job
            allocation.
        """
        self.time_left_for_ensembles = time_left_for_ensembles
        self.backend = backend
        self.dataset_name = dataset_name
        self.task = task
        self.metrics = metrics
        self.ensemble_class = ensemble_class
        self.ensemble_kwargs = ensemble_kwargs
        self.ensemble_nbest = ensemble_nbest
        self.max_models_on_disc = max_models_on_disc
        self.seed = seed
        self.precision = precision
        self.max_iterations = max_iterations
        self.read_at_most = read_at_most
        self.memory_limit = memory_limit
        self.random_state = check_random_state(random_state)
        self.logger_port = logger_port
        self.pynisher_context = pynisher_context

        # Store something similar to SMAC's runhistory
        self.history: list[dict[str, Any]] = []

        # We only submit new ensembles when there is not an active ensemble job
        self.futures: list[dask.distributed.Future] = []

        # The last criteria is the number of iterations
        self.iteration = 0

        # Keep track of when we started to know when we need to finish!
        # NOTE(review): the `start_time` argument is intentionally ignored
        # (documented as DISABLED above).
        self.start_time = time.time()

    def __call__(
        self,
        smbo: "SMBO",
        run_info: RunInfo,
        result: RunValue,
        time_left: float,
    ) -> None:
        """Callback invoked by SMAC after every completed run.

        Skips ensemble building when the optimization is stopping; otherwise
        delegates to :meth:`build_ensemble`. Returns nothing — collected
        ensemble histories accumulate in ``self.history``.

        Parameters
        ----------
        smbo : SMBO
            The SMAC optimizer issuing the callback.
        run_info : RunInfo
            Description of the evaluated configuration (unused here).
        result : RunValue
            Result of the run; used only to detect STOP/ABORT states.
        time_left : float
            Remaining optimization time. Currently unused — the manager
            tracks its own elapsed time from ``self.start_time``.
        """
        if result.status in (StatusType.STOP, StatusType.ABORT) or smbo._stop:
            return
        self.build_ensemble(smbo.tae_runner.client)

    def build_ensemble(
        self,
        dask_client: dask.distributed.Client,
    ) -> None:
        """Build the ensemble

        Parameters
        ----------
        dask_client: dask.distributed.Client
            The dask client to use
        """
        # The second criteria is elapsed time
        elapsed_time = time.time() - self.start_time

        logger = get_named_client_logger(
            name="EnsembleBuilder",
            port=self.logger_port,
        )

        # First test for termination conditions
        if self.time_left_for_ensembles < elapsed_time:
            logger.info(
                "Terminate ensemble building as not time is left (run for {}s)".format(
                    elapsed_time
                ),
            )
            return
        if self.max_iterations is not None and self.max_iterations <= self.iteration:
            logger.info(
                "Terminate ensemble building because of max iterations:"
                f" {self.max_iterations} of {self.iteration}"
            )
            return

        if len(self.futures) != 0:
            if self.futures[0].done():
                # there is only ever one in-flight future, so [0] and pop()
                # refer to the same job
                result = self.futures.pop().result()
                if result:
                    ensemble_history, self.ensemble_nbest = result
                    logger.debug(
                        f"iteration={self.iteration} @ elapsed_time={elapsed_time}"
                        f" has history={ensemble_history}"
                    )
                    self.history.extend(ensemble_history)

        # Only submit new jobs if the previous ensemble job finished
        if len(self.futures) == 0:
            # Add the result of the run
            # On the next while iteration, no references to
            # ensemble builder object, so it should be garbage collected to
            # save memory while waiting for resources
            # Also, notice how ensemble nbest is returned, so we don't waste
            # iterations testing if the deterministic predictions size can
            # be fitted in memory
            try:
                # Submit a Dask job from this job, to properly
                # see it in the dask diagnostic dashboard
                # Notice that the forked ensemble_builder_process will
                # wait for the below function to be done
                self.futures.append(
                    dask_client.submit(
                        EnsembleBuilderManager.fit_and_return_ensemble,
                        backend=self.backend,
                        dataset_name=self.dataset_name,
                        task_type=self.task,
                        metrics=self.metrics,
                        ensemble_class=self.ensemble_class,
                        ensemble_kwargs=self.ensemble_kwargs,
                        ensemble_nbest=self.ensemble_nbest,
                        max_models_on_disc=self.max_models_on_disc,
                        seed=self.seed,
                        precision=self.precision,
                        memory_limit=self.memory_limit,
                        read_at_most=self.read_at_most,
                        random_state=self.random_state,
                        end_at=self.start_time + self.time_left_for_ensembles,
                        iteration=self.iteration,
                        pynisher_context=self.pynisher_context,
                        logger_port=self.logger_port,
                    )
                )

                logger.info(
                    "{}/{} Started Ensemble builder job at {} for iteration {}.".format(
                        # Log the client to make sure we
                        # remain connected to the scheduler
                        self.futures[0],
                        dask_client,
                        time.strftime("%Y.%m.%d-%H.%M.%S"),
                        self.iteration,
                    ),
                )
                self.iteration += 1
            except Exception as e:
                # broad catch: a failed submission must not crash the SMAC loop
                exception_traceback = traceback.format_exc()
                error_message = repr(e)
                logger.critical(exception_traceback)
                logger.critical(error_message)

    @staticmethod
    def fit_and_return_ensemble(
        iteration: int,
        end_at: float,
        backend: Backend,
        dataset_name: str,
        task_type: int,
        metrics: Sequence[Scorer],
        pynisher_context: str,
        ensemble_class: Type[AbstractEnsemble] = EnsembleSelection,
        ensemble_kwargs: Dict[str, Any] | None = None,
        ensemble_nbest: int | float = 50,
        max_models_on_disc: int | float | None = None,
        seed: int = 1,
        precision: int = 32,
        memory_limit: int | None = None,
        read_at_most: int | None = None,
        logger_port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT,
        random_state: int | np.random.RandomState | None = None,
    ) -> tuple[list[dict[str, Any]], int | float]:
        """
        A short function to fit and create an ensemble. It is just a wrapper to easily
        send a request to dask to create an ensemble and clean the memory when finished

        Parameters
        ----------
        iteration: int
            The current iteration

        end_at: float
            At what time the job must finish. Needs to be the endtime and not the
            time left because we do not know when dask schedules the job.

        backend: Backend
            Backend to write and read files

        dataset_name: str
            name of dataset

        task_type: int
            type of ML task

        metrics: Sequence[Scorer]
            Metrics to optimize the ensemble for.

        pynisher_context: "fork" | "spawn" | "forkserver" = "fork"
            Context to use for multiprocessing, can be either fork, spawn or forkserver.

        ensemble_class : Type[AbstractEnsemble] (default=EnsembleSelection)
            Class implementing the post-hoc ensemble algorithm. Set to
            ``None`` to disable ensemble building or use ``SingleBest``
            to obtain only use the single best model instead of an
            ensemble.

        ensemble_kwargs : Dict, optional
            Keyword arguments that are passed to the ensemble class upon
            initialization.

        ensemble_nbest: int | float = 50
            If int: consider only the n best prediction
            If float: consider only this fraction of the best models

        max_models_on_disc: int | float | None = 100
            Defines the maximum number of models that are kept in the disc.
            If int, it must be greater or equal than 1, and dictates the max number of
            models to keep.
            If float, it will be interpreted as the max megabytes allowed of disc space.
            That is, if the number of ensemble candidates require more disc space than
            this float value, the worst models will be deleted to keep within this
            budget. Models and predictions of the worst-performing models will be
            deleted then.
            If None, the feature is disabled.

        seed: int = 1
            Seed used for training the models in the backend

        precision: 16 | 32 | 64 | 128 = 32
            Precision of floats to read the predictions

        memory_limit: int | None = None
            Memory limit in mb. If ``None``, no memory limit is enforced.

        read_at_most: int | None = None
            read at most n new prediction files in each iteration. If `None`, will read
            the predictions and calculate losses for all runs that require it.

        logger_port: int = DEFAULT_TCP_LOGGING_PORT
            The port where the logging server is listening to.

        random_state: int | RandomState | None = None
            A random state used for the ensemble selection process.

        Returns
        -------
        (ensemble_history: list[dict[str, Any]], nbest: int | float)
            The ensemble history and the nbest chosen members
        """
        random_state = check_random_state(random_state)

        result = EnsembleBuilder(
            backend=backend,
            dataset_name=dataset_name,
            task_type=task_type,
            metrics=metrics,
            ensemble_class=ensemble_class,
            ensemble_kwargs=ensemble_kwargs,
            ensemble_nbest=ensemble_nbest,
            max_models_on_disc=max_models_on_disc,
            seed=seed,
            precision=precision,
            memory_limit=memory_limit,
            read_at_most=read_at_most,
            # draw a fresh per-job seed so repeated submissions differ
            random_state=random_state.randint(10000000),
            logger_port=logger_port,
        ).run(
            end_at=end_at,
            iteration=iteration,
            pynisher_context=pynisher_context,
        )
        return result
| bsd-3-clause | b21e5301ccb990eac7b01a68f78da6e9 | 37.618321 | 88 | 0.586545 | 4.596305 | false | false | false | false |
automl/auto-sklearn | examples/60_search/example_parallel_n_jobs.py | 1 | 1775 | # -*- encoding: utf-8 -*-
"""
===================================
Parallel Usage on a single machine
===================================
*Auto-sklearn* uses
`dask.distributed <https://distributed.dask.org/en/latest/index.html>`_
for parallel optimization.
This example shows how to start *Auto-sklearn* to use multiple cores on a
single machine. Using this mode, *Auto-sklearn* starts a dask cluster,
manages the workers and takes care of shutting down the cluster once the
computation is done.
To run *Auto-sklearn* on multiple machines check the example
:ref:`sphx_glr_examples_60_search_example_parallel_manual_spawning_cli.py`.
"""
import sklearn.model_selection
import sklearn.datasets
import sklearn.metrics
import autosklearn.classification
############################################################################
# Data Loading
# ============
# Load the breast-cancer dataset and hold out a test split (fixed seed for
# reproducibility of the example).
X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
    X, y, random_state=1
)
############################################################################
# Build and fit a classifier
# ==========================
#
# To use ``n_jobs`` we must guard the code with ``if __name__ == "__main__"``,
# because auto-sklearn spawns worker processes.
if __name__ == "__main__":
    automl = autosklearn.classification.AutoSklearnClassifier(
        time_left_for_this_task=120,
        per_run_time_limit=30,
        tmp_folder="/tmp/autosklearn_parallel_1_example_tmp",
        # Run four optimization jobs in parallel on this machine.
        n_jobs=4,
        # Each one of the 4 jobs is allocated 3GB
        memory_limit=3072,
        seed=5,
    )
    automl.fit(X_train, y_train, dataset_name="breast_cancer")
    # Print statistics about the auto-sklearn run such as number of
    # iterations, number of models failed with a time out.
    print(automl.sprint_statistics())
| bsd-3-clause | 3556149e5162bbe3b45768352a00c5fc | 31.87037 | 76 | 0.604507 | 3.760593 | false | true | false | false |
automl/auto-sklearn | test/test_pipeline/components/data_preprocessing/test_one_hot_encoding.py | 1 | 3571 | import numpy as np
from scipy import sparse
from autosklearn.pipeline.components.data_preprocessing.categorical_encoding.no_encoding import ( # noqa: E501
NoEncoding,
)
from autosklearn.pipeline.components.data_preprocessing.categorical_encoding.one_hot_encoding import ( # noqa: E501
OneHotEncoder,
)
from autosklearn.pipeline.util import _test_preprocessing
import unittest
def create_X(instances=1000, n_feats=10, categs_per_feat=5, seed=0):
    """Build a deterministic ``(instances, n_feats)`` matrix of categories.

    Every entry is an integer drawn uniformly from ``[0, categs_per_feat)``
    using a ``RandomState`` seeded with ``seed``, so repeated calls with the
    same arguments produce identical data.
    """
    generator = np.random.RandomState(seed)
    return generator.randint(0, categs_per_feat, size=(instances, n_feats))
class OneHotEncoderTest(unittest.TestCase):
    """Tests for the OneHotEncoder and NoEncoding preprocessing components."""

    def setUp(self):
        # Fresh deterministic categorical data for every test.
        self.X_train = create_X()

    def test_data_type_consistency(self):
        """Dense input must stay dense and sparse input must stay sparse."""
        X = np.random.randint(3, 6, (3, 4))
        Y = OneHotEncoder().fit_transform(X)
        self.assertFalse(sparse.issparse(Y))
        X = sparse.csc_matrix(
            ([3, 6, 4, 5], ([0, 1, 2, 1], [3, 2, 1, 0])), shape=(3, 4)
        )
        Y = OneHotEncoder().fit_transform(X)
        self.assertTrue(sparse.issparse(Y))

    def test_default_configuration(self):
        """Fitting twice with the default configuration gives identical output."""
        transformations = []
        for i in range(2):
            configuration_space = OneHotEncoder.get_hyperparameter_search_space()
            default_config = configuration_space.get_default_configuration()
            preprocessor = OneHotEncoder(random_state=1, **default_config)
            transformer = preprocessor.fit(self.X_train.copy())
            Xt = transformer.transform(self.X_train.copy())
            transformations.append(Xt)
            if len(transformations) > 1:
                np.testing.assert_array_equal(transformations[-1], transformations[-2])

    def test_default_configuration_no_encoding(self):
        """NoEncoding must pass dense data through completely unchanged."""
        transformations = []
        for i in range(2):
            transformation, original = _test_preprocessing(NoEncoding)
            self.assertEqual(transformation.shape, original.shape)
            self.assertTrue((transformation == original).all())
            transformations.append(transformation)
            if len(transformations) > 1:
                self.assertTrue((transformations[-1] == transformations[-2]).all())

    def test_default_configuration_sparse_data(self):
        """Default OneHotEncoder is deterministic on sparse input."""
        transformations = []
        # Replace non-finite entries before building the sparse matrix.
        self.X_train[~np.isfinite(self.X_train)] = 0
        self.X_train = sparse.csc_matrix(self.X_train)
        for i in range(2):
            configuration_space = OneHotEncoder.get_hyperparameter_search_space()
            default_config = configuration_space.get_default_configuration()
            preprocessor = OneHotEncoder(random_state=1, **default_config)
            transformer = preprocessor.fit(self.X_train.copy())
            Xt = transformer.transform(self.X_train.copy())
            transformations.append(Xt)
            if len(transformations) > 1:
                # Sparse matrices compare via the count of differing entries.
                self.assertEqual(
                    (transformations[-1] != transformations[-2]).count_nonzero(), 0
                )

    def test_default_configuration_sparse_no_encoding(self):
        """NoEncoding must pass sparse data through completely unchanged."""
        transformations = []
        for i in range(2):
            transformation, original = _test_preprocessing(NoEncoding, make_sparse=True)
            self.assertEqual(transformation.shape, original.shape)
            self.assertTrue((transformation.todense() == original.todense()).all())
            transformations.append(transformation)
            if len(transformations) > 1:
                self.assertEqual(
                    (transformations[-1] != transformations[-2]).count_nonzero(), 0
                )
| bsd-3-clause | 64bb7b33ec9c4cafe39fd75925fc12bf | 37.815217 | 116 | 0.629236 | 4.085812 | false | true | false | false |
automl/auto-sklearn | autosklearn/ensembles/ensemble_selection.py | 1 | 14007 | from __future__ import annotations
from typing import Dict, List, Sequence, Tuple, Union
import random
import warnings
from collections import Counter
import numpy as np
from sklearn.utils import check_random_state
from autosklearn.automl_common.common.utils.backend import Backend
from autosklearn.constants import TASK_TYPES
from autosklearn.data.validation import SUPPORTED_FEAT_TYPES
from autosklearn.ensemble_building.run import Run
from autosklearn.ensembles.abstract_ensemble import AbstractEnsemble
from autosklearn.metrics import Scorer, calculate_losses
from autosklearn.pipeline.base import BasePipeline
class EnsembleSelection(AbstractEnsemble):
    """Greedy (Caruana-style) ensemble selection over base-model predictions."""

    def __init__(
        self,
        task_type: int,
        metrics: Sequence[Scorer] | Scorer,
        backend: Backend,
        ensemble_size: int = 50,
        bagging: bool = False,
        mode: str = "fast",
        random_state: int | np.random.RandomState | None = None,
    ) -> None:
        """An ensemble of selected algorithms

        Fitting an EnsembleSelection generates an ensemble from the models
        generated during the search process. Can be further used for prediction.

        Parameters
        ----------
        task_type: int
            An identifier indicating which task is being performed.

        metrics: Sequence[Scorer] | Scorer
            The metric used to evaluate the models. If multiple metrics are passed,
            ensemble selection only optimizes for the first

        backend : Backend
            Gives access to the backend of Auto-sklearn. Not used by Ensemble Selection.

        ensemble_size: int = 50
            Number of greedy selection rounds, i.e. the maximum number of
            (non-unique) models added to the ensemble.

        bagging: bool = False
            Whether to use bagging in ensemble selection

        mode: str in ['fast', 'slow'] = 'fast'
            Which kind of ensemble generation to use
            * 'slow' - The original method used in Rich Caruana's ensemble selection.
            * 'fast' - A faster version of Rich Caruanas' ensemble selection.

        random_state: int | RandomState | None = None
            The random_state used for ensemble selection.

            * None - Uses numpy's default RandomState object
            * int - Successive calls to fit will produce the same results
            * RandomState - Truly random, each call to fit will produce
              different results, even with the same object.

        References
        ----------
        | Ensemble selection from libraries of models
        | Rich Caruana, Alexandru Niculescu-Mizil, Geoff Crew and Alex Ksikes
        | ICML 2004
        | https://dl.acm.org/doi/10.1145/1015330.1015432
        | https://www.cs.cornell.edu/~caruana/ctp/ct.papers/caruana.icml04.icdm06long.pdf
        """  # noqa: E501
        self.ensemble_size = ensemble_size
        self.task_type = task_type
        if isinstance(metrics, Sequence):
            if len(metrics) > 1:
                # Only the first metric is optimized; warn so the caller knows.
                warnings.warn(
                    "Ensemble selection can only optimize one metric, "
                    "but multiple metrics were passed, dropping all "
                    "except for the first metric."
                )
            self.metric = metrics[0]
        else:
            self.metric = metrics
        self.bagging = bagging
        self.mode = mode

        # Behaviour similar to sklearn
        # int - Deterministic with successive calls to fit
        # RandomState - Successive calls to fit will produce differences
        # None - Uses numpy's global singleton RandomState
        # https://scikit-learn.org/stable/common_pitfalls.html#controlling-randomness
        self.random_state = random_state

    def fit(
        self,
        base_models_predictions: List[np.ndarray],
        true_targets: np.ndarray,
        model_identifiers: List[Tuple[int, int, float]],
        runs: Sequence[Run],
        X_data: SUPPORTED_FEAT_TYPES | None = None,
    ) -> EnsembleSelection:
        """Validate the configuration, run selection and compute model weights.

        Raises ``ValueError`` for an invalid ensemble size, task type, metric
        or mode.  Stores the chosen ``model_identifiers`` and returns ``self``.
        """
        self.ensemble_size = int(self.ensemble_size)
        if self.ensemble_size < 1:
            raise ValueError("Ensemble size cannot be less than one!")
        if self.task_type not in TASK_TYPES:
            raise ValueError("Unknown task type %s." % self.task_type)
        if not isinstance(self.metric, Scorer):
            raise ValueError(
                "The provided metric must be an instance of Scorer, "
                "nevertheless it is {}({})".format(
                    self.metric,
                    type(self.metric),
                )
            )
        if self.mode not in ("fast", "slow"):
            raise ValueError("Unknown mode %s" % self.mode)

        if self.bagging:
            self._bagging(base_models_predictions, true_targets)
        else:
            self._fit(
                predictions=base_models_predictions,
                X_data=X_data,
                labels=true_targets,
            )
        self._calculate_weights()
        self.identifiers_ = model_identifiers
        return self

    def _fit(
        self,
        predictions: List[np.ndarray],
        labels: np.ndarray,
        *,
        X_data: SUPPORTED_FEAT_TYPES | None = None,
    ) -> EnsembleSelection:
        # Dispatch to the implementation chosen via ``mode``.
        if self.mode == "fast":
            self._fast(predictions=predictions, X_data=X_data, labels=labels)
        else:
            self._slow(predictions=predictions, X_data=X_data, labels=labels)
        return self

    def _fast(
        self,
        predictions: List[np.ndarray],
        labels: np.ndarray,
        *,
        X_data: SUPPORTED_FEAT_TYPES | None = None,
    ) -> None:
        """Fast version of Rich Caruana's ensemble selection method."""
        self.num_input_models_ = len(predictions)

        rand = check_random_state(self.random_state)

        ensemble = []  # type: List[np.ndarray]
        trajectory = []
        order = []

        ensemble_size = self.ensemble_size

        # Two reusable buffers: the (unscaled) running sum of the selected
        # predictions, and a scratch buffer for each candidate's average.
        weighted_ensemble_prediction = np.zeros(
            predictions[0].shape,
            dtype=np.float64,
        )
        fant_ensemble_prediction = np.zeros(
            weighted_ensemble_prediction.shape,
            dtype=np.float64,
        )
        for i in range(ensemble_size):
            losses = np.zeros(
                (len(predictions)),
                dtype=np.float64,
            )
            s = len(ensemble)
            if s > 0:
                # Fold the most recently selected prediction into the running sum.
                np.add(
                    weighted_ensemble_prediction,
                    ensemble[-1],
                    out=weighted_ensemble_prediction,
                )

            # Memory-efficient averaging!
            for j, pred in enumerate(predictions):
                # fant_ensemble_prediction is the prediction of the current ensemble
                # and should be
                #
                # ([predictions[selected_prev_iterations] + predictions[j])/(s+1)
                #
                # We overwrite the contents of fant_ensemble_prediction directly with
                # weighted_ensemble_prediction + new_prediction and then scale for avg
                np.add(weighted_ensemble_prediction, pred, out=fant_ensemble_prediction)
                np.multiply(
                    fant_ensemble_prediction,
                    (1.0 / float(s + 1)),
                    out=fant_ensemble_prediction,
                )

                losses[j] = calculate_losses(
                    solution=labels,
                    prediction=fant_ensemble_prediction,
                    task_type=self.task_type,
                    metrics=[self.metric],
                    X_data=X_data,
                    scoring_functions=None,
                )[self.metric.name]

            # Break loss ties uniformly at random.
            all_best = np.argwhere(losses == np.nanmin(losses)).flatten()

            best = rand.choice(all_best)

            ensemble.append(predictions[best])
            trajectory.append(losses[best])
            order.append(best)

            # Handle special case
            if len(predictions) == 1:
                break

        # NOTE: unlike _slow, these stay plain Python lists here.
        self.indices_ = order
        self.trajectory_ = trajectory
        self.train_loss_ = trajectory[-1]

    def _slow(
        self,
        predictions: List[np.ndarray],
        labels: np.ndarray,
        *,
        X_data: SUPPORTED_FEAT_TYPES | None = None,
    ) -> None:
        """Rich Caruana's ensemble selection method."""
        self.num_input_models_ = len(predictions)

        ensemble = []
        trajectory = []
        order = []

        ensemble_size = self.ensemble_size

        for i in range(ensemble_size):
            losses = np.zeros(
                [np.shape(predictions)[0]],
                dtype=np.float64,
            )
            for j, pred in enumerate(predictions):
                # Tentatively add each candidate, score the average, then undo.
                ensemble.append(pred)
                ensemble_prediction = np.mean(np.array(ensemble), axis=0)
                losses[j] = calculate_losses(
                    solution=labels,
                    prediction=ensemble_prediction,
                    task_type=self.task_type,
                    metrics=[self.metric],
                    X_data=X_data,
                    scoring_functions=None,
                )[self.metric.name]
                ensemble.pop()
            best = np.nanargmin(losses)
            ensemble.append(predictions[best])
            trajectory.append(losses[best])
            order.append(best)

            # Handle special case
            if len(predictions) == 1:
                break

        self.indices_ = np.array(
            order,
            dtype=np.int64,
        )
        self.trajectory_ = np.array(
            trajectory,
            dtype=np.float64,
        )
        self.train_loss_ = trajectory[-1]

    def _calculate_weights(self) -> None:
        """Convert selection counts in ``indices_`` into normalized weights."""
        ensemble_members = Counter(self.indices_).most_common()
        weights = np.zeros(
            (self.num_input_models_,),
            dtype=np.float64,
        )
        for ensemble_member in ensemble_members:
            weight = float(ensemble_member[1]) / self.ensemble_size
            weights[ensemble_member[0]] = weight

        # Renormalize when selection stopped early (sum of counts < ensemble_size).
        if np.sum(weights) < 1:
            weights = weights / np.sum(weights)

        self.weights_ = weights

    def _bagging(
        self,
        predictions: List[np.ndarray],
        labels: np.ndarray,
        fraction: float = 0.5,
        n_bags: int = 20,
    ) -> np.ndarray:
        """Rich Caruana's ensemble selection method with bagging."""
        raise ValueError("Bagging might not work with class-based interface!")
        # NOTE(review): everything below is unreachable; if re-enabled, the
        # ``order, _ = self._fit(...)`` unpacking would fail because _fit
        # returns ``self``, not a tuple.
        n_models = predictions.shape[0]
        bag_size = int(n_models * fraction)

        order_of_each_bag = []
        for j in range(n_bags):
            # Bagging a set of models
            indices = sorted(random.sample(range(0, n_models), bag_size))
            bag = predictions[indices, :, :]
            order, _ = self._fit(predictions=bag, labels=labels)
            order_of_each_bag.append(order)

        return np.array(
            order_of_each_bag,
            dtype=np.int64,
        )

    def predict(
        self, base_models_predictions: Union[np.ndarray, List[np.ndarray]]
    ) -> np.ndarray:
        """Return the weighted average of the base models' predictions."""
        average = np.zeros_like(base_models_predictions[0], dtype=np.float64)
        tmp_predictions = np.empty_like(base_models_predictions[0], dtype=np.float64)

        # if predictions.shape[0] == len(self.weights_),
        # predictions include those of zero-weight models.
        if len(base_models_predictions) == len(self.weights_):
            for pred, weight in zip(base_models_predictions, self.weights_):
                np.multiply(pred, weight, out=tmp_predictions)
                np.add(average, tmp_predictions, out=average)

        # if prediction model.shape[0] == len(non_null_weights),
        # predictions do not include those of zero-weight models.
        elif len(base_models_predictions) == np.count_nonzero(self.weights_):
            non_null_weights = [w for w in self.weights_ if w > 0]
            for pred, weight in zip(base_models_predictions, non_null_weights):
                np.multiply(pred, weight, out=tmp_predictions)
                np.add(average, tmp_predictions, out=average)

        # If none of the above applies, then something must have gone wrong.
        else:
            raise ValueError(
                "The dimensions of ensemble predictions"
                " and ensemble weights do not match!"
            )
        del tmp_predictions
        return average

    def __str__(self) -> str:
        trajectory_str = " ".join(
            [f"{id}: {perf:.5f}" for id, perf in enumerate(self.trajectory_)]
        )
        identifiers_str = " ".join(
            [
                f"{identifier}"
                for idx, identifier in enumerate(self.identifiers_)
                if self.weights_[idx] > 0
            ]
        )
        return (
            "Ensemble Selection:\n"
            f"\tTrajectory: {trajectory_str}\n"
            f"\tMembers: {self.indices_}\n"
            f"\tWeights: {self.weights_}\n"
            f"\tIdentifiers: {identifiers_str}\n"
        )

    def get_models_with_weights(
        self, models: Dict[Tuple[int, int, float], BasePipeline]
    ) -> List[Tuple[float, BasePipeline]]:
        """Return (weight, model) pairs for non-zero weights, heaviest first."""
        output = []
        for i, weight in enumerate(self.weights_):
            if weight > 0.0:
                identifier = self.identifiers_[i]
                model = models[identifier]
                output.append((weight, model))

        output.sort(reverse=True, key=lambda t: t[0])

        return output

    def get_identifiers_with_weights(
        self,
    ) -> List[Tuple[Tuple[int, int, float], float]]:
        """Return every (identifier, weight) pair, including zero weights."""
        return list(zip(self.identifiers_, self.weights_))

    def get_selected_model_identifiers(self) -> List[Tuple[int, int, float]]:
        """Return identifiers of models that received a non-zero weight."""
        output = []

        for i, weight in enumerate(self.weights_):
            identifier = self.identifiers_[i]
            if weight > 0.0:
                output.append(identifier)

        return output

    def get_validation_performance(self) -> float:
        """Return the loss of the final (complete) ensemble on the ensemble set."""
        return self.trajectory_[-1]
| bsd-3-clause | f7f9485cf35185a613697d29fc33b628 | 34.460759 | 89 | 0.560862 | 4.3125 | false | false | false | false |
automl/auto-sklearn | examples/60_search/example_parallel_manual_spawning_cli.py | 1 | 7869 | # -*- encoding: utf-8 -*-
"""
======================================================
Parallel Usage: Spawning workers from the command line
======================================================
*Auto-sklearn* uses
`dask.distributed <https://distributed.dask.org/en/latest/index.html>`_
for parallel optimization.
This example shows how to start the dask scheduler and spawn
workers for *Auto-sklearn* manually from the command line. Use this example
as a starting point to parallelize *Auto-sklearn* across multiple
machines.
To run *Auto-sklearn* in parallel on a single machine check out the example
:ref:`sphx_glr_examples_60_search_example_parallel_n_jobs.py`.
If you want to start everything manually from within Python
please see ``:ref:sphx_glr_examples_60_search_example_parallel_manual_spawning_python.py``.
**NOTE:** Above example is disabled due to issue https://github.com/dask/distributed/issues/5627
You can learn more about the dask command line interface from
https://docs.dask.org/en/latest/setup/cli.html.
When manually passing a dask client to Auto-sklearn, all logic
must be guarded by ``if __name__ == "__main__":`` statements! We use
multiple such statements to properly render this example as a notebook
and also allow execution via the command line.
Background
==========
To run Auto-sklearn distributed on multiple machines we need to set
up three components:
1. **Auto-sklearn and a dask client**. This will manage all workload, find new
configurations to evaluate and submit jobs via a dask client. As this
runs Bayesian optimization it should be executed on its own CPU.
2. **The dask workers**. They will do the actual work of running machine
learning algorithms and require their own CPU each.
3. **The scheduler**. It manages the communication between the dask client
and the different dask workers. As the client and all workers connect
to the scheduler it must be started first. This is a light-weight job
and does not require its own CPU.
We will now start these three components in reverse order: scheduler,
workers and client. Also, in a real setup, the scheduler and the workers should
be started from the command line and not from within a Python file via
the ``subprocess`` module as done here (for the sake of having a self-contained
example).
"""
###########################################################################
# Import statements
# =================
import multiprocessing
import subprocess
import time
import dask.distributed
import sklearn.datasets
import sklearn.metrics
from autosklearn.classification import AutoSklearnClassifier
from autosklearn.constants import MULTICLASS_CLASSIFICATION
# Auto-sklearn's working directory and the process handles of everything we
# spawn, so that we can join them all at the end of the example.
tmp_folder = "/tmp/autosklearn_parallel_3_example_tmp"
worker_processes = []
###########################################################################
# 0. Setup client-scheduler communication
# =======================================
#
# In this examples the dask scheduler is started without an explicit
# address and port. Instead, the scheduler takes a free port and stores
# relevant information in a file for which we provided the name and
# location. This filename is also given to the worker so they can find all
# relevant information to connect to the scheduler.
scheduler_file_name = "scheduler-file.json"
############################################################################
# 1. Start scheduler
# ==================
#
# Starting the scheduler is done with the following bash command:
#
# .. code:: bash
#
# dask-scheduler --scheduler-file scheduler-file.json --idle-timeout 10
#
# We will now execute this bash command from within Python to have a
# self-contained example:
def cli_start_scheduler(scheduler_file_name):
    """Start a dask scheduler via the ``dask-scheduler`` CLI and wait for it.

    The scheduler writes its connection information to
    ``scheduler_file_name`` so that workers and clients can find it, and
    shuts itself down after 10 seconds without connections
    (``--idle-timeout 10``).

    Parameters
    ----------
    scheduler_file_name : str
        Path of the scheduler file shared with workers and the client.
    """
    command = f"dask-scheduler --scheduler-file {scheduler_file_name} --idle-timeout 10"
    # subprocess.run blocks until the process terminates; with check=True it
    # either returns a CompletedProcess whose returncode is always an int or
    # raises CalledProcessError. The original
    # ``while proc.returncode is None`` polling loop could therefore never
    # execute and has been removed.
    subprocess.run(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=True,
        check=True,
    )
if __name__ == "__main__":
    # Run the (blocking) scheduler in a separate process so this script can
    # continue to start workers and the client.
    process_python_worker = multiprocessing.Process(
        target=cli_start_scheduler,
        args=(scheduler_file_name,),
    )
    process_python_worker.start()
    worker_processes.append(process_python_worker)

    # Wait a second for the scheduler to become available
    time.sleep(1)
############################################################################
# 2. Start two workers
# ====================
#
# Starting the scheduler is done with the following bash command:
#
# .. code:: bash
#
# DASK_DISTRIBUTED__WORKER__DAEMON=False \
# dask-worker --nthreads 1 --lifetime 35 --memory-limit 0 \
# --scheduler-file scheduler-file.json
#
# We will now execute this bash command from within Python to have a
# self-contained example. Please note, that
# ``DASK_DISTRIBUTED__WORKER__DAEMON=False`` is required in this
# case as dask-worker creates a new process, which by default is not
# compatible with Auto-sklearn creating new processes in the workers itself.
# We disable dask's memory management by passing ``--memory-limit`` as
# Auto-sklearn does the memory management itself.
def cli_start_worker(scheduler_file_name):
    """Start a single-threaded dask worker via the ``dask-worker`` CLI.

    ``DASK_DISTRIBUTED__WORKER__DAEMON=False`` is required because
    Auto-sklearn itself spawns processes inside the workers, and
    ``--memory-limit 0`` disables dask's memory management (Auto-sklearn
    manages memory itself). The worker exits on its own after 35 seconds
    (``--lifetime 35``).

    Parameters
    ----------
    scheduler_file_name : str
        Path of the scheduler file written by ``dask-scheduler``.
    """
    command = (
        "DASK_DISTRIBUTED__WORKER__DAEMON=False "
        "dask-worker --nthreads 1 --lifetime 35 --memory-limit 0 "
        f"--scheduler-file {scheduler_file_name}"
    )
    # subprocess.run blocks until dask-worker exits and always sets an
    # integer returncode, so the original ``while proc.returncode is None``
    # polling loop could never execute and has been removed.
    subprocess.run(
        command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True
    )
if __name__ == "__main__":
    # Spawn two workers, each running the blocking CLI call in its own process.
    for _ in range(2):
        process_cli_worker = multiprocessing.Process(
            target=cli_start_worker,
            args=(scheduler_file_name,),
        )
        process_cli_worker.start()
        worker_processes.append(process_cli_worker)

    # Wait a second for workers to become available
    time.sleep(1)
############################################################################
# 3. Creating a client in Python
# ==============================
#
# Finally we create a dask cluster which also connects to the scheduler via
# the information in the file created by the scheduler.
# The client discovers the scheduler through the same scheduler file.
client = dask.distributed.Client(scheduler_file=scheduler_file_name)
############################################################################
# Start Auto-sklearn
# ~~~~~~~~~~~~~~~~~~
if __name__ == "__main__":
    X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)
    # NOTE(review): sklearn.model_selection is not imported at the top of
    # this file; this attribute access only works if another import pulls the
    # submodule in -- consider adding ``import sklearn.model_selection``.
    X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(
        X, y, random_state=1
    )
    automl = AutoSklearnClassifier(
        delete_tmp_folder_after_terminate=False,
        time_left_for_this_task=30,
        per_run_time_limit=10,
        memory_limit=2048,
        tmp_folder=tmp_folder,
        seed=777,
        # n_jobs is ignored internally as we pass a dask client.
        n_jobs=1,
        # Pass a dask client which connects to the previously constructed cluster.
        dask_client=client,
    )
    automl.fit(X_train, y_train)

    # Rebuild the final ensemble from the models found during the search.
    automl.fit_ensemble(
        y_train,
        task=MULTICLASS_CLASSIFICATION,
        dataset_name="digits",
        ensemble_kwargs={"ensemble_size": 20},
        ensemble_nbest=50,
    )

    predictions = automl.predict(X_test)
    print(automl.sprint_statistics())
    print("Accuracy score", sklearn.metrics.accuracy_score(y_test, predictions))
############################################################################
# Wait until all workers are closed
# =================================
#
# This is only necessary if the workers are started from within this python
# script. In a real application one would start them directly from the command
# line.
if __name__ == "__main__":
    # Block until every spawned process has exited so the script ends cleanly.
    process_python_worker.join()
    for process in worker_processes:
        process.join()
| bsd-3-clause | 85675f7655593216faab413aadc6c322 | 33.362445 | 96 | 0.63693 | 4.141579 | false | false | false | false |
automl/auto-sklearn | autosklearn/util/disk.py | 1 | 2506 | from __future__ import annotations
from typing import Any
import math
import shutil
import tempfile
import uuid
from pathlib import Path
# Exponent of 1024 for each supported size unit.
sizes = {
    "B": 0,
    "KB": 1,
    "MB": 2,
    "GB": 3,
    "TB": 4,
}


def sizeof(path: Path | str, unit: str = "B") -> float:
    """Return the size of a file or directory tree.

    Parameters
    ----------
    path : Path | str
        File or directory to measure. For a directory, the sizes of all
        regular files underneath it are summed recursively.

    unit : "B" | "KB" | "MB" | "GB" | "TB" = "B"
        Unit of the returned value.

    Returns
    -------
    float
        The size of the folder/file expressed in ``unit``.
    """
    if unit not in sizes:
        raise ValueError(f"Not a known unit {unit}")

    target = path if isinstance(path, Path) else Path(path)

    if target.is_file():
        total_bytes = target.stat().st_size
    else:
        total_bytes = sum(
            entry.stat().st_size
            for entry in target.glob("**/*")
            if entry.is_file()
        )

    return total_bytes / math.pow(1024, sizes[unit])
def rmtree(
    path: Path | str,
    *,
    atomic: bool = False,
    tmp: bool | Path | str = False,
    **kwargs: Any,
) -> None:
    """Delete a file or directory tree.

    Parameters
    ----------
    path : Path | str
        The path to delete.

    atomic : bool = False
        If True, first ``move`` the path out of the way and only then run
        ``rmtree`` on the moved copy, so the original location disappears in
        a single step. The move is not guaranteed to be atomic across file
        systems, and the deletion itself is never atomic.

        * https://docs.python.org/3/library/shutil.html#shutil.move

    tmp : bool | Path | str = False
        Where the atomic move should stage the path before deletion:
        ``True`` uses the system temp directory, ``False`` renames it in
        place next to the original, and an explicit path is used verbatim.

    **kwargs
        Forwarded to ``shutil.rmtree``.

        * https://docs.python.org/3/library/shutil.html#shutil.rmtree
    """
    target = Path(path) if isinstance(path, str) else path

    if not atomic:
        shutil.rmtree(target, **kwargs)
        return

    if isinstance(tmp, bool):
        unique = uuid.uuid4()
        if tmp:
            base = Path(tempfile.gettempdir())
            staging = base / f"autosklearn-{target.name}.old_{unique}"
        else:
            staging = target.parent / f"{target.name}.old_{unique}"
    else:
        staging = tmp if isinstance(tmp, Path) else Path(tmp)

    shutil.move(str(target), str(staging))
    shutil.rmtree(staging, **kwargs)
| bsd-3-clause | bf5bc57c62fa5a444817bfa6a0d9c15b | 23.330097 | 78 | 0.569433 | 3.774096 | false | false | false | false |
automl/auto-sklearn | scripts/04_create_aslib_files.py | 1 | 5742 | from argparse import ArgumentParser
import itertools
import os
import arff
from autosklearn.constants import *
from autosklearn.metrics import CLASSIFICATION_METRICS, REGRESSION_METRICS
if __name__ == "__main__":
    # Collect CLI options: where to read/write, plus the ASlib scenario
    # metadata (id and resource cutoffs) written into description.txt.
    parser = ArgumentParser()
    parser.add_argument("--working-directory", type=str, required=True)
    parser.add_argument("--scenario_id", type=str, default="auto-sklearn")
    parser.add_argument("--algorithm_cutoff_time", type=int, default=1800)
    parser.add_argument("--algorithm_cutoff_memory", type=int, default=3072)
    args = parser.parse_args()

    working_directory = args.working_directory
    output_dir = os.path.join(working_directory, "metadata")
    results_dir = os.path.join(working_directory, "configuration_results")
    metafeatures_dir = os.path.join(working_directory, "metafeatures")
    scenario_id = args.scenario_id
    algorithm_cutoff_time = args.algorithm_cutoff_time
    algorithm_cutoff_memory = args.algorithm_cutoff_memory

    # Create the output directory if necessary
    try:
        os.makedirs(output_dir)
    except (OSError, IOError):
        pass

    for task_type in ("classification", "regression"):
        # Build one (sparse-flag, task, metric) combination per metadata set.
        if task_type == "classification":
            metadata_sets = itertools.product(
                [0, 1],
                [BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION],
                CLASSIFICATION_METRICS,
            )
        elif task_type == "regression":
            metadata_sets = itertools.product([0, 1], [REGRESSION], REGRESSION_METRICS)
        else:
            raise ValueError(task_type)

        # NOTE(review): input_directory is computed but never used below.
        input_directory = os.path.join(working_directory, "configuration", task_type)
        metafeatures_dir_for_task = os.path.join(metafeatures_dir, task_type)

        for sparse, task, metric in metadata_sets:
            print(TASK_TYPES_TO_STRING[task], metric, sparse)

            dir_name = "%s_%s_%s" % (
                metric,
                TASK_TYPES_TO_STRING[task],
                "sparse" if sparse else "dense",
            )
            output_dir_ = os.path.join(output_dir, dir_name)
            results_dir_ = os.path.join(results_dir, dir_name)

            # Skip combinations for which no configuration results exist.
            if not os.path.exists(results_dir_):
                print("Results directory %s does not exist!" % results_dir_)
                continue

            try:
                os.makedirs(output_dir_)
            except Exception:
                pass

            # Create a readme.txt
            with open(os.path.join(output_dir_, "readme.txt"), "w") as fh:
                pass

            # Create description.txt
            with open(
                os.path.join(metafeatures_dir_for_task, "description.features.txt")
            ) as fh:
                description_metafeatures = fh.read()

            with open(os.path.join(results_dir_, "description.results.txt")) as fh:
                description_results = fh.read()

            description = [description_metafeatures, description_results]
            description.append("scenario_id: %s" % scenario_id)
            description.append("maximize: false")
            description.append("algorithm_cutoff_time: %d" % algorithm_cutoff_time)
            description.append("algorithm_cutoff_memory: %d" % algorithm_cutoff_memory)

            with open(os.path.join(output_dir_, "description.txt"), "w") as fh:
                for line in description:
                    fh.write(line)
                    fh.write("\n")

            # Copy feature values and add instance id
            with open(
                os.path.join(metafeatures_dir_for_task, "feature_values.arff")
            ) as fh:
                feature_values = arff.load(fh)
            feature_values["relation"] = scenario_id + "_" + feature_values["relation"]

            with open(os.path.join(output_dir_, "feature_values.arff"), "w") as fh:
                arff.dump(feature_values, fh)

            # Copy feature runstatus and add instance id
            with open(
                os.path.join(metafeatures_dir_for_task, "feature_runstatus.arff")
            ) as fh:
                feature_runstatus = arff.load(fh)
            feature_runstatus["relation"] = (
                scenario_id + "_" + feature_runstatus["relation"]
            )

            with open(os.path.join(output_dir_, "feature_runstatus.arff"), "w") as fh:
                arff.dump(feature_runstatus, fh)

            # Copy feature costs and add instance id, rounding costs to 5 places
            with open(
                os.path.join(metafeatures_dir_for_task, "feature_costs.arff")
            ) as fh:
                feature_costs = arff.load(fh)
            feature_costs["relation"] = scenario_id + "_" + feature_costs["relation"]
            for i in range(len(feature_costs["data"])):
                for j in range(2, len(feature_costs["data"][i])):
                    feature_costs["data"][i][j] = round(feature_costs["data"][i][j], 5)

            with open(os.path.join(output_dir_, "feature_costs.arff"), "w") as fh:
                arff.dump(feature_costs, fh)

            # Copy algorithm runs and add instance id
            with open(os.path.join(results_dir_, "algorithm_runs.arff")) as fh:
                algorithm_runs = arff.load(fh)
            algorithm_runs["relation"] = scenario_id + "_" + algorithm_runs["relation"]

            with open(os.path.join(output_dir_, "algorithm_runs.arff"), "w") as fh:
                arff.dump(algorithm_runs, fh)

            # Copy configurations file
            with open(os.path.join(results_dir_, "configurations.csv")) as fh:
                algorithm_runs = fh.read()
            with open(os.path.join(output_dir_, "configurations.csv"), "w") as fh:
                fh.write(algorithm_runs)
| bsd-3-clause | 5abd95de3a4cda7765410225793de8ec | 38.875 | 87 | 0.576977 | 4.004184 | false | false | false | false |
mozilla/kitsune | kitsune/announcements/models.py | 1 | 3271 | from datetime import datetime
from django.contrib.auth.models import User, Group
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from kitsune.sumo.templatetags.jinja_helpers import wiki_to_html
from kitsune.sumo.models import ModelBase
from kitsune.wiki.models import Locale
class Announcement(ModelBase):
    """A site or group announcement shown during a configurable time window."""

    created = models.DateTimeField(default=datetime.now)
    creator = models.ForeignKey(User, on_delete=models.CASCADE)
    # Start of the visibility window (defaults to creation time).
    show_after = models.DateTimeField(
        default=datetime.now,
        db_index=True,
        verbose_name="Start displaying",
        help_text=("When this announcement will start appearing. " "(US/Pacific)"),
    )
    # End of the visibility window; NULL means "show indefinitely".
    show_until = models.DateTimeField(
        db_index=True,
        null=True,
        blank=True,
        verbose_name="Stop displaying",
        help_text=(
            "When this announcement will stop appearing. "
            "Leave blank for indefinite. (US/Pacific)"
        ),
    )
    # Body text; rendered through the wiki parser (see content_parsed).
    content = models.TextField(
        max_length=10000,
        help_text=("Use wiki syntax or HTML. It will display similar to a document's content."),
    )
    # Optional targeting: restrict to a group and/or a wiki locale.
    group = models.ForeignKey(Group, on_delete=models.CASCADE, null=True, blank=True)
    locale = models.ForeignKey(Locale, on_delete=models.CASCADE, null=True, blank=True)

    def __str__(self):
        """Return a short excerpt, prefixed with the group when targeted."""
        excerpt = self.content[:50]
        if self.group:
            return "[{group}] {excerpt}".format(group=self.group, excerpt=excerpt)
        return "{excerpt}".format(excerpt=excerpt)

    def is_visible(self):
        """Return True if now is within the [show_after, show_until) window."""
        now = datetime.now()
        if now > self.show_after and (not self.show_until or now < self.show_until):
            return True
        return False

    @property
    def content_parsed(self):
        """Return the announcement content rendered from wiki syntax to HTML."""
        return wiki_to_html(self.content.strip())

    @classmethod
    def get_site_wide(cls):
        """Return visible announcements not tied to any group or locale."""
        return cls._visible_query(group=None, locale=None)

    @classmethod
    def get_for_group_id(cls, group_id):
        """Returns visible announcements for a given group id."""
        return cls._visible_query(group__id=group_id)

    @classmethod
    def get_for_locale_name(cls, locale_name):
        """Returns visible announcements for a given locale name."""
        return cls._visible_query(locale__locale=locale_name)

    @classmethod
    def _visible_query(cls, **query_kwargs):
        """Return visible announcements given a group query."""
        return Announcement.objects.filter(
            # Show if interval is specified and current or show_until is None
            Q(show_after__lt=datetime.now())
            & (Q(show_until__gt=datetime.now()) | Q(show_until__isnull=True)),
            **query_kwargs,
        )
def connector(sender, instance, created, **kw):
    """post_save handler: queue the notification email for a new group announcement.

    Only newly created announcements that target a group are emailed (we
    don't want to email everyone). A currently visible announcement is sent
    immediately; one scheduled for the future is queued with an ETA of its
    start time.
    """
    if not (created and instance.group):
        return

    from kitsune.announcements.tasks import send_group_email

    if instance.is_visible():
        send_group_email.delay(instance.pk)
    elif datetime.now() < instance.show_after:
        send_group_email.delay(instance.pk, eta=instance.show_after)
post_save.connect(connector, sender=Announcement, dispatch_uid="email_announcement")
| bsd-3-clause | ee4f4732066f757d4a4b50c4b8a2d428 | 34.554348 | 96 | 0.656374 | 3.861865 | false | false | false | false |
mozilla/kitsune | scripts/cron.py | 1 | 8102 | import datetime
import os
import sys
from subprocess import check_call
import babis
from apscheduler.schedulers.blocking import BlockingScheduler
from django.conf import settings
from django.utils import timezone
# Absolute path to manage.py so jobs work regardless of the working directory.
MANAGE = os.path.join(settings.ROOT, "manage.py")
# Blocking scheduler: schedule.start() (called from run()) never returns.
schedule = BlockingScheduler()
def call_command(command):
    """Run a manage.py command (e.g. "rebuild_kb") in a subprocess.

    :arg command: the management command name, optionally followed by
        space-separated arguments.
    Raises CalledProcessError on a non-zero exit so scheduled_job.run()
    logs the failure.
    """
    # Build an argv list instead of interpolating into a shell string:
    # avoiding shell=True removes quoting surprises and any shell-injection
    # surface, and sys.executable guarantees the same interpreter that is
    # running the clock process.
    check_call([sys.executable, MANAGE] + command.split())
class scheduled_job(object):
    """Decorator for scheduled jobs. Takes same args as apscheduler.schedule_job."""

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        # "skip" is our own flag, not apscheduler's: when truthy the job is
        # not registered at all (e.g. on read-only or stage deployments).
        self.skip = self.kwargs.pop("skip", False)

    def __call__(self, fn):
        """Register fn with the scheduler (unless skipped) and return the runner."""
        self.name = fn.__name__
        if self.skip:
            self.log("Skipped, not registered.")
            return None
        self.callback = fn
        # Register self.run (not fn directly) so every execution is logged.
        schedule.add_job(self.run, id=self.name, *self.args, **self.kwargs)
        self.log("Registered.")
        return self.run

    def run(self):
        """Execute the wrapped job, logging start/finish/crash."""
        self.log("starting")
        try:
            self.callback()
        except Exception as e:
            self.log("CRASHED: {}".format(e))
            # Re-raise so apscheduler also records the failure.
            raise
        else:
            self.log("finished successfully")

    def log(self, message):
        # Log to stderr so output is visible to the process supervisor.
        msg = "[{}] Clock job {}@{}: {}".format(
            datetime.datetime.utcnow(), self.name, settings.PLATFORM_NAME, message
        )
        print(msg, file=sys.stderr)
# Job definitions. Each @babis.decorator pings a monitoring URL (Dead Man's
# Snitch) after a successful run so missed runs can be alerted on. The
# skip= flags keep write jobs off read-only clusters and some jobs off stage.

# Every 10 minutes.
@scheduled_job("cron", month="*", day="*", hour="*", minute="*/10", max_instances=1, coalesce=True)
@babis.decorator(ping_after=settings.DMS_ENQUEUE_LAG_MONITOR_TASK)
def job_enqueue_lag_monitor_task():
    call_command("enqueue_lag_monitor_task")


# Every hour.
@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="*",
    minute="30",
    max_instances=1,
    coalesce=True,
    skip=settings.READ_ONLY,
)
@babis.decorator(ping_after=settings.DMS_SEND_WELCOME_EMAILS)
def job_send_welcome_emails():
    call_command("send_welcome_emails")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="*",
    minute="45",
    max_instances=1,
    coalesce=True,
    # only run on readonly clusters, where no signals will be triggered:
    skip=(not settings.READ_ONLY),
)
@babis.decorator(ping_after=settings.DMS_REINDEX_ES7)
def job_reindex_es7():
    # Index items newer than 90 minutes old in ES7
    after = (timezone.now() - datetime.timedelta(minutes=90)).isoformat()
    call_command("es7_reindex --updated-after {}".format(after))


# Every 6 hours.
@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="*/6",
    minute="00",
    max_instances=1,
    coalesce=True,
    skip=settings.READ_ONLY,
)
@babis.decorator(ping_after=settings.DMS_UPDATE_PRODUCT_DETAILS)
def job_update_product_details():
    call_command("update_product_details")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="*/6",
    minute="20",
    max_instances=1,
    coalesce=True,
    skip=(settings.READ_ONLY or settings.STAGE),
)
@babis.decorator(ping_after=settings.DMS_GENERATE_MISSING_SHARE_LINKS)
def job_generate_missing_share_links():
    call_command("generate_missing_share_links")


# Once per day.
@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="00",
    minute="00",
    max_instances=1,
    coalesce=True,
    skip=settings.READ_ONLY,
)
@babis.decorator(ping_after=settings.DMS_REBUILD_KB)
def job_rebuild_kb():
    call_command("rebuild_kb")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="00",
    minute="42",
    max_instances=1,
    coalesce=True,
    skip=settings.READ_ONLY,
)
@babis.decorator(ping_after=settings.DMS_UPDATE_TOP_CONTRIBUTORS)
def job_update_top_contributors():
    call_command("update_top_contributors")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="01",
    minute="00",
    max_instances=1,
    coalesce=True,
    skip=settings.READ_ONLY,
)
@babis.decorator(ping_after=settings.DMS_UPDATE_L10N_COVERAGE_METRICS)
def job_update_l10n_coverage_metrics():
    call_command("update_l10n_coverage_metrics")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="01",
    minute="11",
    max_instances=1,
    coalesce=True,
    skip=settings.READ_ONLY,
)
@babis.decorator(ping_after=settings.DMS_REPORT_EMPLOYEE_ANSWERS)
def job_report_employee_answers():
    call_command("report_employee_answers")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="01",
    minute="40",
    max_instances=1,
    coalesce=True,
    skip=settings.READ_ONLY,
)
@babis.decorator(ping_after=settings.DMS_UPDATE_WEEKLY_VOTES)
def job_update_weekly_votes():
    call_command("update_weekly_votes")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="03",
    minute="00",
    max_instances=1,
    coalesce=True,
    skip=(settings.READ_ONLY or settings.STAGE),
)
@babis.decorator(ping_after=settings.DMS_UPDATE_CONTRIBUTOR_METRICS)
def job_update_contributor_metrics():
    call_command("update_contributor_metrics")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="04",
    minute="00",
    max_instances=1,
    coalesce=True,
    skip=settings.READ_ONLY,
)
@babis.decorator(ping_after=settings.DMS_AUTO_ARCHIVE_OLD_QUESTIONS)
def job_auto_archive_old_questions():
    call_command("auto_archive_old_questions")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="10",
    minute="00",
    max_instances=1,
    coalesce=True,
    skip=(settings.READ_ONLY or settings.STAGE),
)
@babis.decorator(ping_after=settings.DMS_UPDATE_L10N_METRIC)
def job_update_l10n_metric():
    call_command("update_l10n_metric")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="16",
    minute="00",
    max_instances=1,
    coalesce=True,
    skip=(settings.READ_ONLY or settings.STAGE),
)
@babis.decorator(ping_after=settings.DMS_RELOAD_WIKI_TRAFFIC_STATS)
def job_reload_wiki_traffic_stats():
    call_command("reload_wiki_traffic_stats")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="21",
    minute="00",
    max_instances=1,
    coalesce=True,
    skip=settings.READ_ONLY,
)
@babis.decorator(ping_after=settings.DMS_CACHE_MOST_UNHELPFUL_KB_ARTICLES)
def job_cache_most_unhelpful_kb_articles():
    call_command("cache_most_unhelpful_kb_articles")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="23",
    minute="00",
    max_instances=1,
    coalesce=True,
    skip=(settings.READ_ONLY or settings.STAGE),
)
@babis.decorator(ping_after=settings.DMS_RELOAD_QUESTION_TRAFFIC_STATS)
def job_reload_question_traffic_stats():
    call_command("reload_question_traffic_stats")


# Weekly jobs (day_of_week: 0=Monday ... assumed apscheduler convention --
# confirm against the scheduler's docs before changing).
@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="04",
    minute="00",
    day_of_week=5,
    max_instances=1,
    coalesce=True,
    skip=(settings.READ_ONLY or settings.STAGE),
)
@babis.decorator(ping_after=settings.DMS_SEND_WEEKLY_READY_FOR_REVIEW_DIGEST)
def job_send_weekly_ready_for_review_digest():
    call_command("send_weekly_ready_for_review_digest")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="00",
    minute="00",
    day_of_week=0,
    max_instances=1,
    coalesce=True,
    skip=settings.READ_ONLY,
)
@babis.decorator(ping_after=settings.DMS_FIX_CURRENT_REVISIONS)
def job_fix_current_revisions():
    call_command("fix_current_revisions")


@scheduled_job(
    "cron",
    month="*",
    day="*",
    hour="00",
    minute="30",
    day_of_week=1,
    max_instances=1,
    coalesce=True,
    skip=settings.READ_ONLY,
)
@babis.decorator(ping_after=settings.DMS_COHORT_ANALYSIS)
def job_cohort_analysis():
    call_command("cohort_analysis")


# Once per month
@scheduled_job(
    "cron",
    month="*",
    day="1",
    hour="00",
    minute="30",
    max_instances=1,
    coalesce=True,
    skip=settings.READ_ONLY,
)
@babis.decorator(ping_after=settings.DMS_UPDATE_L10N_CONTRIBUTOR_METRICS)
def job_update_l10n_contributor_metrics():
    call_command("update_l10n_contributor_metrics")
def run():
    """Start the blocking scheduler; does not return until interrupted."""
    try:
        schedule.start()
    except (KeyboardInterrupt, SystemExit):
        # Normal shutdown path (Ctrl-C / SIGTERM): exit quietly.
        pass
| bsd-3-clause | 719721b540179bb34fd48d14d22249c2 | 21.505556 | 99 | 0.638361 | 3.156213 | false | false | false | false |
mozilla/kitsune | kitsune/tidings/utils.py | 1 | 3654 | from importlib import import_module
from zlib import crc32
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse as django_reverse
from django.utils.module_loading import import_string
def collate(*iterables, **kwargs):
    """Lazily merge several already-sorted iterables into one sorted stream.

    Items are compared via the ``key`` kwarg (identity by default). Pass
    ``reverse=True`` when the inputs are sorted descending; the output is
    then descending as well. Falsy iterables are skipped entirely.
    """
    key = kwargs.pop("key", lambda a: a)
    reverse = kwargs.pop("reverse", False)
    pick = max if reverse else min
    iterators = [iter(it) for it in iterables if it]
    # pending[idx] is the not-yet-yielded head item of iterator idx;
    # candidates holds (key(item), idx) pairs for those heads.
    pending = {}
    candidates = []

    def advance(iterator, idx):
        # Pull the next head from one iterator, if it isn't exhausted.
        try:
            item = next(iterator)
        except StopIteration:
            pass
        else:
            pending[idx] = item
            candidates.append((key(item), idx))

    for idx, iterator in enumerate(iterators):
        advance(iterator, idx)
    while candidates:
        best = pick(candidates)
        candidates.remove(best)
        _, idx = best
        yield pending.pop(idx)
        advance(iterators[idx], idx)
def hash_to_unsigned(data):
    """Hash ``data`` to an unsigned 4-byte int.

    Strings (str) are hashed with CRC32, masked to an unsigned 32-bit
    value so the result is identical across Python versions and
    platforms (per the zlib docs). Anything else is assumed to already
    be an int-like value that fits the field and is returned as an int.

    CRC32 is not a general-purpose hash, but it is collision-free over
    small English-word-like value sets, which is what WatchFilter values
    look like; callers with large or unusual value sets should hash
    themselves and pass ints.
    """
    if isinstance(data, str):
        return crc32(data.encode("utf-8")) & 0xFFFFFFFF
    return int(data)
def import_from_setting(setting_name, fallback):
    """Resolve an import path stored in a Django setting.

    :arg setting_name: The name of the setting holding the import path
    :arg fallback: An alternate object to use if the setting is empty or
        doesn't exist

    Raise ImproperlyConfigured if a path is given that can't be resolved.
    """
    path = getattr(settings, setting_name, None)
    if not path:
        return fallback
    try:
        return import_string(path)
    except ImportError:
        raise ImproperlyConfigured("%s: No such path." % path)
# Here to be imported by others:
# reverse() resolves URLs via the host app's TIDINGS_REVERSE setting when
# provided, falling back to Django's own reverse.
reverse = import_from_setting("TIDINGS_REVERSE", django_reverse)  # no QA
def get_class(module_name, class_name):
    """
    Convenience function for extracting a class from the given module name using
    the given class name.
    """
    return getattr(import_module(module_name), class_name)
| bsd-3-clause | 696bae4799e5a480b8c96a0c04af4b9e | 33.471698 | 80 | 0.67734 | 4.119504 | false | false | false | false |
mozilla/kitsune | kitsune/notifications/migrations/0001_initial.py | 1 | 2362 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the notifications app.

    Creates Notification (an actstream Action delivered to a user),
    PushNotificationRegistration (a push endpoint per user) and
    RealtimeRegistration (an endpoint tied to an arbitrary object via
    a generic content_type/object_id pair).
    """

    dependencies = [
        ('actstream', '__first__'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contenttypes', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # NULL read_at means "unread".
                ('read_at', models.DateTimeField(null=True, blank=True)),
                ('action', models.ForeignKey(on_delete=models.CASCADE, to='actstream.Action')),
                ('owner', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='PushNotificationRegistration',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(default=datetime.datetime.now)),
                ('push_url', models.CharField(max_length=256)),
                ('creator', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='RealtimeRegistration',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', models.DateTimeField(default=datetime.datetime.now)),
                ('endpoint', models.CharField(max_length=256)),
                # Generic FK target: (content_type, object_id).
                ('object_id', models.PositiveIntegerField()),
                ('content_type', models.ForeignKey(on_delete=models.CASCADE, to='contenttypes.ContentType')),
                ('creator', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
            bases=(models.Model,),
        ),
    ]
| bsd-3-clause | 51dc8cf5d11ba4dd434b607db02c6f67 | 39.033898 | 114 | 0.558848 | 4.677228 | false | false | false | false |
mozilla/kitsune | kitsune/kpi/management/commands/update_l10n_metric.py | 1 | 2630 | from datetime import date, timedelta
from django.conf import settings
from django.core.management.base import BaseCommand
from kitsune.kpi.management import utils
from kitsune.kpi.models import L10N_METRIC_CODE, Metric, MetricKind
from kitsune.sumo import googleanalytics
class Command(BaseCommand):
    help = "Calculate new l10n coverage numbers and save."

    def handle(self, **options):
        """
        L10n coverage is a measure of the amount of translations that are
        up to date, weighted by the number of visits for each locale.

        The "algorithm" (see Bug 727084):
        SUMO visits = Total SUMO visits for the last 30 days;
        Total translated = 0;

        For each locale {
            Total up to date = Total up to date +
                ((Number of up to date articles in the en-US top 50 visited)/50 ) *
                (Visitors for that locale / SUMO visits));
        }

        An up to date article is any of the following:
        * An en-US article (by definition it is always up to date)
        * The latest en-US revision has been translated
        * There are only new revisions with TYPO_SIGNIFICANCE not translated
        * There is only one revision of MEDIUM_SIGNIFICANCE not translated
        """
        # Get the top 60 visited articles. We will only use the top 50
        # but a handful aren't localizable so we get some extras.
        top_60_docs = utils._get_top_docs(60)

        # Get the visits to each locale in the last 30 days.
        end = date.today() - timedelta(days=1)  # yesterday
        start = end - timedelta(days=30)
        locale_visits = googleanalytics.visitors_by_locale(start, end)

        # Total visits.
        total_visits = sum(locale_visits.values())

        # Calculate the coverage.
        coverage = 0
        for locale, visits in locale_visits.items():
            if locale == settings.WIKI_DEFAULT_LANGUAGE:
                # en-US is always fully up to date by definition.
                num_docs = utils.MAX_DOCS_UP_TO_DATE
                up_to_date_docs = utils.MAX_DOCS_UP_TO_DATE
            else:
                up_to_date_docs, num_docs = utils._get_up_to_date_count(top_60_docs, locale)

            if num_docs and total_visits:
                # Visit-weighted share of up-to-date docs for this locale.
                coverage += (float(up_to_date_docs) / num_docs) * (float(visits) / total_visits)

        # Save the value to Metric table.
        metric_kind = MetricKind.objects.get_or_create(code=L10N_METRIC_CODE)[0]
        day = date.today()
        Metric.objects.create(
            kind=metric_kind,
            start=day,
            end=day + timedelta(days=1),
            value=int(coverage * 100),
        )  # Store as a % int.
mozilla/kitsune | kitsune/wiki/migrations/0010_change_locale_bn_BD_to_bn.py | 2 | 1573 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-08-16 16:54
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import OuterRef, Exists
def change_locale_bn_bd_to_bn_forwards(apps, schema_editor):
    """Merge the bn-BD and bn-IN locales into plain 'bn'.

    bn-BD wins when a document has translations in both locales: bn-IN
    documents are renamed to 'bn' only where no bn-BD sibling (same
    parent) exists; then all remaining bn-BD rows become 'bn'.
    """
    Document = apps.get_model('wiki', 'Document')
    DraftRevision = apps.get_model('wiki', 'DraftRevision')
    Locale = apps.get_model('wiki', 'Locale')

    # Change the locale of the documents that do not have a bn-BD translation
    bn_bd_document = (Document.objects.only('id').all()
                      .filter(parent=OuterRef('parent'), locale='bn-BD'))
    bn_in_documents = (Document.objects.all().filter(locale='bn-IN')
                       .annotate(has_bn_bd=Exists(bn_bd_document))
                       .exclude(has_bn_bd=True)
                       .values_list('id', flat=True))
    Document.objects.all().filter(locale='bn-IN', id__in=list(bn_in_documents)).update(locale='bn')
    Document.objects.all().filter(locale='bn-BD').update(locale='bn')
    DraftRevision.objects.all().filter(locale='bn-BD').update(locale='bn')
    Locale.objects.all().filter(locale='bn-BD').update(locale='bn')


def change_locale_bn_to_bn_bd_backwards(apps, schema_editor):
    # Irreversible in practice: the bn-BD/bn-IN split cannot be recovered.
    pass


class Migration(migrations.Migration):

    dependencies = [
        ('wiki', '0009_auto_20190507_1052'),
    ]

    operations = [
        migrations.RunPython(change_locale_bn_bd_to_bn_forwards, change_locale_bn_to_bn_bd_backwards)
    ]
| bsd-3-clause | 57bcff2041fcfa324c91724b8b7fb91a | 36.452381 | 101 | 0.612842 | 3.616092 | false | false | false | false |
mozilla/kitsune | kitsune/sumo/tests/test_json_decorator.py | 1 | 2178 | import json
from django import http
from django.core.exceptions import PermissionDenied
from django.test import RequestFactory, TestCase
from kitsune.sumo.decorators import json_view
rf = RequestFactory()
JSON = "application/json"


class JsonViewTests(TestCase):
    """Tests for the json_view decorator's serialization and error mapping."""

    def test_object(self):
        # A returned dict is serialized verbatim as the JSON body.
        data = {
            "foo": "bar",
            "baz": "qux",
            "quz": [{"foo": "bar"}],
        }
        expect = json.dumps(data).encode()

        @json_view
        def temp(req):
            return data

        res = temp(rf.get("/"))
        self.assertEqual(200, res.status_code)
        self.assertEqual(expect, res.content)
        self.assertEqual(JSON, res["content-type"])

    def test_list(self):
        # Lists are valid top-level JSON payloads too.
        data = ["foo", "bar", "baz"]
        expect = json.dumps(data).encode()

        @json_view
        def temp(req):
            return data

        res = temp(rf.get("/"))
        self.assertEqual(200, res.status_code)
        self.assertEqual(expect, res.content)
        self.assertEqual(JSON, res["content-type"])

    def test_404(self):
        # Http404 becomes a JSON error body with "error" and "message" keys.
        @json_view
        def temp(req):
            raise http.Http404("foo")

        res = temp(rf.get("/"))
        self.assertEqual(404, res.status_code)
        self.assertEqual(JSON, res["content-type"])
        data = json.loads(res.content)
        self.assertEqual(404, data["error"])
        self.assertEqual("foo", data["message"])

    def test_permission(self):
        # PermissionDenied maps to 403.
        @json_view
        def temp(req):
            raise PermissionDenied("bar")

        res = temp(rf.get("/"))
        self.assertEqual(403, res.status_code)
        self.assertEqual(JSON, res["content-type"])
        data = json.loads(res.content)
        self.assertEqual(403, data["error"])
        self.assertEqual("bar", data["message"])

    def test_server_error(self):
        # Any other exception maps to 500.
        @json_view
        def temp(req):
            raise TypeError("fail")

        res = temp(rf.get("/"))
        self.assertEqual(500, res.status_code)
        self.assertEqual(JSON, res["content-type"])
        data = json.loads(res.content)
        self.assertEqual(500, data["error"])
        self.assertEqual("fail", data["message"])
mozilla/kitsune | kitsune/kbadge/migrations/0004_auto_20200629_0826.py | 1 | 1098 | # Generated by Django 2.2.13 on 2020-06-29 08:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated field tweaks for Award/Badge (help_text, blank/null)."""

    dependencies = [
        ('kbadge', '0003_auto_20190816_1824'),
    ]

    operations = [
        migrations.AlterField(
            model_name='award',
            name='description',
            field=models.TextField(blank=True, help_text='Explanation and evidence for the badge award'),
        ),
        migrations.AlterField(
            model_name='award',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='uploads/badges/'),
        ),
        migrations.AlterField(
            model_name='badge',
            name='image',
            field=models.ImageField(blank=True, help_text='Must be square. Recommended 256x256.', null=True, upload_to='uploads/badges/'),
        ),
        migrations.AlterField(
            model_name='badge',
            name='unique',
            field=models.BooleanField(default=True, help_text='Should awards of this badge be limited to one-per-person?'),
        ),
    ]
| bsd-3-clause | 733bc1ff560f0f32f19cbf0204d57e3a | 32.272727 | 138 | 0.590164 | 4.159091 | false | false | false | false |
mozilla/kitsune | kitsune/upload/tests/test_models.py | 1 | 1333 | from django.contrib.contenttypes.models import ContentType
from django.core.files import File
from kitsune.questions.tests import QuestionFactory
from kitsune.sumo.tests import TestCase
from kitsune.upload.models import ImageAttachment
from kitsune.upload.tasks import generate_thumbnail
from kitsune.users.tests import UserFactory
class ImageAttachmentTestCase(TestCase):
    """Tests for ImageAttachment and its thumbnail helper."""

    def setUp(self):
        super(ImageAttachmentTestCase, self).setUp()
        self.user = UserFactory()
        self.obj = QuestionFactory()
        self.ct = ContentType.objects.get_for_model(self.obj)

    def tearDown(self):
        # Remove attachments created during the test (and their files).
        ImageAttachment.objects.all().delete()
        super(ImageAttachmentTestCase, self).tearDown()

    def test_thumbnail_if_set(self):
        """thumbnail_if_set() returns self.thumbnail if set, or else returns
        self.file"""
        image = ImageAttachment(content_object=self.obj, creator=self.user)
        with open("kitsune/upload/tests/media/test.jpg", "rb") as f:
            up_file = File(f)
            image.file.save(up_file.name, up_file, save=True)

        # No thumbnail generated yet: falls back to the original file.
        self.assertEqual(image.file, image.thumbnail_if_set())

        generate_thumbnail("upload.ImageAttachment", image.id, "file", "thumbnail")
        image.refresh_from_db()
        # Thumbnail now exists and takes precedence.
        self.assertEqual(image.thumbnail, image.thumbnail_if_set())
mozilla/kitsune | kitsune/search/tests/test_views.py | 1 | 2661 | import json
from pyquery import PyQuery as pq
from kitsune.search.tests import Elastic7TestCase
from kitsune.sumo.urlresolvers import reverse
class TestSearchSEO(Elastic7TestCase):
    """Test SEO-related aspects of the SUMO search view."""

    def test_simple_search(self):
        """
        Test SEO-related response for search.
        """
        url = reverse("search", locale="en-US")
        response = self.client.get(f"{url}?q=firefox")
        self.assertEqual(response.status_code, 200)
        self.assertTrue("text/html" in response["content-type"])
        doc = pq(response.content)
        # Search results must never be indexed by crawlers.
        self.assertEqual(doc('meta[name="robots"]').attr("content"), "noindex, nofollow")
        # TODO: Are these old Webtrends meta tags even useful any longer?
        self.assertEqual(doc('meta[name="WT.oss"]').attr("content"), "firefox")
        self.assertEqual(doc('meta[name="WT.oss_r"]').attr("content"), "0")

    def test_simple_search_json(self):
        """
        Test SEO-related response for search when JSON is requested.
        """
        url = reverse("search", locale="en-US")
        response = self.client.get(f"{url}?format=json&q=firefox")
        self.assertEqual(response.status_code, 200)
        self.assertTrue("application/json" in response["content-type"])
        # JSON responses carry the directive in a header instead of markup.
        self.assertTrue("x-robots-tag" in response)
        self.assertEqual(response["x-robots-tag"], "noindex, nofollow")

    def test_invalid_search(self):
        """
        Test SEO-related response for invalid search.
        """
        url = reverse("search", locale="en-US")
        response = self.client.get(f"{url}?abc=firefox")
        self.assertEqual(response.status_code, 200)
        self.assertTrue("text/html" in response["content-type"])
        doc = pq(response.content)
        self.assertEqual(doc('meta[name="robots"]').attr("content"), "noindex, nofollow")
        # TODO: Are these old Webtrends meta tags even useful any longer?
        self.assertFalse(doc.find('meta[name="WT.oss"]'))
        self.assertFalse(doc.find('meta[name="WT.oss_r"]'))

    def test_invalid_search_json(self):
        """
        Test SEO-related response for invalid search when JSON is requested.
        """
        url = reverse("search", locale="en-US")
        response = self.client.get(f"{url}?format=json&abc=firefox")
        self.assertEqual(response.status_code, 400)
        self.assertTrue("application/json" in response["content-type"])
        self.assertEqual(json.loads(response.content), {"error": "Invalid search data."})
        self.assertTrue("x-robots-tag" in response)
        self.assertEqual(response["x-robots-tag"], "noindex")
web2py/pydal | pydal/restapi.py | 1 | 22878 | import collections
import copy
import datetime
import fnmatch
import functools
import re
import traceback
__version__ = "0.1"
__all__ = ["RestAPI", "Policy", "ALLOW_ALL_POLICY", "DENY_ALL_POLICY"]

# Hard ceiling on GET page size; per-table policies can only lower it.
MAX_LIMIT = 1000
class PolicyViolation(ValueError):
    """Request denied by policy; error_wrapper maps this to HTTP 401."""

    pass


class InvalidFormat(ValueError):
    """Malformed request or policy definition; mapped to HTTP 400."""

    pass


class NotFound(ValueError):
    """Record not found; mapped to HTTP 404."""

    pass
def maybe_call(value):
    """Return value() when value is callable, otherwise value unchanged."""
    if callable(value):
        return value()
    return value
def error_wrapper(func):
    """Decorator: normalize RestAPI results and errors into a response dict.

    On success adds status/code (200, or 422 when the result carries
    validation "errors"); on exception converts it to status/message/code
    (401 policy, 404 not found, 400 bad input). Always stamps timestamp
    and api_version.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        data = {}
        try:
            data = func(*args, **kwargs)
            if not data.get("errors"):
                data["status"] = "success"
                data["code"] = 200
            else:
                # Validation failures: 422 Unprocessable Entity.
                data["status"] = "error"
                data["message"] = "Validation Errors"
                data["code"] = 422
        # Clause order matters: PolicyViolation and NotFound are ValueError
        # subclasses, so they must be caught before the generic clause below.
        except PolicyViolation as e:
            print(traceback.format_exc())
            data["status"] = "error"
            data["message"] = str(e)
            data["code"] = 401
        except NotFound as e:
            print(traceback.format_exc())
            data["status"] = "error"
            data["message"] = str(e)
            data["code"] = 404
        except (InvalidFormat, KeyError, ValueError) as e:
            print(traceback.format_exc())
            data["status"] = "error"
            data["message"] = str(e)
            data["code"] = 400
        finally:
            # Stamp every response, even when an unexpected exception escapes.
            data["timestamp"] = datetime.datetime.utcnow().isoformat()
            data["api_version"] = __version__
        return data

    return wrapper
class Policy(object):
    """Per-table access rules consulted by RestAPI.

    ``self.info`` maps a tablename (or the "*" wildcard) to a dict of
    per-method settings; ``model`` below is the deny-by-default template.
    Setting values may be callables; they are resolved via maybe_call.
    """

    # Default (deny-everything) settings for each supported HTTP method.
    model = {
        "POST": {"authorize": False, "fields": None},
        "PUT": {"authorize": False, "fields": None},
        "DELETE": {"authorize": False},
        "GET": {
            "authorize": False,
            "fields": None,
            "query": None,
            "allowed_patterns": [],
            "denied_patterns": [],
            "limit": MAX_LIMIT,
            "allow_lookup": False,
        },
    }

    def __init__(self):
        # tablename -> deep copy of ``model`` with per-table overrides.
        self.info = {}

    def set(self, tablename, method="GET", **attributes):
        """Override settings for (tablename, method).

        Raises InvalidFormat for an unknown method or setting name.
        """
        method = method.upper()
        if not method in self.model:
            raise InvalidFormat("Invalid policy method: %s" % method)
        invalid_keys = [key for key in attributes if key not in self.model[method]]
        if invalid_keys:
            raise InvalidFormat("Invalid keys: %s" % ",".join(invalid_keys))
        if not tablename in self.info:
            # Deep-copy so per-table edits never mutate the shared template.
            self.info[tablename] = copy.deepcopy(self.model)
        self.info[tablename][method].update(attributes)

    def get(self, tablename, method, name):
        """Return one setting value, falling back to the "*" wildcard table.

        Callable settings are invoked; raises PolicyViolation if no policy
        exists for the table at all.
        """
        policy = self.info.get(tablename) or self.info.get("*")
        if not policy:
            raise PolicyViolation("No policy for this object")
        return maybe_call(policy[method][name])

    def check_if_allowed(
        self, method, tablename, id=None, get_vars=None, post_vars=None, exceptions=True
    ):
        """Return True if the request passes policy.

        With exceptions=True (default) a failure raises PolicyViolation;
        with exceptions=False it returns False instead.
        """
        get_vars = get_vars or {}
        post_vars = post_vars or {}
        policy = self.info.get(tablename) or self.info.get("*")
        if not policy:
            if exceptions:
                raise PolicyViolation("No policy for this object")
            return False
        policy = policy.get(method.upper())
        if not policy:
            if exceptions:
                raise PolicyViolation("No policy for this method")
            return False
        # "authorize" may be a bool or a callable receiving the request parts.
        authorize = policy.get("authorize")
        if authorize is False or (
            callable(authorize) and not authorize(tablename, id, get_vars, post_vars)
        ):
            if exceptions:
                raise PolicyViolation("Not authorized")
            return False
        # Every query-string key must avoid denied patterns and (unless the
        # "**" catch-all is present) match at least one allowed pattern.
        for key in get_vars:
            if any(fnmatch.fnmatch(key, p) for p in policy["denied_patterns"]):
                if exceptions:
                    raise PolicyViolation("Pattern is not allowed")
                return False
            allowed_patterns = policy["allowed_patterns"]
            if "**" not in allowed_patterns and not any(
                fnmatch.fnmatch(key, p) for p in allowed_patterns
            ):
                if exceptions:
                    raise PolicyViolation("Pattern is not explicitely allowed")
                return False
        return True

    def check_if_lookup_allowed(self, tablename, exceptions=True):
        """Return True if @lookup joins into this table are permitted (GET)."""
        policy = self.info.get(tablename) or self.info.get("*")
        if not policy:
            if exceptions:
                raise PolicyViolation("No policy for this object")
            return False
        policy = policy.get("GET")
        if not policy:
            if exceptions:
                raise PolicyViolation("No policy for this method")
            return False
        if policy.get("allow_lookup"):
            return True
        return False

    def allowed_fieldnames(self, table, method="GET"):
        """Field names accessible for ``method``.

        Falls back to the table's readable (GET) or writable (POST/PUT)
        fields when the policy does not list fields explicitly.
        """
        method = method.upper()
        policy = self.info.get(table._tablename) or self.info.get("*", {})
        policy = policy[method]
        allowed_fieldnames = policy.get("fields")
        if allowed_fieldnames is None:
            allowed_fieldnames = [
                f.name
                for f in table
                if (method == "GET" and maybe_call(f.readable))
                or (method != "GET" and maybe_call(f.writable))
            ]
        return allowed_fieldnames

    def check_fieldnames(self, table, fieldnames, method="GET"):
        """Raise InvalidFormat if any of ``fieldnames`` is not allowed."""
        allowed_fieldnames = self.allowed_fieldnames(table, method)
        invalid_fieldnames = set(fieldnames) - set(allowed_fieldnames)
        if invalid_fieldnames:
            raise InvalidFormat("Invalid fields: %s" % list(invalid_fieldnames))
# Ready-made policies: a fresh Policy denies everything by default, and the
# wildcard ALLOW_ALL_POLICY permits every method on every table.
DENY_ALL_POLICY = Policy()
ALLOW_ALL_POLICY = Policy()
ALLOW_ALL_POLICY.set(
    tablename="*",
    method="GET",
    authorize=True,
    allowed_patterns=["**"],
    allow_lookup=True,
)
ALLOW_ALL_POLICY.set(tablename="*", method="POST", authorize=True)
ALLOW_ALL_POLICY.set(tablename="*", method="PUT", authorize=True)
ALLOW_ALL_POLICY.set(tablename="*", method="DELETE", authorize=True)
class RestAPI(object):
    """Policy-driven REST front end over a pyDAL database."""

    # Matches a "table" or "table[field1,field2]" spec.
    # NOTE(review): inside the optional part, [...] is a character class,
    # so this is looser than the bracketed-field-list syntax it appears to
    # target -- confirm before tightening.
    re_table_and_fields = re.compile(r"\w+([\w+(,\w+)+])?")
    # Matches @lookup expressions such as "ref", "alias:ref[f1,f2].other".
    re_lookups = re.compile(
        r"((\w*\!?\:)?(\w+(\[\w+(,\w+)*\])?)(\.\w+(\[\w+(,\w+)*\])?)*)"
    )
    # Strips "[...]" field lists out of a lookup chain.
    re_no_brackets = re.compile(r"\[.*?\]")
def __init__(self, db, policy):
    """Bind the API to a DAL instance and a Policy (None disables checks)."""
    self.db = db
    self.policy = policy
    # "legacy" preserves historical @count behavior; __call__ may override.
    self.allow_count = "legacy"
@error_wrapper
def __call__(
    self,
    method,
    tablename,
    id=None,
    get_vars=None,
    post_vars=None,
    allow_count="legacy",
):
    """Dispatch one REST request; error_wrapper converts exceptions to codes.

    :arg method: HTTP verb (case-insensitive)
    :arg tablename: table name, optionally "table[f1,f2]" for GET
    :arg id: record id for item-level GET/PUT/DELETE
    """
    method = method.upper()
    get_vars = get_vars or {}
    post_vars = post_vars or {}
    self.allow_count = allow_count
    # validate incoming request
    tname, tfieldnames = RestAPI.parse_table_and_fields(tablename)
    if not tname in self.db.tables:
        raise InvalidFormat("Invalid table name: %s" % tname)
    if self.policy:
        self.policy.check_if_allowed(method, tablename, id, get_vars, post_vars)
        if method in ["POST", "PUT"]:
            self.policy.check_fieldnames(
                self.db[tablename], post_vars.keys(), method
            )
    # apply rules
    # NOTE(review): the non-GET branches index self.db with the raw
    # ``tablename`` (not the parsed ``tname``); a bracketed spec on
    # POST/PUT/DELETE would fail the table lookup -- confirm intended.
    if method == "GET":
        if id:
            # Item-level GET is expressed as a search filtered on id.
            get_vars["id.eq"] = id
        return self.search(tablename, get_vars)
    elif method == "POST":
        table = self.db[tablename]
        return table.validate_and_insert(**post_vars).as_dict()
    elif method == "PUT":
        id = id or post_vars["id"]
        if not id:
            raise InvalidFormat("No item id specified")
        table = self.db[tablename]
        data = table.validate_and_update(id, **post_vars).as_dict()
        # No validation errors and nothing updated => the id did not exist.
        if not data.get("errors") and not data.get("updated"):
            raise NotFound("Item not found")
        return data
    elif method == "DELETE":
        id = id or post_vars["id"]
        if not id:
            raise InvalidFormat("No item id specified")
        table = self.db[tablename]
        deleted = self.db(table._id == id).delete()
        if not deleted:
            raise NotFound("Item not found")
        return {"deleted": deleted}
def table_model(self, table, fieldnames):
    """ converts a table into its form template """
    # One dict per exposed field, describing type/constraints/writability,
    # loosely following the collection+json template-validation extension.
    items = []
    fields = post_fields = put_fields = table.fields
    if self.policy:
        fields = self.policy.allowed_fieldnames(table, method="GET")
        put_fields = self.policy.allowed_fieldnames(table, method="PUT")
        post_fields = self.policy.allowed_fieldnames(table, method="POST")
    for fieldname in fields:
        # Caller may narrow the model to an explicit field subset.
        if fieldnames and not fieldname in fieldnames:
            continue
        field = table[fieldname]
        item = {"name": field.name, "label": field.label}
        # https://github.com/collection-json/extensions/blob/master/template-validation.md
        item["default"] = (
            field.default() if callable(field.default) else field.default
        )
        # DAL types look like "reference other_table" or "decimal(10,2)";
        # keep the bare type name and surface the referenced table.
        parts = field.type.split()
        item["type"] = parts[0].split("(")[0]
        if len(parts) > 1:
            item["references"] = parts[1]
        if hasattr(field, "regex"):
            item["regex"] = field.regex
        item["required"] = field.required
        item["unique"] = field.unique
        item["post_writable"] = field.name in post_fields
        item["put_writable"] = field.name in put_fields
        item["options"] = field.options
        if field.type == "id":
            # List tables that reference this one, but only those the
            # policy lets the caller GET.
            item["referenced_by"] = [
                "%s.%s" % (f._tablename, f.name)
                for f in table._referenced_by
                if self.policy
                and self.policy.check_if_allowed(
                    "GET", f._tablename, exceptions=False
                )
            ]
        items.append(item)
    return items
@staticmethod
def make_query(field, condition, value):
expression = {
"eq": lambda: field == value,
"ne": lambda: field != value,
"lt": lambda: field < value,
"gt": lambda: field > value,
"le": lambda: field <= value,
"ge": lambda: field >= value,
"startswith": lambda: field.startswith(str(value)),
"in": lambda: field.belongs(
value.split(",") if isinstance(value, str) else list(value)
),
"contains": lambda: field.contains(value),
}
return expression[condition]()
@staticmethod
def parse_table_and_fields(text):
    """Split ``"table[f1,f2]"`` into ``("table", ["f1", "f2"])``.

    A bare table name yields ``(name, [])``.  Raises ValueError when *text*
    does not match the expected grammar.
    """
    if not RestAPI.re_table_and_fields.match(text):
        # Include the offending text: the original raised a bare
        # ValueError with no message, which made API errors opaque.
        raise ValueError("invalid table/fields expression: %r" % (text,))
    parts = text.split("[")
    if len(parts) == 1:
        return parts[0], []
    # The regex guarantees at most one "[", so this is "table[f1,f2]":
    # strip the trailing "]" and split the field list.  (The original had
    # an implicit fall-through returning None for >2 parts, which the
    # regex makes unreachable; the unconditional return removes it.)
    return parts[0], parts[1][:-1].split(",")
def search(self, tname, vars):
    """Perform a GET query against table *tname* driven by request *vars*.

    Supports the control variables @offset/@limit/@order/@lookup/@model/
    @options_list/@count plus field filters of the form
    ``[not.]field[.field...].<condition>=value`` spanning up to three
    reference "hops".  Returns ``{"items": [...]}`` plus optional
    ``"count"`` and ``"model"`` entries.
    """

    # --- permission helpers bound to the current policy -------------------
    def check_table_permission(tablename):
        if self.policy:
            self.policy.check_if_allowed("GET", tablename)

    def check_table_lookup_permission(tablename):
        if self.policy:
            self.policy.check_if_lookup_allowed(tablename)

    def filter_fieldnames(table, fieldnames):
        # Validate explicit fieldnames against the policy, or fall back to
        # the policy's (or the table's) full allowed set.
        if self.policy:
            if fieldnames:
                self.policy.check_fieldnames(table, fieldnames)
            else:
                fieldnames = self.policy.allowed_fieldnames(table)
        elif not fieldnames:
            fieldnames = table.fields
        return fieldnames

    db = self.db
    tname, tfieldnames = RestAPI.parse_table_and_fields(tname)
    check_table_permission(tname)
    tfieldnames = filter_fieldnames(db[tname], tfieldnames)
    query = []
    offset = 0
    limit = 100
    model = False
    options_list = False
    table = db[tname]
    queries = []
    if self.policy:
        # A policy may impose a base query that every GET must satisfy.
        common_query = self.policy.get(tname, "GET", "query")
        if common_query:
            queries.append(common_query)
    # Filters that traverse 1, 2 or 3 reference hops, grouped so each
    # group collapses into a single belongs() subquery.
    hop1 = collections.defaultdict(list)
    hop2 = collections.defaultdict(list)
    hop3 = collections.defaultdict(list)
    model_fieldnames = tfieldnames
    lookup = {}
    orderby = None
    do_count = False
    for key, value in vars.items():
        if key == "@offset":
            offset = int(value)
        elif key == "@limit":
            # Never exceed the policy's (or the global) maximum page size.
            limit = min(
                int(value),
                self.policy.get(tname, "GET", "limit")
                if self.policy
                else MAX_LIMIT,
            )
        elif key == "@order":
            # "~field" means descending; unknown field names are ignored.
            orderby = [
                ~table[f[1:]] if f[:1] == "~" else table[f]
                for f in value.split(",")
                if f.lstrip("~") in table.fields
            ] or None
        elif key == "@lookup":
            lookup = {item[0]: {} for item in RestAPI.re_lookups.findall(value)}
        elif key == "@model":
            model = str(value).lower()[:1] == "t"
        elif key == "@options_list":
            options_list = str(value).lower()[:1] == "t"
        elif key == "@count":
            if self.allow_count:
                do_count = str(value).lower()[:1] == "t"
        else:
            # Anything else is a filter: [not.]name(.name)*.condition
            key_parts = key.rsplit(".")
            if not key_parts[-1] in (
                "eq",
                "ne",
                "gt",
                "lt",
                "ge",
                "le",
                "startswith",
                "contains",
                "in",
            ):
                key_parts.append("eq")
            is_negated = key_parts[0] == "not"
            if is_negated:
                key_parts = key_parts[1:]
            key, condition = key_parts[:-1], key_parts[-1]
            if len(key) == 1:  # example: name.eq=='Chair'
                query = self.make_query(table[key[0]], condition, value)
                queries.append(query if not is_negated else ~query)
            elif len(key) == 2:  # example: color.name.eq=='red'
                hop1[is_negated, key[0]].append((key[1], condition, value))
            elif len(key) == 3:  # example: a.rel.desc.eq=='above'
                hop2[is_negated, key[0], key[1]].append((key[2], condition, value))
            elif len(key) == 4:  # example: a.rel.b.name.eq == 'Table'
                hop3[is_negated, key[0], key[1], key[2]].append(
                    (key[3], condition, value)
                )
    # hop1: filter on a field of a directly referenced record.
    for item in hop1:
        is_negated, fieldname = item
        ref_tablename = table[fieldname].type.split(" ")[1]
        ref_table = db[ref_tablename]
        subqueries = [self.make_query(ref_table[k], c, v) for k, c, v in hop1[item]]
        subquery = functools.reduce(lambda a, b: a & b, subqueries)
        query = table[fieldname].belongs(db(subquery)._select(ref_table._id))
        queries.append(query if not is_negated else ~query)
    # hop2: filter through a link table that points back at this table.
    for item in hop2:
        is_negated, linkfield, linktable = item
        ref_table = db[linktable]
        subqueries = [self.make_query(ref_table[k], c, v) for k, c, v in hop2[item]]
        subquery = functools.reduce(lambda a, b: a & b, subqueries)
        query = table._id.belongs(db(subquery)._select(ref_table[linkfield]))
        queries.append(query if not is_negated else ~query)
    # hop3: filter through a link table on a field of the far table.
    for item in hop3:
        is_negated, linkfield, linktable, otherfield = item
        ref_table = db[linktable]
        ref_ref_tablename = ref_table[otherfield].type.split(" ")[1]
        ref_ref_table = db[ref_ref_tablename]
        subqueries = [
            self.make_query(ref_ref_table[k], c, v) for k, c, v in hop3[item]
        ]
        subquery = functools.reduce(lambda a, b: a & b, subqueries)
        subquery &= ref_ref_table._id == ref_table[otherfield]
        query = table._id.belongs(
            db(subquery)._select(ref_table[linkfield], groupby=ref_table[linkfield])
        )
        queries.append(query if not is_negated else ~query)
    if not queries:
        queries.append(table)
    query = functools.reduce(lambda a, b: a & b, queries)
    # Password fields are never selected; they are masked below instead.
    tfields = [
        table[tfieldname]
        for tfieldname in tfieldnames
        if table[tfieldname].type != "password"
    ]
    passwords = [
        tfieldname
        for tfieldname in tfieldnames
        if table[tfieldname].type == "password"
    ]
    rows = db(query).select(
        *tfields, limitby=(offset, limit + offset), orderby=orderby
    )
    if passwords:
        dpass = {password: "******" for password in passwords}
        for row in rows:
            row.update(dpass)
    # --- resolve @lookup clauses ------------------------------------------
    lookup_map = {}
    for key in list(lookup.keys()):
        # Optional "alias:" prefix; a trailing "!" collapses the looked-up
        # record into the parent row.
        name, key = key.split(":") if ":" in key else ("", key)
        clean_key = RestAPI.re_no_brackets.sub("", key)
        lookup_map[clean_key] = {
            "name": name.rstrip("!") or clean_key,
            "collapsed": name.endswith("!"),
        }
        key = key.split(".")
        if len(key) == 1:
            # Simple lookup: replace a reference field with its record.
            key, tfieldnames = RestAPI.parse_table_and_fields(key[0])
            ref_tablename = table[key].type.split(" ")[1]
            ref_table = db[ref_tablename]
            tfieldnames = filter_fieldnames(ref_table, tfieldnames)
            check_table_lookup_permission(ref_tablename)
            ids = [row[key] for row in rows]
            tfields = [
                ref_table[tfieldname]
                for tfieldname in tfieldnames
                if ref_table[tfieldname].type != "password"
            ]
            if not "id" in tfieldnames:
                # The id is needed to join back even when not requested.
                tfields.append(ref_table["id"])
            drows = db(ref_table._id.belongs(ids)).select(*tfields).as_dict()
            if tfieldnames and not "id" in tfieldnames:
                for row in drows.values():
                    del row["id"]
            lkey, collapsed = lookup_map[key]["name"], lookup_map[key]["collapsed"]
            for row in rows:
                new_row = drows.get(row[key])
                if collapsed:
                    # Inline the referenced fields as "alias.field".
                    del row[key]
                    for rkey in tfieldnames:
                        row[lkey + "." + rkey] = new_row[rkey] if new_row else None
                else:
                    row[lkey] = new_row
        elif len(key) == 2:
            # Reverse lookup: child rows via linkfield.table.
            lfield, key = key
            key, tfieldnames = RestAPI.parse_table_and_fields(key)
            check_table_lookup_permission(key)
            ref_table = db[key]
            tfieldnames = filter_fieldnames(ref_table, tfieldnames)
            ids = [row["id"] for row in rows]
            tfields = [ref_table[tfieldname] for tfieldname in tfieldnames]
            if not lfield in tfieldnames:
                tfields.append(ref_table[lfield])
            lrows = db(ref_table[lfield].belongs(ids)).select(*tfields)
            drows = collections.defaultdict(list)
            for row in lrows:
                row = row.as_dict()
                drows[row[lfield]].append(row)
                if not lfield in tfieldnames:
                    # The link value was only selected for grouping.
                    del row[lfield]
            lkey = lookup_map[lfield + "." + key]["name"]
            for row in rows:
                row[lkey] = drows.get(row.id, [])
        elif len(key) == 3:
            # Many-to-many lookup through a link table: lfield.link.rfield.
            lfield, key, rfield = key
            key, tfieldnames = RestAPI.parse_table_and_fields(key)
            rfield, tfieldnames2 = RestAPI.parse_table_and_fields(rfield)
            check_table_lookup_permission(key)
            ref_table = db[key]
            ref_ref_tablename = ref_table[rfield].type.split(" ")[1]
            check_table_lookup_permission(ref_ref_tablename)
            ref_ref_table = db[ref_ref_tablename]
            tfieldnames = filter_fieldnames(ref_table, tfieldnames)
            tfieldnames2 = filter_fieldnames(ref_ref_table, tfieldnames2)
            ids = [row["id"] for row in rows]
            tfields = [ref_table[tfieldname] for tfieldname in tfieldnames]
            if not lfield in tfieldnames:
                tfields.append(ref_table[lfield])
            if not rfield in tfieldnames:
                tfields.append(ref_table[rfield])
            tfields += [ref_ref_table[tfieldname] for tfieldname in tfieldnames2]
            left = ref_ref_table.on(ref_table[rfield] == ref_ref_table["id"])
            lrows = db(ref_table[lfield].belongs(ids)).select(*tfields, left=left)
            drows = collections.defaultdict(list)
            lkey = lfield + "." + key + "." + rfield
            lkey, collapsed = (
                lookup_map[lkey]["name"],
                lookup_map[lkey]["collapsed"],
            )
            for row in lrows:
                row = row.as_dict()
                new_row = row[key]
                lfield_value, rfield_value = new_row[lfield], new_row[rfield]
                if not lfield in tfieldnames:
                    del new_row[lfield]
                if not rfield in tfieldnames:
                    del new_row[rfield]
                if collapsed:
                    # Merge the far table's fields into the link record.
                    new_row.update(row[ref_ref_tablename])
                else:
                    new_row[rfield] = row[ref_ref_tablename]
                drows[lfield_value].append(new_row)
            for row in rows:
                row[lkey] = drows.get(row.id, [])
    # --- build the response -----------------------------------------------
    response = {}
    if not options_list:
        response["items"] = rows.as_list()
    else:
        # @options_list: compact value/text pairs for select widgets.
        if table._format:
            response["items"] = [
                dict(value=row.id, text=(table._format % row)) for row in rows
            ]
        else:
            response["items"] = [dict(value=row.id, text=row.id) for row in rows]
    if do_count or (self.allow_count == "legacy" and offset == 0):
        response["count"] = db(query).count()
    if model:
        response["model"] = self.table_model(table, model_fieldnames)
    return response
| bsd-3-clause | 2cbe6f608ae460dca8b3ea3103984f03 | 37.974446 | 94 | 0.508523 | 4.164179 | false | false | false | false |
web2py/pydal | pydal/helpers/classes.py | 1 | 16435 | # -*- coding: utf-8 -*-
import copy
import marshal
import struct
import threading
import time
import traceback
from .._compat import (PY2, copyreg, exists, implements_bool, iteritems,
iterkeys, itervalues, long, to_bytes)
from .._globals import THREAD_LOCAL
from .serializers import serializers
class cachedprop(object):
    """A read-only, lazily evaluated property.

    The wrapped getter runs once per instance; its result is stored in the
    instance ``__dict__`` under the getter's name, so later attribute
    lookups bypass this (non-data) descriptor entirely.
    """

    def __init__(self, fget, doc=None):
        self.fget = fget
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__

    def __get__(self, obj, cls):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        value = self.fget(obj)
        # Shadow the descriptor on the instance: subsequent reads hit
        # __dict__ directly and never call fget again.
        obj.__dict__[self.__name__] = value
        return value
@implements_bool
class BasicStorage(object):
    """A dict-like object whose items are also attributes: obj.x is obj['x'].

    Backed directly by the instance ``__dict__``; the @implements_bool
    decorator maps __bool__/__nonzero__ across Python 2/3.
    """

    def __init__(self, *args, **kwargs):
        return self.__dict__.__init__(*args, **kwargs)

    def __getitem__(self, key):
        # Keys are coerced to str so obj[1] and obj["1"] hit the same slot.
        return self.__dict__.__getitem__(str(key))

    __setitem__ = object.__setattr__

    def __delitem__(self, key):
        # Translate attribute deletion failures into dict semantics.
        try:
            delattr(self, key)
        except AttributeError:
            raise KeyError(key)

    def __bool__(self):
        return len(self.__dict__) > 0

    __iter__ = lambda self: self.__dict__.__iter__()

    __str__ = lambda self: self.__dict__.__str__()

    __repr__ = lambda self: self.__dict__.__repr__()

    # `in`, and the legacy py2 has_key(), both test attribute presence.
    has_key = __contains__ = lambda self, key: key in self.__dict__

    def get(self, key, default=None):
        return self.__dict__.get(key, default)

    def update(self, *args, **kwargs):
        return self.__dict__.update(*args, **kwargs)

    def keys(self):
        return self.__dict__.keys()

    def iterkeys(self):
        # py2-style iterator, delegated to the _compat shim.
        return iterkeys(self.__dict__)

    def values(self):
        return self.__dict__.values()

    def itervalues(self):
        return itervalues(self.__dict__)

    def items(self):
        return self.__dict__.items()

    def iteritems(self):
        return iteritems(self.__dict__)

    pop = lambda self, *args, **kwargs: self.__dict__.pop(*args, **kwargs)

    clear = lambda self, *args, **kwargs: self.__dict__.clear(*args, **kwargs)

    copy = lambda self, *args, **kwargs: self.__dict__.copy(*args, **kwargs)
def pickle_basicstorage(s):
    # Reduce function for pickling: rebuild a BasicStorage from a plain
    # dict of its contents.
    return BasicStorage, (dict(s),)


# Register with copyreg so pickle serializes BasicStorage via the reducer
# above instead of its (attribute-based) default protocol.
copyreg.pickle(BasicStorage, pickle_basicstorage)
class OpRow(object):
    """Row-like container used while building insert/update operations.

    Tracks, per key, both the value and the Field object it belongs to, so
    adapters can emit typed (field, value) pairs via :meth:`op_values`.
    """

    __slots__ = ("_table", "_fields", "_values")

    def __init__(self, table):
        # object.__setattr__ bypasses our own __setattr__, which would
        # otherwise treat these internals as column assignments.
        object.__setattr__(self, "_table", table)
        object.__setattr__(self, "_fields", {})
        object.__setattr__(self, "_values", {})

    def set_value(self, key, value, field=None):
        self._values[key] = value
        # Resolve the Field lazily from the table unless explicitly given;
        # keep the first resolution on repeated assignment.
        self._fields[key] = self._fields.get(key, field or self._table[key])

    def del_value(self, key):
        del self._values[key]
        del self._fields[key]

    def __getitem__(self, key):
        return self._values[key]

    def __setitem__(self, key, value):
        return self.set_value(key, value)

    def __delitem__(self, key):
        return self.del_value(key)

    def __getattr__(self, key):
        # Attribute access falls through to item access; missing columns
        # surface as AttributeError (e.g. for hasattr()).
        try:
            return self[key]
        except KeyError:
            raise AttributeError

    def __setattr__(self, key, value):
        return self.set_value(key, value)

    def __delattr__(self, key):
        return self.del_value(key)

    def __iter__(self):
        return self._values.__iter__()

    def __contains__(self, key):
        return key in self._values

    def get(self, key, default=None):
        try:
            rv = self[key]
        except KeyError:
            rv = default
        return rv

    def keys(self):
        return self._values.keys()

    def iterkeys(self):
        return iterkeys(self._values)

    def values(self):
        return self._values.values()

    def itervalues(self):
        return itervalues(self._values)

    def items(self):
        return self._values.items()

    def iteritems(self):
        return iteritems(self._values)

    def op_values(self):
        # Typed pairs consumed by the SQL adapters.
        return [(self._fields[key], value) for key, value in iteritems(self._values)]

    def __repr__(self):
        return "<OpRow %s>" % repr(self._values)
class Serializable(object):
    """Mixin adding dict/XML/JSON/YAML export to any object."""

    def as_dict(self, flat=False, sanitize=True):
        """Return a dict representation (the raw instance ``__dict__``)."""
        return self.__dict__

    def as_xml(self, sanitize=True):
        """Serialize the flat dict form to XML."""
        data = self.as_dict(flat=True, sanitize=sanitize)
        return serializers.xml(data)

    def as_json(self, sanitize=True):
        """Serialize the flat dict form to JSON."""
        data = self.as_dict(flat=True, sanitize=sanitize)
        return serializers.json(data)

    def as_yaml(self, sanitize=True):
        """Serialize the flat dict form to YAML."""
        data = self.as_dict(flat=True, sanitize=sanitize)
        return serializers.yaml(data)
class Reference(long):
    """An int-like foreign key that lazily fetches its referenced row.

    Subclasses the (py2-compat) ``long`` so it compares and serializes as
    the raw id, while attribute/item access transparently loads the target
    record into ``self._record``.
    """

    def __allocate(self):
        # Fetch and cache the referenced record on first access.
        if not self._record:
            self._record = self._table[long(self)]
        if not self._record:
            raise RuntimeError(
                "Using a recursive select but encountered a broken "
                + "reference: %s %d" % (self._table, long(self))
            )

    def __getattr__(self, key, default=None):
        if key == "id":
            # "id" is the reference value itself; no fetch needed.
            return long(self)
        if key in self._table:
            self.__allocate()
        if self._record:
            # to deal with case self.update_record()
            return self._record.get(key, default)
        else:
            return None

    def get(self, key, default=None):
        return self.__getattr__(key, default)

    def __setattr__(self, key, value):
        # Underscore attributes are descriptor internals; everything else
        # writes through to the (lazily loaded) record.
        if key.startswith("_"):
            long.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        if key == "id":
            return long(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self, key, value):
        self.__allocate()
        self._record[key] = value
def Reference_unpickler(data):
    # Inverse of Reference_pickler: restore the plain integer id.
    return marshal.loads(data)


def Reference_pickler(data):
    # Serialize a Reference as its marshalled integer id only; the cached
    # record, if any, is deliberately dropped.
    try:
        marshal_dump = marshal.dumps(long(data))
    except AttributeError:
        # NOTE(review): this fallback mixes str with packed bytes on
        # Python 3 ("i%s" % struct.pack(...)); it looks py2-only /
        # effectively unreachable — confirm before relying on it.
        marshal_dump = "i%s" % struct.pack("<i", long(data))
    return (Reference_unpickler, (marshal_dump,))


# Make pickle treat Reference as its bare id rather than a live record.
copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
class SQLCallableList(list):
    """A list that, when called, returns a shallow copy of itself.

    Used for e.g. ``db.tables`` so both attribute-style and call-style
    (``db.tables()``) access work.
    """

    def __call__(self):
        return copy.copy(self)
class SQLALL(object):
    """
    Helper that renders as a comma-separated list of all field names of a
    table (each already prefixed by the table name and '.').

    Normally only instantiated from within the DAL itself.
    """

    def __init__(self, table):
        self._table = table

    def __str__(self):
        # Each Field stringifies as "table.field"; join them for SELECT *.
        rendered = (str(field) for field in self._table)
        return ", ".join(rendered)
class SQLCustomType(object):
    """
    Allows defining of custom SQL types

    Args:
        type: the web2py type (default = 'string')
        native: the backend type
        encoder: how to encode the value to store it in the backend
        decoder: how to decode the value retrieved from the backend
        validator: what validators to use ( default = None, will use the
            default validator for type)

    Example::

        Define as:

            decimal = SQLCustomType(
                type ='double',
                native ='integer',
                encoder =(lambda x: int(float(x) * 100)),
                decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
                )

            db.define_table(
                'example',
                Field('value', type=decimal)
                )

    """

    def __init__(
        self,
        type="string",
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        widget=None,
        represent=None,
    ):
        self.type = type
        self.native = native
        # Default to identity transforms when no codec is supplied.
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        self._class = _class or type
        self.widget = widget
        self.represent = represent

    def startswith(self, text=None):
        """Delegate prefix tests to the underlying web2py type string, so
        DAL code doing ``field.type.startswith('reference')`` still works.

        BUGFIX: previously called ``self.type.startswith(self, text)``,
        passing the instance as the prefix — which always raised TypeError
        and therefore always returned False.
        """
        try:
            return self.type.startswith(text)
        except TypeError:
            # e.g. text is None: mirror the original's False fallback.
            return False

    def endswith(self, text=None):
        """Suffix-test counterpart of :meth:`startswith` (same fix)."""
        try:
            return self.type.endswith(text)
        except TypeError:
            return False

    def __getslice__(self, a=0, b=100):
        # Slicing a custom type is meaningless; mimic "no match".
        return None

    def __getitem__(self, i):
        return None

    def __str__(self):
        return self._class
class RecordOperator(object):
    """Base class for callables bound to a single database record
    (update/delete helpers attached to a row)."""

    def __init__(self, colset, table, id):
        # Unpack the binding one attribute at a time for readability.
        self.colset = colset
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self):
        # Subclasses implement the actual operation.
        pass
class RecordUpdater(RecordOperator):
    """Callable attached to a row as ``row.update_record``."""

    def __call__(self, **fields):
        colset, db, tablename, id = self.colset, self.db, self.tablename, self.id
        table = db[tablename]
        # No explicit fields means "persist the row's current values".
        newfields = fields or dict(colset)
        for fieldname in list(newfields.keys()):
            # Drop unknown columns and the immutable id field.
            if fieldname not in table.fields or table[fieldname].type == "id":
                del newfields[fieldname]
        # ignore_common_filters: the update must reach the row even if
        # common filters would normally hide it.
        table._db(table._id == id, ignore_common_filters=True).update(**newfields)
        # Keep the in-memory row in sync with what was just written.
        colset.update(newfields)
        return colset
class RecordDeleter(RecordOperator):
    """Callable attached to a row as ``row.delete_record``."""

    def __call__(self):
        # Delete exactly the one row this operator was bound to.
        return self.db(self.db[self.tablename]._id == self.id).delete()
class MethodAdder(object):
    """Decorator factory that attaches functions to a Table instance as
    bound methods.

    Usage::

        @table.add_method            # registers under the function's name
        @table.add_method.custom     # registers under "custom"
    """

    def __init__(self, table):
        self.table = table

    def __call__(self):
        return self.register()

    def __getattr__(self, method_name):
        # ``table.add_method.some_name`` registers under "some_name".
        return self.register(method_name)

    def register(self, method_name=None):
        def _decorated(f):
            instance = self.table
            import types

            if PY2:
                method = types.MethodType(f, instance, instance.__class__)
            else:
                method = types.MethodType(f, instance)
            # BUGFIX: previously used ``f.func_name``, which does not exist
            # on Python 3 functions and raised AttributeError whenever no
            # explicit name was given; ``__name__`` exists on both 2 and 3.
            name = method_name or f.__name__
            setattr(instance, name, method)
            return f

        return _decorated
class FakeCursor(object):
    """
    Cursor stand-in for drivers that have none.

    The Python Database API Specification (PEP 249,
    https://www.python.org/dev/peps/pep-0249/) requires a cursor() method,
    which NoSQL drivers generally do not support.  Any attribute access on
    this object raises, which signals that some piece of functionality has
    not yet been implemented in the driver yet something tried to use the
    cursor.
    """

    def warn_bad_usage(self, attr):
        raise Exception("FakeCursor.%s is not implemented" % attr)

    def __getattr__(self, attr):
        # Reading any undefined attribute is a driver bug — fail loudly.
        self.warn_bad_usage(attr)

    def __setattr__(self, attr, value):
        # Writing attributes is equally unsupported.
        self.warn_bad_usage(attr)

    def close(self):
        # Closing a fake cursor is harmlessly allowed.
        return
class NullCursor(FakeCursor):
    """Cursor stand-in that silently accepts any call and returns no rows."""

    # Fixed fake id for drivers that read lastrowid after an insert.
    lastrowid = 1

    def __getattr__(self, attr):
        # Every cursor method becomes a no-op returning an empty result set.
        return lambda *a, **b: []
class FakeDriver(BasicStorage):
    """Driver stand-in for engines without a real DB-API driver (NoSQL)."""

    def __init__(self, *args, **kwargs):
        super(FakeDriver, self).__init__(*args, **kwargs)
        self._build_cursor_()

    def _build_cursor_(self):
        # Subclasses override to supply a different cursor implementation.
        self._fake_cursor_ = FakeCursor()

    def cursor(self):
        # A single shared cursor instance per driver.
        return self._fake_cursor_

    def close(self):
        return None

    def commit(self):
        return None

    def __str__(self):
        state = ["%s=%r" % (attribute, value) for (attribute, value) in self.items()]
        return "\n".join(state)
class NullDriver(FakeDriver):
    """FakeDriver whose cursor swallows every call instead of raising."""

    def _build_cursor_(self):
        self._fake_cursor_ = NullCursor()
class ExecutionHandler(object):
    """Hook object invoked around every command an adapter executes.

    Subclass and override :meth:`before_execute` / :meth:`after_execute`
    to observe or time commands.
    """

    def __init__(self, adapter):
        self.adapter = adapter

    def before_execute(self, command):
        """Called just before *command* is sent to the backend."""
        pass

    def after_execute(self, command):
        """Called right after *command* has been executed."""
        pass
class TimingHandler(ExecutionHandler):
    """ExecutionHandler recording (command, seconds) for executed commands."""

    # Keep only the most recent N timings per thread.
    MAXSTORAGE = 100

    def _timings(self):
        # Lazily create the per-thread storage list on THREAD_LOCAL.
        THREAD_LOCAL._pydal_timings_ = getattr(THREAD_LOCAL, "_pydal_timings_", [])
        return THREAD_LOCAL._pydal_timings_

    @property
    def timings(self):
        return self._timings()

    def before_execute(self, command):
        self.t = time.time()

    def after_execute(self, command):
        dt = time.time() - self.t
        self.timings.append((command, dt))
        # Trim everything older than the last MAXSTORAGE entries.
        del self.timings[: -self.MAXSTORAGE]
class DatabaseStoredFile:
    """File-like object whose content lives in a ``web2py_filesystem``
    table, used to persist .table migration metadata inside the database
    itself.  Only MySQL, PostgreSQL and SQLite are supported.
    """

    # Database URIs for which the web2py_filesystem table is known to exist.
    web2py_filesystems = set()

    def escape(self, obj):
        return self.db._adapter.escape(obj)

    @staticmethod
    def try_create_web2py_filesystem(db):
        # Create the backing table once per database URI.
        if db._uri not in DatabaseStoredFile.web2py_filesystems:
            if db._adapter.dbengine not in ("mysql", "postgres", "sqlite"):
                # NOTE(review): "potresql" typo below is a runtime string,
                # preserved verbatim here.
                raise NotImplementedError(
                    "DatabaseStoredFile only supported by mysql, potresql, sqlite"
                )
            blobType = "BYTEA" if db._adapter.dbengine == "postgres" else "BLOB"
            sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content %(blobType)s, PRIMARY KEY(path));" % {"blobType": blobType}
            if db._adapter.dbengine == "mysql":
                # MySQL needs InnoDB for transactional semantics.
                sql = sql[:-1] + " ENGINE=InnoDB;"
            db.executesql(sql)
            DatabaseStoredFile.web2py_filesystems.add(db._uri)

    def __init__(self, db, filename, mode):
        if db._adapter.dbengine not in ("mysql", "postgres", "sqlite"):
            raise RuntimeError(
                "only MySQL/Postgres/SQLite can store metadata .table files"
                + " in database for now"
            )
        self.db = db
        self.filename = filename
        self.mode = mode
        DatabaseStoredFile.try_create_web2py_filesystem(db)
        # In-memory buffer plus current read position.
        self.p = 0
        self.data = b""
        if mode in ("r", "rw", "rb", "a", "ab"):
            # NOTE(review): filename is interpolated into SQL unescaped;
            # assumed to be an internally generated .table path — confirm.
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = to_bytes(rows[0][0])
            elif exists(filename):
                # Fall back to (and import) an on-disk copy if present.
                datafile = open(filename, "rb")
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ("r", "rw", "rb"):
                raise RuntimeError("File %s does not exist" % filename)

    def read(self, bytes=None):
        # Read *bytes* bytes (default: the remainder) from the buffer.
        if bytes is None:
            bytes = len(self.data)
        data = self.data[self.p : self.p + bytes]
        self.p += len(data)
        return data

    def readinto(self, bytes):
        return self.read(bytes)

    def readline(self):
        # Return up to and including the next newline, or the remainder.
        i = self.data.find(b"\n", self.p) + 1
        if i > 0:
            data, self.p = self.data[self.p : i], i
        else:
            data, self.p = self.data[self.p :], len(self.data)
        return data

    def write(self, data):
        # Writes are buffered in memory; persisted only on close().
        self.data += data

    def close_connection(self):
        if self.db is not None:
            # Replace any previous content for this path, then persist.
            self.db.executesql(
                "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename
            )
            placeholder = "?" if self.db._adapter.dbengine == "sqlite" else "%s"
            query = "INSERT INTO web2py_filesystem(path,content) VALUES (%(placeholder)s, %(placeholder)s)" % {"placeholder": placeholder}
            args = (self.filename, self.data)
            self.db.executesql(query, args)
            self.db.commit()
            # Mark as closed so a second close() is a no-op.
            self.db = None

    def close(self):
        self.close_connection()

    @staticmethod
    def is_operational_error(db, error):
        # None (not False) when the driver lacks the exception class.
        if not hasattr(db._adapter.driver, "OperationalError"):
            return None
        return isinstance(error, db._adapter.driver.OperationalError)

    @staticmethod
    def is_programming_error(db, error):
        if not hasattr(db._adapter.driver, "ProgrammingError"):
            return None
        return isinstance(error, db._adapter.driver.ProgrammingError)

    @staticmethod
    def exists(db, filename):
        # An on-disk copy counts as existing, too.
        if exists(filename):
            return True
        DatabaseStoredFile.try_create_web2py_filesystem(db)
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        try:
            if db.executesql(query):
                return True
        except Exception as e:
            if not (
                DatabaseStoredFile.is_operational_error(db, e)
                or DatabaseStoredFile.is_programming_error(db, e)
            ):
                raise
            # no web2py_filesystem found?
            tb = traceback.format_exc()
            db.logger.error("Could not retrieve %s\n%s" % (filename, tb))
        return False
| bsd-3-clause | ecc0a54e22c70d8f2f65a4b24e4dbea0 | 26.761824 | 151 | 0.567995 | 4.018337 | false | false | false | false |
openstates/billy | billy/web/public/views/region.py | 2 | 6017 | """
views that are specific to a region
"""
import re
import urllib
from collections import defaultdict
from django.shortcuts import redirect, render
from django.http import Http404
from billy.core import settings
from billy.models import db, Metadata, DoesNotExist, Bill
from ..forms import get_region_select_form
from .utils import templatename, GEO_BOUNDS
def region_selection(request):
    '''Handle submission of the region selection form in the base template. '''
    form = get_region_select_form(request.GET)
    abbr = form.data.get('abbr')
    # Reject missing or malformed abbreviations (a 2-letter code is
    # expected) by bouncing back to the homepage.
    if not abbr or len(abbr) != 2:
        return redirect('homepage')
    return redirect('region', abbr=abbr)
def region(request, abbr):
    '''
    Landing page for one region (jurisdiction).

    Context:
        - abbr
        - metadata
        - sessions
        - chambers
        - joint_committee_count
        - geo_bounds
        - nav_active

    Templates:
        - billy/web/public/region.html
    '''
    report = db.reports.find_one({'_id': abbr})

    try:
        meta = Metadata.get_object(abbr)
    except DoesNotExist:
        raise Http404

    # Fall back to whole-US bounds for unknown abbreviations.
    fallback_bounds = GEO_BOUNDS['US']
    geo_bounds = GEO_BOUNDS.get(abbr.upper(), fallback_bounds)

    # count legislators — fetch only the two fields actually tallied.
    legislators = meta.legislators({'active': True}, {'party': True,
                                                      'chamber': True})

    # Maybe later, mapreduce instead?
    party_counts = defaultdict(lambda: defaultdict(int))
    for leg in legislators:
        if 'chamber' in leg:  # exclude lt. governors
            party_counts[leg['chamber']][leg['party']] += 1

    chambers = []
    for chamber_type, chamber in meta['chambers'].items():
        res = {}

        # chamber metadata
        res['type'] = chamber_type
        res['title'] = chamber['title']
        res['name'] = chamber['name']

        # legislators
        res['legislators'] = {
            'count': sum(party_counts[chamber_type].values()),
            'party_counts': dict(party_counts[chamber_type]),
        }

        # committees
        res['committees_count'] = meta.committees({'chamber': chamber_type}
                                                  ).count()

        # Two most recent introductions and two most recent passages.
        res['latest_bills'] = meta.bills({'chamber': chamber_type}).sort(
            [('action_dates.first', -1)]).limit(2)
        res['passed_bills'] = meta.bills({'chamber': chamber_type}).sort(
            [('action_dates.passed_' + chamber_type, -1)]).limit(2)

        chambers.append(res)

    joint_committee_count = meta.committees({'chamber': 'joint'}).count()

    # add bill counts to session listing
    sessions = meta.sessions()
    for s in sessions:
        try:
            s['bill_count'] = (
                report['bills']['sessions'][s['id']]['upper_count']
                + report['bills']['sessions'][s['id']]['lower_count'])
        except KeyError:
            # there's a chance that the session had no bills
            s['bill_count'] = 0

    return render(request, templatename('region'),
                  dict(abbr=abbr, metadata=meta, sessions=sessions,
                       chambers=chambers,
                       joint_committee_count=joint_committee_count,
                       geo_bounds=geo_bounds,
                       nav_active='home'))
def search(request, abbr):
    '''
    Combined bill + legislator search for a region (Python 2 code: uses
    ``unicode`` and ``urllib.urlencode``).

    Context:
        - search_text
        - abbr
        - metadata
        - found_by_id
        - bill_results
        - more_bills_available
        - legislators_list
        - nav_active

    Tempaltes:
        - billy/web/public/search_results_no_query.html
        - billy/web/public/search_results_bills_legislators.html
        - billy/web/public/bills_list_row_with_abbr_and_session.html
    '''
    if not request.GET:
        return render(request, templatename('search_results_no_query'),
                      {'abbr': abbr})

    search_text = unicode(request.GET['search_text']).encode('utf8')

    # First try to get by bill_id: anything containing a digit is assumed
    # to be a bill id and redirected to the bills listing.
    if re.search(r'\d', search_text):
        url = '/%s/bills?' % abbr
        url += urllib.urlencode([('search_text', search_text)])
        return redirect(url)
    else:
        found_by_id = False
        kwargs = {}
        if abbr != 'all':
            kwargs['abbr'] = abbr
        bill_results = Bill.search(search_text, sort='last', **kwargs)

        # Limit the bills if it's a search.
        bill_result_count = len(bill_results)
        more_bills_available = (bill_result_count > 5)
        bill_results = bill_results[:5]

        # See if any legislator names match. First split up name to avoid
        # the Richard S. Madaleno problem. See Jira issue OS-32.
        textbits = search_text.split()
        # Keep only words longer than two chars and without initials.
        textbits = filter(lambda s: 2 < len(s), textbits)
        textbits = filter(lambda s: '.' not in s, textbits)

        andspec = []
        for text in textbits:
            andspec.append({'full_name': {'$regex': text, '$options': 'i'}})

        if andspec:
            spec = {'$and': andspec}
        else:
            # Nothing survived filtering: match the raw text instead.
            spec = {'full_name': {'$regex': search_text, '$options': 'i'}}

        # Run the query.
        if abbr != 'all':
            spec[settings.LEVEL_FIELD] = abbr
        # Active legislators sort first.
        legislator_results = list(db.legislators.find(spec).sort(
            [('active', -1)]))

    if abbr != 'all':
        metadata = Metadata.get_object(abbr)
    else:
        metadata = None

    return render(
        request, templatename('search_results_bills_legislators'),
        dict(search_text=search_text,
             abbr=abbr,
             metadata=metadata,
             found_by_id=found_by_id,
             bill_results=bill_results,
             bill_result_count=bill_result_count,
             more_bills_available=more_bills_available,
             legislators_list=legislator_results,
             column_headers_tmplname=None,  # not used
             rowtemplate_name=templatename('bills_list_row_with'
                                           '_abbr_and_session'),
             show_chamber_column=True,
             nav_active=None))
| bsd-3-clause | 5a5dc5297e53fe039e490c41ff86c7d5 | 31.524324 | 79 | 0.561243 | 3.716492 | false | false | false | false |
openstates/billy | billy/models/metadata.py | 2 | 6491 | import operator
import itertools
from django.core import urlresolvers
from billy.core import mdb as db, settings
from .base import (Document, RelatedDocument, RelatedDocuments, ListManager,
DictManager, AttrManager, DoesNotExist)
from ..utils import metadata as get_metadata
# Per-abbreviation caches for expensive distinct() scans (used by the
# Metadata.distinct_* methods below); keyed by jurisdiction abbreviation.
_distinct_subjects = {}
_distinct_types = {}
_distinct_action_types = {}
class Term(DictManager):
    """Wrapper for one term dict inside ``metadata['terms']``."""

    methods_only = True

    def session_info(self):
        """Yield the full detail dict for each session in this term,
        merged with the session's own name."""
        details = self.metadata['session_details']
        for session_name in self['sessions']:
            yield dict(details[session_name], name=session_name)

    def session_names(self):
        '''The display names of sessions occurring in this term.
        '''
        details = self.metadata['session_details']
        for sess in self['sessions']:
            yield details[sess]['display_name']
class TermsManager(ListManager):
    """List manager exposing ``metadata['terms']``, each wrapped in Term."""

    wrapper = Term

    @property
    def dict_(self):
        """Map term name -> wrapped Term (assumes one entry per name)."""
        wrapper = self._wrapper
        grouped = itertools.groupby(self.metadata['terms'],
                                    operator.itemgetter('name'))
        data = []
        for term, termdata in grouped:
            termdata = list(termdata)
            # BUGFIX: was ``len(termdata) is 1`` — an identity comparison
            # with an int literal that only works thanks to CPython's
            # small-int caching; use equality.
            assert len(termdata) == 1
            data.append((term, wrapper(termdata[0])))
        return dict(data)
class MetadataVotesManager(AttrManager):
    """Iterates every vote of every bill in this jurisdiction."""

    def __iter__(self):
        # Flatten: jurisdiction -> bills -> votes.
        for bill in self.document.bills():
            for vote in bill.votes_manager:
                yield vote
class Metadata(Document):
    '''
    The metadata can also be thought as the jurisdiction (i.e., Montana, Texas)
    when it's an attribute of another object. For example, if you have a
    bill, you can do this:

    >>> bill.metadata.abbr
    'de'
    '''
    instance_key = settings.LEVEL_FIELD

    collection = db.metadata

    # Related-document descriptors join child collections on the level field.
    legislators = RelatedDocuments('Legislator',
                                   model_keys=[settings.LEVEL_FIELD],
                                   instance_key='abbreviation')

    committees = RelatedDocuments('Committee',
                                  model_keys=[settings.LEVEL_FIELD],
                                  instance_key='abbreviation')

    bills = RelatedDocuments('Bill', model_keys=[settings.LEVEL_FIELD],
                             instance_key='abbreviation')

    events = RelatedDocuments('Event', model_keys=[settings.LEVEL_FIELD],
                              instance_key='abbreviation')

    report = RelatedDocument('Report', instance_key='_id')

    votes_manager = MetadataVotesManager()
    terms_manager = TermsManager()

    @classmethod
    def get_object(cls, abbr):
        '''
        This particular model needs its own constructor in order to take
        advantage of the metadata cache in billy.util, which would otherwise
        return unwrapped objects.
        '''
        obj = get_metadata(abbr)
        if obj is None:
            msg = 'No metadata found for abbreviation %r' % abbr
            raise DoesNotExist(msg)
        return cls(obj)

    @property
    def abbr(self):
        '''Return the two letter abbreviation.'''
        return self['_id']

    @property
    def most_recent_session(self):
        'Get the most recent session.'
        session = self['terms'][-1]['sessions'][-1]
        return session

    def sessions(self):
        '''Return [{'id': ..., 'name': ...}] for every session of every
        term, in term order.'''
        sessions = []
        for t in self['terms']:
            for s in t['sessions']:
                sobj = {'id': s,
                        'name': self['session_details'][s]['display_name']}
                sessions.append(sobj)
        return sessions

    def display_name(self):
        return self['name']

    def get_absolute_url(self):
        return urlresolvers.reverse('region', args=[self['abbreviation']])

    def _bills_by_chamber_action(self, chamber, action, *args, **kwargs):
        '''Most-recent-session bills in *chamber* with an action of type
        *action*.'''
        bills = self.bills({'session': self.most_recent_session,
                            'chamber': chamber,
                            'actions.type': action,
                            'type': 'bill'}, *args, **kwargs)
        # Not worrying about date sorting until later.
        return bills

    def bills_introduced_upper(self, *args, **kwargs):
        return self._bills_by_chamber_action('upper', 'bill:introduced')

    def bills_introduced_lower(self, *args, **kwargs):
        return self._bills_by_chamber_action('lower', 'bill:introduced')

    def bills_passed_upper(self, *args, **kwargs):
        return self._bills_by_chamber_action('upper', 'bill:passed')

    def bills_passed_lower(self, *args, **kwargs):
        return self._bills_by_chamber_action('lower', 'bill:passed')

    @property
    def term_dict(self):
        '''Terms grouped by name, cached on the instance after first use.'''
        try:
            return self._term_dict
        except AttributeError:
            term_dict = itertools.groupby(self['terms'],
                                          operator.itemgetter('name'))
            term_dict = dict((name, list(data)) for (name, data) in term_dict)
            self._term_dict = term_dict
            return term_dict

    def distinct_bill_subjects(self):
        return settings.BILLY_SUBJECTS

    def distinct_action_types(self):
        # Cached at module level per abbreviation; distinct() is expensive.
        if self.abbr not in _distinct_action_types:
            _distinct_action_types[self.abbr] = sorted(
                self.bills().distinct('actions.type'))
        return _distinct_action_types[self.abbr]

    def distinct_bill_types(self):
        return ['bill', 'resolution', 'joint resolution']

    def committees_legislators(self, *args, **kwargs):
        '''Return an iterable of committees with all the
        legislators cached for reference in the Committee model.
        So do a "select_related" operation on committee members.
        '''
        committees = list(self.committees(*args, **kwargs))
        legislators = self.legislators({'active': True},
                                       fields=['full_name',
                                               settings.LEVEL_FIELD])
        _legislators = {}

        # This will be a cache of legislator objects used in
        # the committees.html template. Includes ids in each
        # legislator's _all_ids field (if it exists.)
        for obj in legislators:
            # BUGFIX: the membership test previously checked 'all_ids'
            # while the loop body read obj['_all_ids'], so the branch was
            # either never taken or raised KeyError; test the key that is
            # actually used.
            if '_all_ids' in obj:
                for _id in obj['_all_ids']:
                    _legislators[_id] = obj
            else:
                _legislators[obj['_id']] = obj
        del legislators

        for com in committees:
            com._legislators = _legislators

        return committees
| bsd-3-clause | 43f43cbca38bd5121588c55210afe612 | 32.984293 | 93 | 0.578955 | 4.239713 | false | false | false | false |
openstates/billy | billy/importers/bills.py | 2 | 17884 | from __future__ import print_function
import os
import glob
import json
import logging
import datetime
from time import time
from collections import defaultdict
from billy.core import settings, db
from billy.utils import (metadata, term_for_session, fix_bill_id,
JSONEncoderPlus)
from billy.importers.names import get_legislator_id
from billy.importers.filters import apply_filters
from billy.importers.subjects import SubjectCategorizer
from billy.importers.utils import (insert_with_id, update, prepare_obj,
next_big_id, get_committee_id)
if hasattr(settings, "ENABLE_GIT") and settings.ENABLE_GIT:
from dulwich.repo import Repo
from dulwich.objects import Blob
from dulwich.objects import Tree
from dulwich.objects import Commit, parse_timezone
# Deployment-configured bill-munging filter functions applied in import_bill.
filters = settings.BILL_FILTERS
# Module logger shared by all import helpers below.
logger = logging.getLogger('billy')
def match_sponsor_ids(abbr, bill):
    """Resolve each sponsor name on ``bill`` to a legislator id, in place.

    Falls back from chamber-specific lookup to chamber-less lookup, then
    to a committee id if no legislator matches.
    """
    for sponsor in bill['sponsors']:
        # use sponsor's chamber if specified
        sponsor['leg_id'] = get_legislator_id(abbr, bill['session'],
                                              sponsor.get('chamber',
                                                          bill['chamber']),
                                              sponsor['name'])
        if sponsor['leg_id'] is None:
            # Retry without restricting to a chamber.
            sponsor['leg_id'] = get_legislator_id(abbr, bill['session'], None,
                                                  sponsor['name'])
        if sponsor['leg_id'] is None:
            # Still unmatched: the sponsor may be a committee.
            sponsor['committee_id'] = get_committee_id(abbr, bill['chamber'],
                                                       sponsor['name'])
def load_standalone_votes(data_dir):
    """Load separately-scraped vote JSON files.

    Returns a defaultdict mapping (bill_chamber, session, bill_id) ->
    list of vote dicts, for later attachment in import_bill.
    """
    pattern = os.path.join(data_dir, 'votes', '*.json')
    paths = glob.glob(pattern)
    votes = defaultdict(list)
    for path in paths:
        with open(path) as f:
            data = prepare_obj(json.load(f))
        # need to match bill_id already in the database
        bill_id = fix_bill_id(data.pop('bill_id'))
        votes[(data['bill_chamber'], data['session'], bill_id)].append(data)
    logger.info('imported %s vote files' % len(paths))
    return votes
# Module-level git state shared by git_prelod / git_add_bill / git_commit.
# All remain None unless settings.ENABLE_GIT is set.
git_active_repo = None
git_active_commit = None
git_active_tree = None
git_old_tree = None
HEAD = None
def git_add_bill(data):
    """Stage one bill's JSON into the active git tree (no-op unless ENABLE_GIT)."""
    if not hasattr(settings, "ENABLE_GIT") or not settings.ENABLE_GIT:
        return
    global git_active_repo
    global git_active_tree
    global git_active_commit
    # Serialize deterministically so unchanged bills produce identical blobs.
    bill = json.dumps(data, cls=JSONEncoderPlus, sort_keys=True, indent=4)
    spam = Blob.from_string(bill)
    bid = str(data['_id'])
    git_active_repo.object_store.add_object(spam)
    # 0100644 octal -> 33188 decimal  (regular-file mode for the tree entry)
    git_active_tree[bid] = (33188, spam.id)
    git_active_tree.check()
    print("added %s - %s" % (data['_id'], spam.id))
def git_commit(message):
    """Commit the staged tree to refs/heads/master (no-op unless ENABLE_GIT).

    Skips the commit entirely if nothing changed since git_prelod captured
    the previous tree id.
    """
    if not hasattr(settings, "ENABLE_GIT") or not settings.ENABLE_GIT:
        return
    # BUGFIX: corrected user-facing typo "Commiting" -> "Committing".
    print("Committing import as '%s'" % message)
    global git_active_repo
    global git_active_tree
    global git_old_tree
    global git_active_commit
    global HEAD
    repo = git_active_repo
    if git_old_tree == git_active_tree.id:
        # We don't want to commit twice when nothing changed.
        print("Nothing new here. Bailing out.")
        return
    c = git_active_commit
    c.tree = git_active_tree.id
    c.parents = [HEAD]
    repo.object_store.add_object(git_active_tree)
    c.author = c.committer = "Billy <billy@localhost>"
    c.commit_time = c.author_time = int(time())
    tz = parse_timezone("-0400")[0]
    c.commit_timezone = c.author_timezone = tz
    c.encoding = "UTF-8"
    c.message = message
    repo.object_store.add_object(c)
    # Advance master to the new commit.
    repo.refs['refs/heads/master'] = c.id
def git_repo_init(gitdir):
    """Create a bare git repo at ``gitdir`` with an initial README commit."""
    os.mkdir(gitdir)
    repo = Repo.init_bare(gitdir)
    blob = Blob.from_string("""Why, Hello there!
This is your friendly Legislation tracker, Billy here.
This is a git repo full of everything I write to the DB. This isn't super
useful unless you're debugging production issues.
Fondly,
Bill, your local Billy instance.""")
    tree = Tree()
    # 33188 == 0100644: regular-file mode.
    tree.add("README", 33188, blob.id)
    commit = Commit()
    commit.tree = tree.id
    author = "Billy <billy@localhost>"
    commit.author = commit.committer = author
    commit.commit_time = commit.author_time = int(time())
    tz = parse_timezone('-0400')[0]
    commit.commit_timezone = commit.author_timezone = tz
    commit.encoding = "UTF-8"
    commit.message = "Initial commit"
    repo.object_store.add_object(blob)
    repo.object_store.add_object(tree)
    repo.object_store.add_object(commit)
    repo.refs['refs/heads/master'] = commit.id
def git_prelod(abbr):
    """Prepare the per-abbreviation git repo before an import run.

    Initializes the bare repo on first use and snapshots the current tree
    so git_commit can detect a no-op import.

    NOTE(review): the name is a historical misspelling of "preload";
    callers in this module use this spelling, so it must stay.
    """
    if not hasattr(settings, "ENABLE_GIT") or not settings.ENABLE_GIT:
        return
    global git_active_repo
    global git_active_commit
    global git_active_tree
    global git_old_tree
    global HEAD
    gitdir = "%s/%s.git" % (settings.GIT_PATH, abbr)
    if not os.path.exists(gitdir):
        git_repo_init(gitdir)
    git_active_repo = Repo(gitdir)
    git_active_commit = Commit()
    HEAD = git_active_repo.head()
    commit = git_active_repo.commit(HEAD)
    tree = git_active_repo.tree(commit.tree)
    # Remember the pre-import tree id to allow skipping empty commits.
    git_old_tree = tree.id
    git_active_tree = tree
def import_bill(data, standalone_votes, categorizer):
    """
    insert or update a bill

    data - raw bill JSON
    standalone_votes - votes scraped separately
    categorizer - SubjectCategorizer (None - no categorization)

    Returns "insert" or "update" depending on whether the bill existed.
    """
    abbr = data[settings.LEVEL_FIELD]

    # clean up bill_ids
    data['bill_id'] = fix_bill_id(data['bill_id'])
    if 'alternate_bill_ids' in data:
        data['alternate_bill_ids'] = [fix_bill_id(bid) for bid in
                                      data['alternate_bill_ids']]

    # move subjects to scraped_subjects
    # NOTE: intentionally doesn't copy blank lists of subjects
    # this avoids the problem where a bill is re-run but we can't
    # get subjects anymore (quite common)
    subjects = data.pop('subjects', None)
    if subjects:
        data['scraped_subjects'] = subjects

    # update categorized subjects
    if categorizer:
        categorizer.categorize_bill(data)

    # companions: resolve each companion reference to a stored bill _id
    for companion in data['companions']:
        companion['bill_id'] = fix_bill_id(companion['bill_id'])
        # query based on companion
        spec = companion.copy()
        spec[settings.LEVEL_FIELD] = abbr
        if not spec['chamber']:
            spec.pop('chamber')
        companion_obj = db.bills.find_one(spec)
        if companion_obj:
            companion['internal_id'] = companion_obj['_id']
        else:
            logger.warning('Unknown companion: {chamber} {session} {bill_id}'
                           .format(**companion))

    # look for a prior version of this bill
    bill = db.bills.find_one({settings.LEVEL_FIELD: abbr,
                              'session': data['session'],
                              'chamber': data['chamber'],
                              'bill_id': data['bill_id']})

    # keep doc ids consistent between runs
    doc_matcher = DocumentMatcher(abbr)
    if bill:
        doc_matcher.learn_ids(bill['versions'] + bill['documents'])
    doc_matcher.set_ids(data['versions'] + data['documents'])

    # match sponsor leg_ids
    match_sponsor_ids(abbr, data)

    # process votes ############
    # pull votes off bill
    bill_votes = data.pop('votes', [])

    # grab the external bill votes if present
    if metadata(abbr).get('_partial_vote_bill_id'):
        # this is a hack initially added for Rhode Island where we can't
        # determine the full bill_id, if this key is in the metadata
        # we just use the numeric portion, not ideal as it won't work
        # where HB/SBs overlap, but in RI they never do
        # pull off numeric portion of bill_id
        numeric_bill_id = data['bill_id'].split()[1]
        bill_votes += standalone_votes.pop((data['chamber'], data['session'],
                                            numeric_bill_id), [])
    else:
        # add loaded votes to data
        bill_votes += standalone_votes.pop((data['chamber'], data['session'],
                                            data['bill_id']), [])

    # do id matching and other vote prep
    if bill:
        prepare_votes(abbr, data['session'], bill['_id'], bill_votes)
    else:
        prepare_votes(abbr, data['session'], None, bill_votes)

    # process actions ###########
    dates = {'first': None, 'last': None, 'passed_upper': None,
             'passed_lower': None, 'signed': None}

    # action types that may correspond to a recorded vote
    vote_flags = {
        "bill:passed",
        "bill:failed",
        "bill:veto_override:passed",
        "bill:veto_override:failed",
        "amendment:passed",
        "amendment:failed",
        "committee:passed",
        "committee:passed:favorable",
        "committee:passed:unfavorable",
        "committee:passed:failed"
    }
    already_linked = set()
    remove_vote = set()

    for action in data['actions']:
        adate = action['date']

        def _match_committee(name):
            return get_committee_id(abbr, action['actor'], name)

        def _match_legislator(name):
            return get_legislator_id(abbr,
                                     data['session'],
                                     action['actor'],
                                     name)

        resolvers = {
            "committee": _match_committee,
            "legislator": _match_legislator
        }

        if "related_entities" in action:
            for entity in action['related_entities']:
                try:
                    resolver = resolvers[entity['type']]
                except KeyError as e:
                    # We don't know how to deal.
                    logger.error("I don't know how to sort a %s" % e)
                    continue
                id = resolver(entity['name'])
                entity['id'] = id

        # first & last dates
        if not dates['first'] or adate < dates['first']:
            dates['first'] = adate
        if not dates['last'] or adate > dates['last']:
            dates['last'] = adate

        # passed & signed dates (only the first matching action counts)
        if (not dates['passed_upper'] and action['actor'] == 'upper'
                and 'bill:passed' in action['type']):
            dates['passed_upper'] = adate
        elif (not dates['passed_lower'] and action['actor'] == 'lower'
                and 'bill:passed' in action['type']):
            dates['passed_lower'] = adate
        elif (not dates['signed'] and 'governor:signed' in action['type']):
            dates['signed'] = adate

        # vote-action matching
        action_attached = False
        # only attempt vote matching if action has a date and is one of the
        # designated vote action types
        if set(action['type']).intersection(vote_flags) and action['date']:
            for vote in bill_votes:
                if not vote['date']:
                    continue
                # allow a generous window since scraped times can disagree
                delta = abs(vote['date'] - action['date'])
                if (delta < datetime.timedelta(hours=20) and
                        vote['chamber'] == action['actor']):
                    if action_attached:
                        # multiple votes match, we can't guess
                        action.pop('related_votes', None)
                    else:
                        related_vote = vote['vote_id']
                        if related_vote in already_linked:
                            remove_vote.add(related_vote)
                        already_linked.add(related_vote)
                        action['related_votes'] = [related_vote]
                        action_attached = True

    # remove related_votes that we linked to multiple actions
    for action in data['actions']:
        for vote in remove_vote:
            if vote in action.get('related_votes', []):
                action['related_votes'].remove(vote)

    # save action dates to data
    data['action_dates'] = dates

    data['_term'] = term_for_session(abbr, data['session'])

    alt_titles = set(data.get('alternate_titles', []))
    for version in data['versions']:
        # Merge any version titles into the alternate_titles list
        if 'title' in version:
            alt_titles.add(version['title'])
        if '+short_title' in version:
            alt_titles.add(version['+short_title'])
        try:
            # Make sure the primary title isn't included in the
            # alternate title list
            alt_titles.remove(data['title'])
        except KeyError:
            pass
    data['alternate_titles'] = list(alt_titles)
    data = apply_filters(filters, data)

    if not bill:
        insert_with_id(data)
        git_add_bill(data)
        save_votes(data, bill_votes)
        return "insert"
    else:
        update(bill, data, db.bills)
        git_add_bill(bill)
        save_votes(bill, bill_votes)
        return "update"
def import_bills(abbr, data_dir):
    """Import every scraped bill JSON file for one abbreviation.

    Returns a dict of counts: {'insert': n, 'update': n, 'total': n}.
    """
    data_dir = os.path.join(data_dir, abbr)
    pattern = os.path.join(data_dir, 'bills', '*.json')
    git_prelod(abbr)

    counts = {
        "update": 0,
        "insert": 0,
        "total": 0
    }

    votes = load_standalone_votes(data_dir)
    try:
        categorizer = SubjectCategorizer(abbr)
    except Exception as e:
        # Categorization is optional; keep importing without it.
        logger.debug('Proceeding without subject categorizer: %s' % e)
        categorizer = None

    paths = glob.glob(pattern)
    for path in paths:
        with open(path) as f:
            data = prepare_obj(json.load(f))
        counts["total"] += 1
        ret = import_bill(data, votes, categorizer)
        counts[ret] += 1
    logger.info('imported %s bill files' % len(paths))

    # Anything left in ``votes`` never matched a bill.
    for remaining in votes.keys():
        logger.debug('Failed to match vote %s %s %s' % tuple([
            r.encode('ascii', 'replace') for r in remaining]))

    populate_current_fields(abbr)
    git_commit("Import Update")
    return counts
def populate_current_fields(abbr):
    """
    Set/update _current_term and _current_session fields on all bills
    for a given location.
    """
    meta = db.metadata.find_one({'_id': abbr})
    latest_term = meta['terms'][-1]
    latest_session = latest_term['sessions'][-1]

    for bill in db.bills.find({settings.LEVEL_FIELD: abbr}):
        # Direct boolean assignment replaces the old if/else pairs.
        bill['_current_session'] = bill['session'] == latest_session
        bill['_current_term'] = bill['session'] in latest_term['sessions']
        db.bills.save(bill, safe=True)
def prepare_votes(abbr, session, bill_id, scraped_votes):
    """Assign stable vote_ids and resolve voter/committee names, in place.

    bill_id may be None for a brand-new bill (no ids to preserve).
    """
    # if bill already exists, try and preserve vote_ids
    vote_matcher = VoteMatcher(abbr)
    if bill_id:
        existing_votes = list(db.votes.find({'bill_id': bill_id}))
        if existing_votes:
            vote_matcher.learn_ids(existing_votes)
    vote_matcher.set_ids(scraped_votes)

    # link votes to committees and legislators
    for vote in scraped_votes:
        # committee_ids
        if 'committee' in vote:
            committee_id = get_committee_id(abbr, vote['chamber'],
                                            vote['committee'])
            vote['committee_id'] = committee_id

        # vote leg_ids
        vote['_voters'] = []
        for vtype in ('yes_votes', 'no_votes', 'other_votes'):
            svlist = []
            for svote in vote[vtype]:
                # id may be None when the name can't be resolved;
                # it is still recorded in _voters.
                id = get_legislator_id(abbr, session, vote['chamber'], svote)
                svlist.append({'name': svote, 'leg_id': id})
                vote['_voters'].append(id)
            vote[vtype] = svlist
def save_votes(bill, votes):
    """Replace the stored votes for ``bill`` with ``votes``."""
    # doesn't delete votes if none were scraped this time
    if not votes:
        return

    # remove all existing votes for this bill
    db.votes.remove({'bill_id': bill['_id']}, safe=True)

    # save the votes, denormalizing bill fields onto each one
    for vote in votes:
        vote['_id'] = vote['vote_id']
        vote['bill_id'] = bill['_id']
        vote[settings.LEVEL_FIELD] = bill[settings.LEVEL_FIELD]
        vote['session'] = bill['session']
        db.votes.save(vote, safe=True)
class GenericIDMatcher(object):
    """Assign stable ids to scraped items, reusing previously-learned ids.

    Subclasses supply ``id_letter``, ``id_collection``, ``id_key`` and a
    ``key_for_item`` method producing an identity tuple for one item.
    """

    def __init__(self, abbr):
        self.abbr = abbr
        self.ids = {}

    def _reset_sequence(self):
        # Per-key occurrence counter so duplicate keys still map uniquely.
        self.seq_for_key = defaultdict(int)

    def _get_next_id(self):
        return next_big_id(self.abbr, self.id_letter, self.id_collection)

    def nondup_key_for_item(self, item):
        """Return key_for_item(item) extended with its occurrence number."""
        base_key = self.key_for_item(item)
        occurrence = self.seq_for_key[base_key]
        self.seq_for_key[base_key] = occurrence + 1
        return base_key + (occurrence,)

    def learn_ids(self, item_list):
        """Record the ids already present on previously-stored items."""
        self._reset_sequence()
        self.ids.update(
            (self.nondup_key_for_item(item), item[self.id_key])
            for item in item_list)

    def set_ids(self, item_list):
        """Stamp each item with a learned id, or mint a fresh one."""
        self._reset_sequence()
        for item in item_list:
            item[self.id_key] = (self.ids.get(self.nondup_key_for_item(item))
                                 or self._get_next_id())
class VoteMatcher(GenericIDMatcher):
    # Vote ids ('V...') are allocated from the 'vote_ids' counter collection.
    id_letter = 'V'
    id_collection = 'vote_ids'
    id_key = 'vote_id'

    def key_for_item(self, vote):
        """Identity key: motion/chamber/date plus the three tallies."""
        return (vote['motion'], vote['chamber'], vote['date'],
                vote['yes_count'], vote['no_count'], vote['other_count'])
class DocumentMatcher(GenericIDMatcher):
    # Document ids ('D...') are allocated from the 'document_ids' collection.
    id_letter = 'D'
    id_collection = 'document_ids'
    id_key = 'doc_id'

    def key_for_item(self, document):
        # URL is good enough as a key
        return (document['url'],)
| bsd-3-clause | eb0d76e068c9e98b4f84298e442c04c8 | 31.281588 | 78 | 0.576605 | 3.734391 | false | false | false | false |
openstates/billy | billy/web/api/emitters.py | 2 | 2154 | import json
import datetime
from billy.utils import chamber_name
from billy.core import settings
from django.template import defaultfilters
from piston.emitters import Emitter, JSONEmitter
class DateTimeAwareJSONEncoder(json.JSONEncoder):
    # We wouldn't need this if django's stupid DateTimeAwareJSONEncoder
    # used settings.DATETIME_FORMAT instead of hard coding its own
    # format string.

    def default(self, o):
        """Render date/time objects using Django's configured format names."""
        # Order matters: datetime.datetime is a subclass of datetime.date,
        # so it must be tested first.
        if isinstance(o, datetime.datetime):
            return defaultfilters.date(o, 'DATETIME_FORMAT')
        elif isinstance(o, datetime.date):
            return defaultfilters.date(o, 'DATE_FORMAT')
        elif isinstance(o, datetime.time):
            return defaultfilters.date(o, 'TIME_FORMAT')
        return super(DateTimeAwareJSONEncoder, self).default(o)
class BillyJSONEmitter(JSONEmitter):
    """
    Removes private fields (keys preceded by '_') recursively and
    outputs as JSON, with datetimes converted to strings.
    """

    def render(self, request):
        """Serialize the response, honoring an optional JSONP ``callback``."""
        cb = request.GET.get('callback', None)
        seria = json.dumps(self.construct(), cls=DateTimeAwareJSONEncoder,
                           ensure_ascii=False)
        if cb:
            return "%s(%s)" % (cb, seria)
        return seria

    def construct(self):
        return self._clean(super(BillyJSONEmitter, self).construct())

    def _clean(self, obj):
        """Recursively strip underscore-prefixed keys/attributes.

        ``_id``/``_all_ids`` are first re-exposed as ``id``/``all_ids``.
        """
        if isinstance(obj, dict):
            # convert _id to id
            if '_id' in obj:
                obj['id'] = obj['_id']
            if '_all_ids' in obj:
                obj['all_ids'] = obj['_all_ids']
            # BUGFIX: iterate over a snapshot -- we delete keys while
            # looping, and mutating a dict during iteration raises
            # RuntimeError on Python 3.
            for key, value in list(obj.items()):
                if key.startswith('_'):
                    del obj[key]
                else:
                    obj[key] = self._clean(value)
        elif isinstance(obj, list):
            obj = [self._clean(item) for item in obj]
        elif hasattr(obj, '__dict__'):
            # Same snapshot treatment for attribute dicts.
            for key, value in list(obj.__dict__.items()):
                if key.startswith('_'):
                    del obj.__dict__[key]
                else:
                    obj.__dict__[key] = self._clean(value)
        return obj
| bsd-3-clause | 8080cd5aeffa49acd967ffc1e1241768 | 31.149254 | 74 | 0.574745 | 4.240157 | false | false | false | false |
gautamkrishnar/socli | socli/socli.py | 1 | 18764 | """
# Stack Overflow CLI
# Created by
# Gautam Krishna R : www.github.com/gautamkrishnar
# And open source contributors at GitHub: https://github.com/gautamkrishnar/socli#contributors
"""
import os
import re
import sys
import logging
import requests
from bs4 import BeautifulSoup
import urwid
import urllib3
try:
import simplejson as json
except ImportError:
import json
try:
JSONDecodeError = json.JSONDecodeError
except AttributeError:
JSONDecodeError = ValueError
# Importing SoCli modules
import socli.tui as tui
import socli.user as user_module
import socli.search as search
import socli.printer as printer
from socli.parser import parse_arguments
from socli.printer import display_results
from socli.version import __version__
# Mutable module-level CLI state, written by main() and read by the
# search helpers below.
tag = ""  # tag based search
query = ""  # Query

# Suppressing InsecureRequestWarning and many others
urllib3.disable_warnings()

# logger for debugging
logger = logging.getLogger(__name__)
# Switch on logging of the requests module.
def debug_requests_on():
    """Enable verbose HTTP debugging for urllib3/requests."""
    try:
        from http.client import HTTPConnection
        # NOTE(review): calling the unbound method with the class sets the
        # debug level on the class itself, affecting all connections --
        # presumably intentional; confirm.
        HTTPConnection.set_debuglevel(HTTPConnection, 1)
    except ImportError:
        # Python 2 fallback.
        import httplib
        httplib.HTTPConnection.debuglevel = 2

    logging.basicConfig()
    logging.getLogger().setLevel(logging.DEBUG)
    requests_log = logging.getLogger('requests.packages.urllib3')
    requests_log.setLevel(logging.DEBUG)
    requests_log.propagate = True
# Fixes windows active page code errors
def fix_code_page():
    """On Windows, switch the console to code page 65001 (UTF-8)."""
    if sys.platform != 'win32':
        return
    if sys.stdout.encoding == 'cp65001':
        return
    os.system("echo off")
    os.system("chcp 65001")  # Change active page code
    sys.stdout.write("\x1b[A")  # Removes the output of chcp command
    sys.stdout.flush()
def wrongsyn(query):
    """
    Exits if query value is empty
    :param query: query string supplied on the command line
    :return: None if the query is non-empty; otherwise exits with status 1
    """
    if not query:
        printer.print_warning("Wrong syntax!...\n")
        printer.helpman()
        sys.exit(1)
    else:
        return
def has_tags():
    """
    Gets the tags and adds them to query url
    (appends each "[tag]+" onto search.so_qurl)
    :return:
    """
    global tag
    for tags in tag:
        search.so_qurl = search.so_qurl + "[" + tags + "]" + "+"
def socli(query, json_output=False):
    """
    SoCLI Code
    :param query: Query to search on Stack Overflow.
        If google_search is true uses Google search to find the best result.
        Else, use Stack Overflow default search mechanism.
    :param json_output: if True, results are emitted as JSON.
    :return:
    """
    query = printer.urlencode(query)
    try:
        if search.google_search:
            questions = search.get_questions_for_query_google(query)
            res_url = questions[0][2]  # Gets the first result
            display_results(res_url, json_output=json_output)
        else:
            questions = search.get_questions_for_query(query)
            res_url = questions[0][2]
            display_results(search.so_url + res_url, json_output=json_output)  # Returned URL is relative to SO homepage
    except IndexError:
        # First result failed -- try the remaining results in order.
        if len(questions) > 1:
            for i in range(1, len(questions)):
                res_url = questions[i][2]
                try:
                    display_results(res_url, json_output=json_output)
                except IndexError:
                    continue
                break
        # NOTE(review): this warning prints even when a fallback result
        # was displayed above -- looks like a long-standing quirk.
        printer.print_warning("No results found...")
    except UnicodeEncodeError as e:
        printer.showerror(e)
        printer.print_warning("\n\nEncoding error: Use \"chcp 65001\" command before using socli...")
        sys.exit(0)
    except requests.exceptions.ConnectionError:
        printer.print_fail("Please check your internet connectivity...")
    except Exception as e:
        printer.showerror(e)
        sys.exit(0)
def socli_browse_interactive_windows(query_tag):
    """
    Interactive mode for -b browse (Windows fallback: plain-console UI
    instead of the urwid TUI).
    :param query_tag: tag(s) to browse
    :return:
    """
    try:
        search_res = requests.get(search.so_burl + query_tag)
        search.captcha_check(search_res.url)
        soup = BeautifulSoup(search_res.text, 'html.parser')
        try:
            soup.find_all("div", class_="question-summary")[0]  # For explicitly raising exception
            tmp = (soup.find_all("div", class_="question-summary"))
            i = 0
            question_local_url = []
            print(printer.bold("\nSelect a question below:\n"))
            while i < len(tmp):
                if i == 10:
                    break  # limiting results
                question_text = ' '.join((tmp[i].a.get_text()).split())
                question_text = question_text.replace("Q: ", "")
                printer.print_warning(str(i + 1) + ". " + printer.display_str(question_text))
                q_tag = (soup.find_all("div", class_="question-summary"))[i]
                answers = [s.get_text() for s in q_tag.find_all("a", class_="post-tag")][0:]
                ques_tags = " ".join(str(x) for x in answers)
                question_local_url.append(tmp[i].a.get("href"))
                print(" " + printer.display_str(ques_tags) + "\n")
                i = i + 1
            try:
                op = int(printer.inputs("\nType the option no to continue or any other key to exit:"))
                while 1:
                    if (op > 0) and (op <= i):
                        display_results(search.so_burl + question_local_url[op - 1])
                        cnt = 1  # this is because the 1st post is the question itself
                        # answer-paging loop: n = next, b = back, o = open
                        while 1:
                            # NOTE(review): tmpsoup is presumably populated by
                            # display_results/printer as a module global -- confirm.
                            global tmpsoup
                            qna = printer.inputs(
                                "Type " + printer.bold("o") + " to open in browser, " + printer.bold(
                                    "n") + " to next answer, " + printer.bold(
                                    "b") + " for previous answer or any other key to exit:")
                            if qna in ["n", "N"]:
                                try:
                                    answer = (tmpsoup.find_all("div", class_="js-post-body")[cnt + 1].get_text())
                                    printer.print_green("\n\nAnswer:\n")
                                    print("-------\n" + answer + "\n-------\n")
                                    cnt = cnt + 1
                                except IndexError:
                                    printer.print_warning(" No more answers found for this question. Exiting...")
                                    sys.exit(0)
                                continue
                            if qna in ["b", "B"]:
                                if cnt == 1:
                                    printer.print_warning(" You cant go further back. You are on the first answer!")
                                    continue
                                answer = (tmpsoup.find_all("div", class_="js-post-body")[cnt - 1].get_text())
                                printer.print_green("\n\nAnswer:\n")
                                print("-------\n" + answer + "\n-------\n")
                                cnt = cnt - 1
                                continue
                            if qna in ["o", "O"]:
                                import webbrowser
                                printer.print_warning("Opening in your browser...")
                                webbrowser.open(search.so_burl + question_local_url[op - 1])
                            else:
                                break
                        sys.exit(0)
                    else:
                        op = int(input("\n\nWrong option. select the option no to continue:"))
            except Exception as e:
                printer.showerror(e)
                printer.print_warning("\n Exiting...")
                sys.exit(0)
        except IndexError:
            printer.print_warning("No results found...")
            sys.exit(0)
    except UnicodeEncodeError:
        printer.print_warning("\n\nEncoding error: Use \"chcp 65001\" command before using socli...")
        sys.exit(0)
    except requests.exceptions.ConnectionError:
        printer.print_fail("Please check your internet connectivity...")
    except Exception as e:
        printer.showerror(e)
        sys.exit(0)
def socli_browse_interactive(query_tag):
    """
    Interactive mode (urwid TUI); delegates to the plain-console
    implementation on Windows.
    :param query_tag: tag(s) to browse
    :return:
    """
    if sys.platform == 'win32':
        return socli_browse_interactive_windows(query_tag)

    class SelectQuestionPage(urwid.WidgetWrap):
        """TUI page listing up to 10 questions selectable by digit key."""

        def display_text(self, index, question):
            """Render one (title, description, url) tuple as urwid markup."""
            question_text, question_desc, _ = question
            text = [
                ("warning", u"{}. {}\n".format(index, question_text)),
                question_desc + "\n",
            ]
            return text

        def __init__(self, questions):
            self.questions = questions
            # Cache of already-built question pages, indexed by digit.
            self.cachedQuestions = [None for _ in range(10)]
            widgets = [self.display_text(i, q) for i, q in enumerate(questions)]
            self.questions_box = tui.ScrollableTextBox(widgets)
            self.header = tui.UnicodeText(('less-important', 'Select a question below:\n'))
            self.footerText = '0-' + str(len(self.questions) - 1) + ': select a question, any other key: exit.'
            self.errorText = tui.UnicodeText.to_unicode('Question numbers range from 0-' +
                                                        str(len(self.questions) - 1) +
                                                        ". Please select a valid question number.")
            self.footer = tui.UnicodeText(self.footerText)
            self.footerText = tui.UnicodeText.to_unicode(self.footerText)
            frame = urwid.Frame(header=self.header,
                                body=urwid.Filler(self.questions_box, height=('relative', 100), valign='top'),
                                footer=self.footer)
            urwid.WidgetWrap.__init__(self, frame)

        # Override parent method
        def selectable(self):
            return True

        def keypress(self, size, key):
            """Digits select a question; up/down scroll; anything else exits."""
            if key in '0123456789':
                try:
                    question_url = self.questions[int(key)][2]
                    self.footer.set_text(self.footerText)
                    self.select_question(question_url, int(key))
                except IndexError:
                    self.footer.set_text(self.errorText)
            elif key in {'down', 'up'}:
                self.questions_box.keypress(size, key)
            else:
                raise urwid.ExitMainLoop()

        def select_question(self, url, index):
            """Show the question page, building and caching it on first view."""
            if self.cachedQuestions[index] is not None:
                tui.question_post = self.cachedQuestions[index]
                tui.MAIN_LOOP.widget = tui.question_post
            else:
                if not search.google_search:
                    url = search.so_url + url
                question_title, question_desc, question_stats, answers, comments, dup_url = \
                    search.get_question_stats_and_answer_and_comments(url)
                question_post = tui.QuestionPage(
                    (url, question_title, question_desc, question_stats, answers, comments, dup_url, None)
                )
                self.cachedQuestions[index] = question_post
                tui.MAIN_LOOP.widget = question_post

    tui.display_header = tui.Header()
    try:
        if search.google_search:
            questions = search.get_questions_for_query_google(query)
        else:
            questions = search.get_questions_for_query(query_tag)
        question_page = SelectQuestionPage(questions)
        tui.MAIN_LOOP = tui.EditedMainLoop(question_page, printer.palette)
        tui.MAIN_LOOP.run()
    except UnicodeEncodeError:
        printer.print_warning("\n\nEncoding error: Use \"chcp 65001\" command before using socli...")
        sys.exit(0)
    except requests.exceptions.ConnectionError:
        printer.print_fail("Please check your internet connectivity...")
    except Exception as e:
        printer.showerror(e)
    print("exiting...")
    sys.exit(0)
def main():
    """
    The main logic for how options in a command is checked.

    Parses argv, then handles each flag in order; most informational
    flags (help/version/new/api/user/delete) exit immediately.
    """
    global query
    namespace = parse_arguments(sys.argv[1:])
    search.load_user_agents()  # Populates the user agents array

    query_tag = ' '.join(namespace.browse)  # Tags

    # Query
    if namespace.query:
        # Query when args are present
        query = ' '.join(namespace.query)
    elif namespace.userQuery:
        # Query without any args
        query = ' '.join(namespace.userQuery)

    if namespace.help:
        # Display command line syntax
        printer.helpman()
        sys.exit(0)
    json_output = bool(namespace.json)
    if namespace.debug:  # If --debug flag is present
        # Prints out error used for debugging
        printer.DEBUG = True
        debug_requests_on()
    if namespace.register:
        os.system('eval "$(register-python-argcomplete socli)"')
        sys.exit(0)
    if namespace.new:  # If --new flag is present
        # Opens StackOverflow website in the browser to create a new question
        import webbrowser
        printer.print_warning("Opening stack overflow in your browser...")
        webbrowser.open(search.so_url + "/questions/ask")
        sys.exit(0)
    if namespace.api:  # If --api flag is present
        # Sets custom API key
        user_module.set_api_key()
        sys.exit(0)
    if namespace.user is not None:  # If --user flag is present
        # Stackoverflow user profile support
        if namespace.user != '(RANDOM_STRING_CONSTANT)':  # If user provided a user ID
            user_module.manual = 1  # Enabling manual mode
            user = namespace.user
        else:  # If user did not provide a user id
            user = user_module.retrieve_saved_profile()  # Reading saved user id from app data
        user_module.user_page(user)
        sys.exit(0)
    if namespace.delete:  # If --delete flag is present
        # Deletes user data
        user_module.del_datafile()
        printer.print_warning("Data files deleted...")
        sys.exit(0)
    if namespace.sosearch:  # If --sosearch flag is present
        # Disables google search
        search.google_search = False
    if namespace.tag:  # If --tag flag is present
        global tag
        search.google_search = False
        tag = namespace.tag
        has_tags()  # Adds tags to StackOverflow url (when not using google search.
    if namespace.open_url:
        import webbrowser
        open_in_browser = False
        display_condition = True
        url_to_use = namespace.open_url[0]
        # Normalize to an https:// URL.
        if re.findall(r"^https:\/\/", url_to_use):
            pass
        else:
            url_to_use = "https://" + url_to_use
        try:
            # Reject the bare questions index and verify the URL is reachable.
            if url_to_use == "https://stackoverflow.com/questions/":
                raise Exception('URL Error')
            if url_to_use == "https://www.stackoverflow.com/questions/":
                raise Exception('URL Error')
            requests.get(url_to_use)
        except Exception:
            printer.print_warning(
                "Error, could be:\n- invalid url\n- url cannot be opened in socli\n- internet connection error")
            sys.exit(0)
        nostackoverflow = re.findall(r"stackoverflow\.com", url_to_use)
        if not nostackoverflow:
            # Non-StackOverflow URLs are just handed to the browser.
            open_in_browser = True
            display_condition = False
            printer.print_warning("Your url is not a stack overflow url.\nOpening in your browser...")
        tag_matcher = re.findall(r"\/tag.+\/", url_to_use)
        blog_matcher = re.findall(r"blog", url_to_use)
        if tag_matcher:
            # Tag URLs switch into interactive tag browsing.
            extracted_tag = ""
            if not re.findall(r"tagged", url_to_use):
                extracted_tag = re.split(r"\/", url_to_use)[4]
            else:
                extracted_tag = re.split(r"\/", url_to_use)[5]
            open_in_browser = False
            display_condition = False
            tag = extracted_tag
            search.socli_interactive(tag)
        if blog_matcher:
            open_in_browser = True
            display_condition = False
            printer.print_warning("Your url belongs to blog")
            printer.print_warning("Opening in browser...")
        if display_condition:
            open_in_browser = False
            try:
                display_results(url_to_use, json_output=json_output)
            except IndexError:
                printer.print_fail("The URL specified is returning a 404, please check the url and try again!")
                sys.exit(0)
        if open_in_browser:
            webbrowser.open(url_to_use)
    if namespace.res is not None:  # If --res flag is present
        # Automatically displays the result specified by the number
        question_number = namespace.res
        if namespace.query != [] or namespace.tag is not None:  # There must either be a tag or a query
            search.socli_manual_search(query, question_number)
        else:
            printer.print_warning(
                'You must specify a query or a tag. For example, use: "socli -r 3 -q python for loop" '
                'to retrieve the third result when searching about "python for loop". '
                'You can also use "socli -r 3 -t python" '
                'to retrieve the third result when searching for posts with the "python" tag.')
    if namespace.version:
        print('Socli ' + __version__)
        sys.exit(0)
    if namespace.browse:
        # Browse mode
        search.google_search = False
        socli_browse_interactive(query_tag)
    elif namespace.query != [] or namespace.tag is not None:  # If query and tag are not both empty
        if namespace.interactive:
            search.socli_interactive(query)
        else:
            socli(query, json_output=json_output)
    elif query not in [' ', ''] and not (
            namespace.tag or namespace.res or namespace.interactive or namespace.browse):  # If there are no flags
        socli(query, json_output=json_output)
    else:
        # Help text for interactive mode
        if namespace.interactive and namespace.query == [] and namespace.tag is None:
            printer.print_warning('You must specify a query or a tag. For example, use: "socli -iq python for loop" '
                                  'to search about "python for loop" in interactive mode. '
                                  'You can also use "socli -it python" '
                                  'to search posts with the "python" tag in interactive mode.')
        else:
            printer.helpman()
# Script entry point.
if __name__ == '__main__':
    main()
| bsd-3-clause | 15749312a1efbd0432f17659721639cd | 38.25523 | 120 | 0.554519 | 4.223273 | false | false | false | false |
numpy/numpy | numpy/core/tests/test_getlimits.py | 5 | 5306 | """ Test functions for limits module.
"""
import warnings
import numpy as np
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
from numpy.testing import assert_equal, assert_, assert_raises
from numpy.core.getlimits import _discovered_machar, _float_ma
##################################################
class TestPythonFloat:
    def test_singleton(self):
        # finfo caches one instance per dtype, so repeated calls must
        # hand back the very same object.
        first = finfo(float)
        second = finfo(float)
        assert_equal(id(first), id(second))
class TestHalf:
    def test_singleton(self):
        # two lookups for the same dtype must share one cached finfo
        a, b = finfo(half), finfo(half)
        assert_equal(id(a), id(b))
class TestSingle:
    def test_singleton(self):
        # finfo(single) is memoized; identity must hold across calls
        cached = finfo(single)
        assert_equal(id(finfo(single)), id(cached))
class TestDouble:
    def test_singleton(self):
        # finfo(double) is memoized; identity must hold across calls
        cached = finfo(double)
        assert_equal(id(finfo(double)), id(cached))
class TestLongdouble:
    def test_singleton(self):
        # finfo(longdouble) is memoized; identity must hold across calls
        cached = finfo(longdouble)
        assert_equal(id(finfo(longdouble)), id(cached))
class TestFinfo:
    def test_basic(self):
        # Each dtype-code / scalar-type pair must report identical
        # finfo parameters; integer codes must be rejected.
        codes = ['f2', 'f4', 'f8', 'c8', 'c16']
        scalar_types = [np.float16, np.float32, np.float64, np.complex64,
                        np.complex128]
        checked_attrs = ('bits', 'eps', 'epsneg', 'iexp', 'machep',
                         'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp',
                         'nmant', 'precision', 'resolution', 'tiny',
                         'smallest_normal', 'smallest_subnormal')
        for code, scalar_type in zip(codes, scalar_types):
            info_a, info_b = finfo(code), finfo(scalar_type)
            for attr in checked_attrs:
                assert_equal(getattr(info_a, attr),
                             getattr(info_b, attr), attr)
        assert_raises(ValueError, finfo, 'i4')
class TestIinfo:
    def test_basic(self):
        # dtype codes and scalar types must report identical iinfo
        # parameters; float codes must be rejected.
        codes = ['i1', 'i2', 'i4', 'i8',
                 'u1', 'u2', 'u4', 'u8']
        scalar_types = [np.int8, np.int16, np.int32, np.int64,
                        np.uint8, np.uint16, np.uint32, np.uint64]
        for code, scalar_type in zip(codes, scalar_types):
            for attr in ('bits', 'min', 'max'):
                assert_equal(getattr(iinfo(code), attr),
                             getattr(iinfo(scalar_type), attr), attr)
        assert_raises(ValueError, iinfo, 'f4')

    def test_unsigned_max(self):
        # unsigned max is all-ones, i.e. 0 - 1 under modular wraparound
        for T in np.sctypes['uint']:
            with np.errstate(over="ignore"):
                wrapped = T(0) - T(1)
            assert_equal(iinfo(T).max, wrapped)
class TestRepr:
    def test_iinfo_repr(self):
        """repr of iinfo shows min, max and dtype."""
        assert_equal(repr(np.iinfo(np.int16)),
                     "iinfo(min=-32768, max=32767, dtype=int16)")

    def test_finfo_repr(self):
        """repr of finfo shows resolution, min, max and dtype."""
        expected = ("finfo(resolution=1e-06, min=-3.4028235e+38,"
                    " max=3.4028235e+38, dtype=float32)")
        assert_equal(repr(np.finfo(np.float32)), expected)
def test_instances():
    """Smoke test: iinfo/finfo accept instances, not just scalar types."""
    iinfo(10)
    finfo(3.0)
def assert_ma_equal(discovered, ma_like):
    """Check that a discovered MachAr-like object matches a reference one.

    Every attribute on ``discovered`` must compare equal to the attribute
    of the same name on ``ma_like``; array-valued attributes must also
    agree in shape and dtype.
    """
    for attr_name, discovered_value in discovered.__dict__.items():
        reference = getattr(ma_like, attr_name)
        assert_equal(discovered_value, reference)
        if hasattr(discovered_value, 'shape'):
            assert_equal(discovered_value.shape, reference.shape)
            assert_equal(discovered_value.dtype, reference.dtype)
def test_known_types():
    """Check discovered MachAr parameters against the precomputed tables."""
    # Test we are correctly compiling parameters for known types
    for ftype, ma_like in ((np.float16, _float_ma[16]),
                           (np.float32, _float_ma[32]),
                           (np.float64, _float_ma[64])):
        assert_ma_equal(_discovered_machar(ftype), ma_like)
    # Suppress warning for broken discovery of double double on PPC
    with np.errstate(all='ignore'):
        ld_ma = _discovered_machar(np.longdouble)
    # NOTE(review): ``bytes`` shadows the builtin here; rename if touched.
    bytes = np.dtype(np.longdouble).itemsize
    # longdouble layout is platform-dependent; only the two common IEEE
    # layouts have reference tables, so anything else is left unchecked.
    if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
        # 80-bit extended precision
        assert_ma_equal(ld_ma, _float_ma[80])
    elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
        # IEEE 754 128-bit
        assert_ma_equal(ld_ma, _float_ma[128])
def test_subnormal_warning():
    """Test that the subnormal is zero warning is not being raised."""
    with np.errstate(all='ignore'):
        ld_ma = _discovered_machar(np.longdouble)
        bytes = np.dtype(np.longdouble).itemsize
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # Accessing smallest_subnormal must not warn on IEEE-style
        # longdouble layouts; on double-double (PPC) it may, but should not.
        if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
            # 80-bit extended precision
            ld_ma.smallest_subnormal
            assert len(w) == 0
        elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
            # IEEE 754 128-bit
            ld_ma.smallest_subnormal
            assert len(w) == 0
        else:
            # Double double
            ld_ma.smallest_subnormal
            # This test may fail on some platforms
            assert len(w) == 0
def test_plausible_finfo():
    """finfo must report sane mantissa/exponent bounds for inexact types."""
    for ftype in (*np.sctypes['float'], *np.sctypes['complex']):
        info = np.finfo(ftype)
        assert_(info.nmant > 1)
        assert_(info.minexp < -1)
        assert_(info.maxexp > 1)
| bsd-3-clause | 140ba4d48849b7e21804c64ff591df2d | 35.095238 | 75 | 0.570486 | 3.392583 | false | true | false | false |
numpy/numpy | numpy/ma/bench.py | 12 | 4859 | #!/usr/bin/env python3
import timeit
import numpy
###############################################################################
# Global variables #
###############################################################################
# Small arrays
# 2x3 real/imaginary operands and their masked counterparts, reused by all
# of the small-array benchmarks below.
xs = numpy.random.uniform(-1, 1, 6).reshape(2, 3)
ys = numpy.random.uniform(-1, 1, 6).reshape(2, 3)
zs = xs + 1j * ys
m1 = [[True, False, False], [False, False, True]]
m2 = [[True, False, True], [False, False, True]]
nmxs = numpy.ma.array(xs, mask=m1)
nmys = numpy.ma.array(ys, mask=m2)
nmzs = numpy.ma.array(zs, mask=m1)
# Big arrays
# 100x100 operands; the masks here are value-derived boolean arrays rather
# than hand-written lists.
xl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100)
yl = numpy.random.uniform(-1, 1, 100*100).reshape(100, 100)
zl = xl + 1j * yl
maskx = xl > 0.8
masky = yl < -0.8
nmxl = numpy.ma.array(xl, mask=maskx)
nmyl = numpy.ma.array(yl, mask=masky)
nmzl = numpy.ma.array(zl, mask=maskx)
###############################################################################
# Functions #
###############################################################################
def timer(s, v='', nloop=500, nrep=3):
    """Time statement ``s`` with timeit and print the best per-loop time.

    Parameters
    ----------
    s : str
        Statement to benchmark; it may reference the module-level test
        arrays (``xs``, ``nmxs``, ..., ``zl``, ``nmzl``) and ``numpy``.
    v : str
        Label printed in front of the timing result.
    nloop : int
        Number of loop iterations per repeat.
    nrep : int
        Number of repeats; the best (minimum) time is reported.
    """
    units = ["s", "ms", "µs", "ns"]
    scaling = [1, 1e3, 1e6, 1e9]
    print("%s : %-50s : " % (v, s), end=' ')
    varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz']
    # Bug fix: the setup previously read ``from __main__ import numpy, ma, ...``
    # but no name ``ma`` is ever defined in this module (only ``import numpy``
    # at the top), so every timing run raised ImportError.  The benchmark
    # statements all spell it ``numpy.ma``, so only ``numpy`` and the test
    # arrays need to be imported into the timeit namespace.
    setup = 'from __main__ import numpy, %s' % ','.join(varnames)
    Timer = timeit.Timer(stmt=s, setup=setup)
    best = min(Timer.repeat(nrep, nloop)) / nloop
    if best > 0.0:
        # Choose the largest unit that keeps the value readable (cap at ns).
        order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3)
    else:
        order = 3
    print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep,
                                                      3,
                                                      best * scaling[order],
                                                      units[order]))
def compare_functions_1v(func, nloop=500,
                         xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
    """Benchmark a one-argument ufunc on the small and large masked arrays."""
    funcname = func.__name__
    module = "numpy.ma"
    print("-" * 50)
    print(f'{funcname} on small arrays')
    timer(f"{module}.{funcname}(nmxs)", v="%11s" % module, nloop=nloop)
    print("%s on large arrays" % funcname)
    timer(f"{module}.{funcname}(nmxl)", v="%11s" % module, nloop=nloop)
    return
def compare_methods(methodname, args, vars='x', nloop=500, test=True,
                    xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
    """Benchmark a masked-array method on the small and large test arrays.

    Parameters
    ----------
    methodname : str
        Name of the method to call (e.g. ``'ravel'``).
    args : str
        Literal argument text inserted into the benchmarked statement.
    vars : str
        Which operand family to use: 'x', 'y' or 'z'.
    nloop : int
        Loop count passed through to ``timer``.
    test : bool
        Unused; kept for interface compatibility with callers.
    """
    print("-"*50)
    print(f'{methodname} on small arrays')
    # Bug fix: this branch previously benchmarked the *large* array
    # (f'nm{vars}l') under the "small arrays" heading; use the small
    # arrays ('nm{vars}s') as labelled, matching compare_functions_1v.
    data, ver = f'nm{vars}s', 'numpy.ma'
    timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
    print("%s on large arrays" % methodname)
    data, ver = "nm%sl" % vars, 'numpy.ma'
    timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
    return
def compare_functions_2v(func, nloop=500, test=True,
                         xs=xs, nmxs=nmxs,
                         ys=ys, nmys=nmys,
                         xl=xl, nmxl=nmxl,
                         yl=yl, nmyl=nmyl):
    """Benchmark a two-argument ufunc on the small and large masked arrays."""
    funcname = func.__name__
    module = "numpy.ma"
    print("-" * 50)
    print(f'{funcname} on small arrays')
    timer(f"{module}.{funcname}(nmxs,nmys)", v="%11s" % module, nloop=nloop)
    print(f'{funcname} on large arrays')
    timer(f"{module}.{funcname}(nmxl,nmyl)", v="%11s" % module, nloop=nloop)
    return
if __name__ == '__main__':
    # One-argument ufuncs.
    compare_functions_1v(numpy.sin)
    compare_functions_1v(numpy.log)
    compare_functions_1v(numpy.sqrt)
    # Two-argument ufuncs.
    compare_functions_2v(numpy.multiply)
    compare_functions_2v(numpy.divide)
    compare_functions_2v(numpy.power)
    # Method calls; 'z' selects the complex operands for conjugate.
    compare_methods('ravel', '', nloop=1000)
    compare_methods('conjugate', '', 'z', nloop=1000)
    compare_methods('transpose', '', nloop=1000)
    compare_methods('compressed', '', nloop=1000)
    compare_methods('__getitem__', '0', nloop=1000)
    compare_methods('__getitem__', '(0,0)', nloop=1000)
    compare_methods('__getitem__', '[0,-1]', nloop=1000)
    compare_methods('__setitem__', '0, 17', nloop=1000, test=False)
    compare_methods('__setitem__', '(0,0), 17', nloop=1000, test=False)
    # Timings below call timer() directly with hand-written statements.
    print("-"*50)
    print("__setitem__ on small arrays")
    timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000)
    print("-"*50)
    print("__setitem__ on large arrays")
    timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ', nloop=10000)
    print("-"*50)
    print("where on small arrays")
    timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ', nloop=1000)
    print("-"*50)
    print("where on large arrays")
    timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ', nloop=100)
| bsd-3-clause | da1b107409d04ab6be2083feb0f869b6 | 36.369231 | 89 | 0.511733 | 3.16895 | false | false | false | false |
numpy/numpy | numpy/array_api/__init__.py | 6 | 10221 | """
A NumPy sub-namespace that conforms to the Python array API standard.
This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
is still considered experimental, and will issue a warning when imported.
This is a proof-of-concept namespace that wraps the corresponding NumPy
functions to give a conforming implementation of the Python array API standard
(https://data-apis.github.io/array-api/latest/). The standard is currently in
an RFC phase and comments on it are both welcome and encouraged. Comments
should be made either at https://github.com/data-apis/array-api or at
https://github.com/data-apis/consortium-feedback/discussions.
NumPy already follows the proposed spec for the most part, so this module
serves mostly as a thin wrapper around it. However, NumPy also implements a
lot of behavior that is not included in the spec, so this serves as a
restricted subset of the API. Only those functions that are part of the spec
are included in this namespace, and all functions are given with the exact
signature given in the spec, including the use of position-only arguments, and
omitting any extra keyword arguments implemented by NumPy but not part of the
spec. The behavior of some functions is also modified from the NumPy behavior
to conform to the standard. Note that the underlying array object itself is
wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule
is implemented in pure Python with no C extensions.
The array API spec is designed as a "minimal API subset" and explicitly allows
libraries to include behaviors not specified by it. But users of this module
that intend to write portable code should be aware that only those behaviors
that are listed in the spec are guaranteed to be implemented across libraries.
Consequently, the NumPy implementation was chosen to be both conforming and
minimal, so that users can use this implementation of the array API namespace
and be sure that behaviors that it defines will be available in conforming
namespaces from other libraries.
A few notes about the current state of this submodule:
- There is a test suite that tests modules against the array API standard at
https://github.com/data-apis/array-api-tests. The test suite is still a work
in progress, but the existing tests pass on this module, with a few
exceptions:
- DLPack support (see https://github.com/data-apis/array-api/pull/106) is
not included here, as it requires a full implementation in NumPy proper
first.
The test suite is not yet complete, and even the tests that exist are not
guaranteed to give a comprehensive coverage of the spec. Therefore, when
reviewing and using this submodule, you should refer to the standard
documents themselves. There are some tests in numpy.array_api.tests, but
they primarily focus on things that are not tested by the official array API
test suite.
- There is a custom array object, numpy.array_api.Array, which is returned by
all functions in this module. All functions in the array API namespace
implicitly assume that they will only receive this object as input. The only
way to create instances of this object is to use one of the array creation
functions. It does not have a public constructor on the object itself. The
object is a small wrapper class around numpy.ndarray. The main purpose of it
is to restrict the namespace of the array object to only those dtypes and
only those methods that are required by the spec, as well as to limit/change
certain behavior that differs in the spec. In particular:
- The array API namespace does not have scalar objects, only 0-D arrays.
Operations on Array that would create a scalar in NumPy create a 0-D
array.
- Indexing: Only a subset of indices supported by NumPy are required by the
spec. The Array object restricts indexing to only allow those types of
indices that are required by the spec. See the docstring of the
numpy.array_api.Array._validate_indices helper function for more
information.
- Type promotion: Some type promotion rules are different in the spec. In
particular, the spec does not have any value-based casting. The spec also
does not require cross-kind casting, like integer -> floating-point. Only
those promotions that are explicitly required by the array API
specification are allowed in this module. See NEP 47 for more info.
- Functions do not automatically call asarray() on their input, and will not
work if the input type is not Array. The exception is array creation
functions, and Python operators on the Array object, which accept Python
scalars of the same type as the array dtype.
- All functions include type annotations, corresponding to those given in the
spec (see _typing.py for definitions of some custom types). These do not
currently fully pass mypy due to some limitations in mypy.
- Dtype objects are just the NumPy dtype objects, e.g., float64 =
np.dtype('float64'). The spec does not require any behavior on these dtype
objects other than that they be accessible by name and be comparable by
equality, but it was considered too much extra complexity to create custom
objects to represent dtypes.
- All places where the implementations in this submodule are known to deviate
from their corresponding functions in NumPy are marked with "# Note:"
comments.
Still TODO in this module are:
- DLPack support for numpy.ndarray is still in progress. See
https://github.com/numpy/numpy/pull/19083.
- The copy=False keyword argument to asarray() is not yet implemented. This
requires support in numpy.asarray() first.
- Some functions are not yet fully tested in the array API test suite, and may
require updates that are not yet known until the tests are written.
- The spec is still in an RFC phase and may still have minor updates, which
will need to be reflected here.
- Complex number support in array API spec is planned but not yet finalized,
as are the fft extension and certain linear algebra functions such as eig
that require complex dtypes.
"""
import warnings
# Importing this namespace is explicitly experimental; stacklevel=2 makes
# the warning point at the importing module rather than this file.
warnings.warn(
    "The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2
)
# Version of the array API specification that this namespace targets.
__array_api_version__ = "2021.12"
# __all__ is built up incrementally below as each submodule is re-exported.
__all__ = ["__array_api_version__"]
from ._constants import e, inf, nan, pi
__all__ += ["e", "inf", "nan", "pi"]
from ._creation_functions import (
asarray,
arange,
empty,
empty_like,
eye,
from_dlpack,
full,
full_like,
linspace,
meshgrid,
ones,
ones_like,
tril,
triu,
zeros,
zeros_like,
)
__all__ += [
"asarray",
"arange",
"empty",
"empty_like",
"eye",
"from_dlpack",
"full",
"full_like",
"linspace",
"meshgrid",
"ones",
"ones_like",
"tril",
"triu",
"zeros",
"zeros_like",
]
from ._data_type_functions import (
astype,
broadcast_arrays,
broadcast_to,
can_cast,
finfo,
iinfo,
result_type,
)
__all__ += [
"astype",
"broadcast_arrays",
"broadcast_to",
"can_cast",
"finfo",
"iinfo",
"result_type",
]
from ._dtypes import (
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
bool,
)
__all__ += [
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float32",
"float64",
"bool",
]
from ._elementwise_functions import (
abs,
acos,
acosh,
add,
asin,
asinh,
atan,
atan2,
atanh,
bitwise_and,
bitwise_left_shift,
bitwise_invert,
bitwise_or,
bitwise_right_shift,
bitwise_xor,
ceil,
cos,
cosh,
divide,
equal,
exp,
expm1,
floor,
floor_divide,
greater,
greater_equal,
isfinite,
isinf,
isnan,
less,
less_equal,
log,
log1p,
log2,
log10,
logaddexp,
logical_and,
logical_not,
logical_or,
logical_xor,
multiply,
negative,
not_equal,
positive,
pow,
remainder,
round,
sign,
sin,
sinh,
square,
sqrt,
subtract,
tan,
tanh,
trunc,
)
__all__ += [
"abs",
"acos",
"acosh",
"add",
"asin",
"asinh",
"atan",
"atan2",
"atanh",
"bitwise_and",
"bitwise_left_shift",
"bitwise_invert",
"bitwise_or",
"bitwise_right_shift",
"bitwise_xor",
"ceil",
"cos",
"cosh",
"divide",
"equal",
"exp",
"expm1",
"floor",
"floor_divide",
"greater",
"greater_equal",
"isfinite",
"isinf",
"isnan",
"less",
"less_equal",
"log",
"log1p",
"log2",
"log10",
"logaddexp",
"logical_and",
"logical_not",
"logical_or",
"logical_xor",
"multiply",
"negative",
"not_equal",
"positive",
"pow",
"remainder",
"round",
"sign",
"sin",
"sinh",
"square",
"sqrt",
"subtract",
"tan",
"tanh",
"trunc",
]
# linalg is an extension in the array API spec, which is a sub-namespace. Only
# a subset of functions in it are imported into the top-level namespace.
from . import linalg
__all__ += ["linalg"]
from .linalg import matmul, tensordot, matrix_transpose, vecdot
__all__ += ["matmul", "tensordot", "matrix_transpose", "vecdot"]
from ._manipulation_functions import (
concat,
expand_dims,
flip,
permute_dims,
reshape,
roll,
squeeze,
stack,
)
__all__ += ["concat", "expand_dims", "flip", "permute_dims", "reshape", "roll", "squeeze", "stack"]
from ._searching_functions import argmax, argmin, nonzero, where
__all__ += ["argmax", "argmin", "nonzero", "where"]
from ._set_functions import unique_all, unique_counts, unique_inverse, unique_values
__all__ += ["unique_all", "unique_counts", "unique_inverse", "unique_values"]
from ._sorting_functions import argsort, sort
__all__ += ["argsort", "sort"]
from ._statistical_functions import max, mean, min, prod, std, sum, var
__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
from ._utility_functions import all, any
__all__ += ["all", "any"]
| bsd-3-clause | efe4b22526c911b4114130324f91a491 | 26.111406 | 99 | 0.681832 | 3.796805 | false | false | false | false |
numpy/numpy | numpy/array_api/_array_object.py | 11 | 43226 | """
Wrapper class around the ndarray object for the array API standard.
The array API standard defines some behaviors differently than ndarray, in
particular, type promotion rules are different (the standard has no
value-based casting). The standard also specifies a more limited subset of
array methods and functionalities than are implemented on ndarray. Since the
goal of the array_api namespace is to be a minimal implementation of the array
API standard, we need to define a separate wrapper class for the array_api
namespace.
The standard compliant class is only a wrapper class. It is *not* a subclass
of ndarray.
"""
from __future__ import annotations
import operator
from enum import IntEnum
from ._creation_functions import asarray
from ._dtypes import (
_all_dtypes,
_boolean_dtypes,
_integer_dtypes,
_integer_or_boolean_dtypes,
_floating_dtypes,
_numeric_dtypes,
_result_type,
_dtype_categories,
)
from typing import TYPE_CHECKING, Optional, Tuple, Union, Any, SupportsIndex
import types
if TYPE_CHECKING:
from ._typing import Any, PyCapsule, Device, Dtype
import numpy.typing as npt
import numpy as np
from numpy import array_api
class Array:
"""
n-d array object for the array API namespace.
See the docstring of :py:obj:`np.ndarray <numpy.ndarray>` for more
information.
This is a wrapper around numpy.ndarray that restricts the usage to only
those things that are required by the array API namespace. Note,
attributes on this object that start with a single underscore are not part
of the API specification and should only be used internally. This object
should not be constructed directly. Rather, use one of the creation
functions, such as asarray().
"""
_array: np.ndarray
# Use a custom constructor instead of __init__, as manually initializing
# this class is not supported API.
@classmethod
def _new(cls, x, /):
"""
This is a private method for initializing the array API Array
object.
Functions outside of the array_api submodule should not use this
method. Use one of the creation functions instead, such as
``asarray``.
"""
obj = super().__new__(cls)
# Note: The spec does not have array scalars, only 0-D arrays.
if isinstance(x, np.generic):
# Convert the array scalar to a 0-D array
x = np.asarray(x)
if x.dtype not in _all_dtypes:
raise TypeError(
f"The array_api namespace does not support the dtype '{x.dtype}'"
)
obj._array = x
return obj
# Prevent Array() from working
def __new__(cls, *args, **kwargs):
raise TypeError(
"The array_api Array object should not be instantiated directly. Use an array creation function, such as asarray(), instead."
)
# These functions are not required by the spec, but are implemented for
# the sake of usability.
def __str__(self: Array, /) -> str:
"""
Performs the operation __str__.
"""
return self._array.__str__().replace("array", "Array")
def __repr__(self: Array, /) -> str:
"""
Performs the operation __repr__.
"""
suffix = f", dtype={self.dtype.name})"
if 0 in self.shape:
prefix = "empty("
mid = str(self.shape)
else:
prefix = "Array("
mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix)
return prefix + mid + suffix
# This function is not required by the spec, but we implement it here for
# convenience so that np.asarray(np.array_api.Array) will work.
    def __array__(self, dtype: None | np.dtype[Any] = None) -> npt.NDArray[Any]:
        """
        Warning: this method is NOT part of the array API spec. Implementers
        of other libraries need not include it, and users should not assume it
        will be present in other implementations.

        It exists as a convenience so that ``np.asarray(array_api_array)``
        unwraps to the underlying ndarray.
        """
        return np.asarray(self._array, dtype=dtype)
# These are various helper functions to make the array behavior match the
# spec in places where it either deviates from or is more strict than
# NumPy behavior
    def _check_allowed_dtypes(self, other: bool | int | float | Array, dtype_category: str, op: str) -> Array:
        """
        Helper function for operators to only allow specific input dtypes.

        Returns ``other`` promoted to an Array (scalars are cast to
        ``self.dtype``), or ``NotImplemented`` when ``other`` is not a
        supported operand type.  Raises TypeError when either operand's
        dtype is outside ``dtype_category`` or the pair cannot promote.

        Use like

            other = self._check_allowed_dtypes(other, 'numeric', '__add__')
            if other is NotImplemented:
                return other
        """
        if self.dtype not in _dtype_categories[dtype_category]:
            raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
        if isinstance(other, (int, float, bool)):
            other = self._promote_scalar(other)
        elif isinstance(other, Array):
            if other.dtype not in _dtype_categories[dtype_category]:
                raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
        else:
            return NotImplemented

        # This will raise TypeError for type combinations that are not allowed
        # to promote in the spec (even if the NumPy array operator would
        # promote them).
        res_dtype = _result_type(self.dtype, other.dtype)
        if op.startswith("__i"):
            # Note: NumPy will allow in-place operators in some cases where
            # the type promoted operator does not match the left-hand side
            # operand. For example,

            # >>> a = np.array(1, dtype=np.int8)
            # >>> a += np.array(1, dtype=np.int16)

            # The spec explicitly disallows this.
            if res_dtype != self.dtype:
                raise TypeError(
                    f"Cannot perform {op} with dtypes {self.dtype} and {other.dtype}"
                )

        return other
# Helper function to match the type promotion rules in the spec
    def _promote_scalar(self, scalar):
        """
        Returns a promoted version of a Python scalar appropriate for use with
        operations on self.

        This may raise an OverflowError in cases where the scalar is an
        integer that is too large to fit in a NumPy integer dtype, or
        TypeError when the scalar type is incompatible with the dtype of self.
        """
        # Note: Only Python scalar types that match the array dtype are
        # allowed.  bool must be checked before int because bool is a
        # subclass of int in Python.
        if isinstance(scalar, bool):
            if self.dtype not in _boolean_dtypes:
                raise TypeError(
                    "Python bool scalars can only be promoted with bool arrays"
                )
        elif isinstance(scalar, int):
            if self.dtype in _boolean_dtypes:
                raise TypeError(
                    "Python int scalars cannot be promoted with bool arrays"
                )
        elif isinstance(scalar, float):
            if self.dtype not in _floating_dtypes:
                raise TypeError(
                    "Python float scalars can only be promoted with floating-point arrays."
                )
        else:
            raise TypeError("'scalar' must be a Python scalar")

        # Note: scalars are unconditionally cast to the same dtype as the
        # array.

        # Note: the spec only specifies integer-dtype/int promotion
        # behavior for integers within the bounds of the integer dtype.
        # Outside of those bounds we use the default NumPy behavior (either
        # cast or raise OverflowError).
        return Array._new(np.array(scalar, self.dtype))
@staticmethod
def _normalize_two_args(x1, x2) -> Tuple[Array, Array]:
"""
Normalize inputs to two arg functions to fix type promotion rules
NumPy deviates from the spec type promotion rules in cases where one
argument is 0-dimensional and the other is not. For example:
>>> import numpy as np
>>> a = np.array([1.0], dtype=np.float32)
>>> b = np.array(1.0, dtype=np.float64)
>>> np.add(a, b) # The spec says this should be float64
array([2.], dtype=float32)
To fix this, we add a dimension to the 0-dimension array before passing it
through. This works because a dimension would be added anyway from
broadcasting, so the resulting shape is the same, but this prevents NumPy
from not promoting the dtype.
"""
# Another option would be to use signature=(x1.dtype, x2.dtype, None),
# but that only works for ufuncs, so we would have to call the ufuncs
# directly in the operator methods. One should also note that this
# sort of trick wouldn't work for functions like searchsorted, which
# don't do normal broadcasting, but there aren't any functions like
# that in the array API namespace.
if x1.ndim == 0 and x2.ndim != 0:
# The _array[None] workaround was chosen because it is relatively
# performant. broadcast_to(x1._array, x2.shape) is much slower. We
# could also manually type promote x2, but that is more complicated
# and about the same performance as this.
x1 = Array._new(x1._array[None])
elif x2.ndim == 0 and x1.ndim != 0:
x2 = Array._new(x2._array[None])
return (x1, x2)
# Note: A large fraction of allowed indices are disallowed here (see the
# docstring below)
    def _validate_index(self, key):
        """
        Validate an index according to the array API.

        The array API specification only requires a subset of indices that are
        supported by NumPy. This function will reject any index that is
        allowed by NumPy but not required by the array API specification. We
        always raise ``IndexError`` on such indices (the spec does not require
        any specific behavior on them, but this makes the NumPy array API
        namespace a minimal implementation of the spec). See
        https://data-apis.org/array-api/latest/API_specification/indexing.html
        for the full list of required indexing behavior.

        This function raises IndexError if the index ``key`` is invalid. It
        only raises ``IndexError`` on indices that are not already rejected by
        NumPy, as NumPy will already raise the appropriate error on such
        indices.

        The following cases are allowed by NumPy, but not specified by the
        array API specification:

        - Indices that do not include an implicit ellipsis at the end. That
          is, every axis of an array must be explicitly indexed or an
          ellipsis included. This behaviour is sometimes referred to as flat
          indexing.

        - The start and stop of a slice may not be out of bounds. In
          particular, for a slice ``i:j:k`` on an axis of size ``n``, only
          the following are allowed:

          - ``i`` or ``j`` omitted (``None``).
          - ``-n <= i <= max(0, n - 1)``.
          - For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.
          - For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.

        - Boolean array indices are not allowed as part of a larger tuple
          index.

        - Integer array indices are not allowed (with the exception of 0-D
          arrays, which are treated the same as scalars).

        Additionally, it should be noted that indices that would return a
        scalar in NumPy will return a 0-D array. Array scalars are not allowed
        in the specification, only 0-D arrays. This is done in the
        ``Array._new`` constructor, not this function.
        """
        _key = key if isinstance(key, tuple) else (key,)
        # Pass 1: every single-axis index must be one of the types the spec
        # permits at all.
        for i in _key:
            if isinstance(i, bool) or not (
                isinstance(i, SupportsIndex)  # i.e. ints
                or isinstance(i, slice)
                or i == Ellipsis
                or i is None
                or isinstance(i, Array)
                or isinstance(i, np.ndarray)
            ):
                raise IndexError(
                    f"Single-axes index {i} has {type(i)=}, but only "
                    "integers, slices (:), ellipsis (...), newaxis (None), "
                    "zero-dimensional integer arrays and boolean arrays "
                    "are specified in the Array API."
                )

        # Pass 2: classify the index components, counting ellipses and
        # recording whether a boolean mask is present.
        nonexpanding_key = []
        single_axes = []
        n_ellipsis = 0
        key_has_mask = False
        for i in _key:
            if i is not None:
                nonexpanding_key.append(i)
                if isinstance(i, Array) or isinstance(i, np.ndarray):
                    if i.dtype in _boolean_dtypes:
                        key_has_mask = True
                    single_axes.append(i)
                else:
                    # i must not be an array here, to avoid elementwise equals
                    if i == Ellipsis:
                        n_ellipsis += 1
                    else:
                        single_axes.append(i)

        n_single_axes = len(single_axes)
        if n_ellipsis > 1:
            return  # handled by ndarray
        elif n_ellipsis == 0:
            # Note boolean masks must be the sole index, which we check for
            # later on.
            if not key_has_mask and n_single_axes < self.ndim:
                raise IndexError(
                    f"{self.ndim=}, but the multi-axes index only specifies "
                    f"{n_single_axes} dimensions. If this was intentional, "
                    "add a trailing ellipsis (...) which expands into as many "
                    "slices (:) as necessary - this is what np.ndarray arrays "
                    "implicitly do, but such flat indexing behaviour is not "
                    "specified in the Array API."
                )

        # Work out which array axes each single-axis index applies to (the
        # ellipsis, if any, absorbs the axes in the middle).
        if n_ellipsis == 0:
            indexed_shape = self.shape
        else:
            ellipsis_start = None
            for pos, i in enumerate(nonexpanding_key):
                if not (isinstance(i, Array) or isinstance(i, np.ndarray)):
                    if i == Ellipsis:
                        ellipsis_start = pos
                        break
            assert ellipsis_start is not None  # sanity check
            ellipsis_end = self.ndim - (n_single_axes - ellipsis_start)
            indexed_shape = (
                self.shape[:ellipsis_start] + self.shape[ellipsis_end:]
            )
        # Pass 3: per-axis validation of slices and array indices.
        for i, side in zip(single_axes, indexed_shape):
            if isinstance(i, slice):
                if side == 0:
                    f_range = "0 (or None)"
                else:
                    f_range = f"between -{side} and {side - 1} (or None)"
                if i.start is not None:
                    try:
                        start = operator.index(i.start)
                    except TypeError:
                        pass  # handled by ndarray
                    else:
                        if not (-side <= start <= side):
                            raise IndexError(
                                f"Slice {i} contains {start=}, but should be "
                                f"{f_range} for an axis of size {side} "
                                "(out-of-bounds starts are not specified in "
                                "the Array API)"
                            )
                if i.stop is not None:
                    try:
                        stop = operator.index(i.stop)
                    except TypeError:
                        pass  # handled by ndarray
                    else:
                        if not (-side <= stop <= side):
                            raise IndexError(
                                f"Slice {i} contains {stop=}, but should be "
                                f"{f_range} for an axis of size {side} "
                                "(out-of-bounds stops are not specified in "
                                "the Array API)"
                            )
            elif isinstance(i, Array):
                if i.dtype in _boolean_dtypes and len(_key) != 1:
                    assert isinstance(key, tuple)  # sanity check
                    raise IndexError(
                        f"Single-axes index {i} is a boolean array and "
                        f"{len(key)=}, but masking is only specified in the "
                        "Array API when the array is the sole index."
                    )
                elif i.dtype in _integer_dtypes and i.ndim != 0:
                    raise IndexError(
                        f"Single-axes index {i} is a non-zero-dimensional "
                        "integer array, but advanced integer indexing is not "
                        "specified in the Array API."
                    )
            elif isinstance(i, tuple):
                raise IndexError(
                    f"Single-axes index {i} is a tuple, but nested tuple "
                    "indices are not specified in the Array API."
                )
# Everything below this line is required by the spec.
def __abs__(self: Array, /) -> Array:
"""
Performs the operation __abs__.
"""
if self.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in __abs__")
res = self._array.__abs__()
return self.__class__._new(res)
def __add__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __add__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__add__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__add__(other._array)
return self.__class__._new(res)
def __and__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __and__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__and__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__and__(other._array)
return self.__class__._new(res)
def __array_namespace__(
self: Array, /, *, api_version: Optional[str] = None
) -> types.ModuleType:
if api_version is not None and not api_version.startswith("2021."):
raise ValueError(f"Unrecognized array API version: {api_version!r}")
return array_api
def __bool__(self: Array, /) -> bool:
"""
Performs the operation __bool__.
"""
# Note: This is an error here.
if self._array.ndim != 0:
raise TypeError("bool is only allowed on arrays with 0 dimensions")
if self.dtype not in _boolean_dtypes:
raise ValueError("bool is only allowed on boolean arrays")
res = self._array.__bool__()
return res
    def __dlpack__(self: Array, /, *, stream: None = None) -> PyCapsule:
        """
        Performs the operation __dlpack__.

        Delegates directly to the wrapped ndarray's DLPack export.
        """
        return self._array.__dlpack__(stream=stream)
    def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]:
        """
        Performs the operation __dlpack_device__.

        Returns the (device type, device id) pair of the wrapped ndarray.
        """
        # Note: device support is required for this
        return self._array.__dlpack_device__()
def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
"""
Performs the operation __eq__.
"""
# Even though "all" dtypes are allowed, we still require them to be
# promotable with each other.
other = self._check_allowed_dtypes(other, "all", "__eq__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__eq__(other._array)
return self.__class__._new(res)
def __float__(self: Array, /) -> float:
"""
Performs the operation __float__.
"""
# Note: This is an error here.
if self._array.ndim != 0:
raise TypeError("float is only allowed on arrays with 0 dimensions")
if self.dtype not in _floating_dtypes:
raise ValueError("float is only allowed on floating-point arrays")
res = self._array.__float__()
return res
def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __floordiv__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__floordiv__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__floordiv__(other._array)
return self.__class__._new(res)
def __ge__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __ge__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__ge__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__ge__(other._array)
return self.__class__._new(res)
def __getitem__(
self: Array,
key: Union[
int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
],
/,
) -> Array:
"""
Performs the operation __getitem__.
"""
# Note: Only indices required by the spec are allowed. See the
# docstring of _validate_index
self._validate_index(key)
if isinstance(key, Array):
# Indexing self._array with array_api arrays can be erroneous
key = key._array
res = self._array.__getitem__(key)
return self._new(res)
def __gt__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __gt__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__gt__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__gt__(other._array)
return self.__class__._new(res)
def __int__(self: Array, /) -> int:
"""
Performs the operation __int__.
"""
# Note: This is an error here.
if self._array.ndim != 0:
raise TypeError("int is only allowed on arrays with 0 dimensions")
if self.dtype not in _integer_dtypes:
raise ValueError("int is only allowed on integer arrays")
res = self._array.__int__()
return res
def __index__(self: Array, /) -> int:
"""
Performs the operation __index__.
"""
res = self._array.__index__()
return res
def __invert__(self: Array, /) -> Array:
"""
Performs the operation __invert__.
"""
if self.dtype not in _integer_or_boolean_dtypes:
raise TypeError("Only integer or boolean dtypes are allowed in __invert__")
res = self._array.__invert__()
return self.__class__._new(res)
def __le__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __le__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__le__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__le__(other._array)
return self.__class__._new(res)
def __lshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __lshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__lshift__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__lshift__(other._array)
return self.__class__._new(res)
def __lt__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __lt__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__lt__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__lt__(other._array)
return self.__class__._new(res)
def __matmul__(self: Array, other: Array, /) -> Array:
"""
Performs the operation __matmul__.
"""
# matmul is not defined for scalars, but without this, we may get
# the wrong error message from asarray.
other = self._check_allowed_dtypes(other, "numeric", "__matmul__")
if other is NotImplemented:
return other
res = self._array.__matmul__(other._array)
return self.__class__._new(res)
def __mod__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __mod__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__mod__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__mod__(other._array)
return self.__class__._new(res)
def __mul__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __mul__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__mul__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__mul__(other._array)
return self.__class__._new(res)
def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
"""
Performs the operation __ne__.
"""
other = self._check_allowed_dtypes(other, "all", "__ne__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__ne__(other._array)
return self.__class__._new(res)
def __neg__(self: Array, /) -> Array:
"""
Performs the operation __neg__.
"""
if self.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in __neg__")
res = self._array.__neg__()
return self.__class__._new(res)
def __or__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __or__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__or__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__or__(other._array)
return self.__class__._new(res)
def __pos__(self: Array, /) -> Array:
"""
Performs the operation __pos__.
"""
if self.dtype not in _numeric_dtypes:
raise TypeError("Only numeric dtypes are allowed in __pos__")
res = self._array.__pos__()
return self.__class__._new(res)
def __pow__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __pow__.
"""
from ._elementwise_functions import pow
other = self._check_allowed_dtypes(other, "numeric", "__pow__")
if other is NotImplemented:
return other
# Note: NumPy's __pow__ does not follow type promotion rules for 0-d
# arrays, so we use pow() here instead.
return pow(self, other)
def __rshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __rshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__rshift__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rshift__(other._array)
return self.__class__._new(res)
def __setitem__(
self,
key: Union[
int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
],
value: Union[int, float, bool, Array],
/,
) -> None:
"""
Performs the operation __setitem__.
"""
# Note: Only indices required by the spec are allowed. See the
# docstring of _validate_index
self._validate_index(key)
if isinstance(key, Array):
# Indexing self._array with array_api arrays can be erroneous
key = key._array
self._array.__setitem__(key, asarray(value)._array)
def __sub__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __sub__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__sub__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__sub__(other._array)
return self.__class__._new(res)
# PEP 484 requires int to be a subtype of float, but __truediv__ should
# not accept int.
def __truediv__(self: Array, other: Union[float, Array], /) -> Array:
"""
Performs the operation __truediv__.
"""
other = self._check_allowed_dtypes(other, "floating-point", "__truediv__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__truediv__(other._array)
return self.__class__._new(res)
def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __xor__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__xor__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__xor__(other._array)
return self.__class__._new(res)
def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __iadd__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__iadd__")
if other is NotImplemented:
return other
self._array.__iadd__(other._array)
return self
def __radd__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __radd__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__radd__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__radd__(other._array)
return self.__class__._new(res)
def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __iand__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__iand__")
if other is NotImplemented:
return other
self._array.__iand__(other._array)
return self
def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __rand__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__rand__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rand__(other._array)
return self.__class__._new(res)
def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __ifloordiv__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__ifloordiv__")
if other is NotImplemented:
return other
self._array.__ifloordiv__(other._array)
return self
def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rfloordiv__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__rfloordiv__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rfloordiv__(other._array)
return self.__class__._new(res)
def __ilshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __ilshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__ilshift__")
if other is NotImplemented:
return other
self._array.__ilshift__(other._array)
return self
def __rlshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __rlshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__rlshift__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rlshift__(other._array)
return self.__class__._new(res)
    def __imatmul__(self: Array, other: Array, /) -> Array:
        """
        Performs the operation __imatmul__.

        In-place matrix multiply (``self @= other``). Only permitted when the
        product has the same shape as ``self``, i.e. when ``other`` is square
        in its last two dimensions; otherwise raises ValueError.
        """
        # Note: NumPy does not implement __imatmul__.
        # matmul is not defined for scalars, but without this, we may get
        # the wrong error message from asarray.
        other = self._check_allowed_dtypes(other, "numeric", "__imatmul__")
        if other is NotImplemented:
            return other
        # __imatmul__ can only be allowed when it would not change the shape
        # of self.
        other_shape = other.shape
        if self.shape == () or other_shape == ():
            raise ValueError("@= requires at least one dimension")
        if len(other_shape) == 1 or other_shape[-1] != other_shape[-2]:
            raise ValueError("@= cannot change the shape of the input array")
        # Write the product back into self's existing buffer.
        self._array[:] = self._array.__matmul__(other._array)
        return self
def __rmatmul__(self: Array, other: Array, /) -> Array:
"""
Performs the operation __rmatmul__.
"""
# matmul is not defined for scalars, but without this, we may get
# the wrong error message from asarray.
other = self._check_allowed_dtypes(other, "numeric", "__rmatmul__")
if other is NotImplemented:
return other
res = self._array.__rmatmul__(other._array)
return self.__class__._new(res)
def __imod__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __imod__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__imod__")
if other is NotImplemented:
return other
self._array.__imod__(other._array)
return self
def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rmod__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__rmod__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rmod__(other._array)
return self.__class__._new(res)
def __imul__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __imul__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__imul__")
if other is NotImplemented:
return other
self._array.__imul__(other._array)
return self
def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rmul__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__rmul__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rmul__(other._array)
return self.__class__._new(res)
def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __ior__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__ior__")
if other is NotImplemented:
return other
self._array.__ior__(other._array)
return self
def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __ror__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__ror__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__ror__(other._array)
return self.__class__._new(res)
def __ipow__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __ipow__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__ipow__")
if other is NotImplemented:
return other
self._array.__ipow__(other._array)
return self
def __rpow__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rpow__.
"""
from ._elementwise_functions import pow
other = self._check_allowed_dtypes(other, "numeric", "__rpow__")
if other is NotImplemented:
return other
# Note: NumPy's __pow__ does not follow the spec type promotion rules
# for 0-d arrays, so we use pow() here instead.
return pow(other, self)
def __irshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __irshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__irshift__")
if other is NotImplemented:
return other
self._array.__irshift__(other._array)
return self
def __rrshift__(self: Array, other: Union[int, Array], /) -> Array:
"""
Performs the operation __rrshift__.
"""
other = self._check_allowed_dtypes(other, "integer", "__rrshift__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rrshift__(other._array)
return self.__class__._new(res)
def __isub__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __isub__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__isub__")
if other is NotImplemented:
return other
self._array.__isub__(other._array)
return self
def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array:
"""
Performs the operation __rsub__.
"""
other = self._check_allowed_dtypes(other, "numeric", "__rsub__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rsub__(other._array)
return self.__class__._new(res)
def __itruediv__(self: Array, other: Union[float, Array], /) -> Array:
"""
Performs the operation __itruediv__.
"""
other = self._check_allowed_dtypes(other, "floating-point", "__itruediv__")
if other is NotImplemented:
return other
self._array.__itruediv__(other._array)
return self
def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array:
"""
Performs the operation __rtruediv__.
"""
other = self._check_allowed_dtypes(other, "floating-point", "__rtruediv__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rtruediv__(other._array)
return self.__class__._new(res)
def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __ixor__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__ixor__")
if other is NotImplemented:
return other
self._array.__ixor__(other._array)
return self
def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array:
"""
Performs the operation __rxor__.
"""
other = self._check_allowed_dtypes(other, "integer or boolean", "__rxor__")
if other is NotImplemented:
return other
self, other = self._normalize_two_args(self, other)
res = self._array.__rxor__(other._array)
return self.__class__._new(res)
def to_device(self: Array, device: Device, /, stream: None = None) -> Array:
if stream is not None:
raise ValueError("The stream argument to to_device() is not supported")
if device == 'cpu':
return self
raise ValueError(f"Unsupported device {device!r}")
    @property
    def dtype(self) -> Dtype:
        """
        Array API compatible wrapper for :py:attr:`np.ndarray.dtype <numpy.ndarray.dtype>`.
        See its docstring for more information.
        """
        # Plain delegation to the wrapped NumPy array.
        return self._array.dtype
    @property
    def device(self) -> Device:
        # This wrapper is CPU-only, so every array reports the "cpu" device.
        return "cpu"
    # Note: mT is new in array API spec (see matrix_transpose)
    @property
    def mT(self) -> Array:
        # Delegates to linalg.matrix_transpose; imported lazily to avoid a
        # circular import at module load time.
        from .linalg import matrix_transpose
        return matrix_transpose(self)
    @property
    def ndim(self) -> int:
        """
        Array API compatible wrapper for :py:attr:`np.ndarray.ndim <numpy.ndarray.ndim>`.
        See its docstring for more information.
        """
        # Plain delegation to the wrapped NumPy array.
        return self._array.ndim
    @property
    def shape(self) -> Tuple[int, ...]:
        """
        Array API compatible wrapper for :py:attr:`np.ndarray.shape <numpy.ndarray.shape>`.
        See its docstring for more information.
        """
        # Plain delegation to the wrapped NumPy array.
        return self._array.shape
    @property
    def size(self) -> int:
        """
        Array API compatible wrapper for :py:attr:`np.ndarray.size <numpy.ndarray.size>`.
        See its docstring for more information.
        """
        # Plain delegation to the wrapped NumPy array.
        return self._array.size
    @property
    def T(self) -> Array:
        """
        Array API compatible wrapper for :py:attr:`np.ndarray.T <numpy.ndarray.T>`.
        See its docstring for more information.
        """
        # Note: T only works on 2-dimensional arrays. See the corresponding
        # note in the specification:
        # https://data-apis.org/array-api/latest/API_specification/array_object.html#t
        if self.ndim != 2:
            raise ValueError("x.T requires x to have 2 dimensions. Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.")
        return self.__class__._new(self._array.T)
| bsd-3-clause | e79d7e93aeb471d42b97d0c40aedfd94 | 37.663685 | 151 | 0.556262 | 4.351319 | false | false | false | false |
django-oscar/django-oscar | src/oscar/management/commands/oscar_cleanup_alerts.py | 11 | 1727 | import logging
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from oscar.core.loading import get_model
ProductAlert = get_model('customer', 'ProductAlert')
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Command to remove all stale unconfirmed alerts
"""
help = "Check unconfirmed alerts and clean them up"
def add_arguments(self, parser):
parser.add_argument(
'--days',
dest='days',
default=0,
help='cleanup alerts older then DAYS from now.')
parser.add_argument(
'--hours',
dest='hours',
default=0,
help='cleanup alerts older then HOURS from now.')
def handle(self, *args, **options):
"""
Generate a threshold date from the input options or 24 hours
if no options specified. All alerts that have the
status ``UNCONFIRMED`` and have been created before the
threshold date will be removed assuming that the emails
are wrong or the customer changed their mind.
"""
delta = timedelta(days=int(options['days']),
hours=int(options['hours']))
if not delta:
delta = timedelta(hours=24)
threshold_date = now() - delta
logger.info('Deleting unconfirmed alerts older than %s',
threshold_date.strftime("%Y-%m-%d %H:%M"))
qs = ProductAlert.objects.filter(
status=ProductAlert.UNCONFIRMED,
date_created__lt=threshold_date
)
logger.info("Found %d stale alerts to delete", qs.count())
qs.delete()
| bsd-3-clause | 406a6e2d47b0841679bf1ad08be12a7b | 30.4 | 68 | 0.605096 | 4.532808 | false | false | false | false |
django-oscar/django-oscar | src/oscar/apps/communication/notifications/views.py | 3 | 3424 | from django.conf import settings
from django.contrib import messages
from django.utils.html import strip_tags
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext
from django.views import generic
from oscar.core.loading import get_class, get_model
from oscar.core.utils import redirect_to_referrer
from oscar.views.generic import BulkEditMixin
PageTitleMixin = get_class('customer.mixins', 'PageTitleMixin')
Notification = get_model('communication', 'Notification')
class NotificationListView(PageTitleMixin, generic.ListView):
model = Notification
template_name = 'oscar/communication/notifications/list.html'
context_object_name = 'notifications'
paginate_by = settings.OSCAR_NOTIFICATIONS_PER_PAGE
page_title = _("Notifications")
active_tab = 'notifications'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['list_type'] = self.list_type
return ctx
class InboxView(NotificationListView):
list_type = 'inbox'
def get_queryset(self):
return self.model._default_manager.filter(
recipient=self.request.user,
location=self.model.INBOX)
class ArchiveView(NotificationListView):
list_type = 'archive'
def get_queryset(self):
return self.model._default_manager.filter(
recipient=self.request.user,
location=self.model.ARCHIVE)
class DetailView(PageTitleMixin, generic.DetailView):
model = Notification
template_name = 'oscar/communication/notifications/detail.html'
context_object_name = 'notification'
active_tab = 'notifications'
def get_object(self, queryset=None):
obj = super().get_object()
if not obj.date_read:
obj.date_read = now()
obj.save()
return obj
def get_page_title(self):
"""Append subject to page title"""
title = strip_tags(self.object.subject)
return '%s: %s' % (_('Notification'), title)
def get_queryset(self):
return self.model._default_manager.filter(
recipient=self.request.user)
class UpdateView(BulkEditMixin, generic.View):
model = Notification
http_method_names = ['post']
actions = ('archive', 'delete')
checkbox_object_name = 'notification'
def get_object_dict(self, ids):
return self.model.objects.filter(
recipient=self.request.user).in_bulk(ids)
def get_success_response(self):
return redirect_to_referrer(
self.request, 'communication:notifications-inbox')
def archive(self, request, notifications):
for notification in notifications:
notification.archive()
msg = ngettext(
'%(count)d notification archived',
'%(count)d notifications archived', len(notifications)) \
% {'count': len(notifications)}
messages.success(request, msg)
return self.get_success_response()
def delete(self, request, notifications):
for notification in notifications:
notification.delete()
msg = ngettext(
'%(count)d notification deleted',
'%(count)d notifications deleted', len(notifications)) \
% {'count': len(notifications)}
messages.success(request, msg)
return self.get_success_response()
| bsd-3-clause | 327e462e75abbfa63a8e9b9cfa0be1f1 | 31.923077 | 69 | 0.667348 | 4.211562 | false | false | false | false |
django-oscar/django-oscar | tests/integration/dashboard/test_voucher_views.py | 2 | 4803 | import pytest
from django.contrib.messages import get_messages
from django.urls import reverse
from oscar.apps.dashboard.vouchers import views
from oscar.core.loading import get_model
from oscar.test.factories import voucher
from oscar.test.factories.offer import ConditionalOfferFactory
from tests.fixtures import RequestFactory
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Voucher = get_model('voucher', 'Voucher')
VoucherSet = get_model('voucher', 'VoucherSet')
@pytest.fixture
def many_voucher_sets():
voucher.VoucherSetFactory.create_batch(30)
return VoucherSet.objects.all()
@pytest.mark.django_db
class TestDashboardVouchers:
def test_voucher_update_view_for_voucher_in_set(self):
vs = voucher.VoucherSetFactory(count=10)
v = vs.vouchers.first()
view = views.VoucherUpdateView.as_view()
request = RequestFactory().get('/')
response = view(request, pk=v.pk)
assert response.status_code == 302
assert response.url == reverse('dashboard:voucher-set-update', kwargs={'pk': vs.pk})
assert [(m.level_tag, str(m.message)) for m in get_messages(request)][0] == (
'warning', "The voucher can only be edited as part of its set")
data = {
'code': v.code,
'name': "New name",
'start_datetime': v.start_datetime,
'end_datetime': v.end_datetime,
'usage': v.usage,
'offers': [v.offers],
}
request = RequestFactory().post('/', data=data)
response = view(request, pk=v.pk)
assert response.status_code == 302
assert response.url == reverse('dashboard:voucher-set-update', kwargs={'pk': vs.pk})
assert [(m.level_tag, str(m.message)) for m in get_messages(request)][0] == (
'warning', "The voucher can only be edited as part of its set")
v.refresh_from_db()
assert v.name != "New name"
def test_voucher_delete_view(self):
v = voucher.VoucherFactory()
v.offers.add(ConditionalOfferFactory(offer_type=ConditionalOffer.VOUCHER))
assert Voucher.objects.count() == 1
assert ConditionalOffer.objects.count() == 1
request = RequestFactory().post('/')
response = views.VoucherDeleteView.as_view()(request, pk=v.pk)
assert Voucher.objects.count() == 0
# Related offer is not deleted
assert ConditionalOffer.objects.count() == 1
assert response.status_code == 302
assert response.url == reverse('dashboard:voucher-list')
assert [(m.level_tag, str(m.message)) for m in get_messages(request)][0] == ('warning', "Voucher deleted")
def test_voucher_delete_view_for_voucher_in_set(self):
vs = voucher.VoucherSetFactory(count=10)
assert Voucher.objects.count() == 10
request = RequestFactory().post('/')
response = views.VoucherDeleteView.as_view()(request, pk=vs.vouchers.first().pk)
vs.refresh_from_db()
assert vs.count == 9 # "count" is updated
assert Voucher.objects.count() == 9
assert response.status_code == 302
assert response.url == reverse('dashboard:voucher-set-detail', kwargs={'pk': vs.pk})
assert [(m.level_tag, str(m.message)) for m in get_messages(request)][0] == ('warning', "Voucher deleted")
@pytest.mark.django_db
class TestDashboardVoucherSets:
def test_voucher_set_list_view(self, rf, many_voucher_sets):
view = views.VoucherSetListView.as_view()
request = rf.get('/')
response = view(request)
# if these are missing the pagination is broken
assert response.context_data['paginator']
assert response.context_data['page_obj']
assert response.status_code == 200
def test_voucher_set_detail_view(self, rf):
voucher.VoucherSetFactory(count=10)
vs2 = voucher.VoucherSetFactory(count=15)
request = rf.get('/')
response = views.VoucherSetDetailView.as_view()(request, pk=vs2.pk)
# The view should only list vouchers for vs2
assert len(response.context_data['vouchers']) == 15
assert response.status_code == 200
def test_voucher_set_delete_view(self):
vs = voucher.VoucherSetFactory(count=10)
assert VoucherSet.objects.count() == 1
assert Voucher.objects.count() == 10
request = RequestFactory().post('/')
response = views.VoucherSetDeleteView.as_view()(request, pk=vs.pk)
assert VoucherSet.objects.count() == 0
assert Voucher.objects.count() == 0
assert response.status_code == 302
assert response.url == reverse('dashboard:voucher-set-list')
assert [(m.level_tag, str(m.message)) for m in get_messages(request)][0] == ('warning', "Voucher set deleted")
| bsd-3-clause | 94fcd0e42f7c368a38fe0654f6e18171 | 41.504425 | 118 | 0.647096 | 3.772977 | false | true | false | false |
django-oscar/django-oscar | src/oscar/apps/basket/admin.py | 31 | 1153 | from django.contrib import admin
from oscar.core.loading import get_model
Line = get_model('basket', 'line')
class LineInline(admin.TabularInline):
model = Line
readonly_fields = ('line_reference', 'product', 'price_excl_tax',
'price_incl_tax', 'price_currency', 'stockrecord')
class LineAdmin(admin.ModelAdmin):
list_display = ('id', 'basket', 'product', 'stockrecord', 'quantity',
'price_excl_tax', 'price_currency', 'date_created')
readonly_fields = ('basket', 'stockrecord', 'line_reference', 'product',
'price_currency', 'price_incl_tax', 'price_excl_tax',
'quantity')
class BasketAdmin(admin.ModelAdmin):
list_display = ('id', 'owner', 'status', 'num_lines',
'contains_a_voucher', 'date_created', 'date_submitted',
'time_before_submit')
readonly_fields = ('owner', 'date_merged', 'date_submitted')
inlines = [LineInline]
admin.site.register(get_model('basket', 'basket'), BasketAdmin)
admin.site.register(Line, LineAdmin)
admin.site.register(get_model('basket', 'LineAttribute'))
| bsd-3-clause | 785068cff264f183edf9b3e12b0871f6 | 35.03125 | 76 | 0.619254 | 3.695513 | false | false | false | false |
django-oscar/django-oscar | src/oscar/apps/customer/apps.py | 3 | 10953 | from django.contrib.auth.decorators import login_required
from django.urls import path, re_path
from django.utils.translation import gettext_lazy as _
from django.views import generic
from oscar.core.application import OscarConfig
from oscar.core.loading import get_class
class CustomerConfig(OscarConfig):
label = 'customer'
name = 'oscar.apps.customer'
verbose_name = _('Customer')
namespace = 'customer'
def ready(self):
from . import receivers # noqa
from .alerts import receivers # noqa
self.summary_view = get_class('customer.views', 'AccountSummaryView')
self.order_history_view = get_class('customer.views', 'OrderHistoryView')
self.order_detail_view = get_class('customer.views', 'OrderDetailView')
self.anon_order_detail_view = get_class('customer.views',
'AnonymousOrderDetailView')
self.order_line_view = get_class('customer.views', 'OrderLineView')
self.address_list_view = get_class('customer.views', 'AddressListView')
self.address_create_view = get_class('customer.views', 'AddressCreateView')
self.address_update_view = get_class('customer.views', 'AddressUpdateView')
self.address_delete_view = get_class('customer.views', 'AddressDeleteView')
self.address_change_status_view = get_class('customer.views',
'AddressChangeStatusView')
self.email_list_view = get_class('customer.views', 'EmailHistoryView')
self.email_detail_view = get_class('customer.views', 'EmailDetailView')
self.login_view = get_class('customer.views', 'AccountAuthView')
self.logout_view = get_class('customer.views', 'LogoutView')
self.register_view = get_class('customer.views', 'AccountRegistrationView')
self.profile_view = get_class('customer.views', 'ProfileView')
self.profile_update_view = get_class('customer.views', 'ProfileUpdateView')
self.profile_delete_view = get_class('customer.views', 'ProfileDeleteView')
self.change_password_view = get_class('customer.views', 'ChangePasswordView')
self.notification_inbox_view = get_class('communication.notifications.views',
'InboxView')
self.notification_archive_view = get_class('communication.notifications.views',
'ArchiveView')
self.notification_update_view = get_class('communication.notifications.views',
'UpdateView')
self.notification_detail_view = get_class('communication.notifications.views',
'DetailView')
self.alert_list_view = get_class('customer.alerts.views',
'ProductAlertListView')
self.alert_create_view = get_class('customer.alerts.views',
'ProductAlertCreateView')
self.alert_confirm_view = get_class('customer.alerts.views',
'ProductAlertConfirmView')
self.alert_cancel_view = get_class('customer.alerts.views',
'ProductAlertCancelView')
self.wishlists_add_product_view = get_class('customer.wishlists.views',
'WishListAddProduct')
self.wishlists_list_view = get_class('customer.wishlists.views',
'WishListListView')
self.wishlists_detail_view = get_class('customer.wishlists.views',
'WishListDetailView')
self.wishlists_create_view = get_class('customer.wishlists.views',
'WishListCreateView')
self.wishlists_create_with_product_view = get_class('customer.wishlists.views',
'WishListCreateView')
self.wishlists_update_view = get_class('customer.wishlists.views',
'WishListUpdateView')
self.wishlists_delete_view = get_class('customer.wishlists.views',
'WishListDeleteView')
self.wishlists_remove_product_view = get_class('customer.wishlists.views',
'WishListRemoveProduct')
self.wishlists_move_product_to_another_view = get_class(
'customer.wishlists.views', 'WishListMoveProductToAnotherWishList')
    def get_urls(self):
        """
        Return the URL patterns for the customer account area.

        Most views are wrapped in ``login_required``; the exceptions
        (login/registration, anonymous order status, alert creation and
        confirmation, shared wishlist detail) are noted inline.
        """
        urls = [
            # Login, logout and register don't require login
            path('login/', self.login_view.as_view(), name='login'),
            path('logout/', self.logout_view.as_view(), name='logout'),
            path('register/', self.register_view.as_view(), name='register'),
            path('', login_required(self.summary_view.as_view()), name='summary'),
            path('change-password/', login_required(self.change_password_view.as_view()), name='change-password'),
            # Profile
            path('profile/', login_required(self.profile_view.as_view()), name='profile-view'),
            path('profile/edit/', login_required(self.profile_update_view.as_view()), name='profile-update'),
            path('profile/delete/', login_required(self.profile_delete_view.as_view()), name='profile-delete'),
            # Order history
            path('orders/', login_required(self.order_history_view.as_view()), name='order-list'),
            # Anonymous order status page, keyed by order number plus a hash
            re_path(
                r'^order-status/(?P<order_number>[\w-]*)/(?P<hash>[A-z0-9-_=:]+)/$',
                self.anon_order_detail_view.as_view(), name='anon-order'
            ),
            path('orders/<str:order_number>/', login_required(self.order_detail_view.as_view()), name='order'),
            path(
                'orders/<str:order_number>/<int:line_id>/',
                login_required(self.order_line_view.as_view()),
                name='order-line'),
            # Address book
            path('addresses/', login_required(self.address_list_view.as_view()), name='address-list'),
            path('addresses/add/', login_required(self.address_create_view.as_view()), name='address-create'),
            path('addresses/<int:pk>/', login_required(self.address_update_view.as_view()), name='address-detail'),
            path(
                'addresses/<int:pk>/delete/',
                login_required(self.address_delete_view.as_view()),
                name='address-delete'),
            re_path(
                r'^addresses/(?P<pk>\d+)/(?P<action>default_for_(billing|shipping))/$',
                login_required(self.address_change_status_view.as_view()),
                name='address-change-status'),
            # Email history
            path('emails/', login_required(self.email_list_view.as_view()), name='email-list'),
            path('emails/<int:email_id>/', login_required(self.email_detail_view.as_view()), name='email-detail'),
            # Notifications
            # Redirect to notification inbox
            path(
                'notifications/', generic.RedirectView.as_view(url='/accounts/notifications/inbox/', permanent=False)),
            path(
                'notifications/inbox/',
                login_required(self.notification_inbox_view.as_view()),
                name='notifications-inbox'),
            path(
                'notifications/archive/',
                login_required(self.notification_archive_view.as_view()),
                name='notifications-archive'),
            path(
                'notifications/update/',
                login_required(self.notification_update_view.as_view()),
                name='notifications-update'),
            path(
                'notifications/<int:pk>/',
                login_required(self.notification_detail_view.as_view()),
                name='notifications-detail'),
            # Alerts
            # Alerts can be setup by anonymous users: some views do not
            # require login
            path('alerts/', login_required(self.alert_list_view.as_view()), name='alerts-list'),
            path('alerts/create/<int:pk>/', self.alert_create_view.as_view(), name='alert-create'),
            path('alerts/confirm/<str:key>/', self.alert_confirm_view.as_view(), name='alerts-confirm'),
            path('alerts/cancel/key/<str:key>/', self.alert_cancel_view.as_view(), name='alerts-cancel-by-key'),
            path(
                'alerts/cancel/<int:pk>/',
                login_required(self.alert_cancel_view.as_view()),
                name='alerts-cancel-by-pk'),
            # Wishlists
            path('wishlists/', login_required(self.wishlists_list_view.as_view()), name='wishlists-list'),
            path(
                'wishlists/add/<int:product_pk>/',
                login_required(self.wishlists_add_product_view.as_view()),
                name='wishlists-add-product'),
            # Same view and name as above: this variant targets a specific
            # wishlist via its key
            path(
                'wishlists/<str:key>/add/<int:product_pk>/',
                login_required(self.wishlists_add_product_view.as_view()),
                name='wishlists-add-product'),
            path(
                'wishlists/create/',
                login_required(self.wishlists_create_view.as_view()),
                name='wishlists-create'),
            path(
                'wishlists/create/with-product/<int:product_pk>/',
                login_required(self.wishlists_create_view.as_view()),
                name='wishlists-create-with-product'),
            # Wishlists can be publicly shared, no login required
            path('wishlists/<str:key>/', self.wishlists_detail_view.as_view(), name='wishlists-detail'),
            path(
                'wishlists/<str:key>/update/',
                login_required(self.wishlists_update_view.as_view()),
                name='wishlists-update'),
            path(
                'wishlists/<str:key>/delete/',
                login_required(self.wishlists_delete_view.as_view()),
                name='wishlists-delete'),
            path(
                'wishlists/<str:key>/lines/<int:line_pk>/delete/',
                login_required(self.wishlists_remove_product_view.as_view()),
                name='wishlists-remove-product'),
            # Same view and name as above: removal addressed by product id
            # rather than line id
            path(
                'wishlists/<str:key>/products/<int:product_pk>/delete/',
                login_required(self.wishlists_remove_product_view.as_view()),
                name='wishlists-remove-product'),
            path(
                'wishlists/<str:key>/lines/<int:line_pk>/move-to/<str:to_key>/',
                login_required(self.wishlists_move_product_to_another_view.as_view()),
                name='wishlists-move-product-to-another')
        ]
        return self.post_process_urls(urls)
| bsd-3-clause | fa328c50b41ebde3e51c4e1fce11cf27 | 53.765 | 119 | 0.563681 | 4.201381 | false | false | false | false |
class SurchargeList(list):
    """
    A plain list of ``SurchargePrice`` entries with a convenience ``total``.
    """

    @property
    def total(self):
        """Return the sum of the ``price`` of every surcharge in the list."""
        return sum(entry.price for entry in self)
class SurchargePrice():
    """Value object pairing a surcharge with the price calculated for it."""

    # Class-level defaults, overwritten per instance in __init__.
    surcharge = None
    price = None

    def __init__(self, surcharge, price):
        self.price = price
        self.surcharge = surcharge
class SurchargeApplicator():
    """Determines which (if any) surcharges apply to a basket."""

    def __init__(self, request=None, context=None):
        self.request = request
        self.context = context

    def get_surcharges(self, basket, **kwargs):
        """
        Return the candidate surcharges for the basket.

        For example::

            return (
                PercentageCharge(percentage=D("2.00")),
                FlatCharge(excl_tax=D("20.0"), incl_tax=D("20.0")),
            )

        Surcharges must implement the minimal API in ``oscar.apps.checkout.surcharges.BaseSurcharge``.
        Note that you can also make it a model if you want, just like shipping methods.
        """
        return ()

    def get_applicable_surcharges(self, basket, **kwargs):
        """Return a ``SurchargeList`` of applicable surcharges, or ``None``."""
        applicable = []
        for surcharge in self.get_surcharges(basket=basket, **kwargs):
            # Check applicability first, then price the surcharge - the same
            # order in which a filtered comprehension would evaluate.
            if not self.is_applicable(surcharge=surcharge, basket=basket, **kwargs):
                continue
            price = surcharge.calculate(basket=basket, **kwargs)
            applicable.append(SurchargePrice(surcharge, price))
        return SurchargeList(applicable) if applicable else None

    def is_applicable(self, surcharge, basket, **kwargs):
        """
        Checks if surcharge is applicable to certain conditions
        """
        return True
| bsd-3-clause | ee5c7b696ef5f3a6aac3682db90f0a40 | 27.072727 | 102 | 0.57772 | 4.253444 | false | false | false | false |
django-oscar/django-oscar | src/oscar/apps/catalogue/receivers.py | 3 | 1340 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from oscar.core.loading import get_model
Category = get_model("catalogue", "Category")
if settings.OSCAR_DELETE_IMAGE_FILES:
from django.db import models
from oscar.core.thumbnails import get_thumbnailer
ProductImage = get_model('catalogue', 'ProductImage')
def delete_image_files(sender, instance, **kwargs):
"""
Deletes the original image and created thumbnails.
"""
image_fields = (models.ImageField,)
thumbnailer = get_thumbnailer()
for field in instance._meta.fields:
if isinstance(field, image_fields):
# Make Django return ImageFieldFile instead of ImageField
field_file = getattr(instance, field.name)
thumbnailer.delete_thumbnails(field_file)
# Connect for all models with ImageFields - add as needed
models_with_images = [ProductImage, Category]
for sender in models_with_images:
post_delete.connect(delete_image_files, sender=sender)
@receiver(post_save, sender=Category, dispatch_uid='set_ancestors_are_public')
def post_save_set_ancestors_are_public(sender, instance, **kwargs):
instance.set_ancestors_are_public()
| bsd-3-clause | 8409d48a012f43d9c605d150fc7bf3ee | 34.263158 | 78 | 0.693284 | 4 | false | false | false | false |
django-oscar/django-oscar | src/oscar/apps/catalogue/migrations/0003_data_migration_slugs.py | 62 | 1457 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from oscar.core.loading import get_model
# Django database migrations require us to fetch the category model
# via apps.get_model to get an instance of the model at this point
# in time of migrations. That snapshot does not expose custom
# properties, only whatever is represented in the migration files.
# But because for our slug munging below, we need to know the slug
# separator, which is a custom property, we also load the actual
# ORM model. We MUST NOT use that to save data, we just fetch
# the property.
ORMCategory = get_model('catalogue', 'Category')
def remove_ancestor_slugs(apps, schema_editor):
    """Reduce each category's slug to its final (own) segment."""
    HistoricalCategory = apps.get_model('catalogue', 'Category')
    separator = ORMCategory._slug_separator
    for node in HistoricalCategory.objects.all():
        # Keep only the part after the last separator.
        node.slug = node.slug.rsplit(separator, 1)[-1]
        node.save()
def add_ancestor_slugs(apps, schema_editor):
    """Restore the full, ancestor-prefixed slug on every category."""
    HistoricalCategory = apps.get_model('catalogue', 'Category')
    for node in HistoricalCategory.objects.all():
        # full_slug is a property of the real ORM model (not available on the
        # historical migration model), so look the category up there.
        node.slug = ORMCategory.objects.get(pk=node.pk).full_slug
        node.save()
class Migration(migrations.Migration):
    # Data migration: strips the ancestor prefix from every category slug.
    # Reversible - add_ancestor_slugs restores the full slugs.
    dependencies = [
        ('catalogue', '0002_auto_20150217_1221'),
    ]
    operations = [
        migrations.RunPython(remove_ancestor_slugs, add_ancestor_slugs),
    ]
| bsd-3-clause | 9f22fd113f90fd3c688bcec60254de6e | 32.883721 | 76 | 0.719286 | 3.885333 | false | false | false | false |
django-oscar/django-oscar | src/oscar/apps/payment/migrations/0001_initial.py | 11 | 4917 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields.autoslugfield
from django.conf import settings
from decimal import Decimal
class Migration(migrations.Migration):
    # Initial schema for the payment app: Bankcard, Source, SourceType and
    # Transaction models, plus the Source -> SourceType foreign key.
    dependencies = [
        ('order', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # A user's stored bankcard details.
        migrations.CreateModel(
            name='Bankcard',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('card_type', models.CharField(max_length=128, verbose_name='Card Type')),
                ('name', models.CharField(max_length=255, verbose_name='Name', blank=True)),
                ('number', models.CharField(max_length=32, verbose_name='Number')),
                ('expiry_date', models.DateField(verbose_name='Expiry Date')),
                ('partner_reference', models.CharField(max_length=255, verbose_name='Partner Reference', blank=True)),
                ('user', models.ForeignKey(verbose_name='User', related_name='bankcards', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
            ],
            options={
                'verbose_name_plural': 'Bankcards',
                'verbose_name': 'Bankcard',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # A payment source for an order, tracking allocated/debited/refunded
        # amounts in a single currency.
        migrations.CreateModel(
            name='Source',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('currency', models.CharField(default='GBP', max_length=12, verbose_name='Currency')),
                ('amount_allocated', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Amount Allocated')),
                ('amount_debited', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Amount Debited')),
                ('amount_refunded', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Amount Refunded')),
                ('reference', models.CharField(max_length=128, verbose_name='Reference', blank=True)),
                ('label', models.CharField(max_length=128, verbose_name='Label', blank=True)),
                ('order', models.ForeignKey(verbose_name='Order', related_name='sources', to='order.Order', on_delete=models.CASCADE)),
            ],
            options={
                'verbose_name_plural': 'Sources',
                'verbose_name': 'Source',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # The kind of payment source (e.g. a gateway), identified in forms by
        # an auto-generated slug code.
        migrations.CreateModel(
            name='SourceType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, verbose_name='Name')),
                ('code', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Code', editable=False, max_length=128, help_text='This is used within forms to identify this source type', blank=True)),
            ],
            options={
                'verbose_name_plural': 'Source Types',
                'verbose_name': 'Source Type',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # An individual transaction against a source, newest first.
        migrations.CreateModel(
            name='Transaction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('txn_type', models.CharField(max_length=128, verbose_name='Type', blank=True)),
                ('amount', models.DecimalField(max_digits=12, decimal_places=2, verbose_name='Amount')),
                ('reference', models.CharField(max_length=128, verbose_name='Reference', blank=True)),
                ('status', models.CharField(max_length=128, verbose_name='Status', blank=True)),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
                ('source', models.ForeignKey(verbose_name='Source', related_name='transactions', to='payment.Source', on_delete=models.CASCADE)),
            ],
            options={
                'ordering': ['-date_created'],
                'verbose_name_plural': 'Transactions',
                'verbose_name': 'Transaction',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Added after both models exist to avoid a forward reference.
        migrations.AddField(
            model_name='source',
            name='source_type',
            field=models.ForeignKey(verbose_name='Source Type', related_name='sources', to='payment.SourceType', on_delete=models.CASCADE),
            preserve_default=True,
        ),
    ]
| bsd-3-clause | 2d28076a30e0b416e6a554550015977c | 51.308511 | 242 | 0.576571 | 4.335979 | false | false | false | false |
django-oscar/django-oscar | src/oscar/apps/analytics/receivers.py | 3 | 4057 | import logging
from django.db import IntegrityError
from django.db.models import F
from django.dispatch import receiver
from oscar.apps.basket.signals import basket_addition
from oscar.apps.catalogue.signals import product_viewed
from oscar.apps.order.signals import order_placed
from oscar.apps.search.signals import user_search
from oscar.core.loading import get_model
ProductRecord = get_model('analytics', 'ProductRecord')
UserProductView = get_model('analytics', 'UserProductView')
UserRecord = get_model('analytics', 'UserRecord')
UserSearch = get_model('analytics', 'UserSearch')
# Helpers
logger = logging.getLogger('oscar.analytics')
def _update_counter(model, field_name, filter_kwargs, increment=1):
    """
    Efficiently updates a counter field by a given increment. Uses Django's
    update() call to fetch and update in one query.

    TODO: This has a race condition, we should use UPSERT here

    :param model: The model class of the recording model
    :param field_name: The name of the field to update
    :param filter_kwargs: Parameters to the ORM's filter() function to get the
                          correct instance
    :param increment: Amount to add to the counter (defaults to 1)
    """
    try:
        record = model.objects.filter(**filter_kwargs)
        affected = record.update(**{field_name: F(field_name) + increment})
        if not affected:
            # No matching record yet - create one with the counter set to the
            # increment. Build a fresh dict rather than mutating the caller's
            # ``filter_kwargs`` argument.
            create_kwargs = dict(filter_kwargs, **{field_name: increment})
            model.objects.create(**create_kwargs)
    except IntegrityError:  # pragma: no cover
        # get_or_create has a race condition (we should use upsert in supported)
        # databases. For now just ignore these errors
        logger.error(
            "IntegrityError when updating analytics counter for %s", model)
def _record_products_in_order(order):
    """Increment the purchase counter for every product in ``order``."""
    # surely there's a way to do this without causing a query for each line?
    for order_line in order.lines.all():
        _update_counter(ProductRecord, 'num_purchases',
                        {'product': order_line.product}, order_line.quantity)
def _record_user_order(user, order):
    """Fold ``order``'s totals into the aggregate record held for ``user``."""
    try:
        num_updated = UserRecord.objects.filter(user=user).update(
            num_orders=F('num_orders') + 1,
            num_order_lines=F('num_order_lines') + order.num_lines,
            num_order_items=F('num_order_items') + order.num_items,
            total_spent=F('total_spent') + order.total_incl_tax,
            date_last_order=order.date_placed)
        if not num_updated:
            # First recorded order for this user - create the record afresh.
            UserRecord.objects.create(
                user=user, num_orders=1, num_order_lines=order.num_lines,
                num_order_items=order.num_items,
                total_spent=order.total_incl_tax,
                date_last_order=order.date_placed)
    except IntegrityError:  # pragma: no cover
        logger.error(
            "IntegrityError in analytics when recording a user order.")
# Receivers
@receiver(product_viewed)
def receive_product_view(sender, product, user, **kwargs):
    """Record a product view against the product and, if known, the user."""
    if kwargs.get('raw'):
        # Signal fired with raw=True (e.g. fixture loading) - don't record.
        return
    _update_counter(ProductRecord, 'num_views', {'product': product})
    if not (user and user.is_authenticated):
        return
    _update_counter(UserRecord, 'num_product_views', {'user': user})
    UserProductView.objects.create(product=product, user=user)
@receiver(user_search)
def receive_product_search(sender, query, user, **kwargs):
    """Log a search query against an authenticated user."""
    if kwargs.get('raw', False):
        return
    if user and user.is_authenticated:
        UserSearch._default_manager.create(user=user, query=query)
@receiver(basket_addition)
def receive_basket_addition(sender, product, user, **kwargs):
    # Skip when the signal is fired with raw=True (e.g. fixture loading).
    if kwargs.get('raw', False):
        return
    _update_counter(
        ProductRecord, 'num_basket_additions', {'product': product})
    # Per-user counter only when we know who added the product.
    if user and user.is_authenticated:
        _update_counter(UserRecord, 'num_basket_additions', {'user': user})
@receiver(order_placed)
def receive_order_placed(sender, order, user, **kwargs):
    # Skip when the signal is fired with raw=True (e.g. fixture loading).
    if kwargs.get('raw', False):
        return
    _record_products_in_order(order)
    # Aggregate per-user stats only when the order belongs to a known user.
    if user and user.is_authenticated:
        _record_user_order(user, order)
| bsd-3-clause | c8806a8980f05340c1c9145d941a5199 | 35.881818 | 80 | 0.670446 | 3.830973 | false | false | false | false |
django-oscar/django-oscar | src/oscar/apps/dashboard/widgets.py | 5 | 3127 | import copy
import re
from django.forms import Widget
from django.urls import reverse
class RelatedFieldWidgetWrapper(Widget):
    """
    This class is a wrapper to a given widget to add the add icon for the
    Oscar dashboard.
    """
    template_name = 'oscar/dashboard/widgets/related_widget_wrapper.html'

    IS_POPUP_VALUE = '1'
    IS_POPUP_VAR = '_popup'
    TO_FIELD_VAR = '_to_field'

    def __init__(self, widget, rel):
        # Mirror the wrapped widget's public attributes so this wrapper can
        # stand in for it transparently.
        self.needs_multipart_form = widget.needs_multipart_form
        self.attrs = widget.attrs
        self.choices = widget.choices
        self.widget = widget
        self.rel = rel

    def __deepcopy__(self, memo):
        # Shallow-copy the wrapper but deep-copy the wrapped widget.
        clone = copy.copy(self)
        clone.widget = copy.deepcopy(self.widget, memo)
        clone.attrs = self.widget.attrs
        memo[id(self)] = clone
        return clone

    @property
    def is_hidden(self):
        return self.widget.is_hidden

    @property
    def media(self):
        return self.widget.media

    def get_related_url(self, info, action, *args):
        app_label, model_object_name = info
        # Convert the model's object name into lowercase, with dashes between
        # the camel-cased words
        spaced_name = re.sub('([a-z])([A-Z])', r'\1 \2', model_object_name)
        dashed_name = '-'.join(spaced_name.lower().split())
        # Does not specify current app
        return reverse("dashboard:%s-%s-%s" % (app_label, dashed_name, action), args=args)

    def get_context(self, name, value, attrs):
        rel_opts = self.rel.model._meta
        info = (rel_opts.app_label, rel_opts.object_name)
        self.widget.choices = self.choices
        query_pairs = [
            (RelatedFieldWidgetWrapper.TO_FIELD_VAR, self.rel.get_related_field().name),
            (RelatedFieldWidgetWrapper.IS_POPUP_VAR, RelatedFieldWidgetWrapper.IS_POPUP_VALUE),
        ]
        url_params = '&'.join("%s=%s" % pair for pair in query_pairs)
        context = {
            'rendered_widget': self.widget.render(name, value, attrs),
            'name': name,
            'url_params': url_params,
            'model': rel_opts.verbose_name,
        }
        # CRUD URLs for the related model; '__fk__' is substituted client-side.
        context['change_related_template_url'] = self.get_related_url(info, 'update', '__fk__')
        context['add_related_url'] = self.get_related_url(info, 'create')
        context['delete_related_template_url'] = self.get_related_url(info, 'delete', '__fk__')
        return context

    def value_from_datadict(self, data, files, name):
        return self.widget.value_from_datadict(data, files, name)

    def value_omitted_from_data(self, data, files, name):
        return self.widget.value_omitted_from_data(data, files, name)

    def id_for_label(self, id_):
        return self.widget.id_for_label(id_)
class RelatedMultipleFieldWidgetWrapper(RelatedFieldWidgetWrapper):
    # Multi-select variant of the wrapper; only the template differs.
    template_name = 'oscar/dashboard/widgets/related_multiple_widget_wrapper.html'
| bsd-3-clause | 72e4856b5a65d7ede88a3c386ce224cb | 33.744444 | 107 | 0.622642 | 3.6875 | false | false | false | false |
django-oscar/django-oscar | src/oscar/apps/offer/queryset.py | 1 | 2778 | from django.db import models
from oscar.core.loading import get_class
ExpandUpwardsCategoryQueryset = get_class("catalogue.expressions", "ExpandUpwardsCategoryQueryset")
class RangeQuerySet(models.query.QuerySet):
    """
    This queryset add ``contains_product`` which allows selecting the
    ranges that contain the product in question.
    """

    def _excluded_products_clause(self, product):
        """Q-filter matching ranges that do NOT exclude ``product``."""
        if product.structure != product.CHILD:
            return ~models.Q(excluded_products=product)
        # child products are excluded from a range if either they are
        # excluded, or their parent.
        return ~(
            models.Q(excluded_products=product)
            | models.Q(excluded_products__id=product.parent_id)
        )

    def _included_products_clause(self, product):
        """Q-filter matching ranges that explicitly include ``product``."""
        if product.structure != product.CHILD:
            return models.Q(included_products=product)
        # child products are included in a range if either they are
        # included, or their parent is included
        return models.Q(included_products=product) | models.Q(
            included_products__id=product.parent_id
        )

    def _productclasses_clause(self, product):
        """Q-filter matching ranges that include ``product`` via its class."""
        if product.structure != product.CHILD:
            return models.Q(classes__id=product.product_class_id)
        # child products are included in a range if their parent is
        # included in the range by means of their productclass.
        return models.Q(classes__products__parent_id=product.parent_id)

    def _get_category_ids(self, product):
        """Values queryset of ids of the categories the product lives in."""
        if product.structure != product.CHILD:
            return product.categories.values("id")
        # Since a child can not be in a category, it must be determined
        # which category the parent is in
        ProductCategory = product.productcategory_set.model
        return ProductCategory.objects.filter(
            product_id=product.parent_id).values("category_id")

    def contains_product(self, product):
        """Return the ranges that contain ``product``."""
        not_excluded = self._excluded_products_clause(product)
        # the wide query is used to determine which ranges have includes_all_products
        # turned on, we only need to look at explicit exclusions, the other
        # mechanism for adding a product to a range don't need to be checked
        wide = self.filter(not_excluded, includes_all_products=True)
        category_ids = self._get_category_ids(product)
        narrow = self.filter(
            not_excluded,
            self._included_products_clause(product)
            | models.Q(included_categories__in=ExpandUpwardsCategoryQueryset(category_ids))
            | self._productclasses_clause(product),
            includes_all_products=False,
        )
        return wide | narrow
| bsd-3-clause | 77e6d1b4d533de45ab3dfd143b2bff1a | 42.40625 | 110 | 0.651908 | 4.52443 | false | false | false | false |
django-oscar/django-oscar | src/oscar/apps/payment/forms.py | 2 | 9663 | import re
from calendar import monthrange
from datetime import date
from django import forms
from django.core import validators
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_class, get_model
from oscar.forms.mixins import PhoneNumberMixin
from . import bankcards
Country = get_model('address', 'Country')
BillingAddress = get_model('order', 'BillingAddress')
Bankcard = get_model('payment', 'Bankcard')
AbstractAddressForm = get_class('address.forms', 'AbstractAddressForm')
# List of card names for all the card types supported in payment.bankcards
VALID_CARDS = set([card_type[0] for card_type in bankcards.CARD_TYPES])
class BankcardNumberField(forms.CharField):
    """CharField that validates its input as a bankcard number."""

    def __init__(self, *args, **kwargs):
        defaults = {
            'max_length': 20,
            'widget': forms.TextInput(attrs={'autocomplete': 'off'}),
            'label': _("Card number")
        }
        if 'types' in kwargs:
            # Restrict accepted card types to a known subset of CARD_TYPES.
            self.accepted_cards = set(kwargs.pop('types'))
            unknown = self.accepted_cards - VALID_CARDS
            if unknown:
                raise ImproperlyConfigured('The following accepted_cards are '
                                           'unknown: %s' % unknown)
        defaults.update(kwargs)
        super().__init__(*args, **defaults)

    def clean(self, value):
        """
        Check if given CC number is valid and one of the
        card types we accept
        """
        # Strip everything that isn't a digit before validating.
        value = re.sub(r'\D+', '', (value or '').strip())
        if value and not bankcards.luhn(value):
            raise forms.ValidationError(
                _("Please enter a valid credit card number."))
        if hasattr(self, 'accepted_cards'):
            card_type = bankcards.bankcard_type(value)
            if card_type not in self.accepted_cards:
                raise forms.ValidationError(
                    _("%s cards are not accepted." % card_type))
        return super().clean(value)
class BankcardMonthWidget(forms.MultiWidget):
    """
    Widget containing two select boxes for selecting the month and year
    """

    def decompress(self, value):
        if value:
            return [value.month, value.year]
        return [None, None]

    def format_output(self, rendered_widgets):
        joined = ' '.join(rendered_widgets)
        return '<span style="white-space: nowrap">%s</span>' % joined
class BankcardMonthField(forms.MultiValueField):
    """
    A modified version of the snippet: http://djangosnippets.org/snippets/907/
    """
    default_error_messages = {
        'invalid_month': _('Enter a valid month.'),
        'invalid_year': _('Enter a valid year.'),
    }
    num_years = 5

    def __init__(self, *args, **kwargs):
        # Allow the number of years to be specified
        if 'num_years' in kwargs:
            self.num_years = kwargs.pop('num_years')
        errors = dict(self.default_error_messages)
        errors.update(kwargs.get('error_messages', {}))
        month_field = forms.ChoiceField(
            choices=self.month_choices(),
            error_messages={'invalid': errors['invalid_month']})
        year_field = forms.ChoiceField(
            choices=self.year_choices(),
            error_messages={'invalid': errors['invalid_year']})
        fields = (month_field, year_field)
        if 'widget' not in kwargs:
            kwargs['widget'] = BankcardMonthWidget(
                widgets=[month_field.widget, year_field.widget])
        super().__init__(fields, *args, **kwargs)

    def month_choices(self):
        # Subclasses provide the concrete choices.
        return []

    def year_choices(self):
        # Subclasses provide the concrete choices.
        return []
class BankcardExpiryMonthField(BankcardMonthField):
    """Expiry-date field: a month/year pair that must not be in the past."""
    num_years = 10

    def __init__(self, *args, **kwargs):
        today = date.today()
        defaults = {
            'required': True,
            'label': _("Valid to"),
            'initial': ["%.2d" % today.month, today.year]
        }
        defaults.update(kwargs)
        super().__init__(*args, **defaults)

    def month_choices(self):
        return [("%.2d" % month, "%.2d" % month) for month in range(1, 13)]

    def year_choices(self):
        first_year = date.today().year
        return [(year, year)
                for year in range(first_year, first_year + self.num_years)]

    def clean(self, value):
        expiry_date = super().clean(value)
        if expiry_date and date.today() > expiry_date:
            raise forms.ValidationError(
                _("The expiration date you entered is in the past."))
        return expiry_date

    def compress(self, data_list):
        if not data_list:
            return None
        month_value, year_value = data_list
        if year_value in validators.EMPTY_VALUES:
            raise forms.ValidationError(self.error_messages['invalid_year'])
        if month_value in validators.EMPTY_VALUES:
            raise forms.ValidationError(self.error_messages['invalid_month'])
        year, month = int(year_value), int(month_value)
        # find last day of the month
        last_day = monthrange(year, month)[1]
        return date(year, month, last_day)
class BankcardStartingMonthField(BankcardMonthField):
    """Optional "valid from" field: month/year that must not be in the future."""

    def __init__(self, *args, **kwargs):
        defaults = {
            'required': False,
            'label': _("Valid from"),
        }
        defaults.update(kwargs)
        super().__init__(*args, **defaults)

    def month_choices(self):
        # Blank option first as the field is optional.
        choices = [("%.2d" % month, "%.2d" % month) for month in range(1, 13)]
        return [("", "--")] + choices

    def year_choices(self):
        # Blank option first as the field is optional.
        this_year = date.today().year
        choices = [(year, year)
                   for year in range(this_year - self.num_years, this_year + 1)]
        return [("", "--")] + choices

    def clean(self, value):
        starting_date = super().clean(value)
        if starting_date and date.today() < starting_date:
            raise forms.ValidationError(
                _("The starting date you entered is in the future."))
        return starting_date

    def compress(self, data_list):
        if not data_list:
            return None
        month_value, year_value = data_list
        if year_value in validators.EMPTY_VALUES:
            raise forms.ValidationError(self.error_messages['invalid_year'])
        if month_value in validators.EMPTY_VALUES:
            raise forms.ValidationError(self.error_messages['invalid_month'])
        # Starting dates are anchored to the first day of the month.
        return date(int(year_value), int(month_value), 1)
class BankcardCCVField(forms.RegexField):
    """Card security code: a 3 or 4 digit number."""

    def __init__(self, *args, **kwargs):
        defaults = {
            'required': True,
            'label': _("CCV number"),
            'widget': forms.TextInput(attrs={'size': '5'}),
            'error_messages': {
                'invalid': _("Please enter a 3 or 4 digit number")},
            'help_text': _("This is the 3 or 4 digit security number "
                           "on the back of your bankcard")
        }
        defaults.update(kwargs)
        super().__init__(r'^\d{3,4}$', *args, **defaults)

    def clean(self, value):
        # Tolerate surrounding whitespace before the regex validation runs.
        if value is not None:
            value = value.strip()
        return super().clean(value)
class BankcardForm(forms.ModelForm):
    """Form for collecting bankcard details."""
    # By default, this number field will accept any number. The only validation
    # is whether it passes the luhn check. If you wish to only accept certain
    # types of card, you can pass a types kwarg to BankcardNumberField, e.g.
    #
    # BankcardNumberField(types=[bankcards.VISA, bankcards.VISA_ELECTRON,])
    number = BankcardNumberField()
    ccv = BankcardCCVField()
    start_month = BankcardStartingMonthField()
    expiry_month = BankcardExpiryMonthField()

    class Meta:
        model = Bankcard
        fields = ('number', 'start_month', 'expiry_month', 'ccv')

    def clean(self):
        data = self.cleaned_data
        number = data.get('number')
        ccv = data.get('ccv')
        if number and ccv and bankcards.is_amex(number) and len(ccv) != 4:
            raise forms.ValidationError(_(
                "American Express cards use a 4 digit security code"))
        return data

    def save(self, *args, **kwargs):
        # It doesn't really make sense to save directly from the form as saving
        # will obfuscate some of the card details which you normally need to
        # pass to a payment gateway. Better to use the bankcard property below
        # to get the cleaned up data, then once you've used the sensitive
        # details, you can save.
        raise RuntimeError("Don't save bankcards directly from form")

    @property
    def bankcard(self):
        """
        Return an instance of the Bankcard model (unsaved)
        """
        return Bankcard(
            number=self.cleaned_data['number'],
            expiry_date=self.cleaned_data['expiry_month'],
            start_date=self.cleaned_data['start_month'],
            ccv=self.cleaned_data['ccv'])
class BillingAddressForm(PhoneNumberMixin, AbstractAddressForm):
    # Form for capturing the billing address at checkout.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.set_country_queryset()
    def set_country_queryset(self):
        # Separate hook so subclasses can restrict the selectable countries.
        self.fields['country'].queryset = Country._default_manager.all()
    class Meta:
        model = BillingAddress
        fields = [
            'first_name', 'last_name',
            'line1', 'line2', 'line3', 'line4',
            'state', 'postcode', 'country',
        ]
| bsd-3-clause | 27063a445a4ba60da80f9174735356fa | 33.144876 | 79 | 0.575184 | 4.136558 | false | false | false | false |
django-oscar/django-oscar | tests/_site/myauth/models.py | 3 | 1632 | # -*- coding: utf-8 -*-
import re
from django.contrib.auth.models import BaseUserManager
from django.core import validators
from django.db import models
from django.utils.translation import gettext_lazy as _
from oscar.apps.customer.abstract_models import AbstractUser
class CustomUserManager(BaseUserManager):
    """Manager providing user/superuser creation for the custom user model."""

    def create_user(self, username, email, password):
        """
        Creates and saves a User with the given email and password.
        """
        if not email:
            raise ValueError('Users must have an email address')
        user = self.model(
            email=CustomUserManager.normalize_email(email),
            username=username,
            is_active=True,
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, username, email, password):
        """Create a regular user and promote it to admin/staff."""
        user = self.create_user(username, email, password=password)
        user.is_admin = True
        user.is_staff = True
        user.save(using=self._db)
        return user
class User(AbstractUser):
    """
    Custom user based on Oscar's AbstractUser
    """
    # Usernames are restricted to word characters plus @ . + - (see regex).
    username = models.CharField(
        _('username'), max_length=30, unique=True,
        help_text=_('Required. 30 characters or fewer. Letters, numbers and '
                    '@/./+/-/_ characters'),
        validators=[
            validators.RegexValidator(re.compile(r'^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
        ])
    # NOTE(review): judging by its label, this field appears to exist only to
    # exercise custom-user-model support in the test suite - confirm.
    extra_field = models.CharField(
        _('Nobody needs me'), max_length=5, blank=True)
    objects = CustomUserManager()
    class Meta:
        app_label = 'myauth'
| bsd-3-clause | 36f519ef9e8fb11433a49873cf585322 | 27.631579 | 106 | 0.616422 | 4.142132 | false | false | false | false |
django-oscar/django-oscar | tests/unit/offer/test_reports.py | 2 | 1269 | from django.test import TestCase
from oscar.apps.offer.reports import OfferReportGenerator
from oscar.test.factories import (
ConditionalOfferFactory, OrderDiscountFactory, create_order)
class OfferReportGeneratorTestCase(TestCase):
    def test_generator_queryset_and_annotation(self):
        """The generator aggregates discounts per offer, deleted ones included."""
        offer = ConditionalOfferFactory(pk=2)
        for amount in (2, 3):
            OrderDiscountFactory(offer_id=offer.pk, offer_name=offer.name,
                                 amount=amount, order=create_order())
        # Discount on a deleted offer
        OrderDiscountFactory(offer_id=1, offer_name="Deleted offer", amount=4,
                             order=create_order())
        rows = OfferReportGenerator().generate()
        self.assertEqual(rows.count(), 2)
        first, second = rows[0], rows[1]
        self.assertEqual(first["offer_id"], 2)
        self.assertEqual(first["display_offer_name"], offer.name)
        self.assertEqual(first["total_discount"], 5)
        self.assertEqual(first["offer"], offer.pk)
        self.assertEqual(second["offer_id"], 1)
        self.assertEqual(second["display_offer_name"], "Deleted offer")
        self.assertEqual(second["total_discount"], 4)
        self.assertEqual(second["offer"], None)
| bsd-3-clause | d95ad0165c6346c97a37c6f7f7ea802c | 47.807692 | 102 | 0.704492 | 3.833837 | false | true | false | false |
django-oscar/django-oscar | src/oscar/apps/partner/strategy.py | 1 | 13435 | from collections import namedtuple
from decimal import Decimal as D
from oscar.core.loading import get_class
Unavailable = get_class('partner.availability', 'Unavailable')
Available = get_class('partner.availability', 'Available')
StockRequiredAvailability = get_class('partner.availability', 'StockRequired')
UnavailablePrice = get_class('partner.prices', 'Unavailable')
FixedPrice = get_class('partner.prices', 'FixedPrice')
TaxInclusiveFixedPrice = get_class('partner.prices', 'TaxInclusiveFixedPrice')
# A container for policies
PurchaseInfo = namedtuple(
'PurchaseInfo', ['price', 'availability', 'stockrecord'])
class Selector(object):
    """
    Responsible for returning the appropriate strategy class for a given
    user/session.

    Three calling conventions are supported:

    #) ``strategy(request, user)`` -- determining prices/availability for a
       normal user browsing the site.
    #) ``strategy(user=user)`` -- offline processes with no request
       instance but a known user.
    #) ``strategy()`` -- offline processes with no user context at all,
       e.g. computing a price to store in a search index.
    """

    def strategy(self, request=None, user=None, **kwargs):
        """
        Return an instantiated strategy instance.
        """
        # Backwards-compatible default: pick the first stockrecord and
        # charge zero tax.
        return Default(request)
class Base(object):
    """
    The base strategy class.

    Given a product, a strategy is responsible for returning a
    ``PurchaseInfo`` instance, which bundles:

    - the stockrecord appropriate for this customer
    - a pricing policy instance
    - an availability policy instance
    """

    def __init__(self, request=None):
        self.request = request
        # Only remember authenticated users; anonymous sessions get None.
        if request and request.user.is_authenticated:
            self.user = request.user
        else:
            self.user = None

    def fetch_for_product(self, product, stockrecord=None):
        """
        Given a product, return a ``PurchaseInfo`` instance.

        The ``PurchaseInfo`` class is a named tuple with attributes:

        - ``price``: a pricing policy object.
        - ``availability``: an availability policy object.
        - ``stockrecord``: the stockrecord that is being used

        If a stockrecord is passed, the ``PurchaseInfo`` returned is the
        one appropriate for that product/stockrecord pair.
        """
        raise NotImplementedError(
            "A strategy class must define a fetch_for_product method "
            "for returning the availability and pricing "
            "information."
        )

    def fetch_for_parent(self, product):
        """
        Given a parent product, fetch a ``StockInfo`` instance.
        """
        raise NotImplementedError(
            "A strategy class must define a fetch_for_parent method "
            "for returning the availability and pricing "
            "information."
        )

    def fetch_for_line(self, line, stockrecord=None):
        """
        Given a basket line instance, fetch a ``PurchaseInfo`` instance.

        This hook exists so purchase info can be derived from a basket
        line's attributes -- e.g. "bundle" products storing contained SKUs
        on the line.  By default any line options are ignored; projects
        that need them should override this.
        """
        return self.fetch_for_product(line.product)
class Structured(Base):
    """
    A strategy base class that splits ``PurchaseInfo`` construction into
    three separate, overridable steps:

    #) selecting a stockrecord
    #) building a pricing policy
    #) building an availability policy
    """

    def fetch_for_product(self, product, stockrecord=None):
        """
        Return the appropriate ``PurchaseInfo`` instance.

        This method is not intended to be overridden.
        """
        if stockrecord is None:
            stockrecord = self.select_stockrecord(product)
        price = self.pricing_policy(product, stockrecord)
        availability = self.availability_policy(product, stockrecord)
        return PurchaseInfo(
            price=price,
            availability=availability,
            stockrecord=stockrecord)

    def fetch_for_parent(self, product):
        # Pair every child with its selected stockrecord, then derive the
        # parent's price and availability from those pairs.
        children_stock = self.select_children_stockrecords(product)
        return PurchaseInfo(
            price=self.parent_pricing_policy(product, children_stock),
            availability=self.parent_availability_policy(
                product, children_stock),
            stockrecord=None)

    def select_stockrecord(self, product):
        """
        Select the appropriate stockrecord
        """
        raise NotImplementedError(
            "A structured strategy class must define a "
            "'select_stockrecord' method")

    def select_children_stockrecords(self, product):
        """
        Select an appropriate stockrecord for every (public) child of a
        product, returned as (child, stockrecord) tuples.
        """
        return [(child, self.select_stockrecord(child))
                for child in product.children.public()]

    def pricing_policy(self, product, stockrecord):
        """
        Return the appropriate pricing policy
        """
        raise NotImplementedError(
            "A structured strategy class must define a "
            "'pricing_policy' method")

    def parent_pricing_policy(self, product, children_stock):
        raise NotImplementedError(
            "A structured strategy class must define a "
            "'parent_pricing_policy' method")

    def availability_policy(self, product, stockrecord):
        """
        Return the appropriate availability policy
        """
        raise NotImplementedError(
            "A structured strategy class must define a "
            "'availability_policy' method")

    def parent_availability_policy(self, product, children_stock):
        raise NotImplementedError(
            "A structured strategy class must define a "
            "'parent_availability_policy' method")
# Mixins - these can be used to construct the appropriate strategy class
class UseFirstStockRecord:
    """
    Stockrecord selection mixin for use with the ``Structured`` base
    strategy.  It fulfils a product with its first (normally only)
    stockrecord; products without one yield ``None``.
    """

    def select_stockrecord(self, product):
        # Index into the queryset instead of calling .first() so that no
        # extra database query is issued when the stockrecords were
        # prefetched via ProductQuerySet.base_queryset.
        records = product.stockrecords.all()
        try:
            return records[0]
        except IndexError:
            return None
class StockRequired(object):
    """
    Availability policy mixin for use with the ``Structured`` base
    strategy.  A product may only be bought when it has stock available
    (if stock is tracked for its product class).
    """

    def availability_policy(self, product, stockrecord):
        # No stockrecord at all -> cannot be bought.
        if not stockrecord:
            return Unavailable()
        if product.get_product_class().track_stock:
            return StockRequiredAvailability(stockrecord.net_stock_level)
        return Available()

    def parent_availability_policy(self, product, children_stock):
        # A parent product is available as soon as one of its children is.
        for child, stockrecord in children_stock:
            if self.availability_policy(child, stockrecord).is_available_to_buy:
                return Available()
        return Unavailable()
class NoTax(object):
    """
    Pricing policy mixin for use with the ``Structured`` base strategy.
    It charges zero tax and takes the base ``price`` from the stockrecord.
    """

    def pricing_policy(self, product, stockrecord):
        # A usable stockrecord with a price is required.
        if not stockrecord or stockrecord.price is None:
            return UnavailablePrice()
        return FixedPrice(
            currency=stockrecord.price_currency,
            excl_tax=stockrecord.price,
            tax=D('0.00'))

    def parent_pricing_policy(self, product, children_stock):
        stockrecords = [record for _, record in children_stock
                        if record is not None]
        if not stockrecords:
            return UnavailablePrice()
        # The parent's price is taken from the first child's record.
        first = stockrecords[0]
        return FixedPrice(
            currency=first.price_currency,
            excl_tax=first.price,
            tax=D('0.00'))
class FixedRateTax(object):
    """
    Pricing policy mixin for use with the ``Structured`` base strategy.
    It applies a fixed tax rate to the base price from the product's
    stockrecord.  The tax amount is quantized to two decimal places by
    default, using Decimal's default rounding behaviour.
    """
    rate = D('0')  # Subclass and specify the correct rate
    exponent = D('0.01')  # Default to two decimal places

    def _quantized_tax(self, product, stockrecord):
        # Shared tax computation for simple and parent products.
        rate = self.get_rate(product, stockrecord)
        exponent = self.get_exponent(stockrecord)
        return (stockrecord.price * rate).quantize(exponent)

    def pricing_policy(self, product, stockrecord):
        if not stockrecord or stockrecord.price is None:
            return UnavailablePrice()
        return TaxInclusiveFixedPrice(
            currency=stockrecord.price_currency,
            excl_tax=stockrecord.price,
            tax=self._quantized_tax(product, stockrecord))

    def parent_pricing_policy(self, product, children_stock):
        stockrecords = [record for _, record in children_stock
                        if record is not None]
        if not stockrecords:
            return UnavailablePrice()
        # Price/tax are taken from the first child's record.  Note this
        # returns a plain FixedPrice (not TaxInclusiveFixedPrice), matching
        # the behaviour for parent products.
        stockrecord = stockrecords[0]
        return FixedPrice(
            currency=stockrecord.price_currency,
            excl_tax=stockrecord.price,
            tax=self._quantized_tax(product, stockrecord))

    def get_rate(self, product, stockrecord):
        """
        Hook for plugging in support for varying tax rates per product.

        TODO: Needs tests.
        """
        return self.rate

    def get_exponent(self, stockrecord):
        """
        Hook for plugging in support for a varying exponent per currency.

        TODO: Needs tests.
        """
        return self.exponent
class DeferredTax(object):
    """
    Pricing policy mixin for use with the ``Structured`` base strategy.
    It leaves the product tax unspecified, which suits territories where
    tax isn't known until late in the checkout process.
    """

    def _price_from_record(self, record):
        # Tax is deliberately omitted: it will be determined later.
        return FixedPrice(
            currency=record.price_currency,
            excl_tax=record.price)

    def pricing_policy(self, product, stockrecord):
        if not stockrecord or stockrecord.price is None:
            return UnavailablePrice()
        return self._price_from_record(stockrecord)

    def parent_pricing_policy(self, product, children_stock):
        # The parent's price comes from the first child that has a
        # stockrecord at all.
        for _, record in children_stock:
            if record is not None:
                return self._price_from_record(record)
        return UnavailablePrice()
# Example strategy composed of above mixins. For real projects, it's likely
# you'll want to use a different pricing mixin as you'll probably want to
# charge tax!
class Default(UseFirstStockRecord, StockRequired, NoTax, Structured):
    """
    Default stock/price strategy that uses the first found stockrecord for a
    product, ensures that stock is available (unless the product class
    indicates that we don't need to track stock) and charges zero tax.

    Composed entirely from the mixins above; see each mixin for the
    corresponding policy.
    """
class UK(UseFirstStockRecord, StockRequired, FixedRateTax, Structured):
    """
    Sample strategy for the UK that:

    - uses the first stockrecord for each product (effectively assuming
      there is only one).
    - requires that a product has stock available to be bought
    - applies a fixed rate of tax on all products

    This is just a sample strategy used for internal development. It is not
    recommended to be used in production, especially as the tax rate is
    hard-coded.
    """
    # Use UK VAT rate (as of December 2013); update in a project-specific
    # strategy if the statutory rate changes.
    rate = D('0.20')
class US(UseFirstStockRecord, StockRequired, DeferredTax, Structured):
    """
    Sample strategy for the US.

    - uses the first stockrecord for each product (effectively assuming
      there is only one).
    - requires that a product has stock available to be bought
    - doesn't apply a tax to product prices (normally this will be done
      after the shipping address is entered).

    This is just a sample one used for internal development. It is not
    recommended to be used in production.
    """
| bsd-3-clause | 1dd9c656eb12491da4fd2d6ac24ca82d | 34.262467 | 116 | 0.657611 | 4.585324 | false | false | false | false |
django-oscar/django-oscar | src/oscar/apps/dashboard/catalogue/apps.py | 3 | 8509 | from django.urls import path
from django.utils.translation import gettext_lazy as _
from oscar.core.application import OscarDashboardConfig
from oscar.core.loading import get_class
class CatalogueDashboardConfig(OscarDashboardConfig):
    """
    Dashboard app config for catalogue management: products, categories,
    product classes, attribute option groups, options and stock alerts.
    """
    label = 'catalogue_dashboard'
    name = 'oscar.apps.dashboard.catalogue'
    verbose_name = _('Catalogue')

    # Staff-only by default; the map below additionally opens a subset of
    # product views to partner users.
    default_permissions = ['is_staff', ]
    # NOTE(review): ``_map`` is bound as an alias of ``permissions_map`` --
    # presumably for backwards compatibility; confirm before removing.
    permissions_map = _map = {
        'catalogue-product': (['is_staff'], ['partner.dashboard_access']),
        'catalogue-product-create': (['is_staff'],
                                     ['partner.dashboard_access']),
        'catalogue-product-list': (['is_staff'], ['partner.dashboard_access']),
        'catalogue-product-delete': (['is_staff'],
                                     ['partner.dashboard_access']),
        'catalogue-product-lookup': (['is_staff'],
                                     ['partner.dashboard_access']),
    }

    def ready(self):
        # Views are resolved lazily through get_class so that projects can
        # override them by forking the dashboard app.
        self.product_list_view = get_class('dashboard.catalogue.views',
                                           'ProductListView')
        self.product_lookup_view = get_class('dashboard.catalogue.views',
                                             'ProductLookupView')
        self.product_create_redirect_view = get_class('dashboard.catalogue.views',
                                                      'ProductCreateRedirectView')
        self.product_createupdate_view = get_class('dashboard.catalogue.views',
                                                   'ProductCreateUpdateView')
        self.product_delete_view = get_class('dashboard.catalogue.views',
                                             'ProductDeleteView')

        self.product_class_create_view = get_class('dashboard.catalogue.views',
                                                   'ProductClassCreateView')
        self.product_class_update_view = get_class('dashboard.catalogue.views',
                                                   'ProductClassUpdateView')
        self.product_class_list_view = get_class('dashboard.catalogue.views',
                                                 'ProductClassListView')
        self.product_class_delete_view = get_class('dashboard.catalogue.views',
                                                   'ProductClassDeleteView')

        self.category_list_view = get_class('dashboard.catalogue.views',
                                            'CategoryListView')
        self.category_detail_list_view = get_class('dashboard.catalogue.views',
                                                   'CategoryDetailListView')
        self.category_create_view = get_class('dashboard.catalogue.views',
                                              'CategoryCreateView')
        self.category_update_view = get_class('dashboard.catalogue.views',
                                              'CategoryUpdateView')
        self.category_delete_view = get_class('dashboard.catalogue.views',
                                              'CategoryDeleteView')

        self.stock_alert_view = get_class('dashboard.catalogue.views',
                                          'StockAlertListView')

        self.attribute_option_group_create_view = get_class('dashboard.catalogue.views',
                                                            'AttributeOptionGroupCreateView')
        self.attribute_option_group_list_view = get_class('dashboard.catalogue.views',
                                                          'AttributeOptionGroupListView')
        self.attribute_option_group_update_view = get_class('dashboard.catalogue.views',
                                                            'AttributeOptionGroupUpdateView')
        self.attribute_option_group_delete_view = get_class('dashboard.catalogue.views',
                                                            'AttributeOptionGroupDeleteView')

        self.option_list_view = get_class('dashboard.catalogue.views', 'OptionListView')
        self.option_create_view = get_class('dashboard.catalogue.views', 'OptionCreateView')
        self.option_update_view = get_class('dashboard.catalogue.views', 'OptionUpdateView')
        self.option_delete_view = get_class('dashboard.catalogue.views', 'OptionDeleteView')

    def get_urls(self):
        # URL names here must stay in sync with permissions_map above.
        urls = [
            path('products/<int:pk>/', self.product_createupdate_view.as_view(), name='catalogue-product'),
            path('products/create/', self.product_create_redirect_view.as_view(), name='catalogue-product-create'),
            path(
                'products/create/<slug:product_class_slug>/',
                self.product_createupdate_view.as_view(),
                name='catalogue-product-create'),
            path(
                'products/<int:parent_pk>/create-variant/',
                self.product_createupdate_view.as_view(),
                name='catalogue-product-create-child'),
            path('products/<int:pk>/delete/', self.product_delete_view.as_view(), name='catalogue-product-delete'),
            path('', self.product_list_view.as_view(), name='catalogue-product-list'),
            path('stock-alerts/', self.stock_alert_view.as_view(), name='stock-alert-list'),
            path('product-lookup/', self.product_lookup_view.as_view(), name='catalogue-product-lookup'),
            path('categories/', self.category_list_view.as_view(), name='catalogue-category-list'),
            path(
                'categories/<int:pk>/',
                self.category_detail_list_view.as_view(),
                name='catalogue-category-detail-list'),
            path(
                'categories/create/', self.category_create_view.as_view(),
                name='catalogue-category-create'),
            path(
                'categories/create/<int:parent>/',
                self.category_create_view.as_view(),
                name='catalogue-category-create-child'),
            path(
                'categories/<int:pk>/update/',
                self.category_update_view.as_view(),
                name='catalogue-category-update'),
            path(
                'categories/<int:pk>/delete/',
                self.category_delete_view.as_view(),
                name='catalogue-category-delete'),
            path(
                'product-type/create/',
                self.product_class_create_view.as_view(),
                name='catalogue-class-create'),
            path(
                'product-types/',
                self.product_class_list_view.as_view(),
                name='catalogue-class-list'),
            path(
                'product-type/<int:pk>/update/',
                self.product_class_update_view.as_view(),
                name='catalogue-class-update'),
            path(
                'product-type/<int:pk>/delete/',
                self.product_class_delete_view.as_view(),
                name='catalogue-class-delete'),
            path(
                'attribute-option-group/create/',
                self.attribute_option_group_create_view.as_view(),
                name='catalogue-attribute-option-group-create'),
            path(
                'attribute-option-group/',
                self.attribute_option_group_list_view.as_view(),
                name='catalogue-attribute-option-group-list'),
            # The RelatedFieldWidgetWrapper code does something funny with
            # placeholder urls, so it does need to match more than just a pk
            path(
                'attribute-option-group/<str:pk>/update/',
                self.attribute_option_group_update_view.as_view(),
                name='catalogue-attribute-option-group-update'),
            # The RelatedFieldWidgetWrapper code does something funny with
            # placeholder urls, so it does need to match more than just a pk
            path(
                'attribute-option-group/<str:pk>/delete/',
                self.attribute_option_group_delete_view.as_view(),
                name='catalogue-attribute-option-group-delete'),
            path('option/', self.option_list_view.as_view(), name='catalogue-option-list'),
            path('option/create/', self.option_create_view.as_view(), name='catalogue-option-create'),
            path('option/<str:pk>/update/', self.option_update_view.as_view(), name='catalogue-option-update'),
            path('option/<str:pk>/delete/', self.option_delete_view.as_view(), name='catalogue-option-delete'),
        ]
        return self.post_process_urls(urls)
| bsd-3-clause | 12c9c05a0878aaa5d994a180de51b182 | 55.350993 | 115 | 0.548008 | 4.735114 | false | false | false | false |
django-oscar/django-oscar | src/oscar/apps/catalogue/migrations/0024_remove_duplicate_attributes.py | 2 | 5106 | # Generated by Django 3.2.9 on 2022-01-25 19:01
import logging
from django.db import migrations
from django.db.models import CharField, Count, Value
from django.db.models.functions import Concat
from oscar.core.loading import get_model
# Needed for calling _get_value, the historical model can't be used for that.
NonHistoricalProductAttributeValue = get_model('catalogue', 'ProductAttributeValue')
logger = logging.getLogger(__name__)
def remove_duplicate_attributes(apps, schema_editor):
    """
    Removes duplicate attributes that have the same code and product class.

    Strategy per duplicate group:
    - exactly one attribute has values -> keep it, delete the rest;
    - none has values -> keep the last, delete the rest;
    - several have values -> move all values onto one attribute (asserting
      the types match), then delete the now-empty attributes.
    """
    ProductAttribute = apps.get_model('catalogue', 'ProductAttribute')
    # NOTE(review): fetched but never used below -- appears safe to remove.
    ProductAttributeValue = apps.get_model('catalogue', 'ProductAttributeValue')
    ProductClass = apps.get_model("catalogue", "ProductClass")

    # Instead of iterating over all attributes, we concat the code and product class pk
    # with a "|" so we can find duplicate attributes in one query.
    duplicate_attributes = ProductAttribute.objects.filter(product_class__isnull=False).annotate(
        code_and_product_class=Concat('code', Value('|'), 'product_class__pk', output_field=CharField())
    ).values('code_and_product_class').annotate(
        same_code_count=Count('code_and_product_class')
    ).filter(same_code_count__gt=1)

    for attribute in duplicate_attributes:
        # ``attribute`` here is a values() dict, not a model instance.
        attribute_code, product_class_pk = attribute["code_and_product_class"].split("|")
        product_class = ProductClass.objects.get(pk=product_class_pk)

        attributes = ProductAttribute.objects.filter(
            code=attribute_code,
            product_class=product_class
        )
        used_attributes = attributes.filter(productattributevalue__isnull=False)
        used_attribute_count = used_attributes.distinct().count()

        # In most cases, the used attributes count will be one or zero as
        # the dashboard will always show one attribute. If the used attribute
        # count is one, we exclude this from attributes and remove the others.
        # If it's zero, we pick the last created and delete others.
        if used_attribute_count == 1:
            attributes.exclude(pk=used_attributes.first().pk).delete()
            continue
        elif used_attribute_count == 0:
            attributes.exclude(pk=attributes.last().pk).delete()
            continue

        # If we found multiple attributes that have values linked to them,
        # we must move them to one attribute and then delete the others.
        # We can only do this if the value_types are all the same!
        ASSERTION_MESSAGE = """Duplicate attribute found with code: %s but different types!
        You could fix this by renaming the duplicate codes or by matching all types to one
        type and update the attribute values accordingly for their new type. After that you can
        re-run the migration.""" % attribute_code
        assert used_attributes.values("type").distinct().count() == 1, ASSERTION_MESSAGE

        # Choose one attribute that will be used to move to and others to be deleted.
        to_be_used_attribute = used_attributes.first()
        to_be_deleted_attributes = used_attributes.exclude(pk=to_be_used_attribute.pk)

        # NOTE(review): this re-binds the outer loop variable ``attribute``;
        # harmless (it is re-assigned at the top of each outer iteration)
        # but a distinct name would be clearer.
        for attribute in to_be_deleted_attributes:
            for attribute_value in attribute.productattributevalue_set.all():
                product = attribute_value.product
                # ProductAttributeValue has a unique together constraint on 'product' and 'attribute'.
                # This means, if the product of the current 'attribute_value' already has a ProductAttributeValue
                # linked to the 'to_be_used_attribute' attribute, we can't update the attribute on the
                # 'attribute_value' as this would raise an IntegrityError.
                to_be_used_attribute_value = to_be_used_attribute.productattributevalue_set.filter(product=product).first()
                if not to_be_used_attribute_value:
                    attribute_value.attribute = to_be_used_attribute
                    attribute_value.save()
                else:
                    msg = """Product with ID '%s' had more than one attribute value linked to an attribute
                    with code '%s'. We've kept the value '%s' and removed the value '%s' as this is the one you
                    would see in the dashboard when editing the product.
                    """ % (
                        product.id,
                        attribute.code,
                        NonHistoricalProductAttributeValue._get_value(to_be_used_attribute_value),
                        NonHistoricalProductAttributeValue._get_value(attribute_value)
                    )
                    logger.warning(msg)

            # Once the attribute values have been updated, we can safely remove the attribute instance.
            attribute.delete()
class Migration(migrations.Migration):

    dependencies = [
        ('catalogue', '0023_auto_20210824_1414'),
    ]

    operations = [
        # Reverse is a no-op: the deleted duplicate attributes cannot be
        # reconstructed.
        migrations.RunPython(remove_duplicate_attributes, migrations.RunPython.noop)
    ]
django-oscar/django-oscar | src/oscar/apps/partner/migrations/0001_initial.py | 24 | 6571 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields.autoslugfield
import oscar.models.fields
from django.conf import settings
class Migration(migrations.Migration):
    """
    Auto-generated initial schema for the ``partner`` app: ``Partner``,
    ``PartnerAddress``, ``StockAlert`` and ``StockRecord``.

    Do not hand-edit field definitions here; schema changes belong in new
    migrations.
    """

    dependencies = [
        ('catalogue', '0001_initial'),
        ('address', '0001_initial'),
        # The user model is swappable, so depend on whatever AUTH_USER_MODEL
        # points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Partner',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Code', max_length=128, editable=False, blank=True)),
                ('name', models.CharField(max_length=128, verbose_name='Name', blank=True)),
                ('users', models.ManyToManyField(related_name='partners', blank=True, verbose_name='Users', to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
                'verbose_name_plural': 'Fulfillment partners',
                'verbose_name': 'Fulfillment partner',
                'abstract': False,
                'permissions': (('dashboard_access', 'Can access dashboard'),),
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='PartnerAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(verbose_name='Title', max_length=64, blank=True, choices=[('Mr', 'Mr'), ('Miss', 'Miss'), ('Mrs', 'Mrs'), ('Ms', 'Ms'), ('Dr', 'Dr')])),
                ('first_name', models.CharField(max_length=255, verbose_name='First name', blank=True)),
                ('last_name', models.CharField(max_length=255, verbose_name='Last name', blank=True)),
                ('line1', models.CharField(max_length=255, verbose_name='First line of address')),
                ('line2', models.CharField(max_length=255, verbose_name='Second line of address', blank=True)),
                ('line3', models.CharField(max_length=255, verbose_name='Third line of address', blank=True)),
                ('line4', models.CharField(max_length=255, verbose_name='City', blank=True)),
                ('state', models.CharField(max_length=255, verbose_name='State/County', blank=True)),
                ('postcode', oscar.models.fields.UppercaseCharField(max_length=64, verbose_name='Post/Zip-code', blank=True)),
                ('search_text', models.TextField(editable=False, verbose_name='Search text - used only for searching addresses')),
                ('country', models.ForeignKey(verbose_name='Country', to='address.Country', on_delete=models.CASCADE)),
                ('partner', models.ForeignKey(verbose_name='Partner', related_name='addresses', to='partner.Partner', on_delete=models.CASCADE)),
            ],
            options={
                'verbose_name_plural': 'Partner addresses',
                'verbose_name': 'Partner address',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='StockAlert',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('threshold', models.PositiveIntegerField(verbose_name='Threshold')),
                ('status', models.CharField(default='Open', max_length=128, verbose_name='Status', choices=[('Open', 'Open'), ('Closed', 'Closed')])),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
                ('date_closed', models.DateTimeField(blank=True, verbose_name='Date Closed', null=True)),
            ],
            options={
                'ordering': ('-date_created',),
                'verbose_name_plural': 'Stock alerts',
                'verbose_name': 'Stock alert',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='StockRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('partner_sku', models.CharField(max_length=128, verbose_name='Partner SKU')),
                ('price_currency', models.CharField(default='GBP', max_length=12, verbose_name='Currency')),
                ('price_excl_tax', models.DecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Price (excl. tax)', null=True)),
                ('price_retail', models.DecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Price (retail)', null=True)),
                ('cost_price', models.DecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Cost Price', null=True)),
                ('num_in_stock', models.PositiveIntegerField(blank=True, verbose_name='Number in stock', null=True)),
                ('num_allocated', models.IntegerField(blank=True, verbose_name='Number allocated', null=True)),
                ('low_stock_threshold', models.PositiveIntegerField(blank=True, verbose_name='Low Stock Threshold', null=True)),
                ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
                ('date_updated', models.DateTimeField(auto_now=True, db_index=True, verbose_name='Date updated')),
                ('partner', models.ForeignKey(verbose_name='Partner', related_name='stockrecords', to='partner.Partner', on_delete=models.CASCADE)),
                ('product', models.ForeignKey(verbose_name='Product', related_name='stockrecords', to='catalogue.Product', on_delete=models.CASCADE)),
            ],
            options={
                'verbose_name_plural': 'Stock records',
                'verbose_name': 'Stock record',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # A partner may list each SKU at most once.
        migrations.AlterUniqueTogether(
            name='stockrecord',
            unique_together=set([('partner', 'partner_sku')]),
        ),
        # Added after StockRecord's creation to break the circular
        # StockAlert <-> StockRecord dependency.
        migrations.AddField(
            model_name='stockalert',
            name='stockrecord',
            field=models.ForeignKey(verbose_name='Stock Record', related_name='alerts', to='partner.StockRecord', on_delete=models.CASCADE),
            preserve_default=True,
        ),
    ]
| bsd-3-clause | f8f077dea80b5299054ebf6f5a580d1e | 58.736364 | 179 | 0.588799 | 4.253074 | false | false | false | false |
django-oscar/django-oscar | src/oscar/defaults.py | 2 | 7714 | from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
OSCAR_SHOP_NAME = 'Oscar'
OSCAR_SHOP_TAGLINE = ''
OSCAR_HOMEPAGE = reverse_lazy('catalogue:index')
# Dynamic class loading
OSCAR_DYNAMIC_CLASS_LOADER = 'oscar.core.loading.default_class_loader'
# Basket settings
OSCAR_BASKET_COOKIE_LIFETIME = 7 * 24 * 60 * 60
OSCAR_BASKET_COOKIE_OPEN = 'oscar_open_basket'
OSCAR_BASKET_COOKIE_SECURE = False
OSCAR_MAX_BASKET_QUANTITY_THRESHOLD = 10000
# Recently-viewed products
OSCAR_RECENTLY_VIEWED_COOKIE_LIFETIME = 7 * 24 * 60 * 60
OSCAR_RECENTLY_VIEWED_COOKIE_NAME = 'oscar_history'
OSCAR_RECENTLY_VIEWED_COOKIE_SECURE = False
OSCAR_RECENTLY_VIEWED_PRODUCTS = 20
# Currency
OSCAR_DEFAULT_CURRENCY = 'GBP'
# Paths
OSCAR_IMAGE_FOLDER = 'images/products/%Y/%m/'
OSCAR_DELETE_IMAGE_FILES = True
# Copy this image from oscar/static/img to your MEDIA_ROOT folder.
# It needs to be there so Sorl can resize it.
OSCAR_MISSING_IMAGE_URL = 'image_not_found.jpg'
# Address settings
OSCAR_REQUIRED_ADDRESS_FIELDS = ('first_name', 'last_name', 'line1',
'line4', 'postcode', 'country')
# Pagination settings
OSCAR_OFFERS_PER_PAGE = 20
OSCAR_PRODUCTS_PER_PAGE = 20
OSCAR_REVIEWS_PER_PAGE = 20
OSCAR_NOTIFICATIONS_PER_PAGE = 20
OSCAR_EMAILS_PER_PAGE = 20
OSCAR_ORDERS_PER_PAGE = 20
OSCAR_ADDRESSES_PER_PAGE = 20
OSCAR_STOCK_ALERTS_PER_PAGE = 20
OSCAR_DASHBOARD_ITEMS_PER_PAGE = 20
# Checkout
OSCAR_ALLOW_ANON_CHECKOUT = False
# Reviews
OSCAR_ALLOW_ANON_REVIEWS = True
OSCAR_MODERATE_REVIEWS = False
# Accounts
OSCAR_ACCOUNTS_REDIRECT_URL = 'customer:profile-view'
# This enables sending alert notifications/emails instantly when products get
# back in stock by listening to stock record update signals.
# This might impact performance for large numbers of stock record updates.
# Alternatively, the management command ``oscar_send_alerts`` can be used to
# run periodically, e.g. as a cron job. In this case eager alerts should be
# disabled.
OSCAR_EAGER_ALERTS = True
# Registration
OSCAR_SEND_REGISTRATION_EMAIL = True
OSCAR_FROM_EMAIL = 'oscar@example.com'
# Slug handling
OSCAR_SLUG_FUNCTION = 'oscar.core.utils.default_slugifier'
OSCAR_SLUG_MAP = {}
OSCAR_SLUG_BLACKLIST = []
OSCAR_SLUG_ALLOW_UNICODE = False
# Cookies
OSCAR_COOKIES_DELETE_ON_LOGOUT = ['oscar_recently_viewed_products', ]
# Offers
OSCAR_OFFERS_INCL_TAX = False
# Values (using the names of the model constants) from
# "offer.ConditionalOffer.TYPE_CHOICES"
OSCAR_OFFERS_IMPLEMENTED_TYPES = [
'SITE',
'VOUCHER',
]
# Hidden Oscar features, e.g. wishlists or reviews
OSCAR_HIDDEN_FEATURES = []
# Menu structure of the dashboard navigation
OSCAR_DASHBOARD_NAVIGATION = [
{
'label': _('Dashboard'),
'icon': 'fas fa-list',
'url_name': 'dashboard:index',
},
{
'label': _('Catalogue'),
'icon': 'fas fa-sitemap',
'children': [
{
'label': _('Products'),
'url_name': 'dashboard:catalogue-product-list',
},
{
'label': _('Product Types'),
'url_name': 'dashboard:catalogue-class-list',
},
{
'label': _('Categories'),
'url_name': 'dashboard:catalogue-category-list',
},
{
'label': _('Ranges'),
'url_name': 'dashboard:range-list',
},
{
'label': _('Low stock alerts'),
'url_name': 'dashboard:stock-alert-list',
},
{
'label': _('Options'),
'url_name': 'dashboard:catalogue-option-list',
},
]
},
{
'label': _('Fulfilment'),
'icon': 'fas fa-shopping-cart',
'children': [
{
'label': _('Orders'),
'url_name': 'dashboard:order-list',
},
{
'label': _('Statistics'),
'url_name': 'dashboard:order-stats',
},
{
'label': _('Partners'),
'url_name': 'dashboard:partner-list',
},
# The shipping method dashboard is disabled by default as it might
# be confusing. Weight-based shipping methods aren't hooked into
# the shipping repository by default (as it would make
# customising the repository slightly more difficult).
# {
# 'label': _('Shipping charges'),
# 'url_name': 'dashboard:shipping-method-list',
# },
]
},
{
'label': _('Customers'),
'icon': 'fas fa-users',
'children': [
{
'label': _('Customers'),
'url_name': 'dashboard:users-index',
},
{
'label': _('Stock alert requests'),
'url_name': 'dashboard:user-alert-list',
},
]
},
{
'label': _('Offers'),
'icon': 'fas fa-bullhorn',
'children': [
{
'label': _('Offers'),
'url_name': 'dashboard:offer-list',
},
{
'label': _('Vouchers'),
'url_name': 'dashboard:voucher-list',
},
{
'label': _('Voucher Sets'),
'url_name': 'dashboard:voucher-set-list',
},
],
},
{
'label': _('Content'),
'icon': 'fas fa-folder',
'children': [
{
'label': _('Pages'),
'url_name': 'dashboard:page-list',
},
{
'label': _('Email templates'),
'url_name': 'dashboard:comms-list',
},
{
'label': _('Reviews'),
'url_name': 'dashboard:reviews-list',
},
]
},
{
'label': _('Reports'),
'icon': 'fas fa-chart-bar',
'url_name': 'dashboard:reports-index',
},
]
OSCAR_DASHBOARD_DEFAULT_ACCESS_FUNCTION = 'oscar.apps.dashboard.nav.default_access_fn' # noqa
# Search facets
OSCAR_SEARCH_FACETS = {
'fields': {
# The key for these dicts will be used when passing facet data
# to the template. Same for the 'queries' dict below.
'product_class': {'name': _('Type'), 'field': 'product_class'},
'rating': {'name': _('Rating'), 'field': 'rating'},
# You can specify an 'options' element that will be passed to the
# SearchQuerySet.facet() call.
# For instance, with Elasticsearch backend, 'options': {'order': 'term'}
# will sort items in a facet by title instead of number of items.
# It's hard to get 'missing' to work
# correctly though as of Solr's hilarious syntax for selecting
# items without a specific facet:
# http://wiki.apache.org/solr/SimpleFacetParameters#facet.method
# 'options': {'missing': 'true'}
},
'queries': {
'price_range': {
'name': _('Price range'),
'field': 'price',
'queries': [
# This is a list of (name, query) tuples where the name will
# be displayed on the front-end.
(_('0 to 20'), '[0 TO 20]'),
(_('20 to 40'), '[20 TO 40]'),
(_('40 to 60'), '[40 TO 60]'),
(_('60+'), '[60 TO *]'),
]
},
},
}
OSCAR_PRODUCT_SEARCH_HANDLER = None
OSCAR_THUMBNAILER = 'oscar.core.thumbnails.SorlThumbnail'
OSCAR_URL_SCHEMA = 'http'
OSCAR_SAVE_SENT_EMAILS_TO_DB = True
| bsd-3-clause | 787feee56e8ea938cc5e7afbd6317876 | 29.370079 | 94 | 0.542779 | 3.631827 | false | false | false | false |
django-oscar/django-oscar | tests/_site/apps/catalogue/migrations/0016_auto_20190327_0757.py | 6 | 1394 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-27 07:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.11, 2019-03-27).

    Adds ``db_index=True`` to the typed value columns of
    ``catalogue.ProductAttributeValue`` (boolean, date, datetime, float,
    integer). Migration files are order-sensitive generated code; only
    comments are added here — changing an applied migration's operations
    would desynchronise it from the recorded migration history.
    """

    dependencies = [
        ('catalogue', '0015_product_is_public'),
    ]

    operations = [
        migrations.AlterField(
            model_name='productattributevalue',
            name='value_boolean',
            # NOTE(review): NullBooleanField is deprecated since Django 3.1
            # in favour of BooleanField(null=True); deliberately left as-is
            # in this historical migration.
            field=models.NullBooleanField(db_index=True, verbose_name='Boolean'),
        ),
        migrations.AlterField(
            model_name='productattributevalue',
            name='value_date',
            field=models.DateField(blank=True, db_index=True, null=True, verbose_name='Date'),
        ),
        migrations.AlterField(
            model_name='productattributevalue',
            name='value_datetime',
            field=models.DateTimeField(blank=True, db_index=True, null=True, verbose_name='DateTime'),
        ),
        migrations.AlterField(
            model_name='productattributevalue',
            name='value_float',
            field=models.FloatField(blank=True, db_index=True, null=True, verbose_name='Float'),
        ),
        migrations.AlterField(
            model_name='productattributevalue',
            name='value_integer',
            field=models.IntegerField(blank=True, db_index=True, null=True, verbose_name='Integer'),
        ),
    ]
| bsd-3-clause | 98c935daea128abe950df80b7dd259ea | 33.85 | 102 | 0.605452 | 4.315789 | false | false | false | false |
pandas-dev/pandas | pandas/core/_numba/kernels/min_max_.py | 2 | 1857 | """
Numba 1D min/max kernels that can be shared by
* Dataframe / Series
* groupby
* rolling / expanding
Mirrors pandas/_libs/window/aggregation.pyx
"""
from __future__ import annotations
import numba
import numpy as np
@numba.jit(nopython=True, nogil=True, parallel=False)
def sliding_min_max(
    values: np.ndarray,
    start: np.ndarray,
    end: np.ndarray,
    min_periods: int,
    is_max: bool,
) -> np.ndarray:
    """
    Sliding-window min/max via the monotonic-queue algorithm.

    ``output[i]`` aggregates ``values[start[i]:end[i])``. Each element is
    pushed and popped at most once, so the pass is amortised O(n) rather
    than O(window * n).

    Parameters
    ----------
    values : np.ndarray
        Input values; NaNs are excluded from the observation count.
    start, end : np.ndarray
        Half-open window bounds per output position.
        NOTE(review): the ``st = end[i - 1]`` scan below presumes window
        ends are non-decreasing (true for pandas rolling windows) —
        confirm for any new caller.
    min_periods : int
        Minimum number of non-NaN observations needed to emit a value;
        otherwise NaN is written.
    is_max : bool
        True computes a sliding max, False a sliding min.

    Returns
    -------
    np.ndarray
        float64 array of ``len(start)`` aggregated values.
    """
    N = len(start)
    nobs = 0  # non-NaN values currently inside the window
    output = np.empty(N, dtype=np.float64)
    # Use deque once numba supports it
    # https://github.com/numba/numba/issues/7417
    # Q: indices of extremum candidates, kept monotonic so Q[0] is the
    #    current window's min/max. W: every index still in the window,
    #    used only to maintain `nobs` as elements fall off the left edge.
    Q: list = []
    W: list = []
    for i in range(N):
        curr_win_size = end[i] - start[i]
        if i == 0:
            st = start[i]
        else:
            # only scan elements newly entering the window
            st = end[i - 1]
        for k in range(st, end[i]):
            ai = values[k]
            if not np.isnan(ai):
                nobs += 1
            elif is_max:
                # neutralise NaN so it can never be selected as the extremum
                ai = -np.inf
            else:
                ai = np.inf
            # Discard previous entries if we find new min or max.
            # `values[x] != values[x]` is a nopython-safe NaN test
            # (NaN is the only value not equal to itself).
            if is_max:
                while Q and ((ai >= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]):
                    Q.pop()
            else:
                while Q and ((ai <= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]):
                    Q.pop()
            Q.append(k)
            W.append(k)
        # Discard entries outside and left of current window
        while Q and Q[0] <= start[i] - 1:
            Q.pop(0)
        while W and W[0] <= start[i] - 1:
            if not np.isnan(values[W[0]]):
                nobs -= 1
            W.pop(0)
        # Save output based on index in input value array
        if Q and curr_win_size > 0 and nobs >= min_periods:
            output[i] = values[Q[0]]
        else:
            output[i] = np.nan
    return output
| bsd-3-clause | 01458071bcd1c7ed0cedb55a585a8b6d | 25.528571 | 86 | 0.493807 | 3.413603 | false | false | false | false |
pandas-dev/pandas | pandas/tests/frame/methods/test_combine_first.py | 1 | 19110 | from datetime import datetime
import numpy as np
import pytest
from pandas.compat import pa_version_under7p0
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import is_dtype_equal
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
)
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail.iloc[:10, tail.columns.get_loc("A")] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "e"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3})
idx = Index(["a", "b", "c", "f"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = Series(["a", "b", "c", "f"], index=idx)
ser3 = Series([12, 4, 5, 97], index=idx)
frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3})
combined = frame1.combine_first(frame2)
assert len(combined.columns) == 5
def test_combine_first_same_as_in_update(self):
# gh 3016 (same as in update)
df = DataFrame(
[[1.0, 2.0, False, True], [4.0, 5.0, True, False]],
columns=["A", "B", "bool1", "bool2"],
)
other = DataFrame([[45, 45]], index=[0], columns=["A", "B"])
result = df.combine_first(other)
tm.assert_frame_equal(result, df)
df.loc[0, "A"] = np.nan
result = df.combine_first(other)
df.loc[0, "A"] = 45
tm.assert_frame_equal(result, df)
def test_combine_first_doc_example(self):
# doc example
df1 = DataFrame(
{"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]}
)
df2 = DataFrame(
{
"A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0],
"B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0],
}
)
result = df1.combine_first(df2)
expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]})
tm.assert_frame_equal(result, expected)
def test_combine_first_return_obj_type_with_bools(self):
# GH3552
df1 = DataFrame(
[[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]]
)
df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2])
expected = Series([True, True, False], name=2, dtype=bool)
result_12 = df1.combine_first(df2)[2]
tm.assert_series_equal(result_12, expected)
result_21 = df2.combine_first(df1)[2]
tm.assert_series_equal(result_21, expected)
@pytest.mark.parametrize(
"data1, data2, data_expected",
(
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[pd.NaT, pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
(
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[datetime(2000, 1, 2), pd.NaT, pd.NaT],
[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
),
),
)
def test_combine_first_convert_datatime_correctly(
self, data1, data2, data_expected
):
# GH 3593
df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2})
result = df1.combine_first(df2)
expected = DataFrame({"a": data_expected})
tm.assert_frame_equal(result, expected)
def test_combine_first_align_nan(self):
# GH 7509 (not fixed)
dfa = DataFrame([[pd.Timestamp("2011-01-01"), 2]], columns=["a", "b"])
dfb = DataFrame([[4], [5]], columns=["b"])
assert dfa["a"].dtype == "datetime64[ns]"
assert dfa["b"].dtype == "int64"
res = dfa.combine_first(dfb)
exp = DataFrame(
{"a": [pd.Timestamp("2011-01-01"), pd.NaT], "b": [2, 5]},
columns=["a", "b"],
)
tm.assert_frame_equal(res, exp)
assert res["a"].dtype == "datetime64[ns]"
# TODO: this must be int64
assert res["b"].dtype == "int64"
res = dfa.iloc[:0].combine_first(dfb)
exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"])
tm.assert_frame_equal(res, exp)
# TODO: this must be datetime64
assert res["a"].dtype == "float64"
# TODO: this must be int64
assert res["b"].dtype == "int64"
def test_combine_first_timezone(self):
# see gh-7630
data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC")
df1 = DataFrame(
columns=["UTCdatetime", "abc"],
data=data1,
index=pd.date_range("20140627", periods=1),
)
data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC")
df2 = DataFrame(
columns=["UTCdatetime", "xyz"],
data=data2,
index=pd.date_range("20140628", periods=1),
)
res = df2[["UTCdatetime"]].combine_first(df1)
exp = DataFrame(
{
"UTCdatetime": [
pd.Timestamp("2010-01-01 01:01", tz="UTC"),
pd.Timestamp("2012-12-12 12:12", tz="UTC"),
],
"abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT],
},
columns=["UTCdatetime", "abc"],
index=pd.date_range("20140627", periods=2, freq="D"),
)
assert res["UTCdatetime"].dtype == "datetime64[ns, UTC]"
assert res["abc"].dtype == "datetime64[ns, UTC]"
tm.assert_frame_equal(res, exp)
# see gh-10567
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, UTC]"
dts1 = pd.DatetimeIndex(
["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern"
)
df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7])
dts2 = pd.DatetimeIndex(
["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern"
)
df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.DatetimeIndex(
[
"2011-01-01",
"2012-01-01",
"NaT",
"2012-01-02",
"2011-01-03",
"2011-01-04",
],
tz="US/Eastern",
)
exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
# different tz
dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-03", "2015-01-05")
df2 = DataFrame({"DATE": dts2})
# if df1 doesn't have NaN, keep its dtype
res = df1.combine_first(df2)
tm.assert_frame_equal(res, df1)
assert res["DATE"].dtype == "datetime64[ns, US/Eastern]"
dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern")
df1 = DataFrame({"DATE": dts1})
dts2 = pd.date_range("2015-01-01", "2015-01-03")
df2 = DataFrame({"DATE": dts2})
res = df1.combine_first(df2)
exp_dts = [
pd.Timestamp("2015-01-01", tz="US/Eastern"),
pd.Timestamp("2015-01-02", tz="US/Eastern"),
pd.Timestamp("2015-01-03"),
]
exp = DataFrame({"DATE": exp_dts})
tm.assert_frame_equal(res, exp)
assert res["DATE"].dtype == "object"
def test_combine_first_timedelta(self):
data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"])
df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7])
data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"])
df2 = DataFrame({"TD": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.TimedeltaIndex(
["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"]
)
exp = DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["TD"].dtype == "timedelta64[ns]"
def test_combine_first_period(self):
data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M")
df1 = DataFrame({"P": data1}, index=[1, 3, 5, 7])
data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M")
df2 = DataFrame({"P": data2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = pd.PeriodIndex(
["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M"
)
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == data1.dtype
# different freq
dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D")
df2 = DataFrame({"P": dts2}, index=[2, 4, 5])
res = df1.combine_first(df2)
exp_dts = [
pd.Period("2011-01", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.NaT,
pd.Period("2012-01-02", freq="D"),
pd.Period("2011-03", freq="M"),
pd.Period("2011-04", freq="M"),
]
exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7])
tm.assert_frame_equal(res, exp)
assert res["P"].dtype == "object"
def test_combine_first_int(self):
# GH14687 - integer series that do no align exactly
df1 = DataFrame({"a": [0, 1, 3, 5]}, dtype="int64")
df2 = DataFrame({"a": [1, 4]}, dtype="int64")
result_12 = df1.combine_first(df2)
expected_12 = DataFrame({"a": [0, 1, 3, 5]})
tm.assert_frame_equal(result_12, expected_12)
result_21 = df2.combine_first(df1)
expected_21 = DataFrame({"a": [1, 4, 3, 5]})
tm.assert_frame_equal(result_21, expected_21)
@pytest.mark.parametrize("val", [1, 1.0])
def test_combine_first_with_asymmetric_other(self, val):
# see gh-20699
df1 = DataFrame({"isNum": [val]})
df2 = DataFrame({"isBool": [True]})
res = df1.combine_first(df2)
exp = DataFrame({"isBool": [True], "isNum": [val]})
tm.assert_frame_equal(res, exp)
def test_combine_first_string_dtype_only_na(self, nullable_string_dtype):
# GH: 37519
df = DataFrame(
{"a": ["962", "85"], "b": [pd.NA] * 2}, dtype=nullable_string_dtype
)
df2 = DataFrame({"a": ["85"], "b": [pd.NA]}, dtype=nullable_string_dtype)
with tm.maybe_produces_warning(
PerformanceWarning,
pa_version_under7p0 and nullable_string_dtype == "string[pyarrow]",
):
df.set_index(["a", "b"], inplace=True)
with tm.maybe_produces_warning(
PerformanceWarning,
pa_version_under7p0 and nullable_string_dtype == "string[pyarrow]",
):
df2.set_index(["a", "b"], inplace=True)
with tm.maybe_produces_warning(
PerformanceWarning,
pa_version_under7p0 and nullable_string_dtype == "string[pyarrow]",
):
result = df.combine_first(df2)
with tm.maybe_produces_warning(
PerformanceWarning,
pa_version_under7p0 and nullable_string_dtype == "string[pyarrow]",
):
expected = DataFrame(
{"a": ["962", "85"], "b": [pd.NA] * 2}, dtype=nullable_string_dtype
).set_index(["a", "b"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"scalar1, scalar2",
[
(datetime(2020, 1, 1), datetime(2020, 1, 2)),
(pd.Period("2020-01-01", "D"), pd.Period("2020-01-02", "D")),
(pd.Timedelta("89 days"), pd.Timedelta("60 min")),
(pd.Interval(left=0, right=1), pd.Interval(left=2, right=3, closed="left")),
],
)
def test_combine_first_timestamp_bug(scalar1, scalar2, nulls_fixture):
# GH28481
na_value = nulls_fixture
frame = DataFrame([[na_value, na_value]], columns=["a", "b"])
other = DataFrame([[scalar1, scalar2]], columns=["b", "c"])
common_dtype = find_common_type([frame.dtypes["b"], other.dtypes["b"]])
if is_dtype_equal(common_dtype, "object") or frame.dtypes["b"] == other.dtypes["b"]:
val = scalar1
else:
val = na_value
result = frame.combine_first(other)
expected = DataFrame([[na_value, val, scalar2]], columns=["a", "b", "c"])
expected["b"] = expected["b"].astype(common_dtype)
tm.assert_frame_equal(result, expected)
def test_combine_first_timestamp_bug_NaT():
    # GH28481: all-NaT columns in the caller must be filled from `other`,
    # and columns missing on one side must survive the combine.
    left = DataFrame([[pd.NaT, pd.NaT]], columns=["a", "b"])
    right = DataFrame(
        [[datetime(2020, 1, 1), datetime(2020, 1, 2)]], columns=["b", "c"]
    )
    expected = DataFrame(
        [[pd.NaT, datetime(2020, 1, 1), datetime(2020, 1, 2)]],
        columns=["a", "b", "c"],
    )
    tm.assert_frame_equal(left.combine_first(right), expected)
def test_combine_first_with_nan_multiindex():
    # gh-36562: a NaN label inside a MultiIndex level must align correctly.
    left_index = MultiIndex.from_arrays(
        [["b", "b", "c", "a", "b", np.nan], [1, 2, 3, 4, 5, 6]], names=["a", "b"]
    )
    left = DataFrame({"c": [1] * 6}, index=left_index)
    right_index = MultiIndex.from_arrays(
        [["a", "b", "c", "a", "b", "d"], [1] * 6], names=["a", "b"]
    )
    right = DataFrame({"d": Series([1, 2, 3, 4, 5, 6], index=right_index)})

    result = left.combine_first(right)

    expected_index = MultiIndex.from_arrays(
        [
            ["a", "a", "a", "b", "b", "b", "b", "c", "c", "d", np.nan],
            [1, 1, 4, 1, 1, 2, 5, 1, 3, 1, 6],
        ],
        names=["a", "b"],
    )
    expected = DataFrame(
        {
            "c": [np.nan, np.nan, 1, 1, 1, 1, 1, np.nan, 1, np.nan, 1],
            "d": [1.0, 4.0, np.nan, 2.0, 5.0, np.nan, np.nan, 3.0, np.nan, 6.0, np.nan],
        },
        index=expected_index,
    )
    tm.assert_frame_equal(result, expected)
def test_combine_preserve_dtypes():
    # GH7509: combine_first should not needlessly upcast column dtypes.
    first = DataFrame(
        {
            "A": Series(["a", "b"], index=range(2)),
            "B": Series(range(2), index=range(2)),
        }
    )
    second = DataFrame(
        {
            "B": Series(range(-1, 1), index=range(5, 7)),
            "C": Series(["a", "b"], index=range(5, 7)),
        }
    )

    expected = DataFrame(
        {
            "A": ["a", "b", np.nan, np.nan],
            "B": [0, 1, -1, 0],
            "C": [np.nan, np.nan, "a", "b"],
        },
        index=[0, 1, 5, 6],
    )
    tm.assert_frame_equal(first.combine_first(second), expected)
def test_combine_first_duplicates_rows_for_nan_index_values():
    # GH39881: NaN entries in an index level must not duplicate rows.
    left = DataFrame(
        {"x": [9, 10, 11]},
        index=MultiIndex.from_arrays([[1, 2, 3], [np.nan, 5, 6]], names=["a", "b"]),
    )
    right = DataFrame(
        {"y": [12, 13, 14]},
        index=MultiIndex.from_arrays([[1, 2, 4], [np.nan, 5, 7]], names=["a", "b"]),
    )

    expected = DataFrame(
        {
            "x": [9.0, 10.0, 11.0, np.nan],
            "y": [12.0, 13.0, np.nan, 14.0],
        },
        index=MultiIndex.from_arrays(
            [[1, 2, 3, 4], [np.nan, 5.0, 6.0, 7.0]], names=["a", "b"]
        ),
    )
    tm.assert_frame_equal(left.combine_first(right), expected)
def test_combine_first_int64_not_cast_to_float64():
    # GH 28613: fully-overlapping int64 columns must stay int64.
    left = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
    right = DataFrame({"A": [1, 20, 30], "B": [40, 50, 60], "C": [12, 34, 65]})

    expected = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [12, 34, 65]})
    tm.assert_frame_equal(left.combine_first(right), expected)
| bsd-3-clause | bc949fbc4602355445e52666eeffda59 | 34.06422 | 88 | 0.514652 | 3.120509 | false | false | false | false |
pandas-dev/pandas | pandas/core/indexes/datetimelike.py | 1 | 24625 | """
Base and utility classes for tseries type pandas objects.
"""
from __future__ import annotations
from datetime import datetime
from typing import (
TYPE_CHECKING,
Any,
Callable,
Sequence,
TypeVar,
cast,
final,
)
import numpy as np
from pandas._libs import (
NaT,
Timedelta,
lib,
)
from pandas._libs.tslibs import (
BaseOffset,
Resolution,
Tick,
parsing,
to_offset,
)
from pandas._typing import (
Axis,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import NullFrequencyError
from pandas.util._decorators import (
Appender,
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dtype_equal,
is_integer,
is_list_like,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
_index_shared_docs,
)
from pandas.core.indexes.extension import (
NDArrayBackedExtensionIndex,
inherit_names,
)
from pandas.core.indexes.range import RangeIndex
from pandas.core.tools.timedeltas import to_timedelta
if TYPE_CHECKING:
from pandas import CategoricalIndex
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_T = TypeVar("_T", bound="DatetimeIndexOpsMixin")
_TDT = TypeVar("_TDT", bound="DatetimeTimedeltaMixin")
@inherit_names(
["inferred_freq", "_resolution_obj", "resolution"],
DatetimeLikeArrayMixin,
cache=True,
)
@inherit_names(["mean", "freq", "freqstr"], DatetimeLikeArrayMixin)
class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex):
"""
Common ops mixin to support a unified interface datetimelike Index.
"""
_is_numeric_dtype = False
_can_hold_strings = False
_data: DatetimeArray | TimedeltaArray | PeriodArray
freq: BaseOffset | None
freqstr: str | None
_resolution_obj: Resolution
@property
def asi8(self) -> npt.NDArray[np.int64]:
return self._data.asi8
# ------------------------------------------------------------------------
@cache_readonly
def hasnans(self) -> bool:
return self._data._hasna
def equals(self, other: Any) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
elif other.dtype.kind in ["f", "i", "u", "c"]:
return False
elif not isinstance(other, type(self)):
should_try = False
inferable = self._data._infer_matches
if other.dtype == object:
should_try = other.inferred_type in inferable
elif is_categorical_dtype(other.dtype):
other = cast("CategoricalIndex", other)
should_try = other.categories.inferred_type in inferable
if should_try:
try:
other = type(self)(other)
except (ValueError, TypeError, OverflowError):
# e.g.
# ValueError -> cannot parse str entry, or OutOfBoundsDatetime
# TypeError -> trying to convert IntervalIndex to DatetimeIndex
# OverflowError -> Index([very_large_timedeltas])
return False
if not is_dtype_equal(self.dtype, other.dtype):
# have different timezone
return False
return np.array_equal(self.asi8, other.asi8)
@Appender(Index.__contains__.__doc__)
def __contains__(self, key: Any) -> bool:
hash(key)
try:
self.get_loc(key)
except (KeyError, TypeError, ValueError):
return False
return True
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
return super()._convert_tolerance(tolerance, target)
# --------------------------------------------------------------------
# Rendering Methods
def format(
self,
name: bool = False,
formatter: Callable | None = None,
na_rep: str = "NaT",
date_format: str | None = None,
) -> list[str]:
"""
Render a string representation of the Index.
"""
header = []
if name:
header.append(
ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
if self.name is not None
else ""
)
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, na_rep=na_rep, date_format=date_format)
def _format_with_header(
self, header: list[str], na_rep: str = "NaT", date_format: str | None = None
) -> list[str]:
# matches base class except for whitespace padding and date_format
return header + list(
self._format_native_types(na_rep=na_rep, date_format=date_format)
)
@property
def _formatter_func(self):
return self._data._formatter()
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs = super()._format_attrs()
for attrib in self._attributes:
# iterating over _attributes prevents us from doing this for PeriodIndex
if attrib == "freq":
freq = self.freqstr
if freq is not None:
freq = repr(freq) # e.g. D -> 'D'
attrs.append(("freq", freq))
return attrs
@Appender(Index._summary.__doc__)
def _summary(self, name=None) -> str:
result = super()._summary(name=name)
if self.freq:
result += f"\nFreq: {self.freqstr}"
return result
# --------------------------------------------------------------------
# Indexing Methods
@final
def _can_partial_date_slice(self, reso: Resolution) -> bool:
# e.g. test_getitem_setitem_periodindex
# History of conversation GH#3452, GH#3931, GH#2369, GH#14826
return reso > self._resolution_obj
# NB: for DTI/PI, not TDI
def _parsed_string_to_bounds(self, reso: Resolution, parsed):
raise NotImplementedError
def _parse_with_reso(self, label: str):
# overridden by TimedeltaIndex
try:
if self.freq is None or hasattr(self.freq, "rule_code"):
freq = self.freq
except NotImplementedError:
freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
parsed, reso_str = parsing.parse_time_string(label, freq)
reso = Resolution.from_attrname(reso_str)
return parsed, reso
def _get_string_slice(self, key: str):
# overridden by TimedeltaIndex
parsed, reso = self._parse_with_reso(key)
try:
return self._partial_date_slice(reso, parsed)
except KeyError as err:
raise KeyError(key) from err
@final
def _partial_date_slice(
self,
reso: Resolution,
parsed: datetime,
):
"""
Parameters
----------
reso : Resolution
parsed : datetime
Returns
-------
slice or ndarray[intp]
"""
if not self._can_partial_date_slice(reso):
raise ValueError
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
vals = self._data._ndarray
unbox = self._data._unbox
if self.is_monotonic_increasing:
if len(self) and (
(t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
):
# we are out of range
raise KeyError
# TODO: does this depend on being monotonic _increasing_?
# a monotonic (sorted) series can be sliced
left = vals.searchsorted(unbox(t1), side="left")
right = vals.searchsorted(unbox(t2), side="right")
return slice(left, right)
else:
lhs_mask = vals >= unbox(t1)
rhs_mask = vals <= unbox(t2)
# try to find the dates
return (lhs_mask & rhs_mask).nonzero()[0]
def _maybe_cast_slice_bound(self, label, side: str):
"""
If label is a string, cast it to scalar type according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
if isinstance(label, str):
try:
parsed, reso = self._parse_with_reso(label)
except ValueError as err:
# DTI -> parsing.DateParseError
# TDI -> 'unit abbreviation w/o a number'
# PI -> string cannot be parsed as datetime-like
self._raise_invalid_indexer("slice", label, err)
lower, upper = self._parsed_string_to_bounds(reso, parsed)
return lower if side == "left" else upper
elif not isinstance(label, self._data._recognized_scalars):
self._raise_invalid_indexer("slice", label)
return label
# --------------------------------------------------------------------
# Arithmetic Methods
def shift(self: _T, periods: int = 1, freq=None) -> _T:
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
"""
raise NotImplementedError
# --------------------------------------------------------------------
    @doc(Index._maybe_cast_listlike_indexer)
    def _maybe_cast_listlike_indexer(self, keyarr):
        # Try to coerce `keyarr` to an array matching our dtype; on failure,
        # fall back to an object-style array so lookups can still proceed.
        try:
            res = self._data._validate_listlike(keyarr, allow_object=True)
        except (ValueError, TypeError):
            if not isinstance(keyarr, ExtensionArray):
                # e.g. we don't want to cast DTA to ndarray[object]
                res = com.asarray_tuplesafe(keyarr)
                # TODO: com.asarray_tuplesafe shouldn't cast e.g. DatetimeArray
            else:
                res = keyarr
        return Index(res, dtype=res.dtype)
class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin):
    """
    Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
    but not PeriodIndex
    """
    # Backing ExtensionArray holding the actual datetime64/timedelta64 data.
    _data: DatetimeArray | TimedeltaArray
    # Attributes compared / propagated on index operations.
    _comparables = ["name", "freq"]
    _attributes = ["name", "freq"]
    # Compat for frequency inference, see GH#23789
    _is_monotonic_increasing = Index.is_monotonic_increasing
    _is_monotonic_decreasing = Index.is_monotonic_decreasing
    _is_unique = Index.is_unique
    _join_precedence = 10
def _with_freq(self, freq):
arr = self._data._with_freq(freq)
return type(self)._simple_new(arr, name=self._name)
    @property
    def values(self) -> np.ndarray:
        """The raw ndarray backing self._data."""
        # NB: For Datetime64TZ this is lossy
        return self._data._ndarray
    @doc(DatetimeIndexOpsMixin.shift)
    def shift(self: _TDT, periods: int = 1, freq=None) -> _TDT:
        if freq is not None and freq != self.freq:
            # Shifting by an explicit freq different from our own:
            # plain addition of the scaled offset.
            if isinstance(freq, str):
                freq = to_offset(freq)
            offset = periods * freq
            return self + offset
        if periods == 0 or len(self) == 0:
            # GH#14811 empty case
            return self.copy()
        if self.freq is None:
            raise NullFrequencyError("Cannot shift with no freq")
        # Shifting by our own freq: regenerate the range from shifted
        # endpoints so the result keeps `freq` set.
        start = self[0] + periods * self.freq
        end = self[-1] + periods * self.freq
        # Note: in the DatetimeTZ case, _generate_range will infer the
        # appropriate timezone from `start` and `end`, so tz does not need
        # to be passed explicitly.
        result = self._data._generate_range(
            start=start, end=end, periods=None, freq=self.freq
        )
        return type(self)._simple_new(result, name=self.name)
# --------------------------------------------------------------------
# Set Operation Methods
    @cache_readonly
    def _as_range_index(self) -> RangeIndex:
        # Convert our i8 representations to RangeIndex
        # Caller is responsible for checking isinstance(self.freq, Tick)
        freq = cast(Tick, self.freq)
        tick = freq.delta.value
        # Evenly spaced i8 values from self[0] through self[-1], step `tick`.
        rng = range(self[0].value, self[-1].value + tick, tick)
        return RangeIndex(rng)
def _can_range_setop(self, other):
return isinstance(self.freq, Tick) and isinstance(other.freq, Tick)
def _wrap_range_setop(self, other, res_i8):
new_freq = None
if not len(res_i8):
# RangeIndex defaults to step=1, which we don't want.
new_freq = self.freq
elif isinstance(res_i8, RangeIndex):
new_freq = to_offset(Timedelta(res_i8.step))
res_i8 = res_i8
# TODO(GH#41493): we cannot just do
# type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq)
# because test_setops_preserve_freq fails with _validate_frequency raising.
# This raising is incorrect, as 'on_freq' is incorrect. This will
# be fixed by GH#41493
res_values = res_i8.values.view(self._data._ndarray.dtype)
result = type(self._data)._simple_new(
res_values, dtype=self.dtype, freq=new_freq
)
return self._wrap_setop_result(other, result)
def _range_intersect(self, other, sort):
# Dispatch to RangeIndex intersection logic.
left = self._as_range_index
right = other._as_range_index
res_i8 = left.intersection(right, sort=sort)
return self._wrap_range_setop(other, res_i8)
def _range_union(self, other, sort):
# Dispatch to RangeIndex union logic.
left = self._as_range_index
right = other._as_range_index
res_i8 = left.union(right, sort=sort)
return self._wrap_range_setop(other, res_i8)
    def _intersection(self, other: Index, sort: bool = False) -> Index:
        """
        intersection specialized to the case with matching dtypes and both non-empty.
        """
        other = cast("DatetimeTimedeltaMixin", other)
        if self._can_range_setop(other):
            # Tick freqs on both sides: work on RangeIndex views of the i8 data.
            return self._range_intersect(other, sort=sort)
        if not self._can_fast_intersect(other):
            result = Index._intersection(self, other, sort=sort)
            # We need to invalidate the freq because Index._intersection
            # uses _shallow_copy on a view of self._data, which will preserve
            # self.freq if we're not careful.
            # At this point we should have result.dtype == self.dtype
            # and type(result) is type(self._data)
            result = self._wrap_setop_result(other, result)
            return result._with_freq(None)._with_freq("infer")
        else:
            # Matching unit freqs and monotonic increasing: slice-based path.
            return self._fast_intersect(other, sort)
    def _fast_intersect(self, other, sort):
        """Slice-based intersection; requires _can_fast_intersect(other)."""
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self
        # after sorting, the intersection always starts with the right index
        # and ends with the index whose last element is smallest
        end = min(left[-1], right[-1])
        start = right[0]
        if end < start:
            # Disjoint ranges: empty result of our own type/dtype.
            result = self[:0]
        else:
            lslice = slice(*left.slice_locs(start, end))
            result = left._values[lslice]
        return result
def _can_fast_intersect(self: _T, other: _T) -> bool:
# Note: we only get here with len(self) > 0 and len(other) > 0
if self.freq is None:
return False
elif other.freq != self.freq:
return False
elif not self.is_monotonic_increasing:
# Because freq is not None, we must then be monotonic decreasing
return False
# this along with matching freqs ensure that we "line up",
# so intersection will preserve freq
# Note we are assuming away Ticks, as those go through _range_intersect
# GH#42104
return self.freq.n == 1
    def _can_fast_union(self: _T, other: _T) -> bool:
        """Can `self | other` be built by concatenation while keeping freq?"""
        # Assumes that type(self) == type(other), as per the annotation
        # The ability to fast_union also implies that `freq` should be
        # retained on union.
        freq = self.freq
        if freq is None or freq != other.freq:
            return False
        if not self.is_monotonic_increasing:
            # Because freq is not None, we must then be monotonic decreasing
            # TODO: do union on the reversed indexes?
            return False
        if len(self) == 0 or len(other) == 0:
            # only reached via union_many
            return True
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self
        right_start = right[0]
        left_end = left[-1]
        # Only need to "adjoin", not overlap
        return (right_start == left_end + freq) or right_start in left
    def _fast_union(self: _TDT, other: _TDT, sort=None) -> _TDT:
        """Union by concatenation; requires _can_fast_union(other)."""
        # Caller is responsible for ensuring self and other are non-empty
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        elif sort is False:
            # TDIs are not in the "correct" order and we don't want
            # to sort but want to remove overlaps
            left, right = self, other
            left_start = left[0]
            loc = right.searchsorted(left_start, side="left")
            right_chunk = right._values[:loc]
            dates = concat_compat((left._values, right_chunk))
            # No freq is attached here: with sort=False the concatenation is
            # not guaranteed to be evenly spaced.
            result = type(self)._simple_new(dates, name=self.name)
            return result
        else:
            left, right = other, self
        left_end = left[-1]
        right_end = right[-1]
        # concatenate
        if left_end < right_end:
            loc = right.searchsorted(left_end, side="right")
            right_chunk = right._values[loc:]
            dates = concat_compat([left._values, right_chunk])
            # The can_fast_union check ensures that the result.freq
            # should match self.freq
            dates = type(self._data)(dates, freq=self.freq)
            result = type(self)._simple_new(dates)
            return result
        else:
            # `right` is entirely contained in `left`; nothing to append.
            return left
    def _union(self, other, sort):
        # We are called by `union`, which is responsible for this validation
        assert isinstance(other, type(self))
        assert self.dtype == other.dtype
        if self._can_range_setop(other):
            # Tick freqs on both sides: do the union on RangeIndex i8 views.
            return self._range_union(other, sort=sort)
        if self._can_fast_union(other):
            result = self._fast_union(other, sort=sort)
            # in the case with sort=None, the _can_fast_union check ensures
            # that result.freq == self.freq
            return result
        else:
            # Generic path; freq cannot be assumed, so re-infer it.
            return super()._union(other, sort)._with_freq("infer")
# --------------------------------------------------------------------
# Join Methods
def _get_join_freq(self, other):
"""
Get the freq to attach to the result of a join operation.
"""
freq = None
if self._can_fast_union(other):
freq = self.freq
return freq
    def _wrap_joined_index(self, joined, other):
        assert other.dtype == self.dtype, (other.dtype, self.dtype)
        result = super()._wrap_joined_index(joined, other)
        # Attach a freq only when the join is known to preserve it.
        result._data._freq = self._get_join_freq(other)
        return result
    def _get_engine_target(self) -> np.ndarray:
        # engine methods and libjoin methods need dt64/td64 values cast to i8
        return self._data._ndarray.view("i8")
    def _from_join_target(self, result: np.ndarray):
        # view e.g. i8 back to M8[ns]
        result = result.view(self._data._ndarray.dtype)
        return self._data._from_backing_data(result)
# --------------------------------------------------------------------
# List-like Methods
    def _get_delete_freq(self, loc: int | slice | Sequence[int]):
        """
        Find the `freq` for self.delete(loc).
        """
        # freq survives a deletion only when the remaining values are still
        # evenly spaced, i.e. when we delete a contiguous block at either end.
        freq = None
        if self.freq is not None:
            if is_integer(loc):
                if loc in (0, -len(self), -1, len(self) - 1):
                    # Deleting the first or last element keeps the spacing.
                    freq = self.freq
            else:
                if is_list_like(loc):
                    # error: Incompatible types in assignment (expression has
                    # type "Union[slice, ndarray]", variable has type
                    # "Union[int, slice, Sequence[int]]")
                    loc = lib.maybe_indices_to_slice(  # type: ignore[assignment]
                        np.asarray(loc, dtype=np.intp), len(self)
                    )
                if isinstance(loc, slice) and loc.step in (1, None):
                    if loc.start in (0, None) or loc.stop in (len(self), None):
                        # Contiguous block anchored at either end: freq survives.
                        freq = self.freq
        return freq
    def _get_insert_freq(self, loc: int, item):
        """
        Find the `freq` for self.insert(loc, item).
        """
        value = self._data._validate_scalar(item)
        item = self._data._box_func(value)
        freq = None
        if self.freq is not None:
            # freq can be preserved on edge cases
            if self.size:
                if item is NaT:
                    pass
                elif loc in (0, -len(self)) and item + self.freq == self[0]:
                    # Prepending exactly one freq-step before the first element.
                    freq = self.freq
                elif (loc == len(self)) and item - self.freq == self[-1]:
                    # Appending exactly one freq-step after the last element.
                    freq = self.freq
            else:
                # Adding a single item to an empty index may preserve freq
                if isinstance(self.freq, Tick):
                    # all TimedeltaIndex cases go through here; is_on_offset
                    # would raise TypeError
                    freq = self.freq
                elif self.freq.is_on_offset(item):
                    freq = self.freq
        return freq
    @doc(NDArrayBackedExtensionIndex.delete)
    def delete(self, loc) -> DatetimeTimedeltaMixin:
        result = super().delete(loc)
        # Recompute freq: it survives only for deletions at the edges.
        result._data._freq = self._get_delete_freq(loc)
        return result
    @doc(NDArrayBackedExtensionIndex.insert)
    def insert(self, loc: int, item):
        result = super().insert(loc, item)
        if isinstance(result, type(self)):
            # i.e. parent class method did not cast
            result._data._freq = self._get_insert_freq(loc, item)
        return result
# --------------------------------------------------------------------
# NDArray-Like Methods
    @Appender(_index_shared_docs["take"] % _index_doc_kwargs)
    def take(
        self,
        indices,
        axis: Axis = 0,
        allow_fill: bool = True,
        fill_value=None,
        **kwargs,
    ):
        nv.validate_take((), kwargs)
        indices = np.asarray(indices, dtype=np.intp)
        result = NDArrayBackedExtensionIndex.take(
            self, indices, axis, allow_fill, fill_value, **kwargs
        )
        # If the indices amount to an evenly-strided slice, the freq can be
        # propagated from the equivalent getitem on the data.
        maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
        if isinstance(maybe_slice, slice):
            freq = self._data._get_getitem_freq(maybe_slice)
            result._data._freq = freq
        return result
| bsd-3-clause | 3341f3c1f51ca9d8277d2536b517aadd | 32.594816 | 87 | 0.557929 | 4.129633 | false | false | false | false |
pandas-dev/pandas | pandas/_config/config.py | 1 | 24603 | """
The config module holds package-wide configurables and provides
a uniform API for working with them.
Overview
========
This module supports the following requirements:
- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
- keys are case-insensitive.
- functions should accept partial/regex keys, when unambiguous.
- options can be registered by modules at import time.
- options can be registered at init-time (via core.config_init)
- options have a default value, and (optionally) a description and
validation function associated with them.
- options can be deprecated, in which case referencing them
should produce a warning.
- deprecated options can optionally be rerouted to a replacement
so that accessing a deprecated option reroutes to a differently
named option.
- options can be reset to their default value.
- all options can be reset to their default value at once.
- all options in a certain sub - namespace can be reset at once.
- the user can set / get / reset or ask for the description of an option.
- a developer can register and mark an option as deprecated.
- you can register a callback to be invoked when the option value
is set or reset. Changing the stored value is considered misuse, but
is not verboten.
Implementation
==============
- Data is stored using nested dictionaries, and should be accessed
through the provided API.
- "Registered options" and "Deprecated options" have metadata associated
with them, which are stored in auxiliary dictionaries keyed on the
fully-qualified key, e.g. "x.y.z.option".
- the config_init module is imported by the package's __init__.py file.
placing any register_option() calls there will ensure those options
are available as soon as pandas is loaded. If you use register_option
in a module, it will only be available after that module is imported,
which you should be aware of.
- `config_prefix` is a context_manager (for use with the `with` keyword)
which can save developers some typing, see the docstring.
"""
from __future__ import annotations
from contextlib import (
ContextDecorator,
contextmanager,
)
import re
from typing import (
Any,
Callable,
Generator,
Generic,
Iterable,
NamedTuple,
cast,
)
import warnings
from pandas._typing import (
F,
T,
)
from pandas.util._exceptions import find_stack_level
class DeprecatedOption(NamedTuple):
    """Metadata describing a deprecated option."""
    key: str  # fully-qualified option name
    msg: str | None  # custom warning message, if any
    rkey: str | None  # replacement key that accesses are rerouted to
    removal_ver: str | None  # version in which the option will be removed
class RegisteredOption(NamedTuple):
    """Metadata describing a registered option."""
    key: str  # fully-qualified option name
    defval: object  # default value
    doc: str  # description text
    validator: Callable[[object], Any] | None  # raises ValueError on bad values
    cb: Callable[[str], Any] | None  # callback invoked with the key on set/reset
# holds deprecated option metadata
_deprecated_options: dict[str, DeprecatedOption] = {}
# holds registered option metadata
_registered_options: dict[str, RegisteredOption] = {}
# holds the current values for registered options
# (a nested dict keyed on the dotted path segments; see _get_root)
_global_config: dict[str, Any] = {}
# keys which have a special meaning
_reserved_keys: list[str] = ["all"]
class OptionError(AttributeError, KeyError):
    """
    Exception raised for pandas.options.
    Backwards compatible with KeyError checks.
    """
    # Subclasses both AttributeError and KeyError so failures from
    # attribute-style access (options.x.y) and dict-style lookups can be
    # caught uniformly.
#
# User API
def _get_single_key(pat: str, silent: bool) -> str:
    """
    Resolve `pat` to exactly one registered option key.
    Raises OptionError when `pat` matches zero or multiple keys; unless
    `silent`, emits a deprecation warning for deprecated keys.
    """
    matches = _select_options(pat)
    if not matches:
        if not silent:
            _warn_if_deprecated(pat)
        raise OptionError(f"No such keys(s): {repr(pat)}")
    if len(matches) > 1:
        raise OptionError("Pattern matched multiple keys")
    key = matches[0]
    if not silent:
        _warn_if_deprecated(key)
    return _translate_key(key)
def _get_option(pat: str, silent: bool = False) -> Any:
    """Return the current value of the single option matching `pat`."""
    key = _get_single_key(pat, silent)
    # Resolve the nested namespace dict and read the leaf value.
    root, leaf = _get_root(key)
    return root[leaf]
def _set_option(*args, **kwargs) -> None:
    """
    Set one or more options, given as alternating (key, value) positional
    arguments. The only supported keyword argument is ``silent``.
    """
    # must at least 1 arg deal with constraints later
    nargs = len(args)
    if not nargs or nargs % 2 != 0:
        raise ValueError("Must provide an even number of non-keyword arguments")
    # default to false
    silent = kwargs.pop("silent", False)
    if kwargs:
        kwarg = list(kwargs.keys())[0]
        raise TypeError(f'_set_option() got an unexpected keyword argument "{kwarg}"')
    for k, v in zip(args[::2], args[1::2]):
        key = _get_single_key(k, silent)
        o = _get_registered_option(key)
        if o and o.validator:
            o.validator(v)
        # walk the nested dict
        root, k = _get_root(key)
        root[k] = v
        # NOTE(review): `o` is assumed non-None here (no `o and` guard as
        # above); _get_single_key only resolves registered keys, so this
        # should hold - confirm.
        if o.cb:
            if silent:
                # Suppress any warnings the callback may emit.
                with warnings.catch_warnings(record=True):
                    o.cb(key)
            else:
                o.cb(key)
def _describe_option(pat: str = "", _print_desc: bool = True) -> str | None:
    """
    Build the descriptions of all options matching `pat`.
    Prints and returns None when `_print_desc` is True; otherwise returns
    the text. Raises OptionError when nothing matches.
    """
    matched = _select_options(pat)
    if not matched:
        raise OptionError("No such keys(s)")
    descriptions = "\n".join(_build_option_description(k) for k in matched)
    if not _print_desc:
        return descriptions
    print(descriptions)
    return None
def _reset_option(pat: str, silent: bool = False) -> None:
    """
    Reset every option matching `pat` to its default value.
    Short patterns (< 4 chars) matching multiple keys are rejected to avoid
    accidental mass resets; the reserved keyword "all" resets everything.
    """
    matched = _select_options(pat)
    if not matched:
        raise OptionError("No such keys(s)")
    if len(matched) > 1 and len(pat) < 4 and pat != "all":
        raise ValueError(
            "You must specify at least 4 characters when "
            "resetting multiple keys, use the special keyword "
            '"all" to reset all the options to their default value'
        )
    for key in matched:
        _set_option(key, _registered_options[key].defval, silent=silent)
def get_default_val(pat: str):
    """Return the default value of the single option matching `pat`."""
    return _get_registered_option(_get_single_key(pat, silent=True)).defval
class DictWrapper:
    """provide attribute-style access to a nested dict"""
    def __init__(self, d: dict[str, Any], prefix: str = "") -> None:
        # Bypass our own __setattr__ (which forwards to _set_option).
        object.__setattr__(self, "d", d)
        object.__setattr__(self, "prefix", prefix)
    def __setattr__(self, key: str, val: Any) -> None:
        prefix = object.__getattribute__(self, "prefix")
        if prefix:
            prefix += "."
        prefix += key
        # you can't set new keys
        # and you can't overwrite subtrees
        if key in self.d and not isinstance(self.d[key], dict):
            _set_option(prefix, val)
        else:
            raise OptionError("You can only set the value of existing options")
    def __getattr__(self, key: str):
        prefix = object.__getattribute__(self, "prefix")
        if prefix:
            prefix += "."
        prefix += key
        try:
            v = object.__getattribute__(self, "d")[key]
        except KeyError as err:
            raise OptionError("No such option") from err
        if isinstance(v, dict):
            # Intermediate namespace: wrap so chained access keeps working.
            return DictWrapper(v, prefix)
        else:
            # Leaf value: go through _get_option so deprecations warn.
            return _get_option(prefix)
    def __dir__(self) -> Iterable[str]:
        return list(self.d.keys())
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To this, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(Generic[T]):
    """Callable wrapper whose __doc__ is rendered lazily from a template."""
    def __init__(self, func: Callable[..., T], doc_tmpl: str) -> None:
        self.__doc_tmpl__ = doc_tmpl
        self.__func__ = func
    def __call__(self, *args, **kwds) -> T:
        return self.__func__(*args, **kwds)
    # error: Signature of "__doc__" incompatible with supertype "object"
    @property
    def __doc__(self) -> str:  # type: ignore[override]
        # Re-rendered on each access so newly registered options show up.
        opts_desc = _describe_option("all", _print_desc=False)
        opts_list = pp_options_list(list(_registered_options.keys()))
        return self.__doc_tmpl__.format(opts_desc=opts_desc, opts_list=opts_list)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
Please reference the :ref:`User Guide <options>` for more information.
The available options with its descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value : object
New value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
Please reference the :ref:`User Guide <options>` for more information.
The available options with its descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=False)
Prints the description for one or more registered options.
Call with no arguments to get a listing for all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
Please reference the :ref:`User Guide <options>` for more information.
The available options with its descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
Please reference the :ref:`User Guide <options>` for more information.
The available options with its descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
# Attribute-style access root: options.<namespace>.<option>
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to User - api
class option_context(ContextDecorator):
    """
    Context manager to temporarily set options in the `with` statement context.
    You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
    Examples
    --------
    >>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
    ...     pass
    """
    def __init__(self, *args) -> None:
        if len(args) % 2 != 0 or len(args) < 2:
            raise ValueError(
                "Need to invoke as option_context(pat, val, [(pat, val), ...])."
            )
        # Pair up the alternating (pattern, value) positional arguments.
        self.ops = list(zip(args[::2], args[1::2]))
    def __enter__(self) -> None:
        # Snapshot the current values so __exit__ can restore them.
        self.undo = [(pat, _get_option(pat, silent=True)) for pat, val in self.ops]
        for pat, val in self.ops:
            _set_option(pat, val, silent=True)
    def __exit__(self, *args) -> None:
        if self.undo:
            for pat, val in self.undo:
                _set_option(pat, val, silent=True)
def register_option(
    key: str,
    defval: object,
    doc: str = "",
    validator: Callable[[object], Any] | None = None,
    cb: Callable[[str], Any] | None = None,
) -> None:
    """
    Register an option in the package-wide pandas config object
    Parameters
    ----------
    key : str
        Fully-qualified key, e.g. "x.y.option - z".
    defval : object
        Default value of the option.
    doc : str
        Description of the option.
    validator : Callable, optional
        Function of a single argument, should raise `ValueError` if
        called with a value which is not a legal value for the option.
    cb
        a function of a single argument "key", which is called
        immediately after an option value is set/reset. key is
        the full name of the option.
    Raises
    ------
    ValueError if `validator` is specified and `defval` is not a valid value.
    """
    import keyword
    import tokenize
    key = key.lower()
    if key in _registered_options:
        raise OptionError(f"Option '{key}' has already been registered")
    if key in _reserved_keys:
        raise OptionError(f"Option '{key}' is a reserved key")
    # the default value should be legal
    if validator:
        validator(defval)
    # walk the nested dict, creating dicts as needed along the path
    path = key.split(".")
    for k in path:
        # Each dotted segment must be a valid, non-keyword identifier.
        if not re.match("^" + tokenize.Name + "$", k):
            raise ValueError(f"{k} is not a valid identifier")
        if keyword.iskeyword(k):
            raise ValueError(f"{k} is a python keyword")
    cursor = _global_config
    msg = "Path prefix to option '{option}' is already an option"
    for i, p in enumerate(path[:-1]):
        if not isinstance(cursor, dict):
            # A non-dict on the path means a prefix is already a leaf option.
            raise OptionError(msg.format(option=".".join(path[:i])))
        if p not in cursor:
            cursor[p] = {}
        cursor = cursor[p]
    if not isinstance(cursor, dict):
        raise OptionError(msg.format(option=".".join(path[:-1])))
    cursor[path[-1]] = defval  # initialize
    # save the option metadata
    _registered_options[key] = RegisteredOption(
        key=key, defval=defval, doc=doc, validator=validator, cb=cb
    )
def deprecate_option(
    key: str,
    msg: str | None = None,
    rkey: str | None = None,
    removal_ver: str | None = None,
) -> None:
    """
    Mark option `key` as deprecated, if code attempts to access this option,
    a warning will be produced, using `msg` if given, or a default message
    if not.
    if `rkey` is given, any access to the key will be re-routed to `rkey`.
    Neither the existence of `key` nor that of `rkey` is checked. If they
    do not exist, any subsequent access will fail as usual, after the
    deprecation warning is given.
    Parameters
    ----------
    key : str
        Name of the option to be deprecated.
        must be a fully-qualified option name (e.g "x.y.z.rkey").
    msg : str, optional
        Warning message to output when the key is referenced.
        if no message is given a default message will be emitted.
    rkey : str, optional
        Name of an option to reroute access to.
        If specified, any referenced `key` will be
        re-routed to `rkey` including set/get/reset.
        rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
        used by the default message if no `msg` is specified.
    removal_ver : str, optional
        Specifies the version in which this option will
        be removed. used by the default message if no `msg` is specified.
    Raises
    ------
    OptionError
        If the specified key has already been deprecated.
    """
    key = key.lower()
    if key in _deprecated_options:
        raise OptionError(f"Option '{key}' has already been defined as deprecated.")
    _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
#
# functions internal to the module
def _select_options(pat: str) -> list[str]:
    """
    Return the registered keys matching `pat`.
    An exact key match wins outright; pat == "all" returns every key;
    otherwise a case-insensitive regex search over all registered keys.
    """
    # short-circuit for exact key
    if pat in _registered_options:
        return [pat]
    all_keys = sorted(_registered_options.keys())
    if pat == "all":  # reserved key
        return all_keys
    pattern = re.compile(pat, re.I)
    return [k for k in all_keys if pattern.search(k)]
def _get_root(key: str) -> tuple[dict[str, Any], str]:
    """Walk the nested config dict; return (containing dict, leaf key)."""
    *prefix, leaf = key.split(".")
    cursor = _global_config
    for part in prefix:
        cursor = cursor[part]
    return cursor, leaf
def _is_deprecated(key: str) -> bool:
    """Return True if the given option has been deprecated."""
    return key.lower() in _deprecated_options
def _get_deprecated_option(key: str):
    """
    Retrieve the metadata for a deprecated option, if `key` is deprecated.
    Returns
    -------
    DeprecatedOption (namedtuple) if key is deprecated, None otherwise
    """
    # dict.get gives the same try/except-KeyError semantics in one step.
    return _deprecated_options.get(key)
def _get_registered_option(key: str):
    """
    Retrieves the option metadata if `key` is a registered option.
    Returns
    -------
    RegisteredOption (namedtuple) if key is registered, None otherwise
    """
    return _registered_options.get(key)
def _translate_key(key: str) -> str:
    """
    If `key` is deprecated and a replacement key is defined, return the
    replacement key; otherwise return `key` as-is.
    """
    deprecation = _get_deprecated_option(key)
    if deprecation is None:
        return key
    return deprecation.rkey or key
def _warn_if_deprecated(key: str) -> bool:
    """
    Checks if `key` is a deprecated option and if so, prints a warning.
    Returns
    -------
    bool - True if `key` is deprecated, False otherwise.
    """
    d = _get_deprecated_option(key)
    if d:
        if d.msg:
            # Custom message supplied when the option was deprecated.
            warnings.warn(
                d.msg,
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        else:
            # Build a default message from the available metadata.
            msg = f"'{key}' is deprecated"
            if d.removal_ver:
                msg += f" and will be removed in {d.removal_ver}"
            if d.rkey:
                msg += f", please use '{d.rkey}' instead."
            else:
                msg += ", please refrain from using it."
            warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
        return True
    return False
def _build_option_description(k: str) -> str:
    """Builds a formatted description of a registered option and prints it"""
    o = _get_registered_option(k)
    d = _get_deprecated_option(k)
    s = f"{k} "
    # NOTE(review): `o.doc` is read before the `if o:` guard below; this
    # relies on `k` always being a registered key - confirm callers
    # guarantee that.
    if o.doc:
        s += "\n".join(o.doc.strip().split("\n"))
    else:
        s += "No description available."
    if o:
        s += f"\n [default: {o.defval}] [currently: {_get_option(k, True)}]"
    if d:
        # Append deprecation info, including the replacement key if any.
        rkey = d.rkey or ""
        s += "\n (Deprecated"
        s += f", use `{rkey}` instead."
        s += ")"
    return s
def pp_options_list(keys: Iterable[str], width: int = 80, _print: bool = False):
    """
    Build a concise listing of the given option keys, grouped by their
    dotted prefix. Prints when `_print` is True, otherwise returns the text.
    """
    from itertools import groupby
    from textwrap import wrap

    def fmt_group(prefix: str, names: Iterable[str]) -> list[str]:
        # "- prefix.[a, b, ...]" for dotted groups, bare names otherwise.
        lead = "- " + prefix + ".[" if prefix else ""
        wrapped = wrap(
            ", ".join(names),
            width,
            initial_indent=lead,
            subsequent_indent=" ",
            break_long_words=False,
        )
        if wrapped and wrapped[-1] and prefix:
            wrapped[-1] = wrapped[-1] + "]"
        return wrapped

    lines: list[str] = []
    top_level = [k for k in sorted(keys) if k.find(".") < 0]
    if top_level:
        lines += fmt_group("", top_level)
    dotted = [k for k in keys if k.find(".") >= 0]
    for prefix, grp in groupby(sorted(dotted), lambda k: k[: k.rfind(".")]):
        names = [k[len(prefix) + 1 :] for k in grp]
        lines += fmt_group(prefix, names)
    joined = "\n".join(lines)
    if _print:
        print(joined)
    else:
        return joined
#
# helpers
@contextmanager
def config_prefix(prefix) -> Generator[None, None, None]:
    """
    contextmanager for multiple invocations of API with a common prefix
    supported API functions: (register / get / set )__option
    Warning: This is not thread - safe, and won't work properly if you import
    the API functions into your module using the "from x import y" construct.
    Example
    -------
    import pandas._config.config as cf
    with cf.config_prefix("display.font"):
        cf.register_option("color", "red")
        cf.register_option("size", " 5 pt")
        cf.set_option(size, " 6 pt")
        cf.get_option(size)
    ...
    etc'
    will register options "display.font.color", "display.font.size", set the
    value of "display.font.size"... and so on.
    """
    # Note: reset_option relies on set_option, and on key directly
    # it does not fit in to this monkey-patching scheme
    global register_option, get_option, set_option
    def wrap(func: F) -> F:
        # Prepend "prefix." to the key before delegating to the real function.
        def inner(key: str, *args, **kwds):
            pkey = f"{prefix}.{key}"
            return func(pkey, *args, **kwds)
        return cast(F, inner)
    # Save the originals so they can be restored on exit.
    _register_option = register_option
    _get_option = get_option
    _set_option = set_option
    # Temporarily replace the module-level API with prefixed versions.
    set_option = wrap(set_option)
    get_option = wrap(get_option)
    register_option = wrap(register_option)
    try:
        yield
    finally:
        # Always restore the originals, even if the body raised.
        set_option = _set_option
        get_option = _get_option
        register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type: type[Any]) -> Callable[[Any], None]:
    """
    Build a validator requiring values to be exactly of type `_type`.
    The returned function raises ValueError when ``type(x)`` is not equal
    to `_type` (so subclasses such as bool-for-int are rejected), and
    returns None otherwise.
    """

    def check_exact_type(x) -> None:
        if type(x) != _type:
            raise ValueError(f"Value must have type '{_type}'")

    return check_exact_type
def is_instance_factory(_type) -> Callable[[Any], None]:
    """
    Build a validator requiring values to be instances of `_type`.
    `_type` may be a single type or a tuple/list of acceptable types; the
    returned function raises ValueError for any non-instance and returns
    None otherwise.
    """
    if isinstance(_type, (tuple, list)):
        _type = tuple(_type)
    type_repr = (
        "|".join(map(str, _type)) if isinstance(_type, tuple) else f"'{_type}'"
    )

    def check_instance(x) -> None:
        if not isinstance(x, _type):
            raise ValueError(f"Value must be an instance of {type_repr}")

    return check_instance
def is_one_of_factory(legal_values) -> Callable[[Any], None]:
    """
    Build a validator accepting only the given literal values, plus any
    value for which one of the supplied callables returns truthy.
    """
    callables = [c for c in legal_values if callable(c)]
    legal_values = [c for c in legal_values if not callable(c)]

    def check_member(x) -> None:
        if x in legal_values:
            return
        if any(c(x) for c in callables):
            return
        pp_values = "|".join(str(lval) for lval in legal_values)
        msg = f"Value must be one of {pp_values}"
        if callables:
            msg += " or a callable"
        raise ValueError(msg)

    return check_member
def is_nonnegative_int(value: object) -> None:
    """
    Verify that value is None or a nonnegative int.

    Parameters
    ----------
    value : None or int
        The `value` to be checked.

    Raises
    ------
    ValueError
        When the value is neither None nor a nonnegative integer.
    """
    if value is None:
        return
    elif isinstance(value, int):
        if value >= 0:
            return
    # reached for negative ints and for non-int, non-None values
    msg = "Value must be a nonnegative integer or None"
    raise ValueError(msg)
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)  # exact int (rejects bool, a subclass of int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
is_text = is_instance_factory((str, bytes))  # any string-like: str or bytes
def is_callable(obj) -> bool:
    """
    Validate that ``obj`` is callable.

    Parameters
    ----------
    obj : object
        The object to be checked.

    Returns
    -------
    bool
        True when ``obj`` is callable; a ValueError is raised otherwise.
    """
    if callable(obj):
        return True
    raise ValueError("Value must be a callable")
| bsd-3-clause | d7e0eef3e82a39ea7a43c0ded1597739 | 25.976974 | 86 | 0.619233 | 3.855061 | false | false | false | false |
pandas-dev/pandas | pandas/tests/frame/methods/test_isin.py | 5 | 7323 | import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
class TestDataFrameIsIn:
    """Tests for DataFrame.isin against lists, dicts, Series and DataFrames."""

    def test_isin(self):
        # GH#4211
        df = DataFrame(
            {
                "vals": [1, 2, 3, 4],
                "ids": ["a", "b", "f", "n"],
                "ids2": ["a", "n", "c", "n"],
            },
            index=["foo", "bar", "baz", "qux"],
        )
        other = ["a", "b", "c"]
        result = df.isin(other)
        # frame-level isin must match row-by-row Series.isin
        expected = DataFrame([df.loc[s].isin(other) for s in df.index])
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
    def test_isin_empty(self, empty):
        # GH#16991
        df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
        expected = DataFrame(False, df.index, df.columns)
        result = df.isin(empty)
        tm.assert_frame_equal(result, expected)
    def test_isin_dict(self):
        df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
        d = {"A": ["a"]}
        # a dict restricts matching to the named column only
        expected = DataFrame(False, df.index, df.columns)
        expected.loc[0, "A"] = True
        result = df.isin(d)
        tm.assert_frame_equal(result, expected)
        # non unique columns
        df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
        df.columns = ["A", "A"]
        expected = DataFrame(False, df.index, df.columns)
        expected.loc[0, "A"] = True
        result = df.isin(d)
        tm.assert_frame_equal(result, expected)
    def test_isin_with_string_scalar(self):
        # GH#4763
        df = DataFrame(
            {
                "vals": [1, 2, 3, 4],
                "ids": ["a", "b", "f", "n"],
                "ids2": ["a", "n", "c", "n"],
            },
            index=["foo", "bar", "baz", "qux"],
        )
        msg = (
            r"only list-like or dict-like objects are allowed "
            r"to be passed to DataFrame.isin\(\), you passed a 'str'"
        )
        with pytest.raises(TypeError, match=msg):
            df.isin("a")
        with pytest.raises(TypeError, match=msg):
            df.isin("aaa")
    def test_isin_df(self):
        df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
        df2 = DataFrame({"A": [0, 2, 12, 4], "B": [2, np.nan, 4, 5]})
        expected = DataFrame(False, df1.index, df1.columns)
        result = df1.isin(df2)
        # matches are positional (aligned by index), not value-based
        expected.loc[[1, 3], "A"] = True
        expected.loc[[0, 2], "B"] = True
        tm.assert_frame_equal(result, expected)
        # partial overlapping columns
        df2.columns = ["A", "C"]
        result = df1.isin(df2)
        expected["B"] = False
        tm.assert_frame_equal(result, expected)
    def test_isin_tuples(self):
        # GH#16394
        df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "f"]})
        df["C"] = list(zip(df["A"], df["B"]))
        result = df["C"].isin([(1, "a")])
        tm.assert_series_equal(result, Series([True, False, False], name="C"))
    def test_isin_df_dupe_values(self):
        df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
        # just cols duped
        df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]], columns=["B", "B"])
        msg = r"cannot compute isin with a duplicate axis\."
        with pytest.raises(ValueError, match=msg):
            df1.isin(df2)
        # just index duped
        df2 = DataFrame(
            [[0, 2], [12, 4], [2, np.nan], [4, 5]],
            columns=["A", "B"],
            index=[0, 0, 1, 1],
        )
        with pytest.raises(ValueError, match=msg):
            df1.isin(df2)
        # cols and index:
        df2.columns = ["B", "B"]
        with pytest.raises(ValueError, match=msg):
            df1.isin(df2)
    def test_isin_dupe_self(self):
        # duplicate columns on the calling frame are allowed
        other = DataFrame({"A": [1, 0, 1, 0], "B": [1, 1, 0, 0]})
        df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=["A", "A"])
        result = df.isin(other)
        expected = DataFrame(False, index=df.index, columns=df.columns)
        expected.loc[0] = True
        expected.iloc[1, 1] = True
        tm.assert_frame_equal(result, expected)
    def test_isin_against_series(self):
        df = DataFrame(
            {"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]}, index=["a", "b", "c", "d"]
        )
        s = Series([1, 3, 11, 4], index=["a", "b", "c", "d"])
        expected = DataFrame(False, index=df.index, columns=df.columns)
        expected.loc["a", "A"] = True
        expected.loc["d"] = True
        result = df.isin(s)
        tm.assert_frame_equal(result, expected)
    def test_isin_multiIndex(self):
        idx = MultiIndex.from_tuples(
            [
                (0, "a", "foo"),
                (0, "a", "bar"),
                (0, "b", "bar"),
                (0, "b", "baz"),
                (2, "a", "foo"),
                (2, "a", "bar"),
                (2, "c", "bar"),
                (2, "c", "baz"),
                (1, "b", "foo"),
                (1, "b", "bar"),
                (1, "c", "bar"),
                (1, "c", "baz"),
            ]
        )
        df1 = DataFrame({"A": np.ones(12), "B": np.zeros(12)}, index=idx)
        df2 = DataFrame(
            {
                "A": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
                "B": [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1],
            }
        )
        # against regular index: no index overlap, so nothing matches
        expected = DataFrame(False, index=df1.index, columns=df1.columns)
        result = df1.isin(df2)
        tm.assert_frame_equal(result, expected)
        df2.index = idx
        expected = df2.values.astype(bool)
        expected[:, 1] = ~expected[:, 1]
        expected = DataFrame(expected, columns=["A", "B"], index=idx)
        result = df1.isin(df2)
        tm.assert_frame_equal(result, expected)
    def test_isin_empty_datetimelike(self):
        # GH#15473
        df1_ts = DataFrame({"date": pd.to_datetime(["2014-01-01", "2014-01-02"])})
        df1_td = DataFrame({"date": [pd.Timedelta(1, "s"), pd.Timedelta(2, "s")]})
        df2 = DataFrame({"date": []})
        df3 = DataFrame()
        expected = DataFrame({"date": [False, False]})
        result = df1_ts.isin(df2)
        tm.assert_frame_equal(result, expected)
        result = df1_ts.isin(df3)
        tm.assert_frame_equal(result, expected)
        result = df1_td.isin(df2)
        tm.assert_frame_equal(result, expected)
        result = df1_td.isin(df3)
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize(
        "values",
        [
            DataFrame({"a": [1, 2, 3]}, dtype="category"),
            Series([1, 2, 3], dtype="category"),
        ],
    )
    def test_isin_category_frame(self, values):
        # GH#34256
        df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
        expected = DataFrame({"a": [True, True, True], "b": [False, False, False]})
        result = df.isin(values)
        tm.assert_frame_equal(result, expected)
    def test_isin_read_only(self):
        # https://github.com/pandas-dev/pandas/issues/37174
        arr = np.array([1, 2, 3])
        arr.setflags(write=False)
        df = DataFrame([1, 2, 3])
        result = df.isin(arr)
        expected = DataFrame([True, True, True])
        tm.assert_frame_equal(result, expected)
| bsd-3-clause | 2dc17fd398a7261fa0d02031f005cede | 32.438356 | 83 | 0.474669 | 3.240265 | false | true | false | false |
pandas-dev/pandas | asv_bench/benchmarks/strftime.py | 2 | 1818 | import numpy as np
import pandas as pd
from pandas import offsets
class DatetimeStrftime:
    """ASV benchmarks for rendering date/datetime columns as strings."""

    timeout = 1500
    params = [1000, 10000]
    param_names = ["obs"]

    def setup(self, obs):
        # One repeated timestamp, one repeated date and a float column.
        day = "2018-11-29"
        stamp = "2018-11-26 11:18:27.0"
        self.data = pd.DataFrame(
            {
                "dt": [np.datetime64(stamp)] * obs,
                "d": [np.datetime64(day)] * obs,
                "r": [np.random.uniform()] * obs,
            }
        )

    def time_frame_date_to_str(self, obs):
        # baseline: default string conversion of the date column
        self.data["d"].astype(str)

    def time_frame_date_formatting_default(self, obs):
        self.data["d"].dt.strftime(date_format="%Y-%m-%d")

    def time_frame_date_formatting_custom(self, obs):
        self.data["d"].dt.strftime(date_format="%Y---%m---%d")

    def time_frame_datetime_to_str(self, obs):
        # baseline: default string conversion of the datetime column
        self.data["dt"].astype(str)

    def time_frame_datetime_formatting_default_date_only(self, obs):
        self.data["dt"].dt.strftime(date_format="%Y-%m-%d")

    def time_frame_datetime_formatting_default(self, obs):
        self.data["dt"].dt.strftime(date_format="%Y-%m-%d %H:%M:%S")

    def time_frame_datetime_formatting_default_with_float(self, obs):
        self.data["dt"].dt.strftime(date_format="%Y-%m-%d %H:%M:%S.%f")

    def time_frame_datetime_formatting_custom(self, obs):
        self.data["dt"].dt.strftime(date_format="%Y-%m-%d --- %H:%M:%S")
class BusinessHourStrftime:
    """ASV benchmarks for string rendering of BusinessHour offsets."""

    timeout = 1500
    params = [1000, 10000]
    param_names = ["obs"]

    def setup(self, obs):
        # a single offset instance repeated obs times
        bh = offsets.BusinessHour()
        self.data = pd.DataFrame({"off": [bh] * obs})

    def time_frame_offset_str(self, obs):
        self.data["off"].apply(str)

    def time_frame_offset_repr(self, obs):
        self.data["off"].apply(repr)
| bsd-3-clause | 9636d3138181946ea50967b0faafd712 | 27.40625 | 72 | 0.562156 | 3.252236 | false | false | false | false |
pandas-dev/pandas | pandas/core/common.py | 1 | 17076 | """
Misc tools for implementing data structures
Note: pandas.core.common is *not* part of the public API.
"""
from __future__ import annotations
import builtins
from collections import (
abc,
defaultdict,
)
import contextlib
from functools import partial
import inspect
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Generator,
Hashable,
Iterable,
Sequence,
cast,
overload,
)
import numpy as np
from pandas._libs import lib
from pandas._typing import (
AnyArrayLike,
ArrayLike,
NpDtype,
RandomState,
T,
)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import iterable_not_string
from pandas.core.dtypes.missing import isna
if TYPE_CHECKING:
from pandas import Index
def flatten(line):
    """
    Flatten an arbitrarily nested sequence.

    Parameters
    ----------
    line : sequence
        The non string sequence to flatten

    Notes
    -----
    This doesn't consider strings sequences.

    Returns
    -------
    flattened : generator
    """
    for item in line:
        if not iterable_not_string(item):
            # strings and scalars are yielded as-is
            yield item
        else:
            yield from flatten(item)
def consensus_name_attr(objs):
    """Return the ``name`` shared by every object in *objs*, else None."""
    it = iter(objs)
    consensus = next(it).name
    for other in it:
        try:
            if other.name != consensus:
                consensus = None
        except ValueError:
            # e.g. array-valued names where != is ambiguous in a bool context
            consensus = None
    return consensus
def is_bool_indexer(key: Any) -> bool:
    """
    Check whether `key` is a valid boolean indexer.

    Parameters
    ----------
    key : Any
        Only list-likes may be considered boolean indexers.
        All other types are not considered a boolean indexer.
        For array-like input, boolean ndarrays or ExtensionArrays
        with ``_is_boolean`` set are considered boolean indexers.

    Returns
    -------
    bool
        Whether `key` is a valid boolean indexer.

    Raises
    ------
    ValueError
        When the array is an object-dtype ndarray or ExtensionArray
        and contains missing values.

    See Also
    --------
    check_array_indexer : Check that `key` is a valid array to index,
        and convert to an ndarray.
    """
    # array-likes: decide based on dtype
    if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or (
        is_array_like(key) and is_extension_array_dtype(key.dtype)
    ):
        if key.dtype == np.object_:
            # object dtype may hold bools mixed with NA or other scalars
            key_array = np.asarray(key)
            if not lib.is_bool_array(key_array):
                na_msg = "Cannot mask with non-boolean array containing NA / NaN values"
                if lib.infer_dtype(key_array) == "boolean" and isna(key_array).any():
                    # Don't raise on e.g. ["A", "B", np.nan], see
                    # test_loc_getitem_list_of_labels_categoricalindex_with_na
                    raise ValueError(na_msg)
                return False
            return True
        elif is_bool_dtype(key.dtype):
            return True
    elif isinstance(key, list):
        # check if np.array(key).dtype would be bool
        if len(key) > 0:
            if type(key) is not list:
                # GH#42461 cython will raise TypeError if we pass a subclass
                key = list(key)
            return lib.is_bool_list(key)
    # empty lists and all other types are not boolean indexers
    return False
def cast_scalar_indexer(val):
    """
    Disallow indexing with a float key, even if that key is a round number.

    Parameters
    ----------
    val : scalar

    Returns
    -------
    outval : scalar

    Raises
    ------
    IndexError
        When ``val`` is a round float such as ``1.0``.
    """
    # assumes lib.is_scalar(val)
    if not (lib.is_float(val) and val.is_integer()):
        return val
    raise IndexError(
        # GH#34193
        "Indexing with a float is no longer supported. Manually convert "
        "to an integer key instead."
    )
def not_none(*args):
    """
    Returns a generator consisting of the arguments that are not None.
    """
    return (x for x in args if x is not None)
def any_none(*args) -> bool:
    """
    Returns a boolean indicating if any argument is None.
    """
    for arg in args:
        if arg is None:
            return True
    return False
def all_none(*args) -> bool:
    """
    Returns a boolean indicating if all arguments are None.
    """
    return not any(arg is not None for arg in args)
def any_not_none(*args) -> bool:
    """
    Returns a boolean indicating if any argument is not None.
    """
    for arg in args:
        if arg is not None:
            return True
    return False
def all_not_none(*args) -> bool:
    """
    Returns a boolean indicating if all arguments are not None.
    """
    return not any(arg is None for arg in args)
def count_not_none(*args) -> int:
    """
    Returns the count of arguments that are not None.
    """
    return len(args) - sum(arg is None for arg in args)
@overload
def asarray_tuplesafe(
    values: ArrayLike | list | tuple | zip, dtype: NpDtype | None = ...
) -> np.ndarray:
    # ExtensionArray can only be returned when values is an Index, all other iterables
    # will return np.ndarray. Unfortunately "all other" cannot be encoded in a type
    # signature, so instead we special-case some common types.
    ...
@overload
def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = ...) -> ArrayLike:
    ...
def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None = None) -> ArrayLike:
    """
    Convert ``values`` to an array, keeping any tuples intact as scalar
    (object) elements rather than letting numpy expand them into rows.
    """
    if not (isinstance(values, (list, tuple)) or hasattr(values, "__array__")):
        # consume other iterables (generators, sets, ...) into a list first
        values = list(values)
    elif isinstance(values, ABCIndex):
        return values._values
    if isinstance(values, list) and dtype in [np.object_, object]:
        return construct_1d_object_array_from_listlike(values)
    result = np.asarray(values, dtype=dtype)
    if issubclass(result.dtype.type, str):
        # avoid fixed-width unicode dtype; fall back to object
        result = np.asarray(values, dtype=object)
    if result.ndim == 2:
        # Avoid building an array of arrays:
        values = [tuple(x) for x in values]
        result = construct_1d_object_array_from_listlike(values)
    return result
def index_labels_to_array(
    labels: np.ndarray | Iterable, dtype: NpDtype | None = None
) -> np.ndarray:
    """
    Transform label or iterable of labels to array, for use in Index.

    Parameters
    ----------
    labels : scalar label or iterable of labels
    dtype : dtype
        If specified, use as dtype of the resulting array, otherwise infer.

    Returns
    -------
    array
    """
    if isinstance(labels, (str, tuple)):
        # a single str/tuple is one label, not a sequence of labels
        labels = [labels]
    elif not isinstance(labels, (list, np.ndarray)):
        try:
            labels = list(labels)
        except TypeError:
            # non-iterable scalar: wrap it
            labels = [labels]
    return asarray_tuplesafe(labels, dtype=dtype)
def maybe_make_list(obj):
    """Wrap a scalar in a one-element list; pass None/tuples/lists through."""
    if obj is None or isinstance(obj, (tuple, list)):
        return obj
    return [obj]
def maybe_iterable_to_list(obj: Iterable[T] | T) -> Collection[T] | T:
    """
    If obj is Iterable but not list-like, consume into list.
    """
    if isinstance(obj, abc.Iterable) and not isinstance(obj, abc.Sized):
        # unsized iterables (e.g. generators) are materialized
        return list(obj)
    return cast(Collection, obj)
def is_null_slice(obj) -> bool:
    """
    We have a null slice.
    """
    if not isinstance(obj, slice):
        return False
    return obj.start is None and obj.stop is None and obj.step is None
def is_true_slices(line) -> list[bool]:
    """
    Find non-trivial slices in "line": return a list of booleans with same length.
    """
    result = []
    for k in line:
        result.append(isinstance(k, slice) and not is_null_slice(k))
    return result
# TODO: used only once in indexing; belongs elsewhere?
def is_full_slice(obj, line: int) -> bool:
    """
    We have a full length slice.
    """
    if not isinstance(obj, slice):
        return False
    # start at 0, stop at exactly `line`, default step
    return obj.start == 0 and obj.stop == line and obj.step is None
def get_callable_name(obj):
    """
    Best-effort name for a callable: its ``__name__``, the wrapped func of
    a ``functools.partial``, the type name for callable instances, or None.
    """
    # typical case has name
    try:
        return obj.__name__
    except AttributeError:
        pass
    # some objects don't; could recurse
    if isinstance(obj, partial):
        return get_callable_name(obj.func)
    # fall back to class name
    if callable(obj):
        return type(obj).__name__
    # everything failed (probably because the argument
    # wasn't actually callable); we return None
    # instead of the empty string in this case to allow
    # distinguishing between no name and a name of ''
    return None
def apply_if_callable(maybe_callable, obj, **kwargs):
    """
    Evaluate possibly callable input using obj and kwargs if it is callable,
    otherwise return as it is.

    Parameters
    ----------
    maybe_callable : possibly a callable
    obj : NDFrame
    **kwargs
    """
    if not callable(maybe_callable):
        return maybe_callable
    return maybe_callable(obj, **kwargs)
def standardize_mapping(into):
    """
    Helper function to standardize a supplied mapping.

    Parameters
    ----------
    into : instance or subclass of collections.abc.Mapping
        Must be a class, an initialized collections.defaultdict,
        or an instance of a collections.abc.Mapping subclass.

    Returns
    -------
    mapping : a collections.abc.Mapping subclass or other constructor
        a callable object that can accept an iterator to create
        the desired Mapping.

    See Also
    --------
    DataFrame.to_dict
    Series.to_dict
    """
    if inspect.isclass(into):
        klass = into
    elif isinstance(into, defaultdict):
        # preserve the default_factory of an initialized defaultdict
        return partial(defaultdict, into.default_factory)
    else:
        klass = type(into)
    if not issubclass(klass, abc.Mapping):
        raise TypeError(f"unsupported type: {klass}")
    if klass == defaultdict:
        raise TypeError("to_dict() only accepts initialized defaultdicts")
    return klass
@overload
def random_state(state: np.random.Generator) -> np.random.Generator:
    ...
@overload
def random_state(
    state: int | ArrayLike | np.random.BitGenerator | np.random.RandomState | None,
) -> np.random.RandomState:
    ...
def random_state(state: RandomState | None = None):
    """
    Helper function for processing random_state arguments.

    Parameters
    ----------
    state : int, array-like, BitGenerator, Generator, np.random.RandomState, None.
        If receives an int, array-like, or BitGenerator, passes to
        np.random.RandomState() as seed.
        If receives an np.random RandomState or Generator, just returns that unchanged.
        If receives `None`, returns np.random.
        If receives anything else, raises an informative ValueError.

        .. versionchanged:: 1.1.0

            array-like and BitGenerator object now passed to np.random.RandomState()
            as seed

        Default None.

    Returns
    -------
    np.random.RandomState or np.random.Generator. If state is None, returns np.random
    """
    if isinstance(state, (np.random.RandomState, np.random.Generator)):
        # already an initialized RNG: hand it back untouched
        return state
    if state is None:
        return np.random
    if (
        is_integer(state)
        or is_array_like(state)
        or isinstance(state, np.random.BitGenerator)
    ):
        # seed material: feed it to a fresh RandomState
        # (mypy cannot narrow the union here, hence the ignore)
        return np.random.RandomState(state)  # type: ignore[arg-type]
    raise ValueError(
        "random_state must be an integer, array-like, a BitGenerator, Generator, "
        "a numpy RandomState, or None"
    )
def pipe(
    obj, func: Callable[..., T] | tuple[Callable[..., T], str], *args, **kwargs
) -> T:
    """
    Apply a function ``func`` to object ``obj`` either by passing obj as the
    first argument to the function or, in the case that the func is a tuple,
    interpret the first element of the tuple as a function and pass the obj to
    that function as a keyword argument whose key is the value of the second
    element of the tuple.

    Parameters
    ----------
    func : callable or tuple of (callable, str)
        Function to apply to this object or, alternatively, a
        ``(callable, data_keyword)`` tuple where ``data_keyword`` is a
        string indicating the keyword of ``callable`` that expects the
        object.
    *args : iterable, optional
        Positional arguments passed into ``func``.
    **kwargs : dict, optional
        A dictionary of keyword arguments passed into ``func``.

    Returns
    -------
    object : the return type of ``func``.
    """
    if not isinstance(func, tuple):
        # plain callable: obj goes first
        return func(obj, *args, **kwargs)
    func, target = func
    if target in kwargs:
        msg = f"{target} is both the pipe target and a keyword argument"
        raise ValueError(msg)
    # route obj in through the named keyword instead
    kwargs[target] = obj
    return func(*args, **kwargs)
def get_rename_function(mapper):
    """
    Returns a function that will map names/labels, dependent if mapper
    is a dict, Series or just a function.
    """
    if not isinstance(mapper, (abc.Mapping, ABCSeries)):
        # already a callable (or arbitrary object): use as-is
        return mapper

    def f(x):
        # labels absent from the mapper pass through unchanged
        return mapper[x] if x in mapper else x

    return f
def convert_to_list_like(
    values: Hashable | Iterable | AnyArrayLike,
) -> list | AnyArrayLike:
    """
    Convert list-like or scalar input to list-like. List, numpy and pandas array-like
    inputs are returned unmodified whereas others are converted to list.
    """
    if isinstance(values, (list, np.ndarray, ABCIndex, ABCSeries, ABCExtensionArray)):
        return values
    if isinstance(values, str) or not isinstance(values, abc.Iterable):
        # strings and scalars become single-element lists
        return [values]
    return list(values)
@contextlib.contextmanager
def temp_setattr(obj, attr: str, value) -> Generator[None, None, None]:
    """Temporarily set attribute on an object.

    Args:
        obj: Object whose attribute will be modified.
        attr: Attribute to modify.
        value: Value to temporarily set attribute to.

    Yields:
        obj with modified attribute.
    """
    saved = getattr(obj, attr)
    setattr(obj, attr, value)
    try:
        yield obj
    finally:
        # always restore the previous value, even if the body raised
        setattr(obj, attr, saved)
def require_length_match(data, index: Index) -> None:
    """
    Check the length of data matches the length of the index.
    """
    if len(data) == len(index):
        return
    raise ValueError(
        "Length of values "
        f"({len(data)}) "
        "does not match length of index "
        f"({len(index)})"
    )
# the ufuncs np.maximum.reduce and np.minimum.reduce default to axis=0,
# whereas np.min and np.max (which directly call obj.min and obj.max)
# default to axis=None.
# Maps Python builtins to their numpy equivalents for apply/agg dispatch.
_builtin_table = {
    builtins.sum: np.sum,
    builtins.max: np.maximum.reduce,
    builtins.min: np.minimum.reduce,
}
# Maps builtin/numpy reducers to the name of the internal cython
# implementation used by groupby/agg; see get_cython_func below.
_cython_table = {
    builtins.sum: "sum",
    builtins.max: "max",
    builtins.min: "min",
    np.all: "all",
    np.any: "any",
    np.sum: "sum",
    np.nansum: "sum",
    np.mean: "mean",
    np.nanmean: "mean",
    np.prod: "prod",
    np.nanprod: "prod",
    np.std: "std",
    np.nanstd: "std",
    np.var: "var",
    np.nanvar: "var",
    np.median: "median",
    np.nanmedian: "median",
    np.max: "max",
    np.nanmax: "max",
    np.min: "min",
    np.nanmin: "min",
    np.cumprod: "cumprod",
    np.nancumprod: "cumprod",
    np.cumsum: "cumsum",
    np.nancumsum: "cumsum",
}
def get_cython_func(arg: Callable) -> str | None:
    """
    if we define an internal function for this argument, return it
    """
    if arg in _cython_table:
        return _cython_table[arg]
    return None
def is_builtin_func(arg):
"""
if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return _builtin_table.get(arg, arg)
def fill_missing_names(names: Sequence[Hashable | None]) -> list[Hashable]:
    """
    If a name is missing then replace it by level_n, where n is the count

    .. versionadded:: 1.4.0

    Parameters
    ----------
    names : list-like
        list of column names or None values.

    Returns
    -------
    list
        list of column names with the None values replaced.
    """
    filled = []
    for i, name in enumerate(names):
        filled.append(f"level_{i}" if name is None else name)
    return filled
| bsd-3-clause | c0b958c9e4f22ca745845c5911b8c246 | 26.061807 | 88 | 0.618763 | 3.985994 | false | false | false | false |
pandas-dev/pandas | pandas/core/arrays/string_.py | 1 | 18543 | from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
from pandas._config import get_option
from pandas._libs import (
lib,
missing as libmissing,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._typing import (
AxisInt,
Dtype,
Scalar,
npt,
type_t,
)
from pandas.compat import pa_version_under6p0
from pandas.compat.numpy import function as nv
from pandas.util._decorators import doc
from pandas.core.dtypes.base import (
ExtensionDtype,
StorageExtensionDtype,
register_extension_dtype,
)
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_dtype_equal,
is_integer_dtype,
is_object_dtype,
is_string_dtype,
pandas_dtype,
)
from pandas.core import ops
from pandas.core.array_algos import masked_reductions
from pandas.core.arrays import (
ExtensionArray,
FloatingArray,
IntegerArray,
)
from pandas.core.arrays.floating import FloatingDtype
from pandas.core.arrays.integer import IntegerDtype
from pandas.core.arrays.numpy_ import PandasArray
from pandas.core.construction import extract_array
from pandas.core.indexers import check_array_indexer
from pandas.core.missing import isna
if TYPE_CHECKING:
import pyarrow
from pandas import Series
@register_extension_dtype
class StringDtype(StorageExtensionDtype):
    """
    Extension dtype for string data.

    .. versionadded:: 1.0.0

    .. warning::
       StringDtype is considered experimental. The implementation and
       parts of the API may change without warning.

    Parameters
    ----------
    storage : {"python", "pyarrow"}, optional
        If not given, the value of ``pd.options.mode.string_storage``.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Examples
    --------
    >>> pd.StringDtype()
    string[python]
    >>> pd.StringDtype(storage="pyarrow")
    string[pyarrow]
    """
    name = "string"
    #: StringDtype().na_value uses pandas.NA
    @property
    def na_value(self) -> libmissing.NAType:
        return libmissing.NA
    _metadata = ("storage",)
    def __init__(self, storage=None) -> None:
        # fall back to the global option when no storage is requested
        if storage is None:
            storage = get_option("mode.string_storage")
        if storage not in {"python", "pyarrow"}:
            raise ValueError(
                f"Storage must be 'python' or 'pyarrow'. Got {storage} instead."
            )
        if storage == "pyarrow" and pa_version_under6p0:
            raise ImportError(
                "pyarrow>=6.0.0 is required for PyArrow backed StringArray."
            )
        self.storage = storage
    @property
    def type(self) -> type[str]:
        return str
    @classmethod
    def construct_from_string(cls, string):
        """
        Construct a StringDtype from a string.

        Parameters
        ----------
        string : str
            The type of the name. The storage type will be taking from `string`.
            Valid options and their storage types are

            ========================== ==============================================
            string                     result storage
            ========================== ==============================================
            ``'string'``               pd.options.mode.string_storage, default python
            ``'string[python]'``       python
            ``'string[pyarrow]'``      pyarrow
            ========================== ==============================================

        Returns
        -------
        StringDtype

        Raises
        ------
        TypeError
            If the string is not a valid option.
        """
        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )
        if string == "string":
            return cls()
        elif string == "string[python]":
            return cls(storage="python")
        elif string == "string[pyarrow]":
            return cls(storage="pyarrow")
        else:
            raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
    # https://github.com/pandas-dev/pandas/issues/36126
    # error: Signature of "construct_array_type" incompatible with supertype
    # "ExtensionDtype"
    def construct_array_type(  # type: ignore[override]
        self,
    ) -> type_t[BaseStringArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        # local import avoids a hard pyarrow dependency at module import time
        from pandas.core.arrays.string_arrow import ArrowStringArray
        if self.storage == "python":
            return StringArray
        else:
            return ArrowStringArray
    def __from_arrow__(
        self, array: pyarrow.Array | pyarrow.ChunkedArray
    ) -> BaseStringArray:
        """
        Construct StringArray from pyarrow Array/ChunkedArray.
        """
        if self.storage == "pyarrow":
            from pandas.core.arrays.string_arrow import ArrowStringArray
            return ArrowStringArray(array)
        else:
            import pyarrow
            if isinstance(array, pyarrow.Array):
                chunks = [array]
            else:
                # pyarrow.ChunkedArray
                chunks = array.chunks
            results = []
            for arr in chunks:
                # using _from_sequence to ensure None is converted to NA
                str_arr = StringArray._from_sequence(np.array(arr))
                results.append(str_arr)
            if results:
                return StringArray._concat_same_type(results)
            else:
                # empty ChunkedArray: return an empty StringArray
                return StringArray(np.array([], dtype="object"))
class BaseStringArray(ExtensionArray):
    """
    Mixin class for StringArray, ArrowStringArray.
    """

    @doc(ExtensionArray.tolist)
    def tolist(self):
        # 1-D: convert through numpy; 2-D: delegate to each row
        if self.ndim <= 1:
            return list(self.to_numpy())
        return [row.tolist() for row in self]
class StringArray(BaseStringArray, PandasArray):
"""
Extension array for string data.
.. versionadded:: 1.0.0
.. warning::
StringArray is considered experimental. The implementation and
parts of the API may change without warning.
Parameters
----------
values : array-like
The array of data.
.. warning::
Currently, this expects an object-dtype ndarray
where the elements are Python strings
or nan-likes (``None``, ``np.nan``, ``NA``).
This may change without warning in the future. Use
:meth:`pandas.array` with ``dtype="string"`` for a stable way of
creating a `StringArray` from any sequence.
.. versionchanged:: 1.5.0
StringArray now accepts array-likes containing
nan-likes(``None``, ``np.nan``) for the ``values`` parameter
in addition to strings and :attr:`pandas.NA`
copy : bool, default False
Whether to copy the array of data.
Attributes
----------
None
Methods
-------
None
See Also
--------
array
The recommended function for creating a StringArray.
Series.str
The string methods are available on Series backed by
a StringArray.
Notes
-----
StringArray returns a BooleanArray for comparison methods.
Examples
--------
>>> pd.array(['This is', 'some text', None, 'data.'], dtype="string")
<StringArray>
['This is', 'some text', <NA>, 'data.']
Length: 4, dtype: string
Unlike arrays instantiated with ``dtype="object"``, ``StringArray``
will convert the values to strings.
>>> pd.array(['1', 1], dtype="object")
<PandasArray>
['1', 1]
Length: 2, dtype: object
>>> pd.array(['1', 1], dtype="string")
<StringArray>
['1', '1']
Length: 2, dtype: string
However, instantiating StringArrays directly with non-strings will raise an error.
For comparison methods, `StringArray` returns a :class:`pandas.BooleanArray`:
>>> pd.array(["a", None, "c"], dtype="string") == "a"
<BooleanArray>
[True, <NA>, False]
Length: 3, dtype: boolean
"""
# undo the PandasArray hack
_typ = "extension"
def __init__(self, values, copy: bool = False) -> None:
values = extract_array(values)
super().__init__(values, copy=copy)
if not isinstance(values, type(self)):
self._validate()
NDArrayBacked.__init__(self, self._ndarray, StringDtype(storage="python"))
def _validate(self):
"""Validate that we only store NA or strings."""
if len(self._ndarray) and not lib.is_string_array(self._ndarray, skipna=True):
raise ValueError("StringArray requires a sequence of strings or pandas.NA")
if self._ndarray.dtype != "object":
raise ValueError(
"StringArray requires a sequence of strings or pandas.NA. Got "
f"'{self._ndarray.dtype}' dtype instead."
)
# Check to see if need to convert Na values to pd.NA
if self._ndarray.ndim > 2:
# Ravel if ndims > 2 b/c no cythonized version available
lib.convert_nans_to_NA(self._ndarray.ravel("K"))
else:
lib.convert_nans_to_NA(self._ndarray)
@classmethod
def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):
if dtype and not (isinstance(dtype, str) and dtype == "string"):
dtype = pandas_dtype(dtype)
assert isinstance(dtype, StringDtype) and dtype.storage == "python"
from pandas.core.arrays.masked import BaseMaskedArray
if isinstance(scalars, BaseMaskedArray):
# avoid costly conversion to object dtype
na_values = scalars._mask
result = scalars._data
result = lib.ensure_string_array(result, copy=copy, convert_na_value=False)
result[na_values] = libmissing.NA
else:
# convert non-na-likes to str, and nan-likes to StringDtype().na_value
result = lib.ensure_string_array(scalars, na_value=libmissing.NA, copy=copy)
# Manually creating new array avoids the validation step in the __init__, so is
# faster. Refactor need for validation?
new_string_array = cls.__new__(cls)
NDArrayBacked.__init__(new_string_array, result, StringDtype(storage="python"))
return new_string_array
    @classmethod
    def _from_sequence_of_strings(
        cls, strings, *, dtype: Dtype | None = None, copy: bool = False
    ):
        # Inputs are already strings, so the generic constructor applies as-is.
        return cls._from_sequence(strings, dtype=dtype, copy=copy)
@classmethod
def _empty(cls, shape, dtype) -> StringArray:
values = np.empty(shape, dtype=object)
values[:] = libmissing.NA
return cls(values).astype(dtype, copy=False)
    def __arrow_array__(self, type=None):
        """
        Convert myself into a pyarrow Array.
        """
        import pyarrow as pa
        if type is None:
            type = pa.string()
        values = self._ndarray.copy()
        # pyarrow understands None (not pd.NA) as the null marker, so swap
        # NA positions out before handing the buffer over.
        values[self.isna()] = None
        return pa.array(values, type=type, from_pandas=True)
def _values_for_factorize(self):
arr = self._ndarray.copy()
mask = self.isna()
arr[mask] = None
return arr, None
    def __setitem__(self, key, value):
        """Set one or more positions, validating that values are str or NA.

        Raises
        ------
        ValueError
            If a non-string, non-NA value (or a sequence containing one)
            is assigned, or a sequence is assigned to a scalar position.
        """
        value = extract_array(value, extract_numpy=True)
        if isinstance(value, type(self)):
            # extract_array doesn't extract PandasArray subclasses
            value = value._ndarray
        key = check_array_indexer(self, key)
        scalar_key = lib.is_scalar(key)
        scalar_value = lib.is_scalar(value)
        if scalar_key and not scalar_value:
            raise ValueError("setting an array element with a sequence.")
        # validate new items
        if scalar_value:
            if isna(value):
                value = libmissing.NA
            elif not isinstance(value, str):
                raise ValueError(
                    f"Cannot set non-string value '{value}' into a StringArray."
                )
        else:
            if not is_array_like(value):
                value = np.asarray(value, dtype=object)
            if len(value) and not lib.is_string_array(value, skipna=True):
                raise ValueError("Must provide strings.")
            # Normalise any NA-like entries to pd.NA before storing.
            value[isna(value)] = libmissing.NA
        super().__setitem__(key, value)
    def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None:
        """Analogue of ``np.putmask(self, mask, value)`` with NA support."""
        # the super() method NDArrayBackedExtensionArray._putmask uses
        # np.putmask which doesn't properly handle None/pd.NA, so using the
        # base class implementation that uses __setitem__
        ExtensionArray._putmask(self, mask, value)
    def astype(self, dtype, copy: bool = True):
        """Cast to ``dtype``; masked and plain-numpy targets get special paths."""
        dtype = pandas_dtype(dtype)
        if is_dtype_equal(dtype, self.dtype):
            if copy:
                return self.copy()
            return self
        elif isinstance(dtype, IntegerDtype):
            arr = self._ndarray.copy()
            mask = self.isna()
            # Fill NA slots with a castable placeholder; the mask records
            # which entries are really missing.
            arr[mask] = 0
            values = arr.astype(dtype.numpy_dtype)
            return IntegerArray(values, mask, copy=False)
        elif isinstance(dtype, FloatingDtype):
            # Copy as a StringArray so assignment goes through __setitem__;
            # "0" is a string placeholder that parses as a float.
            arr = self.copy()
            mask = self.isna()
            arr[mask] = "0"
            values = arr.astype(dtype.numpy_dtype)
            return FloatingArray(values, mask, copy=False)
        elif isinstance(dtype, ExtensionDtype):
            return super().astype(dtype, copy=copy)
        elif np.issubdtype(dtype, np.floating):
            # Plain numpy float target: cast, then restore NaN where masked.
            arr = self._ndarray.copy()
            mask = self.isna()
            arr[mask] = 0
            values = arr.astype(dtype)
            values[mask] = np.nan
            return values
        return super().astype(dtype, copy)
def _reduce(
self, name: str, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs
):
if name in ["min", "max"]:
return getattr(self, name)(skipna=skipna, axis=axis)
raise TypeError(f"Cannot perform reduction '{name}' with string dtype")
    def min(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
        """Return the minimum value, skipping NA positions when ``skipna``."""
        nv.validate_min((), kwargs)
        result = masked_reductions.min(
            values=self.to_numpy(), mask=self.isna(), skipna=skipna
        )
        return self._wrap_reduction_result(axis, result)
    def max(self, axis=None, skipna: bool = True, **kwargs) -> Scalar:
        """Return the maximum value, skipping NA positions when ``skipna``."""
        nv.validate_max((), kwargs)
        result = masked_reductions.max(
            values=self.to_numpy(), mask=self.isna(), skipna=skipna
        )
        return self._wrap_reduction_result(axis, result)
    def value_counts(self, dropna: bool = True) -> Series:
        """Return a Series of per-value counts with nullable Int64 counts."""
        from pandas import value_counts
        result = value_counts(self._ndarray, dropna=dropna).astype("Int64")
        # value_counts operated on the raw object ndarray; restore the
        # string dtype on the resulting index.
        result.index = result.index.astype(self.dtype)
        return result
def memory_usage(self, deep: bool = False) -> int:
result = self._ndarray.nbytes
if deep:
return result + lib.memory_usage_of_objects(self._ndarray)
return result
    def _cmp_method(self, other, op):
        """Shared implementation for comparison and arithmetic binary ops."""
        from pandas.arrays import BooleanArray
        if isinstance(other, StringArray):
            other = other._ndarray
        mask = isna(self) | isna(other)
        valid = ~mask
        if not lib.is_scalar(other):
            if len(other) != len(self):
                # prevent improper broadcasting when other is 2D
                raise ValueError(
                    f"Lengths of operands do not match: {len(self)} != {len(other)}"
                )
            other = np.asarray(other)
            # Only operate where both operands are non-NA.
            other = other[valid]
        if op.__name__ in ops.ARITHMETIC_BINOPS:
            # Arithmetic on strings (e.g. "+" concatenation) yields strings;
            # NA positions are filled with pd.NA directly.
            result = np.empty_like(self._ndarray, dtype="object")
            result[mask] = libmissing.NA
            result[valid] = op(self._ndarray[valid], other)
            return StringArray(result)
        else:
            # logical
            result = np.zeros(len(self._ndarray), dtype="bool")
            result[valid] = op(self._ndarray[valid], other)
            return BooleanArray(result, mask)
_arith_method = _cmp_method
# ------------------------------------------------------------------------
# String methods interface
# error: Incompatible types in assignment (expression has type "NAType",
# base class "PandasArray" defined the type as "float")
_str_na_value = libmissing.NA # type: ignore[assignment]
    def _str_map(
        self, f, na_value=None, dtype: Dtype | None = None, convert: bool = True
    ):
        """Map ``f`` over the elements, propagating missing values.

        Parameters
        ----------
        f : callable
            Applied to each non-NA element.
        na_value : scalar, optional
            Result value for NA positions; defaults to this dtype's na_value.
        dtype : Dtype, optional
            Requested result dtype; defaults to the python string dtype.
        convert : bool, default True
            Accepted for interface compatibility; not used in this body.
        """
        from pandas.arrays import BooleanArray
        if dtype is None:
            dtype = StringDtype(storage="python")
        if na_value is None:
            na_value = self.dtype.na_value
        mask = isna(self)
        arr = np.asarray(self)
        if is_integer_dtype(dtype) or is_bool_dtype(dtype):
            constructor: type[IntegerArray] | type[BooleanArray]
            if is_integer_dtype(dtype):
                constructor = IntegerArray
            else:
                constructor = BooleanArray
            na_value_is_na = isna(na_value)
            if na_value_is_na:
                # Placeholder for masked slots; the mask marks them missing.
                na_value = 1
            result = lib.map_infer_mask(
                arr,
                f,
                mask.view("uint8"),
                convert=False,
                na_value=na_value,
                # error: Argument 1 to "dtype" has incompatible type
                # "Union[ExtensionDtype, str, dtype[Any], Type[object]]"; expected
                # "Type[object]"
                dtype=np.dtype(dtype),  # type: ignore[arg-type]
            )
            if not na_value_is_na:
                # Caller supplied a concrete fill value, so nothing is missing.
                mask[:] = False
            return constructor(result, mask)
        elif is_string_dtype(dtype) and not is_object_dtype(dtype):
            # i.e. StringDtype
            result = lib.map_infer_mask(
                arr, f, mask.view("uint8"), convert=False, na_value=na_value
            )
            return StringArray(result)
        else:
            # This is when the result type is object. We reach this when
            # -> We know the result type is truly object (e.g. .encode returns bytes
            # or .findall returns a list).
            # -> We don't know the result type. E.g. `.get` can return anything.
            return lib.map_infer_mask(arr, f, mask.view("uint8"))
import sys
import numpy as np
import pytest
from pandas.compat import (
IS64,
PYPY,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dtype_equal,
is_object_dtype,
)
import pandas as pd
from pandas import (
Index,
Series,
)
import pandas._testing as tm
def test_isnull_notnull_docstrings():
    """isnull/notnull docstrings must state that they alias isna/notna."""
    # GH#41855 make sure its clear these are aliases
    doc = pd.DataFrame.notnull.__doc__
    assert doc.startswith("\nDataFrame.notnull is an alias for DataFrame.notna.\n")
    doc = pd.DataFrame.isnull.__doc__
    assert doc.startswith("\nDataFrame.isnull is an alias for DataFrame.isna.\n")
    doc = Series.notnull.__doc__
    assert doc.startswith("\nSeries.notnull is an alias for Series.notna.\n")
    doc = Series.isnull.__doc__
    assert doc.startswith("\nSeries.isnull is an alias for Series.isna.\n")
@pytest.mark.parametrize(
    "op_name, op",
    [
        ("add", "+"),
        ("sub", "-"),
        ("mul", "*"),
        ("mod", "%"),
        ("pow", "**"),
        ("truediv", "/"),
        ("floordiv", "//"),
    ],
)
def test_binary_ops_docstring(frame_or_series, op_name, op):
    """Flex binary op docstrings must show the operand expression both ways."""
    # not using the all_arithmetic_functions fixture with _get_opstr
    # as _get_opstr is used internally in the dynamic implementation of the docstring
    klass = frame_or_series
    operand1 = klass.__name__.lower()
    operand2 = "other"
    expected_str = " ".join([operand1, op, operand2])
    assert expected_str in getattr(klass, op_name).__doc__
    # reverse version of the binary ops
    expected_str = " ".join([operand2, op, operand1])
    assert expected_str in getattr(klass, "r" + op_name).__doc__
def test_ndarray_compat_properties(index_or_series_obj):
    """Index/Series expose ndarray-like metadata but not the removed attrs."""
    obj = index_or_series_obj
    # Check that we work.
    for p in ["shape", "dtype", "T", "nbytes"]:
        assert getattr(obj, p, None) is not None
    # deprecated properties
    for p in ["strides", "itemsize", "base", "data"]:
        assert not hasattr(obj, p)
    msg = "can only convert an array of size 1 to a Python scalar"
    with pytest.raises(ValueError, match=msg):
        obj.item()  # len > 1
    assert obj.ndim == 1
    assert obj.size == len(obj)
    # .item() is only valid for length-1 containers
    assert Index([1]).item() == 1
    assert Series([1]).item() == 1
def test_array_wrap_compat():
    """__array_wrap__ stays usable (with a deprecation warning) for downstream libs."""
    # Note: at time of dask 2022.01.0, this is still used by eg dask
    # (https://github.com/dask/dask/issues/8580).
    # This test is a small dummy ensuring coverage
    orig = Series([1, 2, 3], dtype="int64", index=["a", "b", "c"])
    with tm.assert_produces_warning(DeprecationWarning):
        result = orig.__array_wrap__(np.array([2, 4, 6], dtype="int64"))
    expected = orig * 2
    tm.assert_series_equal(result, expected)
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(index_or_series_obj):
    """deep=True exceeds the shallow value only for object-holding data."""
    obj = index_or_series_obj
    res = obj.memory_usage()
    res_deep = obj.memory_usage(deep=True)
    is_ser = isinstance(obj, Series)
    is_object = is_object_dtype(obj) or (
        isinstance(obj, Series) and is_object_dtype(obj.index)
    )
    is_categorical = is_categorical_dtype(obj.dtype) or (
        isinstance(obj, Series) and is_categorical_dtype(obj.index.dtype)
    )
    is_object_string = is_dtype_equal(obj, "string[python]") or (
        is_ser and is_dtype_equal(obj.index.dtype, "string[python]")
    )
    if len(obj) == 0:
        if isinstance(obj, Index):
            expected = 0
        else:
            # empty Series still carries fixed index overhead (word-size dependent)
            expected = 108 if IS64 else 64
        assert res_deep == res == expected
    elif is_object or is_categorical or is_object_string:
        # only deep will pick them up
        assert res_deep > res
    else:
        assert res == res_deep
    # sys.getsizeof will call the .memory_usage with
    # deep=True, and add on some GC overhead
    diff = res_deep - sys.getsizeof(obj)
    assert abs(diff) < 100
def test_memory_usage_components_series(series_with_simple_index):
series = series_with_simple_index
total_usage = series.memory_usage(index=True)
non_index_usage = series.memory_usage(index=False)
index_usage = series.index.memory_usage()
assert total_usage == non_index_usage + index_usage
@pytest.mark.parametrize("dtype", tm.NARROW_NP_DTYPES)
def test_memory_usage_components_narrow_series(dtype):
    """Usage decomposition also holds for narrow (sub-64-bit) dtypes."""
    series = tm.make_rand_series(name="a", dtype=dtype)
    total_usage = series.memory_usage(index=True)
    non_index_usage = series.memory_usage(index=False)
    index_usage = series.index.memory_usage()
    assert total_usage == non_index_usage + index_usage
def test_searchsorted(request, index_or_series_obj):
    """np.searchsorted dispatches to obj.searchsorted and stays in bounds."""
    # numpy.searchsorted calls obj.searchsorted under the hood.
    # See gh-12238
    obj = index_or_series_obj
    if isinstance(obj, pd.MultiIndex):
        # See gh-14833
        request.node.add_marker(
            pytest.mark.xfail(
                reason="np.searchsorted doesn't work on pd.MultiIndex: GH 14833"
            )
        )
    elif obj.dtype.kind == "c" and isinstance(obj, Index):
        # TODO: Should Series cases also raise? Looks like they use numpy
        # comparison semantics https://github.com/numpy/numpy/issues/15981
        mark = pytest.mark.xfail(reason="complex objects are not comparable")
        request.node.add_marker(mark)
    max_obj = max(obj, default=0)
    index = np.searchsorted(obj, max_obj)
    assert 0 <= index <= len(obj)
    # supplying an explicit (identity) sorter must give a valid position too
    index = np.searchsorted(obj, max_obj, sorter=range(len(obj)))
    assert 0 <= index <= len(obj)
def test_access_by_position(index_flat):
    """Positional indexing on an Index matches .iloc on the equivalent Series."""
    index = index_flat
    if len(index) == 0:
        pytest.skip("Test doesn't make sense on empty data")
    series = Series(index)
    assert index[0] == series.iloc[0]
    assert index[5] == series.iloc[5]
    assert index[-1] == series.iloc[-1]
    size = len(index)
    assert index[-1] == index[size - 1]
    msg = f"index {size} is out of bounds for axis 0 with size {size}"
    if is_dtype_equal(index.dtype, "string[pyarrow]"):
        # pyarrow-backed indexes raise with a different message
        msg = "index out of bounds"
    with pytest.raises(IndexError, match=msg):
        index[size]
    msg = "single positional indexer is out-of-bounds"
    with pytest.raises(IndexError, match=msg):
        series.iloc[size]
import sqlite3
import numpy as np
from sqlalchemy import create_engine
from pandas import (
DataFrame,
date_range,
read_sql_query,
read_sql_table,
)
from ..pandas_vb_common import tm
class SQL:
    """ASV benchmark: round-trip a mixed-dtype frame through SQL.

    Parametrised over a raw ``sqlite3`` connection and a SQLAlchemy engine,
    both backed by in-memory SQLite databases.
    """
    params = ["sqlalchemy", "sqlite"]
    param_names = ["connection"]
    def setup(self, connection):
        N = 10000
        con = {
            "sqlalchemy": create_engine("sqlite:///:memory:"),
            "sqlite": sqlite3.connect(":memory:"),
        }
        self.table_name = "test_type"
        self.query_all = f"SELECT * FROM {self.table_name}"
        self.con = con[connection]
        self.df = DataFrame(
            {
                "float": np.random.randn(N),
                "float_with_nan": np.random.randn(N),
                "string": ["foo"] * N,
                "bool": [True] * N,
                "int": np.random.randint(0, N, size=N),
                "datetime": date_range("2000-01-01", periods=N, freq="s"),
            },
            index=tm.makeStringIndex(N),
        )
        # Inject NaN so "float_with_nan" actually exercises missing values.
        self.df.iloc[1000:3000, 1] = np.nan
        self.df["date"] = self.df["datetime"].dt.date
        self.df["time"] = self.df["datetime"].dt.time
        self.df["datetime_string"] = self.df["datetime"].astype(str)
        self.df.to_sql(self.table_name, self.con, if_exists="replace")
    def time_to_sql_dataframe(self, connection):
        # Write the whole frame to a fresh table.
        self.df.to_sql("test1", self.con, if_exists="replace")
    def time_read_sql_query(self, connection):
        # Read the whole table back with a SELECT *.
        read_sql_query(self.query_all, self.con)
class WriteSQLDtypes:
    """ASV benchmark: single-column SQL write/read across a range of dtypes.

    Parametrised over the connection flavour and the column being written
    or selected.
    """
    params = (
        ["sqlalchemy", "sqlite"],
        [
            "float",
            "float_with_nan",
            "string",
            "bool",
            "int",
            "date",
            "time",
            "datetime",
        ],
    )
    param_names = ["connection", "dtype"]
    def setup(self, connection, dtype):
        N = 10000
        con = {
            "sqlalchemy": create_engine("sqlite:///:memory:"),
            "sqlite": sqlite3.connect(":memory:"),
        }
        self.table_name = "test_type"
        self.query_col = f"SELECT {dtype} FROM {self.table_name}"
        self.con = con[connection]
        self.df = DataFrame(
            {
                "float": np.random.randn(N),
                "float_with_nan": np.random.randn(N),
                "string": ["foo"] * N,
                "bool": [True] * N,
                "int": np.random.randint(0, N, size=N),
                "datetime": date_range("2000-01-01", periods=N, freq="s"),
            },
            index=tm.makeStringIndex(N),
        )
        # Inject NaN so "float_with_nan" actually exercises missing values.
        self.df.iloc[1000:3000, 1] = np.nan
        self.df["date"] = self.df["datetime"].dt.date
        self.df["time"] = self.df["datetime"].dt.time
        self.df["datetime_string"] = self.df["datetime"].astype(str)
        self.df.to_sql(self.table_name, self.con, if_exists="replace")
    def time_to_sql_dataframe_column(self, connection, dtype):
        # Write only the benchmarked column to a fresh table.
        self.df[[dtype]].to_sql("test1", self.con, if_exists="replace")
    def time_read_sql_query_select_column(self, connection, dtype):
        # Read only the benchmarked column back.
        read_sql_query(self.query_col, self.con)
class ReadSQLTable:
    """ASV benchmark: read_sql_table (full table and parse_dates) via SQLAlchemy."""
    def setup(self):
        N = 10000
        self.table_name = "test"
        self.con = create_engine("sqlite:///:memory:")
        self.df = DataFrame(
            {
                "float": np.random.randn(N),
                "float_with_nan": np.random.randn(N),
                "string": ["foo"] * N,
                "bool": [True] * N,
                "int": np.random.randint(0, N, size=N),
                "datetime": date_range("2000-01-01", periods=N, freq="s"),
            },
            index=tm.makeStringIndex(N),
        )
        # Inject NaN so "float_with_nan" actually exercises missing values.
        self.df.iloc[1000:3000, 1] = np.nan
        self.df["date"] = self.df["datetime"].dt.date
        self.df["time"] = self.df["datetime"].dt.time
        self.df["datetime_string"] = self.df["datetime"].astype(str)
        self.df.to_sql(self.table_name, self.con, if_exists="replace")
    def time_read_sql_table_all(self):
        read_sql_table(self.table_name, self.con)
    def time_read_sql_table_parse_dates(self):
        # Exercise string -> datetime parsing during the read.
        read_sql_table(
            self.table_name,
            self.con,
            columns=["datetime_string"],
            parse_dates=["datetime_string"],
        )
class ReadSQLTableDtypes:
    """ASV benchmark: read_sql_table restricted to one column per dtype."""
    params = [
        "float",
        "float_with_nan",
        "string",
        "bool",
        "int",
        "date",
        "time",
        "datetime",
    ]
    param_names = ["dtype"]
    def setup(self, dtype):
        N = 10000
        self.table_name = "test"
        self.con = create_engine("sqlite:///:memory:")
        self.df = DataFrame(
            {
                "float": np.random.randn(N),
                "float_with_nan": np.random.randn(N),
                "string": ["foo"] * N,
                "bool": [True] * N,
                "int": np.random.randint(0, N, size=N),
                "datetime": date_range("2000-01-01", periods=N, freq="s"),
            },
            index=tm.makeStringIndex(N),
        )
        # Inject NaN so "float_with_nan" actually exercises missing values.
        self.df.iloc[1000:3000, 1] = np.nan
        self.df["date"] = self.df["datetime"].dt.date
        self.df["time"] = self.df["datetime"].dt.time
        self.df["datetime_string"] = self.df["datetime"].astype(str)
        self.df.to_sql(self.table_name, self.con, if_exists="replace")
    def time_read_sql_table_column(self, dtype):
        read_sql_table(self.table_name, self.con, columns=[dtype])
from ..pandas_vb_common import setup # noqa: F401 isort:skip
import numpy as np
import pytest
from pandas.core.dtypes.concat import union_categoricals
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
Series,
)
import pandas._testing as tm
class TestUnionCategoricals:
    """Tests for ``pandas.core.dtypes.concat.union_categoricals``.

    Covers dtype combinations, ordered semantics, the ``sort_categories``
    and ``ignore_order`` options, NaN handling and input unwrapping.
    """
    @pytest.mark.parametrize(
        "a, b, combined",
        [
            (list("abc"), list("abd"), list("abcabd")),
            ([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
            ([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
            (
                ["b", "b", np.nan, "a"],
                ["a", np.nan, "c"],
                ["b", "b", np.nan, "a", "a", np.nan, "c"],
            ),
            (
                pd.date_range("2014-01-01", "2014-01-05"),
                pd.date_range("2014-01-06", "2014-01-07"),
                pd.date_range("2014-01-01", "2014-01-07"),
            ),
            (
                pd.date_range("2014-01-01", "2014-01-05", tz="US/Central"),
                pd.date_range("2014-01-06", "2014-01-07", tz="US/Central"),
                pd.date_range("2014-01-01", "2014-01-07", tz="US/Central"),
            ),
            (
                pd.period_range("2014-01-01", "2014-01-05"),
                pd.period_range("2014-01-06", "2014-01-07"),
                pd.period_range("2014-01-01", "2014-01-07"),
            ),
        ],
    )
    @pytest.mark.parametrize("box", [Categorical, CategoricalIndex, Series])
    def test_union_categorical(self, a, b, combined, box):
        # GH 13361
        result = union_categoricals([box(Categorical(a)), box(Categorical(b))])
        expected = Categorical(combined)
        tm.assert_categorical_equal(result, expected)
    def test_union_categorical_ordered_appearance(self):
        # new categories ordered by appearance
        s = Categorical(["x", "y", "z"])
        s2 = Categorical(["a", "b", "c"])
        result = union_categoricals([s, s2])
        expected = Categorical(
            ["x", "y", "z", "a", "b", "c"], categories=["x", "y", "z", "a", "b", "c"]
        )
        tm.assert_categorical_equal(result, expected)
    def test_union_categorical_ordered_true(self):
        s = Categorical([0, 1.2, 2], ordered=True)
        s2 = Categorical([0, 1.2, 2], ordered=True)
        result = union_categoricals([s, s2])
        expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
        tm.assert_categorical_equal(result, expected)
    def test_union_categorical_match_types(self):
        # must exactly match types
        s = Categorical([0, 1.2, 2])
        s2 = Categorical([2, 3, 4])
        msg = "dtype of categories must be the same"
        with pytest.raises(TypeError, match=msg):
            union_categoricals([s, s2])
    def test_union_categorical_empty(self):
        msg = "No Categoricals to union"
        with pytest.raises(ValueError, match=msg):
            union_categoricals([])
    def test_union_categoricals_nan(self):
        # GH 13759
        res = union_categoricals(
            [Categorical([1, 2, np.nan]), Categorical([3, 2, np.nan])]
        )
        exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
        tm.assert_categorical_equal(res, exp)
        res = union_categoricals(
            [Categorical(["A", "B"]), Categorical(["B", "B", np.nan])]
        )
        exp = Categorical(["A", "B", "B", "B", np.nan])
        tm.assert_categorical_equal(res, exp)
        val1 = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-03-01"), pd.NaT]
        val2 = [pd.NaT, pd.Timestamp("2011-01-01"), pd.Timestamp("2011-02-01")]
        res = union_categoricals([Categorical(val1), Categorical(val2)])
        exp = Categorical(
            val1 + val2,
            categories=[
                pd.Timestamp("2011-01-01"),
                pd.Timestamp("2011-03-01"),
                pd.Timestamp("2011-02-01"),
            ],
        )
        tm.assert_categorical_equal(res, exp)
        # all NaN
        res = union_categoricals(
            [
                Categorical(np.array([np.nan, np.nan], dtype=object)),
                Categorical(["X"]),
            ]
        )
        exp = Categorical([np.nan, np.nan, "X"])
        tm.assert_categorical_equal(res, exp)
        res = union_categoricals(
            [Categorical([np.nan, np.nan]), Categorical([np.nan, np.nan])]
        )
        exp = Categorical([np.nan, np.nan, np.nan, np.nan])
        tm.assert_categorical_equal(res, exp)
    @pytest.mark.parametrize("val", [[], ["1"]])
    def test_union_categoricals_empty(self, val):
        # GH 13759
        res = union_categoricals([Categorical([]), Categorical(val)])
        exp = Categorical(val)
        tm.assert_categorical_equal(res, exp)
    def test_union_categorical_same_category(self):
        # check fastpath
        c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
        c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
        res = union_categoricals([c1, c2])
        exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan], categories=[1, 2, 3, 4])
        tm.assert_categorical_equal(res, exp)
    def test_union_categorical_same_category_str(self):
        c1 = Categorical(["z", "z", "z"], categories=["x", "y", "z"])
        c2 = Categorical(["x", "x", "x"], categories=["x", "y", "z"])
        res = union_categoricals([c1, c2])
        exp = Categorical(["z", "z", "z", "x", "x", "x"], categories=["x", "y", "z"])
        tm.assert_categorical_equal(res, exp)
    def test_union_categorical_same_categories_different_order(self):
        # https://github.com/pandas-dev/pandas/issues/19096
        c1 = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
        c2 = Categorical(["a", "b", "c"], categories=["b", "a", "c"])
        result = union_categoricals([c1, c2])
        expected = Categorical(
            ["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"]
        )
        tm.assert_categorical_equal(result, expected)
    def test_union_categoricals_ordered(self):
        c1 = Categorical([1, 2, 3], ordered=True)
        c2 = Categorical([1, 2, 3], ordered=False)
        msg = "Categorical.ordered must be the same"
        with pytest.raises(TypeError, match=msg):
            union_categoricals([c1, c2])
        res = union_categoricals([c1, c1])
        exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
        tm.assert_categorical_equal(res, exp)
        c1 = Categorical([1, 2, 3, np.nan], ordered=True)
        c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
        res = union_categoricals([c1, c2])
        exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
        tm.assert_categorical_equal(res, exp)
        c1 = Categorical([1, 2, 3], ordered=True)
        c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
        msg = "to union ordered Categoricals, all categories must be the same"
        with pytest.raises(TypeError, match=msg):
            union_categoricals([c1, c2])
    def test_union_categoricals_ignore_order(self):
        # GH 15219
        c1 = Categorical([1, 2, 3], ordered=True)
        c2 = Categorical([1, 2, 3], ordered=False)
        res = union_categoricals([c1, c2], ignore_order=True)
        exp = Categorical([1, 2, 3, 1, 2, 3])
        tm.assert_categorical_equal(res, exp)
        msg = "Categorical.ordered must be the same"
        with pytest.raises(TypeError, match=msg):
            union_categoricals([c1, c2], ignore_order=False)
        res = union_categoricals([c1, c1], ignore_order=True)
        exp = Categorical([1, 2, 3, 1, 2, 3])
        tm.assert_categorical_equal(res, exp)
        res = union_categoricals([c1, c1], ignore_order=False)
        exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True)
        tm.assert_categorical_equal(res, exp)
        c1 = Categorical([1, 2, 3, np.nan], ordered=True)
        c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
        res = union_categoricals([c1, c2], ignore_order=True)
        exp = Categorical([1, 2, 3, np.nan, 3, 2])
        tm.assert_categorical_equal(res, exp)
        c1 = Categorical([1, 2, 3], ordered=True)
        c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
        res = union_categoricals([c1, c2], ignore_order=True)
        exp = Categorical([1, 2, 3, 1, 2, 3])
        tm.assert_categorical_equal(res, exp)
        res = union_categoricals([c2, c1], ignore_order=True, sort_categories=True)
        exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
        tm.assert_categorical_equal(res, exp)
        c1 = Categorical([1, 2, 3], ordered=True)
        c2 = Categorical([4, 5, 6], ordered=True)
        result = union_categoricals([c1, c2], ignore_order=True)
        expected = Categorical([1, 2, 3, 4, 5, 6])
        tm.assert_categorical_equal(result, expected)
        msg = "to union ordered Categoricals, all categories must be the same"
        with pytest.raises(TypeError, match=msg):
            union_categoricals([c1, c2], ignore_order=False)
        with pytest.raises(TypeError, match=msg):
            union_categoricals([c1, c2])
    def test_union_categoricals_sort(self):
        # GH 13846
        c1 = Categorical(["x", "y", "z"])
        c2 = Categorical(["a", "b", "c"])
        result = union_categoricals([c1, c2], sort_categories=True)
        expected = Categorical(
            ["x", "y", "z", "a", "b", "c"], categories=["a", "b", "c", "x", "y", "z"]
        )
        tm.assert_categorical_equal(result, expected)
        # fastpath
        c1 = Categorical(["a", "b"], categories=["b", "a", "c"])
        c2 = Categorical(["b", "c"], categories=["b", "a", "c"])
        result = union_categoricals([c1, c2], sort_categories=True)
        expected = Categorical(["a", "b", "b", "c"], categories=["a", "b", "c"])
        tm.assert_categorical_equal(result, expected)
        c1 = Categorical(["a", "b"], categories=["c", "a", "b"])
        c2 = Categorical(["b", "c"], categories=["c", "a", "b"])
        result = union_categoricals([c1, c2], sort_categories=True)
        expected = Categorical(["a", "b", "b", "c"], categories=["a", "b", "c"])
        tm.assert_categorical_equal(result, expected)
        # fastpath - skip resort
        c1 = Categorical(["a", "b"], categories=["a", "b", "c"])
        c2 = Categorical(["b", "c"], categories=["a", "b", "c"])
        result = union_categoricals([c1, c2], sort_categories=True)
        expected = Categorical(["a", "b", "b", "c"], categories=["a", "b", "c"])
        tm.assert_categorical_equal(result, expected)
        c1 = Categorical(["x", np.nan])
        c2 = Categorical([np.nan, "b"])
        result = union_categoricals([c1, c2], sort_categories=True)
        expected = Categorical(["x", np.nan, np.nan, "b"], categories=["b", "x"])
        tm.assert_categorical_equal(result, expected)
        c1 = Categorical([np.nan])
        c2 = Categorical([np.nan])
        result = union_categoricals([c1, c2], sort_categories=True)
        expected = Categorical([np.nan, np.nan])
        tm.assert_categorical_equal(result, expected)
        c1 = Categorical([])
        c2 = Categorical([])
        result = union_categoricals([c1, c2], sort_categories=True)
        expected = Categorical([])
        tm.assert_categorical_equal(result, expected)
        c1 = Categorical(["b", "a"], categories=["b", "a", "c"], ordered=True)
        c2 = Categorical(["a", "c"], categories=["b", "a", "c"], ordered=True)
        msg = "Cannot use sort_categories=True with ordered Categoricals"
        with pytest.raises(TypeError, match=msg):
            union_categoricals([c1, c2], sort_categories=True)
    def test_union_categoricals_sort_false(self):
        # GH 13846
        c1 = Categorical(["x", "y", "z"])
        c2 = Categorical(["a", "b", "c"])
        result = union_categoricals([c1, c2], sort_categories=False)
        expected = Categorical(
            ["x", "y", "z", "a", "b", "c"], categories=["x", "y", "z", "a", "b", "c"]
        )
        tm.assert_categorical_equal(result, expected)
    def test_union_categoricals_sort_false_fastpath(self):
        # fastpath
        c1 = Categorical(["a", "b"], categories=["b", "a", "c"])
        c2 = Categorical(["b", "c"], categories=["b", "a", "c"])
        result = union_categoricals([c1, c2], sort_categories=False)
        expected = Categorical(["a", "b", "b", "c"], categories=["b", "a", "c"])
        tm.assert_categorical_equal(result, expected)
    def test_union_categoricals_sort_false_skipresort(self):
        # fastpath - skip resort
        c1 = Categorical(["a", "b"], categories=["a", "b", "c"])
        c2 = Categorical(["b", "c"], categories=["a", "b", "c"])
        result = union_categoricals([c1, c2], sort_categories=False)
        expected = Categorical(["a", "b", "b", "c"], categories=["a", "b", "c"])
        tm.assert_categorical_equal(result, expected)
    def test_union_categoricals_sort_false_one_nan(self):
        c1 = Categorical(["x", np.nan])
        c2 = Categorical([np.nan, "b"])
        result = union_categoricals([c1, c2], sort_categories=False)
        expected = Categorical(["x", np.nan, np.nan, "b"], categories=["x", "b"])
        tm.assert_categorical_equal(result, expected)
    def test_union_categoricals_sort_false_only_nan(self):
        c1 = Categorical([np.nan])
        c2 = Categorical([np.nan])
        result = union_categoricals([c1, c2], sort_categories=False)
        expected = Categorical([np.nan, np.nan])
        tm.assert_categorical_equal(result, expected)
    def test_union_categoricals_sort_false_empty(self):
        c1 = Categorical([])
        c2 = Categorical([])
        result = union_categoricals([c1, c2], sort_categories=False)
        expected = Categorical([])
        tm.assert_categorical_equal(result, expected)
    def test_union_categoricals_sort_false_ordered_true(self):
        c1 = Categorical(["b", "a"], categories=["b", "a", "c"], ordered=True)
        c2 = Categorical(["a", "c"], categories=["b", "a", "c"], ordered=True)
        result = union_categoricals([c1, c2], sort_categories=False)
        expected = Categorical(
            ["b", "a", "a", "c"], categories=["b", "a", "c"], ordered=True
        )
        tm.assert_categorical_equal(result, expected)
    def test_union_categorical_unwrap(self):
        # GH 14173
        c1 = Categorical(["a", "b"])
        c2 = Series(["b", "c"], dtype="category")
        result = union_categoricals([c1, c2])
        expected = Categorical(["a", "b", "b", "c"])
        tm.assert_categorical_equal(result, expected)
        c2 = CategoricalIndex(c2)
        result = union_categoricals([c1, c2])
        tm.assert_categorical_equal(result, expected)
        c1 = Series(c1)
        result = union_categoricals([c1, c2])
        tm.assert_categorical_equal(result, expected)
        msg = "all components to combine must be Categorical"
        with pytest.raises(TypeError, match=msg):
            union_categoricals([c1, ["a", "b", "c"]])
from __future__ import annotations
import functools
from typing import (
TYPE_CHECKING,
cast,
overload,
)
import numpy as np
from pandas._libs import (
algos as libalgos,
lib,
)
from pandas._typing import (
ArrayLike,
AxisInt,
npt,
)
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
ensure_platform_int,
is_1d_only_ea_obj,
)
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
if TYPE_CHECKING:
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.arrays.base import ExtensionArray
@overload
def take_nd(
    arr: np.ndarray,
    indexer,
    axis: AxisInt = ...,
    fill_value=...,
    allow_fill: bool = ...,
) -> np.ndarray:
    ...
@overload
def take_nd(
    arr: ExtensionArray,
    indexer,
    axis: AxisInt = ...,
    fill_value=...,
    allow_fill: bool = ...,
) -> ArrayLike:
    ...
def take_nd(
    arr: ArrayLike,
    indexer,
    axis: AxisInt = 0,
    fill_value=lib.no_default,
    allow_fill: bool = True,
) -> ArrayLike:
    """
    Specialized Cython take which sets NaN values in one pass
    This dispatches to ``take`` defined on ExtensionArrays. It does not
    currently dispatch to ``SparseArray.take`` for sparse ``arr``.
    Note: this function assumes that the indexer is a valid(ated) indexer with
    no out of bound indices.
    Parameters
    ----------
    arr : np.ndarray or ExtensionArray
        Input array.
    indexer : ndarray
        1-D array of indices to take, subarrays corresponding to -1 value
        indices are filed with fill_value
    axis : int, default 0
        Axis to take from
    fill_value : any, default np.nan
        Fill value to replace -1 values with
    allow_fill : bool, default True
        If False, indexer is assumed to contain no -1 values so no filling
        will be done. This short-circuits computation of a mask. Result is
        undefined if allow_fill == False and -1 is present in indexer.
    Returns
    -------
    subarray : np.ndarray or ExtensionArray
        May be the same type as the input, or cast to an ndarray.
    """
    # Default fill is the dtype's own NA representation.
    if fill_value is lib.no_default:
        fill_value = na_value_for_dtype(arr.dtype, compat=False)
    elif isinstance(arr.dtype, np.dtype) and arr.dtype.kind in "mM":
        dtype, fill_value = maybe_promote(arr.dtype, fill_value)
        if arr.dtype != dtype:
            # EA.take is strict about returning a new object of the same type
            # so for that case cast upfront
            arr = arr.astype(dtype)
    if not isinstance(arr, np.ndarray):
        # i.e. ExtensionArray,
        # includes for EA to catch DatetimeArray, TimedeltaArray
        if not is_1d_only_ea_obj(arr):
            # i.e. DatetimeArray, TimedeltaArray
            arr = cast("NDArrayBackedExtensionArray", arr)
            return arr.take(
                indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis
            )
        # 1D-only EAs have no axis argument on ``take``.
        return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
    arr = np.asarray(arr)
    return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)
def _take_nd_ndarray(
    arr: np.ndarray,
    indexer: npt.NDArray[np.intp] | None,
    axis: AxisInt,
    fill_value,
    allow_fill: bool,
) -> np.ndarray:
    """
    ndarray-only implementation backing ``take_nd``: allocate the output
    array, look up a cython (or object fallback) take function and run it.
    """
    if indexer is None:
        # take everything along ``axis`` in order; no -1 entries can exist,
        # so keep the original dtype and use a dummy fill value
        indexer = np.arange(arr.shape[axis], dtype=np.intp)
        dtype, fill_value = arr.dtype, arr.dtype.type()
    else:
        indexer = ensure_platform_int(indexer)
    dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
        arr, indexer, fill_value, allow_fill
    )
    flip_order = False
    if arr.ndim == 2 and arr.flags.f_contiguous:
        # operate on the transpose so the take runs along contiguous memory
        flip_order = True
    if flip_order:
        arr = arr.T
        axis = arr.ndim - axis - 1
    # at this point, it's guaranteed that dtype can hold both the arr values
    # and the fill_value
    out_shape_ = list(arr.shape)
    out_shape_[axis] = len(indexer)
    out_shape = tuple(out_shape_)
    if arr.flags.f_contiguous and axis == arr.ndim - 1:
        # minor tweak that can make an order-of-magnitude difference
        # for dataframes initialized directly from 2-d ndarrays
        # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its
        # f-contiguous transpose)
        out = np.empty(out_shape, dtype=dtype, order="F")
    else:
        out = np.empty(out_shape, dtype=dtype)
    func = _get_take_nd_function(
        arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info
    )
    func(arr, indexer, out, fill_value)
    if flip_order:
        # undo the transpose so the result matches the caller's orientation
        out = out.T
    return out
def take_1d(
    arr: ArrayLike,
    indexer: npt.NDArray[np.intp],
    fill_value=None,
    allow_fill: bool = True,
    mask: npt.NDArray[np.bool_] | None = None,
) -> ArrayLike:
    """
    Specialized version for 1D arrays. Differences compared to `take_nd`:

    - Assumes input array has already been converted to numpy array / EA
    - Assumes indexer is already guaranteed to be intp dtype ndarray
    - Only works for 1D arrays

    To ensure the lowest possible overhead.

    Note: similarly to `take_nd`, this function assumes that the indexer is
    a valid(ated) indexer with no out of bound indices.

    Parameters
    ----------
    arr : np.ndarray or ExtensionArray
        Input array.
    indexer : ndarray
        1-D array of indices to take (validated indices, intp dtype).
    fill_value : any, default np.nan
        Fill value to replace -1 values with
    allow_fill : bool, default True
        If False, indexer is assumed to contain no -1 values so no filling
        will be done. This short-circuits computation of a mask. Result is
        undefined if allow_fill == False and -1 is present in indexer.
    mask : np.ndarray, optional, default None
        If `allow_fill` is True, and the mask (where indexer == -1) is already
        known, it can be passed to avoid recomputation.

    Returns
    -------
    np.ndarray or ExtensionArray
        Same length as ``indexer``.
    """
    if not isinstance(arr, np.ndarray):
        # ExtensionArray -> dispatch to their method
        return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
    if not allow_fill:
        # no -1 entries by contract -> a plain positional take suffices
        return arr.take(indexer)
    dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
        arr, indexer, fill_value, True, mask
    )
    # at this point, it's guaranteed that dtype can hold both the arr values
    # and the fill_value
    out = np.empty(indexer.shape, dtype=dtype)
    func = _get_take_nd_function(
        arr.ndim, arr.dtype, out.dtype, axis=0, mask_info=mask_info
    )
    func(arr, indexer, out, fill_value)
    return out
def take_2d_multi(
    arr: np.ndarray,
    indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
    fill_value=np.nan,
) -> np.ndarray:
    """
    Specialized Cython take which sets NaN values in one pass.

    Take from a 2-D ``arr`` along both axes at once, using the
    ``(row_indices, column_indices)`` pair in ``indexer``; positions where
    either index is -1 are filled with ``fill_value``.

    Returns
    -------
    np.ndarray
        Shape ``(len(row_indices), len(column_indices))``.
    """
    # This is only called from one place in DataFrame._reindex_multi,
    # so we know indexer is well-behaved.
    assert indexer is not None
    assert indexer[0] is not None
    assert indexer[1] is not None
    row_idx, col_idx = indexer
    row_idx = ensure_platform_int(row_idx)
    col_idx = ensure_platform_int(col_idx)
    indexer = row_idx, col_idx
    mask_info = None
    # check for promotion based on types only (do this first because
    # it's faster than computing a mask)
    dtype, fill_value = maybe_promote(arr.dtype, fill_value)
    if dtype != arr.dtype:
        # check if promotion is actually required based on indexer
        row_mask = row_idx == -1
        col_mask = col_idx == -1
        row_needs = row_mask.any()
        col_needs = col_mask.any()
        mask_info = (row_mask, col_mask), (row_needs, col_needs)
        if not (row_needs or col_needs):
            # if not, then depromote, set fill_value to dummy
            # (it won't be used but we don't want the cython code
            # to crash when trying to cast it to dtype)
            dtype, fill_value = arr.dtype, arr.dtype.type()
    # at this point, it's guaranteed that dtype can hold both the arr values
    # and the fill_value
    out_shape = len(row_idx), len(col_idx)
    out = np.empty(out_shape, dtype=dtype)
    # look for a cython specialization: first an exact dtype-pair match,
    # then one working in the output dtype (input converted by the wrapper)
    func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
    if func is None and arr.dtype != out.dtype:
        func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
        if func is not None:
            func = _convert_wrapper(func, out.dtype)
    if func is not None:
        func(arr, indexer, out=out, fill_value=fill_value)
    else:
        # test_reindex_multi
        # pure-python fallback for dtype pairs without a specialization
        _take_2d_multi_object(
            arr, indexer, out, fill_value=fill_value, mask_info=mask_info
        )
    return out
@functools.lru_cache(maxsize=128)
def _get_take_nd_function_cached(
    ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt
):
    """
    Part of _get_take_nd_function below that doesn't need `mask_info` and thus
    can be cached (mask_info potentially contains a numpy ndarray which is not
    hashable and thus cannot be used as argument for cached function).
    """
    # Select the dispatch table for this dimensionality/axis once.
    if ndim == 1:
        table = _take_1d_dict
    elif ndim == 2:
        table = _take_2d_axis0_dict if axis == 0 else _take_2d_axis1_dict
    else:  # pragma: no cover - callers guarantee ndim <= 2
        return None
    direct = table.get((arr_dtype.name, out_dtype.name), None)
    if direct is not None:
        return direct
    # We get here with string, uint, float16, and complex dtypes that could
    # potentially be handled in algos_take_helper.
    # Also a couple with (M8[ns], object) and (m8[ns], object).
    # Fall back to taking in the output dtype; _convert_wrapper casts the
    # input array up-front.
    converted = table.get((out_dtype.name, out_dtype.name), None)
    if converted is not None:
        return _convert_wrapper(converted, out_dtype)
    return None
def _get_take_nd_function(
    ndim: int,
    arr_dtype: np.dtype,
    out_dtype: np.dtype,
    axis: AxisInt = 0,
    mask_info=None,
):
    """
    Get the appropriate "take" implementation for the given dimension, axis
    and dtypes.

    Falls back to the pure-python object implementation when no cython
    specialization exists (or when ndim > 2).
    """
    func = None
    if ndim <= 2:
        # for this part we don't need `mask_info` -> use the cached algo lookup
        func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis)
    if func is None:
        # generic fallback; closes over ``axis`` and ``mask_info``
        def func(arr, indexer, out, fill_value=np.nan) -> None:
            indexer = ensure_platform_int(indexer)
            _take_nd_object(
                arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info
            )
    return func
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(
arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
) -> None:
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(
arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
) -> None:
if conv_dtype == object:
# GH#39755 avoid casting dt64/td64 to integers
arr = ensure_wrapped_if_datetimelike(arr)
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
# (arr_dtype.name, out_dtype.name) -> 1-D cython take specialization.
# bool and datetime-like entries reuse uint8/int64 kernels via _view_wrapper
# since no dedicated cython variants exist for those dtypes.
_take_1d_dict = {
    ("int8", "int8"): libalgos.take_1d_int8_int8,
    ("int8", "int32"): libalgos.take_1d_int8_int32,
    ("int8", "int64"): libalgos.take_1d_int8_int64,
    ("int8", "float64"): libalgos.take_1d_int8_float64,
    ("int16", "int16"): libalgos.take_1d_int16_int16,
    ("int16", "int32"): libalgos.take_1d_int16_int32,
    ("int16", "int64"): libalgos.take_1d_int16_int64,
    ("int16", "float64"): libalgos.take_1d_int16_float64,
    ("int32", "int32"): libalgos.take_1d_int32_int32,
    ("int32", "int64"): libalgos.take_1d_int32_int64,
    ("int32", "float64"): libalgos.take_1d_int32_float64,
    ("int64", "int64"): libalgos.take_1d_int64_int64,
    ("int64", "float64"): libalgos.take_1d_int64_float64,
    ("float32", "float32"): libalgos.take_1d_float32_float32,
    ("float32", "float64"): libalgos.take_1d_float32_float64,
    ("float64", "float64"): libalgos.take_1d_float64_float64,
    ("object", "object"): libalgos.take_1d_object_object,
    ("bool", "bool"): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8),
    ("bool", "object"): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None),
    ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
        libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
    ),
    ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
        libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
    ),
}
# (arr_dtype.name, out_dtype.name) -> 2-D axis-0 cython take specialization;
# same dtype-pairing scheme as _take_1d_dict.
_take_2d_axis0_dict = {
    ("int8", "int8"): libalgos.take_2d_axis0_int8_int8,
    ("int8", "int32"): libalgos.take_2d_axis0_int8_int32,
    ("int8", "int64"): libalgos.take_2d_axis0_int8_int64,
    ("int8", "float64"): libalgos.take_2d_axis0_int8_float64,
    ("int16", "int16"): libalgos.take_2d_axis0_int16_int16,
    ("int16", "int32"): libalgos.take_2d_axis0_int16_int32,
    ("int16", "int64"): libalgos.take_2d_axis0_int16_int64,
    ("int16", "float64"): libalgos.take_2d_axis0_int16_float64,
    ("int32", "int32"): libalgos.take_2d_axis0_int32_int32,
    ("int32", "int64"): libalgos.take_2d_axis0_int32_int64,
    ("int32", "float64"): libalgos.take_2d_axis0_int32_float64,
    ("int64", "int64"): libalgos.take_2d_axis0_int64_int64,
    ("int64", "float64"): libalgos.take_2d_axis0_int64_float64,
    ("float32", "float32"): libalgos.take_2d_axis0_float32_float32,
    ("float32", "float64"): libalgos.take_2d_axis0_float32_float64,
    ("float64", "float64"): libalgos.take_2d_axis0_float64_float64,
    ("object", "object"): libalgos.take_2d_axis0_object_object,
    ("bool", "bool"): _view_wrapper(
        libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8
    ),
    ("bool", "object"): _view_wrapper(
        libalgos.take_2d_axis0_bool_object, np.uint8, None
    ),
    ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
        libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
    ),
    ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
        libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
    ),
}
# (arr_dtype.name, out_dtype.name) -> 2-D axis-1 cython take specialization;
# same dtype-pairing scheme as _take_1d_dict.
_take_2d_axis1_dict = {
    ("int8", "int8"): libalgos.take_2d_axis1_int8_int8,
    ("int8", "int32"): libalgos.take_2d_axis1_int8_int32,
    ("int8", "int64"): libalgos.take_2d_axis1_int8_int64,
    ("int8", "float64"): libalgos.take_2d_axis1_int8_float64,
    ("int16", "int16"): libalgos.take_2d_axis1_int16_int16,
    ("int16", "int32"): libalgos.take_2d_axis1_int16_int32,
    ("int16", "int64"): libalgos.take_2d_axis1_int16_int64,
    ("int16", "float64"): libalgos.take_2d_axis1_int16_float64,
    ("int32", "int32"): libalgos.take_2d_axis1_int32_int32,
    ("int32", "int64"): libalgos.take_2d_axis1_int32_int64,
    ("int32", "float64"): libalgos.take_2d_axis1_int32_float64,
    ("int64", "int64"): libalgos.take_2d_axis1_int64_int64,
    ("int64", "float64"): libalgos.take_2d_axis1_int64_float64,
    ("float32", "float32"): libalgos.take_2d_axis1_float32_float32,
    ("float32", "float64"): libalgos.take_2d_axis1_float32_float64,
    ("float64", "float64"): libalgos.take_2d_axis1_float64_float64,
    ("object", "object"): libalgos.take_2d_axis1_object_object,
    ("bool", "bool"): _view_wrapper(
        libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8
    ),
    ("bool", "object"): _view_wrapper(
        libalgos.take_2d_axis1_bool_object, np.uint8, None
    ),
    ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
        libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
    ),
    ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
        libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
    ),
}
# (arr_dtype.name, out_dtype.name) -> simultaneous row+column take
# specialization used by take_2d_multi; same pairing scheme as _take_1d_dict.
_take_2d_multi_dict = {
    ("int8", "int8"): libalgos.take_2d_multi_int8_int8,
    ("int8", "int32"): libalgos.take_2d_multi_int8_int32,
    ("int8", "int64"): libalgos.take_2d_multi_int8_int64,
    ("int8", "float64"): libalgos.take_2d_multi_int8_float64,
    ("int16", "int16"): libalgos.take_2d_multi_int16_int16,
    ("int16", "int32"): libalgos.take_2d_multi_int16_int32,
    ("int16", "int64"): libalgos.take_2d_multi_int16_int64,
    ("int16", "float64"): libalgos.take_2d_multi_int16_float64,
    ("int32", "int32"): libalgos.take_2d_multi_int32_int32,
    ("int32", "int64"): libalgos.take_2d_multi_int32_int64,
    ("int32", "float64"): libalgos.take_2d_multi_int32_float64,
    ("int64", "int64"): libalgos.take_2d_multi_int64_int64,
    ("int64", "float64"): libalgos.take_2d_multi_int64_float64,
    ("float32", "float32"): libalgos.take_2d_multi_float32_float32,
    ("float32", "float64"): libalgos.take_2d_multi_float32_float64,
    ("float64", "float64"): libalgos.take_2d_multi_float64_float64,
    ("object", "object"): libalgos.take_2d_multi_object_object,
    ("bool", "bool"): _view_wrapper(
        libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8
    ),
    ("bool", "object"): _view_wrapper(
        libalgos.take_2d_multi_bool_object, np.uint8, None
    ),
    ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
        libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
    ),
    ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
        libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
    ),
}
def _take_nd_object(
arr: np.ndarray,
indexer: npt.NDArray[np.intp],
out: np.ndarray,
axis: AxisInt,
fill_value,
mask_info,
) -> None:
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
if arr.dtype != out.dtype:
arr = arr.astype(out.dtype)
if arr.shape[axis] > 0:
arr.take(indexer, axis=axis, out=out)
if needs_masking:
outindexer = [slice(None)] * arr.ndim
outindexer[axis] = mask
out[tuple(outindexer)] = fill_value
def _take_2d_multi_object(
arr: np.ndarray,
indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
out: np.ndarray,
fill_value,
mask_info,
) -> None:
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer # both np.intp
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i, u_ in enumerate(row_idx):
if u_ != -1:
for j, v in enumerate(col_idx):
if v != -1:
out[i, j] = arr[u_, v]
def _take_preprocess_indexer_and_fill_value(
arr: np.ndarray,
indexer: npt.NDArray[np.intp],
fill_value,
allow_fill: bool,
mask: npt.NDArray[np.bool_] | None = None,
):
mask_info: tuple[np.ndarray | None, bool] | None = None
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype:
# check if promotion is actually required based on indexer
if mask is not None:
needs_masking = True
else:
mask = indexer == -1
needs_masking = bool(mask.any())
mask_info = mask, needs_masking
if not needs_masking:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
return dtype, fill_value, mask_info
| bsd-3-clause | 04489db793dc07d7c0e5964ed245ae4a | 34.005093 | 88 | 0.621981 | 3.108397 | false | false | false | false |
pandas-dev/pandas | pandas/tests/indexing/multiindex/test_chaining_and_caching.py | 2 | 2416 | import numpy as np
import pytest
from pandas.errors import SettingWithCopyError
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
def test_detect_chained_assignment(using_copy_on_write):
    """Chained ``df[col][sub].fillna(..., inplace=True)`` on a frame with
    MultiIndex columns: no error under Copy-on-Write, SettingWithCopyError
    otherwise.  The exact chained-indexing form is the behavior under test."""
    # Inplace ops, originally from:
    # https://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
    a = [12, 23]
    b = [123, None]
    c = [1234, 2345]
    d = [12345, 23456]
    # two top-level column groups ("eyes"/"ears"), each with "left"/"right"
    tuples = [("eyes", "left"), ("eyes", "right"), ("ears", "left"), ("ears", "right")]
    events = {
        ("eyes", "left"): a,
        ("eyes", "right"): b,
        ("ears", "left"): c,
        ("ears", "right"): d,
    }
    multiind = MultiIndex.from_tuples(tuples, names=["part", "side"])
    zed = DataFrame(events, index=["a", "b"], columns=multiind)
    if using_copy_on_write:
        # under CoW the chained write is silently a no-op on the parent frame
        zed["eyes"]["right"].fillna(value=555, inplace=True)
    else:
        msg = "A value is trying to be set on a copy of a slice from a DataFrame"
        with pytest.raises(SettingWithCopyError, match=msg):
            zed["eyes"]["right"].fillna(value=555, inplace=True)
@td.skip_array_manager_invalid_test  # with ArrayManager df.loc[0] is not a view
def test_cache_updating(using_copy_on_write):
    """Chained ``df.loc[0]["z"].iloc[0] = ...`` must not leave a dead item
    cache behind; a subsequent direct ``df.loc[(0, 0), "z"]`` assignment
    must still take effect."""
    # 5216
    # make sure that we don't try to set a dead cache
    a = np.random.rand(10, 3)
    df = DataFrame(a, columns=["x", "y", "z"])
    df_original = df.copy()
    tuples = [(i, j) for i in range(5) for j in range(2)]
    index = MultiIndex.from_tuples(tuples)
    df.index = index
    # setting via chained assignment
    # but actually works, since everything is a view
    df.loc[0]["z"].iloc[0] = 1.0
    result = df.loc[(0, 0), "z"]
    if using_copy_on_write:
        # CoW: the chained write hit a copy; the frame is unchanged
        assert result == df_original.loc[0, "z"]
    else:
        assert result == 1
    # correct setting
    df.loc[(0, 0), "z"] = 2
    result = df.loc[(0, 0), "z"]
    assert result == 2
@pytest.mark.slow
def test_indexer_caching():
    """Boolean-mask setitem on a large MultiIndexed Series: checks the
    indexer attributes are registered in ``_internal_names_set`` so that
    repeated access does not blow up."""
    # GH5727
    # make sure that indexers are in the _internal_names_set
    n = 1000001
    arrays = (range(n), range(n))
    index = MultiIndex.from_tuples(zip(*arrays))
    s = Series(np.zeros(n), index=index)
    str(s)  # repr once; presumably primes the cached indexers - see GH5727
    # setitem
    expected = Series(np.ones(n), index=index)
    s = Series(np.zeros(n), index=index)
    s[s == 0] = 1
    tm.assert_series_equal(s, expected)
| bsd-3-clause | 7e5adcd511e83189919b06aa3bfefb87 | 29.2 | 118 | 0.607202 | 3.133593 | false | true | false | false |
pandas-dev/pandas | pandas/tests/tseries/offsets/test_month.py | 1 | 23728 | """
Tests for the following offsets:
- SemiMonthBegin
- SemiMonthEnd
- MonthBegin
- MonthEnd
"""
from __future__ import annotations
from datetime import datetime
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
MonthBegin,
MonthEnd,
SemiMonthBegin,
SemiMonthEnd,
)
from pandas import (
DatetimeIndex,
Series,
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
assert_is_on_offset,
assert_offset_equal,
)
class TestSemiMonthEnd:
    """Tests for SemiMonthEnd: rolls to the configured mid-month day
    (default 15) or to month end, whichever comes next."""

    def test_offset_whole_year(self):
        # consecutive semi-month-end anchors across a whole year: each date
        # in ``dates`` must roll forward to the next one
        dates = (
            datetime(2007, 12, 31),
            datetime(2008, 1, 15),
            datetime(2008, 1, 31),
            datetime(2008, 2, 15),
            datetime(2008, 2, 29),
            datetime(2008, 3, 15),
            datetime(2008, 3, 31),
            datetime(2008, 4, 15),
            datetime(2008, 4, 30),
            datetime(2008, 5, 15),
            datetime(2008, 5, 31),
            datetime(2008, 6, 15),
            datetime(2008, 6, 30),
            datetime(2008, 7, 15),
            datetime(2008, 7, 31),
            datetime(2008, 8, 15),
            datetime(2008, 8, 31),
            datetime(2008, 9, 15),
            datetime(2008, 9, 30),
            datetime(2008, 10, 15),
            datetime(2008, 10, 31),
            datetime(2008, 11, 15),
            datetime(2008, 11, 30),
            datetime(2008, 12, 15),
            datetime(2008, 12, 31),
        )
        for base, exp_date in zip(dates[:-1], dates[1:]):
            assert_offset_equal(SemiMonthEnd(), base, exp_date)
        # ensure .apply_index works as expected
        shift = DatetimeIndex(dates[:-1])
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = SemiMonthEnd() + shift
        exp = DatetimeIndex(dates[1:])
        tm.assert_index_equal(result, exp)
        # ensure generating a range with DatetimeIndex gives same result
        result = date_range(start=dates[0], end=dates[-1], freq="SM")
        exp = DatetimeIndex(dates, freq="SM")
        tm.assert_index_equal(result, exp)

    # (offset, {input datetime: expected datetime}) fixtures consumed by
    # test_offset and test_apply_index below
    offset_cases = []
    offset_cases.append(
        (
            SemiMonthEnd(),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 15),
                datetime(2008, 1, 15): datetime(2008, 1, 31),
                datetime(2008, 1, 31): datetime(2008, 2, 15),
                datetime(2006, 12, 14): datetime(2006, 12, 15),
                datetime(2006, 12, 29): datetime(2006, 12, 31),
                datetime(2006, 12, 31): datetime(2007, 1, 15),
                datetime(2007, 1, 1): datetime(2007, 1, 15),
                datetime(2006, 12, 1): datetime(2006, 12, 15),
                datetime(2006, 12, 15): datetime(2006, 12, 31),
            },
        )
    )
    offset_cases.append(
        (
            SemiMonthEnd(day_of_month=20),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 20),
                datetime(2008, 1, 15): datetime(2008, 1, 20),
                datetime(2008, 1, 21): datetime(2008, 1, 31),
                datetime(2008, 1, 31): datetime(2008, 2, 20),
                datetime(2006, 12, 14): datetime(2006, 12, 20),
                datetime(2006, 12, 29): datetime(2006, 12, 31),
                datetime(2006, 12, 31): datetime(2007, 1, 20),
                datetime(2007, 1, 1): datetime(2007, 1, 20),
                datetime(2006, 12, 1): datetime(2006, 12, 20),
                datetime(2006, 12, 15): datetime(2006, 12, 20),
            },
        )
    )
    offset_cases.append(
        (
            SemiMonthEnd(0),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 15),
                datetime(2008, 1, 16): datetime(2008, 1, 31),
                datetime(2008, 1, 15): datetime(2008, 1, 15),
                datetime(2008, 1, 31): datetime(2008, 1, 31),
                datetime(2006, 12, 29): datetime(2006, 12, 31),
                datetime(2006, 12, 31): datetime(2006, 12, 31),
                datetime(2007, 1, 1): datetime(2007, 1, 15),
            },
        )
    )
    offset_cases.append(
        (
            SemiMonthEnd(0, day_of_month=16),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 16),
                datetime(2008, 1, 16): datetime(2008, 1, 16),
                datetime(2008, 1, 15): datetime(2008, 1, 16),
                datetime(2008, 1, 31): datetime(2008, 1, 31),
                datetime(2006, 12, 29): datetime(2006, 12, 31),
                datetime(2006, 12, 31): datetime(2006, 12, 31),
                datetime(2007, 1, 1): datetime(2007, 1, 16),
            },
        )
    )
    offset_cases.append(
        (
            SemiMonthEnd(2),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 31),
                datetime(2008, 1, 31): datetime(2008, 2, 29),
                datetime(2006, 12, 29): datetime(2007, 1, 15),
                datetime(2006, 12, 31): datetime(2007, 1, 31),
                datetime(2007, 1, 1): datetime(2007, 1, 31),
                datetime(2007, 1, 16): datetime(2007, 2, 15),
                datetime(2006, 11, 1): datetime(2006, 11, 30),
            },
        )
    )
    offset_cases.append(
        (
            SemiMonthEnd(-1),
            {
                datetime(2007, 1, 1): datetime(2006, 12, 31),
                datetime(2008, 6, 30): datetime(2008, 6, 15),
                datetime(2008, 12, 31): datetime(2008, 12, 15),
                datetime(2006, 12, 29): datetime(2006, 12, 15),
                datetime(2006, 12, 30): datetime(2006, 12, 15),
                datetime(2007, 1, 1): datetime(2006, 12, 31),
            },
        )
    )
    offset_cases.append(
        (
            SemiMonthEnd(-1, day_of_month=4),
            {
                datetime(2007, 1, 1): datetime(2006, 12, 31),
                datetime(2007, 1, 4): datetime(2006, 12, 31),
                datetime(2008, 6, 30): datetime(2008, 6, 4),
                datetime(2008, 12, 31): datetime(2008, 12, 4),
                datetime(2006, 12, 5): datetime(2006, 12, 4),
                datetime(2006, 12, 30): datetime(2006, 12, 4),
                datetime(2007, 1, 1): datetime(2006, 12, 31),
            },
        )
    )
    offset_cases.append(
        (
            SemiMonthEnd(-2),
            {
                datetime(2007, 1, 1): datetime(2006, 12, 15),
                datetime(2008, 6, 30): datetime(2008, 5, 31),
                datetime(2008, 3, 15): datetime(2008, 2, 15),
                datetime(2008, 12, 31): datetime(2008, 11, 30),
                datetime(2006, 12, 29): datetime(2006, 11, 30),
                datetime(2006, 12, 14): datetime(2006, 11, 15),
                datetime(2007, 1, 1): datetime(2006, 12, 15),
            },
        )
    )

    @pytest.mark.parametrize("case", offset_cases)
    def test_offset(self, case):
        # scalar datetime + offset matches each fixture expectation
        offset, cases = case
        for base, expected in cases.items():
            assert_offset_equal(offset, base, expected)

    @pytest.mark.parametrize("case", offset_cases)
    def test_apply_index(self, case):
        # https://github.com/pandas-dev/pandas/issues/34580
        # vectorized (DatetimeIndex) addition matches the scalar results
        offset, cases = case
        shift = DatetimeIndex(cases.keys())
        exp = DatetimeIndex(cases.values())
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = offset + shift
        tm.assert_index_equal(result, exp)

    # (datetime, is it on a SemiMonthEnd anchor?) pairs
    on_offset_cases = [
        (datetime(2007, 12, 31), True),
        (datetime(2007, 12, 15), True),
        (datetime(2007, 12, 14), False),
        (datetime(2007, 12, 1), False),
        (datetime(2008, 2, 29), True),
    ]

    @pytest.mark.parametrize("case", on_offset_cases)
    def test_is_on_offset(self, case):
        dt, expected = case
        assert_is_on_offset(SemiMonthEnd(), dt, expected)

    @pytest.mark.parametrize("klass", [Series, DatetimeIndex])
    def test_vectorized_offset_addition(self, klass):
        # addition is commutative and preserves tz/time-of-day/name
        shift = klass(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = shift + SemiMonthEnd()
            result2 = SemiMonthEnd() + shift
        exp = klass(
            [
                Timestamp("2000-01-31 00:15:00", tz="US/Central"),
                Timestamp("2000-02-29", tz="US/Central"),
            ],
            name="a",
        )
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        shift = klass(
            [
                Timestamp("2000-01-01 00:15:00", tz="US/Central"),
                Timestamp("2000-02-01", tz="US/Central"),
            ],
            name="a",
        )
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = shift + SemiMonthEnd()
            result2 = SemiMonthEnd() + shift
        exp = klass(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
class TestSemiMonthBegin:
    """Tests for SemiMonthBegin: rolls to the 1st of the month or to the
    configured mid-month day (default 15), whichever comes next."""

    def test_offset_whole_year(self):
        # consecutive semi-month-begin anchors across a whole year: each
        # date in ``dates`` must roll forward to the next one
        dates = (
            datetime(2007, 12, 15),
            datetime(2008, 1, 1),
            datetime(2008, 1, 15),
            datetime(2008, 2, 1),
            datetime(2008, 2, 15),
            datetime(2008, 3, 1),
            datetime(2008, 3, 15),
            datetime(2008, 4, 1),
            datetime(2008, 4, 15),
            datetime(2008, 5, 1),
            datetime(2008, 5, 15),
            datetime(2008, 6, 1),
            datetime(2008, 6, 15),
            datetime(2008, 7, 1),
            datetime(2008, 7, 15),
            datetime(2008, 8, 1),
            datetime(2008, 8, 15),
            datetime(2008, 9, 1),
            datetime(2008, 9, 15),
            datetime(2008, 10, 1),
            datetime(2008, 10, 15),
            datetime(2008, 11, 1),
            datetime(2008, 11, 15),
            datetime(2008, 12, 1),
            datetime(2008, 12, 15),
        )
        for base, exp_date in zip(dates[:-1], dates[1:]):
            assert_offset_equal(SemiMonthBegin(), base, exp_date)
        # ensure .apply_index works as expected
        shift = DatetimeIndex(dates[:-1])
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = SemiMonthBegin() + shift
        exp = DatetimeIndex(dates[1:])
        tm.assert_index_equal(result, exp)
        # ensure generating a range with DatetimeIndex gives same result
        result = date_range(start=dates[0], end=dates[-1], freq="SMS")
        exp = DatetimeIndex(dates, freq="SMS")
        tm.assert_index_equal(result, exp)

    # (offset, {input datetime: expected datetime}) fixtures consumed by
    # test_offset and test_apply_index below
    offset_cases = [
        (
            SemiMonthBegin(),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 15),
                datetime(2008, 1, 15): datetime(2008, 2, 1),
                datetime(2008, 1, 31): datetime(2008, 2, 1),
                datetime(2006, 12, 14): datetime(2006, 12, 15),
                datetime(2006, 12, 29): datetime(2007, 1, 1),
                datetime(2006, 12, 31): datetime(2007, 1, 1),
                datetime(2007, 1, 1): datetime(2007, 1, 15),
                datetime(2006, 12, 1): datetime(2006, 12, 15),
                datetime(2006, 12, 15): datetime(2007, 1, 1),
            },
        ),
        (
            SemiMonthBegin(day_of_month=20),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 20),
                datetime(2008, 1, 15): datetime(2008, 1, 20),
                datetime(2008, 1, 21): datetime(2008, 2, 1),
                datetime(2008, 1, 31): datetime(2008, 2, 1),
                datetime(2006, 12, 14): datetime(2006, 12, 20),
                datetime(2006, 12, 29): datetime(2007, 1, 1),
                datetime(2006, 12, 31): datetime(2007, 1, 1),
                datetime(2007, 1, 1): datetime(2007, 1, 20),
                datetime(2006, 12, 1): datetime(2006, 12, 20),
                datetime(2006, 12, 15): datetime(2006, 12, 20),
            },
        ),
        (
            SemiMonthBegin(0),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 1),
                datetime(2008, 1, 16): datetime(2008, 2, 1),
                datetime(2008, 1, 15): datetime(2008, 1, 15),
                datetime(2008, 1, 31): datetime(2008, 2, 1),
                datetime(2006, 12, 29): datetime(2007, 1, 1),
                datetime(2006, 12, 2): datetime(2006, 12, 15),
                datetime(2007, 1, 1): datetime(2007, 1, 1),
            },
        ),
        (
            SemiMonthBegin(0, day_of_month=16),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 1),
                datetime(2008, 1, 16): datetime(2008, 1, 16),
                datetime(2008, 1, 15): datetime(2008, 1, 16),
                datetime(2008, 1, 31): datetime(2008, 2, 1),
                datetime(2006, 12, 29): datetime(2007, 1, 1),
                datetime(2006, 12, 31): datetime(2007, 1, 1),
                datetime(2007, 1, 5): datetime(2007, 1, 16),
                datetime(2007, 1, 1): datetime(2007, 1, 1),
            },
        ),
        (
            SemiMonthBegin(2),
            {
                datetime(2008, 1, 1): datetime(2008, 2, 1),
                datetime(2008, 1, 31): datetime(2008, 2, 15),
                datetime(2006, 12, 1): datetime(2007, 1, 1),
                datetime(2006, 12, 29): datetime(2007, 1, 15),
                datetime(2006, 12, 15): datetime(2007, 1, 15),
                datetime(2007, 1, 1): datetime(2007, 2, 1),
                datetime(2007, 1, 16): datetime(2007, 2, 15),
                datetime(2006, 11, 1): datetime(2006, 12, 1),
            },
        ),
        (
            SemiMonthBegin(-1),
            {
                datetime(2007, 1, 1): datetime(2006, 12, 15),
                datetime(2008, 6, 30): datetime(2008, 6, 15),
                datetime(2008, 6, 14): datetime(2008, 6, 1),
                datetime(2008, 12, 31): datetime(2008, 12, 15),
                datetime(2006, 12, 29): datetime(2006, 12, 15),
                datetime(2006, 12, 15): datetime(2006, 12, 1),
                datetime(2007, 1, 1): datetime(2006, 12, 15),
            },
        ),
        (
            SemiMonthBegin(-1, day_of_month=4),
            {
                datetime(2007, 1, 1): datetime(2006, 12, 4),
                datetime(2007, 1, 4): datetime(2007, 1, 1),
                datetime(2008, 6, 30): datetime(2008, 6, 4),
                datetime(2008, 12, 31): datetime(2008, 12, 4),
                datetime(2006, 12, 5): datetime(2006, 12, 4),
                datetime(2006, 12, 30): datetime(2006, 12, 4),
                datetime(2006, 12, 2): datetime(2006, 12, 1),
                datetime(2007, 1, 1): datetime(2006, 12, 4),
            },
        ),
        (
            SemiMonthBegin(-2),
            {
                datetime(2007, 1, 1): datetime(2006, 12, 1),
                datetime(2008, 6, 30): datetime(2008, 6, 1),
                datetime(2008, 6, 14): datetime(2008, 5, 15),
                datetime(2008, 12, 31): datetime(2008, 12, 1),
                datetime(2006, 12, 29): datetime(2006, 12, 1),
                datetime(2006, 12, 15): datetime(2006, 11, 15),
                datetime(2007, 1, 1): datetime(2006, 12, 1),
            },
        ),
    ]

    @pytest.mark.parametrize("case", offset_cases)
    def test_offset(self, case):
        # scalar datetime + offset matches each fixture expectation
        offset, cases = case
        for base, expected in cases.items():
            assert_offset_equal(offset, base, expected)

    @pytest.mark.parametrize("case", offset_cases)
    def test_apply_index(self, case):
        # vectorized (DatetimeIndex) addition matches the scalar results
        offset, cases = case
        shift = DatetimeIndex(cases.keys())
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = offset + shift
        exp = DatetimeIndex(cases.values())
        tm.assert_index_equal(result, exp)

    # (datetime, is it on a SemiMonthBegin anchor?) pairs
    on_offset_cases = [
        (datetime(2007, 12, 1), True),
        (datetime(2007, 12, 15), True),
        (datetime(2007, 12, 14), False),
        (datetime(2007, 12, 31), False),
        (datetime(2008, 2, 15), True),
    ]

    @pytest.mark.parametrize("case", on_offset_cases)
    def test_is_on_offset(self, case):
        dt, expected = case
        assert_is_on_offset(SemiMonthBegin(), dt, expected)

    @pytest.mark.parametrize("klass", [Series, DatetimeIndex])
    def test_vectorized_offset_addition(self, klass):
        # addition is commutative and preserves tz/time-of-day/name
        shift = klass(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = shift + SemiMonthBegin()
            result2 = SemiMonthBegin() + shift
        exp = klass(
            [
                Timestamp("2000-02-01 00:15:00", tz="US/Central"),
                Timestamp("2000-03-01", tz="US/Central"),
            ],
            name="a",
        )
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        shift = klass(
            [
                Timestamp("2000-01-01 00:15:00", tz="US/Central"),
                Timestamp("2000-02-01", tz="US/Central"),
            ],
            name="a",
        )
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = shift + SemiMonthBegin()
            result2 = SemiMonthBegin() + shift
        exp = klass(
            [
                Timestamp("2000-01-15 00:15:00", tz="US/Central"),
                Timestamp("2000-02-15", tz="US/Central"),
            ],
            name="a",
        )
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
class TestMonthBegin:
    """Tests for MonthBegin: rolls to the first day of a month."""

    # (offset, {input datetime: expected datetime}) fixtures
    offset_cases = []
    # NOTE: I'm not entirely happy with the logic here for Begin -ss
    # see thread 'offset conventions' on the ML
    offset_cases.append(
        (
            MonthBegin(),
            {
                datetime(2008, 1, 31): datetime(2008, 2, 1),
                datetime(2008, 2, 1): datetime(2008, 3, 1),
                datetime(2006, 12, 31): datetime(2007, 1, 1),
                datetime(2006, 12, 1): datetime(2007, 1, 1),
                datetime(2007, 1, 31): datetime(2007, 2, 1),
            },
        )
    )
    offset_cases.append(
        (
            MonthBegin(0),
            {
                datetime(2008, 1, 31): datetime(2008, 2, 1),
                datetime(2008, 1, 1): datetime(2008, 1, 1),
                datetime(2006, 12, 3): datetime(2007, 1, 1),
                datetime(2007, 1, 31): datetime(2007, 2, 1),
            },
        )
    )
    offset_cases.append(
        (
            MonthBegin(2),
            {
                datetime(2008, 2, 29): datetime(2008, 4, 1),
                datetime(2008, 1, 31): datetime(2008, 3, 1),
                datetime(2006, 12, 31): datetime(2007, 2, 1),
                datetime(2007, 12, 28): datetime(2008, 2, 1),
                datetime(2007, 1, 1): datetime(2007, 3, 1),
                datetime(2006, 11, 1): datetime(2007, 1, 1),
            },
        )
    )
    offset_cases.append(
        (
            MonthBegin(-1),
            {
                datetime(2007, 1, 1): datetime(2006, 12, 1),
                datetime(2008, 5, 31): datetime(2008, 5, 1),
                datetime(2008, 12, 31): datetime(2008, 12, 1),
                datetime(2006, 12, 29): datetime(2006, 12, 1),
                datetime(2006, 1, 2): datetime(2006, 1, 1),
            },
        )
    )

    @pytest.mark.parametrize("case", offset_cases)
    def test_offset(self, case):
        # scalar datetime + offset matches each fixture expectation
        offset, cases = case
        for base, expected in cases.items():
            assert_offset_equal(offset, base, expected)
class TestMonthEnd:
    def test_day_of_month(self):
        # Adding MonthEnd repeatedly should roll to each successive month end.
        start = datetime(2007, 1, 1)
        offset = MonthEnd()

        rolled = start + offset
        assert rolled == Timestamp(2007, 1, 31)

        rolled = rolled + offset
        assert rolled == Timestamp(2007, 2, 28)

    def test_normalize(self):
        # normalize=True should be equivalent to zeroing the time first.
        dt = datetime(2007, 1, 1, 3)

        result = dt + MonthEnd(normalize=True)
        expected = dt.replace(hour=0) + MonthEnd()
        assert result == expected

    offset_cases = [
        (
            MonthEnd(),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 31),
                datetime(2008, 1, 31): datetime(2008, 2, 29),
                datetime(2006, 12, 29): datetime(2006, 12, 31),
                datetime(2006, 12, 31): datetime(2007, 1, 31),
                datetime(2007, 1, 1): datetime(2007, 1, 31),
                datetime(2006, 12, 1): datetime(2006, 12, 31),
            },
        ),
        (
            MonthEnd(0),
            {
                datetime(2008, 1, 1): datetime(2008, 1, 31),
                datetime(2008, 1, 31): datetime(2008, 1, 31),
                datetime(2006, 12, 29): datetime(2006, 12, 31),
                datetime(2006, 12, 31): datetime(2006, 12, 31),
                datetime(2007, 1, 1): datetime(2007, 1, 31),
            },
        ),
        (
            MonthEnd(2),
            {
                datetime(2008, 1, 1): datetime(2008, 2, 29),
                datetime(2008, 1, 31): datetime(2008, 3, 31),
                datetime(2006, 12, 29): datetime(2007, 1, 31),
                datetime(2006, 12, 31): datetime(2007, 2, 28),
                datetime(2007, 1, 1): datetime(2007, 2, 28),
                datetime(2006, 11, 1): datetime(2006, 12, 31),
            },
        ),
        (
            MonthEnd(-1),
            {
                datetime(2007, 1, 1): datetime(2006, 12, 31),
                datetime(2008, 6, 30): datetime(2008, 5, 31),
                datetime(2008, 12, 31): datetime(2008, 11, 30),
                datetime(2006, 12, 29): datetime(2006, 11, 30),
                datetime(2006, 12, 30): datetime(2006, 11, 30),
                datetime(2007, 1, 1): datetime(2006, 12, 31),
            },
        ),
    ]

    @pytest.mark.parametrize("case", offset_cases)
    def test_offset(self, case):
        # Each case pairs a MonthEnd offset with a mapping of
        # input datetime -> expected shifted datetime.
        offset, mapping = case
        for start, expected in mapping.items():
            assert_offset_equal(offset, start, expected)

    on_offset_cases = [
        (MonthEnd(), datetime(2007, 12, 31), True),
        (MonthEnd(), datetime(2008, 1, 1), False),
    ]

    @pytest.mark.parametrize("case", on_offset_cases)
    def test_is_on_offset(self, case):
        offset, dt, expected = case
        assert_is_on_offset(offset, dt, expected)
| bsd-3-clause | 548cf56a992929edaceb446e315f63bf | 33.99705 | 74 | 0.488368 | 3.836378 | false | true | false | false |
pandas-dev/pandas | pandas/io/pytables.py | 1 | 171363 | """
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
from __future__ import annotations
from contextlib import suppress
import copy
from datetime import (
date,
tzinfo,
)
import itertools
import os
import re
from textwrap import dedent
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Final,
Hashable,
Iterator,
Literal,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
from pandas._config import (
config,
get_option,
)
from pandas._libs import (
lib,
writers as libwriters,
)
from pandas._libs.tslibs import timezones
from pandas._typing import (
AnyArrayLike,
ArrayLike,
AxisInt,
DtypeArg,
FilePath,
Shape,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.pickle_compat import patch_pickle
from pandas.errors import (
AttributeConflictWarning,
ClosedFileError,
IncompatibilityWarning,
PerformanceWarning,
PossibleDataLossError,
)
from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_list_like,
is_string_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
PeriodIndex,
Series,
TimedeltaIndex,
concat,
isna,
)
from pandas.core.api import Int64Index
from pandas.core.arrays import (
Categorical,
DatetimeArray,
PeriodArray,
)
import pandas.core.common as com
from pandas.core.computation.pytables import (
PyTablesExpr,
maybe_expression,
)
from pandas.core.construction import extract_array
from pandas.core.indexes.api import ensure_index
from pandas.core.internals import (
ArrayManager,
BlockManager,
)
from pandas.io.common import stringify_path
from pandas.io.formats.printing import (
adjoin,
pprint_thing,
)
if TYPE_CHECKING:
from tables import (
Col,
File,
Node,
)
from pandas.core.internals import Block
# versioning attribute stamped onto stored objects (presumably checked on
# read elsewhere in this module — confirm before bumping)
_version = "0.15.2"
# fallback text encoding applied when the caller does not supply one
# (see _ensure_encoding below)
_default_encoding = "UTF-8"
def _ensure_decoded(s):
"""if we have bytes, decode them to unicode"""
if isinstance(s, np.bytes_):
s = s.decode("UTF-8")
return s
def _ensure_encoding(encoding: str | None) -> str:
# set the encoding if we need
if encoding is None:
encoding = _default_encoding
return encoding
def _ensure_str(name):
"""
Ensure that an index / column name is a str (python 3); otherwise they
may be np.string dtype. Non-string dtypes are passed through unchanged.
https://github.com/pandas-dev/pandas/issues/13492
"""
if isinstance(name, str):
name = str(name)
return name
# Public alias: HDFStore "where" terms are PyTablesExpr objects.
Term = PyTablesExpr
def _ensure_term(where, scope_level: int):
    """
    Ensure that the where is a Term or a list of Term.

    This makes sure that we are capturing the scope of variables that are
    passed create the terms here with a frame_level=2 (we are 2 levels down)
    """
    # only consider list/tuple here as an ndarray is automatically a coordinate
    # list
    level = scope_level + 1
    if isinstance(where, (list, tuple)):
        # NOTE(review): the list branch bumps scope_level by one extra —
        # presumably to compensate for the comprehension's own stack frame
        # (pre-3.12); confirm before restructuring this into a plain loop.
        where = [
            Term(term, scope_level=level + 1) if maybe_expression(term) else term
            for term in where
            if term is not None
        ]
    elif maybe_expression(where):
        where = Term(where, scope_level=level)

    # an empty list/tuple collapses to None so callers can treat "no usable
    # criteria" uniformly; None itself passes straight through
    return where if where is None or len(where) else None
# warning/template strings interpolated with %-formatting at the call sites
incompatibility_doc: Final = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""

attribute_conflict_doc: Final = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""

performance_doc: Final = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""

# formats: normalize short aliases ("f"/"t") to canonical storage formats
_FORMAT_MAP = {"f": "fixed", "fixed": "fixed", "t": "table", "table": "table"}

# axes map: non-index axes per storable pandas type
_AXES_MAP = {DataFrame: [0]}

# register our configuration options
dropna_doc: Final = """
: boolean
    drop ALL nan rows when appending to a table
"""
format_doc: Final = """
: format
    default format writing format, if None, then
    put will default to 'fixed' and append will default to 'table'
"""

with config.config_prefix("io.hdf"):
    config.register_option("dropna_table", False, dropna_doc, validator=config.is_bool)
    config.register_option(
        "default_format",
        None,
        format_doc,
        validator=config.is_one_of_factory(["fixed", "table", None]),
    )

# oh the troubles to reduce import time
# lazily-populated PyTables module handle and open-policy flag (see _tables)
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
    """
    Import PyTables lazily, caching the module object and recording whether
    its file-open policy is "strict".
    """
    global _table_mod
    global _table_file_open_policy_is_strict

    # already imported once — return the cached module
    if _table_mod is not None:
        return _table_mod

    import tables

    _table_mod = tables

    # the file open policy changed as of pytables 3.1, depending on the
    # HDF5 version; older versions lack the attribute, hence the suppress
    with suppress(AttributeError):
        _table_file_open_policy_is_strict = (
            tables.file._FILE_OPEN_POLICY == "strict"
        )

    return _table_mod
# interface to/from ###
def to_hdf(
    path_or_buf: FilePath | HDFStore,
    key: str,
    value: DataFrame | Series,
    mode: str = "a",
    complevel: int | None = None,
    complib: str | None = None,
    append: bool = False,
    format: str | None = None,
    index: bool = True,
    min_itemsize: int | dict[str, int] | None = None,
    nan_rep=None,
    dropna: bool | None = None,
    data_columns: Literal[True] | list[str] | None = None,
    errors: str = "strict",
    encoding: str = "UTF-8",
) -> None:
    """store this object, close it if we opened it"""

    def writer(store) -> None:
        # dispatch to append (table format) or put, forwarding all options
        if append:
            store.append(
                key,
                value,
                format=format,
                index=index,
                min_itemsize=min_itemsize,
                nan_rep=nan_rep,
                dropna=dropna,
                data_columns=data_columns,
                errors=errors,
                encoding=encoding,
            )
        else:
            store.put(
                key,
                value,
                format=format,
                index=index,
                min_itemsize=min_itemsize,
                nan_rep=nan_rep,
                data_columns=data_columns,
                errors=errors,
                encoding=encoding,
                dropna=dropna,
            )

    path_or_buf = stringify_path(path_or_buf)
    if isinstance(path_or_buf, str):
        # we own the store: open it, write, and close on exit
        with HDFStore(
            path_or_buf, mode=mode, complevel=complevel, complib=complib
        ) as store:
            writer(store)
    else:
        # caller supplied an open HDFStore; leave its lifecycle to them
        writer(path_or_buf)
def read_hdf(
    path_or_buf: FilePath | HDFStore,
    key=None,
    mode: str = "r",
    errors: str = "strict",
    where: str | list | None = None,
    start: int | None = None,
    stop: int | None = None,
    columns: list[str] | None = None,
    iterator: bool = False,
    chunksize: int | None = None,
    **kwargs,
):
    """
    Read from the store, close it if we opened it.

    Retrieve pandas object stored in file, optionally based on where
    criteria.

    .. warning::

        Pandas uses PyTables for reading and writing HDF5 files, which allows
        serializing object-dtype data with pickle when using the "fixed" format.
        Loading pickled data received from untrusted sources can be unsafe.

        See: https://docs.python.org/3/library/pickle.html for more.

    Parameters
    ----------
    path_or_buf : str, path object, pandas.HDFStore
        Any valid string path is acceptable. Only supports the local file system,
        remote URLs and file-like objects are not supported.

        If you want to pass in a path object, pandas accepts any
        ``os.PathLike``.

        Alternatively, pandas accepts an open :class:`pandas.HDFStore` object.
    key : object, optional
        The group identifier in the store. Can be omitted if the HDF file
        contains a single pandas object.
    mode : {'r', 'r+', 'a'}, default 'r'
        Mode to use when opening the file. Ignored if path_or_buf is a
        :class:`pandas.HDFStore`. Default is 'r'.
    errors : str, default 'strict'
        Specifies how encoding and decoding errors are to be handled.
        See the errors argument for :func:`open` for a full list
        of options.
    where : list, optional
        A list of Term (or convertible) objects.
    start : int, optional
        Row number to start selection.
    stop : int, optional
        Row number to stop selection.
    columns : list, optional
        A list of columns names to return.
    iterator : bool, optional
        Return an iterator object.
    chunksize : int, optional
        Number of rows to include in an iteration when using an iterator.
    **kwargs
        Additional keyword arguments passed to HDFStore.

    Returns
    -------
    item : object
        The selected object. Return type depends on the object stored.

    See Also
    --------
    DataFrame.to_hdf : Write a HDF file from a DataFrame.
    HDFStore : Low-level access to HDF files.

    Examples
    --------
    >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z'])  # doctest: +SKIP
    >>> df.to_hdf('./store.h5', 'data')  # doctest: +SKIP
    >>> reread = pd.read_hdf('./store.h5')  # doctest: +SKIP
    """
    if mode not in ["r", "r+", "a"]:
        raise ValueError(
            f"mode {mode} is not allowed while performing a read. "
            f"Allowed modes are r, r+ and a."
        )
    # grab the scope
    if where is not None:
        where = _ensure_term(where, scope_level=1)

    if isinstance(path_or_buf, HDFStore):
        # caller owns the store; never close it here
        if not path_or_buf.is_open:
            raise OSError("The HDFStore must be open for reading.")

        store = path_or_buf
        auto_close = False
    else:
        path_or_buf = stringify_path(path_or_buf)
        if not isinstance(path_or_buf, str):
            raise NotImplementedError(
                "Support for generic buffers has not been implemented."
            )
        try:
            exists = os.path.exists(path_or_buf)

        # if filepath is too long
        except (TypeError, ValueError):
            exists = False

        if not exists:
            raise FileNotFoundError(f"File {path_or_buf} does not exist")

        store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs)
        # can't auto open/close if we are using an iterator
        # so delegate to the iterator
        auto_close = True

    try:
        if key is None:
            groups = store.groups()
            if len(groups) == 0:
                raise ValueError(
                    "Dataset(s) incompatible with Pandas data types, "
                    "not table, or no datasets found in HDF5 file."
                )
            candidate_only_group = groups[0]

            # For the HDF file to have only one dataset, all other groups
            # should then be metadata groups for that candidate group. (This
            # assumes that the groups() method enumerates parent groups
            # before their children.)
            for group_to_check in groups[1:]:
                if not _is_metadata_of(group_to_check, candidate_only_group):
                    raise ValueError(
                        "key must be provided when HDF5 "
                        "file contains multiple datasets."
                    )
            key = candidate_only_group._v_pathname
        return store.select(
            key,
            where=where,
            start=start,
            stop=stop,
            columns=columns,
            iterator=iterator,
            chunksize=chunksize,
            auto_close=auto_close,
        )
    except (ValueError, TypeError, KeyError):
        if not isinstance(path_or_buf, HDFStore):
            # if there is an error, close the store if we opened it.
            with suppress(AttributeError):
                store.close()

        raise
def _is_metadata_of(group: Node, parent_group: Node) -> bool:
"""Check if a given group is a metadata group for a given parent_group."""
if group._v_depth <= parent_group._v_depth:
return False
current = group
while current._v_depth > 1:
parent = current._v_parent
if parent == parent_group and current._v_name == "meta":
return True
current = current._v_parent
return False
class HDFStore:
"""
Dict-like IO interface for storing pandas objects in PyTables.
Either Fixed or Table format.
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
path : str
File path to HDF5 file.
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 or None disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
**kwargs
These parameters will be passed to the PyTables open_file method.
Examples
--------
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
**Create or load HDF5 file in-memory**
When passing the `driver` option to the PyTables open_file method through
**kwargs, the HDF5 file is loaded or created in-memory and will only be
written when closed:
>>> bar = pd.DataFrame(np.random.randn(10, 4))
>>> store = pd.HDFStore('test.h5', driver='H5FD_CORE')
>>> store['foo'] = bar
>>> store.close() # only now, data is written to disk
"""
_handle: File | None
_mode: str
def __init__(
self,
path,
mode: str = "a",
complevel: int | None = None,
complib=None,
fletcher32: bool = False,
**kwargs,
) -> None:
if "format" in kwargs:
raise ValueError("format is not a defined argument for HDFStore")
tables = import_optional_dependency("tables")
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
f"complib only supports {tables.filters.all_complibs} compression."
)
if complib is None and complevel is not None:
complib = tables.filters.default_complib
self._path = stringify_path(path)
if mode is None:
mode = "a"
self._mode = mode
self._handle = None
self._complevel = complevel if complevel else 0
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
def __fspath__(self) -> str:
return self._path
@property
def root(self):
"""return the root node"""
self._check_if_open()
assert self._handle is not None # for mypy
return self._handle.root
@property
def filename(self) -> str:
return self._path
def __getitem__(self, key: str):
return self.get(key)
def __setitem__(self, key: str, value) -> None:
self.put(key, value)
def __delitem__(self, key: str) -> None:
return self.remove(key)
def __getattr__(self, name: str):
"""allow attribute access to get stores"""
try:
return self.get(name)
except (KeyError, ClosedFileError):
pass
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
def __contains__(self, key: str) -> bool:
"""
check for existence of this key
can match the exact pathname or the pathnm w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if key in (name, name[1:]):
return True
return False
def __len__(self) -> int:
return len(self.groups())
def __repr__(self) -> str:
pstr = pprint_thing(self._path)
return f"{type(self)}\nFile path: {pstr}\n"
def __enter__(self) -> HDFStore:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
self.close()
def keys(self, include: str = "pandas") -> list[str]:
"""
Return a list of keys corresponding to objects stored in HDFStore.
Parameters
----------
include : str, default 'pandas'
When kind equals 'pandas' return pandas objects.
When kind equals 'native' return native HDF5 Table objects.
.. versionadded:: 1.1.0
Returns
-------
list
List of ABSOLUTE path-names (e.g. have the leading '/').
Raises
------
raises ValueError if kind has an illegal value
"""
if include == "pandas":
return [n._v_pathname for n in self.groups()]
elif include == "native":
assert self._handle is not None # mypy
return [
n._v_pathname for n in self._handle.walk_nodes("/", classname="Table")
]
raise ValueError(
f"`include` should be either 'pandas' or 'native' but is '{include}'"
)
def __iter__(self) -> Iterator[str]:
return iter(self.keys())
def items(self) -> Iterator[tuple[str, list]]:
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
    def open(self, mode: str = "a", **kwargs) -> None:
        """
        Open the file in the specified mode

        Parameters
        ----------
        mode : {'a', 'w', 'r', 'r+'}, default 'a'
            See HDFStore docstring or tables.open_file for info about modes
        **kwargs
            These parameters will be passed to the PyTables open_file method.
        """
        tables = _tables()

        if self._mode != mode:
            # if we are changing a write mode to read, ok
            if self._mode in ["a", "w"] and mode in ["r", "r+"]:
                pass
            elif mode in ["w"]:
                # this would truncate, raise here
                if self.is_open:
                    raise PossibleDataLossError(
                        f"Re-opening the file [{self._path}] with mode [{self._mode}] "
                        "will delete the current file!"
                    )

            self._mode = mode

        # close and reopen the handle
        if self.is_open:
            self.close()

        # compression filters are rebuilt on every open from the stored settings
        if self._complevel and self._complevel > 0:
            self._filters = _tables().Filters(
                self._complevel, self._complib, fletcher32=self._fletcher32
            )

        # NOTE(review): is_open is necessarily False here (we closed just
        # above), so this strict-policy guard looks unreachable — presumably
        # defensive against future reordering; confirm before removing.
        if _table_file_open_policy_is_strict and self.is_open:
            msg = (
                "Cannot open HDF5 file, which is already opened, "
                "even in read-only mode."
            )
            raise ValueError(msg)

        self._handle = tables.open_file(self._path, self._mode, **kwargs)
def close(self) -> None:
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self) -> bool:
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync: bool = False) -> None:
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
with suppress(OSError):
os.fsync(self._handle.fileno())
def get(self, key: str):
"""
Retrieve pandas object stored in file.
Parameters
----------
key : str
Returns
-------
object
Same type as object stored in file.
"""
with patch_pickle():
# GH#31167 Without this patch, pickle doesn't know how to unpickle
# old DateOffset objects now that they are cdef classes.
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
return self._read_group(group)
    def select(
        self,
        key: str,
        where=None,
        start=None,
        stop=None,
        columns=None,
        iterator: bool = False,
        chunksize=None,
        auto_close: bool = False,
    ):
        """
        Retrieve pandas object stored in file, optionally based on where criteria.

        .. warning::

            Pandas uses PyTables for reading and writing HDF5 files, which allows
            serializing object-dtype data with pickle when using the "fixed" format.
            Loading pickled data received from untrusted sources can be unsafe.

            See: https://docs.python.org/3/library/pickle.html for more.

        Parameters
        ----------
        key : str
            Object being retrieved from file.
        where : list or None
            List of Term (or convertible) objects, optional.
        start : int or None
            Row number to start selection.
        stop : int, default None
            Row number to stop selection.
        columns : list or None
            A list of columns that if not None, will limit the return columns.
        iterator : bool or False
            Returns an iterator.
        chunksize : int or None
            Number or rows to include in iteration, return an iterator.
        auto_close : bool or False
            Should automatically close the store when finished.

        Returns
        -------
        object
            Retrieved object from file.
        """
        group = self.get_node(key)
        if group is None:
            raise KeyError(f"No object named {key} in the file")

        # create the storer and axes
        where = _ensure_term(where, scope_level=1)
        s = self._create_storer(group)
        s.infer_axes()

        # function to call on iteration; the TableIterator supplies the
        # start/stop/where values per chunk
        def func(_start, _stop, _where):
            return s.read(start=_start, stop=_stop, where=_where, columns=columns)

        # create the iterator
        it = TableIterator(
            self,
            s,
            func,
            where=where,
            nrows=s.nrows,
            start=start,
            stop=stop,
            iterator=iterator,
            chunksize=chunksize,
            auto_close=auto_close,
        )

        # returns either the full result or the iterator itself, depending on
        # the iterator/chunksize arguments handled inside TableIterator
        return it.get_result()
def select_as_coordinates(
self,
key: str,
where=None,
start: int | None = None,
stop: int | None = None,
):
"""
return the selection as an Index
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_coordinates with a table")
return tbl.read_coordinates(where=where, start=start, stop=stop)
def select_column(
self,
key: str,
column: str,
start: int | None = None,
stop: int | None = None,
):
"""
return a single column from the table. This is generally only useful to
select an indexable
.. warning::
Pandas uses PyTables for reading and writing HDF5 files, which allows
serializing object-dtype data with pickle when using the "fixed" format.
Loading pickled data received from untrusted sources can be unsafe.
See: https://docs.python.org/3/library/pickle.html for more.
Parameters
----------
key : str
column : str
The column of interest.
start : int or None, default None
stop : int or None, default None
Raises
------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
tbl = self.get_storer(key)
if not isinstance(tbl, Table):
raise TypeError("can only read_column with a table")
return tbl.read_column(column=column, start=start, stop=stop)
    def select_as_multiple(
        self,
        keys,
        where=None,
        selector=None,
        columns=None,
        start=None,
        stop=None,
        iterator: bool = False,
        chunksize=None,
        auto_close: bool = False,
    ):
        """
        Retrieve pandas objects from multiple tables.

        .. warning::

            Pandas uses PyTables for reading and writing HDF5 files, which allows
            serializing object-dtype data with pickle when using the "fixed" format.
            Loading pickled data received from untrusted sources can be unsafe.

            See: https://docs.python.org/3/library/pickle.html for more.

        Parameters
        ----------
        keys : a list of the tables
        selector : the table to apply the where criteria (defaults to keys[0]
            if not supplied)
        columns : the columns I want back
        start : integer (defaults to None), row number to start selection
        stop : integer (defaults to None), row number to stop selection
        iterator : bool, return an iterator, default False
        chunksize : nrows to include in iteration, return an iterator
        auto_close : bool, default False
            Should automatically close the store when finished.

        Raises
        ------
        raises KeyError if keys or selector is not found or keys is empty
        raises TypeError if keys is not a list or tuple
        raises ValueError if the tables are not ALL THE SAME DIMENSIONS
        """
        # default to single select
        where = _ensure_term(where, scope_level=1)
        if isinstance(keys, (list, tuple)) and len(keys) == 1:
            keys = keys[0]
        if isinstance(keys, str):
            # a single key degenerates to a plain select
            return self.select(
                key=keys,
                where=where,
                columns=columns,
                start=start,
                stop=stop,
                iterator=iterator,
                chunksize=chunksize,
                auto_close=auto_close,
            )

        if not isinstance(keys, (list, tuple)):
            raise TypeError("keys must be a list/tuple")

        if not len(keys):
            raise ValueError("keys must have a non-zero length")

        if selector is None:
            selector = keys[0]

        # collect the tables
        tbls = [self.get_storer(k) for k in keys]
        s = self.get_storer(selector)

        # validate rows: every participating table must exist, be a Table,
        # and have exactly the same row count as the selector
        nrows = None
        for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
            if t is None:
                raise KeyError(f"Invalid table [{k}]")
            if not t.is_table:
                raise TypeError(
                    f"object [{t.pathname}] is not a table, and cannot be used in all "
                    "select as multiple"
                )

            if nrows is None:
                nrows = t.nrows
            elif t.nrows != nrows:
                raise ValueError("all tables must have exactly the same nrows!")

        # The isinstance checks here are redundant with the check above,
        # but necessary for mypy; see GH#29757
        _tbls = [x for x in tbls if isinstance(x, Table)]

        # axis is the concentration axes
        axis = {t.non_index_axes[0][0] for t in _tbls}.pop()

        def func(_start, _stop, _where):
            # retrieve the objs, _where is always passed as a set of
            # coordinates here
            objs = [
                t.read(where=_where, columns=columns, start=_start, stop=_stop)
                for t in tbls
            ]

            # concat and return
            return concat(objs, axis=axis, verify_integrity=False)._consolidate()

        # create the iterator
        it = TableIterator(
            self,
            s,
            func,
            where=where,
            nrows=nrows,
            start=start,
            stop=stop,
            iterator=iterator,
            chunksize=chunksize,
            auto_close=auto_close,
        )

        return it.get_result(coordinates=True)
def put(
self,
key: str,
value: DataFrame | Series,
format=None,
index: bool = True,
append: bool = False,
complib=None,
complevel: int | None = None,
min_itemsize: int | dict[str, int] | None = None,
nan_rep=None,
data_columns: Literal[True] | list[str] | None = None,
encoding=None,
errors: str = "strict",
track_times: bool = True,
dropna: bool = False,
) -> None:
"""
Store object in HDFStore.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'fixed(f)|table(t)', default is 'fixed'
Format to use when storing object in HDFStore. Value can be one of:
``'fixed'``
Fixed format. Fast writing/reading. Not-appendable, nor searchable.
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
index : bool, default True
Write DataFrame index as a column.
append : bool, default False
This will force Table format, append the input data to the existing.
data_columns : list of columns or True, default None
List of columns to create as data columns, or True to use all columns.
See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
encoding : str, default None
Provide an encoding for strings.
track_times : bool, default True
Parameter is propagated to 'create_table' method of 'PyTables'.
If set to False it enables to have the same h5 files (same hashes)
independent on creation time.
dropna : bool, default False, optional
Remove missing values.
.. versionadded:: 1.1.0
"""
if format is None:
format = get_option("io.hdf.default_format") or "fixed"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
data_columns=data_columns,
encoding=encoding,
errors=errors,
track_times=track_times,
dropna=dropna,
)
    def remove(self, key: str, where=None, start=None, stop=None) -> None:
        """
        Remove pandas object partially by specifying the where condition

        Parameters
        ----------
        key : str
            Node to remove or delete rows from
        where : list of Term (or convertible) objects, optional
        start : integer (defaults to None), row number to start selection
        stop  : integer (defaults to None), row number to stop selection

        Returns
        -------
        number of rows removed (or None if not a Table)

        Raises
        ------
        raises KeyError if key is not a valid store
        """
        where = _ensure_term(where, scope_level=1)
        try:
            s = self.get_storer(key)
        except KeyError:
            # the key is not a valid store, re-raising KeyError
            raise
        except AssertionError:
            # surface any assertion errors for e.g. debugging
            raise
        except Exception as err:
            # In tests we get here with ClosedFileError, TypeError, and
            # _table_mod.NoSuchNodeError. TODO: Catch only these?

            if where is not None:
                raise ValueError(
                    "trying to remove a node with a non-None where clause!"
                ) from err

            # we are actually trying to remove a node (with children)
            node = self.get_node(key)
            if node is not None:
                node._f_remove(recursive=True)
                return None

        # remove the node
        if com.all_none(where, start, stop):
            # NOTE(review): this branch returns None even for Tables, despite
            # the docstring's "number of rows removed" — confirm intended.
            s.group._f_remove(recursive=True)

        # delete from the table
        else:
            if not s.is_table:
                raise ValueError(
                    "can only remove with where on objects written as tables"
                )
            return s.delete(where=where, start=start, stop=stop)
def append(
self,
key: str,
value: DataFrame | Series,
format=None,
axes=None,
index: bool | list[str] = True,
append: bool = True,
complib=None,
complevel: int | None = None,
columns=None,
min_itemsize: int | dict[str, int] | None = None,
nan_rep=None,
chunksize=None,
expectedrows=None,
dropna: bool | None = None,
data_columns: Literal[True] | list[str] | None = None,
encoding=None,
errors: str = "strict",
) -> None:
"""
Append to Table in file.
Node must already exist and be Table format.
Parameters
----------
key : str
value : {Series, DataFrame}
format : 'table' is the default
Format to use when storing object in HDFStore. Value can be one of:
``'table'``
Table format. Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching / selecting
subsets of the data.
index : bool, default True
Write DataFrame index as a column.
append : bool, default True
Append the input data to the existing.
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
min_itemsize : dict of columns that specify minimum str sizes
nan_rep : str to use as str nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for str
dropna : bool, default False, optional
Do not write an ALL nan row to the store settable
by the option 'io.hdf.dropna_table'.
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
"""
if columns is not None:
raise TypeError(
"columns is not a supported keyword in append, try data_columns"
)
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or "table"
format = self._validate_format(format)
self._write_to_group(
key,
value,
format=format,
axes=axes,
index=index,
append=append,
complib=complib,
complevel=complevel,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
chunksize=chunksize,
expectedrows=expectedrows,
dropna=dropna,
data_columns=data_columns,
encoding=encoding,
errors=errors,
)
    def append_to_multiple(
        self,
        d: dict,
        value,
        selector,
        data_columns=None,
        axes=None,
        dropna: bool = False,
        **kwargs,
    ) -> None:
        """
        Append to multiple tables

        Parameters
        ----------
        d : a dict of table_name to table_columns, None is acceptable as the
            values of one node (this will get all the remaining columns)
        value : a pandas object
        selector : a string that designates the indexable table; all of its
            columns will be designed as data_columns, unless data_columns is
            passed, in which case these are used
        data_columns : list of columns to create as data columns, or True to
            use all columns
        dropna : if evaluates to True, drop rows from all tables if any single
            row in each table has all NaN. Default False.

        Notes
        -----
        axes parameter is currently not accepted
        """
        if axes is not None:
            raise TypeError(
                "axes is currently not accepted as a parameter to append_to_multiple; "
                "you can create the tables independently instead"
            )
        if not isinstance(d, dict):
            raise ValueError(
                "append_to_multiple must have a dictionary specified as the "
                "way to split the value"
            )
        if selector not in d:
            raise ValueError(
                "append_to_multiple requires a selector that is in passed dict"
            )
        # figure out the splitting axis (the non_index_axis)
        axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
        # figure out how to split the value
        remain_key = None
        remain_values: list = []
        for k, v in d.items():
            if v is None:
                # at most one entry may be None; it collects all leftover columns
                if remain_key is not None:
                    raise ValueError(
                        "append_to_multiple can only have one value in d that is None"
                    )
                remain_key = k
            else:
                remain_values.extend(v)
        if remain_key is not None:
            # assign every column not claimed by another table to the None
            # entry, preserving the original column order
            ordered = value.axes[axis]
            ordd = ordered.difference(Index(remain_values))
            ordd = sorted(ordered.get_indexer(ordd))
            d[remain_key] = ordered.take(ordd)
        # data_columns: default to the selector table's columns
        if data_columns is None:
            data_columns = d[selector]
        # ensure rows are synchronized across the tables
        if dropna:
            # keep only rows that are not all-NaN in every sub-table
            idxs = (value[cols].dropna(how="all").index for cols in d.values())
            valid_index = next(idxs)
            for index in idxs:
                valid_index = valid_index.intersection(index)
            value = value.loc[valid_index]
        min_itemsize = kwargs.pop("min_itemsize", None)
        # append
        for k, v in d.items():
            # only the selector table carries data_columns
            dc = data_columns if k == selector else None
            # compute the val
            val = value.reindex(v, axis=axis)
            # min_itemsize is filtered per-table to that table's own columns
            filtered = (
                {key: value for (key, value) in min_itemsize.items() if key in v}
                if min_itemsize is not None
                else None
            )
            self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs)
def create_table_index(
self,
key: str,
columns=None,
optlevel: int | None = None,
kind: str | None = None,
) -> None:
"""
Create a pytables index on the table.
Parameters
----------
key : str
columns : None, bool, or listlike[str]
Indicate which columns to create an index on.
* False : Do not create any indexes.
* True : Create indexes on all columns.
* None : Create indexes on all columns.
* listlike : Create indexes on the given columns.
optlevel : int or None, default None
Optimization level, if None, pytables defaults to 6.
kind : str or None, default None
Kind of index, if None, pytables defaults to "medium".
Raises
------
TypeError: raises if the node is not a table
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not isinstance(s, Table):
raise TypeError("cannot create table index on a Fixed format store")
s.create_index(columns=columns, optlevel=optlevel, kind=kind)
def groups(self) -> list:
"""
Return a list of all the top-level nodes.
Each node returned is not a pandas storage object.
Returns
-------
list
List of objects.
"""
_tables()
self._check_if_open()
assert self._handle is not None # for mypy
assert _table_mod is not None # for mypy
return [
g
for g in self._handle.walk_groups()
if (
not isinstance(g, _table_mod.link.Link)
and (
getattr(g._v_attrs, "pandas_type", None)
or getattr(g, "table", None)
or (isinstance(g, _table_mod.table.Table) and g._v_name != "table")
)
)
]
    def walk(self, where: str = "/") -> Iterator[tuple[str, list[str], list[str]]]:
        """
        Walk the pytables group hierarchy for pandas objects.

        This generator will yield the group path, subgroups and pandas object
        names for each group.

        Any non-pandas PyTables objects that are not a group will be ignored.

        The `where` group itself is listed first (preorder), then each of its
        child groups (following an alphanumerical order) is also traversed,
        following the same procedure.

        Parameters
        ----------
        where : str, default "/"
            Group where to start walking.

        Yields
        ------
        path : str
            Full path to a group (without trailing '/').
        groups : list
            Names (strings) of the groups contained in `path`.
        leaves : list
            Names (strings) of the pandas objects contained in `path`.
        """
        _tables()
        self._check_if_open()
        assert self._handle is not None  # for mypy
        assert _table_mod is not None  # for mypy
        for g in self._handle.walk_groups(where):
            # a group that itself carries a pandas_type is a pandas object,
            # not a container to descend into
            if getattr(g._v_attrs, "pandas_type", None) is not None:
                continue
            groups = []
            leaves = []
            for child in g._v_children.values():
                pandas_type = getattr(child._v_attrs, "pandas_type", None)
                if pandas_type is None:
                    # plain sub-groups are reported; non-pandas leaves ignored
                    if isinstance(child, _table_mod.group.Group):
                        groups.append(child._v_name)
                else:
                    # children with a pandas_type are pandas objects (leaves)
                    leaves.append(child._v_name)
            yield (g._v_pathname.rstrip("/"), groups, leaves)
def get_node(self, key: str) -> Node | None:
"""return the node with the key or None if it does not exist"""
self._check_if_open()
if not key.startswith("/"):
key = "/" + key
assert self._handle is not None
assert _table_mod is not None # for mypy
try:
node = self._handle.get_node(self.root, key)
except _table_mod.exceptions.NoSuchNodeError:
return None
assert isinstance(node, _table_mod.Node), type(node)
return node
def get_storer(self, key: str) -> GenericFixed | Table:
"""return the storer object for a key, raise if not in the file"""
group = self.get_node(key)
if group is None:
raise KeyError(f"No object named {key} in the file")
s = self._create_storer(group)
s.infer_axes()
return s
    def copy(
        self,
        file,
        mode: str = "w",
        propindexes: bool = True,
        keys=None,
        complib=None,
        complevel: int | None = None,
        fletcher32: bool = False,
        overwrite: bool = True,
    ) -> HDFStore:
        """
        Copy the existing store to a new file, updating in place.

        Parameters
        ----------
        propindexes : bool, default True
            Restore indexes in copied file.
        keys : list, optional
            List of keys to include in the copy (defaults to all).
        overwrite : bool, default True
            Whether to overwrite (remove and replace) existing nodes in the new store.
        mode, complib, complevel, fletcher32 same as in HDFStore.__init__

        Returns
        -------
        open file handle of the new store
        """
        new_store = HDFStore(
            file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32
        )
        if keys is None:
            keys = list(self.keys())
        if not isinstance(keys, (tuple, list)):
            keys = [keys]
        for k in keys:
            s = self.get_storer(k)
            if s is not None:
                if k in new_store:
                    # drop pre-existing nodes in the target before re-writing
                    if overwrite:
                        new_store.remove(k)
                data = self.select(k)
                if isinstance(s, Table):
                    # table format: re-append, optionally recreating indexes
                    # on the same columns that were indexed in the source
                    index: bool | list[str] = False
                    if propindexes:
                        index = [a.name for a in s.axes if a.is_indexed]
                    new_store.append(
                        k,
                        data,
                        index=index,
                        data_columns=getattr(s, "data_columns", None),
                        encoding=s.encoding,
                    )
                else:
                    # fixed format: a plain put suffices
                    new_store.put(k, data, encoding=s.encoding)
        return new_store
def info(self) -> str:
"""
Print detailed information on the store.
Returns
-------
str
"""
path = pprint_thing(self._path)
output = f"{type(self)}\nFile path: {path}\n"
if self.is_open:
lkeys = sorted(self.keys())
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(pprint_thing(s or "invalid_HDFStore node"))
except AssertionError:
# surface any assertion errors for e.g. debugging
raise
except Exception as detail:
keys.append(k)
dstr = pprint_thing(detail)
values.append(f"[invalid_HDFStore node: {dstr}]")
output += adjoin(12, keys, values)
else:
output += "Empty"
else:
output += "File is CLOSED"
return output
# ------------------------------------------------------------------------
# private methods
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError(f"{self._path} file is not open!")
def _validate_format(self, format: str) -> str:
"""validate / deprecate formats"""
# validate
try:
format = _FORMAT_MAP[format.lower()]
except KeyError as err:
raise TypeError(f"invalid HDFStore format specified [{format}]") from err
return format
    def _create_storer(
        self,
        group,
        format=None,
        value: DataFrame | Series | None = None,
        encoding: str = "UTF-8",
        errors: str = "strict",
    ) -> GenericFixed | Table:
        """return a suitable class to operate"""
        cls: type[GenericFixed] | type[Table]

        if value is not None and not isinstance(value, (Series, DataFrame)):
            raise TypeError("value must be None, Series, or DataFrame")

        # pt: pandas storage kind (e.g. "series", "frame", "frame_table")
        # tt: table subtype, only meaningful for table formats
        pt = _ensure_decoded(getattr(group._v_attrs, "pandas_type", None))
        tt = _ensure_decoded(getattr(group._v_attrs, "table_type", None))
        # infer the pt from the passed value
        if pt is None:
            if value is None:
                _tables()
                assert _table_mod is not None  # for mypy
                if getattr(group, "table", None) or isinstance(
                    group, _table_mod.table.Table
                ):
                    # an existing pytables table not written by pandas
                    pt = "frame_table"
                    tt = "generic_table"
                else:
                    raise TypeError(
                        "cannot create a storer if the object is not existing "
                        "nor a value are passed"
                    )
            else:
                if isinstance(value, Series):
                    pt = "series"
                else:
                    pt = "frame"
                # we are actually a table
                if format == "table":
                    pt += "_table"
        # a storer node
        if "table" not in pt:
            # fixed-format storage: dispatch on the pandas kind only
            _STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed}
            try:
                cls = _STORER_MAP[pt]
            except KeyError as err:
                raise TypeError(
                    f"cannot properly create the storer for: [_STORER_MAP] [group->"
                    f"{group},value->{type(value)},format->{format}"
                ) from err
            return cls(self, group, encoding=encoding, errors=errors)
        # existing node (and must be a table)
        if tt is None:
            # if we are a writer, determine the tt
            if value is not None:
                # the number of index levels decides the appendable subtype
                if pt == "series_table":
                    index = getattr(value, "index", None)
                    if index is not None:
                        if index.nlevels == 1:
                            tt = "appendable_series"
                        elif index.nlevels > 1:
                            tt = "appendable_multiseries"
                elif pt == "frame_table":
                    index = getattr(value, "index", None)
                    if index is not None:
                        if index.nlevels == 1:
                            tt = "appendable_frame"
                        elif index.nlevels > 1:
                            tt = "appendable_multiframe"
        _TABLE_MAP = {
            "generic_table": GenericTable,
            "appendable_series": AppendableSeriesTable,
            "appendable_multiseries": AppendableMultiSeriesTable,
            "appendable_frame": AppendableFrameTable,
            "appendable_multiframe": AppendableMultiFrameTable,
            "worm": WORMTable,
        }
        try:
            cls = _TABLE_MAP[tt]
        except KeyError as err:
            raise TypeError(
                f"cannot properly create the storer for: [_TABLE_MAP] [group->"
                f"{group},value->{type(value)},format->{format}"
            ) from err
        return cls(self, group, encoding=encoding, errors=errors)
    def _write_to_group(
        self,
        key: str,
        value: DataFrame | Series,
        format,
        axes=None,
        index: bool | list[str] = True,
        append: bool = False,
        complib=None,
        complevel: int | None = None,
        fletcher32=None,
        min_itemsize: int | dict[str, int] | None = None,
        chunksize=None,
        expectedrows=None,
        dropna: bool = False,
        nan_rep=None,
        data_columns=None,
        encoding=None,
        errors: str = "strict",
        track_times: bool = True,
    ) -> None:
        """Write ``value`` under ``key``, creating the group/storer as needed."""
        # we don't want to store a table node at all if our object is 0-len
        # as there are not dtypes
        if getattr(value, "empty", None) and (format == "table" or append):
            return
        group = self._identify_group(key, append)
        s = self._create_storer(group, format, value, encoding=encoding, errors=errors)
        if append:
            # raise if we are trying to append to a Fixed format,
            # or a table that exists (and we are putting)
            if not s.is_table or (s.is_table and format == "fixed" and s.is_exists):
                raise ValueError("Can only append to Tables")
            if not s.is_exists:
                # first write of a new table: stamp pandas type/version attrs
                s.set_object_info()
        else:
            s.set_object_info()
        if not s.is_table and complib:
            raise ValueError("Compression not supported on Fixed format stores")
        # write the object
        s.write(
            obj=value,
            axes=axes,
            append=append,
            complib=complib,
            complevel=complevel,
            fletcher32=fletcher32,
            min_itemsize=min_itemsize,
            chunksize=chunksize,
            expectedrows=expectedrows,
            dropna=dropna,
            nan_rep=nan_rep,
            data_columns=data_columns,
            track_times=track_times,
        )
        # tables get a pytables index created on the requested columns
        if isinstance(s, Table) and index:
            s.create_index(columns=index)
def _read_group(self, group: Node):
s = self._create_storer(group)
s.infer_axes()
return s.read()
def _identify_group(self, key: str, append: bool) -> Node:
"""Identify HDF5 group based on key, delete/create group if needed."""
group = self.get_node(key)
# we make this assertion for mypy; the get_node call will already
# have raised if this is incorrect
assert self._handle is not None
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
if group is None:
group = self._create_nodes_and_group(key)
return group
def _create_nodes_and_group(self, key: str) -> Node:
"""Create nodes from key and return group name."""
# assertion for mypy
assert self._handle is not None
paths = key.split("/")
# recursively create the groups
path = "/"
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith("/"):
new_path += "/"
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
return group
class TableIterator:
    """
    Define the iteration interface on a table

    Parameters
    ----------
    store : HDFStore
    s : the referred storer
    func : the function to execute the query
    where : the where of the query
    nrows : the rows to iterate on
    start : the passed start value (default is None)
    stop : the passed stop value (default is None)
    iterator : bool, default False
        Whether to use the default iterator.
    chunksize : the passed chunking value (default is 100000)
    auto_close : bool, default False
        Whether to automatically close the store at the end of iteration.
    """

    chunksize: int | None
    store: HDFStore
    s: GenericFixed | Table

    def __init__(
        self,
        store: HDFStore,
        s: GenericFixed | Table,
        func,
        where,
        nrows,
        start=None,
        stop=None,
        iterator: bool = False,
        chunksize: int | None = None,
        auto_close: bool = False,
    ) -> None:
        self.store = store
        self.s = s
        self.func = func
        self.where = where
        # set start/stop if they are not set if we are a table
        if self.s.is_table:
            if nrows is None:
                nrows = 0
            if start is None:
                start = 0
            if stop is None:
                stop = nrows
            # never read beyond the table's row count
            stop = min(nrows, stop)
        self.nrows = nrows
        self.start = start
        self.stop = stop
        # coordinates are populated lazily by get_result()
        self.coordinates = None
        if iterator or chunksize is not None:
            # chunked iteration requested; apply the default chunk size
            if chunksize is None:
                chunksize = 100000
            self.chunksize = int(chunksize)
        else:
            self.chunksize = None
        self.auto_close = auto_close

    def __iter__(self) -> Iterator:
        # iterate
        current = self.start
        if self.coordinates is None:
            raise ValueError("Cannot iterate until get_result is called.")
        while current < self.stop:
            stop = min(current + self.chunksize, self.stop)
            # fetch this chunk's rows by their stored coordinates
            value = self.func(None, None, self.coordinates[current:stop])
            current = stop
            # skip empty chunks rather than yielding them
            if value is None or not len(value):
                continue
            yield value
        self.close()

    def close(self) -> None:
        if self.auto_close:
            self.store.close()

    def get_result(self, coordinates: bool = False):
        # return the actual iterator
        if self.chunksize is not None:
            if not isinstance(self.s, Table):
                raise TypeError("can only use an iterator or chunksize on a table")
            # resolve the query once; __iter__ then slices these coordinates
            self.coordinates = self.s.read_coordinates(where=self.where)
            return self
        # if specified read via coordinates (necessary for multiple selections
        if coordinates:
            if not isinstance(self.s, Table):
                raise TypeError("can only read_coordinates on a table")
            where = self.s.read_coordinates(
                where=self.where, start=self.start, stop=self.stop
            )
        else:
            where = self.where
        # directly return the result
        results = self.func(self.start, self.stop, where)
        self.close()
        return results
class IndexCol:
    """
    an index column description class

    Parameters
    ----------
    axis : axis which I reference
    values : the ndarray like converted values
    kind : a string description of this type
    typ : the pytables type
    pos : the position in the pytables
    """

    # index columns are indexable by definition
    is_an_indexable: bool = True
    is_data_indexable: bool = True
    # per-column metadata fields persisted via update_info/set_info
    _info_fields = ["freq", "tz", "index_name"]

    def __init__(
        self,
        name: str,
        values=None,
        kind=None,
        typ=None,
        cname: str | None = None,
        axis=None,
        pos=None,
        freq=None,
        tz=None,
        index_name=None,
        ordered=None,
        table=None,
        meta=None,
        metadata=None,
    ) -> None:
        if not isinstance(name, str):
            raise ValueError("`name` must be a str.")
        self.values = values
        self.kind = kind
        self.typ = typ
        self.name = name
        # cname: the column name inside the pytables Table (defaults to name)
        self.cname = cname or name
        self.axis = axis
        self.pos = pos
        self.freq = freq
        self.tz = tz
        self.index_name = index_name
        self.ordered = ordered
        self.table = table
        self.meta = meta
        self.metadata = metadata
        if pos is not None:
            self.set_pos(pos)
        # These are ensured as long as the passed arguments match the
        # constructor annotations.
        assert isinstance(self.name, str)
        assert isinstance(self.cname, str)

    @property
    def itemsize(self) -> int:
        # Assumes self.typ has already been initialized
        return self.typ.itemsize

    @property
    def kind_attr(self) -> str:
        # name of the node attribute storing this column's kind
        return f"{self.name}_kind"

    def set_pos(self, pos: int) -> None:
        """set the position of this column in the Table"""
        self.pos = pos
        if pos is not None and self.typ is not None:
            self.typ._v_pos = pos

    def __repr__(self) -> str:
        # render as "name->...,cname->...,axis->...,pos->...,kind->..."
        temp = tuple(
            map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind))
        )
        return ",".join(
            [
                f"{key}->{value}"
                for key, value in zip(["name", "cname", "axis", "pos", "kind"], temp)
            ]
        )

    def __eq__(self, other: Any) -> bool:
        """compare 2 col items"""
        return all(
            getattr(self, a, None) == getattr(other, a, None)
            for a in ["name", "cname", "axis", "pos"]
        )

    def __ne__(self, other) -> bool:
        return not self.__eq__(other)

    @property
    def is_indexed(self) -> bool:
        """return whether I am an indexed column"""
        if not hasattr(self.table, "cols"):
            # e.g. if infer hasn't been called yet, self.table will be None.
            return False
        return getattr(self.table.cols, self.cname).is_indexed

    def convert(
        self, values: np.ndarray, nan_rep, encoding: str, errors: str
    ) -> tuple[np.ndarray, np.ndarray] | tuple[DatetimeIndex, DatetimeIndex]:
        """
        Convert the data from this selection to the appropriate pandas type.
        """
        assert isinstance(values, np.ndarray), type(values)
        # values is a recarray
        if values.dtype.fields is not None:
            # pull this column's field out of the structured array
            values = values[self.cname]
        val_kind = _ensure_decoded(self.kind)
        values = _maybe_convert(values, val_kind, encoding, errors)
        kwargs = {}
        kwargs["name"] = _ensure_decoded(self.index_name)
        if self.freq is not None:
            kwargs["freq"] = _ensure_decoded(self.freq)
        # choose the Index factory based on the stored dtype
        factory: type[Index] | type[DatetimeIndex] = Index
        if is_datetime64_dtype(values.dtype) or is_datetime64tz_dtype(values.dtype):
            factory = DatetimeIndex
        elif values.dtype == "i8" and "freq" in kwargs:
            # PeriodIndex data is stored as i8
            # error: Incompatible types in assignment (expression has type
            # "Callable[[Any, KwArg(Any)], PeriodIndex]", variable has type
            # "Union[Type[Index], Type[DatetimeIndex]]")
            factory = lambda x, **kwds: PeriodIndex(  # type: ignore[assignment]
                ordinal=x, **kwds
            )
        # making an Index instance could throw a number of different errors
        try:
            new_pd_index = factory(values, **kwargs)
        except ValueError:
            # if the output freq is different that what we recorded,
            # it should be None (see also 'doc example part 2')
            if "freq" in kwargs:
                kwargs["freq"] = None
            new_pd_index = factory(values, **kwargs)
        final_pd_index = _set_tz(new_pd_index, self.tz)
        return final_pd_index, final_pd_index

    def take_data(self):
        """return the values"""
        return self.values

    @property
    def attrs(self):
        # pytables attribute set of the owning table node
        return self.table._v_attrs

    @property
    def description(self):
        # pytables table description (column layout)
        return self.table.description

    @property
    def col(self):
        """return my current col description"""
        return getattr(self.description, self.cname, None)

    @property
    def cvalues(self):
        """return my cython values"""
        return self.values

    def __iter__(self) -> Iterator:
        return iter(self.values)

    def maybe_set_size(self, min_itemsize=None) -> None:
        """
        maybe set a string col itemsize:
        min_itemsize can be an integer or a dict with this columns name
        with an integer size
        """
        if _ensure_decoded(self.kind) == "string":
            if isinstance(min_itemsize, dict):
                min_itemsize = min_itemsize.get(self.name)
            # only ever grow the column, never shrink it
            if min_itemsize is not None and self.typ.itemsize < min_itemsize:
                self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos)

    def validate_names(self) -> None:
        # no name restrictions on plain index columns
        pass

    def validate_and_set(self, handler: AppendableTable, append: bool) -> None:
        # full validation pipeline, then persist attributes/metadata
        self.table = handler.table
        self.validate_col()
        self.validate_attr(append)
        self.validate_metadata(handler)
        self.write_metadata(handler)
        self.set_attr()

    def validate_col(self, itemsize=None):
        """validate this column: return the compared against itemsize"""
        # validate this column for string truncation (or reset to the max size)
        if _ensure_decoded(self.kind) == "string":
            c = self.col
            if c is not None:
                if itemsize is None:
                    itemsize = self.itemsize
                if c.itemsize < itemsize:
                    raise ValueError(
                        f"Trying to store a string with len [{itemsize}] in "
                        f"[{self.cname}] column but\nthis column has a limit of "
                        f"[{c.itemsize}]!\nConsider using min_itemsize to "
                        "preset the sizes on these columns"
                    )
                return c.itemsize
        return None

    def validate_attr(self, append: bool) -> None:
        # check for backwards incompatibility
        if append:
            existing_kind = getattr(self.attrs, self.kind_attr, None)
            if existing_kind is not None and existing_kind != self.kind:
                raise TypeError(
                    f"incompatible kind in col [{existing_kind} - {self.kind}]"
                )

    def update_info(self, info) -> None:
        """
        set/update the info for this indexable with the key/value
        if there is a conflict raise/warn as needed
        """
        for key in self._info_fields:
            value = getattr(self, key, None)
            idx = info.setdefault(self.name, {})
            existing_value = idx.get(key)
            if key in idx and value is not None and existing_value != value:
                # frequency/name just warn
                if key in ["freq", "index_name"]:
                    ws = attribute_conflict_doc % (key, existing_value, value)
                    warnings.warn(
                        ws, AttributeConflictWarning, stacklevel=find_stack_level()
                    )
                    # reset
                    idx[key] = None
                    setattr(self, key, None)
                else:
                    # other conflicting fields are hard errors
                    raise ValueError(
                        f"invalid info for [{self.name}] for [{key}], "
                        f"existing_value [{existing_value}] conflicts with "
                        f"new value [{value}]"
                    )
            else:
                if value is not None or existing_value is not None:
                    idx[key] = value

    def set_info(self, info) -> None:
        """set my state from the passed info"""
        idx = info.get(self.name)
        if idx is not None:
            self.__dict__.update(idx)

    def set_attr(self) -> None:
        """set the kind for this column"""
        setattr(self.attrs, self.kind_attr, self.kind)

    def validate_metadata(self, handler: AppendableTable) -> None:
        """validate that kind=category does not change the categories"""
        if self.meta == "category":
            new_metadata = self.metadata
            cur_metadata = handler.read_metadata(self.cname)
            if (
                new_metadata is not None
                and cur_metadata is not None
                and not array_equivalent(new_metadata, cur_metadata)
            ):
                raise ValueError(
                    "cannot append a categorical with "
                    "different categories to the existing"
                )

    def write_metadata(self, handler: AppendableTable) -> None:
        """set the meta data"""
        if self.metadata is not None:
            handler.write_metadata(self.cname, self.metadata)
class GenericIndexCol(IndexCol):
    """an index which is not represented in the data of the table"""

    @property
    def is_indexed(self) -> bool:
        # this column has no physical counterpart, so it is never indexed
        return False

    # error: Return type "Tuple[Int64Index, Int64Index]" of "convert"
    # incompatible with return type "Union[Tuple[ndarray[Any, Any],
    # ndarray[Any, Any]], Tuple[DatetimeIndex, DatetimeIndex]]" in
    # supertype "IndexCol"
    def convert(  # type: ignore[override]
        self, values: np.ndarray, nan_rep, encoding: str, errors: str
    ) -> tuple[Int64Index, Int64Index]:
        """
        Convert the data from this selection to the appropriate pandas type.

        Parameters
        ----------
        values : np.ndarray
        nan_rep : str
        encoding : str
        errors : str
        """
        assert isinstance(values, np.ndarray), type(values)
        # synthesize a default integer index of matching length; the
        # stored values themselves are ignored for this column kind
        index = Int64Index(np.arange(len(values)))
        return index, index

    def set_attr(self) -> None:
        # nothing to persist for a synthesized index
        pass
class DataCol(IndexCol):
    """
    a data holding column, by definition this is not indexable

    Parameters
    ----------
    data : the actual data
    cname : the column name in the table to hold the data (typically
        values)
    meta : a string description of the metadata
    metadata : the actual metadata
    """

    # data columns hold values, not index entries
    is_an_indexable = False
    is_data_indexable = False
    _info_fields = ["tz", "ordered"]

    def __init__(
        self,
        name: str,
        values=None,
        kind=None,
        typ=None,
        cname: str | None = None,
        pos=None,
        tz=None,
        ordered=None,
        table=None,
        meta=None,
        metadata=None,
        dtype: DtypeArg | None = None,
        data=None,
    ) -> None:
        super().__init__(
            name=name,
            values=values,
            kind=kind,
            typ=typ,
            pos=pos,
            cname=cname,
            tz=tz,
            ordered=ordered,
            table=table,
            meta=meta,
            metadata=metadata,
        )
        self.dtype = dtype
        self.data = data

    @property
    def dtype_attr(self) -> str:
        # node attribute name storing this column's dtype
        return f"{self.name}_dtype"

    @property
    def meta_attr(self) -> str:
        # node attribute name storing this column's meta description
        return f"{self.name}_meta"

    def __repr__(self) -> str:
        temp = tuple(
            map(
                pprint_thing, (self.name, self.cname, self.dtype, self.kind, self.shape)
            )
        )
        return ",".join(
            [
                f"{key}->{value}"
                for key, value in zip(["name", "cname", "dtype", "kind", "shape"], temp)
            ]
        )

    def __eq__(self, other: Any) -> bool:
        """compare 2 col items"""
        return all(
            getattr(self, a, None) == getattr(other, a, None)
            for a in ["name", "cname", "dtype", "pos"]
        )

    def set_data(self, data: ArrayLike) -> None:
        # record the data along with its derived dtype name and kind
        assert data is not None
        assert self.dtype is None
        data, dtype_name = _get_data_and_dtype_name(data)
        self.data = data
        self.dtype = dtype_name
        self.kind = _dtype_to_kind(dtype_name)

    def take_data(self):
        """return the data"""
        return self.data

    @classmethod
    def _get_atom(cls, values: ArrayLike) -> Col:
        """
        Get an appropriately typed and shaped pytables.Col object for values.
        """
        dtype = values.dtype
        # error: Item "ExtensionDtype" of "Union[ExtensionDtype, dtype[Any]]" has no
        # attribute "itemsize"
        itemsize = dtype.itemsize  # type: ignore[union-attr]
        shape = values.shape
        if values.ndim == 1:
            # EA, use block shape pretending it is 2D
            # TODO(EA2D): not necessary with 2D EAs
            shape = (1, values.size)
        # dispatch on the dtype to the matching atom constructor
        if isinstance(values, Categorical):
            codes = values.codes
            atom = cls.get_atom_data(shape, kind=codes.dtype.name)
        elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
            atom = cls.get_atom_datetime64(shape)
        elif is_timedelta64_dtype(dtype):
            atom = cls.get_atom_timedelta64(shape)
        elif is_complex_dtype(dtype):
            atom = _tables().ComplexCol(itemsize=itemsize, shape=shape[0])
        elif is_string_dtype(dtype):
            atom = cls.get_atom_string(shape, itemsize)
        else:
            atom = cls.get_atom_data(shape, kind=dtype.name)
        return atom

    @classmethod
    def get_atom_string(cls, shape, itemsize):
        return _tables().StringCol(itemsize=itemsize, shape=shape[0])

    @classmethod
    def get_atom_coltype(cls, kind: str) -> type[Col]:
        """return the PyTables column class for this column"""
        if kind.startswith("uint"):
            k4 = kind[4:]
            col_name = f"UInt{k4}Col"
        elif kind.startswith("period"):
            # we store as integer
            col_name = "Int64Col"
        else:
            kcap = kind.capitalize()
            col_name = f"{kcap}Col"
        return getattr(_tables(), col_name)

    @classmethod
    def get_atom_data(cls, shape, kind: str) -> Col:
        return cls.get_atom_coltype(kind=kind)(shape=shape[0])

    @classmethod
    def get_atom_datetime64(cls, shape):
        # datetime64 values are stored as i8
        return _tables().Int64Col(shape=shape[0])

    @classmethod
    def get_atom_timedelta64(cls, shape):
        # timedelta64 values are stored as i8
        return _tables().Int64Col(shape=shape[0])

    @property
    def shape(self):
        return getattr(self.data, "shape", None)

    @property
    def cvalues(self):
        """return my cython values"""
        return self.data

    def validate_attr(self, append) -> None:
        """validate that we have the same order as the existing & same dtype"""
        if append:
            existing_fields = getattr(self.attrs, self.kind_attr, None)
            if existing_fields is not None and existing_fields != list(self.values):
                raise ValueError("appended items do not match existing items in table!")
            existing_dtype = getattr(self.attrs, self.dtype_attr, None)
            if existing_dtype is not None and existing_dtype != self.dtype:
                raise ValueError(
                    "appended items dtype do not match existing items dtype in table!"
                )

    def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str):
        """
        Convert the data from this selection to the appropriate pandas type.

        Parameters
        ----------
        values : np.ndarray
        nan_rep :
        encoding : str
        errors : str

        Returns
        -------
        index : listlike to become an Index
        data : ndarraylike to become a column
        """
        assert isinstance(values, np.ndarray), type(values)
        # values is a recarray
        if values.dtype.fields is not None:
            values = values[self.cname]
        assert self.typ is not None
        if self.dtype is None:
            # Note: in tests we never have timedelta64 or datetime64,
            # so the _get_data_and_dtype_name may be unnecessary
            converted, dtype_name = _get_data_and_dtype_name(values)
            kind = _dtype_to_kind(dtype_name)
        else:
            converted = values
            dtype_name = self.dtype
            kind = self.kind
        assert isinstance(converted, np.ndarray)  # for mypy
        # use the meta if needed
        meta = _ensure_decoded(self.meta)
        metadata = self.metadata
        ordered = self.ordered
        tz = self.tz
        assert dtype_name is not None
        # convert to the correct dtype
        dtype = _ensure_decoded(dtype_name)
        # reverse converts
        if dtype == "datetime64":
            # recreate with tz if indicated
            converted = _set_tz(converted, tz, coerce=True)
        elif dtype == "timedelta64":
            converted = np.asarray(converted, dtype="m8[ns]")
        elif dtype == "date":
            try:
                converted = np.asarray(
                    [date.fromordinal(v) for v in converted], dtype=object
                )
            except ValueError:
                # fall back for values stored as timestamps, not ordinals
                converted = np.asarray(
                    [date.fromtimestamp(v) for v in converted], dtype=object
                )
        elif meta == "category":
            # we have a categorical
            categories = metadata
            codes = converted.ravel()
            # if we have stored a NaN in the categories
            # then strip it; in theory we could have BOTH
            # -1s in the codes and nulls :<
            if categories is None:
                # Handle case of NaN-only categorical columns in which case
                # the categories are an empty array; when this is stored,
                # pytables cannot write a zero-len array, so on readback
                # the categories would be None and `read_hdf()` would fail.
                categories = Index([], dtype=np.float64)
            else:
                mask = isna(categories)
                if mask.any():
                    # drop null categories and shift codes accordingly
                    categories = categories[~mask]
                    codes[codes != -1] -= mask.astype(int).cumsum()._values
            converted = Categorical.from_codes(
                codes, categories=categories, ordered=ordered
            )
        else:
            try:
                converted = converted.astype(dtype, copy=False)
            except TypeError:
                # fall back to object dtype when a direct cast is impossible
                converted = converted.astype("O", copy=False)
        # convert nans / decode
        if _ensure_decoded(kind) == "string":
            converted = _unconvert_string_array(
                converted, nan_rep=nan_rep, encoding=encoding, errors=errors
            )
        return self.values, converted

    def set_attr(self) -> None:
        """set the data for this column"""
        setattr(self.attrs, self.kind_attr, self.values)
        setattr(self.attrs, self.meta_attr, self.meta)
        assert self.dtype is not None
        setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
    """represent a data column that can be indexed"""

    is_data_indexable = True

    def validate_names(self) -> None:
        if not Index(self.values).is_object():
            # TODO: should the message here be more specifically non-str?
            raise ValueError("cannot have non-object label DataIndexableCol")

    # indexable columns use scalar atoms (no shape), unlike plain data
    # columns which pass shape[0] to the atom constructors
    @classmethod
    def get_atom_string(cls, shape, itemsize):
        return _tables().StringCol(itemsize=itemsize)

    @classmethod
    def get_atom_data(cls, shape, kind: str) -> Col:
        return cls.get_atom_coltype(kind=kind)()

    @classmethod
    def get_atom_datetime64(cls, shape):
        return _tables().Int64Col()

    @classmethod
    def get_atom_timedelta64(cls, shape):
        return _tables().Int64Col()
# thin marker subclass; behavior is inherited unchanged from DataIndexableCol
class GenericDataIndexableCol(DataIndexableCol):
    """represent a generic pytables data column"""
class Fixed:
    """
    represent an object in my store
    facilitate read/write of various types of objects
    this is an abstract base class

    Parameters
    ----------
    parent : HDFStore
    group : Node
        The group node where the table resides.
    """

    # the following unvalued annotations must be provided by subclasses
    pandas_kind: str
    format_type: str = "fixed"  # GH#30962 needed by dask
    obj_type: type[DataFrame | Series]
    ndim: int
    parent: HDFStore
    is_table: bool = False
    def __init__(
        self,
        parent: HDFStore,
        group: Node,
        encoding: str | None = "UTF-8",
        errors: str = "strict",
    ) -> None:
        """Bind this storer to its owning store and pytables group node."""
        assert isinstance(parent, HDFStore), type(parent)
        assert _table_mod is not None  # needed for mypy
        assert isinstance(group, _table_mod.Node), type(group)
        self.parent = parent
        self.group = group
        # encoding is normalized through the _ensure_encoding helper
        self.encoding = _ensure_encoding(encoding)
        self.errors = errors
@property
def is_old_version(self) -> bool:
return self.version[0] <= 0 and self.version[1] <= 10 and self.version[2] < 1
@property
def version(self) -> tuple[int, int, int]:
"""compute and set our version"""
version = _ensure_decoded(getattr(self.group._v_attrs, "pandas_version", None))
try:
version = tuple(int(x) for x in version.split("."))
if len(version) == 2:
version = version + (0,)
except AttributeError:
version = (0, 0, 0)
return version
@property
def pandas_type(self):
return _ensure_decoded(getattr(self.group._v_attrs, "pandas_type", None))
def __repr__(self) -> str:
"""return a pretty representation of myself"""
self.infer_axes()
s = self.shape
if s is not None:
if isinstance(s, (list, tuple)):
jshape = ",".join([pprint_thing(x) for x in s])
s = f"[{jshape}]"
return f"{self.pandas_type:12.12} (shape->{s})"
return self.pandas_type
def set_object_info(self) -> None:
"""set my pandas type & version"""
self.attrs.pandas_type = str(self.pandas_kind)
self.attrs.pandas_version = str(_version)
def copy(self) -> Fixed:
new_self = copy.copy(self)
return new_self
@property
def shape(self):
return self.nrows
@property
def pathname(self):
return self.group._v_pathname
@property
def _handle(self):
return self.parent._handle
@property
def _filters(self):
return self.parent._filters
@property
def _complevel(self) -> int:
return self.parent._complevel
@property
def _fletcher32(self) -> bool:
return self.parent._fletcher32
@property
def attrs(self):
return self.group._v_attrs
def set_attrs(self) -> None:
"""set our object attributes"""
def get_attrs(self) -> None:
"""get our object attributes"""
@property
def storable(self):
"""return my storable"""
return self.group
@property
def is_exists(self) -> bool:
return False
@property
def nrows(self):
return getattr(self.storable, "nrows", None)
def validate(self, other) -> Literal[True] | None:
"""validate against an existing storable"""
if other is None:
return None
return True
def validate_version(self, where=None) -> None:
"""are we trying to operate on an old version?"""
def infer_axes(self) -> bool:
"""
infer the axes of my storer
return a boolean indicating if we have a valid storer or not
"""
s = self.storable
if s is None:
return False
self.get_attrs()
return True
def read(
self,
where=None,
columns=None,
start: int | None = None,
stop: int | None = None,
):
raise NotImplementedError(
"cannot read on an abstract storer: subclasses should implement"
)
def write(self, **kwargs):
raise NotImplementedError(
"cannot write on an abstract storer: subclasses should implement"
)
def delete(
self, where=None, start: int | None = None, stop: int | None = None
) -> None:
"""
support fully deleting the node in its entirety (only) - where
specification must be None
"""
if com.all_none(where, start, stop):
self._handle.remove_node(self.group, recursive=True)
return None
raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
    """a generified fixed version"""

    _index_type_map = {DatetimeIndex: "datetime", PeriodIndex: "period"}
    _reverse_index_map = {v: k for k, v in _index_type_map.items()}
    attributes: list[str] = []

    # indexer helpers
    def _class_to_alias(self, cls) -> str:
        return self._index_type_map.get(cls, "")

    def _alias_to_class(self, alias):
        if isinstance(alias, type):  # pragma: no cover
            # compat: for a short period of time master stored types
            return alias
        return self._reverse_index_map.get(alias, Index)

    def _get_index_factory(self, attrs):
        # Map the stored index_class alias to a callable that reconstructs
        # the Index, plus any freq/tz kwargs recovered from the node attrs.
        index_class = self._alias_to_class(
            _ensure_decoded(getattr(attrs, "index_class", ""))
        )
        factory: Callable
        if index_class == DatetimeIndex:

            def f(values, freq=None, tz=None):
                # data are already in UTC, localize and convert if tz present
                dta = DatetimeArray._simple_new(values.values, freq=freq)
                result = DatetimeIndex._simple_new(dta, name=None)
                if tz is not None:
                    result = result.tz_localize("UTC").tz_convert(tz)
                return result

            factory = f
        elif index_class == PeriodIndex:

            def f(values, freq=None, tz=None):
                parr = PeriodArray._simple_new(values, freq=freq)
                return PeriodIndex._simple_new(parr, name=None)

            factory = f
        else:
            factory = index_class

        kwargs = {}
        if "freq" in attrs:
            kwargs["freq"] = attrs["freq"]
            if index_class is Index:
                # DTI/PI would be gotten by _alias_to_class
                factory = TimedeltaIndex

        if "tz" in attrs:
            if isinstance(attrs["tz"], bytes):
                # created by python2
                kwargs["tz"] = attrs["tz"].decode("utf-8")
            else:
                # created by python3
                kwargs["tz"] = attrs["tz"]
            assert index_class is DatetimeIndex  # just checking

        return factory, kwargs

    def validate_read(self, columns, where) -> None:
        """
        raise if any keywords are passed which are not-None
        """
        # Fixed-format stores can only be read in their entirety.
        if columns is not None:
            raise TypeError(
                "cannot pass a column specification when reading "
                "a Fixed format store. this store must be selected in its entirety"
            )
        if where is not None:
            raise TypeError(
                "cannot pass a where specification when reading "
                "from a Fixed format store. this store must be selected in its entirety"
            )

    @property
    def is_exists(self) -> bool:
        return True

    def set_attrs(self) -> None:
        """set our object attributes"""
        self.attrs.encoding = self.encoding
        self.attrs.errors = self.errors

    def get_attrs(self) -> None:
        """retrieve our attributes"""
        self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
        self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict"))
        # hydrate subclass-declared attributes (e.g. "name", "ndim")
        for n in self.attributes:
            setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))

    # error: Signature of "write" incompatible with supertype "Fixed"
    def write(self, obj, **kwargs) -> None:  # type: ignore[override]
        self.set_attrs()

    def read_array(self, key: str, start: int | None = None, stop: int | None = None):
        """read an array for the specified node (off of group"""
        import tables

        node = getattr(self.group, key)
        attrs = node._v_attrs

        transposed = getattr(attrs, "transposed", False)

        if isinstance(node, tables.VLArray):
            # variable-length array: single row holding the whole object array
            ret = node[0][start:stop]
        else:
            dtype = _ensure_decoded(getattr(attrs, "value_type", None))
            shape = getattr(attrs, "shape", None)

            if shape is not None:
                # length 0 axis
                ret = np.empty(shape, dtype=dtype)
            else:
                ret = node[start:stop]

            if dtype == "datetime64":
                # reconstruct a timezone if indicated
                tz = getattr(attrs, "tz", None)
                ret = _set_tz(ret, tz, coerce=True)
            elif dtype == "timedelta64":
                ret = np.asarray(ret, dtype="m8[ns]")

        if transposed:
            return ret.T
        else:
            return ret

    def read_index(
        self, key: str, start: int | None = None, stop: int | None = None
    ) -> Index:
        # "variety" attr (written by write_index) decides the reader used
        variety = _ensure_decoded(getattr(self.attrs, f"{key}_variety"))

        if variety == "multi":
            return self.read_multi_index(key, start=start, stop=stop)
        elif variety == "regular":
            node = getattr(self.group, key)
            index = self.read_index_node(node, start=start, stop=stop)
            return index
        else:  # pragma: no cover
            raise TypeError(f"unrecognized index variety: {variety}")

    def write_index(self, key: str, index: Index) -> None:
        if isinstance(index, MultiIndex):
            setattr(self.attrs, f"{key}_variety", "multi")
            self.write_multi_index(key, index)
        else:
            setattr(self.attrs, f"{key}_variety", "regular")
            converted = _convert_index("index", index, self.encoding, self.errors)

            self.write_array(key, converted.values)

            node = getattr(self.group, key)
            node._v_attrs.kind = converted.kind
            node._v_attrs.name = index.name

            if isinstance(index, (DatetimeIndex, PeriodIndex)):
                node._v_attrs.index_class = self._class_to_alias(type(index))

            if isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
                node._v_attrs.freq = index.freq

            if isinstance(index, DatetimeIndex) and index.tz is not None:
                node._v_attrs.tz = _get_tz(index.tz)

    def write_multi_index(self, key: str, index: MultiIndex) -> None:
        # Each level and its codes are stored as separate sibling nodes.
        setattr(self.attrs, f"{key}_nlevels", index.nlevels)

        for i, (lev, level_codes, name) in enumerate(
            zip(index.levels, index.codes, index.names)
        ):
            # write the level
            if is_extension_array_dtype(lev):
                raise NotImplementedError(
                    "Saving a MultiIndex with an extension dtype is not supported."
                )
            level_key = f"{key}_level{i}"
            conv_level = _convert_index(level_key, lev, self.encoding, self.errors)
            self.write_array(level_key, conv_level.values)
            node = getattr(self.group, level_key)
            node._v_attrs.kind = conv_level.kind
            node._v_attrs.name = name

            # write the name
            # NOTE(review): the attr name embeds the level *name* rather than
            # the level number — looks odd but matches the read path; confirm.
            setattr(node._v_attrs, f"{key}_name{name}", name)

            # write the labels
            label_key = f"{key}_label{i}"
            self.write_array(label_key, level_codes)

    def read_multi_index(
        self, key: str, start: int | None = None, stop: int | None = None
    ) -> MultiIndex:
        nlevels = getattr(self.attrs, f"{key}_nlevels")

        levels = []
        codes = []
        names: list[Hashable] = []
        for i in range(nlevels):
            level_key = f"{key}_level{i}"
            node = getattr(self.group, level_key)
            lev = self.read_index_node(node, start=start, stop=stop)
            levels.append(lev)
            names.append(lev.name)

            label_key = f"{key}_label{i}"
            level_codes = self.read_array(label_key, start=start, stop=stop)
            codes.append(level_codes)

        return MultiIndex(
            levels=levels, codes=codes, names=names, verify_integrity=True
        )

    def read_index_node(
        self, node: Node, start: int | None = None, stop: int | None = None
    ) -> Index:
        data = node[start:stop]
        # If the index was an empty array write_array_empty() will
        # have written a sentinel. Here we replace it with the original.
        if "shape" in node._v_attrs and np.prod(node._v_attrs.shape) == 0:
            data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type)
        kind = _ensure_decoded(node._v_attrs.kind)
        name = None

        if "name" in node._v_attrs:
            name = _ensure_str(node._v_attrs.name)
            name = _ensure_decoded(name)

        attrs = node._v_attrs
        factory, kwargs = self._get_index_factory(attrs)

        if kind in ("date", "object"):
            index = factory(
                _unconvert_index(
                    data, kind, encoding=self.encoding, errors=self.errors
                ),
                dtype=object,
                **kwargs,
            )
        else:
            index = factory(
                _unconvert_index(
                    data, kind, encoding=self.encoding, errors=self.errors
                ),
                **kwargs,
            )

        index.name = name

        return index

    def write_array_empty(self, key: str, value: ArrayLike) -> None:
        """write a 0-len array"""
        # ugly hack for length 0 axes
        arr = np.empty((1,) * value.ndim)
        self._handle.create_array(self.group, key, arr)
        node = getattr(self.group, key)
        # record the real (empty) shape/dtype so the read path can restore it
        node._v_attrs.value_type = str(value.dtype)
        node._v_attrs.shape = value.shape

    def write_array(
        self, key: str, obj: AnyArrayLike, items: Index | None = None
    ) -> None:
        # TODO: we only have a few tests that get here, the only EA
        #  that gets passed is DatetimeArray, and we never have
        #  both self._filters and EA

        value = extract_array(obj, extract_numpy=True)

        if key in self.group:
            self._handle.remove_node(self.group, key)

        # Transform needed to interface with pytables row/col notation
        empty_array = value.size == 0
        transposed = False

        if is_categorical_dtype(value.dtype):
            raise NotImplementedError(
                "Cannot store a category dtype in a HDF5 dataset that uses format="
                '"fixed". Use format="table".'
            )
        if not empty_array:
            if hasattr(value, "T"):
                # ExtensionArrays (1d) may not have transpose.
                value = value.T
                transposed = True

        atom = None
        if self._filters is not None:
            with suppress(ValueError):
                # get the atom for this datatype
                atom = _tables().Atom.from_dtype(value.dtype)

        if atom is not None:
            # We only get here if self._filters is non-None and
            #  the Atom.from_dtype call succeeded

            # create an empty chunked array and fill it from value
            if not empty_array:
                ca = self._handle.create_carray(
                    self.group, key, atom, value.shape, filters=self._filters
                )
                ca[:] = value

            else:
                self.write_array_empty(key, value)

        elif value.dtype.type == np.object_:
            # infer the type, warn if we have a non-string type here (for
            # performance)
            inferred_type = lib.infer_dtype(value, skipna=False)
            if empty_array:
                pass
            elif inferred_type == "string":
                pass
            else:
                ws = performance_doc % (inferred_type, key, items)
                warnings.warn(ws, PerformanceWarning, stacklevel=find_stack_level())

            vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom())
            vlarr.append(value)

        elif is_datetime64_dtype(value.dtype):
            # store the raw int64 nanoseconds; value_type drives decode on read
            self._handle.create_array(self.group, key, value.view("i8"))
            getattr(self.group, key)._v_attrs.value_type = "datetime64"
        elif is_datetime64tz_dtype(value.dtype):
            # store as UTC
            # with a zone

            # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
            # attribute "asi8"
            self._handle.create_array(
                self.group, key, value.asi8  # type: ignore[union-attr]
            )

            node = getattr(self.group, key)
            # error: Item "ExtensionArray" of "Union[Any, ExtensionArray]" has no
            # attribute "tz"
            node._v_attrs.tz = _get_tz(value.tz)  # type: ignore[union-attr]
            node._v_attrs.value_type = "datetime64"
        elif is_timedelta64_dtype(value.dtype):
            self._handle.create_array(self.group, key, value.view("i8"))
            getattr(self.group, key)._v_attrs.value_type = "timedelta64"
        elif empty_array:
            self.write_array_empty(key, value)
        else:
            self._handle.create_array(self.group, key, value)

        getattr(self.group, key)._v_attrs.transposed = transposed
class SeriesFixed(GenericFixed):
    """Fixed-format storer for a Series: an 'index' node plus a 'values' node."""

    pandas_kind = "series"
    attributes = ["name"]

    name: Hashable

    @property
    def shape(self):
        # Length of the stored values node as a 1-tuple, or None when the
        # node is missing or un-sized.
        try:
            nrows = len(self.group.values)
        except (TypeError, AttributeError):
            return None
        return (nrows,)

    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ) -> Series:
        # Fixed stores reject column/where selections.
        self.validate_read(columns, where)
        idx = self.read_index("index", start=start, stop=stop)
        vals = self.read_array("values", start=start, stop=stop)
        return Series(vals, index=idx, name=self.name)

    # error: Signature of "write" incompatible with supertype "Fixed"
    def write(self, obj, **kwargs) -> None:  # type: ignore[override]
        super().write(obj, **kwargs)
        self.write_index("index", obj.index)
        self.write_array("values", obj)
        self.attrs.name = obj.name
class BlockManagerFixed(GenericFixed):
    # Fixed-format storer for block-manager-backed objects (DataFrame).
    attributes = ["ndim", "nblocks"]

    nblocks: int

    @property
    def shape(self) -> Shape | None:
        try:
            ndim = self.ndim

            # items
            items = 0
            for i in range(self.nblocks):
                node = getattr(self.group, f"block{i}_items")
                shape = getattr(node, "shape", None)
                if shape is not None:
                    items += shape[0]

            # data shape
            node = self.group.block0_values
            shape = getattr(node, "shape", None)
            if shape is not None:
                shape = list(shape[0 : (ndim - 1)])
            else:
                shape = []

            shape.append(items)

            return shape
        except AttributeError:
            # a required node/attr is missing -> no shape available
            return None

    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ) -> DataFrame:
        # start, stop applied to rows, so 0th axis only
        self.validate_read(columns, where)
        select_axis = self.obj_type()._get_block_manager_axis(0)

        axes = []
        for i in range(self.ndim):
            _start, _stop = (start, stop) if i == select_axis else (None, None)
            ax = self.read_index(f"axis{i}", start=_start, stop=_stop)
            axes.append(ax)

        items = axes[0]
        dfs = []

        for i in range(self.nblocks):
            blk_items = self.read_index(f"block{i}_items")
            # NOTE(review): _start/_stop here are whatever the LAST iteration
            # of the axis loop left behind (and `columns` below shadows the
            # parameter) — works because select_axis is last, but fragile.
            values = self.read_array(f"block{i}_values", start=_start, stop=_stop)

            columns = items[items.get_indexer(blk_items)]
            df = DataFrame(values.T, columns=columns, index=axes[1])
            dfs.append(df)

        if len(dfs) > 0:
            out = concat(dfs, axis=1)
            out = out.reindex(columns=items, copy=False)
            return out

        return DataFrame(columns=axes[0], index=axes[1])

    # error: Signature of "write" incompatible with supertype "Fixed"
    def write(self, obj, **kwargs) -> None:  # type: ignore[override]
        super().write(obj, **kwargs)

        # TODO(ArrayManager) HDFStore relies on accessing the blocks
        if isinstance(obj._mgr, ArrayManager):
            obj = obj._as_manager("block")

        data = obj._mgr
        if not data.is_consolidated():
            data = data.consolidate()

        self.attrs.ndim = data.ndim
        for i, ax in enumerate(data.axes):
            if i == 0 and (not ax.is_unique):
                raise ValueError("Columns index has to be unique for fixed format")
            self.write_index(f"axis{i}", ax)

        # Supporting mixed-type DataFrame objects...nontrivial
        self.attrs.nblocks = len(data.blocks)
        for i, blk in enumerate(data.blocks):
            # I have no idea why, but writing values before items fixed #2299
            blk_items = data.items.take(blk.mgr_locs)
            self.write_array(f"block{i}_values", blk.values, items=blk_items)
            self.write_index(f"block{i}_items", blk_items)
class FrameFixed(BlockManagerFixed):
    # Fixed-format storer specialized for DataFrame objects.
    pandas_kind = "frame"
    obj_type = DataFrame
class Table(Fixed):
    """
    represent a table:
    facilitate read/write of various types of tables

    Attrs in Table Node
    -------------------
    These are attributes that are stored in the main table node, they are
    necessary to recreate these tables when read back in.

    index_axes    : a list of tuples of the (original indexing axis and
        index column)
    non_index_axes: a list of tuples of the (original index axis and
        columns on a non-indexing axis)
    values_axes   : a list of the columns which comprise the data of this
        table
    data_columns  : a list of the columns that we are allowing indexing
        (these become single columns in values_axes)
    nan_rep       : the string to use for nan representations for string
        objects
    levels        : the names of levels
    metadata      : the names of the metadata columns
    """

    pandas_kind = "wide_table"
    format_type: str = "table"  # GH#30962 needed by dask
    table_type: str
    # levels is the int 1 for a flat index, or a list of names for a MultiIndex
    levels: int | list[Hashable] = 1
    is_table = True

    metadata: list
    def __init__(
        self,
        parent: HDFStore,
        group: Node,
        encoding: str | None = None,
        errors: str = "strict",
        index_axes: list[IndexCol] | None = None,
        non_index_axes: list[tuple[AxisInt, Any]] | None = None,
        values_axes: list[DataCol] | None = None,
        data_columns: list | None = None,
        info: dict | None = None,
        nan_rep=None,
    ) -> None:
        """Initialize the table storer; all axis metadata defaults to empty."""
        super().__init__(parent, group, encoding=encoding, errors=errors)
        # use fresh containers rather than shared mutable defaults
        self.index_axes = index_axes or []
        self.non_index_axes = non_index_axes or []
        self.values_axes = values_axes or []
        self.data_columns = data_columns or []
        self.info = info or {}
        self.nan_rep = nan_rep
@property
def table_type_short(self) -> str:
return self.table_type.split("_")[0]
    def __repr__(self) -> str:
        """return a pretty representation of myself"""
        # populate axis metadata so nrows/ncols below are meaningful
        self.infer_axes()
        jdc = ",".join(self.data_columns) if len(self.data_columns) else ""
        dc = f",dc->[{jdc}]"

        ver = ""
        if self.is_old_version:
            jver = ".".join([str(x) for x in self.version])
            ver = f"[{jver}]"

        jindex_axes = ",".join([a.name for a in self.index_axes])
        return (
            f"{self.pandas_type:12.12}{ver} "
            f"(typ->{self.table_type_short},nrows->{self.nrows},"
            f"ncols->{self.ncols},indexers->[{jindex_axes}]{dc})"
        )
def __getitem__(self, c: str):
"""return the axis for c"""
for a in self.axes:
if c == a.name:
return a
return None
    def validate(self, other) -> None:
        """validate against an existing table"""
        if other is None:
            return

        if other.table_type != self.table_type:
            raise TypeError(
                "incompatible table_type with existing "
                f"[{other.table_type} - {self.table_type}]"
            )

        # all axis specifications must match the existing table exactly
        for c in ["index_axes", "non_index_axes", "values_axes"]:
            sv = getattr(self, c, None)
            ov = getattr(other, c, None)
            if sv != ov:
                # show the error for the specific axes
                # Argument 1 to "enumerate" has incompatible type
                # "Optional[Any]"; expected "Iterable[Any]"  [arg-type]
                for i, sax in enumerate(sv):  # type: ignore[arg-type]
                    # Value of type "Optional[Any]" is not indexable  [index]
                    oax = ov[i]  # type: ignore[index]
                    if sax != oax:
                        raise ValueError(
                            f"invalid combination of [{c}] on appending data "
                            f"[{sax}] vs current table [{oax}]"
                        )

                # should never get here
                raise Exception(
                    f"invalid combination of [{c}] on appending data [{sv}] vs "
                    f"current table [{ov}]"
                )
    @property
    def is_multi_index(self) -> bool:
        """the levels attribute is 1 or a list in the case of a multi-index"""
        return isinstance(self.levels, list)

    def validate_multiindex(
        self, obj: DataFrame | Series
    ) -> tuple[DataFrame, list[Hashable]]:
        """
        validate that we can store the multi-index; reset and return the
        new object
        """
        levels = com.fill_missing_names(obj.index.names)
        try:
            # reset_index raises ValueError on name/column collisions
            reset_obj = obj.reset_index()
        except ValueError as err:
            raise ValueError(
                "duplicate names/columns in the multi-index when storing as a table"
            ) from err
        assert isinstance(reset_obj, DataFrame)  # for mypy
        return reset_obj, levels
    @property
    def nrows_expected(self) -> int:
        """based on our axes, compute the expected nrows"""
        return np.prod([i.cvalues.shape[0] for i in self.index_axes])

    @property
    def is_exists(self) -> bool:
        """has this table been created"""
        return "table" in self.group

    @property
    def storable(self):
        # the underlying pytables "table" child node, or None if not created
        return getattr(self.group, "table", None)

    @property
    def table(self):
        """return the table group (this is my storable)"""
        return self.storable

    @property
    def dtype(self):
        return self.table.dtype

    @property
    def description(self):
        return self.table.description

    @property
    def axes(self):
        # iterate the index axes followed by the values axes
        return itertools.chain(self.index_axes, self.values_axes)

    @property
    def ncols(self) -> int:
        """the number of total columns in the values axes"""
        return sum(len(a.values) for a in self.values_axes)

    @property
    def is_transposed(self) -> bool:
        return False

    @property
    def data_orientation(self) -> tuple[int, ...]:
        """return a tuple of my permutated axes, non_indexable at the front"""
        return tuple(
            itertools.chain(
                [int(a[0]) for a in self.non_index_axes],
                [int(a.axis) for a in self.index_axes],
            )
        )
    def queryables(self) -> dict[str, Any]:
        """return a dict of the kinds allowable columns for this object"""
        # mypy doesn't recognize DataFrame._AXIS_NAMES, so we re-write it here
        axis_names = {0: "index", 1: "columns"}

        # compute the values_axes queryables
        d1 = [(a.cname, a) for a in self.index_axes]
        d2 = [(axis_names[axis], None) for axis, values in self.non_index_axes]
        d3 = [
            (v.cname, v) for v in self.values_axes if v.name in set(self.data_columns)
        ]
        return dict(d1 + d2 + d3)

    def index_cols(self):
        """return a list of my index cols"""
        # Note: each `i.cname` below is assured to be a str.
        return [(i.axis, i.cname) for i in self.index_axes]

    def values_cols(self) -> list[str]:
        """return a list of my values cols"""
        return [i.cname for i in self.values_axes]

    def _get_metadata_path(self, key: str) -> str:
        """return the metadata pathname for this key"""
        group = self.group._v_pathname
        return f"{group}/meta/{key}/meta"
    def write_metadata(self, key: str, values: np.ndarray) -> None:
        """
        Write out a metadata array to the key as a Series stored in
        ``table`` format (note: NOT fixed format — see ``format`` below).

        Parameters
        ----------
        key : str
        values : ndarray
        """
        self.parent.put(
            self._get_metadata_path(key),
            Series(values),
            format="table",
            encoding=self.encoding,
            errors=self.errors,
            nan_rep=self.nan_rep,
        )

    def read_metadata(self, key: str):
        """return the meta data array for this key"""
        # only attempt the select when the meta child node actually exists
        if getattr(getattr(self.group, "meta", None), key, None) is not None:
            return self.parent.select(self._get_metadata_path(key))
        return None
    def set_attrs(self) -> None:
        """set our table type & indexables"""
        # everything needed to reconstruct this table on read (see get_attrs)
        self.attrs.table_type = str(self.table_type)
        self.attrs.index_cols = self.index_cols()
        self.attrs.values_cols = self.values_cols()
        self.attrs.non_index_axes = self.non_index_axes
        self.attrs.data_columns = self.data_columns
        self.attrs.nan_rep = self.nan_rep
        self.attrs.encoding = self.encoding
        self.attrs.errors = self.errors
        self.attrs.levels = self.levels
        self.attrs.info = self.info

    def get_attrs(self) -> None:
        """retrieve our attributes"""
        self.non_index_axes = getattr(self.attrs, "non_index_axes", None) or []
        self.data_columns = getattr(self.attrs, "data_columns", None) or []
        self.info = getattr(self.attrs, "info", None) or {}
        self.nan_rep = getattr(self.attrs, "nan_rep", None)
        self.encoding = _ensure_encoding(getattr(self.attrs, "encoding", None))
        self.errors = _ensure_decoded(getattr(self.attrs, "errors", "strict"))
        self.levels: list[Hashable] = getattr(self.attrs, "levels", None) or []
        # split the cached indexables into index vs values axes
        self.index_axes = [a for a in self.indexables if a.is_an_indexable]
        self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
    def validate_version(self, where=None) -> None:
        """are we trying to operate on an old version?"""
        # only warn when a selection is actually being attempted
        if where is not None:
            if self.is_old_version:
                ws = incompatibility_doc % ".".join([str(x) for x in self.version])
                warnings.warn(
                    ws,
                    IncompatibilityWarning,
                    stacklevel=find_stack_level(),
                )

    def validate_min_itemsize(self, min_itemsize) -> None:
        """
        validate the min_itemsize doesn't contain items that are not in the
        axes this needs data_columns to be defined
        """
        if min_itemsize is None:
            return
        # a scalar min_itemsize applies to everything; nothing to validate
        if not isinstance(min_itemsize, dict):
            return

        q = self.queryables()
        for k in min_itemsize:
            # ok, apply generally
            if k == "values":
                continue
            if k not in q:
                raise ValueError(
                    f"min_itemsize has the key [{k}] which is not an axis or "
                    "data_column"
                )
    @cache_readonly
    def indexables(self):
        """create/cache the indexables if they don't exist"""
        _indexables = []

        desc = self.description
        table_attrs = self.table.attrs

        # Note: each of the `name` kwargs below are str, ensured
        #  by the definition in index_cols.
        # index columns
        for i, (axis, name) in enumerate(self.attrs.index_cols):
            atom = getattr(desc, name)
            md = self.read_metadata(name)
            # a stored metadata array implies a categorical column
            meta = "category" if md is not None else None

            kind_attr = f"{name}_kind"
            kind = getattr(table_attrs, kind_attr, None)

            index_col = IndexCol(
                name=name,
                axis=axis,
                pos=i,
                kind=kind,
                typ=atom,
                table=self.table,
                meta=meta,
                metadata=md,
            )
            _indexables.append(index_col)

        # values columns
        dc = set(self.data_columns)
        base_pos = len(_indexables)

        def f(i, c):
            # build one values-axis column; data_columns get the indexable class
            assert isinstance(c, str)
            klass = DataCol
            if c in dc:
                klass = DataIndexableCol

            atom = getattr(desc, c)
            adj_name = _maybe_adjust_name(c, self.version)

            # TODO: why kind_attr here?
            values = getattr(table_attrs, f"{adj_name}_kind", None)
            dtype = getattr(table_attrs, f"{adj_name}_dtype", None)
            # Argument 1 to "_dtype_to_kind" has incompatible type
            # "Optional[Any]"; expected "str"  [arg-type]
            kind = _dtype_to_kind(dtype)  # type: ignore[arg-type]

            md = self.read_metadata(c)
            # TODO: figure out why these two versions of `meta` dont always match.
            #  meta = "category" if md is not None else None
            meta = getattr(table_attrs, f"{adj_name}_meta", None)

            obj = klass(
                name=adj_name,
                cname=c,
                values=values,
                kind=kind,
                pos=base_pos + i,
                typ=atom,
                table=self.table,
                meta=meta,
                metadata=md,
                dtype=dtype,
            )
            return obj

        # Note: the definition of `values_cols` ensures that each
        # `c` below is a str.
        _indexables.extend([f(i, c) for i, c in enumerate(self.attrs.values_cols)])

        return _indexables
    def create_index(
        self, columns=None, optlevel=None, kind: str | None = None
    ) -> None:
        """
        Create a pytables index on the specified columns.

        Parameters
        ----------
        columns : None, bool, or listlike[str]
            Indicate which columns to create an index on.

            * False : Do not create any indexes.
            * True : Create indexes on all columns.
            * None : Create indexes on all columns.
            * listlike : Create indexes on the given columns.

        optlevel : int or None, default None
            Optimization level, if None, pytables defaults to 6.
        kind : str or None, default None
            Kind of index, if None, pytables defaults to "medium".

        Raises
        ------
        TypeError if trying to create an index on a complex-type column.

        Notes
        -----
        Cannot index Time64Col or ComplexCol.
        Pytables must be >= 3.0.
        """
        if not self.infer_axes():
            return
        if columns is False:
            return

        # index all indexables and data_columns
        if columns is None or columns is True:
            columns = [a.cname for a in self.axes if a.is_data_indexable]
        if not isinstance(columns, (tuple, list)):
            columns = [columns]

        kw = {}
        if optlevel is not None:
            kw["optlevel"] = optlevel
        if kind is not None:
            kw["kind"] = kind

        table = self.table
        for c in columns:
            v = getattr(table.cols, c, None)
            if v is not None:
                # remove the index if the kind/optlevel have changed
                if v.is_indexed:
                    index = v.index
                    cur_optlevel = index.optlevel
                    cur_kind = index.kind

                    if kind is not None and cur_kind != kind:
                        v.remove_index()
                    else:
                        # keep the existing kind for the (possible) re-create
                        kw["kind"] = cur_kind

                    if optlevel is not None and cur_optlevel != optlevel:
                        v.remove_index()
                    else:
                        kw["optlevel"] = cur_optlevel

                # create the index
                if not v.is_indexed:
                    if v.type.startswith("complex"):
                        raise TypeError(
                            "Columns containing complex values can be stored but "
                            "cannot be indexed when using table format. Either use "
                            "fixed format, set index=False, or do not include "
                            "the columns containing complex values to "
                            "data_columns when initializing the table."
                        )
                    v.create_index(**kw)
            elif c in self.non_index_axes[0][1]:
                # GH 28156
                raise AttributeError(
                    f"column {c} is not a data_column.\n"
                    f"In order to read column {c} you must reload the dataframe \n"
                    f"into HDFStore and include {c} with the data_columns argument."
                )
    def _read_axes(
        self, where, start: int | None = None, stop: int | None = None
    ) -> list[tuple[ArrayLike, ArrayLike]]:
        """
        Create the axes sniffed from the table.

        Parameters
        ----------
        where : ???
        start : int or None, default None
        stop : int or None, default None

        Returns
        -------
        List[Tuple[index_values, column_values]]
        """
        # create the selection
        selection = Selection(self, where=where, start=start, stop=stop)
        values = selection.select()

        results = []
        # convert the data
        for a in self.axes:
            a.set_info(self.info)
            res = a.convert(
                values,
                nan_rep=self.nan_rep,
                encoding=self.encoding,
                errors=self.errors,
            )
            results.append(res)

        return results

    @classmethod
    def get_object(cls, obj, transposed: bool):
        """return the data for this obj"""
        # identity here; subclasses override when the object needs reshaping
        return obj
def validate_data_columns(self, data_columns, min_itemsize, non_index_axes):
"""
take the input data_columns and min_itemize and create a data
columns spec
"""
if not len(non_index_axes):
return []
axis, axis_labels = non_index_axes[0]
info = self.info.get(axis, {})
if info.get("type") == "MultiIndex" and data_columns:
raise ValueError(
f"cannot use a multi-index on axis [{axis}] with "
f"data_columns {data_columns}"
)
# evaluate the passed data_columns, True == use all columns
# take only valid axis labels
if data_columns is True:
data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns = list(data_columns) # ensure we do not modify
data_columns.extend(
[
k
for k in min_itemsize.keys()
if k != "values" and k not in existing_data_columns
]
)
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels]
    def _create_axes(
        self,
        axes,
        obj: DataFrame,
        validate: bool = True,
        nan_rep=None,
        data_columns=None,
        min_itemsize=None,
    ):
        """
        Create and return the axes.

        Parameters
        ----------
        axes: list or None
            The names or numbers of the axes to create.
        obj : DataFrame
            The object to create axes on.
        validate: bool, default True
            Whether to validate the obj against an existing object already written.
        nan_rep :
            A value to use for string column nan_rep.
        data_columns : List[str], True, or None, default None
            Specify the columns that we want to create to allow indexing on.

            * True : Use all available columns.
            * None : Use no columns.
            * List[str] : Use the specified columns.

        min_itemsize: Dict[str, int] or None, default None
            The min itemsize for a column in bytes.
        """
        if not isinstance(obj, DataFrame):
            group = self.group._v_name
            raise TypeError(
                f"cannot properly create the storer for: [group->{group},"
                f"value->{type(obj)}]"
            )
        # set the default axes if needed
        if axes is None:
            axes = [0]
        # map axes to numbers
        axes = [obj._get_axis_number(a) for a in axes]
        # do we have an existing table (if so, use its axes & data_columns)
        if self.infer_axes():
            table_exists = True
            axes = [a.axis for a in self.index_axes]
            data_columns = list(self.data_columns)
            nan_rep = self.nan_rep
            # TODO: do we always have validate=True here?
        else:
            table_exists = False
        new_info = self.info
        assert self.ndim == 2  # with next check, we must have len(axes) == 1
        # currently support on ndim-1 axes
        if len(axes) != self.ndim - 1:
            raise ValueError(
                "currently only support ndim-1 indexers in an AppendableTable"
            )
        # create according to the new data
        new_non_index_axes: list = []
        # nan_representation
        if nan_rep is None:
            nan_rep = "nan"
        # We construct the non-index-axis first, since that alters new_info
        idx = [x for x in [0, 1] if x not in axes][0]
        a = obj.axes[idx]
        # we might be able to change the axes on the appending data if necessary
        append_axis = list(a)
        if table_exists:
            indexer = len(new_non_index_axes)  # i.e. 0
            exist_axis = self.non_index_axes[indexer][1]
            if not array_equivalent(np.array(append_axis), np.array(exist_axis)):
                # ahah! -> reindex: same labels in a different order, so we
                # can keep the existing layout
                if array_equivalent(
                    np.array(sorted(append_axis)), np.array(sorted(exist_axis))
                ):
                    append_axis = exist_axis
        # the non_index_axes info
        info = new_info.setdefault(idx, {})
        info["names"] = list(a.names)
        info["type"] = type(a).__name__
        new_non_index_axes.append((idx, append_axis))
        # Now we can construct our new index axis
        idx = axes[0]
        a = obj.axes[idx]
        axis_name = obj._get_axis_name(idx)
        new_index = _convert_index(axis_name, a, self.encoding, self.errors)
        new_index.axis = idx
        # Because we are always 2D, there is only one new_index, so
        # we know it will have pos=0
        new_index.set_pos(0)
        new_index.update_info(new_info)
        new_index.maybe_set_size(min_itemsize)  # check for column conflicts
        new_index_axes = [new_index]
        j = len(new_index_axes)  # i.e. 1
        assert j == 1
        # reindex by our non_index_axes & compute data_columns
        assert len(new_non_index_axes) == 1
        for a in new_non_index_axes:
            obj = _reindex_axis(obj, a[0], a[1])
        transposed = new_index.axis == 1
        # figure out data_columns and get out blocks
        data_columns = self.validate_data_columns(
            data_columns, min_itemsize, new_non_index_axes
        )
        frame = self.get_object(obj, transposed)._consolidate()
        blocks, blk_items = self._get_blocks_and_items(
            frame, table_exists, new_non_index_axes, self.values_axes, data_columns
        )
        # add my values: one DataCol (or DataIndexableCol) per block
        vaxes = []
        for i, (blk, b_items) in enumerate(zip(blocks, blk_items)):
            # shape of the data column are the indexable axes
            klass = DataCol
            name = None
            # we have a data_column
            if data_columns and len(b_items) == 1 and b_items[0] in data_columns:
                klass = DataIndexableCol
                name = b_items[0]
                if not (name is None or isinstance(name, str)):
                    # TODO: should the message here be more specifically non-str?
                    raise ValueError("cannot have non-object label DataIndexableCol")
            # make sure that we match up the existing columns
            # if we have an existing table
            existing_col: DataCol | None
            if table_exists and validate:
                try:
                    existing_col = self.values_axes[i]
                except (IndexError, KeyError) as err:
                    raise ValueError(
                        f"Incompatible appended table [{blocks}]"
                        f"with existing table [{self.values_axes}]"
                    ) from err
            else:
                existing_col = None
            new_name = name or f"values_block_{i}"
            data_converted = _maybe_convert_for_string_atom(
                new_name,
                blk.values,
                existing_col=existing_col,
                min_itemsize=min_itemsize,
                nan_rep=nan_rep,
                encoding=self.encoding,
                errors=self.errors,
                columns=b_items,
            )
            adj_name = _maybe_adjust_name(new_name, self.version)
            typ = klass._get_atom(data_converted)
            kind = _dtype_to_kind(data_converted.dtype.name)
            tz = None
            if getattr(data_converted, "tz", None) is not None:
                tz = _get_tz(data_converted.tz)
            meta = metadata = ordered = None
            if is_categorical_dtype(data_converted.dtype):
                # categories are stored separately as metadata
                ordered = data_converted.ordered
                meta = "category"
                metadata = np.array(data_converted.categories, copy=False).ravel()
            data, dtype_name = _get_data_and_dtype_name(data_converted)
            col = klass(
                name=adj_name,
                cname=new_name,
                values=list(b_items),
                typ=typ,
                pos=j,
                kind=kind,
                tz=tz,
                ordered=ordered,
                meta=meta,
                metadata=metadata,
                dtype=dtype_name,
                data=data,
            )
            col.update_info(new_info)
            vaxes.append(col)
            j += 1
        dcs = [col.name for col in vaxes if col.is_data_indexable]
        new_table = type(self)(
            parent=self.parent,
            group=self.group,
            encoding=self.encoding,
            errors=self.errors,
            index_axes=new_index_axes,
            non_index_axes=new_non_index_axes,
            values_axes=vaxes,
            data_columns=dcs,
            info=new_info,
            nan_rep=nan_rep,
        )
        if hasattr(self, "levels"):
            # TODO: get this into constructor, only for appropriate subclass
            new_table.levels = self.levels
        new_table.validate_min_itemsize(min_itemsize)
        if validate and table_exists:
            new_table.validate(self)
        return new_table
    @staticmethod
    def _get_blocks_and_items(
        frame: DataFrame,
        table_exists: bool,
        new_non_index_axes,
        values_axes,
        data_columns,
    ):
        # Helper to clarify non-state-altering parts of _create_axes.
        # Returns the internal Blocks of `frame` together with, for each
        # block, the Index of column labels that block holds.
        # TODO(ArrayManager) HDFStore relies on accessing the blocks
        if isinstance(frame._mgr, ArrayManager):
            frame = frame._as_manager("block")
        def get_blk_items(mgr):
            return [mgr.items.take(blk.mgr_locs) for blk in mgr.blocks]
        mgr = frame._mgr
        mgr = cast(BlockManager, mgr)
        blocks: list[Block] = list(mgr.blocks)
        blk_items: list[Index] = get_blk_items(mgr)
        if len(data_columns):
            # each data column becomes its own single-column block; the
            # remaining (non-data) columns stay grouped together
            axis, axis_labels = new_non_index_axes[0]
            new_labels = Index(axis_labels).difference(Index(data_columns))
            mgr = frame.reindex(new_labels, axis=axis)._mgr
            mgr = cast(BlockManager, mgr)
            blocks = list(mgr.blocks)
            blk_items = get_blk_items(mgr)
            for c in data_columns:
                mgr = frame.reindex([c], axis=axis)._mgr
                mgr = cast(BlockManager, mgr)
                blocks.extend(mgr.blocks)
                blk_items.extend(get_blk_items(mgr))
        # reorder the blocks in the same order as the existing table if we can
        if table_exists:
            by_items = {
                tuple(b_items.tolist()): (b, b_items)
                for b, b_items in zip(blocks, blk_items)
            }
            new_blocks: list[Block] = []
            new_blk_items = []
            for ea in values_axes:
                items = tuple(ea.values)
                try:
                    b, b_items = by_items.pop(items)
                    new_blocks.append(b)
                    new_blk_items.append(b_items)
                except (IndexError, KeyError) as err:
                    jitems = ",".join([pprint_thing(item) for item in items])
                    raise ValueError(
                        f"cannot match existing table structure for [{jitems}] "
                        "on appending data"
                    ) from err
            blocks = new_blocks
            blk_items = new_blk_items
        return blocks, blk_items
    def process_axes(self, obj, selection: Selection, columns=None) -> DataFrame:
        """process axes filters: apply the selection's value filters and
        reorder/limit obj to the requested columns"""
        # make a copy to avoid side effects
        if columns is not None:
            columns = list(columns)
        # make sure to include levels if we have them
        if columns is not None and self.is_multi_index:
            assert isinstance(self.levels, list)  # assured by is_multi_index
            for n in self.levels:
                if n not in columns:
                    columns.insert(0, n)
        # reorder by any non_index_axes & limit to the select columns
        for axis, labels in self.non_index_axes:
            obj = _reindex_axis(obj, axis, labels, columns)
        def process_filter(field, filt, op):
            # apply one (field, op, filter-values) triple to obj
            for axis_name in obj._AXIS_ORDERS:
                axis_number = obj._get_axis_number(axis_name)
                axis_values = obj._get_axis(axis_name)
                assert axis_number is not None
                # see if the field is the name of an axis
                if field == axis_name:
                    # if we have a multi-index, then need to include
                    # the levels
                    if self.is_multi_index:
                        filt = filt.union(Index(self.levels))
                    takers = op(axis_values, filt)
                    return obj.loc(axis=axis_number)[takers]
                # this might be the name of a file IN an axis
                elif field in axis_values:
                    # we need to filter on this dimension
                    values = ensure_index(getattr(obj, field).values)
                    filt = ensure_index(filt)
                    # hack until we support reversed dim flags
                    if isinstance(obj, DataFrame):
                        axis_number = 1 - axis_number
                    takers = op(values, filt)
                    return obj.loc(axis=axis_number)[takers]
            raise ValueError(f"cannot find the field [{field}] for filtering!")
        # apply the selection filters (but keep in the same order)
        if selection.filter is not None:
            for field, op, filt in selection.filter.format():
                obj = process_filter(field, filt, op)
        return obj
def create_description(
self,
complib,
complevel: int | None,
fletcher32: bool,
expectedrows: int | None,
) -> dict[str, Any]:
"""create the description of the table from the axes & values"""
# provided expected rows if its passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = {"name": "table", "expectedrows": expectedrows}
# description from the axes & values
d["description"] = {a.cname: a.typ for a in self.axes}
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel,
complib=complib,
fletcher32=fletcher32 or self._fletcher32,
)
d["filters"] = filters
elif self._filters is not None:
d["filters"] = self._filters
return d
    def read_coordinates(
        self, where=None, start: int | None = None, stop: int | None = None
    ):
        """
        select coordinates (row numbers) from a table; return the
        coordinates object
        """
        # validate the version
        self.validate_version(where)
        # infer the data kind
        if not self.infer_axes():
            return False
        # create the selection
        selection = Selection(self, where=where, start=start, stop=stop)
        coords = selection.select_coords()
        if selection.filter is not None:
            # refine coordinates by applying each value filter against the
            # relevant column, read only over the candidate row range
            for field, op, filt in selection.filter.format():
                data = self.read_column(
                    field, start=coords.min(), stop=coords.max() + 1
                )
                coords = coords[op(data.iloc[coords - coords.min()], filt).values]
        return Index(coords)
    def read_column(
        self,
        column: str,
        where=None,
        start: int | None = None,
        stop: int | None = None,
    ):
        """
        return a single column from the table, generally only indexables
        are interesting

        Raises
        ------
        TypeError : if a where clause is supplied (not supported here)
        ValueError : if the column exists but is not data-indexable
        KeyError : if the column is not present in the table
        """
        # validate the version
        self.validate_version()
        # infer the data kind
        if not self.infer_axes():
            return False
        if where is not None:
            raise TypeError("read_column does not currently accept a where clause")
        # find the axes
        for a in self.axes:
            if column == a.name:
                if not a.is_data_indexable:
                    raise ValueError(
                        f"column [{column}] can not be extracted individually; "
                        "it is not data indexable"
                    )
                # column must be an indexable or a data column
                c = getattr(self.table.cols, column)
                a.set_info(self.info)
                col_values = a.convert(
                    c[start:stop],
                    nan_rep=self.nan_rep,
                    encoding=self.encoding,
                    errors=self.errors,
                )
                # convert() yields (index_values, column_values); take the
                # column values, restoring any stored timezone
                return Series(_set_tz(col_values[1], a.tz), name=column)
        raise KeyError(f"column [{column}] not found in the table")
class WORMTable(Table):
    """
    a write-once read-many table: this format DOES NOT ALLOW appending to a
    table. writing is a one-time operation the data are stored in a format
    that allows for searching the data on disk
    """

    # format identifier stored in the table attrs
    table_type = "worm"

    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ):
        """
        read the indices and the indexing array, calculate offset rows and return
        """
        raise NotImplementedError("WORMTable needs to implement read")

    def write(self, **kwargs) -> None:
        """
        write in a format that we can search later on (but cannot append
        to): write out the indices and the values using _write_array
        (e.g. a CArray) create an indexing table so that we can search
        """
        raise NotImplementedError("WORMTable needs to implement write")
class AppendableTable(Table):
    """support the new appendable table formats"""

    table_type = "appendable"

    # error: Signature of "write" incompatible with supertype "Fixed"
    def write(  # type: ignore[override]
        self,
        obj,
        axes=None,
        append: bool = False,
        complib=None,
        complevel=None,
        fletcher32=None,
        min_itemsize=None,
        chunksize=None,
        expectedrows=None,
        dropna: bool = False,
        nan_rep=None,
        data_columns=None,
        track_times: bool = True,
    ) -> None:
        """Write (or append) ``obj`` to the table, creating it if needed."""
        if not append and self.is_exists:
            # overwrite: drop the existing table node first
            self._handle.remove_node(self.group, "table")
        # create the axes
        table = self._create_axes(
            axes=axes,
            obj=obj,
            validate=append,
            min_itemsize=min_itemsize,
            nan_rep=nan_rep,
            data_columns=data_columns,
        )
        for a in table.axes:
            a.validate_names()
        if not table.is_exists:
            # create the table
            options = table.create_description(
                complib=complib,
                complevel=complevel,
                fletcher32=fletcher32,
                expectedrows=expectedrows,
            )
            # set the table attributes
            table.set_attrs()
            options["track_times"] = track_times
            # create the table
            table._handle.create_table(table.group, **options)
        # update my info
        table.attrs.info = table.info
        # validate the axes and set the kinds
        for a in table.axes:
            a.validate_and_set(table, append)
        # add the rows
        table.write_data(chunksize, dropna=dropna)

    def write_data(self, chunksize: int | None, dropna: bool = False) -> None:
        """
        we form the data into a 2-d including indexes,values,mask write chunk-by-chunk
        """
        names = self.dtype.names
        nrows = self.nrows_expected
        # if dropna==True, then drop ALL nan rows
        masks = []
        if dropna:
            for a in self.values_axes:
                # figure the mask: only do if we can successfully process this
                # column, otherwise ignore the mask
                mask = isna(a.data).all(axis=0)
                if isinstance(mask, np.ndarray):
                    masks.append(mask.astype("u1", copy=False))
        # consolidate masks: a row is dropped only if every values column
        # flagged it as all-NaN
        if len(masks):
            mask = masks[0]
            for m in masks[1:]:
                mask = mask & m
            mask = mask.ravel()
        else:
            mask = None
        # broadcast the indexes if needed
        indexes = [a.cvalues for a in self.index_axes]
        nindexes = len(indexes)
        assert nindexes == 1, nindexes  # ensures we dont need to broadcast
        # transpose the values so first dimension is last
        # reshape the values if needed
        values = [a.take_data() for a in self.values_axes]
        values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values]
        bvalues = []
        for i, v in enumerate(values):
            new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
            bvalues.append(values[i].reshape(new_shape))
        # write the chunks
        if chunksize is None:
            chunksize = 100000
        rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
        chunks = nrows // chunksize + 1
        for i in range(chunks):
            start_i = i * chunksize
            end_i = min((i + 1) * chunksize, nrows)
            if start_i >= end_i:
                break
            self.write_data_chunk(
                rows,
                indexes=[a[start_i:end_i] for a in indexes],
                mask=mask[start_i:end_i] if mask is not None else None,
                values=[v[start_i:end_i] for v in bvalues],
            )

    def write_data_chunk(
        self,
        rows: np.ndarray,
        indexes: list[np.ndarray],
        mask: npt.NDArray[np.bool_] | None,
        values: list[np.ndarray],
    ) -> None:
        """
        Parameters
        ----------
        rows : an empty memory space where we are putting the chunk
        indexes : an array of the indexes
        mask : an array of the masks
        values : an array of the values
        """
        # 0 len
        for v in values:
            if not np.prod(v.shape):
                return
        nrows = indexes[0].shape[0]
        if nrows != len(rows):
            # the final chunk is usually shorter; size the buffer to fit
            rows = np.empty(nrows, dtype=self.dtype)
        names = self.dtype.names
        nindexes = len(indexes)
        # indexes
        for i, idx in enumerate(indexes):
            rows[names[i]] = idx
        # values
        for i, v in enumerate(values):
            rows[names[i + nindexes]] = v
        # mask
        if mask is not None:
            m = ~mask.ravel().astype(bool, copy=False)
            if not m.all():
                rows = rows[m]
        if len(rows):
            self.table.append(rows)
            self.table.flush()

    def delete(self, where=None, start: int | None = None, stop: int | None = None):
        """Delete rows matching ``where`` (or the start/stop range); return
        the number of rows removed, or None if the table axes cannot be
        inferred."""
        # delete all rows (and return the nrows)
        if where is None or not len(where):
            if start is None and stop is None:
                nrows = self.nrows
                self._handle.remove_node(self.group, recursive=True)
            else:
                # pytables<3.0 would remove a single row with stop=None
                if stop is None:
                    stop = self.nrows
                nrows = self.table.remove_rows(start=start, stop=stop)
                self.table.flush()
            return nrows
        # infer the data kind
        if not self.infer_axes():
            return None
        # create the selection
        table = self.table
        selection = Selection(self, where, start=start, stop=stop)
        values = selection.select_coords()
        # delete the rows in reverse order
        sorted_series = Series(values).sort_values()
        ln = len(sorted_series)
        if ln:
            # construct groups of consecutive rows
            diff = sorted_series.diff()
            groups = list(diff[diff > 1].index)
            # 1 group
            if not len(groups):
                groups = [0]
            # final element
            if groups[-1] != ln:
                groups.append(ln)
            # initial element
            if groups[0] != 0:
                groups.insert(0, 0)
            # we must remove in reverse order!
            pg = groups.pop()
            for g in reversed(groups):
                rows = sorted_series.take(range(g, pg))
                table.remove_rows(
                    start=rows[rows.index[0]], stop=rows[rows.index[-1]] + 1
                )
                pg = g
            self.table.flush()
        # return the number of rows removed
        return ln
class AppendableFrameTable(AppendableTable):
    """support the new appendable table formats"""

    pandas_kind = "frame_table"
    table_type = "appendable_frame"
    ndim = 2
    obj_type: type[DataFrame | Series] = DataFrame

    @property
    def is_transposed(self) -> bool:
        # frames are stored transposed when the index axis is the columns
        return self.index_axes[0].axis == 1

    @classmethod
    def get_object(cls, obj, transposed: bool):
        """these are written transposed"""
        if transposed:
            obj = obj.T
        return obj

    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ):
        """Read a DataFrame back from the table, honoring selection criteria."""
        # validate the version
        self.validate_version(where)
        # infer the data kind
        if not self.infer_axes():
            return None
        result = self._read_axes(where=where, start=start, stop=stop)
        info = (
            self.info.get(self.non_index_axes[0][0], {})
            if len(self.non_index_axes)
            else {}
        )
        inds = [i for i, ax in enumerate(self.axes) if ax is self.index_axes[0]]
        assert len(inds) == 1
        ind = inds[0]
        index = result[ind][0]
        # build one sub-frame per values block, then concat column-wise
        frames = []
        for i, a in enumerate(self.axes):
            if a not in self.values_axes:
                continue
            index_vals, cvalues = result[i]
            # we could have a multi-index constructor here
            # ensure_index doesn't recognized our list-of-tuples here
            if info.get("type") != "MultiIndex":
                cols = Index(index_vals)
            else:
                cols = MultiIndex.from_tuples(index_vals)
            names = info.get("names")
            if names is not None:
                cols.set_names(names, inplace=True)
            if self.is_transposed:
                values = cvalues
                index_ = cols
                cols_ = Index(index, name=getattr(index, "name", None))
            else:
                values = cvalues.T
                index_ = Index(index, name=getattr(index, "name", None))
                cols_ = cols
            # if we have a DataIndexableCol, its shape will only be 1 dim
            if values.ndim == 1 and isinstance(values, np.ndarray):
                values = values.reshape((1, values.shape[0]))
            if isinstance(values, np.ndarray):
                df = DataFrame(values.T, columns=cols_, index=index_)
            elif isinstance(values, Index):
                df = DataFrame(values, columns=cols_, index=index_)
            else:
                # Categorical
                df = DataFrame._from_arrays([values], columns=cols_, index=index_)
            assert (df.dtypes == values.dtype).all(), (df.dtypes, values.dtype)
            frames.append(df)
        if len(frames) == 1:
            df = frames[0]
        else:
            df = concat(frames, axis=1)
        selection = Selection(self, where=where, start=start, stop=stop)
        # apply the selection filters & axis orderings
        df = self.process_axes(df, selection=selection, columns=columns)
        return df
class AppendableSeriesTable(AppendableFrameTable):
    """support the new appendable table formats"""

    pandas_kind = "series_table"
    table_type = "appendable_series"
    ndim = 2
    obj_type = Series

    @property
    def is_transposed(self) -> bool:
        # Series are stored as single-column frames; never transposed.
        return False

    @classmethod
    def get_object(cls, obj, transposed: bool):
        """return the data for this obj (identity; never transposed)"""
        return obj

    def write(self, obj, data_columns=None, **kwargs):
        """we are going to write this as a frame table"""
        if not isinstance(obj, DataFrame):
            name = obj.name or "values"
            obj = obj.to_frame(name)
        # every column becomes a data column so the values stay queryable
        return super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs)

    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ) -> Series:
        """
        Read back a Series previously written with :meth:`write`.

        Parameters
        ----------
        where : selection criteria, optional
        columns : list-like, optional
            Columns to read; MultiIndex level names are included
            automatically when needed.
        start, stop : int, optional
            Row bounds for the read.

        Returns
        -------
        Series
        """
        is_multi_index = self.is_multi_index
        if columns is not None and is_multi_index:
            assert isinstance(self.levels, list)  # needed for mypy
            # Work on a copy: previously the caller's list was mutated in
            # place when the MultiIndex level names were inserted.
            columns = list(columns)
            for n in self.levels:
                if n not in columns:
                    columns.insert(0, n)
        s = super().read(where=where, columns=columns, start=start, stop=stop)
        if is_multi_index:
            s.set_index(self.levels, inplace=True)
        s = s.iloc[:, 0]
        # remove the default name
        if s.name == "values":
            s.name = None
        return s
class AppendableMultiSeriesTable(AppendableSeriesTable):
    """support the new appendable table formats"""

    pandas_kind = "series_table"
    table_type = "appendable_multiseries"

    def write(self, obj, **kwargs):
        """we are going to write this as a frame table"""
        name = obj.name or "values"
        # flatten the MultiIndex into ordinary columns before writing
        newobj, self.levels = self.validate_multiindex(obj)
        assert isinstance(self.levels, list)  # for mypy
        cols = list(self.levels)
        cols.append(name)
        newobj.columns = Index(cols)
        return super().write(obj=newobj, **kwargs)
class GenericTable(AppendableFrameTable):
    """a table that read/writes the generic pytables table format"""

    pandas_kind = "frame_table"
    table_type = "generic_table"
    ndim = 2
    obj_type = DataFrame
    levels: list[Hashable]

    @property
    def pandas_type(self) -> str:
        return self.pandas_kind

    @property
    def storable(self):
        # generic tables may live directly on the group rather than on a
        # 'table' child node
        return getattr(self.group, "table", None) or self.group

    def get_attrs(self) -> None:
        """retrieve our attributes"""
        self.non_index_axes = []
        self.nan_rep = None
        self.levels = []
        self.index_axes = [a for a in self.indexables if a.is_an_indexable]
        self.values_axes = [a for a in self.indexables if not a.is_an_indexable]
        self.data_columns = [a.name for a in self.values_axes]

    @cache_readonly
    def indexables(self):
        """create the indexables from the table description"""
        d = self.description
        # TODO: can we get a typ for this? AFAICT it is the only place
        # where we aren't passing one
        # the index columns is just a simple index
        md = self.read_metadata("index")
        meta = "category" if md is not None else None
        index_col = GenericIndexCol(
            name="index", axis=0, table=self.table, meta=meta, metadata=md
        )
        _indexables: list[GenericIndexCol | GenericDataIndexableCol] = [index_col]
        # every remaining described column becomes a data-indexable column
        for i, n in enumerate(d._v_names):
            assert isinstance(n, str)
            atom = getattr(d, n)
            md = self.read_metadata(n)
            meta = "category" if md is not None else None
            dc = GenericDataIndexableCol(
                name=n,
                pos=i,
                values=[n],
                typ=atom,
                table=self.table,
                meta=meta,
                metadata=md,
            )
            _indexables.append(dc)
        return _indexables

    def write(self, **kwargs):
        # generic tables are read-only
        raise NotImplementedError("cannot write on an generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
    """a frame with a multi-index"""

    table_type = "appendable_multiframe"
    obj_type = DataFrame
    ndim = 2
    # auto-generated level names look like 'level_0', 'level_1', ...
    _re_levels = re.compile(r"^level_\d+$")

    @property
    def table_type_short(self) -> str:
        return "appendable_multi"

    def write(self, obj, data_columns=None, **kwargs):
        """Write a MultiIndex frame; index levels are reset into data columns."""
        if data_columns is None:
            data_columns = []
        elif data_columns is True:
            data_columns = obj.columns.tolist()
        obj, self.levels = self.validate_multiindex(obj)
        assert isinstance(self.levels, list)  # for mypy
        # level columns must be data columns so they can be restored on read
        for n in self.levels:
            if n not in data_columns:
                data_columns.insert(0, n)
        return super().write(obj=obj, data_columns=data_columns, **kwargs)

    def read(
        self,
        where=None,
        columns=None,
        start: int | None = None,
        stop: int | None = None,
    ):
        """Read back and restore the MultiIndex from the level columns."""
        df = super().read(where=where, columns=columns, start=start, stop=stop)
        df = df.set_index(self.levels)
        # remove names for 'level_%d'
        df.index = df.index.set_names(
            [None if self._re_levels.search(name) else name for name in df.index.names]
        )
        return df
def _reindex_axis(
    obj: DataFrame, axis: AxisInt, labels: Index, other=None
) -> DataFrame:
    """
    Reindex `obj` along `axis` to `labels` (optionally intersected with
    `other`), avoiding the reindex entirely when it would be a no-op.
    """
    ax = obj._get_axis(axis)
    labels = ensure_index(labels)
    # try not to reindex even if other is provided
    # if it equals our current index
    if other is not None:
        other = ensure_index(other)
    if (other is None or labels.equals(other)) and labels.equals(ax):
        return obj
    labels = ensure_index(labels.unique())
    if other is not None:
        labels = ensure_index(other.unique()).intersection(labels, sort=False)
    if not labels.equals(ax):
        # select the target labels via .loc, slicing all other dimensions
        slicer: list[slice | Index] = [slice(None, None)] * obj.ndim
        slicer[axis] = labels
        obj = obj.loc[tuple(slicer)]
    return obj
# tz to/from coercion
def _get_tz(tz: tzinfo) -> str | tzinfo:
"""for a tz-aware type, return an encoded zone"""
zone = timezones.get_timezone(tz)
return zone
@overload
def _set_tz(
    values: np.ndarray | Index, tz: str | tzinfo, coerce: bool = False
) -> DatetimeIndex:
    ...


@overload
def _set_tz(values: np.ndarray | Index, tz: None, coerce: bool = False) -> np.ndarray:
    ...


def _set_tz(
    values: np.ndarray | Index, tz: str | tzinfo | None, coerce: bool = False
) -> np.ndarray | DatetimeIndex:
    """
    coerce the values to a DatetimeIndex if tz is set
    preserve the input shape if possible

    Parameters
    ----------
    values : ndarray or Index
    tz : str or tzinfo
    coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
    """
    if isinstance(values, DatetimeIndex):
        # If values is tzaware, the tz gets dropped in the values.ravel()
        # call below (which returns an ndarray). So we are only non-lossy
        # if `tz` matches `values.tz`.
        assert values.tz is None or values.tz == tz
    if tz is not None:
        if isinstance(values, DatetimeIndex):
            name = values.name
            values = values.asi8
        else:
            name = None
            values = values.ravel()
        # stored values are UTC; localize then convert to the target zone
        tz = _ensure_decoded(tz)
        values = DatetimeIndex(values, name=name)
        values = values.tz_localize("UTC").tz_convert(tz)
    elif coerce:
        values = np.asarray(values, dtype="M8[ns]")
    # error: Incompatible return value type (got "Union[ndarray, Index]",
    # expected "Union[ndarray, DatetimeIndex]")
    return values  # type: ignore[return-value]
def _convert_index(name: str, index: Index, encoding: str, errors: str) -> IndexCol:
    """
    Convert a pandas Index into an IndexCol suitable for table storage.
    """
    assert isinstance(name, str)

    index_name = index.name
    # error: Argument 1 to "_get_data_and_dtype_name" has incompatible type "Index";
    # expected "Union[ExtensionArray, ndarray]"
    converted, dtype_name = _get_data_and_dtype_name(index)  # type: ignore[arg-type]
    kind = _dtype_to_kind(dtype_name)
    atom = DataIndexableCol._get_atom(converted)
    if (
        isinstance(index, Int64Index)
        or needs_i8_conversion(index.dtype)
        or is_bool_dtype(index.dtype)
    ):
        # Includes Int64Index, RangeIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex,
        # in which case "kind" is "integer", "integer", "datetime64",
        # "timedelta64", and "integer", respectively.
        return IndexCol(
            name,
            values=converted,
            kind=kind,
            typ=atom,
            freq=getattr(index, "freq", None),
            tz=getattr(index, "tz", None),
            index_name=index_name,
        )
    if isinstance(index, MultiIndex):
        raise TypeError("MultiIndex not supported here!")
    inferred_type = lib.infer_dtype(index, skipna=False)
    # we won't get inferred_type of "datetime64" or "timedelta64" as these
    # would go through the DatetimeIndex/TimedeltaIndex paths above
    values = np.asarray(index)
    if inferred_type == "date":
        # dates are stored as int32 ordinals
        converted = np.asarray([v.toordinal() for v in values], dtype=np.int32)
        return IndexCol(
            name, converted, "date", _tables().Time32Col(), index_name=index_name
        )
    elif inferred_type == "string":
        converted = _convert_string_array(values, encoding, errors)
        itemsize = converted.dtype.itemsize
        return IndexCol(
            name,
            converted,
            "string",
            _tables().StringCol(itemsize),
            index_name=index_name,
        )
    elif inferred_type in ["integer", "floating"]:
        return IndexCol(
            name, values=converted, kind=kind, typ=atom, index_name=index_name
        )
    else:
        # anything else is stored as a generic object atom
        assert isinstance(converted, np.ndarray) and converted.dtype == object
        assert kind == "object", kind
        atom = _tables().ObjectAtom()
        return IndexCol(name, converted, kind, atom, index_name=index_name)
def _unconvert_index(data, kind: str, encoding: str, errors: str) -> np.ndarray | Index:
index: Index | np.ndarray
if kind == "datetime64":
index = DatetimeIndex(data)
elif kind == "timedelta64":
index = TimedeltaIndex(data)
elif kind == "date":
try:
index = np.asarray([date.fromordinal(v) for v in data], dtype=object)
except ValueError:
index = np.asarray([date.fromtimestamp(v) for v in data], dtype=object)
elif kind in ("integer", "float", "bool"):
index = np.asarray(data)
elif kind in ("string"):
index = _unconvert_string_array(
data, nan_rep=None, encoding=encoding, errors=errors
)
elif kind == "object":
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError(f"unrecognized index type {kind}")
return index
def _maybe_convert_for_string_atom(
    name: str,
    bvalues: ArrayLike,
    existing_col,
    min_itemsize,
    nan_rep,
    encoding,
    errors,
    columns: list[str],
):
    """
    If `bvalues` is object dtype, validate that it holds only strings and
    encode it to a fixed-width bytestring array sized to satisfy
    `min_itemsize` and any existing column; otherwise return it unchanged.
    """
    if bvalues.dtype != object:
        return bvalues
    bvalues = cast(np.ndarray, bvalues)
    dtype_name = bvalues.dtype.name
    inferred_type = lib.infer_dtype(bvalues, skipna=False)
    if inferred_type == "date":
        raise TypeError("[date] is not implemented as a table column")
    if inferred_type == "datetime":
        # after GH#8260
        # this only would be hit for a multi-timezone dtype which is an error
        raise TypeError(
            "too many timezones in this block, create separate data columns"
        )
    if not (inferred_type == "string" or dtype_name == "object"):
        return bvalues
    # replace missing values with the nan_rep sentinel before encoding
    mask = isna(bvalues)
    data = bvalues.copy()
    data[mask] = nan_rep
    # see if we have a valid string type
    inferred_type = lib.infer_dtype(data, skipna=False)
    if inferred_type != "string":
        # we cannot serialize this data, so report an exception on a column
        # by column basis
        # expected behaviour:
        # search block for a non-string object column by column
        for i in range(data.shape[0]):
            col = data[i]
            inferred_type = lib.infer_dtype(col, skipna=False)
            if inferred_type != "string":
                error_column_label = columns[i] if len(columns) > i else f"No.{i}"
                raise TypeError(
                    f"Cannot serialize the column [{error_column_label}]\n"
                    f"because its data contents are not [string] but "
                    f"[{inferred_type}] object dtype"
                )
    # itemsize is the maximum length of a string (along any dimension)
    data_converted = _convert_string_array(data, encoding, errors).reshape(data.shape)
    itemsize = data_converted.itemsize
    # specified min_itemsize?
    if isinstance(min_itemsize, dict):
        # per-column entry wins over the generic 'values' entry
        min_itemsize = int(min_itemsize.get(name) or min_itemsize.get("values") or 0)
    itemsize = max(min_itemsize or 0, itemsize)
    # check for column in the values conflicts
    if existing_col is not None:
        eci = existing_col.validate_col(itemsize)
        if eci is not None and eci > itemsize:
            itemsize = eci
    data_converted = data_converted.astype(f"|S{itemsize}", copy=False)
    return data_converted
def _convert_string_array(data: np.ndarray, encoding: str, errors: str) -> np.ndarray:
"""
Take a string-like that is object dtype and coerce to a fixed size string type.
Parameters
----------
data : np.ndarray[object]
encoding : str
errors : str
Handler for encoding errors.
Returns
-------
np.ndarray[fixed-length-string]
"""
# encode if needed
if len(data):
data = (
Series(data.ravel())
.str.encode(encoding, errors)
._values.reshape(data.shape)
)
# create the sized dtype
ensured = ensure_object(data.ravel())
itemsize = max(1, libwriters.max_len_string_array(ensured))
data = np.asarray(data, dtype=f"S{itemsize}")
return data
def _unconvert_string_array(
    data: np.ndarray, nan_rep, encoding: str, errors: str
) -> np.ndarray:
    """
    Inverse of _convert_string_array.

    Parameters
    ----------
    data : np.ndarray[fixed-length-string]
    nan_rep : the storage repr of NaN
    encoding : str
    errors : str
        Handler for encoding errors.

    Returns
    -------
    np.ndarray[object]
        Decoded data.
    """
    shape = data.shape
    data = np.asarray(data.ravel(), dtype=object)
    if len(data):
        itemsize = libwriters.max_len_string_array(ensure_object(data))
        dtype = f"U{itemsize}"
        if isinstance(data[0], bytes):
            # stored as bytes: decode back to str
            data = Series(data).str.decode(encoding, errors=errors)._values
        else:
            data = data.astype(dtype, copy=False).astype(object, copy=False)
    if nan_rep is None:
        nan_rep = "nan"
    # replace the stored nan_rep sentinel with actual NaN, in place
    libwriters.string_array_replace_from_nan_rep(data, nan_rep)
    return data.reshape(shape)
def _maybe_convert(values: np.ndarray, val_kind: str, encoding: str, errors: str):
    """Apply the kind-specific converter to `values` when one is needed;
    otherwise return `values` unchanged."""
    assert isinstance(val_kind, str), type(val_kind)
    if not _need_convert(val_kind):
        return values
    converter = _get_converter(val_kind, encoding, errors)
    return converter(values)
def _get_converter(kind: str, encoding: str, errors: str):
if kind == "datetime64":
return lambda x: np.asarray(x, dtype="M8[ns]")
elif kind == "string":
return lambda x: _unconvert_string_array(
x, nan_rep=None, encoding=encoding, errors=errors
)
else: # pragma: no cover
raise ValueError(f"invalid kind {kind}")
def _need_convert(kind: str) -> bool:
if kind in ("datetime64", "string"):
return True
return False
def _maybe_adjust_name(name: str, version: Sequence[int]) -> str:
"""
Prior to 0.10.1, we named values blocks like: values_block_0 an the
name values_0, adjust the given name if necessary.
Parameters
----------
name : str
version : Tuple[int, int, int]
Returns
-------
str
"""
if isinstance(version, str) or len(version) < 3:
raise ValueError("Version is incorrect, expected sequence of 3 integers.")
if version[0] == 0 and version[1] <= 10 and version[2] == 0:
m = re.search(r"values_block_(\d+)", name)
if m:
grp = m.groups()[0]
name = f"values_{grp}"
return name
def _dtype_to_kind(dtype_str: str) -> str:
    """
    Find the "kind" string describing the given dtype name.
    """
    dtype_str = _ensure_decoded(dtype_str)
    # Ordered prefix table: first matching prefix wins, mirroring the
    # original elif chain (so e.g. "int..." is checked before "datetime64").
    prefix_table = (
        (("string", "bytes"), "string"),
        (("float",), "float"),
        (("complex",), "complex"),
        (("int", "uint"), "integer"),
        (("datetime64",), "datetime64"),
        (("timedelta",), "timedelta64"),
        (("bool",), "bool"),
        (("category",), "category"),
        # Periods are stored as integers; the `freq` attr is kept so the
        # values can be restored later.
        (("period",), "integer"),
    )
    for prefixes, kind in prefix_table:
        if dtype_str.startswith(prefixes):
            return kind
    if dtype_str == "object":
        return "object"
    raise ValueError(f"cannot interpret dtype of [{dtype_str}]")
def _get_data_and_dtype_name(data: ArrayLike):
"""
Convert the passed data into a storable form and a dtype string.
"""
if isinstance(data, Categorical):
data = data.codes
# For datetime64tz we need to drop the TZ in tests TODO: why?
dtype_name = data.dtype.name.split("[")[0]
if data.dtype.kind in ["m", "M"]:
data = np.asarray(data.view("i8"))
# TODO: we used to reshape for the dt64tz case, but no longer
# doing that doesn't seem to break anything. why?
elif isinstance(data, PeriodIndex):
data = data.asi8
data = np.asarray(data)
return data, dtype_name
class Selection:
    """
    Carries out a selection operation on a tables.Table object.
    Parameters
    ----------
    table : a Table object
    where : list of Terms (or convertible to)
    start, stop: indices to start and/or stop selection
    """
    def __init__(
        self,
        table: Table,
        where=None,
        start: int | None = None,
        stop: int | None = None,
    ) -> None:
        self.table = table
        self.where = where
        self.start = start
        self.stop = stop
        # After __init__, exactly one selection mechanism is populated:
        # either condition/filter (numexpr query) or coordinates (explicit
        # row locations).  Both None means "read everything in range".
        self.condition = None
        self.filter = None
        self.terms = None
        self.coordinates = None
        if is_list_like(where):
            # see if we have a passed coordinate like
            # (infer_dtype raising ValueError means "not coordinates";
            # suppress falls through to expression parsing below)
            with suppress(ValueError):
                inferred = lib.infer_dtype(where, skipna=False)
                if inferred in ("integer", "boolean"):
                    where = np.asarray(where)
                    if where.dtype == np.bool_:
                        # Boolean mask: align it against the [start, stop)
                        # row numbers of the table.
                        start, stop = self.start, self.stop
                        if start is None:
                            start = 0
                        if stop is None:
                            stop = self.table.nrows
                        self.coordinates = np.arange(start, stop)[where]
                    elif issubclass(where.dtype.type, np.integer):
                        # Integer row locations must fall inside [start, stop).
                        if (self.start is not None and (where < self.start).any()) or (
                            self.stop is not None and (where >= self.stop).any()
                        ):
                            raise ValueError(
                                "where must have index locations >= start and < stop"
                            )
                        self.coordinates = where
        if self.coordinates is None:
            self.terms = self.generate(where)
            # create the numexpr & the filter
            if self.terms is not None:
                self.condition, self.filter = self.terms.evaluate()
    def generate(self, where):
        """Build a PyTablesExpr from ``where`` (dict/list/tuple/string)."""
        if where is None:
            return None
        q = self.table.queryables()
        try:
            return PyTablesExpr(where, queryables=q, encoding=self.table.encoding)
        except NameError as err:
            # raise a nice message, suggesting that the user should use
            # data_columns
            qkeys = ",".join(q.keys())
            msg = dedent(
                f"""\
                The passed where expression: {where}
                contains an invalid variable reference
                all of the variable references must be a reference to
                an axis (e.g. 'index' or 'columns'), or a data_column
                The currently defined references are: {qkeys}
                """
            )
            raise ValueError(msg) from err
    def select(self):
        """
        Read the selected rows from the underlying pytables table.
        """
        # Preference order: numexpr condition, then explicit coordinates,
        # then a plain range read.
        if self.condition is not None:
            return self.table.table.read_where(
                self.condition.format(), start=self.start, stop=self.stop
            )
        elif self.coordinates is not None:
            return self.table.table.read_coordinates(self.coordinates)
        return self.table.table.read(start=self.start, stop=self.stop)
    def select_coords(self):
        """
        Return the row coordinates matching the selection.
        """
        # Normalize negative/missing start/stop against the table length,
        # mirroring Python slice semantics.
        start, stop = self.start, self.stop
        nrows = self.table.nrows
        if start is None:
            start = 0
        elif start < 0:
            start += nrows
        if stop is None:
            stop = nrows
        elif stop < 0:
            stop += nrows
        if self.condition is not None:
            return self.table.table.get_where_list(
                self.condition.format(), start=start, stop=stop, sort=True
            )
        elif self.coordinates is not None:
            return self.coordinates
        return np.arange(start, stop)
| bsd-3-clause | 5bb3bd0cf201a35df47d5820e6e67694 | 31.271751 | 104 | 0.548747 | 4.220764 | false | false | false | false |
pandas-dev/pandas | pandas/_testing/contexts.py | 1 | 5441 | from __future__ import annotations
from contextlib import contextmanager
import os
from pathlib import Path
import tempfile
from types import TracebackType
from typing import (
IO,
Any,
Generator,
)
import uuid
import numpy as np
from pandas import set_option
from pandas.io.common import get_handle
@contextmanager
def decompress_file(path, compression) -> Generator[IO[bytes], None, None]:
    """
    Open a (possibly compressed) file and yield a binary file object.

    Parameters
    ----------
    path : str
        The path where the file is read from.
    compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd', None}
        Name of the decompression to use

    Yields
    ------
    file object
    """
    handles = get_handle(path, "rb", compression=compression, is_text=False)
    try:
        yield handles.handle
    finally:
        # Close the wrapper and any handles it opened.
        handles.close()
@contextmanager
def set_timezone(tz: str) -> Generator[None, None, None]:
    """
    Context manager for temporarily setting a timezone.

    Mutates the process-wide ``TZ`` environment variable and calls
    ``time.tzset()``, so this affects the whole process and is POSIX-only
    (``tzset`` is unavailable on Windows).
    Parameters
    ----------
    tz : str
        A string representing a valid timezone.
    Examples
    --------
    >>> from datetime import datetime
    >>> from dateutil.tz import tzlocal
    >>> tzlocal().tzname(datetime(2021, 1, 1)) # doctest: +SKIP
    'IST'
    >>> with set_timezone('US/Eastern'):
    ... tzlocal().tzname(datetime(2021, 1, 1))
    ...
    'EST'
    """
    import time
    def setTZ(tz) -> None:
        # None means "no TZ override": remove the variable entirely rather
        # than setting it to an empty string.
        if tz is None:
            try:
                del os.environ["TZ"]
            except KeyError:
                pass
        else:
            os.environ["TZ"] = tz
        # Make the C library re-read TZ so the change takes effect.
        time.tzset()
    orig_tz = os.environ.get("TZ")
    setTZ(tz)
    try:
        yield
    finally:
        # Always restore the original setting, even if the body raised.
        setTZ(orig_tz)
@contextmanager
def ensure_clean(
    filename=None, return_filelike: bool = False, **kwargs: Any
) -> Generator[Any, None, None]:
    """
    Gets a temporary path and agrees to remove on close.
    This implementation does not use tempfile.mkstemp to avoid having a file handle.
    If the code using the returned path wants to delete the file itself, windows
    requires that no program has a file handle to it.
    Parameters
    ----------
    filename : str (optional)
        suffix of the created file.
    return_filelike : bool (default False)
        if True, returns a file-like which is *always* cleaned. Necessary for
        savefig and other functions which want to append extensions.
    **kwargs
        Additional keywords are passed to open().
    """
    folder = Path(tempfile.gettempdir())
    if filename is None:
        filename = ""
    # Prefix with a UUID so parallel runs never collide on the same name.
    filename = str(uuid.uuid4()) + filename
    path = folder / filename
    path.touch()
    handle_or_str: str | IO = str(path)
    try:
        # BUG FIX: open() used to happen *before* the try block, so a failing
        # open (e.g. bad mode kwarg) leaked the already-touched temp file.
        if return_filelike:
            kwargs.setdefault("mode", "w+b")
            handle_or_str = open(path, **kwargs)
        yield handle_or_str
    finally:
        if not isinstance(handle_or_str, str):
            handle_or_str.close()
        if path.is_file():
            path.unlink()
@contextmanager
def ensure_safe_environment_variables() -> Generator[None, None, None]:
    """
    Context manager that snapshots ``os.environ`` on entry and restores it
    verbatim on exit, so environment changes made inside the block neither
    persist nor leak into global state.
    """
    snapshot = dict(os.environ)
    try:
        yield
    finally:
        # Wipe everything, then restore the snapshot exactly.
        os.environ.clear()
        os.environ.update(snapshot)
@contextmanager
def with_csv_dialect(name, **kwargs) -> Generator[None, None, None]:
    """
    Temporarily register a CSV dialect for parsing CSV.

    The dialect is unregistered again when the context exits.

    Parameters
    ----------
    name : str
        The name of the dialect.
    kwargs : mapping
        The parameters for the dialect.

    Raises
    ------
    ValueError : the name of the dialect conflicts with a builtin one.

    See Also
    --------
    csv : Python's CSV library.
    """
    import csv
    builtin_dialects = {"excel", "excel-tab", "unix"}
    if name in builtin_dialects:
        raise ValueError("Cannot override builtin dialect.")
    csv.register_dialect(name, **kwargs)
    try:
        yield
    finally:
        csv.unregister_dialect(name)
@contextmanager
def use_numexpr(use, min_elements=None) -> Generator[None, None, None]:
    """
    Temporarily set whether pandas evaluates expressions via numexpr.

    Parameters
    ----------
    use : bool
        Value for the "compute.use_numexpr" option inside the context.
    min_elements : int, optional
        Minimum element count before numexpr kicks in; defaults to the
        current ``expressions._MIN_ELEMENTS``.
    """
    from pandas.core.computation import expressions as expr
    if min_elements is None:
        min_elements = expr._MIN_ELEMENTS
    # Save current settings so they can be restored on exit.
    olduse = expr.USE_NUMEXPR
    oldmin = expr._MIN_ELEMENTS
    set_option("compute.use_numexpr", use)
    expr._MIN_ELEMENTS = min_elements
    try:
        yield
    finally:
        expr._MIN_ELEMENTS = oldmin
        set_option("compute.use_numexpr", olduse)
class RNGContext:
"""
Context manager to set the numpy random number generator speed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed) -> None:
self.seed = seed
def __enter__(self) -> None:
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: TracebackType | None,
) -> None:
np.random.set_state(self.start_state)
| bsd-3-clause | d9ff9e9092e1744bcb761e8a8a6a3b4a | 22.759825 | 84 | 0.611836 | 4.12197 | false | false | false | false |
pandas-dev/pandas | pandas/core/arrays/_ranges.py | 1 | 7434 | """
Helper functions to generate range-like data for DatetimeArray
(and possibly TimedeltaArray/PeriodArray)
"""
from __future__ import annotations
import numpy as np
from pandas._libs.lib import i8max
from pandas._libs.tslibs import (
BaseOffset,
OutOfBoundsDatetime,
Timedelta,
Timestamp,
iNaT,
)
from pandas._typing import npt
def generate_regular_range(
    start: Timestamp | Timedelta | None,
    end: Timestamp | Timedelta | None,
    periods: int | None,
    freq: BaseOffset,
    unit: str = "ns",
) -> npt.NDArray[np.intp]:
    """
    Generate a range of dates or timestamps with the spans between dates
    described by the given `freq` DateOffset.
    Parameters
    ----------
    start : Timedelta, Timestamp or None
        First point of produced date range.
    end : Timedelta, Timestamp or None
        Last point of produced date range.
    periods : int or None
        Number of periods in produced date range.
    freq : Tick
        Describes space between dates in produced date range.
    unit : str, default "ns"
        The resolution the output is meant to represent.
    Returns
    -------
    ndarray[np.int64]
        Representing the given resolution.
    """
    # All arithmetic happens in integer (epoch) space.
    istart = start.value if start is not None else None
    iend = end.value if end is not None else None
    freq.nanos  # raises if non-fixed frequency
    td = Timedelta(freq)
    try:
        td = td.as_unit(  # pyright: ignore[reportGeneralTypeIssues]
            unit, round_ok=False
        )
    except ValueError as err:
        raise ValueError(
            f"freq={freq} is incompatible with unit={unit}. "
            "Use a lower freq or a higher unit instead."
        ) from err
    # Integer step between consecutive points, in the requested unit.
    stride = int(td.value)
    if periods is None and istart is not None and iend is not None:
        b = istart
        # cannot just use e = Timestamp(end) + 1 because arange breaks when
        # stride is too large, see GH10887
        e = b + (iend - b) // stride * stride + stride // 2 + 1
    elif istart is not None and periods is not None:
        b = istart
        e = _generate_range_overflow_safe(b, periods, stride, side="start")
    elif iend is not None and periods is not None:
        e = iend + stride
        b = _generate_range_overflow_safe(e, periods, stride, side="end")
    else:
        raise ValueError(
            "at least 'start' or 'end' should be specified if a 'period' is given."
        )
    with np.errstate(over="raise"):
        # If the range is sufficiently large, np.arange may overflow
        # and incorrectly return an empty array if not caught.
        try:
            values = np.arange(b, e, stride, dtype=np.int64)
        except FloatingPointError:
            # Fall back to building the range step-by-step in Python ints,
            # which cannot overflow.
            xdr = [b]
            while xdr[-1] != e:
                xdr.append(xdr[-1] + stride)
            values = np.array(xdr[:-1], dtype=np.int64)
    return values
def _generate_range_overflow_safe(
    endpoint: int, periods: int, stride: int, side: str = "start"
) -> int:
    """
    Calculate the second endpoint for passing to np.arange, checking
    to avoid an integer overflow. Catch OverflowError and re-raise
    as OutOfBoundsDatetime.
    Parameters
    ----------
    endpoint : int
        nanosecond timestamp of the known endpoint of the desired range
    periods : int
        number of periods in the desired range
    stride : int
        nanoseconds between periods in the desired range
    side : {'start', 'end'}
        which end of the range `endpoint` refers to
    Returns
    -------
    other_end : int
    Raises
    ------
    OutOfBoundsDatetime
    """
    # GH#14187 raise instead of incorrectly wrapping around
    assert side in ["start", "end"]
    i64max = np.uint64(i8max)
    msg = f"Cannot generate range with {side}={endpoint} and periods={periods}"
    with np.errstate(over="raise"):
        # if periods * strides cannot be multiplied within the *uint64* bounds,
        # we cannot salvage the operation by recursing, so raise
        try:
            addend = np.uint64(periods) * np.uint64(np.abs(stride))
        except FloatingPointError as err:
            raise OutOfBoundsDatetime(msg) from err
    if np.abs(addend) <= i64max:
        # relatively easy case without casting concerns
        return _generate_range_overflow_safe_signed(endpoint, periods, stride, side)
    elif (endpoint > 0 and side == "start" and stride > 0) or (
        endpoint < 0 < stride and side == "end"
    ):
        # no chance of not-overflowing
        raise OutOfBoundsDatetime(msg)
    elif side == "end" and endpoint - stride <= i64max < endpoint:
        # in _generate_regular_range we added `stride` thereby overflowing
        # the bounds. Adjust to fix this.
        return _generate_range_overflow_safe(
            endpoint - stride, periods - 1, stride, side
        )
    # split into smaller pieces
    # (recurse on two halves: each half's total span fits within bounds and
    # the computed midpoint anchors the second half)
    mid_periods = periods // 2
    remaining = periods - mid_periods
    assert 0 < remaining < periods, (remaining, periods, endpoint, stride)
    midpoint = _generate_range_overflow_safe(endpoint, mid_periods, stride, side)
    return _generate_range_overflow_safe(midpoint, remaining, stride, side)
def _generate_range_overflow_safe_signed(
    endpoint: int, periods: int, stride: int, side: str
) -> int:
    """
    A special case for _generate_range_overflow_safe where `periods * stride`
    can be calculated without overflowing int64 bounds.
    """
    assert side in ["start", "end"]
    if side == "end":
        # Walking backwards from the end point.
        stride *= -1
    with np.errstate(over="raise"):
        addend = np.int64(periods) * np.int64(stride)
        try:
            # easy case with no overflows
            result = np.int64(endpoint) + addend
            if result == iNaT:
                # Putting this into a DatetimeArray/TimedeltaArray
                # would incorrectly be interpreted as NaT
                raise OverflowError
            # error: Incompatible return value type (got "signedinteger[_64Bit]",
            # expected "int")
            return result  # type: ignore[return-value]
        except (FloatingPointError, OverflowError):
            # with endpoint negative and addend positive we risk
            #  FloatingPointError; with reversed signed we risk OverflowError
            pass
    # if stride and endpoint had opposite signs, then endpoint + addend
    #  should never overflow.  so they must have the same signs
    assert (stride > 0 and endpoint >= 0) or (stride < 0 and endpoint <= 0)
    if stride > 0:
        # watch out for very special case in which we just slightly
        #  exceed implementation bounds, but when passing the result to
        #  np.arange will get a result slightly within the bounds
        # error: Incompatible types in assignment (expression has type
        # "unsignedinteger[_64Bit]", variable has type "signedinteger[_64Bit]")
        result = np.uint64(endpoint) + np.uint64(addend)  # type: ignore[assignment]
        i64max = np.uint64(i8max)
        assert result > i64max
        if result <= i64max + np.uint64(stride):
            # error: Incompatible return value type (got "unsignedinteger", expected
            # "int")
            return result  # type: ignore[return-value]
    raise OutOfBoundsDatetime(
        f"Cannot generate range with {side}={endpoint} and periods={periods}"
    )
| bsd-3-clause | 9697f684e7081469c18ee76a7254c7be | 34.569378 | 88 | 0.622814 | 4.153073 | false | false | false | false |
pandas-dev/pandas | asv_bench/benchmarks/index_cached_properties.py | 1 | 2268 | import pandas as pd
class IndexCache:
    """Benchmarks for populating cached properties across pandas Index types."""
    number = 1
    repeat = (3, 100, 20)
    params = [
        [
            "CategoricalIndex",
            "DatetimeIndex",
            "Float64Index",
            "IntervalIndex",
            "Int64Index",
            "MultiIndex",
            "PeriodIndex",
            "RangeIndex",
            "TimedeltaIndex",
            "UInt64Index",
        ]
    ]
    param_names = ["index_type"]
    def setup(self, index_type):
        N = 10**5
        # Lazy builders keyed by index type; only the requested one is built.
        builders = {
            "MultiIndex": lambda: pd.MultiIndex.from_product(
                [pd.date_range("1/1/2000", freq="T", periods=N // 2), ["a", "b"]]
            ),
            "DatetimeIndex": lambda: pd.date_range("1/1/2000", freq="T", periods=N),
            "Int64Index": lambda: pd.Index(range(N), dtype="int64"),
            "PeriodIndex": lambda: pd.period_range("1/1/2000", freq="T", periods=N),
            "RangeIndex": lambda: pd.RangeIndex(start=0, stop=N),
            "IntervalIndex": lambda: pd.IntervalIndex.from_arrays(
                range(N), range(1, N + 1)
            ),
            "TimedeltaIndex": lambda: pd.TimedeltaIndex(range(N)),
            "Float64Index": lambda: pd.Index(range(N), dtype="float64"),
            "UInt64Index": lambda: pd.Index(range(N), dtype="uint64"),
            "CategoricalIndex": lambda: pd.CategoricalIndex(range(N), range(N)),
        }
        if index_type not in builders:
            raise ValueError
        self.idx = builders[index_type]()
        assert len(self.idx) == N
        # Clear the property cache so each benchmark measures a cold access.
        self.idx._cache = {}
    def time_values(self, index_type):
        self.idx._values
    def time_shape(self, index_type):
        self.idx.shape
    def time_is_monotonic_decreasing(self, index_type):
        self.idx.is_monotonic_decreasing
    def time_is_monotonic_increasing(self, index_type):
        self.idx.is_monotonic_increasing
    def time_is_unique(self, index_type):
        self.idx.is_unique
    def time_engine(self, index_type):
        self.idx._engine
    def time_inferred_type(self, index_type):
        self.idx.inferred_type
| bsd-3-clause | f56b666c4b89fc1b348b47e48628d509 | 30.5 | 81 | 0.545414 | 3.6 | false | false | false | false |
pandas-dev/pandas | asv_bench/benchmarks/io/hdf.py | 1 | 3987 | import numpy as np
from pandas import (
DataFrame,
HDFStore,
date_range,
read_hdf,
)
from ..pandas_vb_common import (
BaseIO,
tm,
)
class HDFStoreDataFrame(BaseIO):
    """Benchmarks reading/writing/querying DataFrames through HDFStore."""
    def setup(self):
        N = 25000
        index = tm.makeStringIndex(N)
        # Plain float frame with a string index.
        self.df = DataFrame(
            {"float1": np.random.randn(N), "float2": np.random.randn(N)}, index=index
        )
        # Mixed-dtype frame: floats, strings, bools and ints.
        self.df_mixed = DataFrame(
            {
                "float1": np.random.randn(N),
                "float2": np.random.randn(N),
                "string1": ["foo"] * N,
                "bool1": [True] * N,
                "int1": np.random.randint(0, N, size=N),
            },
            index=index,
        )
        # Wide frame (100 columns) plus slice bounds used by the wide query.
        self.df_wide = DataFrame(np.random.randn(N, 100))
        self.start_wide = self.df_wide.index[10000]
        self.stop_wide = self.df_wide.index[15000]
        # Datetime-indexed frames plus slice bounds for the query benchmarks.
        self.df2 = DataFrame(
            {"float1": np.random.randn(N), "float2": np.random.randn(N)},
            index=date_range("1/1/2000", periods=N),
        )
        self.start = self.df2.index[10000]
        self.stop = self.df2.index[15000]
        self.df_wide2 = DataFrame(
            np.random.randn(N, 100), index=date_range("1/1/2000", periods=N)
        )
        # Frame used by the data_columns=True append benchmark.
        self.df_dc = DataFrame(
            np.random.randn(N, 10), columns=[f"C{i:03d}" for i in range(10)]
        )
        self.fname = "__test__.h5"
        self.store = HDFStore(self.fname)
        # Pre-populate fixed- and table-format keys that the read
        # benchmarks target.
        self.store.put("fixed", self.df)
        self.store.put("fixed_mixed", self.df_mixed)
        self.store.append("table", self.df2)
        self.store.append("table_mixed", self.df_mixed)
        self.store.append("table_wide", self.df_wide)
        self.store.append("table_wide2", self.df_wide2)
    def teardown(self):
        # Close the store and delete the backing file (BaseIO.remove).
        self.store.close()
        self.remove(self.fname)
    def time_read_store(self):
        self.store.get("fixed")
    def time_read_store_mixed(self):
        self.store.get("fixed_mixed")
    def time_write_store(self):
        self.store.put("fixed_write", self.df)
    def time_write_store_mixed(self):
        self.store.put("fixed_mixed_write", self.df_mixed)
    def time_read_store_table_mixed(self):
        self.store.select("table_mixed")
    def time_write_store_table_mixed(self):
        self.store.append("table_mixed_write", self.df_mixed)
    def time_read_store_table(self):
        self.store.select("table")
    def time_write_store_table(self):
        self.store.append("table_write", self.df)
    def time_read_store_table_wide(self):
        self.store.select("table_wide")
    def time_write_store_table_wide(self):
        self.store.append("table_wide_write", self.df_wide)
    def time_write_store_table_dc(self):
        self.store.append("table_dc_write", self.df_dc, data_columns=True)
    def time_query_store_table_wide(self):
        self.store.select(
            "table_wide", where="index > self.start_wide and index < self.stop_wide"
        )
    def time_query_store_table(self):
        self.store.select("table", where="index > self.start and index < self.stop")
    def time_store_repr(self):
        repr(self.store)
    def time_store_str(self):
        str(self.store)
    def time_store_info(self):
        self.store.info()
class HDF(BaseIO):
    """Benchmarks for the top-level read_hdf/to_hdf round-trip."""
    # Parametrized over the two on-disk formats.
    params = ["table", "fixed"]
    param_names = ["format"]
    def setup(self, format):
        self.fname = "__test__.h5"
        N = 100000
        C = 5
        # Hourly datetime index, float columns plus one object column.
        self.df = DataFrame(
            np.random.randn(N, C),
            columns=[f"float{i}" for i in range(C)],
            index=date_range("20000101", periods=N, freq="H"),
        )
        self.df["object"] = tm.makeStringIndex(N)
        # Write once so the read benchmark has data to load.
        self.df.to_hdf(self.fname, "df", format=format)
    def time_read_hdf(self, format):
        read_hdf(self.fname, "df")
    def time_write_hdf(self, format):
        self.df.to_hdf(self.fname, "df", format=format)
from ..pandas_vb_common import setup # noqa: F401 isort:skip
| bsd-3-clause | 201e4a745c15b875e00e60702555ca84 | 27.891304 | 85 | 0.570855 | 3.281481 | false | false | false | false |
pandas-dev/pandas | pandas/_typing.py | 1 | 10358 | from __future__ import annotations
from datetime import (
datetime,
timedelta,
tzinfo,
)
from os import PathLike
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Hashable,
Iterator,
List,
Literal,
Mapping,
Optional,
Protocol,
Sequence,
Tuple,
Type as type_t,
TypeVar,
Union,
)
import numpy as np
# To prevent import cycles place any internal imports in the branch below
# and use a string literal forward reference to it in subsequent types
# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
if TYPE_CHECKING:
import numpy.typing as npt
from pandas._libs import (
NaTType,
Period,
Timedelta,
Timestamp,
)
from pandas._libs.tslibs import BaseOffset
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas import Interval
from pandas.core.arrays.base import ExtensionArray
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby.generic import (
DataFrameGroupBy,
GroupBy,
SeriesGroupBy,
)
from pandas.core.indexes.base import Index
from pandas.core.internals import (
ArrayManager,
BlockManager,
SingleArrayManager,
SingleBlockManager,
)
from pandas.core.resample import Resampler
from pandas.core.series import Series
from pandas.core.window.rolling import BaseWindow
from pandas.io.formats.format import EngFormatter
ScalarLike_co = Union[
int,
float,
complex,
str,
bytes,
np.generic,
]
# numpy compatible types
NumpyValueArrayLike = Union[ScalarLike_co, npt.ArrayLike]
# Name "npt._ArrayLikeInt_co" is not defined [name-defined]
NumpySorter = Optional[npt._ArrayLikeInt_co] # type: ignore[name-defined]
else:
npt: Any = None
HashableT = TypeVar("HashableT", bound=Hashable)
# array-like
ArrayLike = Union["ExtensionArray", np.ndarray]
AnyArrayLike = Union[ArrayLike, "Index", "Series"]
# scalars
PythonScalar = Union[str, float, bool]
DatetimeLikeScalar = Union["Period", "Timestamp", "Timedelta"]
PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"]
Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, datetime]
IntStrT = TypeVar("IntStrT", int, str)
# timestamp and timedelta convertible types
TimestampConvertibleTypes = Union[
"Timestamp", datetime, np.datetime64, np.int64, float, str
]
TimedeltaConvertibleTypes = Union[
"Timedelta", timedelta, np.timedelta64, np.int64, float, str
]
Timezone = Union[str, tzinfo]
# NDFrameT is stricter and ensures that the same subclass of NDFrame always is
# used. E.g. `def func(a: NDFrameT) -> NDFrameT: ...` means that if a
# Series is passed into a function, a Series is always returned and if a DataFrame is
# passed in, a DataFrame is always returned.
NDFrameT = TypeVar("NDFrameT", bound="NDFrame")
NumpyIndexT = TypeVar("NumpyIndexT", np.ndarray, "Index")
AxisInt = int
Axis = Union[AxisInt, Literal["index", "columns", "rows"]]
IndexLabel = Union[Hashable, Sequence[Hashable]]
Level = Hashable
Shape = Tuple[int, ...]
Suffixes = Tuple[Optional[str], Optional[str]]
Ordered = Optional[bool]
JSONSerializable = Optional[Union[PythonScalar, List, Dict]]
Frequency = Union[str, "BaseOffset"]
Axes = Union[AnyArrayLike, List, range]
RandomState = Union[
int,
ArrayLike,
np.random.Generator,
np.random.BitGenerator,
np.random.RandomState,
]
# dtypes
NpDtype = Union[str, np.dtype, type_t[Union[str, complex, bool, object]]]
Dtype = Union["ExtensionDtype", NpDtype]
AstypeArg = Union["ExtensionDtype", "npt.DTypeLike"]
# DtypeArg specifies all allowable dtypes in a functions its dtype argument
DtypeArg = Union[Dtype, Dict[Hashable, Dtype]]
DtypeObj = Union[np.dtype, "ExtensionDtype"]
# converters
ConvertersArg = Dict[Hashable, Callable[[Dtype], Dtype]]
# parse_dates
ParseDatesArg = Union[
bool, List[Hashable], List[List[Hashable]], Dict[Hashable, List[Hashable]]
]
# For functions like rename that convert one label to another
Renamer = Union[Mapping[Any, Hashable], Callable[[Any], Hashable]]
# to maintain type information across generic functions and parametrization
T = TypeVar("T")
# used in decorators to preserve the signature of the function it decorates
# see https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators
FuncType = Callable[..., Any]
F = TypeVar("F", bound=FuncType)
# types of vectorized key functions for DataFrame::sort_values and
# DataFrame::sort_index, among others
ValueKeyFunc = Optional[Callable[["Series"], Union["Series", AnyArrayLike]]]
IndexKeyFunc = Optional[Callable[["Index"], Union["Index", AnyArrayLike]]]
# types of `func` kwarg for DataFrame.aggregate and Series.aggregate
AggFuncTypeBase = Union[Callable, str]
AggFuncTypeDict = Dict[Hashable, Union[AggFuncTypeBase, List[AggFuncTypeBase]]]
AggFuncType = Union[
AggFuncTypeBase,
List[AggFuncTypeBase],
AggFuncTypeDict,
]
AggObjType = Union[
"Series",
"DataFrame",
"GroupBy",
"SeriesGroupBy",
"DataFrameGroupBy",
"BaseWindow",
"Resampler",
]
PythonFuncType = Callable[[Any], Any]
# filenames and file-like-objects
AnyStr_co = TypeVar("AnyStr_co", str, bytes, covariant=True)
AnyStr_contra = TypeVar("AnyStr_contra", str, bytes, contravariant=True)
class BaseBuffer(Protocol):
    """Minimal structural (duck-typed) file-like interface used by pandas IO."""
    @property
    def mode(self) -> str:
        # for _get_filepath_or_buffer
        ...
    def seek(self, __offset: int, __whence: int = ...) -> int:
        # with one argument: gzip.GzipFile, bz2.BZ2File
        # with two arguments: zip.ZipFile, read_sas
        ...
    def seekable(self) -> bool:
        # for bz2.BZ2File
        ...
    def tell(self) -> int:
        # for zip.ZipFile, read_stata, to_stata
        ...
class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]):
    """Readable buffer; covariant in the type it yields (str or bytes)."""
    def read(self, __n: int = ...) -> AnyStr_co:
        # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File
        ...
class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]):
    """Writable buffer; contravariant in the type it accepts (str or bytes)."""
    def write(self, __b: AnyStr_contra) -> Any:
        # for gzip.GzipFile, bz2.BZ2File
        ...
    def flush(self) -> Any:
        # for gzip.GzipFile, bz2.BZ2File
        ...
class ReadPickleBuffer(ReadBuffer[bytes], Protocol):
    """Binary readable buffer that also supports readline (read_pickle)."""
    def readline(self) -> bytes:
        ...
class WriteExcelBuffer(WriteBuffer[bytes], Protocol):
    """Binary writable buffer that supports truncate (Excel writers)."""
    def truncate(self, size: int | None = ...) -> int:
        ...
class ReadCsvBuffer(ReadBuffer[AnyStr_co], Protocol):
    """Readable buffer with the extras required by the CSV readers."""
    def __iter__(self) -> Iterator[AnyStr_co]:
        # for engine=python
        ...
    def fileno(self) -> int:
        # for _MMapWrapper
        ...
    def readline(self) -> AnyStr_co:
        # for engine=python
        ...
    @property
    def closed(self) -> bool:
        # for engine=pyarrow
        ...
FilePath = Union[str, "PathLike[str]"]
# for arbitrary kwargs passed during reading/writing files
StorageOptions = Optional[Dict[str, Any]]
# compression keywords and compression
CompressionDict = Dict[str, Any]
CompressionOptions = Optional[
Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict]
]
# types in DataFrameFormatter
FormattersType = Union[
List[Callable], Tuple[Callable, ...], Mapping[Union[str, int], Callable]
]
ColspaceType = Mapping[Hashable, Union[str, int]]
FloatFormatType = Union[str, Callable, "EngFormatter"]
ColspaceArgType = Union[
str, int, Sequence[Union[str, int]], Mapping[Hashable, Union[str, int]]
]
# Arguments for fillna()
FillnaOptions = Literal["backfill", "bfill", "ffill", "pad"]
# internals
Manager = Union[
"ArrayManager", "SingleArrayManager", "BlockManager", "SingleBlockManager"
]
SingleManager = Union["SingleArrayManager", "SingleBlockManager"]
Manager2D = Union["ArrayManager", "BlockManager"]
# indexing
# PositionalIndexer -> valid 1D positional indexer, e.g. can pass
# to ndarray.__getitem__
# ScalarIndexer is for a single value as the index
# SequenceIndexer is for list like or slices (but not tuples)
# PositionalIndexerTuple is extends the PositionalIndexer for 2D arrays
# These are used in various __getitem__ overloads
# TODO(typing#684): add Ellipsis, see
# https://github.com/python/typing/issues/684#issuecomment-548203158
# https://bugs.python.org/issue41810
# Using List[int] here rather than Sequence[int] to disallow tuples.
ScalarIndexer = Union[int, np.integer]
SequenceIndexer = Union[slice, List[int], np.ndarray]
PositionalIndexer = Union[ScalarIndexer, SequenceIndexer]
PositionalIndexerTuple = Tuple[PositionalIndexer, PositionalIndexer]
PositionalIndexer2D = Union[PositionalIndexer, PositionalIndexerTuple]
if TYPE_CHECKING:
TakeIndexer = Union[Sequence[int], Sequence[np.integer], npt.NDArray[np.integer]]
else:
TakeIndexer = Any
# Shared by functions such as drop and astype
IgnoreRaise = Literal["ignore", "raise"]
# Windowing rank methods
WindowingRankType = Literal["average", "min", "max"]
# read_csv engines
CSVEngine = Literal["c", "python", "pyarrow", "python-fwf"]
# read_xml parsers
XMLParsers = Literal["lxml", "etree"]
# Interval closed type
IntervalLeftRight = Literal["left", "right"]
IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]]
# datetime and NaTType
DatetimeNaTType = Union[datetime, "NaTType"]
DateTimeErrorChoices = Union[IgnoreRaise, Literal["coerce"]]
# sort_index
SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"]
NaPosition = Literal["first", "last"]
# quantile interpolation
QuantileInterpolation = Literal["linear", "lower", "higher", "midpoint", "nearest"]
# plotting
PlottingOrientation = Literal["horizontal", "vertical"]
# dropna
AnyAll = Literal["any", "all"]
MatplotlibColor = Union[str, Sequence[float]]
TimeGrouperOrigin = Union[
"Timestamp", Literal["epoch", "start", "start_day", "end", "end_day"]
]
TimeAmbiguous = Union[Literal["infer", "NaT", "raise"], "npt.NDArray[np.bool_]"]
TimeNonexistent = Union[
Literal["shift_forward", "shift_backward", "NaT", "raise"], timedelta
]
DropKeep = Literal["first", "last", False]
CorrelationMethod = Union[
Literal["pearson", "kendall", "spearman"], Callable[[np.ndarray, np.ndarray], float]
]
AlignJoin = Literal["outer", "inner", "left", "right"]
| bsd-3-clause | 389b2aa9ef4d15a9abe0fa240ad6eba1 | 27.932961 | 88 | 0.697528 | 3.54362 | false | false | false | false |
pandas-dev/pandas | asv_bench/benchmarks/tslibs/timedelta.py | 2 | 1593 | """
Timedelta benchmarks that rely only on tslibs. See benchmarks.timedeltas for
Timedelta benchmarks that rely on other parts of pandas.
"""
import datetime
import numpy as np
from pandas import Timedelta
class TimedeltaConstructor:
    """Benchmarks for constructing Timedelta from various input types."""
    def setup(self):
        # Pre-build the inputs so each benchmark times construction only.
        self.nptimedelta64 = np.timedelta64(3600)
        self.dttimedelta = datetime.timedelta(seconds=3600)
        self.td = Timedelta(3600, unit="s")
    def time_from_int(self):
        Timedelta(123456789)
    def time_from_unit(self):
        Timedelta(1, unit="d")
    def time_from_components(self):
        Timedelta(
            days=1,
            hours=2,
            minutes=3,
            seconds=4,
            milliseconds=5,
            microseconds=6,
            nanoseconds=7,
        )
    def time_from_datetime_timedelta(self):
        Timedelta(self.dttimedelta)
    def time_from_np_timedelta(self):
        Timedelta(self.nptimedelta64)
    def time_from_string(self):
        Timedelta("1 days")
    def time_from_iso_format(self):
        Timedelta("P4DT12H30M5S")
    def time_from_missing(self):
        Timedelta("nat")
    def time_from_pd_timedelta(self):
        Timedelta(self.td)
class TimedeltaProperties:
    """ASV benchmarks for attribute access on a fixed ``Timedelta``."""

    def setup_cache(self):
        # asv caches this return value and passes it to each timed method.
        return Timedelta(days=365, minutes=35, seconds=25, milliseconds=35)

    def time_timedelta_days(self, td):
        td.days

    def time_timedelta_seconds(self, td):
        td.seconds

    def time_timedelta_microseconds(self, td):
        td.microseconds

    def time_timedelta_nanoseconds(self, td):
        td.nanoseconds
pandas-dev/pandas | pandas/tests/indexes/period/test_formats.py | 6 | 6587 | import numpy as np
import pytest
import pandas as pd
from pandas import (
PeriodIndex,
Series,
)
import pandas._testing as tm
def test_to_native_types():
    # _format_native_types is the low-level formatting hook used by to_csv;
    # exercise it on a plain daily PeriodIndex.
    index = PeriodIndex(["2017-01-01", "2017-01-02", "2017-01-03"], freq="D")

    # First, with no arguments.
    expected = np.array(["2017-01-01", "2017-01-02", "2017-01-03"], dtype="=U10")
    result = index._format_native_types()
    tm.assert_numpy_array_equal(result, expected)

    # No NaN values, so na_rep has no effect
    result = index._format_native_types(na_rep="pandas")
    tm.assert_numpy_array_equal(result, expected)

    # Make sure date formatting works
    expected = np.array(["01-2017-01", "01-2017-02", "01-2017-03"], dtype="=U10")
    result = index._format_native_types(date_format="%m-%Y-%d")
    tm.assert_numpy_array_equal(result, expected)

    # NULL object handling should work; note the result dtype switches to
    # object once a NaT is present.
    index = PeriodIndex(["2017-01-01", pd.NaT, "2017-01-03"], freq="D")
    expected = np.array(["2017-01-01", "NaT", "2017-01-03"], dtype=object)
    result = index._format_native_types()
    tm.assert_numpy_array_equal(result, expected)

    # na_rep replaces the NaT marker in the formatted output.
    expected = np.array(["2017-01-01", "pandas", "2017-01-03"], dtype=object)
    result = index._format_native_types(na_rep="pandas")
    tm.assert_numpy_array_equal(result, expected)
class TestPeriodIndexRendering:
    """repr/str/_summary rendering checks for PeriodIndex and period Series."""

    def test_frame_repr(self):
        df = pd.DataFrame({"A": [1, 2, 3]}, index=pd.date_range("2000", periods=3))
        result = repr(df)
        expected = "            A\n2000-01-01  1\n2000-01-02  2\n2000-01-03  3"
        assert result == expected

    @pytest.mark.parametrize("method", ["__repr__", "__str__"])
    def test_representation(self, method):
        # GH#7601
        # Cover empty, single, multiple entries, annual/hourly/quarterly
        # frequencies, NaT, and a multiple-of-base ("3D") frequency.
        idx1 = PeriodIndex([], freq="D")
        idx2 = PeriodIndex(["2011-01-01"], freq="D")
        idx3 = PeriodIndex(["2011-01-01", "2011-01-02"], freq="D")
        idx4 = PeriodIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
        idx5 = PeriodIndex(["2011", "2012", "2013"], freq="A")
        idx6 = PeriodIndex(["2011-01-01 09:00", "2012-02-01 10:00", "NaT"], freq="H")
        idx7 = pd.period_range("2013Q1", periods=1, freq="Q")
        idx8 = pd.period_range("2013Q1", periods=2, freq="Q")
        idx9 = pd.period_range("2013Q1", periods=3, freq="Q")
        idx10 = PeriodIndex(["2011-01-01", "2011-02-01"], freq="3D")

        exp1 = "PeriodIndex([], dtype='period[D]')"
        exp2 = "PeriodIndex(['2011-01-01'], dtype='period[D]')"
        exp3 = "PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]')"
        exp4 = (
            "PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
            "dtype='period[D]')"
        )
        exp5 = "PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]')"
        exp6 = (
            "PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
            "dtype='period[H]')"
        )
        exp7 = "PeriodIndex(['2013Q1'], dtype='period[Q-DEC]')"
        exp8 = "PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]')"
        exp9 = "PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], dtype='period[Q-DEC]')"
        exp10 = "PeriodIndex(['2011-01-01', '2011-02-01'], dtype='period[3D]')"

        for idx, expected in zip(
            [idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idx10],
            [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9, exp10],
        ):
            result = getattr(idx, method)()
            assert result == expected

    def test_representation_to_series(self):
        # GH#10971
        # Same indexes as above, but wrapped in a Series (categorical-style
        # period dtype rendering).
        idx1 = PeriodIndex([], freq="D")
        idx2 = PeriodIndex(["2011-01-01"], freq="D")
        idx3 = PeriodIndex(["2011-01-01", "2011-01-02"], freq="D")
        idx4 = PeriodIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
        idx5 = PeriodIndex(["2011", "2012", "2013"], freq="A")
        idx6 = PeriodIndex(["2011-01-01 09:00", "2012-02-01 10:00", "NaT"], freq="H")
        idx7 = pd.period_range("2013Q1", periods=1, freq="Q")
        idx8 = pd.period_range("2013Q1", periods=2, freq="Q")
        idx9 = pd.period_range("2013Q1", periods=3, freq="Q")

        exp1 = """Series([], dtype: period[D])"""
        exp2 = """0    2011-01-01
dtype: period[D]"""
        exp3 = """0    2011-01-01
1    2011-01-02
dtype: period[D]"""
        exp4 = """0    2011-01-01
1    2011-01-02
2    2011-01-03
dtype: period[D]"""
        exp5 = """0    2011
1    2012
2    2013
dtype: period[A-DEC]"""
        exp6 = """0    2011-01-01 09:00
1    2012-02-01 10:00
2                 NaT
dtype: period[H]"""
        exp7 = """0    2013Q1
dtype: period[Q-DEC]"""
        exp8 = """0    2013Q1
1    2013Q2
dtype: period[Q-DEC]"""
        exp9 = """0    2013Q1
1    2013Q2
2    2013Q3
dtype: period[Q-DEC]"""

        for idx, expected in zip(
            [idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
            [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9],
        ):
            result = repr(Series(idx))
            assert result == expected

    def test_summary(self):
        # GH#9116
        # _summary shows entry count, first/last entries, and frequency.
        idx1 = PeriodIndex([], freq="D")
        idx2 = PeriodIndex(["2011-01-01"], freq="D")
        idx3 = PeriodIndex(["2011-01-01", "2011-01-02"], freq="D")
        idx4 = PeriodIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
        idx5 = PeriodIndex(["2011", "2012", "2013"], freq="A")
        idx6 = PeriodIndex(["2011-01-01 09:00", "2012-02-01 10:00", "NaT"], freq="H")
        idx7 = pd.period_range("2013Q1", periods=1, freq="Q")
        idx8 = pd.period_range("2013Q1", periods=2, freq="Q")
        idx9 = pd.period_range("2013Q1", periods=3, freq="Q")

        exp1 = """PeriodIndex: 0 entries
Freq: D"""
        exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
        exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
        exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
        exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
        exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
        exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
        exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
        exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""

        for idx, expected in zip(
            [idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
            [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9],
        ):
            result = idx._summary()
            assert result == expected
pandas-dev/pandas | asv_bench/benchmarks/attrs_caching.py | 1 | 1415 | import numpy as np
import pandas as pd
from pandas import DataFrame
try:
from pandas.core.construction import extract_array
except ImportError:
extract_array = None
class DataFrameAttributes:
    """ASV benchmarks for raw ``index`` attribute get/set on a DataFrame."""

    def setup(self):
        # Small random frame; its index is stashed so the setter benchmark
        # re-assigns an already-valid Index object.
        self.df = DataFrame(np.random.randn(10, 6))
        self.cur_index = self.df.index

    def time_get_index(self):
        # Attribute read path.
        self.df.index

    def time_set_index(self):
        # Attribute write path (axis validation + assignment).
        self.df.index = self.cur_index
class SeriesArrayAttribute:
    """ASV benchmarks for ``.array`` access and ``extract_array`` by dtype."""

    params = [["numeric", "object", "category", "datetime64", "datetime64tz"]]
    param_names = ["dtype"]

    def setup(self, dtype):
        # Dispatch table mapping each benchmark parameter to a builder for a
        # representative Series of that dtype.
        builders = {
            "numeric": lambda: pd.Series([1, 2, 3]),
            "object": lambda: pd.Series(["a", "b", "c"], dtype=object),
            "category": lambda: pd.Series(["a", "b", "c"], dtype="category"),
            "datetime64": lambda: pd.Series(pd.date_range("2013", periods=3)),
            "datetime64tz": lambda: pd.Series(
                pd.date_range("2013", periods=3, tz="UTC")
            ),
        }
        build = builders.get(dtype)
        if build is not None:
            self.series = build()

    def time_array(self, dtype):
        self.series.array

    def time_extract_array(self, dtype):
        extract_array(self.series)

    def time_extract_array_numpy(self, dtype):
        extract_array(self.series, extract_numpy=True)
from .pandas_vb_common import setup # noqa: F401 isort:skip
| bsd-3-clause | 330ff5e4f41aa483a64e39679b54d932 | 26.745098 | 79 | 0.612721 | 3.609694 | false | false | false | false |
pandas-dev/pandas | pandas/core/reshape/tile.py | 1 | 21384 | """
Quantilization functions and related stuff
"""
from __future__ import annotations
from typing import (
Any,
Callable,
Literal,
)
import numpy as np
from pandas._libs import (
Timedelta,
Timestamp,
)
from pandas._libs.lib import infer_dtype
from pandas._typing import IntervalLeftRight
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
ensure_platform_int,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_extension_array_dtype,
is_integer,
is_list_like,
is_numeric_dtype,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import isna
from pandas import (
Categorical,
Index,
IntervalIndex,
to_datetime,
to_timedelta,
)
from pandas.core import nanops
import pandas.core.algorithms as algos
def cut(
    x,
    bins,
    right: bool = True,
    labels=None,
    retbins: bool = False,
    precision: int = 3,
    include_lowest: bool = False,
    duplicates: str = "raise",
    ordered: bool = True,
):
    """
    Bin values into discrete intervals.

    Use `cut` when you need to segment and sort data values into bins. This
    function is also useful for going from a continuous variable to a
    categorical variable. For example, `cut` could convert ages to groups of
    age ranges. Supports binning into an equal number of bins, or a
    pre-specified array of bins.

    Parameters
    ----------
    x : array-like
        The input array to be binned. Must be 1-dimensional.
    bins : int, sequence of scalars, or IntervalIndex
        The criteria to bin by.

        * int : Defines the number of equal-width bins in the range of `x`. The
          range of `x` is extended by .1% on each side to include the minimum
          and maximum values of `x`.
        * sequence of scalars : Defines the bin edges allowing for non-uniform
          width. No extension of the range of `x` is done.
        * IntervalIndex : Defines the exact bins to be used. Note that
          IntervalIndex for `bins` must be non-overlapping.

    right : bool, default True
        Indicates whether `bins` includes the rightmost edge or not. If
        ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``
        indicate (1,2], (2,3], (3,4]. This argument is ignored when
        `bins` is an IntervalIndex.
    labels : array or False, default None
        Specifies the labels for the returned bins. Must be the same length as
        the resulting bins. If False, returns only integer indicators of the
        bins. This affects the type of the output container (see below).
        This argument is ignored when `bins` is an IntervalIndex. If True,
        raises an error. When `ordered=False`, labels must be provided.
    retbins : bool, default False
        Whether to return the bins or not. Useful when bins is provided
        as a scalar.
    precision : int, default 3
        The precision at which to store and display the bins labels.
    include_lowest : bool, default False
        Whether the first interval should be left-inclusive or not.
    duplicates : {default 'raise', 'drop'}, optional
        If bin edges are not unique, raise ValueError or drop non-uniques.
    ordered : bool, default True
        Whether the labels are ordered or not. Applies to returned types
        Categorical and Series (with Categorical dtype). If True,
        the resulting categorical will be ordered. If False, the resulting
        categorical will be unordered (labels must be provided).

        .. versionadded:: 1.1.0

    Returns
    -------
    out : Categorical, Series, or ndarray
        An array-like object representing the respective bin for each value
        of `x`. The type depends on the value of `labels`.

        * None (default) : returns a Series for Series `x` or a
          Categorical for all other inputs. The values stored within
          are Interval dtype.
        * sequence of scalars : returns a Series for Series `x` or a
          Categorical for all other inputs. The values stored within
          are whatever the type in the sequence is.
        * False : returns an ndarray of integers.

    bins : numpy.ndarray or IntervalIndex.
        The computed or specified bins. Only returned when `retbins=True`.
        For scalar or sequence `bins`, this is an ndarray with the computed
        bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For
        an IntervalIndex `bins`, this is equal to `bins`.

    See Also
    --------
    qcut : Discretize variable into equal-sized buckets based on rank
        or based on sample quantiles.
    Categorical : Array type for storing data that come from a
        fixed set of values.
    Series : One-dimensional array with axis labels (including time series).
    IntervalIndex : Immutable Index implementing an ordered, sliceable set.

    Notes
    -----
    Any NA values will be NA in the result. Out of bounds values will be NA in
    the resulting Series or Categorical object.

    Reference :ref:`the user guide <reshaping.tile.cut>` for more examples.

    Examples
    --------
    Discretize into three equal-sized bins.

    >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
    ... # doctest: +ELLIPSIS
    [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
    Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ...

    >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
    ... # doctest: +ELLIPSIS
    ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
    Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ...
    array([0.994, 3.   , 5.   , 7.   ]))

    Discovers the same bins, but assign them specific labels. Notice that
    the returned Categorical's categories are `labels` and is ordered.

    >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
    ...        3, labels=["bad", "medium", "good"])
    ['bad', 'good', 'medium', 'medium', 'good', 'bad']
    Categories (3, object): ['bad' < 'medium' < 'good']

    ``ordered=False`` will result in unordered categories when labels are passed.
    This parameter can be used to allow non-unique labels:

    >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3,
    ...        labels=["B", "A", "B"], ordered=False)
    ['B', 'B', 'A', 'A', 'B', 'B']
    Categories (2, object): ['A', 'B']

    ``labels=False`` implies you just want the bins back.

    >>> pd.cut([0, 1, 1, 2], bins=4, labels=False)
    array([0, 1, 1, 3])

    Passing a Series as an input returns a Series with categorical dtype:

    >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
    ...               index=['a', 'b', 'c', 'd', 'e'])
    >>> pd.cut(s, 3)
    ... # doctest: +ELLIPSIS
    a    (1.992, 4.667]
    b    (1.992, 4.667]
    c    (4.667, 7.333]
    d     (7.333, 10.0]
    e     (7.333, 10.0]
    dtype: category
    Categories (3, interval[float64, right]): [(1.992, 4.667] < (4.667, ...

    Passing a Series as an input returns a Series with mapping value.
    It is used to map numerically to intervals based on bins.

    >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
    ...               index=['a', 'b', 'c', 'd', 'e'])
    >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
    ... # doctest: +ELLIPSIS
    (a    1.0
     b    2.0
     c    3.0
     d    4.0
     e    NaN
     dtype: float64,
     array([ 0,  2,  4,  6,  8, 10]))

    Use `drop` optional when bins is not unique

    >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
    ...        right=False, duplicates='drop')
    ... # doctest: +ELLIPSIS
    (a    1.0
     b    2.0
     c    3.0
     d    3.0
     e    NaN
     dtype: float64,
     array([ 0,  2,  4,  6, 10]))

    Passing an IntervalIndex for `bins` results in those categories exactly.
    Notice that values not covered by the IntervalIndex are set to NaN. 0
    is to the left of the first bin (which is closed on the right), and 1.5
    falls between two bins.

    >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
    >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)
    [NaN, (0.0, 1.0], NaN, (2.0, 3.0], (4.0, 5.0]]
    Categories (3, interval[int64, right]): [(0, 1] < (2, 3] < (4, 5]]
    """
    # NOTE: this binning code is changed a bit from histogram for var(x) == 0

    # Keep a reference to the raw input so a Series can be reconstituted
    # (same index/name) in _postprocess_for_cut.
    original = x
    x = _preprocess_for_cut(x)
    # datetime/timedelta/bool inputs are converted to numeric here; `dtype`
    # remembers the original so bins/labels can be converted back later.
    x, dtype = _coerce_to_type(x)
    if not np.iterable(bins):
        if is_scalar(bins) and bins < 1:
            raise ValueError("`bins` should be a positive integer.")
        try:  # for array-like
            sz = x.size
        except AttributeError:
            x = np.asarray(x)
            sz = x.size
        if sz == 0:
            raise ValueError("Cannot cut empty array")
        rng = (nanops.nanmin(x), nanops.nanmax(x))
        mn, mx = (mi + 0.0 for mi in rng)
        if np.isinf(mn) or np.isinf(mx):
            # GH 24314
            raise ValueError(
                "cannot specify integer `bins` when input data contains infinity"
            )
        if mn == mx:  # adjust end points before binning
            mn -= 0.001 * abs(mn) if mn != 0 else 0.001
            mx += 0.001 * abs(mx) if mx != 0 else 0.001
            bins = np.linspace(mn, mx, bins + 1, endpoint=True)
        else:  # adjust end points after binning
            bins = np.linspace(mn, mx, bins + 1, endpoint=True)
            adj = (mx - mn) * 0.001  # 0.1% of the range
            if right:
                bins[0] -= adj
            else:
                bins[-1] += adj
    elif isinstance(bins, IntervalIndex):
        if bins.is_overlapping:
            raise ValueError("Overlapping IntervalIndex is not accepted.")
    else:
        if is_datetime64tz_dtype(bins):
            bins = np.asarray(bins, dtype=DT64NS_DTYPE)
        else:
            bins = np.asarray(bins)
        bins = _convert_bin_to_numeric_type(bins, dtype)
        # GH 26045: cast to float64 to avoid an overflow
        if (np.diff(bins.astype("float64")) < 0).any():
            raise ValueError("bins must increase monotonically.")
    fac, bins = _bins_to_cuts(
        x,
        bins,
        right=right,
        labels=labels,
        precision=precision,
        include_lowest=include_lowest,
        dtype=dtype,
        duplicates=duplicates,
        ordered=ordered,
    )
    return _postprocess_for_cut(fac, bins, retbins, dtype, original)
def qcut(
    x,
    q,
    labels=None,
    retbins: bool = False,
    precision: int = 3,
    duplicates: str = "raise",
):
    """
    Quantile-based discretization function.

    Discretize variable into equal-sized buckets based on rank or based
    on sample quantiles. For example 1000 values for 10 quantiles would
    produce a Categorical object indicating quantile membership for each data point.

    Parameters
    ----------
    x : 1d ndarray or Series
    q : int or list-like of float
        Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
        array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles.
    labels : array or False, default None
        Used as labels for the resulting bins. Must be of the same length as
        the resulting bins. If False, return only integer indicators of the
        bins. If True, raises an error.
    retbins : bool, optional
        Whether to return the (bins, labels) or not. Can be useful if bins
        is given as a scalar.
    precision : int, optional
        The precision at which to store and display the bins labels.
    duplicates : {default 'raise', 'drop'}, optional
        If bin edges are not unique, raise ValueError or drop non-uniques.

    Returns
    -------
    out : Categorical or Series or array of integers if labels is False
        The return type (Categorical or Series) depends on the input: a Series
        of type category if input is a Series else Categorical. Bins are
        represented as categories when categorical data is returned.
    bins : ndarray of floats
        Returned only if `retbins` is True.

    Notes
    -----
    Out of bounds values will be NA in the resulting Categorical object

    Examples
    --------
    >>> pd.qcut(range(5), 4)
    ... # doctest: +ELLIPSIS
    [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
    Categories (4, interval[float64, right]): [(-0.001, 1.0] < (1.0, 2.0] ...

    >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
    ... # doctest: +SKIP
    [good, good, medium, bad, bad]
    Categories (3, object): [good < medium < bad]

    >>> pd.qcut(range(5), 4, labels=False)
    array([0, 0, 1, 2, 3])
    """
    # Keep the raw input so Series metadata can be restored afterwards.
    original = x
    x = _preprocess_for_cut(x)
    x, dtype = _coerce_to_type(x)
    # An integer q means q equal-probability buckets; otherwise q is taken
    # as an explicit sequence of quantile edges.
    quantiles = np.linspace(0, 1, q + 1) if is_integer(q) else q
    x_np = np.asarray(x)
    # NaNs are dropped before computing the quantile edges; they re-appear
    # as NA in the final categorical via _bins_to_cuts.
    x_np = x_np[~np.isnan(x_np)]
    bins = np.quantile(x_np, quantiles)
    fac, bins = _bins_to_cuts(
        x,
        bins,
        labels=labels,
        precision=precision,
        include_lowest=True,
        dtype=dtype,
        duplicates=duplicates,
    )
    return _postprocess_for_cut(fac, bins, retbins, dtype, original)
def _bins_to_cuts(
    x,
    bins: np.ndarray,
    right: bool = True,
    labels=None,
    precision: int = 3,
    include_lowest: bool = False,
    dtype=None,
    duplicates: str = "raise",
    ordered: bool = True,
):
    """
    Core worker shared by cut/qcut: assign each value in ``x`` to one of the
    intervals defined by ``bins`` and build the labelled result.

    Returns a 2-tuple of (result, bins) where ``result`` is a Categorical
    (or an integer/float ndarray when ``labels=False``) and ``bins`` are the
    possibly de-duplicated edges actually used.
    """
    if not ordered and labels is None:
        raise ValueError("'labels' must be provided if 'ordered = False'")
    if duplicates not in ["raise", "drop"]:
        raise ValueError(
            "invalid value for 'duplicates' parameter, valid options are: raise, drop"
        )
    if isinstance(bins, IntervalIndex):
        # we have a fast-path here
        ids = bins.get_indexer(x)
        result = Categorical.from_codes(ids, categories=bins, ordered=True)
        return result, bins
    # Duplicate edges are only tolerated with duplicates="drop"; a 2-edge
    # (single-bin) input is exempt from the uniqueness check.
    unique_bins = algos.unique(bins)
    if len(unique_bins) < len(bins) and len(bins) != 2:
        if duplicates == "raise":
            raise ValueError(
                f"Bin edges must be unique: {repr(bins)}.\n"
                f"You can drop duplicate edges by setting the 'duplicates' kwarg"
            )
        bins = unique_bins
    # searchsorted side is flipped relative to `right` so that values equal
    # to an edge land in the interval closed on that edge.
    side: Literal["left", "right"] = "left" if right else "right"
    ids = ensure_platform_int(bins.searchsorted(x, side=side))
    if include_lowest:
        # Values equal to the very first edge belong to the first bin.
        ids[np.asarray(x) == bins[0]] = 1
    # id 0 means "below the first edge", id len(bins) means "above the last";
    # both are out of bounds and become NA, as do genuine missing values.
    na_mask = isna(x) | (ids == len(bins)) | (ids == 0)
    has_nas = na_mask.any()
    if labels is not False:
        if not (labels is None or is_list_like(labels)):
            raise ValueError(
                "Bin labels must either be False, None or passed in as a "
                "list-like argument"
            )
        if labels is None:
            # Default labels are the formatted intervals themselves.
            labels = _format_labels(
                bins, precision, right=right, include_lowest=include_lowest, dtype=dtype
            )
        elif ordered and len(set(labels)) != len(labels):
            raise ValueError(
                "labels must be unique if ordered=True; pass ordered=False "
                "for duplicate labels"
            )
        else:
            if len(labels) != len(bins) - 1:
                raise ValueError(
                    "Bin labels must be one fewer than the number of bin edges"
                )
        if not is_categorical_dtype(labels):
            labels = Categorical(
                labels,
                categories=labels if len(set(labels)) == len(labels) else None,
                ordered=ordered,
            )
        # TODO: handle mismatch between categorical label order and pandas.cut order.
        # Out-of-bounds ids are zeroed so that `ids - 1` below maps them to
        # code -1, which Categorical treats as NA.
        np.putmask(ids, na_mask, 0)
        result = algos.take_nd(labels, ids - 1)
    else:
        # labels=False: return raw zero-based bin indicators; NA slots force
        # an upcast to float64 so they can hold np.nan.
        result = ids - 1
        if has_nas:
            result = result.astype(np.float64)
            np.putmask(result, na_mask, np.nan)
    return result, bins
def _coerce_to_type(x):
    """
    if the passed data is of datetime/timedelta, bool or nullable int type,
    this method converts it to numeric so that cut or qcut method can
    handle it

    Returns a 2-tuple of (converted values, original dtype or None); the
    dtype is used later to convert bins/labels back to the original type.
    """
    dtype = None
    if is_datetime64tz_dtype(x.dtype):
        dtype = x.dtype
    elif is_datetime64_dtype(x.dtype):
        x = to_datetime(x)
        dtype = np.dtype("datetime64[ns]")
    elif is_timedelta64_dtype(x.dtype):
        x = to_timedelta(x)
        dtype = np.dtype("timedelta64[ns]")
    elif is_bool_dtype(x.dtype):
        # GH 20303
        x = x.astype(np.int64)
    # To support cut and qcut for IntegerArray we convert to float dtype.
    # Will properly support in the future.
    # https://github.com/pandas-dev/pandas/pull/31290
    # https://github.com/pandas-dev/pandas/issues/31389
    elif is_extension_array_dtype(x.dtype) and is_numeric_dtype(x.dtype):
        x = x.to_numpy(dtype=np.float64, na_value=np.nan)
    if dtype is not None:
        # GH 19768: force NaT to NaN during integer conversion
        x = np.where(x.notna(), x.view(np.int64), np.nan)
    return x, dtype
def _convert_bin_to_numeric_type(bins, dtype):
"""
if the passed bin is of datetime/timedelta type,
this method converts it to integer
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Raises
------
ValueError if bins are not of a compat dtype to dtype
"""
bins_dtype = infer_dtype(bins, skipna=False)
if is_timedelta64_dtype(dtype):
if bins_dtype in ["timedelta", "timedelta64"]:
bins = to_timedelta(bins).view(np.int64)
else:
raise ValueError("bins must be of timedelta64 dtype")
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
if bins_dtype in ["datetime", "datetime64"]:
bins = to_datetime(bins).view(np.int64)
else:
raise ValueError("bins must be of datetime64 dtype")
return bins
def _convert_bin_to_datelike_type(bins, dtype):
"""
Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is
datelike
Parameters
----------
bins : list-like of bins
dtype : dtype of data
Returns
-------
bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is
datelike
"""
if is_datetime64tz_dtype(dtype):
bins = to_datetime(bins.astype(np.int64), utc=True).tz_convert(dtype.tz)
elif is_datetime_or_timedelta_dtype(dtype):
bins = Index(bins.astype(np.int64), dtype=dtype)
return bins
def _format_labels(
    bins, precision: int, right: bool = True, include_lowest: bool = False, dtype=None
):
    """based on the dtype, return our labels

    Builds the default IntervalIndex of bin labels: datelike bins are
    formatted as Timestamp/Timedelta, numeric bins are rounded to an
    inferred precision that keeps every edge distinct.
    """
    closed: IntervalLeftRight = "right" if right else "left"
    formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta]
    if is_datetime64tz_dtype(dtype):
        formatter = lambda x: Timestamp(x, tz=dtype.tz)
        adjust = lambda x: x - Timedelta("1ns")
    elif is_datetime64_dtype(dtype):
        formatter = Timestamp
        adjust = lambda x: x - Timedelta("1ns")
    elif is_timedelta64_dtype(dtype):
        formatter = Timedelta
        adjust = lambda x: x - Timedelta("1ns")
    else:
        # Numeric bins: widen precision until the rounded edges stay unique.
        precision = _infer_precision(precision, bins)
        formatter = lambda x: _round_frac(x, precision)
        adjust = lambda x: x - 10 ** (-precision)
    breaks = [formatter(b) for b in bins]
    if right and include_lowest:
        # adjust lhs of first interval by precision to account for being right closed
        breaks[0] = adjust(breaks[0])
    return IntervalIndex.from_breaks(breaks, closed=closed)
def _preprocess_for_cut(x):
"""
handles preprocessing for cut where we convert passed
input to array, strip the index information and store it
separately
"""
# Check that the passed array is a Pandas or Numpy object
# We don't want to strip away a Pandas data-type here (e.g. datetimetz)
ndim = getattr(x, "ndim", None)
if ndim is None:
x = np.asarray(x)
if x.ndim != 1:
raise ValueError("Input array must be 1 dimensional")
return x
def _postprocess_for_cut(fac, bins, retbins: bool, dtype, original):
    """
    handles post processing for the cut method where
    we combine the index information if the originally passed
    datatype was a series

    Also converts i8 bin edges back to datelike form (via
    _convert_bin_to_datelike_type) when they are being returned.
    """
    if isinstance(original, ABCSeries):
        # Rebuild a Series carrying the caller's index and name.
        fac = original._constructor(fac, index=original.index, name=original.name)
    if not retbins:
        return fac
    bins = _convert_bin_to_datelike_type(bins, dtype)
    return fac, bins
def _round_frac(x, precision: int):
"""
Round the fractional part of the given number
"""
if not np.isfinite(x) or x == 0:
return x
else:
frac, whole = np.modf(x)
if whole == 0:
digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
else:
digits = precision
return np.around(x, digits)
def _infer_precision(base_precision: int, bins) -> int:
    """
    Infer an appropriate precision for _round_frac

    Starting at ``base_precision``, increase the precision until rounding
    leaves every bin edge distinct (capped at 19 digits).
    """
    for precision in range(base_precision, 20):
        levels = [_round_frac(b, precision) for b in bins]
        if algos.unique(levels).size == bins.size:
            return precision
    return base_precision  # default
| bsd-3-clause | 41983585346fcf20157e17ba34495765 | 32.102167 | 88 | 0.600402 | 3.701575 | false | false | false | false |
pandas-dev/pandas | pandas/tests/extension/test_floating.py | 2 | 5416 | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
from pandas.core.dtypes.common import is_extension_array_dtype
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float_dtype
from pandas.core.arrays.floating import (
Float32Dtype,
Float64Dtype,
)
from pandas.tests.extension import base
def make_data():
    """Return 100 float values with two interspersed ``pd.NA`` entries,
    mirroring the layout used by the integer extension-test fixtures."""
    low = list(np.arange(0.1, 0.9, 0.1))
    high = list(np.arange(1, 9.8, 0.1))
    tail = [9.9, 10.0]
    return low + [pd.NA] + high + [pd.NA] + tail
# Standard fixture set consumed by the BaseExtensionTests machinery below.
@pytest.fixture(params=[Float32Dtype, Float64Dtype])
def dtype(request):
    # Parametrized over both masked-float dtypes.
    return request.param()
@pytest.fixture
def data(dtype):
    # ~100 values with interspersed pd.NA, as required by the base suite.
    return pd.array(make_data(), dtype=dtype)
@pytest.fixture
def data_for_twos(dtype):
    # All-twos array, used by arithmetic (divmod/pow) tests.
    return pd.array(np.ones(100) * 2, dtype=dtype)
@pytest.fixture
def data_missing(dtype):
    # [NA, valid] — first element missing, per base-suite contract.
    return pd.array([pd.NA, 0.1], dtype=dtype)
@pytest.fixture
def data_for_sorting(dtype):
    # [B, C, A] ordering, per base-suite contract.
    return pd.array([0.1, 0.2, 0.0], dtype=dtype)
@pytest.fixture
def data_missing_for_sorting(dtype):
    # [B, NA, A] ordering, per base-suite contract.
    return pd.array([0.1, pd.NA, 0.0], dtype=dtype)
@pytest.fixture
def na_cmp():
    # we are pd.NA
    return lambda x, y: x is pd.NA and y is pd.NA
@pytest.fixture
def na_value():
    # The scalar missing-value sentinel for masked arrays.
    return pd.NA
@pytest.fixture
def data_for_grouping(dtype):
    # [B, B, NA, NA, A, A, B, C] layout expected by the groupby tests.
    b = 0.1
    a = 0.0
    c = 0.2
    na = pd.NA
    return pd.array([b, b, na, na, a, a, b, c], dtype=dtype)
class TestDtype(base.BaseDtypeTests):
    pass
class TestArithmeticOps(base.BaseArithmeticOpsTests):
    def check_opname(self, s, op_name, other, exc=None):
        # overwriting to indicate ops don't raise an error
        super().check_opname(s, op_name, other, exc=None)
    def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
        if exc is None:
            sdtype = tm.get_dtype(s)
            if (
                hasattr(other, "dtype")
                and not is_extension_array_dtype(other.dtype)
                and is_float_dtype(other.dtype)
            ):
                # other is np.float64 and would therefore always result in
                # upcasting, so keeping other as same numpy_dtype
                other = other.astype(sdtype.numpy_dtype)
            result = op(s, other)
            expected = self._combine(s, other, op)
            # combine method result in 'biggest' (float64) dtype
            expected = expected.astype(sdtype)
            self.assert_equal(result, expected)
        else:
            with pytest.raises(exc):
                op(s, other)
    def _check_divmod_op(self, s, op, other, exc=None):
        # Masked-float divmod never raises; force exc=None.
        super()._check_divmod_op(s, op, other, None)
class TestComparisonOps(base.BaseComparisonOpsTests):
    # TODO: share with IntegerArray?
    def _check_op(self, s, op, other, op_name, exc=NotImplementedError):
        if exc is None:
            result = op(s, other)
            # Override to do the astype to boolean
            expected = s.combine(other, op).astype("boolean")
            self.assert_series_equal(result, expected)
        else:
            with pytest.raises(exc):
                op(s, other)
    def check_opname(self, s, op_name, other, exc=None):
        # Comparisons on masked floats never raise; force exc=None.
        super().check_opname(s, op_name, other, exc=None)
    def _compare_other(self, s, data, op, other):
        op_name = f"__{op.__name__}__"
        self.check_opname(s, op_name, other)
# The classes below inherit the full base extension-test suites unchanged.
class TestInterface(base.BaseInterfaceTests):
    pass
class TestConstructors(base.BaseConstructorsTests):
    pass
class TestReshaping(base.BaseReshapingTests):
    pass
class TestGetitem(base.BaseGetitemTests):
    pass
class TestSetitem(base.BaseSetitemTests):
    pass
class TestIndex(base.BaseIndexTests):
    pass
class TestMissing(base.BaseMissingTests):
    pass
class TestMethods(base.BaseMethodsTests):
    pass
class TestCasting(base.BaseCastingTests):
    pass
class TestGroupby(base.BaseGroupbyTests):
    pass
class TestNumericReduce(base.BaseNumericReduceTests):
    def check_reduce(self, s, op_name, skipna):
        # overwrite to ensure pd.NA is tested instead of np.nan
        # https://github.com/pandas-dev/pandas/issues/30958
        result = getattr(s, op_name)(skipna=skipna)
        if not skipna and s.isna().any():
            expected = pd.NA
        else:
            expected = getattr(s.dropna().astype(s.dtype.numpy_dtype), op_name)(
                skipna=skipna
            )
        tm.assert_almost_equal(result, expected)
@pytest.mark.skip(reason="Tested in tests/reductions/test_reductions.py")
class TestBooleanReduce(base.BaseBooleanReduceTests):
    pass
class TestPrinting(base.BasePrintingTests):
    pass
class TestParsing(base.BaseParsingTests):
    pass
@pytest.mark.filterwarnings("ignore:overflow encountered in reduce:RuntimeWarning")
class Test2DCompat(base.Dim2CompatTests):
    pass
| bsd-3-clause | 0557829d264bebed44265c55ffeb153b | 24.074074 | 83 | 0.656573 | 3.469571 | false | true | false | false |
pandas-dev/pandas | pandas/tests/resample/test_resampler_grouper.py | 1 | 15621 | from textwrap import dedent
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.util._test_decorators import async_mark
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
TimedeltaIndex,
Timestamp,
)
import pandas._testing as tm
from pandas.core.api import Int64Index
from pandas.core.indexes.datetimes import date_range
# Shared fixture frame: 40 seconds of per-second data split into three
# uneven "A" groups (20/12/8 rows), used by the getitem/groupby tests below.
test_frame = DataFrame(
    {"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)},
    index=date_range("1/1/2000", freq="s", periods=40),
)
@async_mark()
@td.check_file_leaks
async def test_tab_complete_ipython6_warning(ip):
    # Tab-completing a Resampler inside IPython must not emit warnings.
    from IPython.core.completer import provisionalcompleter
    code = dedent(
        """\
    import pandas._testing as tm
    s = tm.makeTimeSeries()
    rs = s.resample("D")
    """
    )
    await ip.run_code(code)
    # GH 31324 newer jedi version raises Deprecation warning;
    # appears resolved 2021-02-02
    with tm.assert_produces_warning(None):
        with provisionalcompleter("ignore"):
            list(ip.Completer.completions("rs.", 1))
def test_deferred_with_groupby():
# GH 12486
# support deferred resample ops with groupby
data = [
["2010-01-01", "A", 2],
["2010-01-02", "A", 3],
["2010-01-05", "A", 8],
["2010-01-10", "A", 7],
["2010-01-13", "A", 3],
["2010-01-01", "B", 5],
["2010-01-03", "B", 2],
["2010-01-04", "B", 1],
["2010-01-11", "B", 7],
["2010-01-14", "B", 3],
]
df = DataFrame(data, columns=["date", "id", "score"])
df.date = pd.to_datetime(df.date)
def f_0(x):
return x.set_index("date").resample("D").asfreq()
expected = df.groupby("id").apply(f_0)
result = df.set_index("date").groupby("id").resample("D").asfreq()
tm.assert_frame_equal(result, expected)
df = DataFrame(
{
"date": date_range(start="2016-01-01", periods=4, freq="W"),
"group": [1, 1, 2, 2],
"val": [5, 6, 7, 8],
}
).set_index("date")
def f_1(x):
return x.resample("1D").ffill()
expected = df.groupby("group").apply(f_1)
result = df.groupby("group").resample("1D").ffill()
tm.assert_frame_equal(result, expected)
def test_getitem():
    # Selecting column B before or after the resample must give the same result.
    g = test_frame.groupby("A")

    expected = g.B.apply(lambda x: x.resample("2s").mean())

    result = g.resample("2s").B.mean()
    tm.assert_series_equal(result, expected)

    result = g.B.resample("2s").mean()
    tm.assert_series_equal(result, expected)

    result = g.resample("2s").mean().B
    tm.assert_series_equal(result, expected)


def test_getitem_multiple():

    # GH 13174
    # multiple calls after selection causing an issue with aliasing
    data = [{"id": 1, "buyer": "A"}, {"id": 2, "buyer": "B"}]
    df = DataFrame(data, index=date_range("2016-01-01", periods=2))
    r = df.groupby("id").resample("1D")
    result = r["buyer"].count()
    expected = Series(
        [1, 1],
        index=pd.MultiIndex.from_tuples(
            [(1, Timestamp("2016-01-01")), (2, Timestamp("2016-01-02"))],
            names=["id", None],
        ),
        name="buyer",
    )
    tm.assert_series_equal(result, expected)

    # A second call on the same selection must not be affected by the first.
    result = r["buyer"].count()
    tm.assert_series_equal(result, expected)


def test_groupby_resample_on_api_with_getitem():
    # GH 17813
    df = DataFrame(
        {"id": list("aabbb"), "date": date_range("1-1-2016", periods=5), "data": 1}
    )
    # ``on="date"`` must behave like resampling on a pre-set date index.
    exp = df.set_index("date").groupby("id").resample("2D")["data"].sum()
    result = df.groupby("id").resample("2D", on="date")["data"].sum()
    tm.assert_series_equal(result, exp)


def test_groupby_with_origin():
    # GH 31809
    freq = "1399min"  # prime number that is smaller than 24h
    start, end = "1/1/2000 00:00:00", "1/31/2000 00:00"
    middle = "1/15/2000 00:00:00"

    rng = date_range(start, end, freq="1231min")  # prime number
    ts = Series(np.random.randn(len(rng)), index=rng)
    ts2 = ts[middle:end]

    # proves that grouper without a fixed origin does not work
    # when dealing with unusual frequencies
    simple_grouper = pd.Grouper(freq=freq)
    count_ts = ts.groupby(simple_grouper).agg("count")
    count_ts = count_ts[middle:end]
    count_ts2 = ts2.groupby(simple_grouper).agg("count")
    with pytest.raises(AssertionError, match="Index are different"):
        tm.assert_index_equal(count_ts.index, count_ts2.index)

    # test origin on 1970-01-01 00:00:00
    origin = Timestamp(0)
    adjusted_grouper = pd.Grouper(freq=freq, origin=origin)
    adjusted_count_ts = ts.groupby(adjusted_grouper).agg("count")
    adjusted_count_ts = adjusted_count_ts[middle:end]
    adjusted_count_ts2 = ts2.groupby(adjusted_grouper).agg("count")
    tm.assert_series_equal(adjusted_count_ts, adjusted_count_ts2)

    # test origin on 2049-10-18 20:00:00
    origin_future = Timestamp(0) + pd.Timedelta("1399min") * 30_000
    adjusted_grouper2 = pd.Grouper(freq=freq, origin=origin_future)
    adjusted2_count_ts = ts.groupby(adjusted_grouper2).agg("count")
    adjusted2_count_ts = adjusted2_count_ts[middle:end]
    adjusted2_count_ts2 = ts2.groupby(adjusted_grouper2).agg("count")
    tm.assert_series_equal(adjusted2_count_ts, adjusted2_count_ts2)

    # both grouper use an adjusted timestamp that is a multiple of 1399 min
    # they should be equals even if the adjusted_timestamp is in the future
    tm.assert_series_equal(adjusted_count_ts, adjusted2_count_ts2)
def test_nearest():
    # GH 17496
    # Resample nearest
    index = date_range("1/1/2000", periods=3, freq="T")
    result = Series(range(3), index=index).resample("20s").nearest()

    expected = Series(
        [0, 0, 1, 1, 1, 2, 2],
        index=pd.DatetimeIndex(
            [
                "2000-01-01 00:00:00",
                "2000-01-01 00:00:20",
                "2000-01-01 00:00:40",
                "2000-01-01 00:01:00",
                "2000-01-01 00:01:20",
                "2000-01-01 00:01:40",
                "2000-01-01 00:02:00",
            ],
            dtype="datetime64[ns]",
            freq="20S",
        ),
    )
    tm.assert_series_equal(result, expected)


# Every resampler method must match applying the same op group-by-group.
@pytest.mark.parametrize(
    "f",
    [
        "first",
        "last",
        "median",
        "sem",
        "sum",
        "mean",
        "min",
        "max",
        "size",
        "count",
        "nearest",
        "bfill",
        "ffill",
        "asfreq",
        "ohlc",
    ],
)
def test_methods(f):
    g = test_frame.groupby("A")
    r = g.resample("2s")

    result = getattr(r, f)()
    expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
    tm.assert_equal(result, expected)


def test_methods_nunique():
    # series only
    g = test_frame.groupby("A")
    r = g.resample("2s")
    result = r.B.nunique()
    expected = g.B.apply(lambda x: x.resample("2s").nunique())
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("f", ["std", "var"])
def test_methods_std_var(f):
    # ddof must be forwarded through the grouped resampler.
    g = test_frame.groupby("A")
    r = g.resample("2s")
    result = getattr(r, f)(ddof=1)
    expected = g.apply(lambda x: getattr(x.resample("2s"), f)(ddof=1))
    tm.assert_frame_equal(result, expected)


def test_apply():
    g = test_frame.groupby("A")
    r = g.resample("2s")

    # reduction
    expected = g.resample("2s").sum()

    def f_0(x):
        return x.resample("2s").sum()

    result = r.apply(f_0)
    tm.assert_frame_equal(result, expected)

    def f_1(x):
        return x.resample("2s").apply(lambda y: y.sum())

    result = g.apply(f_1)
    # y.sum() results in int64 instead of int32 on 32-bit architectures
    expected = expected.astype("int64")
    tm.assert_frame_equal(result, expected)
def test_apply_with_mutated_index():
    # GH 15169
    index = date_range("1-1-2015", "12-31-15", freq="D")
    df = DataFrame(data={"col1": np.random.rand(len(index))}, index=index)

    def f(x):
        # Returns data indexed by new labels, not the original dates.
        s = Series([1, 2], index=["a", "b"])
        return s

    expected = df.groupby(pd.Grouper(freq="M")).apply(f)
    result = df.resample("M").apply(f)
    tm.assert_frame_equal(result, expected)

    # A case for series
    expected = df["col1"].groupby(pd.Grouper(freq="M")).apply(f)
    result = df["col1"].resample("M").apply(f)
    tm.assert_series_equal(result, expected)


def test_apply_columns_multilevel():
    # GH 16231
    cols = pd.MultiIndex.from_tuples([("A", "a", "", "one"), ("B", "b", "i", "two")])
    ind = date_range(start="2017-01-01", freq="15Min", periods=8)
    df = DataFrame(np.array([0] * 16).reshape(8, 2), index=ind, columns=cols)
    # Dispatch a different aggregation per (multilevel) column name.
    agg_dict = {col: (np.sum if col[3] == "one" else np.mean) for col in df.columns}
    result = df.resample("H").apply(lambda x: agg_dict[x.name](x))
    expected = DataFrame(
        2 * [[0, 0.0]],
        index=date_range(start="2017-01-01", freq="1H", periods=2),
        columns=pd.MultiIndex.from_tuples(
            [("A", "a", "", "one"), ("B", "b", "i", "two")]
        ),
    )
    tm.assert_frame_equal(result, expected)


def test_resample_groupby_with_label():
    # GH 13235
    index = date_range("2000-01-01", freq="2D", periods=5)
    df = DataFrame(index=index, data={"col0": [0, 0, 1, 1, 2], "col1": [1, 1, 1, 1, 1]})
    # ``label="left"`` must survive the groupby -> resample chain.
    result = df.groupby("col0").resample("1W", label="left").sum()

    mi = [
        np.array([0, 0, 1, 2]),
        pd.to_datetime(
            np.array(["1999-12-26", "2000-01-02", "2000-01-02", "2000-01-02"])
        ),
    ]
    mindex = pd.MultiIndex.from_arrays(mi, names=["col0", None])
    expected = DataFrame(
        data={"col0": [0, 0, 2, 2], "col1": [1, 1, 2, 1]}, index=mindex
    )

    tm.assert_frame_equal(result, expected)


def test_consistency_with_window():

    # consistent return values with window
    df = test_frame
    expected = Int64Index([1, 2, 3], name="A")
    result = df.groupby("A").resample("2s").mean()
    assert result.index.nlevels == 2
    tm.assert_index_equal(result.index.levels[0], expected)

    result = df.groupby("A").rolling(20).mean()
    assert result.index.nlevels == 2
    tm.assert_index_equal(result.index.levels[0], expected)


def test_median_duplicate_columns():
    # GH 14233
    # median over duplicate column names must match the unique-name result.
    df = DataFrame(
        np.random.randn(20, 3),
        columns=list("aaa"),
        index=date_range("2012-01-01", periods=20, freq="s"),
    )
    df2 = df.copy()
    df2.columns = ["a", "b", "c"]
    expected = df2.resample("5s").median()
    result = df.resample("5s").median()
    expected.columns = result.columns
    tm.assert_frame_equal(result, expected)


def test_apply_to_one_column_of_df():
    # GH: 36951
    df = DataFrame(
        {"col": range(10), "col1": range(10, 20)},
        index=date_range("2012-01-01", periods=10, freq="20min"),
    )

    # access "col" via getattr -> make sure we handle AttributeError
    result = df.resample("H").apply(lambda group: group.col.sum())
    expected = Series(
        [3, 12, 21, 9], index=date_range("2012-01-01", periods=4, freq="H")
    )
    tm.assert_series_equal(result, expected)

    # access "col" via __getitem__ -> make sure we handle KeyError
    result = df.resample("H").apply(lambda group: group["col"].sum())
    tm.assert_series_equal(result, expected)
def test_resample_groupby_agg():
    # GH: 33548
    df = DataFrame(
        {
            "cat": [
                "cat_1",
                "cat_1",
                "cat_2",
                "cat_1",
                "cat_2",
                "cat_1",
                "cat_2",
                "cat_1",
            ],
            "num": [5, 20, 22, 3, 4, 30, 10, 50],
            "date": [
                "2019-2-1",
                "2018-02-03",
                "2020-3-11",
                "2019-2-2",
                "2019-2-2",
                "2018-12-4",
                "2020-3-11",
                "2020-12-12",
            ],
        }
    )
    df["date"] = pd.to_datetime(df["date"])

    resampled = df.groupby("cat").resample("Y", on="date")
    msg = "The default value of numeric_only"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        # Plain sum() warns about the numeric_only default ...
        expected = resampled.sum()
    # ... while an explicit agg spec on "num" must give the same frame.
    result = resampled.agg({"num": "sum"})

    tm.assert_frame_equal(result, expected)


def test_resample_groupby_agg_listlike():
    # GH 42905
    ts = Timestamp("2021-02-28 00:00:00")
    df = DataFrame({"class": ["beta"], "value": [69]}, index=Index([ts], name="date"))
    resampled = df.groupby("class").resample("M")["value"]
    result = resampled.agg(["sum", "size"])
    expected = DataFrame(
        [[69, 1]],
        index=pd.MultiIndex.from_tuples([("beta", ts)], names=["class", "date"]),
        columns=["sum", "size"],
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("keys", [["a"], ["a", "b"]])
def test_empty(keys):
    # GH 26411
    # grouped resample of an empty frame must preserve the index structure.
    df = DataFrame([], columns=["a", "b"], index=TimedeltaIndex([]))
    result = df.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean()
    expected = (
        DataFrame(columns=["a", "b"])
        .set_index(keys, drop=False)
        .set_index(TimedeltaIndex([]), append=True)
    )
    if len(keys) == 1:
        expected.index.name = keys[0]

    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("consolidate", [True, False])
def test_resample_groupby_agg_object_dtype_all_nan(consolidate):
    # https://github.com/pandas-dev/pandas/issues/39329

    dates = date_range("2020-01-01", periods=15, freq="D")
    df1 = DataFrame({"key": "A", "date": dates, "col1": range(15), "col_object": "val"})
    df2 = DataFrame({"key": "B", "date": dates, "col1": range(15)})
    df = pd.concat([df1, df2], ignore_index=True)
    if consolidate:
        df = df._consolidate()

    result = df.groupby(["key"]).resample("W", on="date").min()
    idx = pd.MultiIndex.from_arrays(
        [
            ["A"] * 3 + ["B"] * 3,
            pd.to_datetime(["2020-01-05", "2020-01-12", "2020-01-19"] * 2),
        ],
        names=["key", "date"],
    )
    expected = DataFrame(
        {
            "key": ["A"] * 3 + ["B"] * 3,
            "col1": [0, 5, 12] * 2,
            "col_object": ["val"] * 3 + [np.nan] * 3,
        },
        index=idx,
    )
    tm.assert_frame_equal(result, expected)


def test_groupby_resample_with_list_of_keys():
    # GH 47362
    df = DataFrame(
        data={
            "date": date_range(start="2016-01-01", periods=8),
            "group": [0, 0, 0, 0, 1, 1, 1, 1],
            "val": [1, 7, 5, 2, 3, 10, 5, 1],
        }
    )
    # List-of-columns selection ([["val"]]) after a grouped resample.
    result = df.groupby("group").resample("2D", on="date")[["val"]].mean()
    expected = DataFrame(
        data={
            "val": [4.0, 3.5, 6.5, 3.0],
        },
        index=Index(
            data=[
                (0, Timestamp("2016-01-01")),
                (0, Timestamp("2016-01-03")),
                (1, Timestamp("2016-01-05")),
                (1, Timestamp("2016-01-07")),
            ],
            name=("group", "date"),
        ),
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("keys", [["a"], ["a", "b"]])
def test_resample_empty_Dataframe(keys):
    # GH 47705
    df = DataFrame([], columns=["a", "b", "date"])
    df["date"] = pd.to_datetime(df["date"])
    df = df.set_index("date")
    result = df.groupby(keys).resample(rule=pd.to_timedelta("00:00:01")).mean()
    expected = DataFrame(columns=["a", "b", "date"]).set_index(keys, drop=False)
    expected["date"] = pd.to_datetime(expected["date"])
    expected = expected.set_index("date", append=True, drop=True)
    if len(keys) == 1:
        expected.index.name = keys[0]

    tm.assert_frame_equal(result, expected)
| bsd-3-clause | 35cbe4f1a3d4efaa1e4b05a47684cf18 | 29.098266 | 88 | 0.560271 | 3.157033 | false | true | false | false |
pandas-dev/pandas | pandas/tests/tseries/offsets/test_ticks.py | 1 | 10870 | """
Tests for offsets.Tick and subclasses
"""
from datetime import (
datetime,
timedelta,
)
from hypothesis import (
assume,
example,
given,
)
import numpy as np
import pytest
from pandas._libs.tslibs.offsets import delta_to_tick
from pandas import (
Timedelta,
Timestamp,
)
import pandas._testing as tm
from pandas._testing._hypothesis import INT_NEG_999_TO_POS_999
from pandas.tests.tseries.offsets.common import assert_offset_equal
from pandas.tseries import offsets
from pandas.tseries.offsets import (
Hour,
Micro,
Milli,
Minute,
Nano,
Second,
)
# ---------------------------------------------------------------------
# Test Helpers

# All concrete Tick subclasses, ordered from largest to smallest unit.
tick_classes = [Hour, Minute, Second, Milli, Micro, Nano]

# ---------------------------------------------------------------------
def test_apply_ticks():
    """Adding two Hour offsets combines their counts into one Hour."""
    combined = offsets.Hour(3) + offsets.Hour(4)
    assert combined == offsets.Hour(7)
def test_delta_to_tick():
    # Whole days -> Day offset; sub-second Timedelta -> exact Nano count.
    delta = timedelta(3)
    tick = delta_to_tick(delta)
    assert tick == offsets.Day(3)

    td = Timedelta(nanoseconds=5)
    tick = delta_to_tick(td)
    assert tick == Nano(5)


@pytest.mark.parametrize("cls", tick_classes)
@example(n=2, m=3)
@example(n=800, m=300)
@example(n=1000, m=5)
@given(n=INT_NEG_999_TO_POS_999, m=INT_NEG_999_TO_POS_999)
def test_tick_add_sub(cls, n, m):
    # For all Tick subclasses and all integers n, m, we should have
    # tick(n) + tick(m) == tick(n+m)
    # tick(n) - tick(m) == tick(n-m)

    left = cls(n)
    right = cls(m)
    expected = cls(n + m)

    assert left + right == expected

    expected = cls(n - m)
    assert left - right == expected


@pytest.mark.arm_slow
@pytest.mark.parametrize("cls", tick_classes)
@example(n=2, m=3)
@given(n=INT_NEG_999_TO_POS_999, m=INT_NEG_999_TO_POS_999)
def test_tick_equality(cls, n, m):
    # Equality tracks only the count ``n`` for a given Tick class.
    assume(m != n)
    # tick == tock iff tick.n == tock.n
    left = cls(n)
    right = cls(m)
    assert left != right

    right = cls(n)
    assert left == right
    assert not left != right

    if n != 0:
        assert cls(n) != cls(-n)


# ---------------------------------------------------------------------
def test_Hour():
    # Hour shifts by whole hours; negative counts shift backwards.
    assert_offset_equal(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 1))
    assert_offset_equal(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
    assert_offset_equal(2 * Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 2))
    assert_offset_equal(-1 * Hour(), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))

    assert Hour(3) + Hour(2) == Hour(5)
    assert Hour(3) - Hour(2) == Hour()

    assert Hour(4) != Hour(1)


def test_Minute():
    assert_offset_equal(Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 1))
    assert_offset_equal(Minute(-1), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))
    assert_offset_equal(2 * Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 2))
    assert_offset_equal(-1 * Minute(), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))

    assert Minute(3) + Minute(2) == Minute(5)
    assert Minute(3) - Minute(2) == Minute()
    assert Minute(5) != Minute()


def test_Second():
    assert_offset_equal(Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 1))
    assert_offset_equal(Second(-1), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1))
    assert_offset_equal(
        2 * Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 2)
    )
    assert_offset_equal(
        -1 * Second(), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1)
    )

    assert Second(3) + Second(2) == Second(5)
    assert Second(3) - Second(2) == Second()


def test_Millisecond():
    # Milli maps to 1000 microseconds on datetime arithmetic.
    assert_offset_equal(
        Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1000)
    )
    assert_offset_equal(
        Milli(-1), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1)
    )
    assert_offset_equal(
        Milli(2), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000)
    )
    assert_offset_equal(
        2 * Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000)
    )
    assert_offset_equal(
        -1 * Milli(), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1)
    )

    assert Milli(3) + Milli(2) == Milli(5)
    assert Milli(3) - Milli(2) == Milli()


def test_MillisecondTimestampArithmetic():
    assert_offset_equal(
        Milli(), Timestamp("2010-01-01"), Timestamp("2010-01-01 00:00:00.001")
    )
    assert_offset_equal(
        Milli(-1), Timestamp("2010-01-01 00:00:00.001"), Timestamp("2010-01-01")
    )


def test_Microsecond():
    assert_offset_equal(Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1))
    assert_offset_equal(
        Micro(-1), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1)
    )

    assert_offset_equal(
        2 * Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2)
    )
    assert_offset_equal(
        -1 * Micro(), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1)
    )

    assert Micro(3) + Micro(2) == Micro(5)
    assert Micro(3) - Micro(2) == Micro()


def test_NanosecondGeneric():
    # Nano arithmetic works in both operand orders on Timestamp.
    timestamp = Timestamp(datetime(2010, 1, 1))
    assert timestamp.nanosecond == 0

    result = timestamp + Nano(10)
    assert result.nanosecond == 10

    reverse_result = Nano(10) + timestamp
    assert reverse_result.nanosecond == 10


def test_Nanosecond():
    timestamp = Timestamp(datetime(2010, 1, 1))
    assert_offset_equal(Nano(), timestamp, timestamp + np.timedelta64(1, "ns"))
    assert_offset_equal(Nano(-1), timestamp + np.timedelta64(1, "ns"), timestamp)
    assert_offset_equal(2 * Nano(), timestamp, timestamp + np.timedelta64(2, "ns"))
    assert_offset_equal(-1 * Nano(), timestamp + np.timedelta64(1, "ns"), timestamp)

    assert Nano(3) + Nano(2) == Nano(5)
    assert Nano(3) - Nano(2) == Nano()

    # GH9284
    assert Nano(1) + Nano(10) == Nano(11)
    assert Nano(5) + Micro(1) == Nano(1005)
    assert Micro(5) + Nano(1) == Nano(5001)
@pytest.mark.parametrize(
    "kls, expected",
    [
        (Hour, Timedelta(hours=5)),
        (Minute, Timedelta(hours=2, minutes=3)),
        (Second, Timedelta(hours=2, seconds=3)),
        (Milli, Timedelta(hours=2, milliseconds=3)),
        (Micro, Timedelta(hours=2, microseconds=3)),
        (Nano, Timedelta(hours=2, nanoseconds=3)),
    ],
)
def test_tick_addition(kls, expected):
    # Tick + timedelta-like returns a Timedelta, in either operand order.
    offset = kls(3)
    td = Timedelta(hours=2)

    for other in [td, td.to_pytimedelta(), td.to_timedelta64()]:
        result = offset + other
        assert isinstance(result, Timedelta)
        assert result == expected

        result = other + offset
        assert isinstance(result, Timedelta)
        assert result == expected


@pytest.mark.parametrize("cls", tick_classes)
def test_tick_division(cls):
    off = cls(10)

    assert off / cls(5) == 2
    assert off / 2 == cls(5)
    assert off / 2.0 == cls(5)

    assert off / off.delta == 1
    assert off / off.delta.to_timedelta64() == 1

    assert off / Nano(1) == off.delta / Nano(1).delta

    if cls is not Nano:
        # A case where we end up with a smaller class
        result = off / 1000
        assert isinstance(result, offsets.Tick)
        assert not isinstance(result, cls)
        assert result.delta == off.delta / 1000

    if cls._nanos_inc < Timedelta(seconds=1).value:
        # Case where we end up with a bigger class
        result = off / 0.001
        assert isinstance(result, offsets.Tick)
        assert not isinstance(result, cls)
        assert result.delta == off.delta / 0.001


def test_tick_mul_float():
    off = Micro(2)

    # Case where we retain type
    result = off * 1.5
    expected = Micro(3)
    assert result == expected
    assert isinstance(result, Micro)

    # Case where we bump up to the next type
    result = off * 1.25
    expected = Nano(2500)
    assert result == expected
    assert isinstance(result, Nano)


@pytest.mark.parametrize("cls", tick_classes)
def test_tick_rdiv(cls):
    # Scalars cannot be divided by a Tick; timedelta-likes can.
    off = cls(10)
    delta = off.delta
    td64 = delta.to_timedelta64()
    instance__type = ".".join([cls.__module__, cls.__name__])
    msg = (
        "unsupported operand type\\(s\\) for \\/: 'int'|'float' and "
        f"'{instance__type}'"
    )

    with pytest.raises(TypeError, match=msg):
        2 / off
    with pytest.raises(TypeError, match=msg):
        2.0 / off

    assert (td64 * 2.5) / off == 2.5

    if cls is not Nano:
        # skip pytimedelta for Nano since it gets dropped
        assert (delta.to_pytimedelta() * 2) / off == 2

    result = np.array([2 * td64, td64]) / off
    expected = np.array([2.0, 1.0])
    tm.assert_numpy_array_equal(result, expected)


@pytest.mark.parametrize("cls1", tick_classes)
@pytest.mark.parametrize("cls2", tick_classes)
def test_tick_zero(cls1, cls2):
    # Zero-count ticks compare equal across classes and are additive no-ops.
    assert cls1(0) == cls2(0)
    assert cls1(0) + cls2(0) == cls1(0)

    if cls1 is not Nano:
        assert cls1(2) + cls2(0) == cls1(2)

    if cls1 is Nano:
        assert cls1(2) + Nano(0) == cls1(2)


@pytest.mark.parametrize("cls", tick_classes)
def test_tick_equalities(cls):
    # Default construction is count 1.
    assert cls() == cls(1)


@pytest.mark.parametrize("cls", tick_classes)
def test_tick_offset(cls):
    assert not cls().is_anchored()


@pytest.mark.parametrize("cls", tick_classes)
def test_compare_ticks(cls):
    # Ordering follows the integer count within one class.
    three = cls(3)
    four = cls(4)

    assert three < cls(4)
    assert cls(3) < four
    assert four > cls(3)
    assert cls(4) > three
    assert cls(3) == cls(3)
    assert cls(3) != cls(4)


@pytest.mark.parametrize("cls", tick_classes)
def test_compare_ticks_to_strs(cls):
    # GH#23524
    off = cls(19)

    # These tests should work with any strings, but we particularly are
    # interested in "infer" as that comparison is convenient to make in
    # Datetime/Timedelta Array/Index constructors
    assert not off == "infer"
    assert not "foo" == off

    instance_type = ".".join([cls.__module__, cls.__name__])
    msg = (
        "'<'|'<='|'>'|'>=' not supported between instances of "
        f"'str' and '{instance_type}'|'{instance_type}' and 'str'"
    )

    for left, right in [("infer", off), (off, "infer")]:
        with pytest.raises(TypeError, match=msg):
            left < right
        with pytest.raises(TypeError, match=msg):
            left <= right
        with pytest.raises(TypeError, match=msg):
            left > right
        with pytest.raises(TypeError, match=msg):
            left >= right


@pytest.mark.parametrize("cls", tick_classes)
def test_compare_ticks_to_timedeltalike(cls):
    # A tick compares equal (and not strictly greater/less) to its own delta.
    off = cls(19)

    td = off.delta

    others = [td, td.to_timedelta64()]
    if cls is not Nano:
        others.append(td.to_pytimedelta())

    for other in others:
        assert off == other
        assert not off != other
        assert not off < other
        assert not off > other
        assert off <= other
        assert off >= other
| bsd-3-clause | 8580dd3038ffd22b8f3709cfed2f7521 | 26.800512 | 88 | 0.59126 | 3.165405 | false | true | false | false |
pandas-dev/pandas | asv_bench/benchmarks/tslibs/fields.py | 3 | 1749 | import numpy as np
from pandas._libs.tslibs.fields import (
get_date_field,
get_start_end_field,
get_timedelta_field,
)
from .tslib import _sizes
class TimeGetTimedeltaField:
    """Benchmark ``get_timedelta_field`` over array size and field name."""

    params = [
        _sizes,
        ["days", "seconds", "microseconds", "nanoseconds"],
    ]
    param_names = ["size", "field"]

    def setup(self, size, field):
        # Random i8 payload; the actual values are irrelevant to the timing.
        self.i8data = np.random.randint(0, 10, size=size, dtype="i8")

    def time_get_timedelta_field(self, size, field):
        get_timedelta_field(self.i8data, field)
class TimeGetDateField:
    """Benchmark ``get_date_field`` for every supported datetime field code."""

    params = [
        _sizes,
        [
            "Y",
            "M",
            "D",
            "h",
            "m",
            "s",
            "us",
            "ns",
            "doy",
            "dow",
            "woy",
            "q",
            "dim",
            "is_leap_year",
        ],
    ]
    param_names = ["size", "field"]

    def setup(self, size, field):
        # Random i8 payload; the actual values are irrelevant to the timing.
        self.i8data = np.random.randint(0, 10, size=size, dtype="i8")

    def time_get_date_field(self, size, field):
        get_date_field(self.i8data, field)
class TimeGetStartEndField:
    """Benchmark ``get_start_end_field`` across period/side/freq combinations."""

    params = [
        _sizes,
        ["start", "end"],
        ["month", "quarter", "year"],
        ["B", None, "QS"],
        [12, 3, 5],
    ]
    param_names = ["size", "side", "period", "freqstr", "month_kw"]

    def setup(self, size, side, period, freqstr, month_kw):
        # Random i8 payload; derive the attribute name once, outside the
        # timed section.
        self.i8data = np.random.randint(0, 10, size=size, dtype="i8")
        self.attrname = f"is_{period}_{side}"

    def time_get_start_end_field(self, size, side, period, freqstr, month_kw):
        get_start_end_field(self.i8data, self.attrname, freqstr, month_kw=month_kw)
| bsd-3-clause | c45c248ca844887cd11c6b19c80a6366 | 22.635135 | 83 | 0.509434 | 3.293785 | false | false | false | false |
"""
Check that urllib.request.urlopen is not used directly; pandas code should use
pandas.io.common.urlopen instead.

This makes it easier to grep for usage of the pandas urlopen wrapper.

This is meant to be run as a pre-commit hook - to run it manually, you can do:

    pre-commit run use-io-common-urlopen --all-files

"""
from __future__ import annotations

import argparse
import ast
import sys
from typing import Sequence
# One-line report emitted for each offending import, pre-commit style.
ERROR_MESSAGE = (
    "{path}:{lineno}:{col_offset}: "
    "Don't use urllib.request.urlopen, use pandas.io.common.urlopen instead\n"
)


class Visitor(ast.NodeVisitor):
    """AST visitor that flags ``from urllib.request import urlopen``."""

    def __init__(self, path: str) -> None:
        # Path is only used for the error report.
        self.path = path

    def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
        # pandas.io.common.urlopen should be used instead of
        # urllib.request.urlopen
        module = node.module
        brings_in_urlopen = any(alias.name == "urlopen" for alias in node.names)
        if module is not None and module.startswith("urllib.request") and brings_in_urlopen:
            sys.stdout.write(
                ERROR_MESSAGE.format(
                    path=self.path, lineno=node.lineno, col_offset=node.col_offset
                )
            )
            sys.exit(1)
        super().generic_visit(node)
def use_io_common_urlopen(content: str, path: str) -> None:
    """Parse *content* and exit(1) if it imports urllib.request.urlopen."""
    Visitor(path).visit(ast.parse(content))
def main(argv: Sequence[str] | None = None) -> None:
    """Check every file path passed on the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument("paths", nargs="*")
    args = parser.parse_args(argv)

    for path in args.paths:
        with open(path, encoding="utf-8") as fd:
            use_io_common_urlopen(fd.read(), path)


if __name__ == "__main__":
    main()
| bsd-3-clause | 6eb51b89146d82dee2439cacca41685c | 25.619048 | 78 | 0.621348 | 3.62203 | false | false | false | false |
pandas-dev/pandas | pandas/io/formats/excel.py | 1 | 32051 | """
Utilities for conversion to writer-agnostic Excel representation.
"""
from __future__ import annotations
from functools import (
lru_cache,
reduce,
)
import itertools
import re
from typing import (
Any,
Callable,
Hashable,
Iterable,
Mapping,
Sequence,
cast,
)
import warnings
import numpy as np
from pandas._libs.lib import is_list_like
from pandas._typing import (
IndexLabel,
StorageOptions,
)
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes import missing
from pandas.core.dtypes.common import (
is_float,
is_scalar,
)
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
)
import pandas.core.common as com
from pandas.core.shared_docs import _shared_docs
from pandas.io.formats._color_data import CSS4_COLORS
from pandas.io.formats.css import (
CSSResolver,
CSSWarning,
)
from pandas.io.formats.format import get_level_lengths
from pandas.io.formats.printing import pprint_thing
class ExcelCell:
    """A single writer-agnostic cell: position, value, style, optional merge."""

    __fields__ = ("row", "col", "val", "style", "mergestart", "mergeend")
    __slots__ = __fields__

    def __init__(
        self,
        row: int,
        col: int,
        val,
        style=None,
        mergestart: int | None = None,
        mergeend: int | None = None,
    ) -> None:
        # Assign each slot from the matching positional argument.
        values = (row, col, val, style, mergestart, mergeend)
        for name, value in zip(self.__fields__, values):
            setattr(self, name, value)
class CssExcelCell(ExcelCell):
    """ExcelCell whose style may be derived from CSS declarations."""

    def __init__(
        self,
        row: int,
        col: int,
        val,
        style: dict | None,
        css_styles: dict[tuple[int, int], list[tuple[str, Any]]] | None,
        css_row: int,
        css_col: int,
        css_converter: Callable | None,
        **kwargs,
    ) -> None:
        if css_styles and css_converter:
            # Keep only one (case-insensitive) declaration per CSS property,
            # then freeze so the converter's cache is order-invariant.
            deduped = {
                prop.lower(): value for prop, value in css_styles[css_row, css_col]
            }
            style = css_converter(frozenset(deduped.items()))

        super().__init__(row=row, col=col, val=val, style=style, **kwargs)
class CSSToExcelConverter:
    """
    A callable for converting CSS declarations to ExcelWriter styles

    Supports parts of CSS 2.2, with minimal CSS 3.0 support (e.g. text-shadow),
    focusing on font styling, backgrounds, borders and alignment.

    Operates by first computing CSS styles in a fairly generic
    way (see :meth:`compute_css`) then determining Excel style
    properties from CSS properties (see :meth:`build_xlstyle`).

    Parameters
    ----------
    inherited : str, optional
        CSS declarations understood to be the containing scope for the
        CSS processed by :meth:`__call__`.
    """

    NAMED_COLORS = CSS4_COLORS

    # CSS vertical-align keyword -> openpyxl vertical anchor.
    VERTICAL_MAP = {
        "top": "top",
        "text-top": "top",
        "middle": "center",
        "baseline": "bottom",
        "bottom": "bottom",
        "text-bottom": "bottom",
        # OpenXML also has 'justify', 'distributed'
    }

    # CSS font-weight keyword or numeric weight -> bold flag.
    BOLD_MAP = {
        "bold": True,
        "bolder": True,
        "600": True,
        "700": True,
        "800": True,
        "900": True,
        "normal": False,
        "lighter": False,
        "100": False,
        "200": False,
        "300": False,
        "400": False,
        "500": False,
    }

    ITALIC_MAP = {
        "normal": False,
        "italic": True,
        "oblique": True,
    }

    # CSS generic font family -> OpenXML font family number.
    FAMILY_MAP = {
        "serif": 1,  # roman
        "sans-serif": 2,  # swiss
        "cursive": 4,  # script
        "fantasy": 5,  # decorative
    }

    # NB: Most of the methods here could be classmethods, as only __init__
    # and __call__ make use of instance attributes. We leave them as
    # instancemethods so that users can easily experiment with extensions
    # without monkey-patching.
    inherited: dict[str, str] | None

    def __init__(self, inherited: str | None = None) -> None:
        # Pre-compute the inherited (outer-scope) CSS once, if given.
        if inherited is not None:
            self.inherited = self.compute_css(inherited)
        else:
            self.inherited = None
        # We should avoid lru_cache on the __call__ method.
        # Otherwise once the method __call__ has been called
        # garbage collection no longer deletes the instance.
        self._call_cached = lru_cache(maxsize=None)(self._call_uncached)

    # Shared, stateless resolver that normalizes raw CSS declarations.
    compute_css = CSSResolver()
    def __call__(
        self, declarations: str | frozenset[tuple[str, str]]
    ) -> dict[str, dict[str, str]]:
        """
        Convert CSS declarations to ExcelWriter style.

        Parameters
        ----------
        declarations : str | frozenset[tuple[str, str]]
            CSS string or set of CSS declaration tuples.
            e.g. "font-weight: bold; background: blue" or
            {("font-weight", "bold"), ("background", "blue")}

        Returns
        -------
        xlstyle : dict
            A style as interpreted by ExcelWriter when found in
            ExcelCell.style.
        """
        # Memoized per instance via lru_cache; see __init__ for why the
        # cache wraps _call_uncached rather than __call__ itself.
        return self._call_cached(declarations)
def _call_uncached(
self, declarations: str | frozenset[tuple[str, str]]
) -> dict[str, dict[str, str]]:
properties = self.compute_css(declarations, self.inherited)
return self.build_xlstyle(properties)
def build_xlstyle(self, props: Mapping[str, str]) -> dict[str, dict[str, str]]:
out = {
"alignment": self.build_alignment(props),
"border": self.build_border(props),
"fill": self.build_fill(props),
"font": self.build_font(props),
"number_format": self.build_number_format(props),
}
# TODO: handle cell width and height: needs support in pandas.io.excel
def remove_none(d: dict[str, str | None]) -> None:
"""Remove key where value is None, through nested dicts"""
for k, v in list(d.items()):
if v is None:
del d[k]
elif isinstance(v, dict):
remove_none(v)
if not v:
del d[k]
remove_none(out)
return out
def build_alignment(self, props: Mapping[str, str]) -> dict[str, bool | str | None]:
# TODO: text-indent, padding-left -> alignment.indent
return {
"horizontal": props.get("text-align"),
"vertical": self._get_vertical_alignment(props),
"wrap_text": self._get_is_wrap_text(props),
}
def _get_vertical_alignment(self, props: Mapping[str, str]) -> str | None:
vertical_align = props.get("vertical-align")
if vertical_align:
return self.VERTICAL_MAP.get(vertical_align)
return None
def _get_is_wrap_text(self, props: Mapping[str, str]) -> bool | None:
if props.get("white-space") is None:
return None
return bool(props["white-space"] not in ("nowrap", "pre", "pre-line"))
    def build_border(
        self, props: Mapping[str, str]
    ) -> dict[str, dict[str, str | None]]:
        """Build the per-side {"style", "color"} border mapping for ExcelWriter."""
        return {
            side: {
                # Color is also passed to _border_style, which uses it to
                # decide between dropping the border and an explicit "none".
                "style": self._border_style(
                    props.get(f"border-{side}-style"),
                    props.get(f"border-{side}-width"),
                    self.color_to_excel(props.get(f"border-{side}-color")),
                ),
                "color": self.color_to_excel(props.get(f"border-{side}-color")),
            }
            for side in ["top", "right", "bottom", "left"]
        }
    def _border_style(self, style: str | None, width: str | None, color: str | None):
        # convert styles and widths to openxml, one of:
        #       'dashDot'
        #       'dashDotDot'
        #       'dashed'
        #       'dotted'
        #       'double'
        #       'hair'
        #       'medium'
        #       'mediumDashDot'
        #       'mediumDashDotDot'
        #       'mediumDashed'
        #       'slantDashDot'
        #       'thick'
        #       'thin'
        if width is None and style is None and color is None:
            # Return None will remove "border" from style dictionary
            return None

        if width is None and style is None:
            # Return "none" will keep "border" in style dictionary
            return "none"

        if style in ("none", "hidden"):
            return "none"

        # Zero-ish widths also mean "no visible border".
        width_name = self._get_width_name(width)
        if width_name is None:
            return "none"

        if style in (None, "groove", "ridge", "inset", "outset", "solid"):
            # not handled; fall back to a plain border of the given width
            return width_name

        if style == "double":
            return "double"
        if style == "dotted":
            if width_name in ("hair", "thin"):
                return "dotted"
            return "mediumDashDotDot"
        if style == "dashed":
            if width_name in ("hair", "thin"):
                return "dashed"
            return "mediumDashed"
        # NOTE(review): any other style falls through and implicitly returns
        # None; all valid CSS border styles are covered above, so this path is
        # unreachable for well-formed input — confirm before relying on it.
def _get_width_name(self, width_input: str | None) -> str | None:
width = self._width_to_float(width_input)
if width < 1e-5:
return None
elif width < 1.3:
return "thin"
elif width < 2.8:
return "medium"
return "thick"
def _width_to_float(self, width: str | None) -> float:
if width is None:
width = "2pt"
return self._pt_to_float(width)
def _pt_to_float(self, pt_string: str) -> float:
assert pt_string.endswith("pt")
return float(pt_string.rstrip("pt"))
    def build_fill(self, props: Mapping[str, str]):
        # TODO: perhaps allow for special properties
        #       -excel-pattern-bgcolor and -excel-pattern-type
        fill_color = props.get("background-color")
        if fill_color not in (None, "transparent", "none"):
            return {"fgColor": self.color_to_excel(fill_color), "patternType": "solid"}
        # Implicitly returns None for absent/transparent backgrounds, which
        # causes build_xlstyle to drop the "fill" key from the style dict.
def build_number_format(self, props: Mapping[str, str]) -> dict[str, str | None]:
fc = props.get("number-format")
fc = fc.replace("§", ";") if isinstance(fc, str) else fc
return {"format_code": fc}
    def build_font(
        self, props: Mapping[str, str]
    ) -> dict[str, bool | float | str | None]:
        # Translate CSS font/text-decoration properties into ExcelWriter
        # font settings; None entries are pruned later by build_xlstyle.
        font_names = self._get_font_names(props)
        decoration = self._get_decoration(props)
        return {
            "name": font_names[0] if font_names else None,
            "family": self._select_font_family(font_names),
            "size": self._get_font_size(props),
            "bold": self._get_is_bold(props),
            "italic": self._get_is_italic(props),
            "underline": ("single" if "underline" in decoration else None),
            # "or None" drops the key entirely when there is no strike-through
            "strike": ("line-through" in decoration) or None,
            "color": self.color_to_excel(props.get("color")),
            # shadow if nonzero digit before shadow color
            "shadow": self._get_shadow(props),
        }
def _get_is_bold(self, props: Mapping[str, str]) -> bool | None:
weight = props.get("font-weight")
if weight:
return self.BOLD_MAP.get(weight)
return None
def _get_is_italic(self, props: Mapping[str, str]) -> bool | None:
font_style = props.get("font-style")
if font_style:
return self.ITALIC_MAP.get(font_style)
return None
def _get_decoration(self, props: Mapping[str, str]) -> Sequence[str]:
decoration = props.get("text-decoration")
if decoration is not None:
return decoration.split()
else:
return ()
def _get_underline(self, decoration: Sequence[str]) -> str | None:
if "underline" in decoration:
return "single"
return None
def _get_shadow(self, props: Mapping[str, str]) -> bool | None:
if "text-shadow" in props:
return bool(re.search("^[^#(]*[1-9]", props["text-shadow"]))
return None
def _get_font_names(self, props: Mapping[str, str]) -> Sequence[str]:
font_names_tmp = re.findall(
r"""(?x)
(
"(?:[^"]|\\")+"
|
'(?:[^']|\\')+'
|
[^'",]+
)(?=,|\s*$)
""",
props.get("font-family", ""),
)
font_names = []
for name in font_names_tmp:
if name[:1] == '"':
name = name[1:-1].replace('\\"', '"')
elif name[:1] == "'":
name = name[1:-1].replace("\\'", "'")
else:
name = name.strip()
if name:
font_names.append(name)
return font_names
def _get_font_size(self, props: Mapping[str, str]) -> float | None:
size = props.get("font-size")
if size is None:
return size
return self._pt_to_float(size)
def _select_font_family(self, font_names) -> int | None:
family = None
for name in font_names:
family = self.FAMILY_MAP.get(name)
if family:
break
return family
    def color_to_excel(self, val: str | None) -> str | None:
        """Convert a CSS color (hex or named) to a 6-digit Excel hex code."""
        if val is None:
            return None

        if self._is_hex_color(val):
            return self._convert_hex_to_excel(val)

        try:
            return self.NAMED_COLORS[val]
        except KeyError:
            # Unknown color names are dropped (return None) with a warning
            # rather than failing the whole export.
            warnings.warn(
                f"Unhandled color format: {repr(val)}",
                CSSWarning,
                stacklevel=find_stack_level(),
            )
        return None
def _is_hex_color(self, color_string: str) -> bool:
return bool(color_string.startswith("#"))
def _convert_hex_to_excel(self, color_string: str) -> str:
code = color_string.lstrip("#")
if self._is_shorthand_color(color_string):
return (code[0] * 2 + code[1] * 2 + code[2] * 2).upper()
else:
return code.upper()
def _is_shorthand_color(self, color_string: str) -> bool:
"""Check if color code is shorthand.
#FFF is a shorthand as opposed to full #FFFFFF.
"""
code = color_string.lstrip("#")
if len(code) == 3:
return True
elif len(code) == 6:
return False
else:
raise ValueError(f"Unexpected color {color_string}")
class ExcelFormatter:
    """
    Class for formatting a DataFrame to a list of ExcelCells.

    Parameters
    ----------
    df : DataFrame or Styler
    na_rep : str
        Representation used for missing values.
    float_format : str, default None
        Format string for floating point numbers
    cols : sequence, optional
        Columns to write
    header : bool or sequence of str, default True
        Write out column names. If a list of string is given it is
        assumed to be aliases for the column names
    index : bool, default True
        output row names (index)
    index_label : str or sequence, default None
        Column label for index column(s) if desired. If None is given, and
        `header` and `index` are True, then the index names are used. A
        sequence should be given if the DataFrame uses MultiIndex.
    merge_cells : bool, default False
        Format MultiIndex and Hierarchical Rows as merged cells.
    inf_rep : str, default `'inf'`
        representation for np.inf values (which aren't representable in Excel)
        A `'-'` sign will be added in front of -inf.
    style_converter : callable, optional
        This translates Styler styles (CSS) into ExcelWriter styles.
        Defaults to ``CSSToExcelConverter()``.
        It should have signature css_declarations string -> excel style.
        This is only called for body cells.
    """

    # Hard limits of the xlsx format: 1,048,576 rows by 16,384 columns.
    max_rows = 2**20
    max_cols = 2**14
def __init__(
self,
df,
na_rep: str = "",
float_format: str | None = None,
cols: Sequence[Hashable] | None = None,
header: Sequence[Hashable] | bool = True,
index: bool = True,
index_label: IndexLabel | None = None,
merge_cells: bool = False,
inf_rep: str = "inf",
style_converter: Callable | None = None,
) -> None:
self.rowcounter = 0
self.na_rep = na_rep
if not isinstance(df, DataFrame):
self.styler = df
self.styler._compute() # calculate applied styles
df = df.data
if style_converter is None:
style_converter = CSSToExcelConverter()
self.style_converter: Callable | None = style_converter
else:
self.styler = None
self.style_converter = None
self.df = df
if cols is not None:
# all missing, raise
if not len(Index(cols).intersection(df.columns)):
raise KeyError("passes columns are not ALL present dataframe")
if len(Index(cols).intersection(df.columns)) != len(set(cols)):
# Deprecated in GH#17295, enforced in 1.0.0
raise KeyError("Not all names specified in 'columns' are found")
self.df = df.reindex(columns=cols)
self.columns = self.df.columns
self.float_format = float_format
self.index = index
self.index_label = index_label
self.header = header
self.merge_cells = merge_cells
self.inf_rep = inf_rep
@property
def header_style(self) -> dict[str, dict[str, str | bool]]:
return {
"font": {"bold": True},
"borders": {
"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin",
},
"alignment": {"horizontal": "center", "vertical": "top"},
}
    def _format_value(self, val):
        # Normalise a single cell value for Excel: NA replacement first, then
        # +/-inf replacement (checked before float_format so infinities are
        # never run through the format string), then optional float formatting.
        if is_scalar(val) and missing.isna(val):
            val = self.na_rep
        elif is_float(val):
            if missing.isposinf_scalar(val):
                val = self.inf_rep
            elif missing.isneginf_scalar(val):
                val = f"-{self.inf_rep}"
            elif self.float_format is not None:
                val = float(self.float_format % val)
        # Excel cannot store timezone-aware datetimes; fail loudly.
        if getattr(val, "tzinfo", None) is not None:
            raise ValueError(
                "Excel does not support datetimes with "
                "timezones. Please ensure that datetimes "
                "are timezone unaware before writing to Excel."
            )
        return val
    def _format_header_mi(self) -> Iterable[ExcelCell]:
        """Yield header cells for MultiIndex columns.

        With ``merge_cells`` each column level gets its own row and spans are
        merged; otherwise levels are joined with "." into a single legacy row.
        """
        if self.columns.nlevels > 1:
            if not self.index:
                raise NotImplementedError(
                    "Writing to Excel with MultiIndex columns and no "
                    "index ('index'=False) is not yet implemented."
                )

        if not (self._has_aliases or self.header):
            return

        columns = self.columns
        level_strs = columns.format(
            sparsify=self.merge_cells, adjoin=False, names=False
        )
        level_lengths = get_level_lengths(level_strs)
        coloffset = 0
        lnum = 0

        if self.index and isinstance(self.df.index, MultiIndex):
            # Reserve columns for all but the last index level.
            coloffset = len(self.df.index[0]) - 1

        if self.merge_cells:
            # Format multi-index as a merged cells.
            for lnum, name in enumerate(columns.names):
                yield ExcelCell(
                    row=lnum,
                    col=coloffset,
                    val=name,
                    style=self.header_style,
                )

            for lnum, (spans, levels, level_codes) in enumerate(
                zip(level_lengths, columns.levels, columns.codes)
            ):
                values = levels.take(level_codes)
                for i, span_val in spans.items():
                    # Only spans wider than one column get merge coordinates.
                    mergestart, mergeend = None, None
                    if span_val > 1:
                        mergestart, mergeend = lnum, coloffset + i + span_val
                    yield CssExcelCell(
                        row=lnum,
                        col=coloffset + i + 1,
                        val=values[i],
                        style=self.header_style,
                        css_styles=getattr(self.styler, "ctx_columns", None),
                        css_row=lnum,
                        css_col=i,
                        css_converter=self.style_converter,
                        mergestart=mergestart,
                        mergeend=mergeend,
                    )
        else:
            # Format in legacy format with dots to indicate levels.
            for i, values in enumerate(zip(*level_strs)):
                v = ".".join(map(pprint_thing, values))
                yield CssExcelCell(
                    row=lnum,
                    col=coloffset + i + 1,
                    val=v,
                    style=self.header_style,
                    css_styles=getattr(self.styler, "ctx_columns", None),
                    css_row=lnum,
                    css_col=i,
                    css_converter=self.style_converter,
                )

        # Remember the last header row written so the body starts below it.
        self.rowcounter = lnum
    def _format_header_regular(self) -> Iterable[ExcelCell]:
        """Yield one header cell per column for flat (non-MultiIndex) columns."""
        if self._has_aliases or self.header:
            coloffset = 0

            if self.index:
                # Data columns start after the index column(s).
                coloffset = 1
                if isinstance(self.df.index, MultiIndex):
                    coloffset = len(self.df.index.names)

            colnames = self.columns
            if self._has_aliases:
                self.header = cast(Sequence, self.header)

                # Aliases must cover every column exactly.
                if len(self.header) != len(self.columns):
                    raise ValueError(
                        f"Writing {len(self.columns)} cols "
                        f"but got {len(self.header)} aliases"
                    )
                colnames = self.header

            for colindex, colname in enumerate(colnames):
                yield CssExcelCell(
                    row=self.rowcounter,
                    col=colindex + coloffset,
                    val=colname,
                    style=self.header_style,
                    css_styles=getattr(self.styler, "ctx_columns", None),
                    css_row=0,
                    css_col=colindex,
                    css_converter=self.style_converter,
                )
def _format_header(self) -> Iterable[ExcelCell]:
gen: Iterable[ExcelCell]
if isinstance(self.columns, MultiIndex):
gen = self._format_header_mi()
else:
gen = self._format_header_regular()
gen2: Iterable[ExcelCell] = ()
if self.df.index.names:
row = [x if x is not None else "" for x in self.df.index.names] + [
""
] * len(self.columns)
if reduce(lambda x, y: x and y, map(lambda x: x != "", row)):
gen2 = (
ExcelCell(self.rowcounter, colindex, val, self.header_style)
for colindex, val in enumerate(row)
)
self.rowcounter += 1
return itertools.chain(gen, gen2)
def _format_body(self) -> Iterable[ExcelCell]:
if isinstance(self.df.index, MultiIndex):
return self._format_hierarchical_rows()
else:
return self._format_regular_rows()
    def _format_regular_rows(self) -> Iterable[ExcelCell]:
        """Yield index-label and index-value cells, then the body cells."""
        if self._has_aliases or self.header:
            self.rowcounter += 1

        # output index and index_label?
        if self.index:
            # check aliases
            # if list only take first as this is not a MultiIndex
            if self.index_label and isinstance(
                self.index_label, (list, tuple, np.ndarray, Index)
            ):
                index_label = self.index_label[0]
            # if string good to go
            elif self.index_label and isinstance(self.index_label, str):
                index_label = self.index_label
            else:
                index_label = self.df.index.names[0]

            if isinstance(self.columns, MultiIndex):
                self.rowcounter += 1

            if index_label and self.header is not False:
                yield ExcelCell(self.rowcounter - 1, 0, index_label, self.header_style)

            # write index_values
            index_values = self.df.index
            if isinstance(self.df.index, PeriodIndex):
                # Periods are not Excel-representable; use their timestamps.
                index_values = self.df.index.to_timestamp()

            for idx, idxval in enumerate(index_values):
                yield CssExcelCell(
                    row=self.rowcounter + idx,
                    col=0,
                    val=idxval,
                    style=self.header_style,
                    css_styles=getattr(self.styler, "ctx_index", None),
                    css_row=idx,
                    css_col=0,
                    css_converter=self.style_converter,
                )
            coloffset = 1
        else:
            coloffset = 0

        yield from self._generate_body(coloffset)
    def _format_hierarchical_rows(self) -> Iterable[ExcelCell]:
        """Yield MultiIndex row-label cells (merged or flat), then the body."""
        if self._has_aliases or self.header:
            self.rowcounter += 1

        # Number of index-label columns written; also the body column offset.
        gcolidx = 0

        if self.index:
            index_labels = self.df.index.names
            # check for aliases
            if self.index_label and isinstance(
                self.index_label, (list, tuple, np.ndarray, Index)
            ):
                index_labels = self.index_label

            # MultiIndex columns require an extra row
            # with index names (blank if None) for
            # unambiguous round-trip, unless not merging,
            # in which case the names all go on one row Issue #11328
            if isinstance(self.columns, MultiIndex) and self.merge_cells:
                self.rowcounter += 1

            # if index labels are not empty go ahead and dump
            if com.any_not_none(*index_labels) and self.header is not False:
                for cidx, name in enumerate(index_labels):
                    yield ExcelCell(self.rowcounter - 1, cidx, name, self.header_style)

            if self.merge_cells:
                # Format hierarchical rows as merged cells.
                level_strs = self.df.index.format(
                    sparsify=True, adjoin=False, names=False
                )
                level_lengths = get_level_lengths(level_strs)

                for spans, levels, level_codes in zip(
                    level_lengths, self.df.index.levels, self.df.index.codes
                ):
                    values = levels.take(
                        level_codes,
                        allow_fill=levels._can_hold_na,
                        fill_value=levels._na_value,
                    )

                    for i, span_val in spans.items():
                        # Rows merge vertically within one label column.
                        mergestart, mergeend = None, None
                        if span_val > 1:
                            mergestart = self.rowcounter + i + span_val - 1
                            mergeend = gcolidx
                        yield CssExcelCell(
                            row=self.rowcounter + i,
                            col=gcolidx,
                            val=values[i],
                            style=self.header_style,
                            css_styles=getattr(self.styler, "ctx_index", None),
                            css_row=i,
                            css_col=gcolidx,
                            css_converter=self.style_converter,
                            mergestart=mergestart,
                            mergeend=mergeend,
                        )
                    gcolidx += 1

            else:
                # Format hierarchical rows with non-merged values.
                for indexcolvals in zip(*self.df.index):
                    for idx, indexcolval in enumerate(indexcolvals):
                        yield CssExcelCell(
                            row=self.rowcounter + idx,
                            col=gcolidx,
                            val=indexcolval,
                            style=self.header_style,
                            css_styles=getattr(self.styler, "ctx_index", None),
                            css_row=idx,
                            css_col=gcolidx,
                            css_converter=self.style_converter,
                        )
                    gcolidx += 1

        yield from self._generate_body(gcolidx)
    @property
    def _has_aliases(self) -> bool:
        """Whether the aliases for column names are present."""
        # A list-like ``header`` means the caller supplied replacement labels.
        return is_list_like(self.header)
def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]:
# Write the body of the frame data series by series.
for colidx in range(len(self.columns)):
series = self.df.iloc[:, colidx]
for i, val in enumerate(series):
yield CssExcelCell(
row=self.rowcounter + i,
col=colidx + coloffset,
val=val,
style=None,
css_styles=getattr(self.styler, "ctx", None),
css_row=i,
css_col=colidx,
css_converter=self.style_converter,
)
def get_formatted_cells(self) -> Iterable[ExcelCell]:
for cell in itertools.chain(self._format_header(), self._format_body()):
cell.val = self._format_value(cell.val)
yield cell
    @doc(storage_options=_shared_docs["storage_options"])
    def write(
        self,
        writer,
        sheet_name: str = "Sheet1",
        startrow: int = 0,
        startcol: int = 0,
        freeze_panes: tuple[int, int] | None = None,
        engine: str | None = None,
        storage_options: StorageOptions = None,
    ) -> None:
        """
        writer : path-like, file-like, or ExcelWriter object
            File path or existing ExcelWriter
        sheet_name : str, default 'Sheet1'
            Name of sheet which will contain DataFrame
        startrow :
            upper left cell row to dump data frame
        startcol :
            upper left cell column to dump data frame
        freeze_panes : tuple of integer (length 2), default None
            Specifies the one-based bottommost row and rightmost column that
            is to be frozen
        engine : string, default None
            write engine to use if writer is a path - you can also set this
            via the options ``io.excel.xlsx.writer``,
            or ``io.excel.xlsm.writer``.

        {storage_options}

            .. versionadded:: 1.2.0
        """
        from pandas.io.excel import ExcelWriter

        # Refuse data that exceeds the Excel worksheet limits (see
        # max_rows/max_cols) rather than writing a truncated sheet.
        num_rows, num_cols = self.df.shape
        if num_rows > self.max_rows or num_cols > self.max_cols:
            raise ValueError(
                f"This sheet is too large! Your sheet size is: {num_rows}, {num_cols} "
                f"Max sheet size is: {self.max_rows}, {self.max_cols}"
            )

        formatted_cells = self.get_formatted_cells()
        if isinstance(writer, ExcelWriter):
            # Caller owns the writer; do not close it here.
            need_save = False
        else:
            # error: Cannot instantiate abstract class 'ExcelWriter' with abstract
            # attributes 'engine', 'save', 'supported_extensions' and 'write_cells'
            writer = ExcelWriter(  # type: ignore[abstract]
                writer, engine=engine, storage_options=storage_options
            )
            need_save = True

        try:
            writer._write_cells(
                formatted_cells,
                sheet_name,
                startrow=startrow,
                startcol=startcol,
                freeze_panes=freeze_panes,
            )
        finally:
            # make sure to close opened file handles
            if need_save:
                writer.close()
| bsd-3-clause | a3de55fdec5ae06c52981cd235ae3da1 | 33.573894 | 88 | 0.522715 | 4.16775 | false | false | false | false |
pandas-dev/pandas | pandas/tests/io/test_spss.py | 8 | 2745 | from pathlib import Path
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
pyreadstat = pytest.importorskip("pyreadstat")
@pytest.mark.parametrize("path_klass", [lambda p: p, Path])
def test_spss_labelled_num(path_klass, datapath):
    # test file from the Haven project (https://haven.tidyverse.org/)
    # Also exercises both str and pathlib.Path inputs via ``path_klass``.
    fname = path_klass(datapath("io", "data", "spss", "labelled-num.sav"))

    # With convert_categoricals the value label is used (Categorical dtype).
    df = pd.read_spss(fname, convert_categoricals=True)
    expected = pd.DataFrame({"VAR00002": "This is one"}, index=[0])
    expected["VAR00002"] = pd.Categorical(expected["VAR00002"])
    tm.assert_frame_equal(df, expected)

    # Without conversion the raw numeric code is returned.
    df = pd.read_spss(fname, convert_categoricals=False)
    expected = pd.DataFrame({"VAR00002": 1.0}, index=[0])
    tm.assert_frame_equal(df, expected)
def test_spss_labelled_num_na(datapath):
    # test file from the Haven project (https://haven.tidyverse.org/)
    fname = datapath("io", "data", "spss", "labelled-num-na.sav")

    # Missing values survive the categorical conversion as None/NaN.
    df = pd.read_spss(fname, convert_categoricals=True)
    expected = pd.DataFrame({"VAR00002": ["This is one", None]})
    expected["VAR00002"] = pd.Categorical(expected["VAR00002"])
    tm.assert_frame_equal(df, expected)

    df = pd.read_spss(fname, convert_categoricals=False)
    expected = pd.DataFrame({"VAR00002": [1.0, np.nan]})
    tm.assert_frame_equal(df, expected)
def test_spss_labelled_str(datapath):
    # test file from the Haven project (https://haven.tidyverse.org/)
    fname = datapath("io", "data", "spss", "labelled-str.sav")

    # String codes ("M"/"F") are mapped to their labels when converting.
    df = pd.read_spss(fname, convert_categoricals=True)
    expected = pd.DataFrame({"gender": ["Male", "Female"]})
    expected["gender"] = pd.Categorical(expected["gender"])
    tm.assert_frame_equal(df, expected)

    df = pd.read_spss(fname, convert_categoricals=False)
    expected = pd.DataFrame({"gender": ["M", "F"]})
    tm.assert_frame_equal(df, expected)
def test_spss_umlauts(datapath):
    # test file from the Haven project (https://haven.tidyverse.org/)
    # Non-ASCII value labels must round-trip intact.
    fname = datapath("io", "data", "spss", "umlauts.sav")

    df = pd.read_spss(fname, convert_categoricals=True)
    expected = pd.DataFrame(
        {"var1": ["the ä umlaut", "the ü umlaut", "the ä umlaut", "the ö umlaut"]}
    )
    expected["var1"] = pd.Categorical(expected["var1"])
    tm.assert_frame_equal(df, expected)

    df = pd.read_spss(fname, convert_categoricals=False)
    expected = pd.DataFrame({"var1": [1.0, 2.0, 1.0, 3.0]})
    tm.assert_frame_equal(df, expected)
def test_spss_usecols(datapath):
    # usecols must be list-like
    fname = datapath("io", "data", "spss", "labelled-num.sav")

    # A bare string is rejected even though strings are iterable.
    with pytest.raises(TypeError, match="usecols must be list-like."):
        pd.read_spss(fname, usecols="VAR00002")
| bsd-3-clause | 72c37d26809dfe91ed4d47a207a6b089 | 35.065789 | 82 | 0.668734 | 3.059152 | false | true | false | false |
pandas-dev/pandas | pandas/tests/indexes/datetimes/test_unique.py | 3 | 2065 | from datetime import (
datetime,
timedelta,
)
from pandas import (
DatetimeIndex,
NaT,
Timestamp,
)
import pandas._testing as tm
def test_unique(tz_naive_fixture):
idx = DatetimeIndex(["2017"] * 2, tz=tz_naive_fixture)
expected = idx[:1]
result = idx.unique()
tm.assert_index_equal(result, expected)
# GH#21737
# Ensure the underlying data is consistent
assert result[0] == expected[0]
def test_index_unique(rand_series_with_duplicate_datetimeindex):
dups = rand_series_with_duplicate_datetimeindex
index = dups.index
uniques = index.unique()
expected = DatetimeIndex(
[
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
]
)
assert uniques.dtype == "M8[ns]" # sanity
tm.assert_index_equal(uniques, expected)
assert index.nunique() == 4
# GH#2563
assert isinstance(uniques, DatetimeIndex)
dups_local = index.tz_localize("US/Eastern")
dups_local.name = "foo"
result = dups_local.unique()
expected = DatetimeIndex(expected, name="foo")
expected = expected.tz_localize("US/Eastern")
assert result.tz is not None
assert result.name == "foo"
tm.assert_index_equal(result, expected)
def test_index_unique2():
    # NaT, note this is excluded
    base = [1370745748 + t for t in range(20)] + [NaT.value]
    idx = DatetimeIndex(base * 3)
    tm.assert_index_equal(idx.unique(), DatetimeIndex(base))

    assert idx.nunique() == 20
    assert idx.nunique(dropna=False) == 21
def test_index_unique3():
    # 20 distinct second-spaced timestamps plus NaT, each repeated 3 times;
    # NaT is excluded from nunique() unless dropna=False.
    base = [
        Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)
    ] + [NaT]
    idx = DatetimeIndex(base * 3)
    tm.assert_index_equal(idx.unique(), DatetimeIndex(base))

    assert idx.nunique() == 20
    assert idx.nunique(dropna=False) == 21
def test_is_unique_monotonic(rand_series_with_duplicate_datetimeindex):
index = rand_series_with_duplicate_datetimeindex.index
assert not index.is_unique
| bsd-3-clause | 3611ec5d16b884c1d876acaab2205e95 | 25.818182 | 82 | 0.642615 | 3.413223 | false | true | false | false |
pandas-dev/pandas | pandas/core/arrays/sparse/accessor.py | 1 | 11914 | """Sparse accessor"""
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import find_common_type
from pandas.core.accessor import (
PandasDelegate,
delegate_names,
)
from pandas.core.arrays.sparse.array import SparseArray
from pandas.core.arrays.sparse.dtype import SparseDtype
if TYPE_CHECKING:
from pandas import (
DataFrame,
Series,
)
class BaseAccessor:
    """Shared plumbing for the Series/DataFrame ``.sparse`` accessors."""

    _validation_msg = "Can only use the '.sparse' accessor with Sparse data."

    def __init__(self, data=None) -> None:
        # ``data`` is the Series/DataFrame the accessor is attached to.
        self._parent = data
        self._validate(data)

    def _validate(self, data):
        # Subclasses raise AttributeError when ``data`` is not sparse.
        raise NotImplementedError
@delegate_names(
    SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
)
class SparseAccessor(BaseAccessor, PandasDelegate):
    """
    Accessor for sparse data in a Series.

    Exposes SparseArray properties (``npoints``, ``density``, ``fill_value``,
    ``sp_values``) and conversion to/from scipy.sparse COO matrices.
    """

    def _validate(self, data):
        # Only Series backed by a SparseDtype may use the accessor.
        if not isinstance(data.dtype, SparseDtype):
            raise AttributeError(self._validation_msg)

    def _delegate_property_get(self, name, *args, **kwargs):
        # Delegated properties are read straight off the SparseArray.
        return getattr(self._parent.array, name)

    def _delegate_method(self, name, *args, **kwargs):
        if name == "from_coo":
            return self.from_coo(*args, **kwargs)
        elif name == "to_coo":
            return self.to_coo(*args, **kwargs)
        else:
            # Only the two conversion methods are delegated.
            raise ValueError

    @classmethod
    def from_coo(cls, A, dense_index: bool = False) -> Series:
        """
        Create a Series with sparse values from a scipy.sparse.coo_matrix.

        Parameters
        ----------
        A : scipy.sparse.coo_matrix
        dense_index : bool, default False
            If False (default), the index consists of only the
            coords of the non-null entries of the original coo_matrix.
            If True, the index consists of the full sorted
            (row, col) coordinates of the coo_matrix.

        Returns
        -------
        s : Series
            A Series with sparse values.

        Examples
        --------
        >>> from scipy import sparse

        >>> A = sparse.coo_matrix(
        ...     ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
        ... )
        >>> A
        <3x4 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in COOrdinate format>

        >>> A.todense()
        matrix([[0., 0., 1., 2.],
        [3., 0., 0., 0.],
        [0., 0., 0., 0.]])

        >>> ss = pd.Series.sparse.from_coo(A)
        >>> ss
        0  2    1.0
           3    2.0
        1  0    3.0
        dtype: Sparse[float64, nan]
        """
        from pandas import Series
        from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series

        result = coo_to_sparse_series(A, dense_index=dense_index)
        # Re-wrap so a plain Series is returned without copying the values.
        result = Series(result.array, index=result.index, copy=False)

        return result

    def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False):
        """
        Create a scipy.sparse.coo_matrix from a Series with MultiIndex.

        Use row_levels and column_levels to determine the row and column
        coordinates respectively. row_levels and column_levels are the names
        (labels) or numbers of the levels. {row_levels, column_levels} must be
        a partition of the MultiIndex level names (or numbers).

        Parameters
        ----------
        row_levels : tuple/list
        column_levels : tuple/list
        sort_labels : bool, default False
            Sort the row and column labels before forming the sparse matrix.
            When `row_levels` and/or `column_levels` refer to a single level,
            set to `True` for a faster execution.

        Returns
        -------
        y : scipy.sparse.coo_matrix
        rows : list (row labels)
        columns : list (column labels)

        Examples
        --------
        >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
        >>> s.index = pd.MultiIndex.from_tuples(
        ...     [
        ...         (1, 2, "a", 0),
        ...         (1, 2, "a", 1),
        ...         (1, 1, "b", 0),
        ...         (1, 1, "b", 1),
        ...         (2, 1, "b", 0),
        ...         (2, 1, "b", 1)
        ...     ],
        ...     names=["A", "B", "C", "D"],
        ... )
        >>> s
        A  B  C  D
        1  2  a  0    3.0
                 1    NaN
           1  b  0    1.0
                 1    3.0
        2  1  b  0    NaN
                 1    NaN
        dtype: float64

        >>> ss = s.astype("Sparse")
        >>> ss
        A  B  C  D
        1  2  a  0    3.0
                 1    NaN
           1  b  0    1.0
                 1    3.0
        2  1  b  0    NaN
                 1    NaN
        dtype: Sparse[float64, nan]

        >>> A, rows, columns = ss.sparse.to_coo(
        ...     row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
        ... )
        >>> A
        <3x4 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in COOrdinate format>
        >>> A.todense()
        matrix([[0., 0., 1., 3.],
        [3., 0., 0., 0.],
        [0., 0., 0., 0.]])

        >>> rows
        [(1, 1), (1, 2), (2, 1)]
        >>> columns
        [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
        """
        from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo

        A, rows, columns = sparse_series_to_coo(
            self._parent, row_levels, column_levels, sort_labels=sort_labels
        )
        return A, rows, columns

    def to_dense(self) -> Series:
        """
        Convert a Series from sparse values to dense.

        .. versionadded:: 0.25.0

        Returns
        -------
        Series:
            A Series with the same values, stored as a dense array.

        Examples
        --------
        >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
        >>> series
        0    0
        1    1
        2    0
        dtype: Sparse[int64, 0]

        >>> series.sparse.to_dense()
        0    0
        1    1
        2    0
        dtype: int64
        """
        from pandas import Series

        # Densify the SparseArray; index and name are carried over unchanged.
        return Series(
            self._parent.array.to_dense(),
            index=self._parent.index,
            name=self._parent.name,
        )
class SparseFrameAccessor(BaseAccessor, PandasDelegate):
    """
    DataFrame accessor for sparse data.

    .. versionadded:: 0.25.0
    """

    def _validate(self, data):
        # Every column must be sparse for the accessor to be usable.
        dtypes = data.dtypes
        if not all(isinstance(t, SparseDtype) for t in dtypes):
            raise AttributeError(self._validation_msg)

    @classmethod
    def from_spmatrix(cls, data, index=None, columns=None) -> DataFrame:
        """
        Create a new DataFrame from a scipy sparse matrix.

        .. versionadded:: 0.25.0

        Parameters
        ----------
        data : scipy.sparse.spmatrix
            Must be convertible to csc format.
        index, columns : Index, optional
            Row and column labels to use for the resulting DataFrame.
            Defaults to a RangeIndex.

        Returns
        -------
        DataFrame
            Each column of the DataFrame is stored as a
            :class:`arrays.SparseArray`.

        Examples
        --------
        >>> import scipy.sparse
        >>> mat = scipy.sparse.eye(3)
        >>> pd.DataFrame.sparse.from_spmatrix(mat)
             0    1    2
        0  1.0  0.0  0.0
        1  0.0  1.0  0.0
        2  0.0  0.0  1.0
        """
        from pandas._libs.sparse import IntIndex

        from pandas import DataFrame

        data = data.tocsc()
        index, columns = cls._prep_index(data, index, columns)
        n_rows, n_columns = data.shape
        # We need to make sure indices are sorted, as we create
        # IntIndex with no input validation (i.e. check_integrity=False ).
        # Indices may already be sorted in scipy in which case this adds
        # a small overhead.
        data.sort_indices()
        indices = data.indices
        indptr = data.indptr
        array_data = data.data
        dtype = SparseDtype(array_data.dtype, 0)
        arrays = []
        for i in range(n_columns):
            # Slice out one CSC column and wrap it as a SparseArray.
            sl = slice(indptr[i], indptr[i + 1])
            idx = IntIndex(n_rows, indices[sl], check_integrity=False)
            arr = SparseArray._simple_new(array_data[sl], idx, dtype)
            arrays.append(arr)
        return DataFrame._from_arrays(
            arrays, columns=columns, index=index, verify_integrity=False
        )

    def to_dense(self) -> DataFrame:
        """
        Convert a DataFrame with sparse values to dense.

        .. versionadded:: 0.25.0

        Returns
        -------
        DataFrame
            A DataFrame with the same values stored as dense arrays.

        Examples
        --------
        >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
        >>> df.sparse.to_dense()
           A
        0  0
        1  1
        2  0
        """
        from pandas import DataFrame

        data = {k: v.array.to_dense() for k, v in self._parent.items()}
        return DataFrame(data, index=self._parent.index, columns=self._parent.columns)

    def to_coo(self):
        """
        Return the contents of the frame as a sparse SciPy COO matrix.

        .. versionadded:: 0.25.0

        Returns
        -------
        coo_matrix : scipy.sparse.spmatrix
            If the caller is heterogeneous and contains booleans or objects,
            the result will be of dtype=object. See Notes.

        Notes
        -----
        The dtype will be the lowest-common-denominator type (implicit
        upcasting); that is to say if the dtypes (even of numeric types)
        are mixed, the one that accommodates all will be chosen.

        e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. By numpy.find_common_type convention, mixing int64 and
        uint64 will result in a float64 dtype.
        """
        import_optional_dependency("scipy")
        from scipy.sparse import coo_matrix

        dtype = find_common_type(self._parent.dtypes.to_list())
        if isinstance(dtype, SparseDtype):
            dtype = dtype.subtype

        cols, rows, data = [], [], []
        for col, (_, ser) in enumerate(self._parent.items()):
            sp_arr = ser.array
            # COO implicitly treats unstored entries as 0, so a nonzero
            # fill value cannot be represented.
            if sp_arr.fill_value != 0:
                raise ValueError("fill value must be 0 when converting to COO matrix")

            row = sp_arr.sp_index.indices
            cols.append(np.repeat(col, len(row)))
            rows.append(row)
            data.append(sp_arr.sp_values.astype(dtype, copy=False))

        cols = np.concatenate(cols)
        rows = np.concatenate(rows)
        data = np.concatenate(data)
        return coo_matrix((data, (rows, cols)), shape=self._parent.shape)

    @property
    def density(self) -> float:
        """
        Ratio of non-sparse points to total (dense) data points.
        """
        # Mean of per-column densities (columns may differ in sparsity).
        tmp = np.mean([column.array.density for _, column in self._parent.items()])
        return tmp

    @staticmethod
    def _prep_index(data, index, columns):
        # Normalise/validate user-supplied labels against the matrix shape,
        # defaulting to RangeIndex when not provided.
        from pandas.core.indexes.api import (
            default_index,
            ensure_index,
        )

        N, K = data.shape
        if index is None:
            index = default_index(N)
        else:
            index = ensure_index(index)
        if columns is None:
            columns = default_index(K)
        else:
            columns = ensure_index(columns)

        if len(columns) != K:
            raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
        if len(index) != N:
            raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
        return index, columns
| bsd-3-clause | 71966b4b1e1701866c60ef75a672e088 | 29.162025 | 86 | 0.531056 | 3.971333 | false | false | false | false |
pandas-dev/pandas | pandas/core/indexers/objects.py | 2 | 12836 | """Indexer objects for computing start/end window bounds for rolling operations"""
from __future__ import annotations
from datetime import timedelta
import numpy as np
from pandas._libs.window.indexers import calculate_variable_window_bounds
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import ensure_platform_int
from pandas.tseries.offsets import Nano
get_window_bounds_doc = """
Computes the bounds of a window.
Parameters
----------
num_values : int, default 0
number of values that will be aggregated over
window_size : int, default 0
the number of rows in a window
min_periods : int, default None
min_periods passed from the top level rolling API
center : bool, default None
center passed from the top level rolling API
closed : str, default None
closed passed from the top level rolling API
step : int, default None
step passed from the top level rolling API
.. versionadded:: 1.5
win_type : str, default None
win_type passed from the top level rolling API
Returns
-------
A tuple of ndarray[int64]s, indicating the boundaries of each
window
"""
class BaseIndexer:
"""Base class for window bounds calculations."""
def __init__(
self, index_array: np.ndarray | None = None, window_size: int = 0, **kwargs
) -> None:
"""
Parameters
----------
**kwargs :
keyword arguments that will be available when get_window_bounds is called
"""
self.index_array = index_array
self.window_size = window_size
# Set user defined kwargs as attributes that can be used in get_window_bounds
for key, value in kwargs.items():
setattr(self, key, value)
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: int | None = None,
center: bool | None = None,
closed: str | None = None,
step: int | None = None,
) -> tuple[np.ndarray, np.ndarray]:
raise NotImplementedError
class FixedWindowIndexer(BaseIndexer):
"""Creates window boundaries that are of fixed length."""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: int | None = None,
center: bool | None = None,
closed: str | None = None,
step: int | None = None,
) -> tuple[np.ndarray, np.ndarray]:
if center:
offset = (self.window_size - 1) // 2
else:
offset = 0
end = np.arange(1 + offset, num_values + 1 + offset, step, dtype="int64")
start = end - self.window_size
if closed in ["left", "both"]:
start -= 1
if closed in ["left", "neither"]:
end -= 1
end = np.clip(end, 0, num_values)
start = np.clip(start, 0, num_values)
return start, end
class VariableWindowIndexer(BaseIndexer):
"""Creates window boundaries that are of variable length, namely for time series."""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: int | None = None,
center: bool | None = None,
closed: str | None = None,
step: int | None = None,
) -> tuple[np.ndarray, np.ndarray]:
# error: Argument 4 to "calculate_variable_window_bounds" has incompatible
# type "Optional[bool]"; expected "bool"
# error: Argument 6 to "calculate_variable_window_bounds" has incompatible
# type "Optional[ndarray]"; expected "ndarray"
return calculate_variable_window_bounds(
num_values,
self.window_size,
min_periods,
center, # type: ignore[arg-type]
closed,
self.index_array, # type: ignore[arg-type]
)
class VariableOffsetWindowIndexer(BaseIndexer):
    """Calculate window boundaries based on a non-fixed offset such as a BusinessDay."""
    def __init__(
        self,
        index_array: np.ndarray | None = None,
        window_size: int = 0,
        index=None,
        offset=None,
        **kwargs,
    ) -> None:
        """
        Parameters
        ----------
        index_array : np.ndarray or None
            Passed through to ``BaseIndexer``.
        window_size : int, default 0
            Passed through to ``BaseIndexer``; the effective window span is
            determined by ``offset``, not by this value.
        index : index object or None
            Index of the values being rolled over; element-wise subtraction
            must yield timedeltas. Presumably a DatetimeIndex -- TODO confirm
            against callers.
        offset : offset object or None
            Non-fixed frequency offset (e.g. BusinessDay) defining how far
            back each window reaches.
        """
        super().__init__(index_array, window_size, **kwargs)
        self.index = index
        self.offset = offset
    @Appender(get_window_bounds_doc)
    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: int | None = None,
        center: bool | None = None,
        closed: str | None = None,
        step: int | None = None,
    ) -> tuple[np.ndarray, np.ndarray]:
        if step is not None:
            raise NotImplementedError("step not implemented for variable offset window")
        if num_values <= 0:
            return np.empty(0, dtype="int64"), np.empty(0, dtype="int64")
        # if windows is variable, default is 'right', otherwise default is 'both'
        if closed is None:
            closed = "right" if self.index is not None else "both"
        right_closed = closed in ["right", "both"]
        left_closed = closed in ["left", "both"]
        # Direction of the index: -1 when it decreases from first to last
        # element; all comparisons below are multiplied by this sign.
        if self.index[num_values - 1] < self.index[0]:
            index_growth_sign = -1
        else:
            index_growth_sign = 1
        start = np.empty(num_values, dtype="int64")
        start.fill(-1)
        end = np.empty(num_values, dtype="int64")
        end.fill(-1)
        start[0] = 0
        # right endpoint is closed
        if right_closed:
            end[0] = 1
        # right endpoint is open
        else:
            end[0] = 0
        # start is start of slice interval (including)
        # end is end of slice interval (not including)
        for i in range(1, num_values):
            end_bound = self.index[i]
            # Window reaches back one `offset` from the current index value.
            start_bound = self.index[i] - index_growth_sign * self.offset
            # left endpoint is closed
            if left_closed:
                start_bound -= Nano(1)
            # advance the start bound until we are
            # within the constraint
            start[i] = i
            # Only scan from the previous window's start: starts are
            # monotone, so earlier positions cannot re-enter the window.
            for j in range(start[i - 1], i):
                if (self.index[j] - start_bound) * index_growth_sign > timedelta(0):
                    start[i] = j
                    break
            # end bound is previous end
            # or current index
            if (self.index[end[i - 1]] - end_bound) * index_growth_sign <= timedelta(0):
                end[i] = i + 1
            else:
                end[i] = end[i - 1]
            # right endpoint is open
            if not right_closed:
                end[i] -= 1
        return start, end
class ExpandingIndexer(BaseIndexer):
"""Calculate expanding window bounds, mimicking df.expanding()"""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: int | None = None,
center: bool | None = None,
closed: str | None = None,
step: int | None = None,
) -> tuple[np.ndarray, np.ndarray]:
return (
np.zeros(num_values, dtype=np.int64),
np.arange(1, num_values + 1, dtype=np.int64),
)
class FixedForwardWindowIndexer(BaseIndexer):
"""
Creates window boundaries for fixed-length windows that include the current row.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
>>> df.rolling(window=indexer, min_periods=1).sum()
B
0 1.0
1 3.0
2 2.0
3 4.0
4 4.0
"""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: int | None = None,
center: bool | None = None,
closed: str | None = None,
step: int | None = None,
) -> tuple[np.ndarray, np.ndarray]:
if center:
raise ValueError("Forward-looking windows can't have center=True")
if closed is not None:
raise ValueError(
"Forward-looking windows don't support setting the closed argument"
)
if step is None:
step = 1
start = np.arange(0, num_values, step, dtype="int64")
end = start + self.window_size
if self.window_size:
end = np.clip(end, 0, num_values)
return start, end
class GroupbyIndexer(BaseIndexer):
"""Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()"""
def __init__(
self,
index_array: np.ndarray | None = None,
window_size: int | BaseIndexer = 0,
groupby_indices: dict | None = None,
window_indexer: type[BaseIndexer] = BaseIndexer,
indexer_kwargs: dict | None = None,
**kwargs,
) -> None:
"""
Parameters
----------
index_array : np.ndarray or None
np.ndarray of the index of the original object that we are performing
a chained groupby operation over. This index has been pre-sorted relative to
the groups
window_size : int or BaseIndexer
window size during the windowing operation
groupby_indices : dict or None
dict of {group label: [positional index of rows belonging to the group]}
window_indexer : BaseIndexer
BaseIndexer class determining the start and end bounds of each group
indexer_kwargs : dict or None
Custom kwargs to be passed to window_indexer
**kwargs :
keyword arguments that will be available when get_window_bounds is called
"""
self.groupby_indices = groupby_indices or {}
self.window_indexer = window_indexer
self.indexer_kwargs = indexer_kwargs.copy() if indexer_kwargs else {}
super().__init__(
index_array=index_array,
window_size=self.indexer_kwargs.pop("window_size", window_size),
**kwargs,
)
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: int | None = None,
center: bool | None = None,
closed: str | None = None,
step: int | None = None,
) -> tuple[np.ndarray, np.ndarray]:
# 1) For each group, get the indices that belong to the group
# 2) Use the indices to calculate the start & end bounds of the window
# 3) Append the window bounds in group order
start_arrays = []
end_arrays = []
window_indices_start = 0
for key, indices in self.groupby_indices.items():
index_array: np.ndarray | None
if self.index_array is not None:
index_array = self.index_array.take(ensure_platform_int(indices))
else:
index_array = self.index_array
indexer = self.window_indexer(
index_array=index_array,
window_size=self.window_size,
**self.indexer_kwargs,
)
start, end = indexer.get_window_bounds(
len(indices), min_periods, center, closed, step
)
start = start.astype(np.int64)
end = end.astype(np.int64)
assert len(start) == len(
end
), "these should be equal in length from get_window_bounds"
# Cannot use groupby_indices as they might not be monotonic with the object
# we're rolling over
window_indices = np.arange(
window_indices_start, window_indices_start + len(indices)
)
window_indices_start += len(indices)
# Extend as we'll be slicing window like [start, end)
window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype(
np.int64, copy=False
)
start_arrays.append(window_indices.take(ensure_platform_int(start)))
end_arrays.append(window_indices.take(ensure_platform_int(end)))
if len(start_arrays) == 0:
return np.array([], dtype=np.int64), np.array([], dtype=np.int64)
start = np.concatenate(start_arrays)
end = np.concatenate(end_arrays)
return start, end
class ExponentialMovingWindowIndexer(BaseIndexer):
"""Calculate ewm window bounds (the entire window)"""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: int | None = None,
center: bool | None = None,
closed: str | None = None,
step: int | None = None,
) -> tuple[np.ndarray, np.ndarray]:
return np.array([0], dtype=np.int64), np.array([num_values], dtype=np.int64)
| bsd-3-clause | e5694d70db3de8bb3028f415feae8d8d | 31.251256 | 88 | 0.572375 | 4.015014 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.